-rw-r--r--  Makefile.am | 51
-rw-r--r--  configure.ac | 12
-rwxr-xr-x  contrib/devtools/check-doc.py | 2
-rwxr-xr-x  contrib/filter-lcov.py | 25
-rw-r--r--  contrib/gitian-keys/laanwj-key.pgp | bin 1713 -> 17689 bytes
-rw-r--r--  contrib/rpm/README.md | 2
-rw-r--r--  contrib/rpm/bitcoin.if | 2
-rw-r--r--  contrib/verifybinaries/README.md | 8
-rwxr-xr-x  contrib/verifybinaries/verify.sh | 43
-rw-r--r--  depends/packages/expat.mk | 4
-rw-r--r--  doc/developer-notes.md | 19
-rw-r--r--  doc/release-notes/release-notes-0.14.2.md | 102
-rw-r--r--  share/pixmaps/nsis-header.bmp | bin 25818 -> 25820 bytes
-rw-r--r--  share/pixmaps/nsis-wizard.bmp | bin 154542 -> 154544 bytes
-rw-r--r--  src/.clang-format | 2
-rw-r--r--  src/Makefile.am | 1
-rw-r--r--  src/addrdb.cpp | 214
-rw-r--r--  src/addrdb.h | 2
-rw-r--r--  src/arith_uint256.cpp | 2
-rw-r--r--  src/arith_uint256.h | 10
-rw-r--r--  src/base58.cpp | 2
-rw-r--r--  src/bench/bench_bitcoin.cpp | 2
-rw-r--r--  src/bench/checkblock.cpp | 2
-rw-r--r--  src/bitcoin-tx.cpp | 1
-rw-r--r--  src/chain.cpp | 21
-rw-r--r--  src/chain.h | 3
-rw-r--r--  src/chainparams.cpp | 20
-rw-r--r--  src/chainparams.h | 4
-rw-r--r--  src/coins.cpp | 24
-rw-r--r--  src/coins.h | 35
-rw-r--r--  src/compat/glibc_sanity.cpp | 2
-rw-r--r--  src/compat/glibcxx_sanity.cpp | 2
-rw-r--r--  src/consensus/consensus.h | 8
-rw-r--r--  src/consensus/tx_verify.cpp | 8
-rw-r--r--  src/cuckoocache.h | 35
-rw-r--r--  src/dbwrapper.cpp | 4
-rw-r--r--  src/httprpc.cpp | 57
-rw-r--r--  src/httpserver.cpp | 46
-rw-r--r--  src/httpserver.h | 4
-rw-r--r--  src/init.cpp | 181
-rw-r--r--  src/keystore.h | 24
-rw-r--r--  src/leveldb/db/version_set.h | 2
-rw-r--r--  src/net.cpp | 91
-rw-r--r--  src/net.h | 11
-rw-r--r--  src/net_processing.cpp | 27
-rw-r--r--  src/netaddress.cpp | 48
-rw-r--r--  src/netaddress.h | 8
-rw-r--r--  src/netbase.cpp | 9
-rw-r--r--  src/policy/fees.cpp | 90
-rw-r--r--  src/policy/fees.h | 36
-rw-r--r--  src/protocol.cpp | 2
-rw-r--r--  src/pubkey.cpp | 2
-rw-r--r--  src/qt/bitcoin.cpp | 1
-rw-r--r--  src/qt/clientmodel.cpp | 5
-rw-r--r--  src/qt/coincontroldialog.cpp | 17
-rw-r--r--  src/qt/forms/optionsdialog.ui | 6
-rw-r--r--  src/qt/intro.cpp | 2
-rw-r--r--  src/qt/modaloverlay.cpp | 2
-rw-r--r--  src/qt/rpcconsole.cpp | 4
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 12
-rw-r--r--  src/qt/splashscreen.cpp | 24
-rw-r--r--  src/qt/splashscreen.h | 8
-rw-r--r--  src/qt/test/rpcnestedtests.cpp | 2
-rw-r--r--  src/qt/transactionview.cpp | 4
-rw-r--r--  src/qt/walletmodel.cpp | 2
-rw-r--r--  src/random.cpp | 74
-rw-r--r--  src/random.h | 3
-rw-r--r--  src/rest.cpp | 2
-rw-r--r--  src/rpc/blockchain.cpp | 2
-rw-r--r--  src/rpc/client.cpp | 1
-rw-r--r--  src/rpc/mining.cpp | 55
-rw-r--r--  src/rpc/mining.h | 15
-rw-r--r--  src/rpc/net.cpp | 18
-rw-r--r--  src/rpc/rawtransaction.cpp | 12
-rw-r--r--  src/rpc/server.cpp | 17
-rw-r--r--  src/script/bitcoinconsensus.cpp | 2
-rw-r--r--  src/script/interpreter.cpp | 4
-rw-r--r--  src/script/interpreter.h | 6
-rw-r--r--  src/script/sigcache.cpp | 8
-rw-r--r--  src/script/sigcache.h | 2
-rw-r--r--  src/script/sign.cpp | 4
-rw-r--r--  src/script/sign.h | 8
-rw-r--r--  src/script/standard.cpp | 2
-rw-r--r--  src/secp256k1/sage/group_prover.sage | 2
-rw-r--r--  src/secp256k1/src/asm/field_10x26_arm.s | 2
-rw-r--r--  src/support/events.h | 10
-rw-r--r--  src/support/lockedpool.cpp | 12
-rw-r--r--  src/test/addrman_tests.cpp | 2
-rw-r--r--  src/test/allocator_tests.cpp | 6
-rw-r--r--  src/test/arith_uint256_tests.cpp | 2
-rw-r--r--  src/test/coins_tests.cpp | 24
-rw-r--r--  src/test/data/script_tests.json | 2
-rw-r--r--  src/test/data/tx_valid.json | 2
-rw-r--r--  src/test/net_tests.cpp | 4
-rw-r--r--  src/test/netbase_tests.cpp | 18
-rw-r--r--  src/test/script_tests.cpp | 2
-rw-r--r--  src/test/test_bitcoin.cpp | 2
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 284
-rw-r--r--  src/test/versionbits_tests.cpp | 10
-rw-r--r--  src/torcontrol.cpp | 6
-rw-r--r--  src/txdb.cpp | 82
-rw-r--r--  src/txdb.h | 19
-rw-r--r--  src/txmempool.cpp | 14
-rw-r--r--  src/txmempool.h | 17
-rw-r--r--  src/ui_interface.h | 3
-rw-r--r--  src/util.cpp | 20
-rw-r--r--  src/util.h | 130
-rw-r--r--  src/utilstrencodings.cpp | 8
-rw-r--r--  src/validation.cpp | 348
-rw-r--r--  src/validation.h | 12
-rw-r--r--  src/validationinterface.cpp | 3
-rw-r--r--  src/validationinterface.h | 3
-rw-r--r--  src/versionbits.cpp | 12
-rw-r--r--  src/wallet/crypter.cpp | 1
-rw-r--r--  src/wallet/crypter.h | 10
-rw-r--r--  src/wallet/db.cpp | 2
-rw-r--r--  src/wallet/feebumper.cpp | 2
-rw-r--r--  src/wallet/rpcdump.cpp | 30
-rw-r--r--  src/wallet/rpcwallet.cpp | 49
-rw-r--r--  src/wallet/wallet.cpp | 112
-rw-r--r--  src/wallet/wallet.h | 10
-rw-r--r--  src/wallet/walletdb.cpp | 10
-rw-r--r--  src/zmq/zmqpublishnotifier.h | 12
-rw-r--r--  test/README.md | 154
-rw-r--r--  test/functional/README.md | 174
-rwxr-xr-x  test/functional/bip9-softforks.py | 7
-rwxr-xr-x  test/functional/blockchain.py | 25
-rwxr-xr-x  test/functional/bumpfee.py | 4
-rwxr-xr-x  test/functional/dbcrash.py | 281
-rwxr-xr-x  test/functional/example_test.py | 219
-rwxr-xr-x  test/functional/fundrawtransaction.py | 4
-rwxr-xr-x  test/functional/keypool.py | 2
-rwxr-xr-x  test/functional/listtransactions.py | 2
-rwxr-xr-x  test/functional/merkle_blocks.py | 25
-rwxr-xr-x  test/functional/multi_rpc.py | 58
-rwxr-xr-x  test/functional/p2p-segwit.py | 12
-rwxr-xr-x  test/functional/pruning.py | 6
-rwxr-xr-x  test/functional/receivedby.py | 2
-rwxr-xr-x  test/functional/rest.py | 22
-rwxr-xr-x  test/functional/rpcbind_test.py | 4
-rwxr-xr-x  test/functional/smartfees.py | 2
-rwxr-xr-x  test/functional/test_framework/mininode.py | 2
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 204
-rw-r--r--  test/functional/test_framework/util.py | 615
-rwxr-xr-x  test/functional/test_runner.py | 11
-rwxr-xr-x  test/functional/uptime.py | 32
-rwxr-xr-x  test/functional/wallet-dump.py | 4
-rwxr-xr-x  test/functional/wallet-encryption.py | 64
-rwxr-xr-x  test/functional/wallet-hd.py | 3
-rwxr-xr-x  test/functional/walletbackup.py | 3
-rwxr-xr-x  test/functional/zapwallettxes.py | 2
-rwxr-xr-x  test/functional/zmq_test.py | 65
152 files changed, 3418 insertions, 1525 deletions
diff --git a/Makefile.am b/Makefile.am
index 40114a551f..8216b7d608 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -59,10 +59,10 @@ OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \
$(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \
$(top_srcdir)/contrib/macdeploy/detached-sig-create.sh
-COVERAGE_INFO = baseline_filtered_combined.info baseline.info \
- leveldb_baseline.info test_bitcoin_filtered.info total_coverage.info \
+COVERAGE_INFO = baseline.info \
+ test_bitcoin_filtered.info total_coverage.info \
baseline_filtered.info functional_test.info functional_test_filtered.info \
- leveldb_baseline_filtered.info test_bitcoin_coverage.info test_bitcoin.info
+ test_bitcoin_coverage.info test_bitcoin.info
dist-hook:
-$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf -
@@ -166,52 +166,45 @@ $(BITCOIN_CLI_BIN): FORCE
$(MAKE) -C src $(@F)
if USE_LCOV
+LCOV_FILTER_PATTERN=-p "/usr/include/" -p "src/leveldb/" -p "src/bench/" -p "src/univalue" -p "src/crypto/ctaes" -p "src/secp256k1"
baseline.info:
$(LCOV) -c -i -d $(abs_builddir)/src -o $@
baseline_filtered.info: baseline.info
- $(LCOV) -r $< "/usr/include/*" -o $@
+ $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
+ $(LCOV) -a $@ $(LCOV_OPTS) -o $@
-leveldb_baseline.info: baseline_filtered.info
- $(LCOV) -c -i -d $(abs_builddir)/src/leveldb -b $(abs_builddir)/src/leveldb -o $@
-
-leveldb_baseline_filtered.info: leveldb_baseline.info
- $(LCOV) -r $< "/usr/include/*" -o $@
-
-baseline_filtered_combined.info: leveldb_baseline_filtered.info baseline_filtered.info
- $(LCOV) -a leveldb_baseline_filtered.info -a baseline_filtered.info -o $@
-
-test_bitcoin.info: baseline_filtered_combined.info
+test_bitcoin.info: baseline_filtered.info
$(MAKE) -C src/ check
- $(LCOV) -c -d $(abs_builddir)/src -t test_bitcoin -o $@
- $(LCOV) -z -d $(abs_builddir)/src
- $(LCOV) -z -d $(abs_builddir)/src/leveldb
+ $(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src -t test_bitcoin -o $@
+ $(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src
test_bitcoin_filtered.info: test_bitcoin.info
- $(LCOV) -r $< "/usr/include/*" -o $@
+ $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
+ $(LCOV) -a $@ $(LCOV_OPTS) -o $@
functional_test.info: test_bitcoin_filtered.info
- -@TIMEOUT=15 python test/functional/test_runner.py $(EXTENDED_FUNCTIONAL_TESTS)
- $(LCOV) -c -d $(abs_builddir)/src --t functional-tests -o $@
- $(LCOV) -z -d $(abs_builddir)/src
- $(LCOV) -z -d $(abs_builddir)/src/leveldb
+ -@TIMEOUT=15 test/functional/test_runner.py $(EXTENDED_FUNCTIONAL_TESTS)
+ $(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src --t functional-tests -o $@
+ $(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src
functional_test_filtered.info: functional_test.info
- $(LCOV) -r $< "/usr/include/*" -o $@
+ $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
+ $(LCOV) -a $@ $(LCOV_OPTS) -o $@
-test_bitcoin_coverage.info: baseline_filtered_combined.info test_bitcoin_filtered.info
- $(LCOV) -a baseline_filtered.info -a leveldb_baseline_filtered.info -a test_bitcoin_filtered.info -o $@
+test_bitcoin_coverage.info: baseline_filtered.info test_bitcoin_filtered.info
+ $(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_bitcoin_filtered.info -o $@
-total_coverage.info: baseline_filtered_combined.info test_bitcoin_filtered.info functional_test_filtered.info
- $(LCOV) -a baseline_filtered.info -a leveldb_baseline_filtered.info -a test_bitcoin_filtered.info -a functional_test_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt
+total_coverage.info: test_bitcoin_filtered.info functional_test_filtered.info
+ $(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_bitcoin_filtered.info -a functional_test_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt
test_bitcoin.coverage/.dirstamp: test_bitcoin_coverage.info
- $(GENHTML) -s $< -o $(@D)
+ $(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
@touch $@
total.coverage/.dirstamp: total_coverage.info
- $(GENHTML) -s $< -o $(@D)
+ $(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
@touch $@
cov: test_bitcoin.coverage/.dirstamp total.coverage/.dirstamp
diff --git a/configure.ac b/configure.ac
index 5ad01d44e8..a90063d9de 100644
--- a/configure.ac
+++ b/configure.ac
@@ -158,6 +158,12 @@ AC_ARG_ENABLE([lcov],
[enable lcov testing (default is no)])],
[use_lcov=yes],
[use_lcov=no])
+
+AC_ARG_ENABLE([lcov-branch-coverage],
+ [AS_HELP_STRING([--enable-lcov-branch-coverage],
+ [enable lcov testing branch coverage (default is no)])],
+ [use_lcov_branch=yes],
+ [use_lcov_branch=no])
AC_ARG_ENABLE([glibc-back-compat],
[AS_HELP_STRING([--enable-glibc-back-compat],
@@ -442,6 +448,12 @@ if test x$use_lcov = xyes; then
[AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")])
AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"],
[AC_MSG_ERROR("lcov testing requested but --coverage flag does not work")])
+ AC_DEFINE(USE_COVERAGE, 1, [Define this symbol if coverage is enabled])
+ CXXFLAGS="$CXXFLAGS -Og"
+fi
+
+if test x$use_lcov_branch != xno; then
+ AC_SUBST(LCOV_OPTS, "$LCOV_OPTS --rc lcov_branch_coverage=1")
fi
dnl Check for endianness
diff --git a/contrib/devtools/check-doc.py b/contrib/devtools/check-doc.py
index 445175ec2b..150f368513 100755
--- a/contrib/devtools/check-doc.py
+++ b/contrib/devtools/check-doc.py
@@ -21,7 +21,7 @@ CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_RO
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
-SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions'])
+SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
diff --git a/contrib/filter-lcov.py b/contrib/filter-lcov.py
new file mode 100755
index 0000000000..299377d691
--- /dev/null
+++ b/contrib/filter-lcov.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+import argparse
+
+parser = argparse.ArgumentParser(description='Remove the coverage data from a tracefile for all files matching the pattern.')
+parser.add_argument('--pattern', '-p', action='append', help='the pattern of files to remove', required=True)
+parser.add_argument('tracefile', help='the tracefile to remove the coverage data from')
+parser.add_argument('outfile', help='filename for the output to be written to')
+
+args = parser.parse_args()
+tracefile = args.tracefile
+pattern = args.pattern
+outfile = args.outfile
+
+in_remove = False
+with open(tracefile, 'r') as f:
+ with open(outfile, 'w') as wf:
+ for line in f:
+ for p in pattern:
+ if line.startswith("SF:") and p in line:
+ in_remove = True
+ if not in_remove:
+ wf.write(line)
+ if line == 'end_of_record\n':
+ in_remove = False
diff --git a/contrib/gitian-keys/laanwj-key.pgp b/contrib/gitian-keys/laanwj-key.pgp
index 559295109d..eed232a872 100644
--- a/contrib/gitian-keys/laanwj-key.pgp
+++ b/contrib/gitian-keys/laanwj-key.pgp
Binary files differ
diff --git a/contrib/rpm/README.md b/contrib/rpm/README.md
index e1fd0b317b..4ab2f35680 100644
--- a/contrib/rpm/README.md
+++ b/contrib/rpm/README.md
@@ -181,5 +181,5 @@ knows what they are getting when installing the GUI package.
As far as minor differences, I generally prefer to assign the file permissions
in the `%files` portion of an RPM spec file rather than specifying the
-permissions of a file during `%install` and other minor things like that that
+permissions of a file during `%install` and other minor things like that
are largely just cosmetic.
diff --git a/contrib/rpm/bitcoin.if b/contrib/rpm/bitcoin.if
index 2b096c24dc..b206866cc5 100644
--- a/contrib/rpm/bitcoin.if
+++ b/contrib/rpm/bitcoin.if
@@ -121,7 +121,7 @@ interface(`bitcoin_manage_lib_dirs',`
########################################
## <summary>
## All of the rules required to administrate
-## an bitcoin environment
+## a bitcoin environment
## </summary>
## <param name="domain">
## <summary>
diff --git a/contrib/verifybinaries/README.md b/contrib/verifybinaries/README.md
index ed3e14fb6c..3ffe0a2f28 100644
--- a/contrib/verifybinaries/README.md
+++ b/contrib/verifybinaries/README.md
@@ -26,6 +26,14 @@ The script returns 0 if everything passes the checks. It returns 1 if either the
./verify.sh bitcoin-core-0.13.0-rc3
```
+If you only want to download the binaries of certain platform, add the corresponding suffix, e.g.:
+
+```sh
+./verify.sh bitcoin-core-0.11.2-osx
+./verify.sh 0.12.0-linux
+./verify.sh bitcoin-core-0.13.0-rc3-win64
+```
+
If you do not want to keep the downloaded binaries, specify anything as the second parameter.
```sh
diff --git a/contrib/verifybinaries/verify.sh b/contrib/verifybinaries/verify.sh
index e20770c96a..c2cc2b7013 100755
--- a/contrib/verifybinaries/verify.sh
+++ b/contrib/verifybinaries/verify.sh
@@ -42,13 +42,36 @@ if [ -n "$1" ]; then
VERSION="$VERSIONPREFIX$1"
fi
- #now let's see if the version string contains "rc", and strip it off if it does
- # and simultaneously add RCSUBDIR to BASEDIR, where we will look for SIGNATUREFILENAME
- if [[ $VERSION == *"$RCVERSIONSTRING"* ]]; then
- BASEDIR="$BASEDIR${VERSION/%-$RCVERSIONSTRING*}/"
- BASEDIR="$BASEDIR$RCSUBDIR.$RCVERSIONSTRING${VERSION: -1}/"
+ STRIPPEDLAST="${VERSION%-*}"
+
+ #now let's see if the version string contains "rc" or a platform name (e.g. "osx")
+ if [[ "$STRIPPEDLAST-" == "$VERSIONPREFIX" ]]; then
+ BASEDIR="$BASEDIR$VERSION/"
else
+ # let's examine the last part to see if it's rc and/or platform name
+ STRIPPEDNEXTTOLAST="${STRIPPEDLAST%-*}"
+ if [[ "$STRIPPEDNEXTTOLAST-" == "$VERSIONPREFIX" ]]; then
+
+ LASTSUFFIX="${VERSION##*-}"
+ VERSION="$STRIPPEDLAST"
+
+ if [[ $LASTSUFFIX == *"$RCVERSIONSTRING"* ]]; then
+ RCVERSION="$LASTSUFFIX"
+ else
+ PLATFORM="$LASTSUFFIX"
+ fi
+
+ else
+ RCVERSION="${STRIPPEDLAST##*-}"
+ PLATFORM="${VERSION##*-}"
+
+ VERSION="$STRIPPEDNEXTTOLAST"
+ fi
+
BASEDIR="$BASEDIR$VERSION/"
+ if [[ $RCVERSION == *"$RCVERSIONSTRING"* ]]; then
+ BASEDIR="$BASEDIR$RCSUBDIR.$RCVERSION/"
+ fi
fi
SIGNATUREFILE="$BASEDIR$SIGNATUREFILENAME"
@@ -92,12 +115,22 @@ if [ $RET -ne 0 ]; then
exit "$RET"
fi
+if [ -n "$PLATFORM" ]; then
+ grep $PLATFORM $TMPFILE > "$TMPFILE-plat"
+ TMPFILESIZE=$(stat -c%s "$TMPFILE-plat")
+ if [ $TMPFILESIZE -eq 0 ]; then
+ echo "error: no files matched the platform specified" && exit 3
+ fi
+ mv "$TMPFILE-plat" $TMPFILE
+fi
+
#here we extract the filenames from the signature file
FILES=$(awk '{print $2}' "$TMPFILE")
#and download these one by one
for file in $FILES
do
+ echo "Downloading $file"
wget --quiet -N "$BASEDIR$file"
done
diff --git a/depends/packages/expat.mk b/depends/packages/expat.mk
index 81a660e83a..7f484724a4 100644
--- a/depends/packages/expat.mk
+++ b/depends/packages/expat.mk
@@ -1,8 +1,8 @@
package=expat
-$(package)_version=2.2.0
+$(package)_version=2.2.1
$(package)_download_path=https://downloads.sourceforge.net/project/expat/expat/$($(package)_version)
$(package)_file_name=$(package)-$($(package)_version).tar.bz2
-$(package)_sha256_hash=d9e50ff2d19b3538bd2127902a89987474e1a4db8e43a66a4d1a712ab9a504ff
+$(package)_sha256_hash=1868cadae4c82a018e361e2b2091de103cd820aaacb0d6cfa49bd2cd83978885
define $(package)_set_vars
$(package)_config_opts=--disable-static
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index a596ea0117..81bdcc9fdb 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -68,7 +68,7 @@ public:
return true;
}
}
-}
+} // namespace foo
```
Doxygen comments
@@ -287,7 +287,7 @@ General C++
- Assertions should not have side-effects
- - *Rationale*: Even though the source code is set to to refuse to compile
+ - *Rationale*: Even though the source code is set to refuse to compile
with assertions disabled, having side-effects in assertions is unexpected and
makes the code harder to understand
@@ -438,6 +438,21 @@ Source code organization
- *Rationale*: Avoids symbol conflicts
+- Terminate namespaces with a comment (`// namespace mynamespace`). The comment
+ should be placed on the same line as the brace closing the namespace, e.g.
+
+```c++
+namespace mynamespace {
+ ...
+} // namespace mynamespace
+
+namespace {
+ ...
+} // namespace
+```
+
+ - *Rationale*: Avoids confusion about the namespace context
+
GUI
-----
diff --git a/doc/release-notes/release-notes-0.14.2.md b/doc/release-notes/release-notes-0.14.2.md
new file mode 100644
index 0000000000..0ad554b773
--- /dev/null
+++ b/doc/release-notes/release-notes-0.14.2.md
@@ -0,0 +1,102 @@
+Bitcoin Core version 0.14.2 is now available from:
+
+ <https://bitcoin.org/bin/bitcoin-core-0.14.2/>
+
+This is a new minor version release, including various bugfixes and
+performance improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at github:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+Compatibility
+==============
+
+Bitcoin Core is extensively tested on multiple operating systems using
+the Linux kernel, macOS 10.8+, and Windows Vista and later.
+
+Microsoft ended support for Windows XP on [April 8th, 2014](https://www.microsoft.com/en-us/WindowsForBusiness/end-of-xp-support),
+No attempt is made to prevent installing or running the software on Windows XP, you
+can still do so at your own risk but be aware that there are known instabilities and issues.
+Please do not report issues about Windows XP to the issue tracker.
+
+Bitcoin Core should also work on most other Unix-like systems but is not
+frequently tested on them.
+
+Notable changes
+===============
+
+miniupnp CVE-2017-8798
+----------------------------
+
+Bundled miniupnpc was updated to 2.0.20170509. This fixes an integer signedness error
+(present in MiniUPnPc v1.4.20101221 through v2.0) that allows remote attackers
+(within the LAN) to cause a denial of service or possibly have unspecified
+other impact.
+
+This only affects users that have explicitly enabled UPnP through the GUI
+setting or through the `-upnp` option, as since the last UPnP vulnerability
+(in Bitcoin Core 0.10.3) it has been disabled by default.
+
+If you use this option, it is recommended to upgrade to this version as soon as
+possible.
+
+Known Bugs
+==========
+
+Since 0.14.0 the approximate transaction fee shown in Bitcoin-Qt when using coin
+control and smart fee estimation does not reflect any change in target from the
+smart fee slider. It will only present an approximate fee calculated using the
+default target. The fee calculated using the correct target is still applied to
+the transaction and shown in the final send confirmation dialog.
+
+0.14.2 Change log
+=================
+
+Detailed release notes follow. This overview includes changes that affect
+behavior, not code moves, refactors and string updates. For convenience in locating
+the code changes and accompanying discussion, both the pull request and
+git merge commit are mentioned.
+
+### RPC and other APIs
+- #10410 `321419b` Fix importwallet edge case rescan bug (ryanofsky)
+
+### P2P protocol and network code
+- #10424 `37a8fc5` Populate services in GetLocalAddress (morcos)
+- #10441 `9e3ad50` Only enforce expected services for half of outgoing connections (theuni)
+
+### Build system
+- #10414 `ffb0c4b` miniupnpc 2.0.20170509 (fanquake)
+- #10228 `ae479bc` Regenerate bitcoin-config.h as necessary (theuni)
+
+### Miscellaneous
+- #10245 `44a17f2` Minor fix in build documentation for FreeBSD 11 (shigeya)
+- #10215 `0aee4a1` Check interruptNet during dnsseed lookups (TheBlueMatt)
+
+### GUI
+- #10231 `1e936d7` Reduce a significant cs_main lock freeze (jonasschnelli)
+
+### Wallet
+- #10294 `1847642` Unset change position when there is no change (instagibbs)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- Alex Morcos
+- Cory Fields
+- fanquake
+- Gregory Sanders
+- Jonas Schnelli
+- Matt Corallo
+- Russell Yanofsky
+- Shigeya Suzuki
+- Wladimir J. van der Laan
+
+As well as everyone that helped translating on [Transifex](https://www.transifex.com/projects/p/bitcoin/).
+
diff --git a/share/pixmaps/nsis-header.bmp b/share/pixmaps/nsis-header.bmp
index 9ab0ce2591..f54e249a0c 100644
--- a/share/pixmaps/nsis-header.bmp
+++ b/share/pixmaps/nsis-header.bmp
Binary files differ
diff --git a/share/pixmaps/nsis-wizard.bmp b/share/pixmaps/nsis-wizard.bmp
index 71255c6850..1434952885 100644
--- a/share/pixmaps/nsis-wizard.bmp
+++ b/share/pixmaps/nsis-wizard.bmp
Binary files differ
diff --git a/src/.clang-format b/src/.clang-format
index 9a700b750d..2d2ee67035 100644
--- a/src/.clang-format
+++ b/src/.clang-format
@@ -46,6 +46,6 @@ SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
-Standard: Cpp03
+Standard: Cpp11
TabWidth: 8
UseTab: Never
diff --git a/src/Makefile.am b/src/Makefile.am
index a37c6a502b..06b09404a7 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -128,6 +128,7 @@ BITCOIN_CORE_H = \
reverselock.h \
rpc/blockchain.h \
rpc/client.h \
+ rpc/mining.h \
rpc/protocol.h \
rpc/server.h \
rpc/register.h \
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index a3743cd0d4..7f85c16585 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -15,25 +15,31 @@
#include "tinyformat.h"
#include "util.h"
+namespace {
-CBanDB::CBanDB()
+template <typename Stream, typename Data>
+bool SerializeDB(Stream& stream, const Data& data)
{
- pathBanlist = GetDataDir() / "banlist.dat";
+ // Write and commit header, data
+ try {
+ CHashWriter hasher(SER_DISK, CLIENT_VERSION);
+ stream << FLATDATA(Params().MessageStart()) << data;
+ hasher << FLATDATA(Params().MessageStart()) << data;
+ stream << hasher.GetHash();
+ } catch (const std::exception& e) {
+ return error("%s: Serialize or I/O error - %s", __func__, e.what());
+ }
+
+ return true;
}
-bool CBanDB::Write(const banmap_t& banSet)
+template <typename Data>
+bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data& data)
{
// Generate random temporary filename
unsigned short randv = 0;
GetRandBytes((unsigned char*)&randv, sizeof(randv));
- std::string tmpfn = strprintf("banlist.dat.%04x", randv);
-
- // serialize banlist, checksum data up to that point, then append csum
- CDataStream ssBanlist(SER_DISK, CLIENT_VERSION);
- ssBanlist << FLATDATA(Params().MessageStart());
- ssBanlist << banSet;
- uint256 hash = Hash(ssBanlist.begin(), ssBanlist.end());
- ssBanlist << hash;
+ std::string tmpfn = strprintf("%s.%04x", prefix, randv);
// open temp output file, and associate with CAutoFile
fs::path pathTmp = GetDataDir() / tmpfn;
@@ -42,69 +48,41 @@ bool CBanDB::Write(const banmap_t& banSet)
if (fileout.IsNull())
return error("%s: Failed to open file %s", __func__, pathTmp.string());
- // Write and commit header, data
- try {
- fileout << ssBanlist;
- }
- catch (const std::exception& e) {
- return error("%s: Serialize or I/O error - %s", __func__, e.what());
- }
+ // Serialize
+ if (!SerializeDB(fileout, data)) return false;
FileCommit(fileout.Get());
fileout.fclose();
- // replace existing banlist.dat, if any, with new banlist.dat.XXXX
- if (!RenameOver(pathTmp, pathBanlist))
+ // replace existing file, if any, with new file
+ if (!RenameOver(pathTmp, path))
return error("%s: Rename-into-place failed", __func__);
return true;
}
-bool CBanDB::Read(banmap_t& banSet)
+template <typename Stream, typename Data>
+bool DeserializeDB(Stream& stream, Data& data, bool fCheckSum = true)
{
- // open input file, and associate with CAutoFile
- FILE *file = fsbridge::fopen(pathBanlist, "rb");
- CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
- if (filein.IsNull())
- return error("%s: Failed to open file %s", __func__, pathBanlist.string());
-
- // use file size to size memory buffer
- uint64_t fileSize = fs::file_size(pathBanlist);
- uint64_t dataSize = 0;
- // Don't try to resize to a negative number if file is small
- if (fileSize >= sizeof(uint256))
- dataSize = fileSize - sizeof(uint256);
- std::vector<unsigned char> vchData;
- vchData.resize(dataSize);
- uint256 hashIn;
-
- // read data and checksum from file
- try {
- filein.read((char *)&vchData[0], dataSize);
- filein >> hashIn;
- }
- catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
- }
- filein.fclose();
-
- CDataStream ssBanlist(vchData, SER_DISK, CLIENT_VERSION);
-
- // verify stored checksum matches input data
- uint256 hashTmp = Hash(ssBanlist.begin(), ssBanlist.end());
- if (hashIn != hashTmp)
- return error("%s: Checksum mismatch, data corrupted", __func__);
-
- unsigned char pchMsgTmp[4];
try {
+ CHashVerifier<Stream> verifier(&stream);
// de-serialize file header (network specific magic number) and ..
- ssBanlist >> FLATDATA(pchMsgTmp);
-
+ unsigned char pchMsgTmp[4];
+ verifier >> FLATDATA(pchMsgTmp);
// ... verify the network matches ours
if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
return error("%s: Invalid network magic number", __func__);
- // de-serialize ban data
- ssBanlist >> banSet;
+ // de-serialize data
+ verifier >> data;
+
+ // verify checksum
+ if (fCheckSum) {
+ uint256 hashTmp;
+ stream >> hashTmp;
+ if (hashTmp != verifier.GetHash()) {
+ return error("%s: Checksum mismatch, data corrupted", __func__);
+ }
+ }
}
catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
@@ -113,106 +91,56 @@ bool CBanDB::Read(banmap_t& banSet)
return true;
}
-CAddrDB::CAddrDB()
+template <typename Data>
+bool DeserializeFileDB(const fs::path& path, Data& data)
{
- pathAddr = GetDataDir() / "peers.dat";
+ // open input file, and associate with CAutoFile
+ FILE *file = fsbridge::fopen(path, "rb");
+ CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
+ if (filein.IsNull())
+ return error("%s: Failed to open file %s", __func__, path.string());
+
+ return DeserializeDB(filein, data);
}
-bool CAddrDB::Write(const CAddrMan& addr)
-{
- // Generate random temporary filename
- unsigned short randv = 0;
- GetRandBytes((unsigned char*)&randv, sizeof(randv));
- std::string tmpfn = strprintf("peers.dat.%04x", randv);
+}
- // serialize addresses, checksum data up to that point, then append csum
- CDataStream ssPeers(SER_DISK, CLIENT_VERSION);
- ssPeers << FLATDATA(Params().MessageStart());
- ssPeers << addr;
- uint256 hash = Hash(ssPeers.begin(), ssPeers.end());
- ssPeers << hash;
+CBanDB::CBanDB()
+{
+ pathBanlist = GetDataDir() / "banlist.dat";
+}
- // open temp output file, and associate with CAutoFile
- fs::path pathTmp = GetDataDir() / tmpfn;
- FILE *file = fsbridge::fopen(pathTmp, "wb");
- CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
- if (fileout.IsNull())
- return error("%s: Failed to open file %s", __func__, pathTmp.string());
+bool CBanDB::Write(const banmap_t& banSet)
+{
+ return SerializeFileDB("banlist", pathBanlist, banSet);
+}
- // Write and commit header, data
- try {
- fileout << ssPeers;
- }
- catch (const std::exception& e) {
- return error("%s: Serialize or I/O error - %s", __func__, e.what());
- }
- FileCommit(fileout.Get());
- fileout.fclose();
+bool CBanDB::Read(banmap_t& banSet)
+{
+ return DeserializeFileDB(pathBanlist, banSet);
+}
- // replace existing peers.dat, if any, with new peers.dat.XXXX
- if (!RenameOver(pathTmp, pathAddr))
- return error("%s: Rename-into-place failed", __func__);
+CAddrDB::CAddrDB()
+{
+ pathAddr = GetDataDir() / "peers.dat";
+}
- return true;
+bool CAddrDB::Write(const CAddrMan& addr)
+{
+ return SerializeFileDB("peers", pathAddr, addr);
}
bool CAddrDB::Read(CAddrMan& addr)
{
- // open input file, and associate with CAutoFile
- FILE *file = fsbridge::fopen(pathAddr, "rb");
- CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
- if (filein.IsNull())
- return error("%s: Failed to open file %s", __func__, pathAddr.string());
-
- // use file size to size memory buffer
- uint64_t fileSize = fs::file_size(pathAddr);
- uint64_t dataSize = 0;
- // Don't try to resize to a negative number if file is small
- if (fileSize >= sizeof(uint256))
- dataSize = fileSize - sizeof(uint256);
- std::vector<unsigned char> vchData;
- vchData.resize(dataSize);
- uint256 hashIn;
-
- // read data and checksum from file
- try {
- filein.read((char *)&vchData[0], dataSize);
- filein >> hashIn;
- }
- catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
- }
- filein.fclose();
-
- CDataStream ssPeers(vchData, SER_DISK, CLIENT_VERSION);
-
- // verify stored checksum matches input data
- uint256 hashTmp = Hash(ssPeers.begin(), ssPeers.end());
- if (hashIn != hashTmp)
- return error("%s: Checksum mismatch, data corrupted", __func__);
-
- return Read(addr, ssPeers);
+ return DeserializeFileDB(pathAddr, addr);
}
bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers)
{
- unsigned char pchMsgTmp[4];
- try {
- // de-serialize file header (network specific magic number) and ..
- ssPeers >> FLATDATA(pchMsgTmp);
-
- // ... verify the network matches ours
- if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
- return error("%s: Invalid network magic number", __func__);
-
- // de-serialize address data into one CAddrMan object
- ssPeers >> addr;
- }
- catch (const std::exception& e) {
- // de-serialization has failed, ensure addrman is left in a clean state
+ bool ret = DeserializeDB(ssPeers, addr, false);
+ if (!ret) {
+ // Ensure addrman is left in a clean state
addr.Clear();
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
-
- return true;
+ return ret;
}
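
The refactored helpers above give every flat-file database the same on-disk layout: the network message-start magic, the serialized data, and a trailing double-SHA256 of everything before it. A minimal round-trip sketch of that layout, assuming the Bitcoin Core headers for CDataStream, CHashWriter and CHashVerifier are available (the RoundTrip wrapper itself is illustrative only, not part of the patch):

#include "chainparams.h"
#include "clientversion.h"
#include "hash.h"
#include "streams.h"

// Layout: [4-byte message start][serialized data][32-byte hash of the above]
template <typename Data>
bool RoundTrip(const Data& in, Data& out)
{
    CDataStream stream(SER_DISK, CLIENT_VERSION);

    // Write path: everything written to the stream is also fed to the hasher,
    // and the resulting hash is appended as a checksum.
    CHashWriter hasher(SER_DISK, CLIENT_VERSION);
    stream << FLATDATA(Params().MessageStart()) << in;
    hasher << FLATDATA(Params().MessageStart()) << in;
    stream << hasher.GetHash();

    // Read path: CHashVerifier hashes whatever is read through it, so the
    // trailing checksum can be compared against the bytes actually consumed.
    CHashVerifier<CDataStream> verifier(&stream);
    unsigned char magic[4];
    verifier >> FLATDATA(magic);
    verifier >> out;
    uint256 checksum;
    stream >> checksum;
    return checksum == verifier.GetHash();
}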
diff --git a/src/addrdb.h b/src/addrdb.h
index c3d509bd3a..6cb36dfac4 100644
--- a/src/addrdb.h
+++ b/src/addrdb.h
@@ -85,7 +85,7 @@ public:
CAddrDB();
bool Write(const CAddrMan& addr);
bool Read(CAddrMan& addr);
- bool Read(CAddrMan& addr, CDataStream& ssPeers);
+ static bool Read(CAddrMan& addr, CDataStream& ssPeers);
};
/** Access to the banlist database (banlist.dat) */
diff --git a/src/arith_uint256.cpp b/src/arith_uint256.cpp
index dd34a313b7..b4952af6f4 100644
--- a/src/arith_uint256.cpp
+++ b/src/arith_uint256.cpp
@@ -15,6 +15,8 @@
template <unsigned int BITS>
base_uint<BITS>::base_uint(const std::string& str)
{
+ static_assert(BITS/32 > 0 && BITS%32 == 0, "Template parameter BITS must be a positive multiple of 32.");
+
SetHex(str);
}
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index 0f6b3d4fba..c7734035df 100644
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -31,12 +31,16 @@ public:
base_uint()
{
+ static_assert(BITS/32 > 0 && BITS%32 == 0, "Template parameter BITS must be a positive multiple of 32.");
+
for (int i = 0; i < WIDTH; i++)
pn[i] = 0;
}
base_uint(const base_uint& b)
{
+ static_assert(BITS/32 > 0 && BITS%32 == 0, "Template parameter BITS must be a positive multiple of 32.");
+
for (int i = 0; i < WIDTH; i++)
pn[i] = b.pn[i];
}
@@ -50,6 +54,8 @@ public:
base_uint(uint64_t b)
{
+ static_assert(BITS/32 > 0 && BITS%32 == 0, "Template parameter BITS must be a positive multiple of 32.");
+
pn[0] = (unsigned int)b;
pn[1] = (unsigned int)(b >> 32);
for (int i = 2; i < WIDTH; i++)
@@ -174,7 +180,7 @@ public:
{
// prefix operator
int i = 0;
- while (++pn[i] == 0 && i < WIDTH-1)
+ while (i < WIDTH && ++pn[i] == 0)
i++;
return *this;
}
@@ -191,7 +197,7 @@ public:
{
// prefix operator
int i = 0;
- while (--pn[i] == (uint32_t)-1 && i < WIDTH-1)
+ while (i < WIDTH && --pn[i] == (uint32_t)-1)
i++;
return *this;
}
diff --git a/src/base58.cpp b/src/base58.cpp
index 36b3523692..efa1beb1e4 100644
--- a/src/base58.cpp
+++ b/src/base58.cpp
@@ -225,7 +225,7 @@ public:
bool operator()(const CNoDestination& no) const { return false; }
};
-} // anon namespace
+} // namespace
bool CBitcoinAddress::Set(const CKeyID& id)
{
diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp
index 61a0b31aed..226861aa7f 100644
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -7,10 +7,12 @@
#include "key.h"
#include "validation.h"
#include "util.h"
+#include "random.h"
int
main(int argc, char** argv)
{
+ RandomInit();
ECC_Start();
SetupEnvironment();
fPrintToDebugLog = false; // don't want to write to debug.log file
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 195388839e..7bb1b93668 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -11,7 +11,7 @@
namespace block_bench {
#include "bench/data/block413567.raw.h"
-}
+} // namespace block_bench
// These are the two major time-sinks which happen after we have fully received
// a block off the wire, but before we can relay the block on to peers using
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 714ee555ec..6093f78fb1 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -299,7 +299,6 @@ static void MutateTxAddOutPubKey(CMutableTransaction& tx, const std::string& str
if (!pubkey.IsFullyValid())
throw std::runtime_error("invalid TX output pubkey");
CScript scriptPubKey = GetScriptForRawPubKey(pubkey);
- CBitcoinAddress addr(scriptPubKey);
// Extract and validate FLAGS
bool bSegWit = false;
diff --git a/src/chain.cpp b/src/chain.cpp
index a5b369c4fc..ffd58d471d 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -126,7 +126,7 @@ arith_uint256 GetBlockProof(const CBlockIndex& block)
if (fNegative || fOverflow || bnTarget == 0)
return 0;
// We need to compute 2**256 / (bnTarget+1), but we can't represent 2**256
- // as it's too large for a arith_uint256. However, as 2**256 is at least as large
+ // as it's too large for an arith_uint256. However, as 2**256 is at least as large
// as bnTarget+1, it is equal to ((2**256 - bnTarget - 1) / (bnTarget+1)) + 1,
// or ~bnTarget / (nTarget+1) + 1.
return (~bnTarget / (bnTarget + 1)) + 1;
@@ -148,3 +148,22 @@ int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& fr
}
return sign * r.GetLow64();
}
+
+/** Find the last common ancestor two blocks have.
+ * Both pa and pb must be non-NULL. */
+const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb) {
+ if (pa->nHeight > pb->nHeight) {
+ pa = pa->GetAncestor(pb->nHeight);
+ } else if (pb->nHeight > pa->nHeight) {
+ pb = pb->GetAncestor(pa->nHeight);
+ }
+
+ while (pa != pb && pa && pb) {
+ pa = pa->pprev;
+ pb = pb->pprev;
+ }
+
+ // Eventually all chain branches meet at the genesis block.
+ assert(pa == pb);
+ return pa;
+}
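
LastCommonAncestor first brings both pointers to the same height via GetAncestor, then walks them back in lockstep until they meet. A small hypothetical use, assuming two valid block index pointers pindexA and pindexB:

const CBlockIndex* fork = LastCommonAncestor(pindexA, pindexB);
// Number of blocks on each branch past the fork point.
int branchA = pindexA->nHeight - fork->nHeight;
int branchB = pindexB->nHeight - fork->nHeight;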
diff --git a/src/chain.h b/src/chain.h
index de120d2d75..c5304b7d6f 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -362,6 +362,9 @@ public:
arith_uint256 GetBlockProof(const CBlockIndex& block);
/** Return the time it would take to redo the work difference between from and to, assuming the current hashrate corresponds to the difficulty at tip, in seconds. */
int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params&);
+/** Find the forking point between two chain tips. */
+const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb);
+
/** Used to marshal pointers into hashes for db storage. */
class CDiskBlockIndex : public CBlockIndex
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 3b42c5fb23..dc4d2621ee 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -124,12 +124,12 @@ public:
assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
// Note that of those with the service bits flag, most only support a subset of possible options
- vSeeds.push_back(CDNSSeedData("bitcoin.sipa.be", "seed.bitcoin.sipa.be", true)); // Pieter Wuille, only supports x1, x5, x9, and xd
- vSeeds.push_back(CDNSSeedData("bluematt.me", "dnsseed.bluematt.me", true)); // Matt Corallo, only supports x9
- vSeeds.push_back(CDNSSeedData("dashjr.org", "dnsseed.bitcoin.dashjr.org")); // Luke Dashjr
- vSeeds.push_back(CDNSSeedData("bitcoinstats.com", "seed.bitcoinstats.com", true)); // Christian Decker, supports x1 - xf
- vSeeds.push_back(CDNSSeedData("bitcoin.jonasschnelli.ch", "seed.bitcoin.jonasschnelli.ch", true)); // Jonas Schnelli, only supports x1, x5, x9, and xd
- vSeeds.push_back(CDNSSeedData("petertodd.org", "seed.btc.petertodd.org", true)); // Peter Todd, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("seed.bitcoin.sipa.be", true); // Pieter Wuille, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("dnsseed.bluematt.me", true); // Matt Corallo, only supports x9
+ vSeeds.emplace_back("dnsseed.bitcoin.dashjr.org", false); // Luke Dashjr
+ vSeeds.emplace_back("seed.bitcoinstats.com", true); // Christian Decker, supports x1 - xf
+ vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch", true); // Jonas Schnelli, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("seed.btc.petertodd.org", true); // Peter Todd, only supports x1, x5, x9, and xd
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
@@ -225,10 +225,10 @@ public:
vFixedSeeds.clear();
vSeeds.clear();
// nodes with support for servicebits filtering should be at the top
- vSeeds.push_back(CDNSSeedData("testnetbitcoin.jonasschnelli.ch", "testnet-seed.bitcoin.jonasschnelli.ch", true));
- vSeeds.push_back(CDNSSeedData("petertodd.org", "seed.tbtc.petertodd.org", true));
- vSeeds.push_back(CDNSSeedData("bluematt.me", "testnet-seed.bluematt.me"));
- vSeeds.push_back(CDNSSeedData("bitcoin.schildbach.de", "testnet-seed.bitcoin.schildbach.de"));
+ vSeeds.emplace_back("testnet-seed.bitcoin.jonasschnelli.ch", true);
+ vSeeds.emplace_back("seed.tbtc.petertodd.org", true);
+ vSeeds.emplace_back("testnet-seed.bluematt.me", false);
+ vSeeds.emplace_back("testnet-seed.bitcoin.schildbach.de", false);
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
diff --git a/src/chainparams.h b/src/chainparams.h
index a2f136171b..f55ae4cf7f 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -15,9 +15,9 @@
#include <vector>
struct CDNSSeedData {
- std::string name, host;
+ std::string host;
bool supportsServiceBitsFiltering;
- CDNSSeedData(const std::string &strName, const std::string &strHost, bool supportsServiceBitsFilteringIn = false) : name(strName), host(strHost), supportsServiceBitsFiltering(supportsServiceBitsFilteringIn) {}
+ CDNSSeedData(const std::string &strHost, bool supportsServiceBitsFilteringIn) : host(strHost), supportsServiceBitsFiltering(supportsServiceBitsFilteringIn) {}
};
struct SeedSpec6 {
diff --git a/src/coins.cpp b/src/coins.cpp
index b45fc76338..b5dc6197bd 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -11,16 +11,22 @@
#include <assert.h>
bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return false; }
-bool CCoinsView::HaveCoin(const COutPoint &outpoint) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
+std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); }
bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; }
CCoinsViewCursor *CCoinsView::Cursor() const { return 0; }
+bool CCoinsView::HaveCoin(const COutPoint &outpoint) const
+{
+ Coin coin;
+ return GetCoin(outpoint, coin);
+}
CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { }
bool CCoinsViewBacked::GetCoin(const COutPoint &outpoint, Coin &coin) const { return base->GetCoin(outpoint, coin); }
bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->HaveCoin(outpoint); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
+std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); }
CCoinsViewCursor *CCoinsViewBacked::Cursor() const { return base->Cursor(); }
@@ -55,7 +61,7 @@ bool CCoinsViewCache::GetCoin(const COutPoint &outpoint, Coin &coin) const {
CCoinsMap::const_iterator it = FetchCoin(outpoint);
if (it != cacheCoins.end()) {
coin = it->second.coin;
- return true;
+ return !coin.IsSpent();
}
return false;
}
@@ -81,19 +87,20 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi
cachedCoinsUsage += it->second.coin.DynamicMemoryUsage();
}
-void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight) {
+void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool check) {
bool fCoinbase = tx.IsCoinBase();
const uint256& txid = tx.GetHash();
for (size_t i = 0; i < tx.vout.size(); ++i) {
- // Pass fCoinbase as the possible_overwrite flag to AddCoin, in order to correctly
+ bool overwrite = check ? cache.HaveCoin(COutPoint(txid, i)) : fCoinbase;
+ // Always set the possible_overwrite flag to AddCoin for coinbase txn, in order to correctly
// deal with the pre-BIP30 occurrences of duplicate coinbase transactions.
- cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase), fCoinbase);
+ cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase), overwrite);
}
}
-void CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) {
+bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) {
CCoinsMap::iterator it = FetchCoin(outpoint);
- if (it == cacheCoins.end()) return;
+ if (it == cacheCoins.end()) return false;
cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
if (moveout) {
*moveout = std::move(it->second.coin);
@@ -104,6 +111,7 @@ void CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) {
it->second.flags |= CCoinsCacheEntry::DIRTY;
it->second.coin.Clear();
}
+ return true;
}
static const Coin coinEmpty;
@@ -124,7 +132,7 @@ bool CCoinsViewCache::HaveCoin(const COutPoint &outpoint) const {
bool CCoinsViewCache::HaveCoinInCache(const COutPoint &outpoint) const {
CCoinsMap::const_iterator it = cacheCoins.find(outpoint);
- return it != cacheCoins.end();
+ return (it != cacheCoins.end() && !it->second.coin.IsSpent());
}
uint256 CCoinsViewCache::GetBestBlock() const {
diff --git a/src/coins.h b/src/coins.h
index dc3210b8ac..efb5ce869c 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -145,16 +145,24 @@ private:
class CCoinsView
{
public:
- //! Retrieve the Coin (unspent transaction output) for a given outpoint.
+ /** Retrieve the Coin (unspent transaction output) for a given outpoint.
+ * Returns true only when an unspent coin was found, which is returned in coin.
+ * When false is returned, coin's value is unspecified.
+ */
virtual bool GetCoin(const COutPoint &outpoint, Coin &coin) const;
- //! Just check whether we have data for a given outpoint.
- //! This may (but cannot always) return true for spent outputs.
+ //! Just check whether a given outpoint is unspent.
virtual bool HaveCoin(const COutPoint &outpoint) const;
//! Retrieve the block hash whose state this CCoinsView currently represents
virtual uint256 GetBestBlock() const;
+ //! Retrieve the range of blocks that may have been only partially written.
+ //! If the database is in a consistent state, the result is the empty vector.
+ //! Otherwise, a two-element vector is returned consisting of the new and
+ //! the old block hash, in that order.
+ virtual std::vector<uint256> GetHeadBlocks() const;
+
//! Do a bulk modification (multiple Coin changes + BestBlock change).
//! The passed mapCoins can be modified.
virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock);
@@ -181,6 +189,7 @@ public:
bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
+ std::vector<uint256> GetHeadBlocks() const override;
void SetBackend(CCoinsView &viewIn);
bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
CCoinsViewCursor *Cursor() const override;
@@ -224,8 +233,13 @@ public:
/**
* Return a reference to Coin in the cache, or a pruned one if not found. This is
- * more efficient than GetCoin. Modifications to other cache entries are
- * allowed while accessing the returned pointer.
+ * more efficient than GetCoin.
+ *
+ * Generally, do not hold the reference returned for more than a short scope.
+ * While the current implementation allows for modifications to the contents
+ * of the cache while holding the reference, this behavior should not be relied
+ * on! To be safe, best to not hold the returned reference through any other
+ * calls to this cache.
*/
const Coin& AccessCoin(const COutPoint &output) const;
@@ -240,7 +254,7 @@ public:
* If no unspent output exists for the passed outpoint, this call
* has no effect.
*/
- void SpendCoin(const COutPoint &outpoint, Coin* moveto = nullptr);
+ bool SpendCoin(const COutPoint &outpoint, Coin* moveto = nullptr);
/**
* Push the modifications applied to this cache to its base.
@@ -284,12 +298,17 @@ private:
};
//! Utility function to add all of a transaction's outputs to a cache.
-// It assumes that overwrites are only possible for coinbase transactions,
+// When check is false, this assumes that overwrites are only possible for coinbase transactions.
+// When check is true, the underlying view may be queried to determine whether an addition is
+// an overwrite.
// TODO: pass in a boolean to limit these possible overwrites to known
// (pre-BIP34) cases.
-void AddCoins(CCoinsViewCache& cache, const CTransaction& tx, int nHeight);
+void AddCoins(CCoinsViewCache& cache, const CTransaction& tx, int nHeight, bool check = false);
//! Utility function to find any unspent output with a given txid.
+// This function can be quite expensive because in the event of a transaction
+// which is not found in the cache, it can cause up to MAX_OUTPUTS_PER_BLOCK
+// lookups to database, so it should be used with care.
const Coin& AccessByTxid(const CCoinsViewCache& cache, const uint256& txid);
#endif // BITCOIN_COINS_H
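
GetHeadBlocks() is what lets a caller recognize an interrupted (non-atomic) flush: an empty result means the coins view is consistent with GetBestBlock(), while a two-element result names the new and old tip of a write that did not complete. A hedged sketch of a hypothetical caller, assuming a CCoinsView named view:

std::vector<uint256> heads = view.GetHeadBlocks();
if (heads.empty()) {
    // Consistent state: the view matches view.GetBestBlock().
} else {
    assert(heads.size() == 2);
    const uint256& new_tip = heads[0]; // target of the interrupted flush
    const uint256& old_tip = heads[1]; // last fully written state
    // Blocks between old_tip and new_tip must be replayed (or undone) before use.
}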
diff --git a/src/compat/glibc_sanity.cpp b/src/compat/glibc_sanity.cpp
index d62d74d462..b4d1c90992 100644
--- a/src/compat/glibc_sanity.cpp
+++ b/src/compat/glibc_sanity.cpp
@@ -56,7 +56,7 @@ bool sanity_test_fdelt()
}
#endif
-} // anon namespace
+} // namespace
bool glibc_sanity_test()
{
diff --git a/src/compat/glibcxx_sanity.cpp b/src/compat/glibcxx_sanity.cpp
index cee8a98c7f..569fb1bbe8 100644
--- a/src/compat/glibcxx_sanity.cpp
+++ b/src/compat/glibcxx_sanity.cpp
@@ -38,7 +38,7 @@ bool sanity_test_list(unsigned int size)
return true;
}
-} // anon namespace
+} // namespace
// trigger: string::at(x) on an empty string to trigger __throw_out_of_range_fmt.
// test: force std::string to throw an out_of_range exception. Verify that
diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h
index 351911a3a4..58b2ed4b3e 100644
--- a/src/consensus/consensus.h
+++ b/src/consensus/consensus.h
@@ -12,7 +12,13 @@
static const unsigned int MAX_BLOCK_SERIALIZED_SIZE = 4000000;
/** The maximum allowed weight for a block, see BIP 141 (network rule) */
static const unsigned int MAX_BLOCK_WEIGHT = 4000000;
-/** The maximum allowed size for a block excluding witness data, in bytes (network rule) */
+/**
+ * The maximum allowed size for a block excluding witness data, in bytes (network rule).
+ * This parameter is largely superfluous because it is directly implied by the above block
+ * weight limit, even when BIP 141 is not active. It continues to exist for use in
+ * various early tests that run before the witness data has been checked.
+ * All tests related to it could be removed without breaking consensus compatibility.
+ */
static const unsigned int MAX_BLOCK_BASE_SIZE = 1000000;
/** The maximum allowed number of signature check operations in a block (network rule) */
static const int64_t MAX_BLOCK_SIGOPS_COST = 80000;
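
The "directly implied" claim follows from BIP 141 arithmetic: block weight is 3 × base size + total size, and total size is at least base size, so weight ≥ 4 × base size. A minimal standalone check of that relationship (constants copied from the header above; the program itself is illustrative only):

#include <cstdint>

int main()
{
    constexpr uint32_t MAX_BLOCK_WEIGHT = 4000000;
    constexpr uint32_t MAX_BLOCK_BASE_SIZE = 1000000;
    // weight = 3*base + total and total >= base  =>  weight >= 4*base,
    // so any block within the weight limit has base size <= weight/4.
    static_assert(MAX_BLOCK_WEIGHT / 4 == MAX_BLOCK_BASE_SIZE,
                  "base-size limit is implied by the weight limit");
    return 0;
}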
diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp
index bf68f8754b..0671cbc132 100644
--- a/src/consensus/tx_verify.cpp
+++ b/src/consensus/tx_verify.cpp
@@ -126,7 +126,9 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
- const CTxOut &prevout = inputs.AccessCoin(tx.vin[i].prevout).out;
+ const Coin& coin = inputs.AccessCoin(tx.vin[i].prevout);
+ assert(!coin.IsSpent());
+ const CTxOut &prevout = coin.out;
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
}
@@ -146,7 +148,9 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
- const CTxOut &prevout = inputs.AccessCoin(tx.vin[i].prevout).out;
+ const Coin& coin = inputs.AccessCoin(tx.vin[i].prevout);
+ assert(!coin.IsSpent());
+ const CTxOut &prevout = coin.out;
nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, &tx.vin[i].scriptWitness, flags);
}
return nSigOps;
diff --git a/src/cuckoocache.h b/src/cuckoocache.h
index 5837549455..fd24d05ee7 100644
--- a/src/cuckoocache.h
+++ b/src/cuckoocache.h
@@ -176,7 +176,7 @@ private:
*/
mutable std::vector<bool> epoch_flags;
- /** epoch_heuristic_counter is used to determine when a epoch might be aged
+ /** epoch_heuristic_counter is used to determine when an epoch might be aged
* & an expensive scan should be done. epoch_heuristic_counter is
* decremented on insert and reset to the new number of inserts which would
* cause the epoch to reach epoch_size when it reaches zero.
@@ -184,7 +184,7 @@ private:
uint32_t epoch_heuristic_counter;
/** epoch_size is set to be the number of elements supposed to be in a
- * epoch. When the number of non-erased elements in a epoch
+ * epoch. When the number of non-erased elements in an epoch
* exceeds epoch_size, a new epoch should be started and all
* current entries demoted. epoch_size is set to be 45% of size because
* we want to keep load around 90%, and we support 3 epochs at once --
@@ -206,6 +206,37 @@ private:
/** compute_hashes is convenience for not having to write out this
* expression everywhere we use the hash values of an Element.
*
+ * We need to map the 32-bit input hash onto a hash bucket in a range [0, size) in a
+ * manner which preserves as much of the hash's uniformity as possible. Ideally
+ * this would be done by bitmasking but the size is usually not a power of two.
+ *
+ * The naive approach would be to use a mod -- which isn't perfectly uniform but so
+ * long as the hash is much larger than size it is not that bad. Unfortunately,
+ * mod/division is fairly slow on ordinary microprocessors (e.g. 90-ish cycles on
+ * haswell, ARM doesn't even have an instruction for it.); when the divisor is a
+ * constant the compiler will do clever tricks to turn it into a multiply+add+shift,
+ * but size is a run-time value so the compiler can't do that here.
+ *
+ * One option would be to implement the same trick the compiler uses and compute the
+ * constants for exact division based on the size, as described in "{N}-bit Unsigned
+ * Division via {N}-bit Multiply-Add" by Arch D. Robison in 2005. But that code is
+ * somewhat complicated and the result is still slower than other options:
+ *
+ * Instead we treat the 32-bit random number as a Q32 fixed-point number in the range
+ * [0,1) and simply multiply it by the size. Then we just shift the result down by
+ * 32-bits to get our bucket number. The results has non-uniformity the same as a
+ * mod, but it is much faster to compute. More about this technique can be found at
+ * http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+ *
+ * The resulting non-uniformity is also more equally distributed which would be
+ * advantageous for something like linear probing, though it shouldn't matter
+ * one way or the other for a cuckoo table.
+ *
+ * The primary disadvantage of this approach is increased intermediate precision is
+ * required but for a 32-bit random number we only need the high 32 bits of a
+ * 32*32->64 multiply, which means the operation is reasonably fast even on a
+ * typical 32-bit processor.
+ *
* @param e the element whose hashes will be returned
* @returns std::array<uint32_t, 8> of deterministic hashes derived from e
*/
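
The mapping described in that comment reduces to a single widening multiply and a shift. A standalone sketch of the idea (bucket_from_hash is an illustrative helper, not the class member itself):

#include <cassert>
#include <cstdint>

// Treat the 32-bit hash as a Q32 fixed-point value in [0,1) and scale it by size:
// bucket = floor(hash / 2^32 * size), i.e. the high half of a 32x32->64 multiply.
inline uint32_t bucket_from_hash(uint32_t hash, uint32_t size)
{
    return static_cast<uint32_t>((static_cast<uint64_t>(hash) * size) >> 32);
}

int main()
{
    const uint32_t size = 1000003; // not a power of two, so bitmasking would not work
    const uint32_t hashes[] = {0u, 123456789u, 0xFFFFFFFFu};
    for (uint32_t h : hashes) {
        assert(bucket_from_hash(h, size) < size); // always lands inside [0, size)
    }
    return 0;
}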
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index 3d2098c059..ba9e21cc1f 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -108,7 +108,7 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
leveldb::Status result = leveldb::DestroyDB(path.string(), options);
dbwrapper_private::HandleError(result);
}
- TryCreateDirectory(path);
+ TryCreateDirectories(path);
LogPrintf("Opening LevelDB in %s\n", path.string());
}
leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb);
@@ -209,4 +209,4 @@ const std::vector<unsigned char>& GetObfuscateKey(const CDBWrapper &w)
return w.obfuscate_key;
}
-};
+} // namespace dbwrapper_private
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 39da26afe1..a207d5ece4 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -46,11 +46,11 @@ public:
HTTPRPCTimerInterface(struct event_base* _base) : base(_base)
{
}
- const char* Name()
+ const char* Name() override
{
return "HTTP";
}
- RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis)
+ RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis) override
{
return new HTTPRPCTimer(base, func, millis);
}
@@ -91,35 +91,32 @@ static bool multiUserAuthorized(std::string strUserPass)
std::string strUser = strUserPass.substr(0, strUserPass.find(":"));
std::string strPass = strUserPass.substr(strUserPass.find(":") + 1);
- if (gArgs.IsArgSet("-rpcauth")) {
+ for (const std::string& strRPCAuth : gArgs.GetArgs("-rpcauth")) {
//Search for multi-user login/pass "rpcauth" from config
- for (std::string strRPCAuth : gArgs.GetArgs("-rpcauth"))
- {
- std::vector<std::string> vFields;
- boost::split(vFields, strRPCAuth, boost::is_any_of(":$"));
- if (vFields.size() != 3) {
- //Incorrect formatting in config file
- continue;
- }
-
- std::string strName = vFields[0];
- if (!TimingResistantEqual(strName, strUser)) {
- continue;
- }
-
- std::string strSalt = vFields[1];
- std::string strHash = vFields[2];
-
- static const unsigned int KEY_SIZE = 32;
- unsigned char out[KEY_SIZE];
-
- CHMAC_SHA256(reinterpret_cast<const unsigned char*>(strSalt.c_str()), strSalt.size()).Write(reinterpret_cast<const unsigned char*>(strPass.c_str()), strPass.size()).Finalize(out);
- std::vector<unsigned char> hexvec(out, out+KEY_SIZE);
- std::string strHashFromPass = HexStr(hexvec);
-
- if (TimingResistantEqual(strHashFromPass, strHash)) {
- return true;
- }
+ std::vector<std::string> vFields;
+ boost::split(vFields, strRPCAuth, boost::is_any_of(":$"));
+ if (vFields.size() != 3) {
+ //Incorrect formatting in config file
+ continue;
+ }
+
+ std::string strName = vFields[0];
+ if (!TimingResistantEqual(strName, strUser)) {
+ continue;
+ }
+
+ std::string strSalt = vFields[1];
+ std::string strHash = vFields[2];
+
+ static const unsigned int KEY_SIZE = 32;
+ unsigned char out[KEY_SIZE];
+
+ CHMAC_SHA256(reinterpret_cast<const unsigned char*>(strSalt.c_str()), strSalt.size()).Write(reinterpret_cast<const unsigned char*>(strPass.c_str()), strPass.size()).Finalize(out);
+ std::vector<unsigned char> hexvec(out, out+KEY_SIZE);
+ std::string strHashFromPass = HexStr(hexvec);
+
+ if (TimingResistantEqual(strHashFromPass, strHash)) {
+ return true;
}
}
return false;
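
The rewritten loop above checks -rpcauth entries of the form user:salt$hash, where hash is the hex HMAC-SHA256 of the password keyed with the salt and compared with TimingResistantEqual. As a rough standalone sketch of that derivation, assuming OpenSSL purely for illustration (the patch itself uses the in-tree CHMAC_SHA256, and the salt/password below are made up):

    #include <openssl/hmac.h>
    #include <openssl/evp.h>
    #include <cstdio>
    #include <string>

    // Recompute the salted HMAC-SHA256 of a password, as done for -rpcauth
    // entries ("user:salt$hexhash"). OpenSSL stands in for CHMAC_SHA256 here.
    static std::string HmacSha256Hex(const std::string& salt, const std::string& pass)
    {
        unsigned char out[EVP_MAX_MD_SIZE];
        unsigned int out_len = 0;
        HMAC(EVP_sha256(),
             salt.data(), static_cast<int>(salt.size()),
             reinterpret_cast<const unsigned char*>(pass.data()), pass.size(),
             out, &out_len);
        static const char* hex = "0123456789abcdef";
        std::string ret;
        for (unsigned int i = 0; i < out_len; ++i) {
            ret += hex[out[i] >> 4];
            ret += hex[out[i] & 0x0f];
        }
        return ret;
    }

    int main()
    {
        // Hypothetical salt and password; real code compares the result
        // against the stored hash field with a constant-time comparison.
        std::printf("%s\n", HmacSha256Hex("2d5a6b3f", "hunter2").c_str());
        return 0;
    }
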
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 0d1cba3fd2..1c53d8d49d 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -21,13 +21,13 @@
#include <signal.h>
#include <future>
-#include <event2/event.h>
-#include <event2/http.h>
#include <event2/thread.h>
#include <event2/buffer.h>
#include <event2/util.h>
#include <event2/keyvalq_struct.h>
+#include "support/events.h"
+
#ifdef EVENT__HAVE_NETINET_IN_H
#include <netinet/in.h>
#ifdef _XOPEN_SOURCE_EXTENDED
@@ -46,7 +46,7 @@ public:
req(std::move(_req)), path(_path), func(_func)
{
}
- void operator()()
+ void operator()() override
{
func(req.get(), path);
}
@@ -196,18 +196,16 @@ static bool InitHTTPAllowList()
LookupHost("::1", localv6, false);
rpc_allow_subnets.push_back(CSubNet(localv4, 8)); // always allow IPv4 local subnet
rpc_allow_subnets.push_back(CSubNet(localv6)); // always allow IPv6 localhost
- if (gArgs.IsArgSet("-rpcallowip")) {
- for (const std::string& strAllow : gArgs.GetArgs("-rpcallowip")) {
- CSubNet subnet;
- LookupSubNet(strAllow.c_str(), subnet);
- if (!subnet.IsValid()) {
- uiInterface.ThreadSafeMessageBox(
- strprintf("Invalid -rpcallowip subnet specification: %s. Valid are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24).", strAllow),
- "", CClientUIInterface::MSG_ERROR);
- return false;
- }
- rpc_allow_subnets.push_back(subnet);
+ for (const std::string& strAllow : gArgs.GetArgs("-rpcallowip")) {
+ CSubNet subnet;
+ LookupSubNet(strAllow.c_str(), subnet);
+ if (!subnet.IsValid()) {
+ uiInterface.ThreadSafeMessageBox(
+ strprintf("Invalid -rpcallowip subnet specification: %s. Valid are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24).", strAllow),
+ "", CClientUIInterface::MSG_ERROR);
+ return false;
}
+ rpc_allow_subnets.push_back(subnet);
}
std::string strAllowed;
for (const CSubNet& subnet : rpc_allow_subnets)
@@ -367,9 +365,6 @@ static void libevent_log_cb(int severity, const char *msg)
bool InitHTTPServer()
{
- struct evhttp* http = 0;
- struct event_base* base = 0;
-
if (!InitHTTPAllowList())
return false;
@@ -395,17 +390,13 @@ bool InitHTTPServer()
evthread_use_pthreads();
#endif
- base = event_base_new(); // XXX RAII
- if (!base) {
- LogPrintf("Couldn't create an event_base: exiting\n");
- return false;
- }
+ raii_event_base base_ctr = obtain_event_base();
/* Create a new evhttp object to handle requests. */
- http = evhttp_new(base); // XXX RAII
+ raii_evhttp http_ctr = obtain_evhttp(base_ctr.get());
+ struct evhttp* http = http_ctr.get();
if (!http) {
LogPrintf("couldn't create evhttp. Exiting.\n");
- event_base_free(base);
return false;
}
@@ -416,8 +407,6 @@ bool InitHTTPServer()
if (!HTTPBindAddresses(http)) {
LogPrintf("Unable to bind any endpoint for RPC server\n");
- evhttp_free(http);
- event_base_free(base);
return false;
}
@@ -426,8 +415,9 @@ bool InitHTTPServer()
LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth);
workQueue = new WorkQueue<HTTPClosure>(workQueueDepth);
- eventBase = base;
- eventHTTP = http;
+ // transfer ownership to eventBase/HTTP via .release()
+ eventBase = base_ctr.release();
+ eventHTTP = http_ctr.release();
return true;
}
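
The hunk above swaps the manual event_base_free/evhttp_free error paths for the RAII helpers in support/events.h, releasing ownership to the long-lived eventBase/eventHTTP pointers only once setup succeeds. A minimal sketch of that pattern with std::unique_ptr and custom deleters, assuming plain libevent; this is not the actual contents of support/events.h:

    #include <event2/event.h>
    #include <event2/http.h>
    #include <memory>

    // Minimal RAII wrappers in the spirit of support/events.h: the libevent
    // objects are freed automatically on every early-return path, and
    // .release() hands ownership back once setup has succeeded.
    using raii_event_base_sketch = std::unique_ptr<event_base, decltype(&event_base_free)>;
    using raii_evhttp_sketch     = std::unique_ptr<evhttp, decltype(&evhttp_free)>;

    static raii_event_base_sketch obtain_event_base_sketch()
    {
        return raii_event_base_sketch(event_base_new(), &event_base_free);
    }

    static raii_evhttp_sketch obtain_evhttp_sketch(event_base* base)
    {
        return raii_evhttp_sketch(evhttp_new(base), &evhttp_free);
    }

    int main()
    {
        auto base = obtain_event_base_sketch();
        if (!base) return 1;                       // nothing to clean up
        auto http = obtain_evhttp_sketch(base.get());
        if (!http) return 1;                       // base freed automatically
        // ... configure handlers, bind sockets ...
        event_base* long_lived      = base.release();   // ownership transferred
        evhttp*     long_lived_http = http.release();
        evhttp_free(long_lived_http);              // explicit cleanup at shutdown
        event_base_free(long_lived);
        return 0;
    }
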
diff --git a/src/httpserver.h b/src/httpserver.h
index 6be9950682..9df56e5fc5 100644
--- a/src/httpserver.h
+++ b/src/httpserver.h
@@ -86,7 +86,7 @@ public:
/**
* Get the request header specified by hdr, or an empty string.
- * Return an pair (isPresent,string).
+ * Return a pair (isPresent,string).
*/
std::pair<bool, std::string> GetHeader(const std::string& hdr);
@@ -125,7 +125,7 @@ public:
virtual ~HTTPClosure() {}
};
-/** Event class. This can be used either as an cross-thread trigger or as a timer.
+/** Event class. This can be used either as a cross-thread trigger or as a timer.
*/
class HTTPEvent
{
diff --git a/src/init.cpp b/src/init.cpp
index b6e4cd06f6..672ef77e80 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -88,14 +88,6 @@ static CZMQNotificationInterface* pzmqNotificationInterface = NULL;
#define MIN_CORE_FILEDESCRIPTORS 150
#endif
-/** Used to pass flags to the Bind() function */
-enum BindFlags {
- BF_NONE = 0,
- BF_EXPLICIT = (1U << 0),
- BF_REPORT_ERROR = (1U << 1),
- BF_WHITELIST = (1U << 2),
-};
-
static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
//////////////////////////////////////////////////////////////////////////////
@@ -296,17 +288,6 @@ static void registerSignalHandler(int signal, void(*handler)(int))
}
#endif
-bool static Bind(CConnman& connman, const CService &addr, unsigned int flags) {
- if (!(flags & BF_EXPLICIT) && IsLimited(addr))
- return false;
- std::string strError;
- if (!connman.BindListenPort(addr, strError, (flags & BF_WHITELIST) != 0)) {
- if (flags & BF_REPORT_ERROR)
- return InitError(strError);
- return false;
- }
- return true;
-}
void OnRPCStarted()
{
uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange);
@@ -355,6 +336,9 @@ std::string HelpMessage(HelpMessageMode mode)
#endif
}
strUsage += HelpMessageOpt("-datadir=<dir>", _("Specify data directory"));
+ if (showDebug) {
+ strUsage += HelpMessageOpt("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize));
+ }
strUsage += HelpMessageOpt("-dbcache=<n>", strprintf(_("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache));
if (showDebug)
strUsage += HelpMessageOpt("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER));
@@ -463,7 +447,7 @@ std::string HelpMessage(HelpMessageMode mode)
{
strUsage += HelpMessageOpt("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS));
strUsage += HelpMessageOpt("-mocktime=<n>", "Replace actual time with <n> seconds since epoch (default: 0)");
- strUsage += HelpMessageOpt("-maxsigcachesize=<n>", strprintf("Limit size of signature cache to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE));
+ strUsage += HelpMessageOpt("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE));
strUsage += HelpMessageOpt("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE));
}
strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees (in %s) to use in a single wallet transaction or raw transaction; setting this too low may abort large transactions (default: %s)"),
@@ -822,7 +806,7 @@ int nUserMaxConnections;
int nFD;
ServiceFlags nLocalServices = NODE_NETWORK;
-}
+} // namespace
[[noreturn]] static void new_handler_terminate()
{
@@ -844,8 +828,6 @@ bool AppInitBasicSetup()
// Turn off Microsoft heap dump noise
_CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0));
-#endif
-#if _MSC_VER >= 1400
// Disable confusing "helpful" text message on abort, Ctrl-C
_set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
#endif
@@ -900,10 +882,14 @@ bool AppInitParameterInteraction()
return InitError(_("Prune mode is incompatible with -txindex."));
}
+ // -bind and -whitebind can't be set when not listening
+ size_t nUserBind = gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size();
+ if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
+ return InitError("Cannot set -bind or -whitebind together with -listen=0");
+ }
+
// Make sure enough file descriptors are available
- int nBind = std::max(
- (gArgs.IsArgSet("-bind") ? gArgs.GetArgs("-bind").size() : 0) +
- (gArgs.IsArgSet("-whitebind") ? gArgs.GetArgs("-whitebind").size() : 0), size_t(1));
+ int nBind = std::max(nUserBind, size_t(1));
nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
nMaxConnections = std::max(nUserMaxConnections, 0);
@@ -935,15 +921,13 @@ bool AppInitParameterInteraction()
}
// Now remove the logging categories which were explicitly excluded
- if (gArgs.IsArgSet("-debugexclude")) {
- for (const std::string& cat : gArgs.GetArgs("-debugexclude")) {
- uint32_t flag = 0;
- if (!GetLogCategory(&flag, &cat)) {
- InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat));
- continue;
- }
- logCategories &= ~flag;
+ for (const std::string& cat : gArgs.GetArgs("-debugexclude")) {
+ uint32_t flag = 0;
+ if (!GetLogCategory(&flag, &cat)) {
+ InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat));
+ continue;
}
+ logCategories &= ~flag;
}
// Check for -debugnet
@@ -1030,14 +1014,7 @@ bool AppInitParameterInteraction()
if (nConnectTimeout <= 0)
nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
- // Fee-per-kilobyte amount required for mempool acceptance and relay
- // If you are mining, be careful setting this:
- // if you set it to zero then
- // a transaction spammer can cheaply fill blocks using
- // 0-fee transactions. It should be set above the real
- // cost to you of processing a transaction.
- if (IsArgSet("-minrelaytxfee"))
- {
+ if (IsArgSet("-minrelaytxfee")) {
CAmount n = 0;
if (!ParseMoney(GetArg("-minrelaytxfee", ""), n)) {
return InitError(AmountErrMsg("minrelaytxfee", GetArg("-minrelaytxfee", "")));
@@ -1170,6 +1147,7 @@ bool AppInitSanityChecks()
// ********************************************************* Step 4: sanity checks
// Initialize elliptic curve code
+ RandomInit();
ECC_Start();
globalVerifyHandle.reset(new ECCVerifyHandle());
@@ -1213,6 +1191,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
InitSignatureCache();
+ InitScriptExecutionCache();
LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads);
if (nScriptCheckThreads) {
@@ -1259,13 +1238,10 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<std::string> uacomments;
- if (gArgs.IsArgSet("-uacomment")) {
- for (std::string cmt : gArgs.GetArgs("-uacomment"))
- {
- if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
- return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
- uacomments.push_back(cmt);
- }
+ for (const std::string& cmt : gArgs.GetArgs("-uacomment")) {
+ if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
+ return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
+ uacomments.push_back(cmt);
}
strSubVersion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments);
if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
@@ -1288,16 +1264,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
}
}
- if (gArgs.IsArgSet("-whitelist")) {
- for (const std::string& net : gArgs.GetArgs("-whitelist")) {
- CSubNet subnet;
- LookupSubNet(net.c_str(), subnet);
- if (!subnet.IsValid())
- return InitError(strprintf(_("Invalid netmask specified in -whitelist: '%s'"), net));
- connman.AddWhitelistedRange(subnet);
- }
- }
-
// Check for host lookup allowed before parsing any network related parameters
fNameLookup = GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
@@ -1348,44 +1314,12 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
fDiscover = GetBoolArg("-discover", true);
fRelayTxes = !GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
- if (fListen) {
- bool fBound = false;
- if (gArgs.IsArgSet("-bind")) {
- for (const std::string& strBind : gArgs.GetArgs("-bind")) {
- CService addrBind;
- if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false))
- return InitError(ResolveErrMsg("bind", strBind));
- fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR));
- }
- }
- if (gArgs.IsArgSet("-whitebind")) {
- for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
- CService addrBind;
- if (!Lookup(strBind.c_str(), addrBind, 0, false))
- return InitError(ResolveErrMsg("whitebind", strBind));
- if (addrBind.GetPort() == 0)
- return InitError(strprintf(_("Need to specify a port with -whitebind: '%s'"), strBind));
- fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST));
- }
- }
- if (!gArgs.IsArgSet("-bind") && !gArgs.IsArgSet("-whitebind")) {
- struct in_addr inaddr_any;
- inaddr_any.s_addr = INADDR_ANY;
- fBound |= Bind(connman, CService(in6addr_any, GetListenPort()), BF_NONE);
- fBound |= Bind(connman, CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE);
- }
- if (!fBound)
- return InitError(_("Failed to listen on any port. Use -listen=0 if you want this."));
- }
-
- if (gArgs.IsArgSet("-externalip")) {
- for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
- CService addrLocal;
- if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
- AddLocal(addrLocal, LOCAL_MANUAL);
- else
- return InitError(ResolveErrMsg("externalip", strAddr));
- }
+ for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
+ CService addrLocal;
+ if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
+ AddLocal(addrLocal, LOCAL_MANUAL);
+ else
+ return InitError(ResolveErrMsg("externalip", strAddr));
}
#if ENABLE_ZMQ
@@ -1407,8 +1341,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
fReindex = GetBoolArg("-reindex", false);
bool fReindexChainState = GetBoolArg("-reindex-chainstate", false);
- fs::create_directories(GetDataDir() / "blocks");
-
// cache size calculations
int64_t nTotalCache = (GetArg("-dbcache", nDefaultDbCache) << 20);
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
@@ -1427,7 +1359,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024));
bool fLoaded = false;
- while (!fLoaded) {
+ while (!fLoaded && !fRequestShutdown) {
bool fReset = fReindex;
std::string strLoadError;
@@ -1445,7 +1377,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReindex);
pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReindex || fReindexChainState);
pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview);
- pcoinsTip = new CCoinsViewCache(pcoinscatcher);
if (fReindex) {
pblocktree->WriteReindexing(true);
@@ -1459,6 +1390,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
break;
}
}
+ if (fRequestShutdown) break;
if (!LoadBlockIndex(chainparams)) {
strLoadError = _("Error loading block database");
@@ -1489,6 +1421,13 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
break;
}
+ if (!ReplayBlocks(chainparams, pcoinsdbview)) {
+ strLoadError = _("Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.");
+ break;
+ }
+ pcoinsTip = new CCoinsViewCache(pcoinscatcher);
+ LoadChainTip(chainparams);
+
if (!fReindex && chainActive.Tip() != NULL) {
uiInterface.InitMessage(_("Rewinding blocks..."));
if (!RewindBlockIndex(chainparams)) {
@@ -1529,7 +1468,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
fLoaded = true;
} while(false);
- if (!fLoaded) {
+ if (!fLoaded && !fRequestShutdown) {
// first suggest a reindex
if (!fReset) {
bool fRet = uiInterface.ThreadSafeQuestion(
@@ -1616,10 +1555,8 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
std::vector<fs::path> vImportFiles;
- if (gArgs.IsArgSet("-loadblock"))
- {
- for (const std::string& strFile : gArgs.GetArgs("-loadblock"))
- vImportFiles.push_back(strFile);
+ for (const std::string& strFile : gArgs.GetArgs("-loadblock")) {
+ vImportFiles.push_back(strFile);
}
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
@@ -1646,7 +1583,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
// Map ports with UPnP
MapPort(GetBoolArg("-upnp", DEFAULT_UPNP));
- std::string strNodeError;
CConnman::Options connOptions;
connOptions.nLocalServices = nLocalServices;
connOptions.nRelevantServices = nRelevantServices;
@@ -1662,12 +1598,39 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
+ for (const std::string& strBind : gArgs.GetArgs("-bind")) {
+ CService addrBind;
+ if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
+ return InitError(ResolveErrMsg("bind", strBind));
+ }
+ connOptions.vBinds.push_back(addrBind);
+ }
+ for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
+ CService addrBind;
+ if (!Lookup(strBind.c_str(), addrBind, 0, false)) {
+ return InitError(ResolveErrMsg("whitebind", strBind));
+ }
+ if (addrBind.GetPort() == 0) {
+ return InitError(strprintf(_("Need to specify a port with -whitebind: '%s'"), strBind));
+ }
+ connOptions.vWhiteBinds.push_back(addrBind);
+ }
+
+ for (const auto& net : gArgs.GetArgs("-whitelist")) {
+ CSubNet subnet;
+ LookupSubNet(net.c_str(), subnet);
+ if (!subnet.IsValid())
+ return InitError(strprintf(_("Invalid netmask specified in -whitelist: '%s'"), net));
+ connOptions.vWhitelistedRange.push_back(subnet);
+ }
+
if (gArgs.IsArgSet("-seednode")) {
connOptions.vSeedNodes = gArgs.GetArgs("-seednode");
}
- if (!connman.Start(scheduler, strNodeError, connOptions))
- return InitError(strNodeError);
+ if (!connman.Start(scheduler, connOptions)) {
+ return false;
+ }
// ********************************************************* Step 12: finished
diff --git a/src/keystore.h b/src/keystore.h
index a2621f2de4..965ae0c79a 100644
--- a/src/keystore.h
+++ b/src/keystore.h
@@ -60,9 +60,9 @@ protected:
WatchOnlySet setWatchOnly;
public:
- bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey);
- bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const;
- bool HaveKey(const CKeyID &address) const
+ bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
+ bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
+ bool HaveKey(const CKeyID &address) const override
{
bool result;
{
@@ -71,7 +71,7 @@ public:
}
return result;
}
- void GetKeys(std::set<CKeyID> &setAddress) const
+ void GetKeys(std::set<CKeyID> &setAddress) const override
{
setAddress.clear();
{
@@ -84,7 +84,7 @@ public:
}
}
}
- bool GetKey(const CKeyID &address, CKey &keyOut) const
+ bool GetKey(const CKeyID &address, CKey &keyOut) const override
{
{
LOCK(cs_KeyStore);
@@ -97,14 +97,14 @@ public:
}
return false;
}
- virtual bool AddCScript(const CScript& redeemScript);
- virtual bool HaveCScript(const CScriptID &hash) const;
- virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const;
+ virtual bool AddCScript(const CScript& redeemScript) override;
+ virtual bool HaveCScript(const CScriptID &hash) const override;
+ virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const override;
- virtual bool AddWatchOnly(const CScript &dest);
- virtual bool RemoveWatchOnly(const CScript &dest);
- virtual bool HaveWatchOnly(const CScript &dest) const;
- virtual bool HaveWatchOnly() const;
+ virtual bool AddWatchOnly(const CScript &dest) override;
+ virtual bool RemoveWatchOnly(const CScript &dest) override;
+ virtual bool HaveWatchOnly(const CScript &dest) const override;
+ virtual bool HaveWatchOnly() const override;
};
typedef std::vector<unsigned char, secure_allocator<unsigned char> > CKeyingMaterial;
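
The keystore change adds the override specifier to methods that were already overriding virtuals; behaviour is unchanged, but a signature mismatch now fails to compile instead of silently declaring a new virtual. A small standalone illustration with made-up class names:

    // Without 'override', a mismatched signature compiles silently as a new
    // virtual; with it, the compiler reports an error instead.
    class CKeyStoreLike {
    public:
        virtual ~CKeyStoreLike() = default;
        virtual bool HaveKey(int id) const = 0;
    };

    class CBasicKeyStoreLike : public CKeyStoreLike {
    public:
        bool HaveKey(int id) const override { return id != 0; }
        // bool HaveKey(long id) const override { ... }  // would fail to compile
    };

    int main()
    {
        CBasicKeyStoreLike store;
        return store.HaveKey(1) ? 0 : 1;
    }
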
diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h
index c4e7ac360b..7935a965a7 100644
--- a/src/leveldb/db/version_set.h
+++ b/src/leveldb/db/version_set.h
@@ -376,7 +376,7 @@ class Compaction {
// Each compaction reads inputs from "level_" and "level_+1"
std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
- // State used to check for number of of overlapping grandparent files
+ // State used to check for number of overlapping grandparent files
// (parent == level_ + 1, grandparent == level_ + 2)
std::vector<FileMetaData*> grandparents_;
size_t grandparent_index_; // Index in grandparent_starts_
diff --git a/src/net.cpp b/src/net.cpp
index 73f020273b..301cf58b87 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -64,6 +64,14 @@
#endif
#endif
+/** Used to pass flags to the Bind() function */
+enum BindFlags {
+ BF_NONE = 0,
+ BF_EXPLICIT = (1U << 0),
+ BF_REPORT_ERROR = (1U << 1),
+ BF_WHITELIST = (1U << 2),
+};
+
const static std::string NET_MESSAGE_COMMAND_OTHER = "*other*";
static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8]
@@ -240,7 +248,7 @@ bool RemoveLocal(const CService& addr)
/** Make a particular network entirely off-limits (no automatic connects to it) */
void SetLimited(enum Network net, bool fLimited)
{
- if (net == NET_UNROUTABLE)
+ if (net == NET_UNROUTABLE || net == NET_INTERNAL)
return;
LOCK(cs_mapLocalHost);
vfLimited[net] = fLimited;
@@ -601,7 +609,6 @@ void CConnman::SetBannedSetDirty(bool dirty)
bool CConnman::IsWhitelistedRange(const CNetAddr &addr) {
- LOCK(cs_vWhitelistedRange);
for (const CSubNet& subnet : vWhitelistedRange) {
if (subnet.Match(addr))
return true;
@@ -609,12 +616,6 @@ bool CConnman::IsWhitelistedRange(const CNetAddr &addr) {
return false;
}
-void CConnman::AddWhitelistedRange(const CSubNet &subnet) {
- LOCK(cs_vWhitelistedRange);
- vWhitelistedRange.push_back(subnet);
-}
-
-
std::string CNode::GetAddrName() const {
LOCK(cs_addrName);
return addrName;
@@ -1604,7 +1605,12 @@ void CConnman::ThreadDNSAddressSeed()
std::vector<CNetAddr> vIPs;
std::vector<CAddress> vAdd;
ServiceFlags requiredServiceBits = nRelevantServices;
- if (LookupHost(GetDNSHost(seed, &requiredServiceBits).c_str(), vIPs, 0, true))
+ std::string host = GetDNSHost(seed, &requiredServiceBits);
+ CNetAddr resolveSource;
+ if (!resolveSource.SetInternal(host)) {
+ continue;
+ }
+ if (LookupHost(host.c_str(), vIPs, 0, true))
{
for (const CNetAddr& ip : vIPs)
{
@@ -1614,18 +1620,7 @@ void CConnman::ThreadDNSAddressSeed()
vAdd.push_back(addr);
found++;
}
- }
- if (interruptNet) {
- return;
- }
- // TODO: The seed name resolve may fail, yielding an IP of [::], which results in
- // addrman assigning the same source to results from different seeds.
- // This should switch to a hard-coded stable dummy IP for each seed name, so that the
- // resolve is not required at all.
- if (!vIPs.empty()) {
- CService seedSource;
- Lookup(seed.name.c_str(), seedSource, 0, true);
- addrman.Add(vAdd, seedSource);
+ addrman.Add(vAdd, resolveSource);
}
}
}
@@ -1682,7 +1677,7 @@ void CConnman::ProcessOneShot()
void CConnman::ThreadOpenConnections()
{
// Connect to specific addresses
- if (gArgs.IsArgSet("-connect") && gArgs.GetArgs("-connect").size() > 0)
+ if (gArgs.IsArgSet("-connect"))
{
for (int64_t nLoop = 0;; nLoop++)
{
@@ -1724,7 +1719,7 @@ void CConnman::ThreadOpenConnections()
if (!done) {
LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n");
CNetAddr local;
- LookupHost("127.0.0.1", local, false);
+ local.SetInternal("fixedseeds");
addrman.Add(convertSeed6(Params().FixedSeeds()), local);
done = true;
}
@@ -1908,8 +1903,7 @@ void CConnman::ThreadOpenAddedConnections()
{
{
LOCK(cs_vAddedNodes);
- if (gArgs.IsArgSet("-addnode"))
- vAddedNodes = gArgs.GetArgs("-addnode");
+ vAddedNodes = gArgs.GetArgs("-addnode");
}
while (true)
@@ -2226,7 +2220,38 @@ NodeId CConnman::GetNewNodeId()
return nLastNodeId.fetch_add(1, std::memory_order_relaxed);
}
-bool CConnman::Start(CScheduler& scheduler, std::string& strNodeError, Options connOptions)
+
+bool CConnman::Bind(const CService &addr, unsigned int flags) {
+ if (!(flags & BF_EXPLICIT) && IsLimited(addr))
+ return false;
+ std::string strError;
+ if (!BindListenPort(addr, strError, (flags & BF_WHITELIST) != 0)) {
+ if ((flags & BF_REPORT_ERROR) && clientInterface) {
+ clientInterface->ThreadSafeMessageBox(strError, "", CClientUIInterface::MSG_ERROR);
+ }
+ return false;
+ }
+ return true;
+}
+
+bool CConnman::InitBinds(const std::vector<CService>& binds, const std::vector<CService>& whiteBinds) {
+ bool fBound = false;
+ for (const auto& addrBind : binds) {
+ fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR));
+ }
+ for (const auto& addrBind : whiteBinds) {
+ fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST));
+ }
+ if (binds.empty() && whiteBinds.empty()) {
+ struct in_addr inaddr_any;
+ inaddr_any.s_addr = INADDR_ANY;
+ fBound |= Bind(CService(in6addr_any, GetListenPort()), BF_NONE);
+ fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE);
+ }
+ return fBound;
+}
+
+bool CConnman::Start(CScheduler& scheduler, Options connOptions)
{
nTotalBytesRecv = 0;
nTotalBytesSent = 0;
@@ -2248,11 +2273,23 @@ bool CConnman::Start(CScheduler& scheduler, std::string& strNodeError, Options c
SetBestHeight(connOptions.nBestHeight);
+ clientInterface = connOptions.uiInterface;
+
+ if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds)) {
+ if (clientInterface) {
+ clientInterface->ThreadSafeMessageBox(
+ _("Failed to listen on any port. Use -listen=0 if you want this."),
+ "", CClientUIInterface::MSG_ERROR);
+ }
+ return false;
+ }
+
+ vWhitelistedRange = connOptions.vWhitelistedRange;
+
for (const auto& strDest : connOptions.vSeedNodes) {
AddOneShot(strDest);
}
- clientInterface = connOptions.uiInterface;
if (clientInterface) {
clientInterface->InitMessage(_("Loading P2P addresses..."));
}
diff --git a/src/net.h b/src/net.h
index 704ef05634..b9a11c62f2 100644
--- a/src/net.h
+++ b/src/net.h
@@ -143,13 +143,14 @@ public:
uint64_t nMaxOutboundTimeframe = 0;
uint64_t nMaxOutboundLimit = 0;
std::vector<std::string> vSeedNodes;
+ std::vector<CSubNet> vWhitelistedRange;
+ std::vector<CService> vBinds, vWhiteBinds;
};
CConnman(uint64_t seed0, uint64_t seed1);
~CConnman();
- bool Start(CScheduler& scheduler, std::string& strNodeError, Options options);
+ bool Start(CScheduler& scheduler, Options options);
void Stop();
void Interrupt();
- bool BindListenPort(const CService &bindAddr, std::string& strError, bool fWhitelisted = false);
bool GetNetworkActive() const { return fNetworkActive; };
void SetNetworkActive(bool active);
bool OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = NULL, const char *strDest = NULL, bool fOneShot = false, bool fFeeler = false, bool fAddnode = false);
@@ -243,8 +244,6 @@ public:
unsigned int GetSendBufferSize() const;
- void AddWhitelistedRange(const CSubNet &subnet);
-
ServiceFlags GetLocalServices() const;
//!set the max outbound target in bytes
@@ -288,6 +287,9 @@ private:
ListenSocket(SOCKET socket_, bool whitelisted_) : socket(socket_), whitelisted(whitelisted_) {}
};
+ bool BindListenPort(const CService &bindAddr, std::string& strError, bool fWhitelisted = false);
+ bool Bind(const CService &addr, unsigned int flags);
+ bool InitBinds(const std::vector<CService>& binds, const std::vector<CService>& whiteBinds);
void ThreadOpenAddedConnections();
void AddOneShot(const std::string& strDest);
void ProcessOneShot();
@@ -345,7 +347,6 @@ private:
// Whitelisted ranges. Any node connecting from these is automatically
// whitelisted (as well as those connecting to whitelisted binds).
std::vector<CSubNet> vWhitelistedRange;
- CCriticalSection cs_vWhitelistedRange;
unsigned int nSendBufferMaxSize;
unsigned int nReceiveFloodSize;
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index a8ad41dbab..a743f04dd1 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -121,7 +121,7 @@ namespace {
MapRelay mapRelay;
/** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main). */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
-} // anon namespace
+} // namespace
//////////////////////////////////////////////////////////////////////////////
//
@@ -343,7 +343,9 @@ bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex*
// Short-circuit most stuff in case its from the same node
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
- *pit = &itInFlight->second.second;
+ if (pit) {
+ *pit = &itInFlight->second.second;
+ }
return false;
}
@@ -451,25 +453,6 @@ bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
return false;
}
-/** Find the last common ancestor two blocks have.
- * Both pa and pb must be non-NULL. */
-const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb) {
- if (pa->nHeight > pb->nHeight) {
- pa = pa->GetAncestor(pb->nHeight);
- } else if (pb->nHeight > pa->nHeight) {
- pb = pb->GetAncestor(pa->nHeight);
- }
-
- while (pa != pb && pa && pb) {
- pa = pa->pprev;
- pb = pb->pprev;
- }
-
- // Eventually all chain branches meet at the genesis block.
- assert(pa == pb);
- return pa;
-}
-
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
@@ -558,7 +541,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con
}
}
-} // anon namespace
+} // namespace
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 34a7029862..89f257c642 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -15,6 +15,9 @@
static const unsigned char pchIPv4[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
static const unsigned char pchOnionCat[] = {0xFD,0x87,0xD8,0x7E,0xEB,0x43};
+// 0xFD + sha256("bitcoin")[0:5]
+static const unsigned char g_internal_prefix[] = { 0xFD, 0x6B, 0x88, 0xC0, 0x87, 0x24 };
+
void CNetAddr::Init()
{
memset(ip, 0, sizeof(ip));
@@ -42,6 +45,18 @@ void CNetAddr::SetRaw(Network network, const uint8_t *ip_in)
}
}
+bool CNetAddr::SetInternal(const std::string &name)
+{
+ if (name.empty()) {
+ return false;
+ }
+ unsigned char hash[32] = {};
+ CSHA256().Write((const unsigned char*)name.data(), name.size()).Finalize(hash);
+ memcpy(ip, g_internal_prefix, sizeof(g_internal_prefix));
+ memcpy(ip + sizeof(g_internal_prefix), hash, sizeof(ip) - sizeof(g_internal_prefix));
+ return true;
+}
+
bool CNetAddr::SetSpecial(const std::string &strName)
{
if (strName.size()>6 && strName.substr(strName.size() - 6, 6) == ".onion") {
@@ -84,7 +99,7 @@ bool CNetAddr::IsIPv4() const
bool CNetAddr::IsIPv6() const
{
- return (!IsIPv4() && !IsTor());
+ return (!IsIPv4() && !IsTor() && !IsInternal());
}
bool CNetAddr::IsRFC1918() const
@@ -199,6 +214,9 @@ bool CNetAddr::IsValid() const
if (IsRFC3849())
return false;
+ if (IsInternal())
+ return false;
+
if (IsIPv4())
{
// INADDR_NONE
@@ -217,11 +235,19 @@ bool CNetAddr::IsValid() const
bool CNetAddr::IsRoutable() const
{
- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsLocal());
+ return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsLocal() || IsInternal());
+}
+
+bool CNetAddr::IsInternal() const
+{
+ return memcmp(ip, g_internal_prefix, sizeof(g_internal_prefix)) == 0;
}
enum Network CNetAddr::GetNetwork() const
{
+ if (IsInternal())
+ return NET_INTERNAL;
+
if (!IsRoutable())
return NET_UNROUTABLE;
@@ -238,6 +264,8 @@ std::string CNetAddr::ToStringIP() const
{
if (IsTor())
return EncodeBase32(&ip[6], 10) + ".onion";
+ if (IsInternal())
+ return EncodeBase32(ip + sizeof(g_internal_prefix), sizeof(ip) - sizeof(g_internal_prefix)) + ".internal";
CService serv(*this, 0);
struct sockaddr_storage sockaddr;
socklen_t socklen = sizeof(sockaddr);
@@ -305,9 +333,15 @@ std::vector<unsigned char> CNetAddr::GetGroup() const
nClass = 255;
nBits = 0;
}
-
- // all unroutable addresses belong to the same group
- if (!IsRoutable())
+ // all internal-usage addresses get their own group
+ if (IsInternal())
+ {
+ nClass = NET_INTERNAL;
+ nStartByte = sizeof(g_internal_prefix);
+ nBits = (sizeof(ip) - sizeof(g_internal_prefix)) * 8;
+ }
+ // all other unroutable addresses belong to the same group
+ else if (!IsRoutable())
{
nClass = NET_UNROUTABLE;
nBits = 0;
@@ -393,7 +427,7 @@ int CNetAddr::GetReachabilityFrom(const CNetAddr *paddrPartner) const
REACH_PRIVATE
};
- if (!IsRoutable())
+ if (!IsRoutable() || IsInternal())
return REACH_UNREACHABLE;
int ourNet = GetExtNetwork(this);
@@ -552,7 +586,7 @@ std::string CService::ToStringPort() const
std::string CService::ToStringIPPort() const
{
- if (IsIPv4() || IsTor()) {
+ if (IsIPv4() || IsTor() || IsInternal()) {
return ToStringIP() + ":" + ToStringPort();
} else {
return "[" + ToStringIP() + "]:" + ToStringPort();
diff --git a/src/netaddress.h b/src/netaddress.h
index fbc4d1a65f..80716600d1 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -22,6 +22,7 @@ enum Network
NET_IPV4,
NET_IPV6,
NET_TOR,
+ NET_INTERNAL,
NET_MAX,
};
@@ -45,6 +46,12 @@ class CNetAddr
*/
void SetRaw(Network network, const uint8_t *data);
+ /**
+ * Transform an arbitrary string into a non-routable ipv6 address.
+ * Useful for mapping resolved addresses back to their source.
+ */
+ bool SetInternal(const std::string& name);
+
bool SetSpecial(const std::string &strName); // for Tor addresses
bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0)
bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor)
@@ -64,6 +71,7 @@ class CNetAddr
bool IsTor() const;
bool IsLocal() const;
bool IsRoutable() const;
+ bool IsInternal() const;
bool IsValid() const;
enum Network GetNetwork() const;
std::string ToString() const;
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 32557dd179..a23f92e1ed 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -108,17 +108,22 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
struct addrinfo *aiTrav = aiRes;
while (aiTrav != NULL && (nMaxSolutions == 0 || vIP.size() < nMaxSolutions))
{
+ CNetAddr resolved;
if (aiTrav->ai_family == AF_INET)
{
assert(aiTrav->ai_addrlen >= sizeof(sockaddr_in));
- vIP.push_back(CNetAddr(((struct sockaddr_in*)(aiTrav->ai_addr))->sin_addr));
+ resolved = CNetAddr(((struct sockaddr_in*)(aiTrav->ai_addr))->sin_addr);
}
if (aiTrav->ai_family == AF_INET6)
{
assert(aiTrav->ai_addrlen >= sizeof(sockaddr_in6));
struct sockaddr_in6* s6 = (struct sockaddr_in6*) aiTrav->ai_addr;
- vIP.push_back(CNetAddr(s6->sin6_addr, s6->sin6_scope_id));
+ resolved = CNetAddr(s6->sin6_addr, s6->sin6_scope_id);
+ }
+ /* Never allow resolving to an internal address. Consider any such result invalid */
+ if (!resolved.IsInternal()) {
+ vIP.push_back(resolved);
}
aiTrav = aiTrav->ai_next;
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 3c3a2fb651..771491770e 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -16,6 +16,26 @@
static constexpr double INF_FEERATE = 1e99;
+std::string StringForFeeReason(FeeReason reason) {
+ static const std::map<FeeReason, std::string> fee_reason_strings = {
+ {FeeReason::NONE, "None"},
+ {FeeReason::HALF_ESTIMATE, "Half Target 60% Threshold"},
+ {FeeReason::FULL_ESTIMATE, "Target 85% Threshold"},
+ {FeeReason::DOUBLE_ESTIMATE, "Double Target 95% Threshold"},
+ {FeeReason::CONSERVATIVE, "Conservative Double Target longer horizon"},
+ {FeeReason::MEMPOOL_MIN, "Mempool Min Fee"},
+ {FeeReason::PAYTXFEE, "PayTxFee set"},
+ {FeeReason::FALLBACK, "Fallback fee"},
+ {FeeReason::REQUIRED, "Minimum Required Fee"},
+ {FeeReason::MAXTXFEE, "MaxTxFee limit"}
+ };
+ auto reason_string = fee_reason_strings.find(reason);
+
+ if (reason_string == fee_reason_strings.end()) return "Unknown";
+
+ return reason_string->second;
+}
+
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block. We will lump transactions into a bucket according to their
@@ -698,31 +718,36 @@ unsigned int CBlockPolicyEstimator::MaxUsableEstimate() const
* time horizon which tracks confirmations up to the desired target. If
* checkShorterHorizon is requested, also allow short time horizon estimates
* for a lower target to reduce the given answer */
-double CBlockPolicyEstimator::estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon) const
+double CBlockPolicyEstimator::estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon, EstimationResult *result) const
{
double estimate = -1;
if (confTarget >= 1 && confTarget <= longStats->GetMaxConfirms()) {
// Find estimate from shortest time horizon possible
if (confTarget <= shortStats->GetMaxConfirms()) { // short horizon
- estimate = shortStats->EstimateMedianVal(confTarget, SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight);
+ estimate = shortStats->EstimateMedianVal(confTarget, SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight, result);
}
else if (confTarget <= feeStats->GetMaxConfirms()) { // medium horizon
- estimate = feeStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
+ estimate = feeStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight, result);
}
else { // long horizon
- estimate = longStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
+ estimate = longStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight, result);
}
if (checkShorterHorizon) {
+ EstimationResult tempResult;
// If a lower confTarget from a more recent horizon returns a lower answer use it.
if (confTarget > feeStats->GetMaxConfirms()) {
- double medMax = feeStats->EstimateMedianVal(feeStats->GetMaxConfirms(), SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
- if (medMax > 0 && (estimate == -1 || medMax < estimate))
+ double medMax = feeStats->EstimateMedianVal(feeStats->GetMaxConfirms(), SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight, &tempResult);
+ if (medMax > 0 && (estimate == -1 || medMax < estimate)) {
estimate = medMax;
+ if (result) *result = tempResult;
+ }
}
if (confTarget > shortStats->GetMaxConfirms()) {
- double shortMax = shortStats->EstimateMedianVal(shortStats->GetMaxConfirms(), SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight);
- if (shortMax > 0 && (estimate == -1 || shortMax < estimate))
+ double shortMax = shortStats->EstimateMedianVal(shortStats->GetMaxConfirms(), SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight, &tempResult);
+ if (shortMax > 0 && (estimate == -1 || shortMax < estimate)) {
estimate = shortMax;
+ if (result) *result = tempResult;
+ }
}
}
}
@@ -732,16 +757,18 @@ double CBlockPolicyEstimator::estimateCombinedFee(unsigned int confTarget, doubl
/** Ensure that for a conservative estimate, the DOUBLE_SUCCESS_PCT is also met
* at 2 * target for any longer time horizons.
*/
-double CBlockPolicyEstimator::estimateConservativeFee(unsigned int doubleTarget) const
+double CBlockPolicyEstimator::estimateConservativeFee(unsigned int doubleTarget, EstimationResult *result) const
{
double estimate = -1;
+ EstimationResult tempResult;
if (doubleTarget <= shortStats->GetMaxConfirms()) {
- estimate = feeStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight);
+ estimate = feeStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight, result);
}
if (doubleTarget <= feeStats->GetMaxConfirms()) {
- double longEstimate = longStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight);
+ double longEstimate = longStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight, &tempResult);
if (longEstimate > estimate) {
estimate = longEstimate;
+ if (result) *result = tempResult;
}
}
return estimate;
@@ -754,12 +781,15 @@ double CBlockPolicyEstimator::estimateConservativeFee(unsigned int doubleTarget)
* estimates, however, required the 95% threshold at 2 * target be met for any
* longer time horizons also.
*/
-CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative) const
+CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, FeeCalculation *feeCalc, const CTxMemPool& pool, bool conservative) const
{
- if (answerFoundAtTarget)
- *answerFoundAtTarget = confTarget;
+ if (feeCalc) {
+ feeCalc->desiredTarget = confTarget;
+ feeCalc->returnedTarget = confTarget;
+ }
double median = -1;
+ EstimationResult tempResult;
{
LOCK(cs_feeEstimator);
@@ -780,7 +810,6 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
}
assert(confTarget > 0); //estimateCombinedFee and estimateConservativeFee take unsigned ints
-
/** true is passed to estimateCombined fee for target/2 and target so
* that we check the max confirms for shorter time horizons as well.
* This is necessary to preserve monotonically increasing estimates.
@@ -791,32 +820,49 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
* the purpose of conservative estimates is not to let short term
* fluctuations lower our estimates by too much.
*/
- double halfEst = estimateCombinedFee(confTarget/2, HALF_SUCCESS_PCT, true);
- double actualEst = estimateCombinedFee(confTarget, SUCCESS_PCT, true);
- double doubleEst = estimateCombinedFee(2 * confTarget, DOUBLE_SUCCESS_PCT, !conservative);
+ double halfEst = estimateCombinedFee(confTarget/2, HALF_SUCCESS_PCT, true, &tempResult);
+ if (feeCalc) {
+ feeCalc->est = tempResult;
+ feeCalc->reason = FeeReason::HALF_ESTIMATE;
+ }
median = halfEst;
+ double actualEst = estimateCombinedFee(confTarget, SUCCESS_PCT, true, &tempResult);
if (actualEst > median) {
median = actualEst;
+ if (feeCalc) {
+ feeCalc->est = tempResult;
+ feeCalc->reason = FeeReason::FULL_ESTIMATE;
+ }
}
+ double doubleEst = estimateCombinedFee(2 * confTarget, DOUBLE_SUCCESS_PCT, !conservative, &tempResult);
if (doubleEst > median) {
median = doubleEst;
+ if (feeCalc) {
+ feeCalc->est = tempResult;
+ feeCalc->reason = FeeReason::DOUBLE_ESTIMATE;
+ }
}
if (conservative || median == -1) {
- double consEst = estimateConservativeFee(2 * confTarget);
+ double consEst = estimateConservativeFee(2 * confTarget, &tempResult);
if (consEst > median) {
median = consEst;
+ if (feeCalc) {
+ feeCalc->est = tempResult;
+ feeCalc->reason = FeeReason::CONSERVATIVE;
+ }
}
}
} // Must unlock cs_feeEstimator before taking mempool locks
- if (answerFoundAtTarget)
- *answerFoundAtTarget = confTarget;
+ if (feeCalc) feeCalc->returnedTarget = confTarget;
// If mempool is limiting txs , return at least the min feerate from the mempool
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
- if (minPoolFee > 0 && minPoolFee > median)
+ if (minPoolFee > 0 && minPoolFee > median) {
+ if (feeCalc) feeCalc->reason = FeeReason::MEMPOOL_MIN;
return CFeeRate(minPoolFee);
+ }
if (median < 0)
return CFeeRate(0);
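
estimateSmartFee now records, via FeeCalculation, which of the half-target, full-target, double-target or conservative estimates ended up winning. That selection is easy to lose amid the diff noise, so here is a self-contained sketch that replays it with made-up numbers (not real estimator output) to show how the recorded reason tracks the running maximum:

    #include <cstdio>

    enum class FeeReasonSketch { NONE, HALF_ESTIMATE, FULL_ESTIMATE, DOUBLE_ESTIMATE, CONSERVATIVE };

    static const char* Describe(FeeReasonSketch r)
    {
        switch (r) {
        case FeeReasonSketch::HALF_ESTIMATE:   return "Half Target 60% Threshold";
        case FeeReasonSketch::FULL_ESTIMATE:   return "Target 85% Threshold";
        case FeeReasonSketch::DOUBLE_ESTIMATE: return "Double Target 95% Threshold";
        case FeeReasonSketch::CONSERVATIVE:    return "Conservative Double Target longer horizon";
        default:                               return "None";
        }
    }

    int main()
    {
        // Stand-ins for estimateCombinedFee()/estimateConservativeFee() output.
        const double halfEst = 12.0, actualEst = 15.0, doubleEst = 14.0, consEst = 18.0;
        const bool conservative = true;

        double median = halfEst;
        FeeReasonSketch reason = FeeReasonSketch::HALF_ESTIMATE;
        if (actualEst > median) { median = actualEst; reason = FeeReasonSketch::FULL_ESTIMATE; }
        if (doubleEst > median) { median = doubleEst; reason = FeeReasonSketch::DOUBLE_ESTIMATE; }
        if (conservative || median == -1) {
            if (consEst > median) { median = consEst; reason = FeeReasonSketch::CONSERVATIVE; }
        }
        std::printf("fee %.1f chosen because: %s\n", median, Describe(reason));
        return 0;
    }
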
diff --git a/src/policy/fees.h b/src/policy/fees.h
index e99fec2c39..2029ce3744 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -48,7 +48,7 @@ class TxConfirmStats;
* in each bucket and the total amount of feerate paid in each bucket. Then we
* calculate how many blocks Y it took each transaction to be mined. We convert
* from a number of blocks to a number of periods Y' each encompassing "scale"
- * blocks. This is is tracked in 3 different data sets each up to a maximum
+ * blocks. This is tracked in 3 different data sets each up to a maximum
* number of periods. Within each data set we have an array of counters in each
* feerate bucket and we increment all the counters from Y' up to max periods
* representing that a tx was successfully confirmed in less than or equal to
@@ -74,6 +74,22 @@ enum FeeEstimateHorizon {
LONG_HALFLIFE = 2
};
+/* Enumeration of reasons for a returned fee estimate */
+enum class FeeReason {
+ NONE,
+ HALF_ESTIMATE,
+ FULL_ESTIMATE,
+ DOUBLE_ESTIMATE,
+ CONSERVATIVE,
+ MEMPOOL_MIN,
+ PAYTXFEE,
+ FALLBACK,
+ REQUIRED,
+ MAXTXFEE,
+};
+
+std::string StringForFeeReason(FeeReason reason);
+
/* Used to return detailed information about a feerate bucket */
struct EstimatorBucket
{
@@ -90,8 +106,16 @@ struct EstimationResult
{
EstimatorBucket pass;
EstimatorBucket fail;
- double decay;
- unsigned int scale;
+ double decay = 0;
+ unsigned int scale = 0;
+};
+
+struct FeeCalculation
+{
+ EstimationResult est;
+ FeeReason reason = FeeReason::NONE;
+ int desiredTarget = 0;
+ int returnedTarget = 0;
};
/**
@@ -173,7 +197,7 @@ public:
* the closest target where one can be given. 'conservative' estimates are
* valid over longer time horizons also.
*/
- CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative = true) const;
+ CFeeRate estimateSmartFee(int confTarget, FeeCalculation *feeCalc, const CTxMemPool& pool, bool conservative = true) const;
/** Return a specific fee estimate calculation with a given success
* threshold and time horizon, and optionally return detailed data about
@@ -223,9 +247,9 @@ private:
bool processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry);
/** Helper for estimateSmartFee */
- double estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon) const;
+ double estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon, EstimationResult *result) const;
/** Helper for estimateSmartFee */
- double estimateConservativeFee(unsigned int doubleTarget) const;
+ double estimateConservativeFee(unsigned int doubleTarget, EstimationResult *result) const;
/** Number of blocks of data recorded while fee estimates have been running */
unsigned int BlockSpan() const;
/** Number of blocks of recorded fee estimate data represented in saved data file */
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 6cd246ed53..da87e40091 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -39,7 +39,7 @@ const char *SENDCMPCT="sendcmpct";
const char *CMPCTBLOCK="cmpctblock";
const char *GETBLOCKTXN="getblocktxn";
const char *BLOCKTXN="blocktxn";
-};
+} // namespace NetMsgType
/** All known message types. Keep this in the same order as the list of
* messages above and in protocol.h.
diff --git a/src/pubkey.cpp b/src/pubkey.cpp
index e57fa238cb..a16457ea4e 100644
--- a/src/pubkey.cpp
+++ b/src/pubkey.cpp
@@ -11,7 +11,7 @@ namespace
{
/* Global secp256k1_context object used for verification. */
secp256k1_context* secp256k1_context_verify = NULL;
-}
+} // namespace
/** This function is taken from the libsecp256k1 distribution and implements
* DER parsing for ECDSA signatures, while supporting an arbitrary subset of
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 6d8760c071..8a745cadce 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -578,6 +578,7 @@ int main(int argc, char *argv[])
// Need to pass name here as CAmount is a typedef (see http://qt-project.org/doc/qt-5/qmetatype.html#qRegisterMetaType)
// IMPORTANT if it is no longer a typedef use the normal variant above
qRegisterMetaType< CAmount >("CAmount");
+ qRegisterMetaType< std::function<void(void)> >("std::function<void(void)>");
/// 3. Application identification
// must be set before OptionsModel is initialized or translations are loaded,
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index de00eacdb9..33f4535ee2 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -26,7 +26,6 @@
class CBlockIndex;
-static const int64_t nClientStartupTime = GetTime();
static int64_t nLastHeaderTipUpdateNotification = 0;
static int64_t nLastBlockTipUpdateNotification = 0;
@@ -238,7 +237,7 @@ bool ClientModel::isReleaseVersion() const
QString ClientModel::formatClientStartupTime() const
{
- return QDateTime::fromTime_t(nClientStartupTime).toString();
+ return QDateTime::fromTime_t(GetStartupTime()).toString();
}
QString ClientModel::dataDir() const
@@ -303,7 +302,7 @@ static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, const CB
}
// if we are in-sync, update the UI regardless of last update time
if (!initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
- //pass a async signal to the UI thread
+ //pass an async signal to the UI thread
QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection,
Q_ARG(int, pIndex->nHeight),
Q_ARG(QDateTime, QDateTime::fromTime_t(pIndex->GetBlockTime())),
diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp
index 06b599f3e2..af9a888d94 100644
--- a/src/qt/coincontroldialog.cpp
+++ b/src/qt/coincontroldialog.cpp
@@ -499,7 +499,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
{
// there is some fudging in these numbers related to the actual virtual transaction size calculation that will keep this estimate from being exact.
// usually, the result will be an overestimate within a couple of satoshis so that the confirmation dialog ends up displaying a slightly smaller fee.
- // also, the witness stack size value value is a variable sized integer. usually, the number of stack items will be well under the single byte var int limit.
+ // also, the witness stack size value is a variable sized integer. usually, the number of stack items will be well under the single byte var int limit.
nBytes += 2; // account for the serialized marker and flag bytes
nBytes += nQuantity; // account for the witness byte that holds the number of stack items for each input.
}
@@ -510,7 +510,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
nBytes -= 34;
// Fee
- nPayFee = CWallet::GetMinimumFee(nBytes, nTxConfirmTarget, ::mempool, ::feeEstimator);
+ nPayFee = CWallet::GetMinimumFee(nBytes, coinControl->nConfirmTarget, ::mempool, ::feeEstimator);
if (nPayAmount > 0)
{
@@ -524,13 +524,10 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
CTxOut txout(nChange, (CScript)std::vector<unsigned char>(24, 0));
if (IsDust(txout, ::dustRelayFee))
{
- if (CoinControlDialog::fSubtractFeeFromAmount) // dust-change will be raised until no dust
- nChange = GetDustThreshold(txout, ::dustRelayFee);
- else
- {
- nPayFee += nChange;
- nChange = 0;
- }
+ nPayFee += nChange;
+ nChange = 0;
+ if (CoinControlDialog::fSubtractFeeFromAmount)
+ nBytes -= 34; // we didn't detect lack of change above
}
}
@@ -588,7 +585,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
if (payTxFee.GetFeePerK() > 0)
dFeeVary = (double)std::max(CWallet::GetRequiredFee(1000), payTxFee.GetFeePerK()) / 1000;
else {
- dFeeVary = (double)std::max(CWallet::GetRequiredFee(1000), ::feeEstimator.estimateSmartFee(nTxConfirmTarget, NULL, ::mempool).GetFeePerK()) / 1000;
+ dFeeVary = (double)std::max(CWallet::GetRequiredFee(1000), ::feeEstimator.estimateSmartFee(coinControl->nConfirmTarget, NULL, ::mempool).GetFeePerK()) / 1000;
}
QString toolTip4 = tr("Can vary +/- %1 satoshi(s) per input.").arg(dFeeVary);
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index 0f1b3f4a73..14078b9ee8 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -315,7 +315,7 @@
<bool>false</bool>
</property>
<property name="toolTip">
- <string>Shows, if the supplied default SOCKS5 proxy is used to reach peers via this network type.</string>
+ <string>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</string>
</property>
<property name="text">
<string/>
@@ -338,7 +338,7 @@
<bool>false</bool>
</property>
<property name="toolTip">
- <string>Shows, if the supplied default SOCKS5 proxy is used to reach peers via this network type.</string>
+ <string>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</string>
</property>
<property name="text">
<string/>
@@ -361,7 +361,7 @@
<bool>false</bool>
</property>
<property name="toolTip">
- <string>Shows, if the supplied default SOCKS5 proxy is used to reach peers via this network type.</string>
+ <string>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</string>
</property>
<property name="text">
<string/>
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index 2460a59109..1e0d472b6a 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -214,7 +214,7 @@ bool Intro::pickDataDirectory()
}
dataDir = intro.getDataDirectory();
try {
- TryCreateDirectory(GUIUtil::qstringToBoostPath(dataDir));
+ TryCreateDirectories(GUIUtil::qstringToBoostPath(dataDir));
break;
} catch (const fs::filesystem_error&) {
QMessageBox::critical(0, tr(PACKAGE_NAME),
diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp
index 4779ffa43f..a83f285034 100644
--- a/src/qt/modaloverlay.cpp
+++ b/src/qt/modaloverlay.cpp
@@ -126,7 +126,7 @@ void ModalOverlay::tipUpdate(int count, const QDateTime& blockDate, double nVeri
return;
// estimate the number of headers left based on nPowTargetSpacing
- // and check if the gui is not aware of the the best header (happens rarely)
+ // and check if the gui is not aware of the best header (happens rarely)
int estimateNumHeadersLeft = bestHeaderDate.secsTo(currentDate) / Params().GetConsensus().nPowTargetSpacing;
bool hasBestHeader = bestHeaderHeight >= count;
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index b17693e1ca..ec0580b81c 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -672,7 +672,7 @@ void RPCConsole::setFontSize(int newSize)
{
QSettings settings;
- //don't allow a insane font size
+ //don't allow an insane font size
if (newSize < FONT_RANGE.width() || newSize > FONT_RANGE.height())
return;
@@ -738,7 +738,7 @@ void RPCConsole::clear(bool clearHistory)
tr("Use up and down arrows to navigate history, and %1 to clear screen.").arg("<b>"+clsKey+"</b>") + "<br>" +
tr("Type <b>help</b> for an overview of available commands.")) +
"<br><span class=\"secwarning\">" +
- tr("WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramification of a command.") +
+ tr("WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramifications of a command.") +
"</span>",
true);
}
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index cda33076f8..12d2d0f31c 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -651,8 +651,8 @@ void SendCoinsDialog::updateSmartFeeLabel()
return;
int nBlocksToConfirm = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
- int estimateFoundAtBlocks = nBlocksToConfirm;
- CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocksToConfirm, &estimateFoundAtBlocks, ::mempool);
+ FeeCalculation feeCalc;
+ CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocksToConfirm, &feeCalc, ::mempool);
if (feeRate <= CFeeRate(0)) // not enough data => minfee
{
ui->labelSmartFee->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(),
@@ -670,7 +670,7 @@ void SendCoinsDialog::updateSmartFeeLabel()
ui->labelSmartFee->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(),
std::max(feeRate.GetFeePerK(), CWallet::GetRequiredFee(1000))) + "/kB");
ui->labelSmartFee2->hide();
- ui->labelFeeEstimation->setText(tr("Estimated to begin confirmation within %n block(s).", "", estimateFoundAtBlocks));
+ ui->labelFeeEstimation->setText(tr("Estimated to begin confirmation within %n block(s).", "", feeCalc.returnedTarget));
ui->fallbackFeeWarningLabel->setVisible(false);
}
@@ -822,6 +822,12 @@ void SendCoinsDialog::coinControlUpdateLabels()
// set pay amounts
CoinControlDialog::payAmounts.clear();
CoinControlDialog::fSubtractFeeFromAmount = false;
+ if (ui->radioSmartFee->isChecked()) {
+ CoinControlDialog::coinControl->nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
+ } else {
+ CoinControlDialog::coinControl->nConfirmTarget = model->getDefaultConfirmTarget();
+ }
+
for(int i = 0; i < ui->entries->count(); ++i)
{
SendCoinsEntry *entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
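In the hunk above, estimateSmartFee now fills a FeeCalculation object instead of a bare out-parameter int, and the dialog reads the target the estimate was actually found at from it. A minimal standalone sketch of that calling pattern (not part of the patch; the struct and function below are illustrative stand-ins, not Core's actual API):

    #include <cstdio>

    struct FeeCalcResult {
        int returnedTarget = 0; // confirmation target the estimate was actually found at
        double feePerKB = 0.0;  // estimated fee rate
    };

    // Stand-in for the estimator call: fills the result struct when one is supplied.
    static double EstimateSmartFee(int requestedTarget, FeeCalcResult* calc)
    {
        if (calc) {
            calc->returnedTarget = requestedTarget + 2; // pretend data existed only at a longer target
            calc->feePerKB = 0.0001;
            return calc->feePerKB;
        }
        return 0.0;
    }

    int main()
    {
        FeeCalcResult calc;
        double rate = EstimateSmartFee(6, &calc);
        std::printf("rate %.5f per kB, estimate found at target %d\n", rate, calc.returnedTarget);
    }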
diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp
index 10966e42eb..1b7cc69231 100644
--- a/src/qt/splashscreen.cpp
+++ b/src/qt/splashscreen.cpp
@@ -131,6 +131,7 @@ SplashScreen::SplashScreen(Qt::WindowFlags f, const NetworkStyle *networkStyle)
move(QApplication::desktop()->screenGeometry().center() - r.center());
subscribeToCoreSignals();
+ installEventFilter(this);
}
SplashScreen::~SplashScreen()
@@ -138,6 +139,16 @@ SplashScreen::~SplashScreen()
unsubscribeFromCoreSignals();
}
+bool SplashScreen::eventFilter(QObject * obj, QEvent * ev) {
+ if (ev->type() == QEvent::KeyPress) {
+ QKeyEvent *keyEvent = static_cast<QKeyEvent *>(ev);
+ if(keyEvent->text()[0] == 'q' && breakAction != nullptr) {
+ breakAction();
+ }
+ }
+ return QObject::eventFilter(obj, ev);
+}
+
void SplashScreen::slotFinish(QWidget *mainWin)
{
Q_UNUSED(mainWin);
@@ -164,6 +175,18 @@ static void ShowProgress(SplashScreen *splash, const std::string &title, int nPr
InitMessage(splash, title + strprintf("%d", nProgress) + "%");
}
+void SplashScreen::setBreakAction(const std::function<void(void)> &action)
+{
+ breakAction = action;
+}
+
+static void SetProgressBreakAction(SplashScreen *splash, const std::function<void(void)> &action)
+{
+ QMetaObject::invokeMethod(splash, "setBreakAction",
+ Qt::QueuedConnection,
+ Q_ARG(std::function<void(void)>, action));
+}
+
#ifdef ENABLE_WALLET
void SplashScreen::ConnectWallet(CWallet* wallet)
{
@@ -177,6 +200,7 @@ void SplashScreen::subscribeToCoreSignals()
// Connect signals to client
uiInterface.InitMessage.connect(boost::bind(InitMessage, this, _1));
uiInterface.ShowProgress.connect(boost::bind(ShowProgress, this, _1, _2));
+ uiInterface.SetProgressBreakAction.connect(boost::bind(SetProgressBreakAction, this, _1));
#ifdef ENABLE_WALLET
uiInterface.LoadWallet.connect(boost::bind(&SplashScreen::ConnectWallet, this, _1));
#endif
diff --git a/src/qt/splashscreen.h b/src/qt/splashscreen.h
index 95a65cc53c..a88ebb98a8 100644
--- a/src/qt/splashscreen.h
+++ b/src/qt/splashscreen.h
@@ -5,6 +5,7 @@
#ifndef BITCOIN_QT_SPLASHSCREEN_H
#define BITCOIN_QT_SPLASHSCREEN_H
+#include <functional>
#include <QSplashScreen>
class CWallet;
@@ -35,6 +36,11 @@ public Q_SLOTS:
/** Show message and progress */
void showMessage(const QString &message, int alignment, const QColor &color);
+ /** Sets the break action */
+ void setBreakAction(const std::function<void(void)> &action);
+protected:
+ bool eventFilter(QObject * obj, QEvent * ev);
+
private:
/** Connect core signals to splash screen */
void subscribeToCoreSignals();
@@ -49,6 +55,8 @@ private:
int curAlignment;
QList<CWallet*> connectedWallets;
+
+ std::function<void(void)> breakAction;
};
#endif // BITCOIN_QT_SPLASHSCREEN_H
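The splash-screen changes above store a std::function handed in via setBreakAction() and invoke it from the key-press event filter, so a long startup verification can be aborted. A minimal non-Qt sketch of the same pattern (not part of the patch; class and member names are illustrative):

    #include <functional>
    #include <iostream>

    class Splash {
    public:
        // Corresponds to setBreakAction(): startup code hands us a shutdown request.
        void setBreakAction(const std::function<void()>& action) { m_break = action; }

        // Corresponds to the key-press branch of the event filter.
        void onKeyPress(char key)
        {
            if (key == 'q' && m_break) m_break();
        }

    private:
        std::function<void()> m_break;
    };

    int main()
    {
        Splash splash;
        splash.setBreakAction([] { std::cout << "shutdown requested during startup\n"; });
        splash.onKeyPress('q'); // triggers the stored action
    }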
diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp
index dada689731..26dec3c610 100644
--- a/src/qt/test/rpcnestedtests.cpp
+++ b/src/qt/test/rpcnestedtests.cpp
@@ -33,8 +33,6 @@ static const CRPCCommand vRPCCommands[] =
void RPCNestedTests::rpcNestedTests()
{
- UniValue jsonRPCError;
-
// do some test setup
// could be moved to a more generic place when we add more tests on QT level
const CChainParams& chainparams = Params();
diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp
index e3e070b27f..43d6e8826b 100644
--- a/src/qt/transactionview.cpp
+++ b/src/qt/transactionview.cpp
@@ -336,6 +336,10 @@ void TransactionView::changedAmount(const QString &amount)
void TransactionView::exportClicked()
{
+ if (!model || !model->getOptionsModel()) {
+ return;
+ }
+
// CSV is currently the only supported format
QString filename = GUIUtil::getSaveFileName(this,
tr("Export Transaction History"), QString(),
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 2b635e018c..7eff783fe8 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -338,7 +338,7 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran
transaction_array.append(&(ssTx[0]), ssTx.size());
}
- // Add addresses / update labels that we've sent to to the address book,
+ // Add addresses / update labels that we've sent to the address book,
// and emit coinsSent signal for each recipient
for (const SendCoinsRecipient &rcp : transaction.getRecipients())
{
diff --git a/src/random.cpp b/src/random.cpp
index e1ccfa5f24..67efc7d945 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -65,6 +65,70 @@ static inline int64_t GetPerformanceCounter()
#endif
}
+
+#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
+static std::atomic<bool> hwrand_initialized{false};
+static bool rdrand_supported = false;
+static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
+static void RDRandInit()
+{
+ uint32_t eax, ecx, edx;
+#if defined(__i386__) && ( defined(__PIC__) || defined(__PIE__))
+ // Avoid clobbering ebx, as that is used for PIC on x86.
+ uint32_t tmp;
+ __asm__ ("mov %%ebx, %1; cpuid; mov %1, %%ebx": "=a"(eax), "=g"(tmp), "=c"(ecx), "=d"(edx) : "a"(1));
+#else
+ uint32_t ebx;
+ __asm__ ("cpuid": "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(1));
+#endif
+ //! When calling cpuid function #1, ecx register will have this set if RDRAND is available.
+ if (ecx & CPUID_F1_ECX_RDRAND) {
+ LogPrintf("Using RdRand as entropy source\n");
+ rdrand_supported = true;
+ }
+ hwrand_initialized.store(true);
+}
+#else
+static void RDRandInit() {}
+#endif
+
+static bool GetHWRand(unsigned char* ent32) {
+#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
+ assert(hwrand_initialized.load(std::memory_order_relaxed));
+ if (rdrand_supported) {
+ uint8_t ok;
+ // Not all assemblers support the rdrand instruction, write it in hex.
+#ifdef __i386__
+ for (int iter = 0; iter < 4; ++iter) {
+ uint32_t r1, r2;
+ __asm__ volatile (".byte 0x0f, 0xc7, 0xf0;" // rdrand %eax
+ ".byte 0x0f, 0xc7, 0xf2;" // rdrand %edx
+ "setc %2" :
+ "=a"(r1), "=d"(r2), "=q"(ok) :: "cc");
+ if (!ok) return false;
+ WriteLE32(ent32 + 8 * iter, r1);
+ WriteLE32(ent32 + 8 * iter + 4, r2);
+ }
+#else
+ uint64_t r1, r2, r3, r4;
+ __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0, " // rdrand %rax
+ "0x48, 0x0f, 0xc7, 0xf3, " // rdrand %rbx
+ "0x48, 0x0f, 0xc7, 0xf1, " // rdrand %rcx
+ "0x48, 0x0f, 0xc7, 0xf2; " // rdrand %rdx
+ "setc %4" :
+ "=a"(r1), "=b"(r2), "=c"(r3), "=d"(r4), "=q"(ok) :: "cc");
+ if (!ok) return false;
+ WriteLE64(ent32, r1);
+ WriteLE64(ent32 + 8, r2);
+ WriteLE64(ent32 + 16, r3);
+ WriteLE64(ent32 + 24, r4);
+#endif
+ return true;
+ }
+#endif
+ return false;
+}
+
void RandAddSeed()
{
// Seed with CPU performance counter
@@ -255,6 +319,11 @@ void GetStrongRandBytes(unsigned char* out, int num)
GetOSRand(buf);
hasher.Write(buf, 32);
+ // Third source: HW RNG, if available.
+ if (GetHWRand(buf)) {
+ hasher.Write(buf, 32);
+ }
+
// Combine with and update state
{
std::unique_lock<std::mutex> lock(cs_rng_state);
@@ -381,3 +450,8 @@ FastRandomContext::FastRandomContext(bool fDeterministic) : requires_seed(!fDete
uint256 seed;
rng.SetKey(seed.begin(), 32);
}
+
+void RandomInit()
+{
+ RDRandInit();
+}
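The random.cpp changes above detect RDRAND once via CPUID leaf 1, ECX bit 30 (the 0x40000000 mask), and then mix the hardware output into the hasher as an additional entropy source rather than trusting it on its own. A minimal standalone sketch of the same feature test (not part of the patch), assuming GCC or Clang on x86 and using <cpuid.h> instead of raw inline assembly:

    #include <cpuid.h>
    #include <cstdio>

    static bool RdRandSupported()
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false; // CPUID leaf 1
        return (ecx & (1u << 30)) != 0; // ECX bit 30 == 0x40000000 signals RDRAND
    }

    int main()
    {
        std::printf("RDRAND %s\n", RdRandSupported() ? "available" : "not available");
        return 0;
    }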
diff --git a/src/random.h b/src/random.h
index dcb74eadb5..c60ab36179 100644
--- a/src/random.h
+++ b/src/random.h
@@ -140,4 +140,7 @@ void GetOSRand(unsigned char *ent32);
*/
bool Random_SanityCheck();
+/** Initialize the RNG. */
+void RandomInit();
+
#endif // BITCOIN_RANDOM_H
diff --git a/src/rest.cpp b/src/rest.cpp
index 8fb0c13fa5..33e3fb4529 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -413,7 +413,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart)
boost::split(uriParts, strUriParams, boost::is_any_of("/"));
}
- // throw exception in case of a empty request
+ // throw exception in case of an empty request
std::string strRequestMutable = req->ReadBody();
if (strRequestMutable.length() == 0 && uriParts.size() == 0)
return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request");
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 8f7f76841d..c17ca2fa3a 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -991,7 +991,7 @@ UniValue gettxout(const JSONRPCRequest& request)
if (fMempool) {
LOCK(mempool.cs);
CCoinsViewMemPool view(pcoinsTip, mempool);
- if (!view.GetCoin(out, coin) || mempool.isSpent(out)) { // TODO: filtering spent coins should be done by the CCoinsViewMemPool
+ if (!view.GetCoin(out, coin) || mempool.isSpent(out)) {
return NullUniValue;
}
} else {
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index c5585a9fba..cb1539dce5 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -76,6 +76,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "listunspent", 0, "minconf" },
{ "listunspent", 1, "maxconf" },
{ "listunspent", 2, "addresses" },
+ { "listunspent", 3, "include_unsafe" },
{ "listunspent", 4, "query_options" },
{ "getblock", 1, "verbosity" },
{ "getblockheader", 1, "verbose" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 9af29652cf..e50742f36e 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -18,6 +18,7 @@
#include "policy/fees.h"
#include "pow.h"
#include "rpc/blockchain.h"
+#include "rpc/mining.h"
#include "rpc/server.h"
#include "txmempool.h"
#include "util.h"
@@ -141,42 +142,6 @@ UniValue generateBlocks(std::shared_ptr<CReserveScript> coinbaseScript, int nGen
return blockHashes;
}
-UniValue generate(const JSONRPCRequest& request)
-{
- if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
- throw std::runtime_error(
- "generate nblocks ( maxtries )\n"
- "\nMine up to nblocks blocks immediately (before the RPC call returns)\n"
- "\nArguments:\n"
- "1. nblocks (numeric, required) How many blocks are generated immediately.\n"
- "2. maxtries (numeric, optional) How many iterations to try (default = 1000000).\n"
- "\nResult:\n"
- "[ blockhashes ] (array) hashes of blocks generated\n"
- "\nExamples:\n"
- "\nGenerate 11 blocks\n"
- + HelpExampleCli("generate", "11")
- );
-
- int nGenerate = request.params[0].get_int();
- uint64_t nMaxTries = 1000000;
- if (request.params.size() > 1) {
- nMaxTries = request.params[1].get_int();
- }
-
- std::shared_ptr<CReserveScript> coinbaseScript;
- GetMainSignals().ScriptForMining(coinbaseScript);
-
- // If the keypool is exhausted, no script is returned at all. Catch this.
- if (!coinbaseScript)
- throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, "Error: Keypool ran out, please call keypoolrefill first");
-
- //throw an error if no script was provided
- if (coinbaseScript->reserveScript.empty())
- throw JSONRPCError(RPC_INTERNAL_ERROR, "No coinbase script available (mining requires a wallet)");
-
- return generateBlocks(coinbaseScript, nGenerate, nMaxTries, true);
-}
-
UniValue generatetoaddress(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 2 || request.params.size() > 3)
@@ -722,19 +687,16 @@ protected:
UniValue submitblock(const JSONRPCRequest& request)
{
+ // We allow 2 arguments for compliance with BIP22. Argument 2 is ignored.
if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) {
throw std::runtime_error(
- "submitblock \"hexdata\" ( \"jsonparametersobject\" )\n"
+ "submitblock \"hexdata\" ( \"dummy\" )\n"
"\nAttempts to submit new block to network.\n"
- "The 'jsonparametersobject' parameter is currently ignored.\n"
"See https://en.bitcoin.it/wiki/BIP_0022 for full specification.\n"
"\nArguments\n"
"1. \"hexdata\" (string, required) the hex-encoded block data to submit\n"
- "2. \"parameters\" (string, optional) object of optional parameters\n"
- " {\n"
- " \"workid\" : \"id\" (string, optional) if the server provided a workid, it MUST be included with submissions\n"
- " }\n"
+ "2. \"dummy\" (optional) dummy value, for compatibility with BIP22. This value is ignored.\n"
"\nResult:\n"
"\nExamples:\n"
+ HelpExampleCli("submitblock", "\"mydata\"")
@@ -867,10 +829,10 @@ UniValue estimatesmartfee(const JSONRPCRequest& request)
}
UniValue result(UniValue::VOBJ);
- int answerFound;
- CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocks, &answerFound, ::mempool, conservative);
+ FeeCalculation feeCalc;
+ CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocks, &feeCalc, ::mempool, conservative);
result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
- result.push_back(Pair("blocks", answerFound));
+ result.push_back(Pair("blocks", feeCalc.returnedTarget));
return result;
}
@@ -963,9 +925,8 @@ static const CRPCCommand commands[] =
{ "mining", "getmininginfo", &getmininginfo, true, {} },
{ "mining", "prioritisetransaction", &prioritisetransaction, true, {"txid","dummy","fee_delta"} },
{ "mining", "getblocktemplate", &getblocktemplate, true, {"template_request"} },
- { "mining", "submitblock", &submitblock, true, {"hexdata","parameters"} },
+ { "mining", "submitblock", &submitblock, true, {"hexdata","dummy"} },
- { "generating", "generate", &generate, true, {"nblocks","maxtries"} },
{ "generating", "generatetoaddress", &generatetoaddress, true, {"nblocks","address","maxtries"} },
{ "util", "estimatefee", &estimatefee, true, {"nblocks"} },
diff --git a/src/rpc/mining.h b/src/rpc/mining.h
new file mode 100644
index 0000000000..a148d851da
--- /dev/null
+++ b/src/rpc/mining.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2017 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_RPC_MINING_H
+#define BITCOIN_RPC_MINING_H
+
+#include "script/script.h"
+
+#include <univalue.h>
+
+/** Generate blocks (mine) */
+UniValue generateBlocks(std::shared_ptr<CReserveScript> coinbaseScript, int nGenerate, uint64_t nMaxTries, bool keepScript);
+
+#endif
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index c52ea75840..89e0c5dbc5 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -74,7 +74,7 @@ UniValue getpeerinfo(const JSONRPCRequest& request)
"[\n"
" {\n"
" \"id\": n, (numeric) Peer index\n"
- " \"addr\":\"host:port\", (string) The ip address and port of the peer\n"
+ " \"addr\":\"host:port\", (string) The IP address and port of the peer\n"
" \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n"
" \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n"
" \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n"
@@ -198,7 +198,7 @@ UniValue addnode(const JSONRPCRequest& request)
(strCommand != "onetry" && strCommand != "add" && strCommand != "remove"))
throw std::runtime_error(
"addnode \"node\" \"add|remove|onetry\"\n"
- "\nAttempts add or remove a node from the addnode list.\n"
+ "\nAttempts to add or remove a node from the addnode list.\n"
"Or try a connection to a node once.\n"
"\nArguments:\n"
"1. \"node\" (string, required) The node (see getpeerinfo for nodes)\n"
@@ -289,7 +289,7 @@ UniValue getaddednodeinfo(const JSONRPCRequest& request)
"\nResult:\n"
"[\n"
" {\n"
- " \"addednode\" : \"192.168.0.201\", (string) The node ip address or name (as provided to addnode)\n"
+ " \"addednode\" : \"192.168.0.201\", (string) The node IP address or name (as provided to addnode)\n"
" \"connected\" : true|false, (boolean) If connected\n"
" \"addresses\" : [ (list of objects) Only when connected = true\n"
" {\n"
@@ -396,7 +396,7 @@ static UniValue GetNetworksInfo()
for(int n=0; n<NET_MAX; ++n)
{
enum Network network = static_cast<enum Network>(n);
- if(network == NET_UNROUTABLE)
+ if(network == NET_UNROUTABLE || network == NET_INTERNAL)
continue;
proxyType proxy;
UniValue obj(UniValue::VOBJ);
@@ -496,12 +496,12 @@ UniValue setban(const JSONRPCRequest& request)
(strCommand != "add" && strCommand != "remove"))
throw std::runtime_error(
"setban \"subnet\" \"add|remove\" (bantime) (absolute)\n"
- "\nAttempts add or remove a IP/Subnet from the banned list.\n"
+ "\nAttempts to add or remove an IP/Subnet from the banned list.\n"
"\nArguments:\n"
- "1. \"subnet\" (string, required) The IP/Subnet (see getpeerinfo for nodes ip) with a optional netmask (default is /32 = single ip)\n"
- "2. \"command\" (string, required) 'add' to add a IP/Subnet to the list, 'remove' to remove a IP/Subnet from the list\n"
- "3. \"bantime\" (numeric, optional) time in seconds how long (or until when if [absolute] is set) the ip is banned (0 or empty means using the default time of 24h which can also be overwritten by the -bantime startup argument)\n"
- "4. \"absolute\" (boolean, optional) If set, the bantime must be a absolute timestamp in seconds since epoch (Jan 1 1970 GMT)\n"
+ "1. \"subnet\" (string, required) The IP/Subnet (see getpeerinfo for nodes IP) with an optional netmask (default is /32 = single IP)\n"
+ "2. \"command\" (string, required) 'add' to add an IP/Subnet to the list, 'remove' to remove an IP/Subnet from the list\n"
+ "3. \"bantime\" (numeric, optional) time in seconds how long (or until when if [absolute] is set) the IP is banned (0 or empty means using the default time of 24h which can also be overwritten by the -bantime startup argument)\n"
+ "4. \"absolute\" (boolean, optional) If set, the bantime must be an absolute timestamp in seconds since epoch (Jan 1 1970 GMT)\n"
"\nExamples:\n"
+ HelpExampleCli("setban", "\"192.168.0.6\" \"add\" 86400")
+ HelpExampleCli("setban", "\"192.168.0.0/24\" \"add\"")
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 42f3762bf6..527a4d6974 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -218,9 +218,13 @@ UniValue gettxoutproof(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
pblockindex = mapBlockIndex[hashBlock];
} else {
- const Coin& coin = AccessByTxid(*pcoinsTip, oneTxid);
- if (!coin.IsSpent() && coin.nHeight > 0 && coin.nHeight <= chainActive.Height()) {
- pblockindex = chainActive[coin.nHeight];
+ // Loop through txids and try to find which block they're in. Exit loop once a block is found.
+ for (const auto& tx : setTxids) {
+ const Coin& coin = AccessByTxid(*pcoinsTip, tx);
+ if (!coin.IsSpent()) {
+ pblockindex = chainActive[coin.nHeight];
+ break;
+ }
}
}
@@ -243,7 +247,7 @@ UniValue gettxoutproof(const JSONRPCRequest& request)
if (setTxids.count(tx->GetHash()))
ntxFound++;
if (ntxFound != setTxids.size())
- throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "(Not all) transactions not found in specified block");
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Not all transactions found in specified or retrieved block");
CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
CMerkleBlock mb(block, setTxids);
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 6f7ab437b1..63e4e9c630 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -257,6 +257,22 @@ UniValue stop(const JSONRPCRequest& jsonRequest)
return "Bitcoin server stopping";
}
+UniValue uptime(const JSONRPCRequest& jsonRequest)
+{
+ if (jsonRequest.fHelp || jsonRequest.params.size() > 1)
+ throw std::runtime_error(
+ "uptime\n"
+ "\nReturns the total uptime of the server.\n"
+ "\nResult:\n"
+ "ttt (numeric) The number of seconds that the server has been running\n"
+ "\nExamples:\n"
+ + HelpExampleCli("uptime", "")
+ + HelpExampleRpc("uptime", "")
+ );
+
+ return GetTime() - GetStartupTime();
+}
+
/**
* Call Table
*/
@@ -266,6 +282,7 @@ static const CRPCCommand vRPCCommands[] =
/* Overall control/query calls */
{ "control", "help", &help, true, {"command"} },
{ "control", "stop", &stop, true, {} },
+ { "control", "uptime", &uptime, true, {} },
};
CRPCTable::CRPCTable()
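The new uptime RPC above simply reports the difference between the current time and a startup timestamp recorded once. A minimal sketch of that idea (not part of the patch), using std::chrono in place of the node's GetTime()/GetStartupTime() helpers:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    // Recorded once when the process starts, analogous to GetStartupTime().
    static const std::chrono::steady_clock::time_point g_start = std::chrono::steady_clock::now();

    static long long UptimeSeconds()
    {
        return std::chrono::duration_cast<std::chrono::seconds>(
            std::chrono::steady_clock::now() - g_start).count();
    }

    int main()
    {
        std::this_thread::sleep_for(std::chrono::seconds(2));
        std::printf("uptime: %lld seconds\n", UptimeSeconds());
    }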
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index c4ab441e2c..4b71a42cdf 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -68,7 +68,7 @@ struct ECCryptoClosure
};
ECCryptoClosure instance_of_eccryptoclosure;
-}
+} // namespace
/** Check that all specified flags are part of the libconsensus interface. */
static bool verify_flags(unsigned int flags)
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 222cff59ea..7149c938fc 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -31,7 +31,7 @@ inline bool set_error(ScriptError* ret, const ScriptError serror)
return false;
}
-} // anon namespace
+} // namespace
bool CastToBool(const valtype& vch)
{
@@ -1164,7 +1164,7 @@ uint256 GetOutputsHash(const CTransaction& txTo) {
return ss.GetHash();
}
-} // anon namespace
+} // namespace
PrecomputedTransactionData::PrecomputedTransactionData(const CTransaction& txTo)
{
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index 60f6f711e6..ab1dc4e681 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -160,9 +160,9 @@ protected:
public:
TransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn) : txTo(txToIn), nIn(nInIn), amount(amountIn), txdata(NULL) {}
TransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData& txdataIn) : txTo(txToIn), nIn(nInIn), amount(amountIn), txdata(&txdataIn) {}
- bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const;
- bool CheckLockTime(const CScriptNum& nLockTime) const;
- bool CheckSequence(const CScriptNum& nSequence) const;
+ bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override;
+ bool CheckLockTime(const CScriptNum& nLockTime) const override;
+ bool CheckSequence(const CScriptNum& nSequence) const override;
};
class MutableTransactionSignatureChecker : public TransactionSignatureChecker
diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp
index 7bb8d9941b..ceb573b2ec 100644
--- a/src/script/sigcache.cpp
+++ b/src/script/sigcache.cpp
@@ -66,7 +66,7 @@ public:
* signatureCache could be made local to VerifySignature.
*/
static CSignatureCache signatureCache;
-}
+} // namespace
// To be called once in AppInitMain/BasicTestingSetup to initialize the
// signatureCache.
@@ -74,10 +74,10 @@ void InitSignatureCache()
{
// nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
// setup_bytes creates the minimum possible cache (2 elements).
- size_t nMaxCacheSize = std::min(std::max((int64_t)0, GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE)), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
+ size_t nMaxCacheSize = std::min(std::max((int64_t)0, GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
size_t nElems = signatureCache.setup_bytes(nMaxCacheSize);
- LogPrintf("Using %zu MiB out of %zu requested for signature cache, able to store %zu elements\n",
- (nElems*sizeof(uint256)) >>20, nMaxCacheSize>>20, nElems);
+ LogPrintf("Using %zu MiB out of %zu/2 requested for signature cache, able to store %zu elements\n",
+ (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
}
bool CachingTransactionSignatureChecker::VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const
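The sigcache.cpp hunk above halves the -maxsigcachesize budget before sizing the signature cache, since the same setting now also has to cover the script execution cache introduced elsewhere in this commit. A minimal sketch of the arithmetic (not part of the patch; the 32 MiB request and 16384 MiB clamp below are illustrative values, not Core's defaults):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const int64_t requested_mib = 32;  // e.g. -maxsigcachesize=32 (illustrative)
        const int64_t clamp_mib = 16384;   // upper bound, illustrative
        // Half of the requested budget goes to the signature cache; the other half is
        // left for the script execution cache.
        const size_t sig_cache_bytes =
            std::min(std::max<int64_t>(0, requested_mib / 2), clamp_mib) * (size_t{1} << 20);
        std::printf("signature cache: %zu bytes (half of the %lld MiB request)\n",
                    sig_cache_bytes, (long long)requested_mib);
    }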
diff --git a/src/script/sigcache.h b/src/script/sigcache.h
index 55cec4cc8d..5832b264b3 100644
--- a/src/script/sigcache.h
+++ b/src/script/sigcache.h
@@ -48,7 +48,7 @@ private:
public:
CachingTransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, bool storeIn, PrecomputedTransactionData& txdataIn) : TransactionSignatureChecker(txToIn, nInIn, amountIn, txdataIn), store(storeIn) {}
- bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const;
+ bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const override;
};
void InitSignatureCache();
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 94120e6408..dc50467d3f 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -392,13 +392,13 @@ class DummySignatureChecker : public BaseSignatureChecker
public:
DummySignatureChecker() {}
- bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const
+ bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override
{
return true;
}
};
const DummySignatureChecker dummyChecker;
-}
+} // namespace
const BaseSignatureChecker& DummySignatureCreator::Checker() const
{
diff --git a/src/script/sign.h b/src/script/sign.h
index f3c0be4139..bd45862892 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -40,8 +40,8 @@ class TransactionSignatureCreator : public BaseSignatureCreator {
public:
TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL);
- const BaseSignatureChecker& Checker() const { return checker; }
- bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const;
+ const BaseSignatureChecker& Checker() const override { return checker; }
+ bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
};
class MutableTransactionSignatureCreator : public TransactionSignatureCreator {
@@ -55,8 +55,8 @@ public:
class DummySignatureCreator : public BaseSignatureCreator {
public:
DummySignatureCreator(const CKeyStore* keystoreIn) : BaseSignatureCreator(keystoreIn) {}
- const BaseSignatureChecker& Checker() const;
- bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const;
+ const BaseSignatureChecker& Checker() const override;
+ bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
};
struct SignatureData {
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 3415a1ad7c..760a5305e5 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -272,7 +272,7 @@ public:
return true;
}
};
-}
+} // namespace
CScript GetScriptForDestination(const CTxDestination& dest)
{
diff --git a/src/secp256k1/sage/group_prover.sage b/src/secp256k1/sage/group_prover.sage
index ab580c5b23..5198724bea 100644
--- a/src/secp256k1/sage/group_prover.sage
+++ b/src/secp256k1/sage/group_prover.sage
@@ -3,7 +3,7 @@
# to independently set assumptions on input or intermediary variables.
#
# The general approach is:
-# * A constraint is a tuple of two sets of of symbolic expressions:
+# * A constraint is a tuple of two sets of symbolic expressions:
# the first of which are required to evaluate to zero, the second of which
# are required to evaluate to nonzero.
# - A constraint is said to be conflicting if any of its nonzero expressions
diff --git a/src/secp256k1/src/asm/field_10x26_arm.s b/src/secp256k1/src/asm/field_10x26_arm.s
index 5df561f2fc..bd2b629e1c 100644
--- a/src/secp256k1/src/asm/field_10x26_arm.s
+++ b/src/secp256k1/src/asm/field_10x26_arm.s
@@ -11,7 +11,7 @@ Note:
- To avoid unnecessary loads and make use of available registers, two
'passes' have every time been interleaved, with the odd passes accumulating c' and d'
- which will be added to c and d respectively in the the even passes
+ which will be added to c and d respectively in the even passes
*/
diff --git a/src/support/events.h b/src/support/events.h
index 4f2f3cf9ef..90690876ee 100644
--- a/src/support/events.h
+++ b/src/support/events.h
@@ -27,26 +27,26 @@ MAKE_RAII(evhttp);
MAKE_RAII(evhttp_request);
MAKE_RAII(evhttp_connection);
-raii_event_base obtain_event_base() {
+inline raii_event_base obtain_event_base() {
auto result = raii_event_base(event_base_new());
if (!result.get())
throw std::runtime_error("cannot create event_base");
return result;
}
-raii_event obtain_event(struct event_base* base, evutil_socket_t s, short events, event_callback_fn cb, void* arg) {
+inline raii_event obtain_event(struct event_base* base, evutil_socket_t s, short events, event_callback_fn cb, void* arg) {
return raii_event(event_new(base, s, events, cb, arg));
}
-raii_evhttp obtain_evhttp(struct event_base* base) {
+inline raii_evhttp obtain_evhttp(struct event_base* base) {
return raii_evhttp(evhttp_new(base));
}
-raii_evhttp_request obtain_evhttp_request(void(*cb)(struct evhttp_request *, void *), void *arg) {
+inline raii_evhttp_request obtain_evhttp_request(void(*cb)(struct evhttp_request *, void *), void *arg) {
return raii_evhttp_request(evhttp_request_new(cb, arg));
}
-raii_evhttp_connection obtain_evhttp_connection_base(struct event_base* base, std::string host, uint16_t port) {
+inline raii_evhttp_connection obtain_evhttp_connection_base(struct event_base* base, std::string host, uint16_t port) {
auto result = raii_evhttp_connection(evhttp_connection_base_new(base, NULL, host.c_str(), port));
if (!result.get())
throw std::runtime_error("create connection failed");
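The obtain_* helpers above are defined directly in a header, so every translation unit including events.h emits its own copy; adding inline permits those repeated definitions instead of producing duplicate-symbol link errors. A minimal compilable sketch of the pattern (not part of the patch; helper name is illustrative):

    #include <cstdio>
    #include <stdexcept>

    // Imagine this definition living in a widely-included header such as events.h.
    // Without `inline`, each including translation unit would emit a strong symbol
    // and linking two of them together would fail with a duplicate definition.
    inline int obtain_resource(int v)
    {
        if (v < 0) throw std::runtime_error("cannot create resource");
        return v;
    }

    int main()
    {
        std::printf("obtained %d\n", obtain_resource(7));
    }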
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 98c1581093..2df6b84a59 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -148,9 +148,9 @@ class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
Win32LockedPageAllocator();
- void* AllocateLocked(size_t len, bool *lockingSuccess);
- void FreeLocked(void* addr, size_t len);
- size_t GetLimit();
+ void* AllocateLocked(size_t len, bool *lockingSuccess) override;
+ void FreeLocked(void* addr, size_t len) override;
+ size_t GetLimit() override;
private:
size_t page_size;
};
@@ -200,9 +200,9 @@ class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
PosixLockedPageAllocator();
- void* AllocateLocked(size_t len, bool *lockingSuccess);
- void FreeLocked(void* addr, size_t len);
- size_t GetLimit();
+ void* AllocateLocked(size_t len, bool *lockingSuccess) override;
+ void FreeLocked(void* addr, size_t len) override;
+ size_t GetLimit() override;
private:
size_t page_size;
};
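The override specifiers added above (and in the similar hunks later in this commit) make the compiler verify that each function really overrides a base-class virtual, so a signature drift becomes a build error rather than a silently added, never-called overload. A minimal sketch with an illustrative allocator pair (not part of the patch):

    #include <cstddef>
    #include <cstdio>

    struct LockedPageAllocatorBase {
        virtual ~LockedPageAllocatorBase() = default;
        virtual void* AllocateLocked(std::size_t len, bool* lockingSuccess) = 0;
    };

    struct TestAllocator : LockedPageAllocatorBase {
        // `override` guarantees this really overrides the base declaration; if the
        // parameter list drifted, the compiler would reject it instead of silently
        // creating an unrelated function.
        void* AllocateLocked(std::size_t len, bool* lockingSuccess) override
        {
            (void)len;
            *lockingSuccess = false; // pretend page locking failed
            return nullptr;
        }
    };

    int main()
    {
        TestAllocator alloc;
        bool locked = true;
        alloc.AllocateLocked(4096, &locked);
        std::printf("locking success: %s\n", locked ? "yes" : "no");
    }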
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index dc5372a070..bc6aef2c11 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -27,7 +27,7 @@ public:
insecure_rand = FastRandomContext(true);
}
- int RandomInt(int nMax)
+ int RandomInt(int nMax) override
{
state = (CHashWriter(SER_GETHASH, 0) << state).GetHash().GetCheapHash();
return (unsigned int)(state % nMax);
diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp
index 3f15a0dec1..4a533b5bf2 100644
--- a/src/test/allocator_tests.cpp
+++ b/src/test/allocator_tests.cpp
@@ -131,7 +131,7 @@ class TestLockedPageAllocator: public LockedPageAllocator
{
public:
TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
- void* AllocateLocked(size_t len, bool *lockingSuccess)
+ void* AllocateLocked(size_t len, bool *lockingSuccess) override
{
*lockingSuccess = false;
if (count > 0) {
@@ -146,10 +146,10 @@ public:
}
return 0;
}
- void FreeLocked(void* addr, size_t len)
+ void FreeLocked(void* addr, size_t len) override
{
}
- size_t GetLimit()
+ size_t GetLimit() override
{
return std::numeric_limits<size_t>::max();
}
diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp
index 45ae7d4636..2c98fbcfd6 100644
--- a/src/test/arith_uint256_tests.cpp
+++ b/src/test/arith_uint256_tests.cpp
@@ -219,7 +219,7 @@ BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ -
// Check if doing _A_ _OP_ _B_ results in the same as applying _OP_ onto each
-// element of Aarray and Barray, and then converting the result into a arith_uint256.
+// element of Aarray and Barray, and then converting the result into an arith_uint256.
#define CHECKBITWISEOPERATOR(_A_,_B_,_OP_) \
for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = _A_##Array[i] _OP_ _B_##Array[i]; } \
BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (_A_##L _OP_ _B_##L));
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 33abfabe6b..e24431528a 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -50,12 +50,6 @@ public:
return true;
}
- bool HaveCoin(const COutPoint& outpoint) const override
- {
- Coin coin;
- return GetCoin(outpoint, coin);
- }
-
uint256 GetBestBlock() const override { return hashBestBlock_; }
bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock) override
@@ -99,7 +93,7 @@ public:
size_t& usage() { return cachedCoinsUsage; }
};
-}
+} // namespace
BOOST_FIXTURE_TEST_SUITE(coins_tests, BasicTestingSetup)
@@ -147,8 +141,22 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
{
uint256 txid = txids[InsecureRandRange(txids.size())]; // txid we're going to modify in this iteration.
Coin& coin = result[COutPoint(txid, 0)];
+
+ // Determine whether to test HaveCoin before or after Access* (or both). As these functions
+ // can influence each other's behaviour by pulling things into the cache, all combinations
+ // are tested.
+ bool test_havecoin_before = InsecureRandBits(2) == 0;
+ bool test_havecoin_after = InsecureRandBits(2) == 0;
+
+ bool result_havecoin = test_havecoin_before ? stack.back()->HaveCoin(COutPoint(txid, 0)) : false;
const Coin& entry = (InsecureRandRange(500) == 0) ? AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0));
BOOST_CHECK(coin == entry);
+ BOOST_CHECK(!test_havecoin_before || result_havecoin == !entry.IsSpent());
+
+ if (test_havecoin_after) {
+ bool ret = stack.back()->HaveCoin(COutPoint(txid, 0));
+ BOOST_CHECK(ret == !entry.IsSpent());
+ }
if (InsecureRandRange(5) == 0 || coin.IsSpent()) {
Coin newcoin;
@@ -628,7 +636,7 @@ BOOST_AUTO_TEST_CASE(ccoins_access)
CheckAccessCoin(ABSENT, VALUE2, VALUE2, FRESH , FRESH );
CheckAccessCoin(ABSENT, VALUE2, VALUE2, DIRTY , DIRTY );
CheckAccessCoin(ABSENT, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH);
- CheckAccessCoin(PRUNED, ABSENT, PRUNED, NO_ENTRY , FRESH );
+ CheckAccessCoin(PRUNED, ABSENT, ABSENT, NO_ENTRY , NO_ENTRY );
CheckAccessCoin(PRUNED, PRUNED, PRUNED, 0 , 0 );
CheckAccessCoin(PRUNED, PRUNED, PRUNED, FRESH , FRESH );
CheckAccessCoin(PRUNED, PRUNED, PRUNED, DIRTY , DIRTY );
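The coins_tests.cpp changes above make the simulation randomly decide, per iteration, whether to probe HaveCoin before and/or after the access under test, so every ordering of the two interacting calls is eventually exercised. A minimal sketch of that randomised-probing idea (not part of the patch; seed and iteration count are illustrative):

    #include <cstdio>
    #include <random>

    int main()
    {
        std::mt19937 rng(42); // deterministic seed, like the test's insecure RNG
        int probes_before = 0, probes_after = 0;
        for (int i = 0; i < 1000; ++i) {
            bool test_before = (rng() % 4) == 0; // mirrors InsecureRandBits(2) == 0
            bool test_after  = (rng() % 4) == 0;
            // ...probe, perform the access under test, probe again, compare results...
            if (test_before) ++probes_before;
            if (test_after) ++probes_after;
        }
        std::printf("probed before %d times and after %d times\n", probes_before, probes_after);
    }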
diff --git a/src/test/data/script_tests.json b/src/test/data/script_tests.json
index e35a7ce569..0390d6806d 100644
--- a/src/test/data/script_tests.json
+++ b/src/test/data/script_tests.json
@@ -2506,7 +2506,7 @@
],
["CHECKSEQUENCEVERIFY tests"],
-["", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "INVALID_STACK_OPERATION", "CSV automatically fails on a empty stack"],
+["", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "INVALID_STACK_OPERATION", "CSV automatically fails on an empty stack"],
["-1", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "NEGATIVE_LOCKTIME", "CSV automatically fails if stack top is negative"],
["0x0100", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY,MINIMALDATA", "UNKNOWN_ERROR", "CSV fails if stack top is not minimally encoded"],
["0", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "UNSATISFIED_LOCKTIME", "CSV fails if stack top bit 1 << 31 is set and the tx version < 2"],
diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json
index d70fa54333..e6b382af13 100644
--- a/src/test/data/tx_valid.json
+++ b/src/test/data/tx_valid.json
@@ -174,7 +174,7 @@
[[["5a6b0021a6042a686b6b94abc36b387bef9109847774e8b1e51eb8cc55c53921", 1, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]],
"01000000012139c555ccb81ee5b1e87477840991ef7b386bc3ab946b6b682a04a621006b5a01000000fdb40148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f2204148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390175ac4830450220646b72c35beeec51f4d5bc1cbae01863825750d7f490864af354e6ea4f625e9c022100f04b98432df3a9641719dbced53393022e7249fb59db993af1118539830aab870148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a580039017521038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH"],
-["Finally CHECKMULTISIG removes all signatures prior to hashing the script containing those signatures. In conjunction with the SIGHASH_SINGLE bug this lets us test whether or not FindAndDelete() is actually present in scriptPubKey/redeemScript evaluation by including a signature of the digest 0x01 We can compute in advance for our pubkey, embed it it in the scriptPubKey, and then also using a normal SIGHASH_ALL signature. If FindAndDelete() wasn't run, the 'bugged' signature would still be in the hashed script, and the normal signature would fail."],
+["Finally CHECKMULTISIG removes all signatures prior to hashing the script containing those signatures. In conjunction with the SIGHASH_SINGLE bug this lets us test whether or not FindAndDelete() is actually present in scriptPubKey/redeemScript evaluation by including a signature of the digest 0x01 We can compute in advance for our pubkey, embed it in the scriptPubKey, and then also using a normal SIGHASH_ALL signature. If FindAndDelete() wasn't run, the 'bugged' signature would still be in the hashed script, and the normal signature would fail."],
["Here's an example on mainnet within a P2SH redeemScript. Remarkably it's a standard transaction in <0.9"],
[[["b5b598de91787439afd5938116654e0b16b7a0d0f82742ba37564219c5afcbf9", 0, "DUP HASH160 0x14 0xf6f365c40f0739b61de827a44751e5e99032ed8f EQUALVERIFY CHECKSIG"],
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index 66354699b2..095d86834c 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -29,7 +29,7 @@ public:
class CAddrManUncorrupted : public CAddrManSerializationMock
{
public:
- void Serialize(CDataStream& s) const
+ void Serialize(CDataStream& s) const override
{
CAddrMan::Serialize(s);
}
@@ -38,7 +38,7 @@ public:
class CAddrManCorrupted : public CAddrManSerializationMock
{
public:
- void Serialize(CDataStream& s) const
+ void Serialize(CDataStream& s) const override
{
// Produces corrupt output that claims addrman has 20 addrs when it only has one addr.
unsigned char nVersion = 1;
diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp
index e4b4b85720..b45a7fcc57 100644
--- a/src/test/netbase_tests.cpp
+++ b/src/test/netbase_tests.cpp
@@ -25,6 +25,13 @@ static CSubNet ResolveSubNet(const char* subnet)
return ret;
}
+static CNetAddr CreateInternal(const char* host)
+{
+ CNetAddr addr;
+ addr.SetInternal(host);
+ return addr;
+}
+
BOOST_AUTO_TEST_CASE(netbase_networks)
{
BOOST_CHECK(ResolveIP("127.0.0.1").GetNetwork() == NET_UNROUTABLE);
@@ -32,6 +39,7 @@ BOOST_AUTO_TEST_CASE(netbase_networks)
BOOST_CHECK(ResolveIP("8.8.8.8").GetNetwork() == NET_IPV4);
BOOST_CHECK(ResolveIP("2001::8888").GetNetwork() == NET_IPV6);
BOOST_CHECK(ResolveIP("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").GetNetwork() == NET_TOR);
+ BOOST_CHECK(CreateInternal("foo.com").GetNetwork() == NET_INTERNAL);
}
@@ -58,6 +66,8 @@ BOOST_AUTO_TEST_CASE(netbase_properties)
BOOST_CHECK(ResolveIP("8.8.8.8").IsRoutable());
BOOST_CHECK(ResolveIP("2001::1").IsRoutable());
BOOST_CHECK(ResolveIP("127.0.0.1").IsValid());
+ BOOST_CHECK(CreateInternal("FD6B:88C0:8724:edb1:8e4:3588:e546:35ca").IsInternal());
+ BOOST_CHECK(CreateInternal("bar.com").IsInternal());
}
@@ -103,6 +113,11 @@ BOOST_AUTO_TEST_CASE(netbase_lookupnumeric)
BOOST_CHECK(TestParse("[::]:8333", "[::]:8333"));
BOOST_CHECK(TestParse("[127.0.0.1]", "127.0.0.1:65535"));
BOOST_CHECK(TestParse(":::", "[::]:0"));
+
+ // verify that an internal address fails to resolve
+ BOOST_CHECK(TestParse("[fd6b:88c0:8724:1:2:3:4:5]", "[::]:0"));
+ // and that a one-off resolves correctly
+ BOOST_CHECK(TestParse("[fd6c:88c0:8724:1:2:3:4:5]", "[fd6c:88c0:8724:1:2:3:4:5]:65535"));
}
BOOST_AUTO_TEST_CASE(onioncat_test)
@@ -281,6 +296,9 @@ BOOST_AUTO_TEST_CASE(netbase_getgroup)
BOOST_CHECK(ResolveIP("2001:470:abcd:9999:9999:9999:9999:9999").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV6, 32, 1, 4, 112, 175})); //he.net
BOOST_CHECK(ResolveIP("2001:2001:9999:9999:9999:9999:9999:9999").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV6, 32, 1, 32, 1})); //IPv6
+ // baz.net sha256 hash: 12929400eb4607c4ac075f087167e75286b179c693eb059a01774b864e8fe505
+ std::vector<unsigned char> internal_group = {NET_INTERNAL, 0x12, 0x92, 0x94, 0x00, 0xeb, 0x46, 0x07, 0xc4, 0xac, 0x07};
+ BOOST_CHECK(CreateInternal("baz.net").GetGroup() == internal_group);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 4ddea9bae7..a18471588a 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -467,7 +467,7 @@ std::string JSONPrettyPrint(const UniValue& univalue)
}
return ret;
}
-}
+} // namespace
BOOST_AUTO_TEST_CASE(script_build)
{
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index b70ee96966..579e96524c 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -33,10 +33,12 @@ extern void noui_connect();
BasicTestingSetup::BasicTestingSetup(const std::string& chainName)
{
+ RandomInit();
ECC_Start();
SetupEnvironment();
SetupNetworking();
InitSignatureCache();
+ InitScriptExecutionCache();
fPrintToDebugLog = false; // don't want to write to debug.log file
fCheckBlockIndex = true;
SelectParams(chainName);
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index c5367208ba..a74f40251a 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -10,11 +10,17 @@
#include "txmempool.h"
#include "random.h"
#include "script/standard.h"
+#include "script/sign.h"
#include "test/test_bitcoin.h"
#include "utiltime.h"
+#include "core_io.h"
+#include "keystore.h"
+#include "policy/policy.h"
#include <boost/test/unit_test.hpp>
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks);
+
BOOST_AUTO_TEST_SUITE(tx_validationcache_tests)
static bool
@@ -84,4 +90,282 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
BOOST_CHECK_EQUAL(mempool.size(), 0);
}
+// Run CheckInputs (using pcoinsTip) on the given transaction, for all script
+// flags. Test that CheckInputs passes for all flags that don't overlap with
+// the failing_flags argument, but otherwise fails.
+// CHECKLOCKTIMEVERIFY and CHECKSEQUENCEVERIFY (and future NOP codes that may
+// get reassigned) have an interaction with DISCOURAGE_UPGRADABLE_NOPS: if
+// the script flags used contain DISCOURAGE_UPGRADABLE_NOPS but don't contain
+// CHECKLOCKTIMEVERIFY (or CHECKSEQUENCEVERIFY), but the script does contain
+// OP_CHECKLOCKTIMEVERIFY (or OP_CHECKSEQUENCEVERIFY), then script execution
+// should fail.
+// Capture this interaction with the upgraded_nop argument: set it when evaluating
+// any script flag that is implemented as an upgraded NOP code.
+void ValidateCheckInputsForAllFlags(CMutableTransaction &tx, uint32_t failing_flags, bool add_to_cache, bool upgraded_nop)
+{
+ PrecomputedTransactionData txdata(tx);
+ // If we add many more flags, this loop can get too expensive, but we can
+ // rewrite in the future to randomly pick a set of flags to evaluate.
+ for (uint32_t test_flags=0; test_flags < (1U << 16); test_flags += 1) {
+ CValidationState state;
+ // Filter out incompatible flag choices
+ if ((test_flags & SCRIPT_VERIFY_CLEANSTACK)) {
+ // CLEANSTACK requires P2SH and WITNESS, see VerifyScript() in
+ // script/interpreter.cpp
+ test_flags |= SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS;
+ }
+ if ((test_flags & SCRIPT_VERIFY_WITNESS)) {
+ // WITNESS requires P2SH
+ test_flags |= SCRIPT_VERIFY_P2SH;
+ }
+ bool ret = CheckInputs(tx, state, pcoinsTip, true, test_flags, true, add_to_cache, txdata, nullptr);
+ // CheckInputs should succeed iff test_flags doesn't intersect with
+ // failing_flags
+ bool expected_return_value = !(test_flags & failing_flags);
+ if (expected_return_value && upgraded_nop) {
+ // If the script flag being tested corresponds to an upgraded NOP,
+ // then script execution should fail if DISCOURAGE_UPGRADABLE_NOPS
+ // is set.
+ expected_return_value = !(test_flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS);
+ }
+ BOOST_CHECK_EQUAL(ret, expected_return_value);
+
+ // Test the caching
+ if (ret && add_to_cache) {
+ // Check that we get a cache hit if the tx was valid
+ std::vector<CScriptCheck> scriptchecks;
+ BOOST_CHECK(CheckInputs(tx, state, pcoinsTip, true, test_flags, true, add_to_cache, txdata, &scriptchecks));
+ BOOST_CHECK(scriptchecks.empty());
+ } else {
+ // Check that we get script executions to check, if the transaction
+ // was invalid, or we didn't add to cache.
+ std::vector<CScriptCheck> scriptchecks;
+ BOOST_CHECK(CheckInputs(tx, state, pcoinsTip, true, test_flags, true, add_to_cache, txdata, &scriptchecks));
+ BOOST_CHECK_EQUAL(scriptchecks.size(), tx.vin.size());
+ }
+ }
+}
+
+BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
+{
+ // Test that passing CheckInputs with one set of script flags doesn't imply
+ // that we would pass again with a different set of flags.
+ InitScriptExecutionCache();
+
+ CScript p2pk_scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
+ CScript p2sh_scriptPubKey = GetScriptForDestination(CScriptID(p2pk_scriptPubKey));
+ CScript p2pkh_scriptPubKey = GetScriptForDestination(coinbaseKey.GetPubKey().GetID());
+ CScript p2wpkh_scriptPubKey = GetScriptForWitness(p2pkh_scriptPubKey);
+
+ CBasicKeyStore keystore;
+ keystore.AddKey(coinbaseKey);
+ keystore.AddCScript(p2pk_scriptPubKey);
+
+ // flags to test: SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, SCRIPT_VERIFY_CHECKSEQUENCE_VERIFY, SCRIPT_VERIFY_NULLDUMMY, uncompressed pubkey thing
+
+ // Create 2 outputs that match the three scripts above, spending the first
+ // coinbase tx.
+ CMutableTransaction spend_tx;
+
+ spend_tx.nVersion = 1;
+ spend_tx.vin.resize(1);
+ spend_tx.vin[0].prevout.hash = coinbaseTxns[0].GetHash();
+ spend_tx.vin[0].prevout.n = 0;
+ spend_tx.vout.resize(4);
+ spend_tx.vout[0].nValue = 11*CENT;
+ spend_tx.vout[0].scriptPubKey = p2sh_scriptPubKey;
+ spend_tx.vout[1].nValue = 11*CENT;
+ spend_tx.vout[1].scriptPubKey = p2wpkh_scriptPubKey;
+ spend_tx.vout[2].nValue = 11*CENT;
+ spend_tx.vout[2].scriptPubKey = CScript() << OP_CHECKLOCKTIMEVERIFY << OP_DROP << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
+ spend_tx.vout[3].nValue = 11*CENT;
+ spend_tx.vout[3].scriptPubKey = CScript() << OP_CHECKSEQUENCEVERIFY << OP_DROP << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
+
+ // Sign, with a non-DER signature
+ {
+ std::vector<unsigned char> vchSig;
+ uint256 hash = SignatureHash(p2pk_scriptPubKey, spend_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE);
+ BOOST_CHECK(coinbaseKey.Sign(hash, vchSig));
+ vchSig.push_back((unsigned char) 0); // padding byte makes this non-DER
+ vchSig.push_back((unsigned char)SIGHASH_ALL);
+ spend_tx.vin[0].scriptSig << vchSig;
+ }
+
+ LOCK(cs_main);
+
+ // Test that invalidity under a set of flags doesn't preclude validity
+ // under other (eg consensus) flags.
+ // spend_tx is invalid according to DERSIG
+ CValidationState state;
+ {
+ PrecomputedTransactionData ptd_spend_tx(spend_tx);
+
+ BOOST_CHECK(!CheckInputs(spend_tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, nullptr));
+
+ // If we call again asking for scriptchecks (as happens in
+ // ConnectBlock), we should add a script check object for this -- we're
+ // not caching invalidity (if that changes, delete this test case).
+ std::vector<CScriptCheck> scriptchecks;
+ BOOST_CHECK(CheckInputs(spend_tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, &scriptchecks));
+ BOOST_CHECK_EQUAL(scriptchecks.size(), 1);
+
+ // Test that CheckInputs returns true iff DERSIG-enforcing flags are
+ // not present. Don't add these checks to the cache, so that we can
+ // test later that block validation works fine in the absence of cached
+ // successes.
+ ValidateCheckInputsForAllFlags(spend_tx, SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false, false);
+
+ // And if we produce a block with this tx, it should be valid (DERSIG not
+ // enabled yet), even though there's no cache entry.
+ CBlock block;
+
+ block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
+ BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
+ BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
+ }
+
+ // Test P2SH: construct a transaction that is valid without P2SH, and
+ // then test validity with P2SH.
+ {
+ CMutableTransaction invalid_under_p2sh_tx;
+ invalid_under_p2sh_tx.nVersion = 1;
+ invalid_under_p2sh_tx.vin.resize(1);
+ invalid_under_p2sh_tx.vin[0].prevout.hash = spend_tx.GetHash();
+ invalid_under_p2sh_tx.vin[0].prevout.n = 0;
+ invalid_under_p2sh_tx.vout.resize(1);
+ invalid_under_p2sh_tx.vout[0].nValue = 11*CENT;
+ invalid_under_p2sh_tx.vout[0].scriptPubKey = p2pk_scriptPubKey;
+ std::vector<unsigned char> vchSig2(p2pk_scriptPubKey.begin(), p2pk_scriptPubKey.end());
+ invalid_under_p2sh_tx.vin[0].scriptSig << vchSig2;
+
+ ValidateCheckInputsForAllFlags(invalid_under_p2sh_tx, SCRIPT_VERIFY_P2SH, true, false);
+ }
+
+ // Test CHECKLOCKTIMEVERIFY
+ {
+ CMutableTransaction invalid_with_cltv_tx;
+ invalid_with_cltv_tx.nVersion = 1;
+ invalid_with_cltv_tx.nLockTime = 100;
+ invalid_with_cltv_tx.vin.resize(1);
+ invalid_with_cltv_tx.vin[0].prevout.hash = spend_tx.GetHash();
+ invalid_with_cltv_tx.vin[0].prevout.n = 2;
+ invalid_with_cltv_tx.vin[0].nSequence = 0;
+ invalid_with_cltv_tx.vout.resize(1);
+ invalid_with_cltv_tx.vout[0].nValue = 11*CENT;
+ invalid_with_cltv_tx.vout[0].scriptPubKey = p2pk_scriptPubKey;
+
+ // Sign
+ std::vector<unsigned char> vchSig;
+ uint256 hash = SignatureHash(spend_tx.vout[2].scriptPubKey, invalid_with_cltv_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE);
+ BOOST_CHECK(coinbaseKey.Sign(hash, vchSig));
+ vchSig.push_back((unsigned char)SIGHASH_ALL);
+ invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 101;
+
+ ValidateCheckInputsForAllFlags(invalid_with_cltv_tx, SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true);
+
+ // Make it valid, and check again
+ invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 100;
+ CValidationState state;
+ PrecomputedTransactionData txdata(invalid_with_cltv_tx);
+ BOOST_CHECK(CheckInputs(invalid_with_cltv_tx, state, pcoinsTip, true, SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, nullptr));
+ }
+
+ // TEST CHECKSEQUENCEVERIFY
+ {
+ CMutableTransaction invalid_with_csv_tx;
+ invalid_with_csv_tx.nVersion = 2;
+ invalid_with_csv_tx.vin.resize(1);
+ invalid_with_csv_tx.vin[0].prevout.hash = spend_tx.GetHash();
+ invalid_with_csv_tx.vin[0].prevout.n = 3;
+ invalid_with_csv_tx.vin[0].nSequence = 100;
+ invalid_with_csv_tx.vout.resize(1);
+ invalid_with_csv_tx.vout[0].nValue = 11*CENT;
+ invalid_with_csv_tx.vout[0].scriptPubKey = p2pk_scriptPubKey;
+
+ // Sign
+ std::vector<unsigned char> vchSig;
+ uint256 hash = SignatureHash(spend_tx.vout[3].scriptPubKey, invalid_with_csv_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE);
+ BOOST_CHECK(coinbaseKey.Sign(hash, vchSig));
+ vchSig.push_back((unsigned char)SIGHASH_ALL);
+ invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 101;
+
+ ValidateCheckInputsForAllFlags(invalid_with_csv_tx, SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true);
+
+ // Make it valid, and check again
+ invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 100;
+ CValidationState state;
+ PrecomputedTransactionData txdata(invalid_with_csv_tx);
+ BOOST_CHECK(CheckInputs(invalid_with_csv_tx, state, pcoinsTip, true, SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, nullptr));
+ }
+
+ // TODO: add tests for remaining script flags
+
+ // Test that passing CheckInputs with a valid witness doesn't imply success
+ // for the same tx with a different witness.
+ {
+ CMutableTransaction valid_with_witness_tx;
+ valid_with_witness_tx.nVersion = 1;
+ valid_with_witness_tx.vin.resize(1);
+ valid_with_witness_tx.vin[0].prevout.hash = spend_tx.GetHash();
+ valid_with_witness_tx.vin[0].prevout.n = 1;
+ valid_with_witness_tx.vout.resize(1);
+ valid_with_witness_tx.vout[0].nValue = 11*CENT;
+ valid_with_witness_tx.vout[0].scriptPubKey = p2pk_scriptPubKey;
+
+ // Sign
+ SignatureData sigdata;
+ ProduceSignature(MutableTransactionSignatureCreator(&keystore, &valid_with_witness_tx, 0, 11*CENT, SIGHASH_ALL), spend_tx.vout[1].scriptPubKey, sigdata);
+ UpdateTransaction(valid_with_witness_tx, 0, sigdata);
+
+ // This should be valid under all script flags.
+ ValidateCheckInputsForAllFlags(valid_with_witness_tx, 0, true, false);
+
+ // Remove the witness, and check that it is now invalid.
+ valid_with_witness_tx.vin[0].scriptWitness.SetNull();
+ ValidateCheckInputsForAllFlags(valid_with_witness_tx, SCRIPT_VERIFY_WITNESS, true, false);
+ }
+
+ {
+ // Test a transaction with multiple inputs.
+ CMutableTransaction tx;
+
+ tx.nVersion = 1;
+ tx.vin.resize(2);
+ tx.vin[0].prevout.hash = spend_tx.GetHash();
+ tx.vin[0].prevout.n = 0;
+ tx.vin[1].prevout.hash = spend_tx.GetHash();
+ tx.vin[1].prevout.n = 1;
+ tx.vout.resize(1);
+ tx.vout[0].nValue = 22*CENT;
+ tx.vout[0].scriptPubKey = p2pk_scriptPubKey;
+
+ // Sign
+ for (int i=0; i<2; ++i) {
+ SignatureData sigdata;
+ ProduceSignature(MutableTransactionSignatureCreator(&keystore, &tx, i, 11*CENT, SIGHASH_ALL), spend_tx.vout[i].scriptPubKey, sigdata);
+ UpdateTransaction(tx, i, sigdata);
+ }
+
+ // This should be valid under all script flags
+ ValidateCheckInputsForAllFlags(tx, 0, true, false);
+
+ // Check that if the second input is invalid, but the first input is
+ // valid, the transaction is not cached.
+ // Invalidate vin[1]
+ tx.vin[1].scriptWitness.SetNull();
+
+ CValidationState state;
+ PrecomputedTransactionData txdata(tx);
+ // This transaction is now invalid under segwit, because of the second input.
+ BOOST_CHECK(!CheckInputs(tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, nullptr));
+
+ std::vector<CScriptCheck> scriptchecks;
+ // Make sure this transaction was not cached (ie because the first
+ // input was valid)
+ BOOST_CHECK(CheckInputs(tx, state, pcoinsTip, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, &scriptchecks));
+ // Should get 2 script checks back -- caching is on a whole-transaction basis.
+ BOOST_CHECK_EQUAL(scriptchecks.size(), 2);
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
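ValidateCheckInputsForAllFlags above sweeps every combination of the script-verification flags, normalising dependent flags (CLEANSTACK implies P2SH and WITNESS; WITNESS implies P2SH) before each check. A minimal sketch of that sweep (not part of the patch), with three illustrative bit masks standing in for the real SCRIPT_VERIFY_* constants:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t FLAG_P2SH       = 1u << 0;
    static const uint32_t FLAG_WITNESS    = 1u << 1;
    static const uint32_t FLAG_CLEANSTACK = 1u << 2;

    int main()
    {
        int evaluated = 0;
        for (uint32_t flags = 0; flags < (1u << 3); ++flags) {
            uint32_t test_flags = flags;
            // Same dependency rules the test applies before calling CheckInputs:
            // CLEANSTACK requires P2SH and WITNESS, and WITNESS requires P2SH.
            if (test_flags & FLAG_CLEANSTACK) test_flags |= FLAG_P2SH | FLAG_WITNESS;
            if (test_flags & FLAG_WITNESS) test_flags |= FLAG_P2SH;
            // A real harness would now run the validation under test_flags and compare
            // the outcome against the expected set of failing flags.
            ++evaluated;
        }
        std::printf("swept %d flag combinations\n", evaluated);
    }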
diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp
index faa2383d14..722f6ae059 100644
--- a/src/test/versionbits_tests.cpp
+++ b/src/test/versionbits_tests.cpp
@@ -22,11 +22,11 @@ private:
mutable ThresholdConditionCache cache;
public:
- int64_t BeginTime(const Consensus::Params& params) const { return TestTime(10000); }
- int64_t EndTime(const Consensus::Params& params) const { return TestTime(20000); }
- int Period(const Consensus::Params& params) const { return 1000; }
- int Threshold(const Consensus::Params& params) const { return 900; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const { return (pindex->nVersion & 0x100); }
+ int64_t BeginTime(const Consensus::Params& params) const override { return TestTime(10000); }
+ int64_t EndTime(const Consensus::Params& params) const override { return TestTime(20000); }
+ int Period(const Consensus::Params& params) const override { return 1000; }
+ int Threshold(const Consensus::Params& params) const override { return 900; }
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { return (pindex->nVersion & 0x100); }
ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, paramsDummy, cache); }
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, paramsDummy, cache); }
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index f273428625..3665e7e770 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -375,8 +375,10 @@ static std::pair<bool,std::string> ReadBinaryFile(const fs::path &filename, size
while ((n=fread(buffer, 1, sizeof(buffer), f)) > 0) {
// Check for reading errors so we don't return any data if we couldn't
// read the entire file (or up to maxsize)
- if (ferror(f))
+ if (ferror(f)) {
+ fclose(f);
return std::make_pair(false,"");
+ }
retval.append(buffer, buffer+n);
if (retval.size() > maxsize)
break;
@@ -404,7 +406,7 @@ static bool WriteBinaryFile(const fs::path &filename, const std::string &data)
/****** Bitcoin specific TorController implementation ********/
/** Controller that connects to Tor control socket, authenticate, then create
- * and maintain a ephemeral hidden service.
+ * and maintain an ephemeral hidden service.
*/
class TorController
{
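// A minimal sketch (not the patch's implementation) of the resource-handling
// pattern the torcontrol fix above enforces: every exit path out of the read
// loop must fclose() the handle, including the ferror() path that previously
// returned without closing it.
#include <cstddef>
#include <cstdio>
#include <string>
#include <utility>

static std::pair<bool, std::string> ReadWholeFile(const char* path, size_t maxsize)
{
    FILE* f = std::fopen(path, "rb");
    if (f == nullptr) return std::make_pair(false, "");
    std::string retval;
    char buffer[128];
    size_t n;
    while ((n = std::fread(buffer, 1, sizeof(buffer), f)) > 0) {
        if (std::ferror(f)) {
            std::fclose(f);  // the close that was missing on this path pre-patch
            return std::make_pair(false, "");
        }
        retval.append(buffer, buffer + n);
        if (retval.size() > maxsize) break;
    }
    std::fclose(f);
    return std::make_pair(true, retval);
}

int main(int argc, char** argv)
{
    if (argc < 2) return 0;
    return ReadWholeFile(argv[1], 1 << 20).first ? 0 : 1;
}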
diff --git a/src/txdb.cpp b/src/txdb.cpp
index c8f5090293..002f6550bc 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -7,8 +7,12 @@
#include "chainparams.h"
#include "hash.h"
+#include "random.h"
#include "pow.h"
#include "uint256.h"
+#include "util.h"
+#include "ui_interface.h"
+#include "init.h"
#include <stdint.h>
@@ -21,6 +25,7 @@ static const char DB_TXINDEX = 't';
static const char DB_BLOCK_INDEX = 'b';
static const char DB_BEST_BLOCK = 'B';
+static const char DB_HEAD_BLOCKS = 'H';
static const char DB_FLAG = 'F';
static const char DB_REINDEX_FLAG = 'R';
static const char DB_LAST_BLOCK = 'l';
@@ -68,10 +73,39 @@ uint256 CCoinsViewDB::GetBestBlock() const {
return hashBestChain;
}
+std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
+ std::vector<uint256> vhashHeadBlocks;
+ if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
+ return std::vector<uint256>();
+ }
+ return vhashHeadBlocks;
+}
+
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
CDBBatch batch(db);
size_t count = 0;
size_t changed = 0;
+ size_t batch_size = (size_t)GetArg("-dbbatchsize", nDefaultDbBatchSize);
+ int crash_simulate = GetArg("-dbcrashratio", 0);
+ assert(!hashBlock.IsNull());
+
+ uint256 old_tip = GetBestBlock();
+ if (old_tip.IsNull()) {
+ // We may be in the middle of replaying.
+ std::vector<uint256> old_heads = GetHeadBlocks();
+ if (old_heads.size() == 2) {
+ assert(old_heads[0] == hashBlock);
+ old_tip = old_heads[1];
+ }
+ }
+
+ // In the first batch, mark the database as being in the middle of a
+ // transition from old_tip to hashBlock.
+ // A vector is used for future extensibility, as we may want to support
+ // interrupting after partial writes from multiple independent reorgs.
+ batch.Erase(DB_BEST_BLOCK);
+ batch.Write(DB_HEAD_BLOCKS, std::vector<uint256>{hashBlock, old_tip});
+
for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) {
CoinEntry entry(&it->first);
@@ -84,10 +118,25 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
count++;
CCoinsMap::iterator itOld = it++;
mapCoins.erase(itOld);
+ if (batch.SizeEstimate() > batch_size) {
+ LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
+ db.WriteBatch(batch);
+ batch.Clear();
+ if (crash_simulate) {
+ static FastRandomContext rng;
+ if (rng.randrange(crash_simulate) == 0) {
+ LogPrintf("Simulating a crash. Goodbye.\n");
+ _Exit(0);
+ }
+ }
+ }
}
- if (!hashBlock.IsNull())
- batch.Write(DB_BEST_BLOCK, hashBlock);
+ // In the last batch, mark the database as consistent with hashBlock again.
+ batch.Erase(DB_HEAD_BLOCKS);
+ batch.Write(DB_BEST_BLOCK, hashBlock);
+
+ LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
bool ret = db.WriteBatch(batch);
LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
return ret;
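// A simplified, standalone sketch (std::map as a stand-in for CDBBatch and the
// LevelDB wrapper, no real persistence) of the crash-consistency protocol the
// BatchWrite hunk above introduces: the first batch replaces BEST_BLOCK with
// HEAD_BLOCKS = {new_tip, old_tip}, the bulk of the coins goes out in
// size-limited batches that may be interrupted, and the final batch removes
// HEAD_BLOCKS and restores BEST_BLOCK. A crash in between leaves HEAD_BLOCKS
// set, which is the state ReplayBlocks() later detects and repairs.
#include <cstddef>
#include <map>
#include <string>

using ToyDB = std::map<std::string, std::string>;

static void WriteCoinsCrashSafe(ToyDB& db,
                                const std::map<std::string, std::string>& coins,
                                const std::string& old_tip,
                                const std::string& new_tip,
                                size_t batch_size)
{
    // First batch: mark the database as "in transition from old_tip to new_tip".
    db.erase("BEST_BLOCK");
    db["HEAD_BLOCKS"] = new_tip + "," + old_tip;

    size_t pending = 0;
    for (const auto& [key, value] : coins) {
        db["coin/" + key] = value;
        pending += key.size() + value.size();
        if (pending > batch_size) {
            // The real code flushes the partial batch to disk here; a crash at
            // this point leaves HEAD_BLOCKS in place, so startup knows to replay.
            pending = 0;
        }
    }

    // Final batch: the database is consistent with new_tip again.
    db.erase("HEAD_BLOCKS");
    db["BEST_BLOCK"] = new_tip;
}

int main()
{
    ToyDB db;
    WriteCoinsCrashSafe(db, {{"txid:0", "coin-a"}, {"txid:1", "coin-b"}}, "old", "new", 16);
    return db.count("BEST_BLOCK") == 1 ? 0 : 1;
}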
@@ -210,7 +259,7 @@ bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
return true;
}
-bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
+bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
{
std::unique_ptr<CDBIterator> pcursor(NewIterator());
@@ -238,12 +287,12 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
pindexNew->nStatus = diskindex.nStatus;
pindexNew->nTx = diskindex.nTx;
- if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, Params().GetConsensus()))
- return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString());
+ if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams))
+ return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());
pcursor->Next();
} else {
- return error("LoadBlockIndex() : failed to read value");
+ return error("%s: failed to read value", __func__);
}
} else {
break;
@@ -319,13 +368,30 @@ bool CCoinsViewDB::Upgrade() {
return true;
}
- LogPrintf("Upgrading database...\n");
+ int64_t count = 0;
+ LogPrintf("Upgrading utxo-set database...\n");
+ LogPrintf("[0%%]...");
size_t batch_size = 1 << 24;
CDBBatch batch(db);
+ uiInterface.SetProgressBreakAction(StartShutdown);
+ int reportDone = 0;
while (pcursor->Valid()) {
boost::this_thread::interruption_point();
+ if (ShutdownRequested()) {
+ break;
+ }
std::pair<unsigned char, uint256> key;
if (pcursor->GetKey(key) && key.first == DB_COINS) {
+ if (count++ % 256 == 0) {
+ uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1);
+ int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5);
+ uiInterface.ShowProgress(_("Upgrading UTXO database") + "\n"+ _("(press q to shutdown and continue later)") + "\n", percentageDone);
+ if (reportDone < percentageDone/10) {
+ // report max. every 10% step
+ LogPrintf("[%d%%]...", percentageDone);
+ reportDone = percentageDone/10;
+ }
+ }
CCoins old_coins;
if (!pcursor->GetValue(old_coins)) {
return error("%s: cannot parse CCoins record", __func__);
@@ -350,5 +416,7 @@ bool CCoinsViewDB::Upgrade() {
}
}
db.WriteBatch(batch);
+ uiInterface.SetProgressBreakAction(std::function<void(void)>());
+ LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
return true;
}
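// A small standalone illustration of the progress estimate used in the
// Upgrade() hunk above: because the old per-txid records are keyed by uint256
// and iterated in key order, the first two bytes of the current key give a
// 0..65535 position that maps roughly linearly onto percent complete.
#include <cstdint>
#include <cstdio>

static int ProgressPercent(uint8_t first_byte, uint8_t second_byte)
{
    const uint32_t high = 0x100u * first_byte + second_byte;  // 0..65535
    return static_cast<int>(high * 100.0 / 65536.0 + 0.5);
}

int main()
{
    std::printf("%d%% %d%% %d%%\n",
                ProgressPercent(0x00, 0x00),   // 0%
                ProgressPercent(0x80, 0x00),   // 50%
                ProgressPercent(0xff, 0xff));  // 100%
    return 0;
}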
diff --git a/src/txdb.h b/src/txdb.h
index 974dd4ebe3..adcbc73380 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -19,12 +19,12 @@ class CBlockIndex;
class CCoinsViewDBCursor;
class uint256;
-//! Compensate for extra memory peak (x1.5-x1.9) at flush time.
-static constexpr int DB_PEAK_USAGE_FACTOR = 2;
//! No need to periodic flush if at least this much space still available.
-static constexpr int MAX_BLOCK_COINSDB_USAGE = 10 * DB_PEAK_USAGE_FACTOR;
+static constexpr int MAX_BLOCK_COINSDB_USAGE = 10;
//! -dbcache default (MiB)
static const int64_t nDefaultDbCache = 450;
+//! -dbbatchsize default (bytes)
+static const int64_t nDefaultDbBatchSize = 16 << 20;
//! max. -dbcache (MiB)
static const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
//! min. -dbcache (MiB)
@@ -74,6 +74,7 @@ public:
bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
+ std::vector<uint256> GetHeadBlocks() const override;
bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
CCoinsViewCursor *Cursor() const override;
@@ -88,12 +89,12 @@ class CCoinsViewDBCursor: public CCoinsViewCursor
public:
~CCoinsViewDBCursor() {}
- bool GetKey(COutPoint &key) const;
- bool GetValue(Coin &coin) const;
- unsigned int GetValueSize() const;
+ bool GetKey(COutPoint &key) const override;
+ bool GetValue(Coin &coin) const override;
+ unsigned int GetValueSize() const override;
- bool Valid() const;
- void Next();
+ bool Valid() const override;
+ void Next() override;
private:
CCoinsViewDBCursor(CDBIterator* pcursorIn, const uint256 &hashBlockIn):
@@ -122,7 +123,7 @@ public:
bool WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> > &list);
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
- bool LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
+ bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
};
#endif // BITCOIN_TXDB_H
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 8ba48f3bb5..4a81055231 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -123,7 +123,7 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
// accounted for in the state of their ancestors)
std::set<uint256> setAlreadyIncluded(vHashesToUpdate.begin(), vHashesToUpdate.end());
- // Iterate in reverse, so that whenever we are looking at at a transaction
+ // Iterate in reverse, so that whenever we are looking at a transaction
// we are sure that all in-mempool descendants have already been processed.
// This maximizes the benefit of the descendant cache and guarantees that
// setMemPoolChildren will be updated, an assumption made in
@@ -770,7 +770,7 @@ public:
return counta < countb;
}
};
-}
+} // namespace
std::vector<CTxMemPool::indexed_transaction_set::const_iterator> CTxMemPool::GetSortedDepthAndScore() const
{
@@ -904,11 +904,7 @@ bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const {
return false;
}
}
- return (base->GetCoin(outpoint, coin) && !coin.IsSpent());
-}
-
-bool CCoinsViewMemPool::HaveCoin(const COutPoint &outpoint) const {
- return mempool.exists(outpoint) || base->HaveCoin(outpoint);
+ return base->GetCoin(outpoint, coin);
}
size_t CTxMemPool::DynamicMemoryUsage() const {
@@ -1051,9 +1047,7 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint>* pvNoSpends
for (const CTransaction& tx : txn) {
for (const CTxIn& txin : tx.vin) {
if (exists(txin.prevout.hash)) continue;
- if (!mapNextTx.count(txin.prevout)) {
- pvNoSpendsRemaining->push_back(txin.prevout);
- }
+ pvNoSpendsRemaining->push_back(txin.prevout);
}
}
}
diff --git a/src/txmempool.h b/src/txmempool.h
index 7ca3b18a1e..d272114a7c 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -618,13 +618,6 @@ public:
return (mapTx.count(hash) != 0);
}
- bool exists(const COutPoint& outpoint) const
- {
- LOCK(cs);
- auto it = mapTx.find(outpoint.hash);
- return (it != mapTx.end() && outpoint.n < it->GetTx().vout.size());
- }
-
CTransactionRef get(const uint256& hash) const;
TxMempoolInfo info(const uint256& hash) const;
std::vector<TxMempoolInfo> infoAll() const;
@@ -676,6 +669,13 @@ private:
/**
* CCoinsView that brings transactions from a memorypool into view.
* It does not check for spendings by memory pool transactions.
+ * Instead, it provides access to all Coins which are either unspent in the
+ * base CCoinsView, or are outputs from any mempool transaction!
+ * This allows transaction replacement to work as expected, as you want to
+ * have all inputs "available" to check signatures, and any cycles in the
+ * dependency graph are checked directly in AcceptToMemoryPool.
+ * It also allows you to sign a double-spend directly in signrawtransaction,
+ * as long as the conflicting transaction is not yet confirmed.
*/
class CCoinsViewMemPool : public CCoinsViewBacked
{
@@ -684,8 +684,7 @@ protected:
public:
CCoinsViewMemPool(CCoinsView* baseIn, const CTxMemPool& mempoolIn);
- bool GetCoin(const COutPoint &outpoint, Coin &coin) const;
- bool HaveCoin(const COutPoint &outpoint) const;
+ bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
};
/**
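// A condensed sketch (toy types, not the real CCoinsView hierarchy) of the
// overlay behaviour the CCoinsViewMemPool comment above describes: a coin is
// visible if it is an output of any mempool transaction, otherwise the lookup
// falls through to the backing (confirmed) view.
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>

struct ToyOutPoint { std::string txid; uint32_t n; };
using ToyCoin = std::string;

struct ToyBaseView {
    std::map<std::pair<std::string, uint32_t>, ToyCoin> coins;  // confirmed UTXOs
    std::optional<ToyCoin> GetCoin(const ToyOutPoint& out) const {
        auto it = coins.find({out.txid, out.n});
        if (it == coins.end()) return std::nullopt;
        return it->second;
    }
};

struct ToyMempoolView {
    const ToyBaseView* base;
    std::map<std::string, std::vector<ToyCoin>> mempool_outputs;  // txid -> vout
    std::optional<ToyCoin> GetCoin(const ToyOutPoint& out) const {
        auto it = mempool_outputs.find(out.txid);
        if (it != mempool_outputs.end()) {
            if (out.n < it->second.size()) return it->second[out.n];
            return std::nullopt;  // txid known, but no such output index
        }
        return base->GetCoin(out);  // fall back to the confirmed view
    }
};

int main()
{
    ToyBaseView base;
    base.coins[{"confirmed-tx", 0}] = "50000 sat";
    ToyMempoolView view{&base, {{"mempool-tx", {"10000 sat"}}}};
    const bool sees_mempool = view.GetCoin({"mempool-tx", 0}).has_value();
    const bool sees_base = view.GetCoin({"confirmed-tx", 0}).has_value();
    return (sees_mempool && sees_base) ? 0 : 1;
}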
diff --git a/src/ui_interface.h b/src/ui_interface.h
index 090402aeed..762dd19b19 100644
--- a/src/ui_interface.h
+++ b/src/ui_interface.h
@@ -97,6 +97,9 @@ public:
/** Show progress e.g. for verifychain */
boost::signals2::signal<void (const std::string &title, int nProgress)> ShowProgress;
+ /** Set progress break action (possible "cancel button" triggers that action) */
+ boost::signals2::signal<void (std::function<void(void)> action)> SetProgressBreakAction;
+
/** New block has been accepted */
boost::signals2::signal<void (bool, const CBlockIndex *)> NotifyBlockTip;
diff --git a/src/util.cpp b/src/util.cpp
index 0a14e8bb9e..b76c173f90 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -84,6 +84,8 @@
#include <openssl/rand.h>
#include <openssl/conf.h>
+// Application startup time (used for uptime calculation)
+const int64_t nStartupTime = GetTime();
const char * const BITCOIN_CONF_FILENAME = "bitcoin.conf";
const char * const BITCOIN_PID_FILENAME = "bitcoind.pid";
@@ -420,7 +422,9 @@ void ArgsManager::ParseParameters(int argc, const char* const argv[])
std::vector<std::string> ArgsManager::GetArgs(const std::string& strArg)
{
LOCK(cs_args);
- return mapMultiArgs.at(strArg);
+ if (IsArgSet(strArg))
+ return mapMultiArgs.at(strArg);
+ return {};
}
bool ArgsManager::IsArgSet(const std::string& strArg)
@@ -651,21 +655,21 @@ bool RenameOver(fs::path src, fs::path dest)
}
/**
- * Ignores exceptions thrown by Boost's create_directory if the requested directory exists.
+ * Ignores exceptions thrown by Boost's create_directories if the requested directory exists.
* Specifically handles case where path p exists, but it wasn't possible for the user to
* write to the parent directory.
*/
-bool TryCreateDirectory(const fs::path& p)
+bool TryCreateDirectories(const fs::path& p)
{
try
{
- return fs::create_directory(p);
+ return fs::create_directories(p);
} catch (const fs::filesystem_error&) {
if (!fs::exists(p) || !fs::is_directory(p))
throw;
}
- // create_directory didn't create the directory, it had to have existed already
+ // create_directories didn't create the directory, it had to have existed already
return false;
}
@@ -889,3 +893,9 @@ std::string CopyrightHolders(const std::string& strPrefix)
}
return strCopyrightHolders;
}
+
+// Obtain the application startup time (used for uptime calculation)
+int64_t GetStartupTime()
+{
+ return nStartupTime;
+}
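// A minimal illustration (standard containers only, not the real ArgsManager)
// of the GetArgs() fix above: look the key up first and return an empty vector
// when it is absent, instead of calling map::at() and throwing std::out_of_range.
#include <map>
#include <string>
#include <vector>

static std::vector<std::string> GetMultiArg(
    const std::map<std::string, std::vector<std::string>>& args,
    const std::string& name)
{
    const auto it = args.find(name);
    if (it == args.end()) return {};
    return it->second;
}

int main()
{
    std::map<std::string, std::vector<std::string>> args{{"-connect", {"a", "b"}}};
    return GetMultiArg(args, "-missing").empty() ? 0 : 1;  // empty, no exception
}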
diff --git a/src/util.h b/src/util.h
index 8f8b249749..824ad51ac4 100644
--- a/src/util.h
+++ b/src/util.h
@@ -5,7 +5,7 @@
/**
* Server/client environment: argument handling, config file parsing,
- * logging, thread wrappers
+ * logging, thread wrappers, startup time
*/
#ifndef BITCOIN_UTIL_H
#define BITCOIN_UTIL_H
@@ -29,6 +29,9 @@
#include <boost/signals2/signal.hpp>
+// Application startup time (used for uptime calculation)
+int64_t GetStartupTime();
+
static const bool DEFAULT_LOGTIMEMICROS = false;
static const bool DEFAULT_LOGIPS = false;
static const bool DEFAULT_LOGTIMESTAMPS = true;
@@ -122,6 +125,17 @@ int LogPrintStr(const std::string &str);
/** Get format string from VA_ARGS for error reporting */
template<typename... Args> std::string FormatStringFromLogArgs(const char *fmt, const Args&... args) { return fmt; }
+static inline void MarkUsed() {}
+template<typename T, typename... Args> static inline void MarkUsed(const T& t, const Args&... args)
+{
+ (void)t;
+ MarkUsed(args...);
+}
+
+#ifdef USE_COVERAGE
+#define LogPrintf(...) do { MarkUsed(__VA_ARGS__); } while(0)
+#define LogPrint(category, ...) do { MarkUsed(__VA_ARGS__); } while(0)
+#else
#define LogPrintf(...) do { \
std::string _log_msg_; /* Unlikely name to avoid shadowing variables */ \
try { \
@@ -138,6 +152,7 @@ template<typename... Args> std::string FormatStringFromLogArgs(const char *fmt,
LogPrintf(__VA_ARGS__); \
} \
} while(0)
+#endif
template<typename... Args>
bool error(const char* fmt, const Args&... args)
@@ -152,7 +167,7 @@ bool TruncateFile(FILE *file, unsigned int length);
int RaiseFileDescriptorLimit(int nMinFD);
void AllocateFileRange(FILE *file, unsigned int offset, unsigned int length);
bool RenameOver(fs::path src, fs::path dest);
-bool TryCreateDirectory(const fs::path& p);
+bool TryCreateDirectories(const fs::path& p);
fs::path GetDefaultDataDir();
const fs::path &GetDataDir(bool fNetSpecific = true);
void ClearDatadirCache();
@@ -187,62 +202,63 @@ public:
void ParseParameters(int argc, const char*const argv[]);
void ReadConfigFile(const std::string& confPath);
std::vector<std::string> GetArgs(const std::string& strArg);
-/**
- * Return true if the given argument has been manually set
- *
- * @param strArg Argument to get (e.g. "-foo")
- * @return true if the argument has been set
- */
-bool IsArgSet(const std::string& strArg);
-
-/**
- * Return string argument or default value
- *
- * @param strArg Argument to get (e.g. "-foo")
- * @param default (e.g. "1")
- * @return command-line argument or default value
- */
-std::string GetArg(const std::string& strArg, const std::string& strDefault);
-
-/**
- * Return integer argument or default value
- *
- * @param strArg Argument to get (e.g. "-foo")
- * @param default (e.g. 1)
- * @return command-line argument (0 if invalid number) or default value
- */
-int64_t GetArg(const std::string& strArg, int64_t nDefault);
-
-/**
- * Return boolean argument or default value
- *
- * @param strArg Argument to get (e.g. "-foo")
- * @param default (true or false)
- * @return command-line argument or default value
- */
-bool GetBoolArg(const std::string& strArg, bool fDefault);
-
-/**
- * Set an argument if it doesn't already have a value
- *
- * @param strArg Argument to set (e.g. "-foo")
- * @param strValue Value (e.g. "1")
- * @return true if argument gets set, false if it already had a value
- */
-bool SoftSetArg(const std::string& strArg, const std::string& strValue);
-
-/**
- * Set a boolean argument if it doesn't already have a value
- *
- * @param strArg Argument to set (e.g. "-foo")
- * @param fValue Value (e.g. false)
- * @return true if argument gets set, false if it already had a value
- */
-bool SoftSetBoolArg(const std::string& strArg, bool fValue);
-// Forces an arg setting. Called by SoftSetArg() if the arg hasn't already
-// been set. Also called directly in testing.
-void ForceSetArg(const std::string& strArg, const std::string& strValue);
+ /**
+ * Return true if the given argument has been manually set
+ *
+ * @param strArg Argument to get (e.g. "-foo")
+ * @return true if the argument has been set
+ */
+ bool IsArgSet(const std::string& strArg);
+
+ /**
+ * Return string argument or default value
+ *
+ * @param strArg Argument to get (e.g. "-foo")
+ * @param default (e.g. "1")
+ * @return command-line argument or default value
+ */
+ std::string GetArg(const std::string& strArg, const std::string& strDefault);
+
+ /**
+ * Return integer argument or default value
+ *
+ * @param strArg Argument to get (e.g. "-foo")
+ * @param default (e.g. 1)
+ * @return command-line argument (0 if invalid number) or default value
+ */
+ int64_t GetArg(const std::string& strArg, int64_t nDefault);
+
+ /**
+ * Return boolean argument or default value
+ *
+ * @param strArg Argument to get (e.g. "-foo")
+ * @param default (true or false)
+ * @return command-line argument or default value
+ */
+ bool GetBoolArg(const std::string& strArg, bool fDefault);
+
+ /**
+ * Set an argument if it doesn't already have a value
+ *
+ * @param strArg Argument to set (e.g. "-foo")
+ * @param strValue Value (e.g. "1")
+ * @return true if argument gets set, false if it already had a value
+ */
+ bool SoftSetArg(const std::string& strArg, const std::string& strValue);
+
+ /**
+ * Set a boolean argument if it doesn't already have a value
+ *
+ * @param strArg Argument to set (e.g. "-foo")
+ * @param fValue Value (e.g. false)
+ * @return true if argument gets set, false if it already had a value
+ */
+ bool SoftSetBoolArg(const std::string& strArg, bool fValue);
+
+ // Forces an arg setting. Called by SoftSetArg() if the arg hasn't already
+ // been set. Also called directly in testing.
+ void ForceSetArg(const std::string& strArg, const std::string& strValue);
};
extern ArgsManager gArgs;
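// A standalone sketch of the coverage-build pattern introduced in util.h above:
// a recursive variadic MarkUsed() consumes every logging argument so the
// compiler still treats them as used, while the macro expands to no actual
// logging. The TOY_LOG name is a placeholder; compile with -DUSE_COVERAGE to
// exercise the no-op branch.
#include <cstdio>
#include <string>

static inline void MarkUsedToy() {}
template <typename T, typename... Args>
static inline void MarkUsedToy(const T& t, const Args&... args)
{
    (void)t;
    MarkUsedToy(args...);
}

#ifdef USE_COVERAGE
#define TOY_LOG(...) do { MarkUsedToy(__VA_ARGS__); } while (0)
#else
#define TOY_LOG(...) do { std::printf(__VA_ARGS__); } while (0)
#endif

int main()
{
    const std::string who = "coverage build";
    TOY_LOG("hello from %s\n", who.c_str());
    return 0;
}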
diff --git a/src/utilstrencodings.cpp b/src/utilstrencodings.cpp
index 74bf66fbf6..93abaec04b 100644
--- a/src/utilstrencodings.cpp
+++ b/src/utilstrencodings.cpp
@@ -437,7 +437,7 @@ bool ParseInt32(const std::string& str, int32_t *out)
errno = 0; // strtol will not set errno if valid
long int n = strtol(str.c_str(), &endp, 10);
if(out) *out = (int32_t)n;
- // Note that strtol returns a *long int*, so even if strtol doesn't report a over/underflow
+ // Note that strtol returns a *long int*, so even if strtol doesn't report an over/underflow
// we still have to check that the returned value is within the range of an *int32_t*. On 64-bit
// platforms the size of these types may be different.
return endp && *endp == 0 && !errno &&
@@ -453,7 +453,7 @@ bool ParseInt64(const std::string& str, int64_t *out)
errno = 0; // strtoll will not set errno if valid
long long int n = strtoll(str.c_str(), &endp, 10);
if(out) *out = (int64_t)n;
- // Note that strtoll returns a *long long int*, so even if strtol doesn't report a over/underflow
+ // Note that strtoll returns a *long long int*, so even if strtol doesn't report an over/underflow
// we still have to check that the returned value is within the range of an *int64_t*.
return endp && *endp == 0 && !errno &&
n >= std::numeric_limits<int64_t>::min() &&
@@ -470,7 +470,7 @@ bool ParseUInt32(const std::string& str, uint32_t *out)
errno = 0; // strtoul will not set errno if valid
unsigned long int n = strtoul(str.c_str(), &endp, 10);
if(out) *out = (uint32_t)n;
- // Note that strtoul returns a *unsigned long int*, so even if it doesn't report a over/underflow
+ // Note that strtoul returns a *unsigned long int*, so even if it doesn't report an over/underflow
// we still have to check that the returned value is within the range of an *uint32_t*. On 64-bit
// platforms the size of these types may be different.
return endp && *endp == 0 && !errno &&
@@ -487,7 +487,7 @@ bool ParseUInt64(const std::string& str, uint64_t *out)
errno = 0; // strtoull will not set errno if valid
unsigned long long int n = strtoull(str.c_str(), &endp, 10);
if(out) *out = (uint64_t)n;
- // Note that strtoull returns a *unsigned long long int*, so even if it doesn't report a over/underflow
+ // Note that strtoull returns a *unsigned long long int*, so even if it doesn't report an over/underflow
// we still have to check that the returned value is within the range of an *uint64_t*.
return endp && *endp == 0 && !errno &&
n <= std::numeric_limits<uint64_t>::max();
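// A compact sketch of the parsing discipline the corrected comments above
// describe: strtol returns a long, so even when it reports no over/underflow
// the result must still be range-checked against int32_t before narrowing.
// This is a simplified stand-in, not the library's ParseInt32 (it skips the
// leading-whitespace rejection, for instance).
#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <string>

static bool ParseI32(const std::string& str, int32_t* out)
{
    if (str.empty()) return false;
    char* endp = nullptr;
    errno = 0;  // strtol does not reset errno on success
    const long n = std::strtol(str.c_str(), &endp, 10);
    if (out) *out = static_cast<int32_t>(n);
    return endp && *endp == '\0' && errno == 0 &&
           n >= std::numeric_limits<int32_t>::min() &&
           n <= std::numeric_limits<int32_t>::max();
}

int main()
{
    int32_t ok_value = 0, bad_value = 0;
    const bool ok = ParseI32("12345", &ok_value);               // true
    const bool overflow = ParseI32("2147483648", &bad_value);   // false: exceeds int32_t
    return (ok && !overflow) ? 0 : 1;
}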
diff --git a/src/validation.cpp b/src/validation.cpp
index 04d180a084..09288be1ca 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -14,6 +14,7 @@
#include "consensus/merkle.h"
#include "consensus/tx_verify.h"
#include "consensus/validation.h"
+#include "cuckoocache.h"
#include "fs.h"
#include "hash.h"
#include "init.h"
@@ -97,7 +98,7 @@ namespace {
struct CBlockIndexWorkComparator
{
- bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
+ bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
// First sort by most total work, ...
if (pa->nChainWork > pb->nChainWork) return false;
if (pa->nChainWork < pb->nChainWork) return true;
@@ -190,7 +191,7 @@ enum FlushStateMode {
static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
-static bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = NULL);
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
bool CheckFinalTx(const CTransaction &tx, int flags)
@@ -313,6 +314,9 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool
return EvaluateSequenceLocks(index, lockPair);
}
+// Returns the script flags which should be checked for a given block
+static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
+
static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
int expired = pool.Expire(GetTime() - age);
if (expired != 0) {
@@ -396,6 +400,42 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f
LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
+// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
+// were somehow broken and returning the wrong scriptPubKeys
+static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &view, CTxMemPool& pool,
+ unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) {
+ AssertLockHeld(cs_main);
+
+ // pool.cs should be locked already, but go ahead and re-take the lock here
+ // to enforce that mempool doesn't change between when we check the view
+ // and when we actually call through to CheckInputs
+ LOCK(pool.cs);
+
+ assert(!tx.IsCoinBase());
+ for (const CTxIn& txin : tx.vin) {
+ const Coin& coin = view.AccessCoin(txin.prevout);
+
+ // At this point we haven't actually checked if the coins are all
+ // available (or shouldn't assume we have, since CheckInputs does).
+ // So we just return failure if the inputs are not available here,
+ // and then only have to check equivalence for available inputs.
+ if (coin.IsSpent()) return false;
+
+ const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
+ if (txFrom) {
+ assert(txFrom->GetHash() == txin.prevout.hash);
+ assert(txFrom->vout.size() > txin.prevout.n);
+ assert(txFrom->vout[txin.prevout.n] == coin.out);
+ } else {
+ const Coin& coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
+ assert(!coinFromDisk.IsSpent());
+ assert(coinFromDisk.out == coin.out);
+ }
+ }
+
+ return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
+}
+
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx, bool fLimitFree,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
bool fOverrideMempoolLimit, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache)
@@ -431,8 +471,9 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
- if (pool.exists(hash))
- return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");
+ if (pool.exists(hash)) {
+ return state.Invalid(false, REJECT_DUPLICATE, "txn-already-in-mempool");
+ }
// Check for conflicts with in-memory transactions
std::set<uint256> setConflicts;
@@ -470,8 +511,9 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
}
- if (fReplacementOptOut)
- return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");
+ if (fReplacementOptOut) {
+ return state.Invalid(false, REJECT_DUPLICATE, "txn-mempool-conflict");
+ }
setConflicts.insert(ptxConflicting->GetHash());
}
@@ -498,7 +540,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!had_coin_in_cache) {
coins_to_uncache.push_back(outpoint);
}
- return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known");
+ return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
}
}
@@ -750,32 +792,51 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
PrecomputedTransactionData txdata(tx);
- if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, txdata)) {
+ if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) {
// SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
// need to turn both off, and compare against just turning off CLEANSTACK
// to see if the failure is specifically due to witness validation.
CValidationState stateDummy; // Want reported failures to be from first CheckInputs
- if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, txdata) &&
- !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, txdata)) {
+ if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
+ !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
// Only the witness is missing, so the transaction itself may be fine.
state.SetCorruptionPossible();
}
return false; // state filled in by CheckInputs
}
- // Check again against just the consensus-critical mandatory script
- // verification flags, in case of bugs in the standard flags that cause
+ // Check again against the current block tip's script verification
+ // flags to cache our script execution flags. This is, of course,
+ // useless if the next block has different script flags from the
+ // previous one, but because the cache tracks script flags for us it
+ // will auto-invalidate and we'll just have a few blocks of extra
+ // misses on soft-fork activation.
+ //
+ // This is also useful in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain
// CHECKSIG NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
- // invalid blocks, however allowing such transactions into the mempool
- // can be exploited as a DoS attack.
- if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata))
+ // invalid blocks (using TestBlockValidity), however allowing such
+ // transactions into the mempool can be exploited as a DoS attack.
+ unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), Params().GetConsensus());
+ if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata))
{
- return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
+ // If we're using promiscuousmempoolflags, we may hit this normally
+ // Check if current block has some flags that scriptVerifyFlags
+ // does not before printing an ominous warning
+ if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) {
+ return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against latest-block but not STANDARD flags %s, %s",
+ __func__, hash.ToString(), FormatStateMessage(state));
+ } else {
+ if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, false, txdata)) {
+ return error("%s: ConnectInputs failed against MANDATORY but not STANDARD flags due to promiscuous mempool %s, %s",
+ __func__, hash.ToString(), FormatStateMessage(state));
+ } else {
+ LogPrintf("Warning: -promiscuousmempool flags set to not include currently enforced soft forks, this may break mining or otherwise cause instability!\n");
+ }
+ }
}
// Remove conflicting transactions from the mempool
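// A tiny standalone illustration of the bit test used above:
// ~standard_flags & tip_flags is non-zero exactly when the current tip enforces
// some flag that the standardness flags do not, i.e. when the tip's flags are
// NOT a subset of the mempool's script-verify flags (the promiscuousmempoolflags
// case the new warning covers).
#include <cassert>
#include <cstdint>

static bool TipFlagsAreSubset(uint32_t standard_flags, uint32_t tip_flags)
{
    return (~standard_flags & tip_flags) == 0;
}

int main()
{
    assert(TipFlagsAreSubset(0x7, 0x3));    // tip flags all covered by standard flags
    assert(!TipFlagsAreSubset(0x3, 0x4));   // tip enforces an extra flag
    return 0;
}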
@@ -1124,7 +1185,8 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txund
txundo.vprevout.reserve(tx.vin.size());
for (const CTxIn &txin : tx.vin) {
txundo.vprevout.emplace_back();
- inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
+ bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
+ assert(is_spent);
}
}
// add outputs
@@ -1150,12 +1212,34 @@ int GetSpendHeight(const CCoinsViewCache& inputs)
return pindexPrev->nHeight + 1;
}
+
+static CuckooCache::cache<uint256, SignatureCacheHasher> scriptExecutionCache;
+static uint256 scriptExecutionCacheNonce(GetRandHash());
+
+void InitScriptExecutionCache() {
+ // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
+ // setup_bytes creates the minimum possible cache (2 elements).
+ size_t nMaxCacheSize = std::min(std::max((int64_t)0, GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
+ size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize);
+ LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
+ (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
+}
+
/**
* Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
- * This does not modify the UTXO set. If pvChecks is not NULL, script checks are pushed onto it
- * instead of being performed inline.
+ * This does not modify the UTXO set.
+ *
+ * If pvChecks is not NULL, script checks are pushed onto it instead of being performed inline. Any
+ * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
+ * not pushed onto pvChecks/run.
+ *
+ * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
+ * which are matched. This is useful for checking blocks where we will likely never need the cache
+ * entry again.
+ *
+ * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
-static bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
{
@@ -1175,6 +1259,21 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
// Of course, if an assumed valid block is invalid due to false scriptSigs
// this optimization would allow an invalid chain to be accepted.
if (fScriptChecks) {
+ // First check if script executions have been cached with the same
+ // flags. Note that this assumes that the inputs provided are
+ // correct (ie that the transaction hash which is in tx's prevouts
+ // properly commits to the scriptPubKey in the inputs view of that
+ // transaction).
+ uint256 hashCacheEntry;
+ // We only use the first 19 bytes of nonce to avoid a second SHA
+ // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
+ static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
+ CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
+ AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
+ if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
+ return true;
+ }
+
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const Coin& coin = inputs.AccessCoin(prevout);
@@ -1189,7 +1288,7 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
const CAmount amount = coin.out.nValue;
// Verify signature
- CScriptCheck check(scriptPubKey, amount, tx, i, flags, cacheStore, &txdata);
+ CScriptCheck check(scriptPubKey, amount, tx, i, flags, cacheSigStore, &txdata);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
@@ -1202,12 +1301,12 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
// avoid splitting the network between upgraded and
// non-upgraded nodes.
CScriptCheck check2(scriptPubKey, amount, tx, i,
- flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, &txdata);
+ flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
// Failures of other flags indicate a transaction that is
- // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
+ // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
// such nodes as they are not following the protocol. That
// said during an upgrade careful thought should be taken
// as to the correct behavior - we may want to continue
@@ -1216,6 +1315,12 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
+
+ if (cacheFullScriptStore && !pvChecks) {
+ // We executed all of the provided scripts, and were told to
+ // cache the result. Do so now.
+ scriptExecutionCache.insert(hashCacheEntry);
+ }
}
}
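// A simplified stand-in (std::unordered_set instead of CuckooCache, and a plain
// string key instead of the salted SHA256 over the witness txid and flags) for
// the whole-transaction script cache used in CheckInputs above. contains() can
// erase the entry on a hit, which is what passing !cacheFullScriptStore requests
// when validating blocks whose entries will likely never be needed again.
#include <cstdint>
#include <string>
#include <unordered_set>

class ToyScriptCache {
public:
    bool contains(const std::string& wtxid, uint32_t flags, bool erase) {
        const std::string key = wtxid + "/" + std::to_string(flags);
        auto it = entries_.find(key);
        if (it == entries_.end()) return false;
        if (erase) entries_.erase(it);
        return true;
    }
    void insert(const std::string& wtxid, uint32_t flags) {
        entries_.insert(wtxid + "/" + std::to_string(flags));
    }
private:
    std::unordered_set<std::string> entries_;
};

int main()
{
    ToyScriptCache cache;
    cache.insert("wtxid-abc", /*flags=*/0x5);
    // A hit is only valid for the exact flags the scripts were executed with.
    const bool hit_same = cache.contains("wtxid-abc", 0x5, /*erase=*/false);
    const bool hit_other = cache.contains("wtxid-abc", 0x7, /*erase=*/false);
    return (hit_same && !hit_other) ? 0 : 1;
}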
@@ -1295,7 +1400,7 @@ bool AbortNode(CValidationState& state, const std::string& strMessage, const std
return state.Error(strMessage);
}
-} // anon namespace
+} // namespace
enum DisconnectResult
{
@@ -1329,17 +1434,19 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
return DISCONNECT_FAILED; // adding output for transaction without known metadata
}
}
- view.AddCoin(out, std::move(undo), undo.fCoinBase);
+ // The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
+ // sure that the coin did not already exist in the cache. As we have queried for that above
+ // using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
+ // it is an overwrite.
+ view.AddCoin(out, std::move(undo), !fClean);
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
- * When UNCLEAN or FAILED is returned, view is left in an indeterminate state. */
+ * When FAILED is returned, view is left in an indeterminate state. */
static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
- assert(pindex->GetBlockHash() == view.GetBestBlock());
-
bool fClean = true;
CBlockUndo blockUndo;
@@ -1362,6 +1469,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex*
for (int i = block.vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = *(block.vtx[i]);
uint256 hash = tx.GetHash();
+ bool is_coinbase = tx.IsCoinBase();
// Check that all outputs are available and match the outputs in the block itself
// exactly.
@@ -1369,8 +1477,8 @@ static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex*
if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
COutPoint out(hash, o);
Coin coin;
- view.SpendCoin(out, &coin);
- if (tx.vout[o] != coin.out) {
+ bool is_spent = view.SpendCoin(out, &coin);
+ if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
fClean = false; // transaction output mismatch
}
}
@@ -1460,12 +1568,12 @@ private:
public:
WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
- int64_t BeginTime(const Consensus::Params& params) const { return 0; }
- int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
- int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
- int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }
+ int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
+ int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
+ int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
+ int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
{
return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
((pindex->nVersion >> bit) & 1) != 0 &&
@@ -1476,6 +1584,41 @@ public:
// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
+static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) {
+ AssertLockHeld(cs_main);
+
+ // BIP16 didn't become active until Apr 1 2012
+ int64_t nBIP16SwitchTime = 1333238400;
+ bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
+
+ unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
+
+ // Start enforcing the DERSIG (BIP66) rule
+ if (pindex->nHeight >= consensusparams.BIP66Height) {
+ flags |= SCRIPT_VERIFY_DERSIG;
+ }
+
+ // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
+ if (pindex->nHeight >= consensusparams.BIP65Height) {
+ flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
+ }
+
+ // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
+ if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
+ flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
+ }
+
+ // Start enforcing WITNESS rules using versionbits logic.
+ if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
+ flags |= SCRIPT_VERIFY_WITNESS;
+ flags |= SCRIPT_VERIFY_NULLDUMMY;
+ }
+
+ return flags;
+}
+
+
+
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
@@ -1579,34 +1722,14 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
}
}
- // BIP16 didn't become active until Apr 1 2012
- int64_t nBIP16SwitchTime = 1333238400;
- bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
-
- unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
-
- // Start enforcing the DERSIG (BIP66) rule
- if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) {
- flags |= SCRIPT_VERIFY_DERSIG;
- }
-
- // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
- if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) {
- flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
- }
-
// Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
int nLockTimeFlags = 0;
if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
- flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
}
- // Start enforcing WITNESS rules using versionbits logic.
- if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
- flags |= SCRIPT_VERIFY_WITNESS;
- flags |= SCRIPT_VERIFY_NULLDUMMY;
- }
+ // Get the script flags for this block
+ unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
@@ -1667,7 +1790,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
- if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
+ if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
control.Add(vChecks);
@@ -1777,7 +1900,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
nLastSetChain = nNow;
}
int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t cacheSize = pcoinsTip->DynamicMemoryUsage() * DB_PEAK_USAGE_FACTOR;
+ int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
// The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
@@ -1944,6 +2067,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
+ assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
bool flushed = view.Flush();
@@ -3323,7 +3447,7 @@ CBlockIndex * InsertBlockIndex(uint256 hash)
bool static LoadBlockIndexDB(const CChainParams& chainparams)
{
- if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex))
+ if (!pblocktree->LoadBlockIndexGuts(chainparams.GetConsensus(), InsertBlockIndex))
return false;
boost::this_thread::interruption_point();
@@ -3415,20 +3539,25 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
pblocktree->ReadFlag("txindex", fTxIndex);
LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
+ return true;
+}
+
+void LoadChainTip(const CChainParams& chainparams)
+{
+ if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return;
+
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end())
- return true;
+ return;
chainActive.SetTip(it->second);
PruneBlockIndexCandidates();
- LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
+ LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
-
- return true;
}
CVerifyDB::CVerifyDB()
@@ -3497,6 +3626,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
+ assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res = DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
@@ -3536,6 +3666,92 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
return true;
}
+/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
+static bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
+{
+ // TODO: merge with ConnectBlock
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
+ return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ }
+
+ for (const CTransactionRef& tx : block.vtx) {
+ if (!tx->IsCoinBase()) {
+ for (const CTxIn &txin : tx->vin) {
+ inputs.SpendCoin(txin.prevout);
+ }
+ }
+ // Pass check = true as every addition may be an overwrite.
+ AddCoins(inputs, *tx, pindex->nHeight, true);
+ }
+ return true;
+}
+
+bool ReplayBlocks(const CChainParams& params, CCoinsView* view)
+{
+ LOCK(cs_main);
+
+ CCoinsViewCache cache(view);
+
+ std::vector<uint256> hashHeads = view->GetHeadBlocks();
+ if (hashHeads.empty()) return true; // We're already in a consistent state.
+ if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
+
+ uiInterface.ShowProgress(_("Replaying blocks..."), 0);
+ LogPrintf("Replaying blocks\n");
+
+ const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
+ const CBlockIndex* pindexNew; // New tip during the interrupted flush.
+ const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
+
+ if (mapBlockIndex.count(hashHeads[0]) == 0) {
+ return error("ReplayBlocks(): reorganization to unknown block requested");
+ }
+ pindexNew = mapBlockIndex[hashHeads[0]];
+
+ if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
+ if (mapBlockIndex.count(hashHeads[1]) == 0) {
+ return error("ReplayBlocks(): reorganization from unknown block requested");
+ }
+ pindexOld = mapBlockIndex[hashHeads[1]];
+ pindexFork = LastCommonAncestor(pindexOld, pindexNew);
+ assert(pindexFork != nullptr);
+ }
+
+ // Rollback along the old branch.
+ while (pindexOld != pindexFork) {
+ if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
+ return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ }
+ LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
+ DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
+ if (res == DISCONNECT_FAILED) {
+ return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ }
+ // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
+ // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
+ // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
+ // the result is still a version of the UTXO set with the effects of that block undone.
+ }
+ pindexOld = pindexOld->pprev;
+ }
+
+ // Roll forward from the forking point to the new tip.
+ int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
+ for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
+ const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
+ LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
+ if (!RollforwardBlock(pindex, cache, params)) return false;
+ }
+
+ cache.SetBestBlock(pindexNew->GetBlockHash());
+ cache.Flush();
+ uiInterface.ShowProgress("", 100);
+ return true;
+}
+
bool RewindBlockIndex(const CChainParams& params)
{
LOCK(cs_main);
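// A minimal sketch (toy index nodes with plain pprev pointers, not CBlockIndex
// and its GetAncestor() skip list) of the fork-point search ReplayBlocks above
// relies on: walk the deeper tip down to the shallower height, then walk both
// back in lockstep until they meet. The meeting node is the latest block common
// to the old and new tips.
#include <cassert>

struct ToyIndex {
    int height;
    const ToyIndex* pprev;
};

static const ToyIndex* LastCommonAncestorToy(const ToyIndex* a, const ToyIndex* b)
{
    while (a && b && a->height > b->height) a = a->pprev;
    while (a && b && b->height > a->height) b = b->pprev;
    while (a != b) { a = a->pprev; b = b->pprev; }
    return a;
}

int main()
{
    //  g0 - g1 - g2 - a3        (old tip: a3)
    //              \
    //               b3 - b4     (new tip: b4)
    ToyIndex g0{0, nullptr}, g1{1, &g0}, g2{2, &g1}, a3{3, &g2}, b3{3, &g2}, b4{4, &b3};
    assert(LastCommonAncestorToy(&a3, &b4) == &g2);
    return 0;
}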
@@ -3685,8 +3901,6 @@ bool InitBlockIndex(const CChainParams& chainparams)
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
return error("LoadBlockIndex(): genesis block not accepted");
- // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
- return FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS);
} catch (const std::runtime_error& e) {
return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
}
diff --git a/src/validation.h b/src/validation.h
index 15e19bc511..a9f995abb8 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -260,6 +260,8 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
bool InitBlockIndex(const CChainParams& chainparams);
/** Load the block tree and coins database from disk */
bool LoadBlockIndex(const CChainParams& chainparams);
+/** Update the chain tip based on database information. */
+void LoadChainTip(const CChainParams& chainparams);
/** Unload database information */
void UnloadBlockIndex();
/** Run an instance of the script checking thread */
@@ -391,6 +393,9 @@ public:
ScriptError GetScriptError() const { return error; }
};
+/** Initializes the script-execution cache */
+void InitScriptExecutionCache();
+
/** Functions for disk access for blocks */
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams);
@@ -424,6 +429,9 @@ public:
bool VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth);
};
+/** Replay blocks that aren't fully applied to the database. */
+bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
+
/** Find the last common block between the parameter chain and a locator. */
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator);
@@ -469,10 +477,6 @@ int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Para
static const unsigned int REJECT_INTERNAL = 0x100;
/** Too high fee. Can not be triggered by P2P transactions */
static const unsigned int REJECT_HIGHFEE = 0x100;
-/** Transaction is already known (either in mempool or blockchain) */
-static const unsigned int REJECT_ALREADY_KNOWN = 0x101;
-/** Transaction conflicts with a transaction already known */
-static const unsigned int REJECT_CONFLICT = 0x102;
/** Get block file info entry for one block file */
CBlockFileInfo* GetBlockFileInfo(size_t n);
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index 46d7c9b329..be2f20b863 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -21,12 +21,10 @@ void RegisterValidationInterface(CValidationInterface* pwalletIn) {
g_signals.Inventory.connect(boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
g_signals.Broadcast.connect(boost::bind(&CValidationInterface::ResendWalletTransactions, pwalletIn, _1, _2));
g_signals.BlockChecked.connect(boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
- g_signals.ScriptForMining.connect(boost::bind(&CValidationInterface::GetScriptForMining, pwalletIn, _1));
g_signals.NewPoWValidBlock.connect(boost::bind(&CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2));
}
void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
- g_signals.ScriptForMining.disconnect(boost::bind(&CValidationInterface::GetScriptForMining, pwalletIn, _1));
g_signals.BlockChecked.disconnect(boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
g_signals.Broadcast.disconnect(boost::bind(&CValidationInterface::ResendWalletTransactions, pwalletIn, _1, _2));
g_signals.Inventory.disconnect(boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
@@ -39,7 +37,6 @@ void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
}
void UnregisterAllValidationInterfaces() {
- g_signals.ScriptForMining.disconnect_all_slots();
g_signals.BlockChecked.disconnect_all_slots();
g_signals.Broadcast.disconnect_all_slots();
g_signals.Inventory.disconnect_all_slots();
diff --git a/src/validationinterface.h b/src/validationinterface.h
index 460aecf243..17545018df 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -40,7 +40,6 @@ protected:
virtual void Inventory(const uint256 &hash) {}
virtual void ResendWalletTransactions(int64_t nBestBlockTime, CConnman* connman) {}
virtual void BlockChecked(const CBlock&, const CValidationState&) {}
- virtual void GetScriptForMining(std::shared_ptr<CReserveScript>&) {};
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& block) {};
friend void ::RegisterValidationInterface(CValidationInterface*);
friend void ::UnregisterValidationInterface(CValidationInterface*);
@@ -72,8 +71,6 @@ struct CMainSignals {
* callback was generated (not necessarily now)
*/
boost::signals2::signal<void (const CBlock&, const CValidationState&)> BlockChecked;
- /** Notifies listeners that a key for mining is required (coinbase) */
- boost::signals2::signal<void (std::shared_ptr<CReserveScript>&)> ScriptForMining;
/**
* Notifies listeners that a block which builds directly on our current tip
* has been received and connected to the headers tree, though not validated yet */
diff --git a/src/versionbits.cpp b/src/versionbits.cpp
index 4bb352f23c..8047e17aa8 100644
--- a/src/versionbits.cpp
+++ b/src/versionbits.cpp
@@ -174,12 +174,12 @@ private:
const Consensus::DeploymentPos id;
protected:
- int64_t BeginTime(const Consensus::Params& params) const { return params.vDeployments[id].nStartTime; }
- int64_t EndTime(const Consensus::Params& params) const { return params.vDeployments[id].nTimeout; }
- int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
- int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }
+ int64_t BeginTime(const Consensus::Params& params) const override { return params.vDeployments[id].nStartTime; }
+ int64_t EndTime(const Consensus::Params& params) const override { return params.vDeployments[id].nTimeout; }
+ int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
+ int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
{
return (((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && (pindex->nVersion & Mask(params)) != 0);
}
@@ -189,7 +189,7 @@ public:
uint32_t Mask(const Consensus::Params& params) const { return ((uint32_t)1) << params.vDeployments[id].bit; }
};
-}
+} // namespace
ThresholdState VersionBitsState(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache)
{
diff --git a/src/wallet/crypter.cpp b/src/wallet/crypter.cpp
index 33be47273f..dcce88cedc 100644
--- a/src/wallet/crypter.cpp
+++ b/src/wallet/crypter.cpp
@@ -273,7 +273,6 @@ bool CCryptoKeyStore::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) co
// Check for watch-only pubkeys
return CBasicKeyStore::GetPubKey(address, vchPubKeyOut);
}
- return false;
}
bool CCryptoKeyStore::EncryptKeys(CKeyingMaterial& vMasterKeyIn)
diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h
index f1c4f57428..1dc44e424f 100644
--- a/src/wallet/crypter.h
+++ b/src/wallet/crypter.h
@@ -157,8 +157,8 @@ public:
bool Lock();
virtual bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
- bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey);
- bool HaveKey(const CKeyID &address) const
+ bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
+ bool HaveKey(const CKeyID &address) const override
{
{
LOCK(cs_KeyStore);
@@ -168,9 +168,9 @@ public:
}
return false;
}
- bool GetKey(const CKeyID &address, CKey& keyOut) const;
- bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const;
- void GetKeys(std::set<CKeyID> &setAddress) const
+ bool GetKey(const CKeyID &address, CKey& keyOut) const override;
+ bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
+ void GetKeys(std::set<CKeyID> &setAddress) const override
{
if (!IsCrypted())
{
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index 3f495f23b9..da2d180756 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -73,7 +73,7 @@ bool CDBEnv::Open(const fs::path& pathIn)
strPath = pathIn.string();
fs::path pathLogDir = pathIn / "database";
- TryCreateDirectory(pathLogDir);
+ TryCreateDirectories(pathLogDir);
fs::path pathErrorFile = pathIn / "db.log";
LogPrintf("CDBEnv::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string());
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index 46ef87b7b1..6a9e6cf9ff 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -165,7 +165,7 @@ CFeeBumper::CFeeBumper(const CWallet *pWallet, const uint256 txidIn, int newConf
nNewFee = totalFee;
nNewFeeRate = CFeeRate(totalFee, maxNewTxSize);
} else {
- nNewFee = CWallet::GetMinimumFee(maxNewTxSize, newConfirmTarget, mempool, ::feeEstimator, ignoreGlobalPayTxFee);
+ nNewFee = CWallet::GetMinimumFee(maxNewTxSize, newConfirmTarget, mempool, ::feeEstimator, nullptr, ignoreGlobalPayTxFee);
nNewFeeRate = CFeeRate(nNewFee, maxNewTxSize);
// New fee rate must be at least old rate + minimum incremental relay rate
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 7627d625d0..9f42b1f266 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -147,7 +147,7 @@ UniValue importprivkey(const JSONRPCRequest& request)
pwallet->UpdateTimeFirstKey(1);
if (fRescan) {
- pwallet->ScanForWalletTransactions(chainActive.Genesis(), true);
+ pwallet->RescanFromTime(TIMESTAMP_MIN, true /* update */);
}
}
@@ -277,7 +277,7 @@ UniValue importaddress(const JSONRPCRequest& request)
if (fRescan)
{
- pwallet->ScanForWalletTransactions(chainActive.Genesis(), true);
+ pwallet->RescanFromTime(TIMESTAMP_MIN, true /* update */);
pwallet->ReacceptWalletTransactions();
}
@@ -361,7 +361,7 @@ UniValue removeprunedfunds(const JSONRPCRequest& request)
"\nExamples:\n"
+ HelpExampleCli("removeprunedfunds", "\"a8d0c0184dde994a09ec054286f1ce581bebf46446a512166eae7628734ea0a5\"") +
"\nAs a JSON-RPC call\n"
- + HelpExampleRpc("removprunedfunds", "\"a8d0c0184dde994a09ec054286f1ce581bebf46446a512166eae7628734ea0a5\"")
+ + HelpExampleRpc("removeprunedfunds", "\"a8d0c0184dde994a09ec054286f1ce581bebf46446a512166eae7628734ea0a5\"")
);
LOCK2(cs_main, pwallet->cs_wallet);
@@ -435,7 +435,7 @@ UniValue importpubkey(const JSONRPCRequest& request)
if (fRescan)
{
- pwallet->ScanForWalletTransactions(chainActive.Genesis(), true);
+ pwallet->RescanFromTime(TIMESTAMP_MIN, true /* update */);
pwallet->ReacceptWalletTransactions();
}
@@ -535,11 +535,7 @@ UniValue importwallet(const JSONRPCRequest& request)
file.close();
pwallet->ShowProgress("", 100); // hide progress dialog in GUI
pwallet->UpdateTimeFirstKey(nTimeBegin);
-
- CBlockIndex *pindex = chainActive.FindEarliestAtLeast(nTimeBegin - TIMESTAMP_WINDOW);
-
- LogPrintf("Rescanning last %i blocks\n", pindex ? chainActive.Height() - pindex->nHeight + 1 : 0);
- pwallet->ScanForWalletTransactions(pindex);
+ pwallet->RescanFromTime(nTimeBegin, false /* update */);
pwallet->MarkDirty();
if (!fGood)
@@ -1048,7 +1044,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
" \"redeemscript\": \"<script>\" , (string, optional) Allowed only if the scriptPubKey is a P2SH address or a P2SH scriptPubKey\n"
" \"pubkeys\": [\"<pubKey>\", ... ] , (array, optional) Array of strings giving pubkeys that must occur in the output or redeemscript\n"
" \"keys\": [\"<key>\", ... ] , (array, optional) Array of strings giving private keys whose corresponding public keys must occur in the output or redeemscript\n"
- " \"internal\": <true> , (boolean, optional, default: false) Stating whether matching outputs should be be treated as not incoming payments\n"
+ " \"internal\": <true> , (boolean, optional, default: false) Stating whether matching outputs should be treated as not incoming payments\n"
" \"watchonly\": <true> , (boolean, optional, default: false) Stating whether matching outputs should be considered watched even when they're not spendable, only allowed if keys are empty\n"
" \"label\": <label> , (string, optional, default: '') Label to assign to the address (aka account name, for now), only allowed with internal=false\n"
" }\n"
@@ -1125,14 +1121,10 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
}
if (fRescan && fRunScan && requests.size()) {
- CBlockIndex* pindex = nLowestTimestamp > minimumTimestamp ? chainActive.FindEarliestAtLeast(std::max<int64_t>(nLowestTimestamp - TIMESTAMP_WINDOW, 0)) : chainActive.Genesis();
- CBlockIndex* scanFailed = nullptr;
- if (pindex) {
- scanFailed = pwallet->ScanForWalletTransactions(pindex, true);
- pwallet->ReacceptWalletTransactions();
- }
+ int64_t scannedTime = pwallet->RescanFromTime(nLowestTimestamp, true /* update */);
+ pwallet->ReacceptWalletTransactions();
- if (scanFailed) {
+ if (scannedTime > nLowestTimestamp) {
std::vector<UniValue> results = response.getValues();
response.clear();
response.setArray();
@@ -1142,7 +1134,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
// range, or if the import result already has an error set, let
// the result stand unmodified. Otherwise replace the result
// with an error message.
- if (GetImportTimestamp(request, now) - TIMESTAMP_WINDOW > scanFailed->GetBlockTimeMax() || results.at(i).exists("error")) {
+ if (scannedTime <= GetImportTimestamp(request, now) || results.at(i).exists("error")) {
response.push_back(results.at(i));
} else {
UniValue result = UniValue(UniValue::VOBJ);
@@ -1158,7 +1150,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
"caused by pruning or data corruption (see bitcoind log for details) and could "
"be dealt with by downloading and rescanning the relevant blocks (see -reindex "
"and -rescan options).",
- GetImportTimestamp(request, now), scanFailed->GetBlockTimeMax(), TIMESTAMP_WINDOW)));
+ GetImportTimestamp(request, now), scannedTime - TIMESTAMP_WINDOW - 1, TIMESTAMP_WINDOW)));
response.push_back(std::move(result));
}
++i;
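
One way to read the new check: `RescanFromTime()` returns the earliest timestamp it can vouch for, so an import whose requested timestamp is older than that value may have missed transactions and is flagged with an error. An illustrative Python sketch of just that comparison (hypothetical helper, not the RPC code itself):

```python
def flag_incomplete_imports(scanned_time, import_timestamps):
    """scanned_time: the value returned by the wallet rescan, i.e. the earliest
    timestamp that was fully covered. Any import requesting an older timestamp
    may have missed transactions and should be reported."""
    return [(ts, scanned_time <= ts) for ts in import_timestamps]

# Example: the rescan only covered blocks from t=1500000000 onwards.
print(flag_incomplete_imports(1500000000, [1400000000, 1600000000]))
# [(1400000000, False), (1600000000, True)]
```
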
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 2e4105a569..e0c7ab9f0f 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -15,6 +15,7 @@
#include "policy/fees.h"
#include "policy/policy.h"
#include "policy/rbf.h"
+#include "rpc/mining.h"
#include "rpc/server.h"
#include "script/sign.h"
#include "timedata.h"
@@ -1064,7 +1065,6 @@ public:
bool operator()(const CNoDestination &dest) const { return false; }
bool operator()(const CKeyID &keyID) {
- CPubKey pubkey;
if (pwallet) {
CScript basescript = GetScriptForDestination(keyID);
isminetype typ;
@@ -2923,6 +2923,51 @@ UniValue bumpfee(const JSONRPCRequest& request)
return result;
}
+UniValue generate(const JSONRPCRequest& request)
+{
+ CWallet * const pwallet = GetWalletForJSONRPCRequest(request);
+
+ if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
+ return NullUniValue;
+ }
+
+ if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) {
+ throw std::runtime_error(
+ "generate nblocks ( maxtries )\n"
+ "\nMine up to nblocks blocks immediately (before the RPC call returns) to an address in the wallet.\n"
+ "\nArguments:\n"
+ "1. nblocks (numeric, required) How many blocks are generated immediately.\n"
+ "2. maxtries (numeric, optional) How many iterations to try (default = 1000000).\n"
+ "\nResult:\n"
+ "[ blockhashes ] (array) hashes of blocks generated\n"
+ "\nExamples:\n"
+ "\nGenerate 11 blocks\n"
+ + HelpExampleCli("generate", "11")
+ );
+ }
+
+ int num_generate = request.params[0].get_int();
+ uint64_t max_tries = 1000000;
+ if (request.params.size() > 1 && !request.params[1].isNull()) {
+ max_tries = request.params[1].get_int();
+ }
+
+ std::shared_ptr<CReserveScript> coinbase_script;
+ pwallet->GetScriptForMining(coinbase_script);
+
+ // If the keypool is exhausted, no script is returned at all. Catch this.
+ if (!coinbase_script) {
+ throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, "Error: Keypool ran out, please call keypoolrefill first");
+ }
+
+ //throw an error if no script was provided
+ if (coinbase_script->reserveScript.empty()) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "No coinbase script available");
+ }
+
+ return generateBlocks(coinbase_script, num_generate, max_tries, true);
+}
+
extern UniValue abortrescan(const JSONRPCRequest& request); // in rpcdump.cpp
extern UniValue dumpprivkey(const JSONRPCRequest& request); // in rpcdump.cpp
extern UniValue importprivkey(const JSONRPCRequest& request);
@@ -2986,6 +3031,8 @@ static const CRPCCommand commands[] =
{ "wallet", "walletpassphrasechange", &walletpassphrasechange, true, {"oldpassphrase","newpassphrase"} },
{ "wallet", "walletpassphrase", &walletpassphrase, true, {"passphrase","timeout"} },
{ "wallet", "removeprunedfunds", &removeprunedfunds, true, {"txid"} },
+
+ { "generating", "generate", &generate, true, {"nblocks","maxtries"} },
};
void RegisterWalletRPCCommands(CRPCTable &t)
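
Based on the help text above, a hedged usage sketch over JSON-RPC using the test framework's `AuthServiceProxy` (the URL, credentials, and port are placeholders for a local regtest node with a wallet loaded):

```python
from test_framework.authproxy import AuthServiceProxy

# Placeholder credentials/port for a local regtest bitcoind with a wallet loaded
node = AuthServiceProxy("http://user:pass@127.0.0.1:18332")

block_hashes = node.generate(11)   # mine 11 blocks to an address from the wallet
assert len(block_hashes) == 11
node.generate(1, 1000000)          # optional maxtries bounds the nonce search per block
```
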
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 02de3cceed..0d1a86dd24 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -205,7 +205,6 @@ bool CWallet::AddCryptedKey(const CPubKey &vchPubKey,
vchCryptedSecret,
mapKeyMetadata[vchPubKey.GetID()]);
}
- return false;
}
bool CWallet::LoadKeyMetadata(const CTxDestination& keyID, const CKeyMetadata &meta)
@@ -221,6 +220,10 @@ bool CWallet::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigne
return CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret);
}
+/**
+ * Update wallet first key creation time. This should be called whenever keys
+ * are added to the wallet, with the oldest key creation time.
+ */
void CWallet::UpdateTimeFirstKey(int64_t nCreateTime)
{
AssertLockHeld(cs_wallet);
@@ -1469,6 +1472,34 @@ void CWalletTx::GetAmounts(std::list<COutputEntry>& listReceived,
}
/**
+ * Scan active chain for relevant transactions after importing keys. This should
+ * be called whenever new keys are added to the wallet, with the oldest key
+ * creation time.
+ *
+ * @return Earliest timestamp that could be successfully scanned from. Timestamp
+ * returned will be higher than startTime if relevant blocks could not be read.
+ */
+int64_t CWallet::RescanFromTime(int64_t startTime, bool update)
+{
+ AssertLockHeld(cs_main);
+ AssertLockHeld(cs_wallet);
+
+ // Find starting block. May be null if nCreateTime is greater than the
+ // highest blockchain timestamp, in which case there is nothing that needs
+ // to be scanned.
+ CBlockIndex* const startBlock = chainActive.FindEarliestAtLeast(startTime - TIMESTAMP_WINDOW);
+ LogPrintf("%s: Rescanning last %i blocks\n", __func__, startBlock ? chainActive.Height() - startBlock->nHeight + 1 : 0);
+
+ if (startBlock) {
+ const CBlockIndex* const failedBlock = ScanForWalletTransactions(startBlock, update);
+ if (failedBlock) {
+ return failedBlock->GetBlockTimeMax() + TIMESTAMP_WINDOW + 1;
+ }
+ }
+ return startTime;
+}
+
+/**
* Scan the block chain (starting in pindexStart) for transactions
* from or to us. If fUpdate is true, found transactions that already
* exist in the wallet will be updated.
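
The contract documented above is what importmulti now relies on: the returned timestamp is the earliest point the caller can trust to have been scanned. A small Python sketch of the same convention (illustrative only; the block tuples and the two-hour TIMESTAMP_WINDOW value are assumptions mirroring the wallet code):

```python
TIMESTAMP_WINDOW = 2 * 60 * 60  # assumed slack for block-time variability

def rescan_from_time(blocks, start_time):
    """blocks: (block_time_max, readable) tuples in chain order.
    Returns the earliest timestamp the caller can rely on having been scanned."""
    failed_time_max = None
    for block_time_max, readable in blocks:
        if block_time_max < start_time - TIMESTAMP_WINDOW:
            continue                      # older than the adjusted start time: skip
        if not readable:                  # e.g. pruned or corrupted block data
            failed_time_max = block_time_max
    if failed_time_max is not None:
        # Everything at or before the failed block is suspect
        return failed_time_max + TIMESTAMP_WINDOW + 1
    return start_time
```
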
@@ -1489,11 +1520,6 @@ CBlockIndex* CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool f
fAbortRescan = false;
fScanningWallet = true;
- // no need to read and scan block, if block was created before
- // our wallet birthday (as adjusted for block time variability)
- while (pindex && nTimeFirstKey && (pindex->GetBlockTime() < (nTimeFirstKey - TIMESTAMP_WINDOW)))
- pindex = chainActive.Next(pindex);
-
ShowProgress(_("Rescanning..."), 0); // show rescan progress in GUI as dialog or on splashscreen, if -rescan on startup
double dProgressStart = GuessVerificationProgress(chainParams.TxData(), pindex);
double dProgressTip = GuessVerificationProgress(chainParams.TxData(), chainActive.Tip());
@@ -2534,7 +2560,8 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
assert(txNew.nLockTime <= (unsigned int)chainActive.Height());
assert(txNew.nLockTime < LOCKTIME_THRESHOLD);
-
+ FeeCalculation feeCalc;
+ unsigned int nBytes;
{
std::set<CInputCoin> setCoins;
LOCK2(cs_main, cs_wallet);
@@ -2633,28 +2660,6 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
CTxOut newTxOut(nChange, scriptChange);
- // We do not move dust-change to fees, because the sender would end up paying more than requested.
- // This would be against the purpose of the all-inclusive feature.
- // So instead we raise the change and deduct from the recipient.
- if (nSubtractFeeFromAmount > 0 && IsDust(newTxOut, ::dustRelayFee))
- {
- CAmount nDust = GetDustThreshold(newTxOut, ::dustRelayFee) - newTxOut.nValue;
- newTxOut.nValue += nDust; // raise change until no more dust
- for (unsigned int i = 0; i < vecSend.size(); i++) // subtract from first recipient
- {
- if (vecSend[i].fSubtractFeeFromAmount)
- {
- txNew.vout[i].nValue -= nDust;
- if (IsDust(txNew.vout[i], ::dustRelayFee))
- {
- strFailReason = _("The transaction amount is too small to send after the fee has been deducted");
- return false;
- }
- break;
- }
- }
- }
-
// Never create dust outputs; if we would, just
// add the dust to the fee.
if (IsDust(newTxOut, ::dustRelayFee))
@@ -2706,9 +2711,7 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
return false;
}
- unsigned int nBytes = GetVirtualTransactionSize(txNew);
-
- CTransaction txNewConst(txNew);
+ nBytes = GetVirtualTransactionSize(txNew);
// Remove scriptSigs to eliminate the fee calculation dummy signatures
for (auto& vin : txNew.vin) {
@@ -2721,7 +2724,7 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
if (coinControl && coinControl->nConfirmTarget > 0)
currentConfirmationTarget = coinControl->nConfirmTarget;
- CAmount nFeeNeeded = GetMinimumFee(nBytes, currentConfirmationTarget, ::mempool, ::feeEstimator);
+ CAmount nFeeNeeded = GetMinimumFee(nBytes, currentConfirmationTarget, ::mempool, ::feeEstimator, &feeCalc);
if (coinControl && coinControl->fOverrideFeeRate)
nFeeNeeded = coinControl->nFeeRate.GetFee(nBytes);
@@ -2818,6 +2821,15 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
return false;
}
}
+
+ LogPrintf("Fee Calculation: Fee:%d Bytes:%u Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
+ nFeeRet, nBytes, feeCalc.returnedTarget, feeCalc.desiredTarget, StringForFeeReason(feeCalc.reason), feeCalc.est.decay,
+ feeCalc.est.pass.start, feeCalc.est.pass.end,
+ 100 * feeCalc.est.pass.withinTarget / (feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool),
+ feeCalc.est.pass.withinTarget, feeCalc.est.pass.totalConfirmed, feeCalc.est.pass.inMempool, feeCalc.est.pass.leftMempool,
+ feeCalc.est.fail.start, feeCalc.est.fail.end,
+ 100 * feeCalc.est.fail.withinTarget / (feeCalc.est.fail.totalConfirmed + feeCalc.est.fail.inMempool + feeCalc.est.fail.leftMempool),
+ feeCalc.est.fail.withinTarget, feeCalc.est.fail.totalConfirmed, feeCalc.est.fail.inMempool, feeCalc.est.fail.leftMempool);
return true;
}
@@ -2893,23 +2905,32 @@ CAmount CWallet::GetRequiredFee(unsigned int nTxBytes)
return std::max(minTxFee.GetFee(nTxBytes), ::minRelayTxFee.GetFee(nTxBytes));
}
-CAmount CWallet::GetMinimumFee(unsigned int nTxBytes, unsigned int nConfirmTarget, const CTxMemPool& pool, const CBlockPolicyEstimator& estimator, bool ignoreGlobalPayTxFee)
+CAmount CWallet::GetMinimumFee(unsigned int nTxBytes, unsigned int nConfirmTarget, const CTxMemPool& pool, const CBlockPolicyEstimator& estimator, FeeCalculation *feeCalc, bool ignoreGlobalPayTxFee)
{
// payTxFee is the user-set global for desired feerate
CAmount nFeeNeeded = payTxFee.GetFee(nTxBytes);
// User didn't set: use -txconfirmtarget to estimate...
if (nFeeNeeded == 0 || ignoreGlobalPayTxFee) {
- int estimateFoundTarget = nConfirmTarget;
- nFeeNeeded = estimator.estimateSmartFee(nConfirmTarget, &estimateFoundTarget, pool).GetFee(nTxBytes);
+ nFeeNeeded = estimator.estimateSmartFee(nConfirmTarget, feeCalc, pool, true).GetFee(nTxBytes);
// ... unless we don't have enough mempool data for estimatefee, then use fallbackFee
- if (nFeeNeeded == 0)
+ if (nFeeNeeded == 0) {
nFeeNeeded = fallbackFee.GetFee(nTxBytes);
+ if (feeCalc) feeCalc->reason = FeeReason::FALLBACK;
+ }
+ } else {
+ if (feeCalc) feeCalc->reason = FeeReason::PAYTXFEE;
}
// prevent user from paying a fee below minRelayTxFee or minTxFee
- nFeeNeeded = std::max(nFeeNeeded, GetRequiredFee(nTxBytes));
+ CAmount requiredFee = GetRequiredFee(nTxBytes);
+ if (requiredFee > nFeeNeeded) {
+ nFeeNeeded = requiredFee;
+ if (feeCalc) feeCalc->reason = FeeReason::REQUIRED;
+ }
// But always obey the maximum
- if (nFeeNeeded > maxTxFee)
+ if (nFeeNeeded > maxTxFee) {
nFeeNeeded = maxTxFee;
+ if (feeCalc) feeCalc->reason = FeeReason::MAXTXFEE;
+ }
return nFeeNeeded;
}
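
The restructuring above exists so that every branch records a FeeReason for the new fee-calculation log line. Restated as an illustrative Python sketch (not the C++ implementation; the reason labels are approximations of the FeeReason values):

```python
def minimum_fee(tx_bytes, pay_tx_fee_rate, estimated_rate, fallback_rate,
                required_fee, max_tx_fee, ignore_global_pay_tx_fee=False):
    """Mirror the selection order sketched from CWallet::GetMinimumFee()."""
    if pay_tx_fee_rate and not ignore_global_pay_tx_fee:
        fee, reason = pay_tx_fee_rate * tx_bytes, "PAYTXFEE"
    else:
        fee, reason = estimated_rate * tx_bytes, "ESTIMATE"
        if fee == 0:                       # not enough mempool data to estimate
            fee, reason = fallback_rate * tx_bytes, "FALLBACK"
    if required_fee > fee:                 # never pay below the required minimum
        fee, reason = required_fee, "REQUIRED"
    if fee > max_tx_fee:                   # but always obey the hard maximum
        fee, reason = max_tx_fee, "MAXTXFEE"
    return fee, reason
```
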
@@ -3847,11 +3868,11 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile)
else if (IsArgSet("-usehd")) {
bool useHD = GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET);
if (walletInstance->IsHDEnabled() && !useHD) {
- InitError(strprintf(_("Error loading %s: You can't disable HD on a already existing HD wallet"), walletFile));
+ InitError(strprintf(_("Error loading %s: You can't disable HD on an already existing HD wallet"), walletFile));
return NULL;
}
if (!walletInstance->IsHDEnabled() && useHD) {
- InitError(strprintf(_("Error loading %s: You can't enable HD on a already existing non-HD wallet"), walletFile));
+ InitError(strprintf(_("Error loading %s: You can't enable HD on an already existing non-HD wallet"), walletFile));
return NULL;
}
}
@@ -3871,7 +3892,7 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile)
if (chainActive.Tip() && chainActive.Tip() != pindexRescan)
{
//We can't rescan beyond non-pruned blocks, stop and throw an error
- //this might happen if a user uses a old wallet within a pruned node
+ //this might happen if a user uses an old wallet within a pruned node
// or if he ran -disablewallet for a longer time, then decided to re-enable
if (fPruneMode)
{
@@ -3887,6 +3908,13 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile)
uiInterface.InitMessage(_("Rescanning..."));
LogPrintf("Rescanning last %i blocks (from block %i)...\n", chainActive.Height() - pindexRescan->nHeight, pindexRescan->nHeight);
+
+ // No need to read and scan block if block was created before
+ // our wallet birthday (as adjusted for block time variability)
+ while (pindexRescan && walletInstance->nTimeFirstKey && (pindexRescan->GetBlockTime() < (walletInstance->nTimeFirstKey - TIMESTAMP_WINDOW))) {
+ pindexRescan = chainActive.Next(pindexRescan);
+ }
+
nStart = GetTimeMillis();
walletInstance->ScanForWalletTransactions(pindexRescan, true);
LogPrintf(" rescan %15dms\n", GetTimeMillis() - nStart);
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 6c6eb69180..4f558adc77 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -68,6 +68,8 @@ static const bool DEFAULT_USE_HD_WALLET = true;
extern const char * DEFAULT_WALLET_DAT;
+static const int64_t TIMESTAMP_MIN = 0;
+
class CBlockIndex;
class CCoinControl;
class COutput;
@@ -77,6 +79,7 @@ class CScheduler;
class CTxMemPool;
class CBlockPolicyEstimator;
class CWalletTx;
+struct FeeCalculation;
/** (client) version numbers for particular wallet features */
enum WalletFeature
@@ -918,6 +921,7 @@ public:
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex *pindex, const std::vector<CTransactionRef>& vtxConflicted) override;
void BlockDisconnected(const std::shared_ptr<const CBlock>& pblock) override;
bool AddToWalletIfInvolvingMe(const CTransactionRef& tx, const CBlockIndex* pIndex, int posInBlock, bool fUpdate);
+ int64_t RescanFromTime(int64_t startTime, bool update);
CBlockIndex* ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate = false);
void ReacceptWalletTransactions();
void ResendWalletTransactions(int64_t nBestBlockTime, CConnman* connman) override;
@@ -959,7 +963,7 @@ public:
* Estimate the minimum fee considering user set parameters
* and the required fee
*/
- static CAmount GetMinimumFee(unsigned int nTxBytes, unsigned int nConfirmTarget, const CTxMemPool& pool, const CBlockPolicyEstimator& estimator, bool ignoreGlobalPayTxFee = false);
+ static CAmount GetMinimumFee(unsigned int nTxBytes, unsigned int nConfirmTarget, const CTxMemPool& pool, const CBlockPolicyEstimator& estimator, FeeCalculation *feeCalc = nullptr, bool ignoreGlobalPayTxFee = false);
/**
* Return the minimum required fee taking into account the
* floating relay fee and user set minimum transaction fee
@@ -1021,7 +1025,7 @@ public:
}
}
- void GetScriptForMining(std::shared_ptr<CReserveScript> &script) override;
+ void GetScriptForMining(std::shared_ptr<CReserveScript> &script);
unsigned int GetKeyPoolSize()
{
@@ -1149,7 +1153,7 @@ public:
void ReturnKey();
bool GetReservedKey(CPubKey &pubkey, bool internal = false);
void KeepKey();
- void KeepScript() { KeepKey(); }
+ void KeepScript() override { KeepKey(); }
};
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index bf4798264c..deb09a4771 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -75,8 +75,6 @@ bool CWalletDB::WriteCryptedKey(const CPubKey& vchPubKey,
const std::vector<unsigned char>& vchCryptedSecret,
const CKeyMetadata &keyMeta)
{
- const bool fEraseUnencryptedKey = true;
-
if (!WriteIC(std::make_pair(std::string("keymeta"), vchPubKey), keyMeta)) {
return false;
}
@@ -84,12 +82,8 @@ bool CWalletDB::WriteCryptedKey(const CPubKey& vchPubKey,
if (!WriteIC(std::make_pair(std::string("ckey"), vchPubKey), vchCryptedSecret, false)) {
return false;
}
- if (fEraseUnencryptedKey)
- {
- EraseIC(std::make_pair(std::string("key"), vchPubKey));
- EraseIC(std::make_pair(std::string("wkey"), vchPubKey));
- }
-
+ EraseIC(std::make_pair(std::string("key"), vchPubKey));
+ EraseIC(std::make_pair(std::string("wkey"), vchPubKey));
return true;
}
diff --git a/src/zmq/zmqpublishnotifier.h b/src/zmq/zmqpublishnotifier.h
index bcbecf1bde..1790fe5698 100644
--- a/src/zmq/zmqpublishnotifier.h
+++ b/src/zmq/zmqpublishnotifier.h
@@ -24,32 +24,32 @@ public:
*/
bool SendMessage(const char *command, const void* data, size_t size);
- bool Initialize(void *pcontext);
- void Shutdown();
+ bool Initialize(void *pcontext) override;
+ void Shutdown() override;
};
class CZMQPublishHashBlockNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyBlock(const CBlockIndex *pindex);
+ bool NotifyBlock(const CBlockIndex *pindex) override;
};
class CZMQPublishHashTransactionNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyTransaction(const CTransaction &transaction);
+ bool NotifyTransaction(const CTransaction &transaction) override;
};
class CZMQPublishRawBlockNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyBlock(const CBlockIndex *pindex);
+ bool NotifyBlock(const CBlockIndex *pindex) override;
};
class CZMQPublishRawTransactionNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyTransaction(const CTransaction &transaction);
+ bool NotifyTransaction(const CTransaction &transaction) override;
};
#endif // BITCOIN_ZMQ_ZMQPUBLISHNOTIFIER_H
diff --git a/test/README.md b/test/README.md
index 4dd512638d..15f6df790f 100644
--- a/test/README.md
+++ b/test/README.md
@@ -15,84 +15,152 @@ The util tests are run as part of `make check` target. The functional
tests are run by the travis continuous build process whenever a pull
request is opened. Both sets of tests can also be run locally.
-Functional Test dependencies
-============================
+# Running tests locally
+
+Build for your system first. Be sure to enable wallet, utils and daemon when you configure. Tests will not run otherwise.
+
+### Functional tests
+
+#### Dependencies
+
The ZMQ functional test requires a python ZMQ library. To install it:
- on Unix, run `sudo apt-get install python3-zmq`
- on macOS, run `pip3 install pyzmq`
-Running tests locally
-=====================
+#### Running the tests
-Build for your system first. Be sure to enable wallet, utils and daemon when you configure. Tests will not run otherwise.
+Individual tests can be run by directly calling the test script, eg:
-Functional tests
-----------------
+```
+test/functional/replace-by-fee.py
+```
-You can run any single test by calling
+or can be run through the test_runner harness, eg:
- test/functional/test_runner.py <testname>
+```
+test/functional/test_runner.py replace-by-fee.py
+```
-Or you can run any combination (incl. duplicates) of tests by calling
+You can run any combination (incl. duplicates) of tests by calling:
- test/functional/test_runner.py <testname1> <testname2> <testname3> ...
+```
+test/functional/test_runner.py <testname1> <testname2> <testname3> ...
+```
-Run the regression test suite with
+Run the regression test suite with:
- test/functional/test_runner.py
+```
+test/functional/test_runner.py
+```
Run all possible tests with
- test/functional/test_runner.py --extended
+```
+test/functional/test_runner.py --extended
+```
+
+By default, up to 4 tests will be run in parallel by test_runner. To specify
+how many jobs to run, append `--jobs=n`
-By default, tests will be run in parallel. To specify how many jobs to run,
-append `--jobs=n` (default n=4).
+The individual tests and the test_runner harness have many command-line
+options. Run `test_runner.py -h` to see them all.
-If you want to create a basic coverage report for the RPC test suite, append `--coverage`.
+#### Troubleshooting and debugging test failures
-Possible options, which apply to each individual test run:
+##### Resource contention
+The P2P and RPC ports used by the bitcoind nodes-under-test are chosen to make
+conflicts with other processes unlikely. However, if there is another bitcoind
+process running on the system (perhaps from a previous test which hasn't successfully
+killed all its bitcoind nodes), then there may be a port conflict which will
+cause the test to fail. It is recommended that you run the tests on a system
+where no other bitcoind processes are running.
+
+On linux, the test_framework will warn if there is another
+bitcoind process running when the tests are started.
+
+If there are zombie bitcoind processes after test failure, you can kill them
+by running the following commands. **Note that these commands will kill all
+bitcoind processes running on the system, so should not be used if any non-test
+bitcoind processes are being run.**
+
+```bash
+killall bitcoind
```
- -h, --help show this help message and exit
- --nocleanup Leave bitcoinds and test.* datadir on exit or error
- --noshutdown Don't stop bitcoinds after the test execution
- --srcdir=SRCDIR Source directory containing bitcoind/bitcoin-cli
- (default: ../../src)
- --tmpdir=TMPDIR Root directory for datadirs
- --tracerpc Print out all RPC calls as they are made
- --coveragedir=COVERAGEDIR
- Write tested RPC commands into this directory
-```
-If you set the environment variable `PYTHON_DEBUG=1` you will get some debug
-output (example: `PYTHON_DEBUG=1 test/functional/test_runner.py wallet`).
+or
+
+```bash
+pkill -9 bitcoind
+```
-A 200-block -regtest blockchain and wallets for four nodes
-is created the first time a regression test is run and
-is stored in the cache/ directory. Each node has 25 mature
-blocks (25*50=1250 BTC) in its wallet.
-After the first run, the cache/ blockchain and wallets are
-copied into a temporary directory and used as the initial
-test state.
+##### Data directory cache
-If you get into a bad state, you should be able
-to recover with:
+A pre-mined blockchain with 200 blocks is generated the first time a
+functional test is run and is stored in test/cache. This speeds up
+test startup times since new blockchains don't need to be generated for
+each test. However, the cache may get into a bad state, in which case
+tests will fail. If this happens, remove the cache directory (and make
+sure bitcoind processes are stopped as above):
```bash
rm -rf cache
killall bitcoind
```
-Util tests
-----------
+##### Test logging
+
+The tests contain logging at different levels (debug, info, warning, etc). By
+default:
+
+- when run through the test_runner harness, *all* logs are written to
+ `test_framework.log` and no logs are output to the console.
+- when run directly, *all* logs are written to `test_framework.log` and INFO
+ level and above are output to the console.
+- when run on Travis, no logs are output to the console. However, if a test
+ fails, the `test_framework.log` and bitcoind `debug.log`s will all be dumped
+ to the console to help troubleshooting.
+
+To change the level of logs output to the console, use the `-l` command line
+argument.
+
+`test_framework.log` and bitcoind `debug.log`s can be combined into a single
+aggregate log by running the `combine_logs.py` script. The output can be plain
+text, colorized text or html. For example:
+
+```
+combine_logs.py -c <test data directory> | less -r
+```
+
+will pipe the colorized logs from the test into less.
+
+Use `--tracerpc` to trace out all the RPC calls and responses to the console. For
+some tests (eg any that use `submitblock` to submit a full block over RPC),
+this can result in a lot of screen output.
+
+By default, the test data directory will be deleted after a successful run.
+Use `--nocleanup` to leave the test data directory intact. The test data
+directory is never deleted after a failed test.
+
+##### Attaching a debugger
+
+A python debugger can be attached to tests at any point. Just add the line:
+
+```py
+import pdb; pdb.set_trace()
+```
+
+anywhere in the test. You will then be able to inspect variables, as well as
+call methods that interact with the bitcoind nodes-under-test.
+
+### Util tests
Util tests can be run locally by running `test/util/bitcoin-util-test.py`.
Use the `-v` option for verbose output.
-Writing functional tests
-========================
+# Writing functional tests
You are encouraged to write functional tests for new or existing features.
Further information about the functional test framework and individual
diff --git a/test/functional/README.md b/test/functional/README.md
index e6c4849702..96fe0becce 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -1,108 +1,154 @@
-Regression tests
-================
+# Functional tests
-### [test_framework/authproxy.py](test_framework/authproxy.py)
-Taken from the [python-bitcoinrpc repository](https://github.com/jgarzik/python-bitcoinrpc).
+### Writing Functional Tests
-### [test_framework/test_framework.py](test_framework/test_framework.py)
-Base class for new regression tests.
+#### Example test
-### [test_framework/util.py](test_framework/util.py)
-Generally useful functions.
+The [example_test.py](example_test.py) is a heavily commented example of a test case that uses both
+the RPC and P2P interfaces. If you are writing your first test, copy that file
+and modify it to fit your needs.
-### [test_framework/mininode.py](test_framework/mininode.py)
-Basic code to support p2p connectivity to a bitcoind.
+#### Coverage
-### [test_framework/comptool.py](test_framework/comptool.py)
-Framework for comparison-tool style, p2p tests.
+Running `test_runner.py` with the `--coverage` argument tracks which RPCs are
+called by the tests and prints a report of uncovered RPCs in the summary. This
+can be used (along with the `--extended` argument) to find out which RPCs we
+don't have test cases for.
-### [test_framework/script.py](test_framework/script.py)
-Utilities for manipulating transaction scripts (originally from python-bitcoinlib)
+#### Style guidelines
-### [test_framework/blockstore.py](test_framework/blockstore.py)
-Implements disk-backed block and tx storage.
+- Where possible, try to adhere to [PEP-8 guidelines](https://www.python.org/dev/peps/pep-0008/)
+- Use a python linter like flake8 before submitting PRs to catch common style
+ nits (eg trailing whitespace, unused imports, etc)
+- Avoid wildcard imports where possible
+- Use a module-level docstring to describe what the test is testing, and how it
+ is testing it.
+- When subclassing the BitcoinTestFramework, place overrides for the
+  `__init__()` and `setup_xxxx()` methods at the top of the subclass, then
+ locally-defined helper methods, then the `run_test()` method.
-### [test_framework/key.py](test_framework/key.py)
-Wrapper around OpenSSL EC_Key (originally from python-bitcoinlib)
+#### General test-writing advice
-### [test_framework/bignum.py](test_framework/bignum.py)
-Helpers for script.py
+- Set `self.num_nodes` to the minimum number of nodes necessary for the test.
+ Having additional unrequired nodes adds to the execution time of the test as
+ well as memory/CPU/disk requirements (which is important when running tests in
+ parallel or on Travis).
+- Avoid stop-starting the nodes multiple times during the test if possible. A
+ stop-start takes several seconds, so doing it several times blows up the
+ runtime of the test.
+- Set the `self.setup_clean_chain` variable in `__init__()` to control whether
+ or not to use the cached data directories. The cached data directories
+ contain a 200-block pre-mined blockchain and wallets for four nodes. Each node
+ has 25 mature blocks (25x50=1250 BTC) in its wallet.
+- When calling RPCs with lots of arguments, consider using named keyword
+ arguments instead of positional arguments to make the intent of the call
+ clear to readers.
-### [test_framework/blocktools.py](test_framework/blocktools.py)
-Helper functions for creating blocks and transactions.
+#### RPC and P2P definitions
-P2P test design notes
----------------------
+Test writers may find it helpful to refer to the definitions for the RPC and
+P2P messages. These can be found in the following source files:
-## Mininode
+- `/src/rpc/*` for RPCs
+- `/src/wallet/rpc*` for wallet RPCs
+- `ProcessMessage()` in `/src/net_processing.cpp` for parsing P2P messages
-* ```mininode.py``` contains all the definitions for objects that pass
-over the network (```CBlock```, ```CTransaction```, etc, along with the network-level
-wrappers for them, ```msg_block```, ```msg_tx```, etc).
+#### Using the P2P interface
-* P2P tests have two threads. One thread handles all network communication
+- `mininode.py` contains all the definitions for objects that pass
+over the network (`CBlock`, `CTransaction`, etc, along with the network-level
+wrappers for them, `msg_block`, `msg_tx`, etc).
+
+- P2P tests have two threads. One thread handles all network communication
with the bitcoind(s) being tested (using python's asyncore package); the other
implements the test logic.
-* ```NodeConn``` is the class used to connect to a bitcoind. If you implement
-a callback class that derives from ```NodeConnCB``` and pass that to the
-```NodeConn``` object, your code will receive the appropriate callbacks when
+- `NodeConn` is the class used to connect to a bitcoind. If you implement
+a callback class that derives from `NodeConnCB` and pass that to the
+`NodeConn` object, your code will receive the appropriate callbacks when
events of interest arrive.
-* You can pass the same handler to multiple ```NodeConn```'s if you like, or pass
-different ones to each -- whatever makes the most sense for your test.
-
-* Call ```NetworkThread.start()``` after all ```NodeConn``` objects are created to
+- Call `NetworkThread.start()` after all `NodeConn` objects are created to
start the networking thread. (Continue with the test logic in your existing
thread.)
-* RPC calls are available in p2p tests.
+- The P2P interface can be used to write tests where specific P2P protocol behavior is
+tested; a minimal sketch follows below. Example tests are `p2p-accept-block.py` and
+`p2p-compactblocks.py`.
-* Can be used to write free-form tests, where specific p2p-protocol behavior
-is tested. Examples: ```p2p-accept-block.py```, ```p2p-compactblocks.py```.
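
A minimal sketch of the pattern described above, assuming a running test with `self.nodes[0]`; helper names such as `add_connection` and `wait_for_verack` reflect the framework at the time and may differ in other versions:

```python
from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread
from test_framework.util import p2p_port

class MyP2PNode(NodeConnCB):
    def on_inv(self, conn, message):
        # Record inv announcements from the node under test
        self.last_inv = message

# Inside run_test():
p2p_node = MyP2PNode()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], p2p_node)
p2p_node.add_connection(connection)
NetworkThread().start()          # start the shared networking thread
p2p_node.wait_for_verack()       # handshake done; continue with test logic
```
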
+#### Comptool
-## Comptool
+- Comptool is a testing framework for writing tests that compare the block/tx acceptance
+behavior of a bitcoind against one or more other bitcoind instances. It should not be used
+to write static tests with known outcomes, since that type of test is easier to write and
+maintain using the standard BitcoinTestFramework.
-* Testing framework for writing tests that compare the block/tx acceptance
-behavior of a bitcoind against 1 or more other bitcoind instances, or against
-known outcomes, or both.
-
-* Set the ```num_nodes``` variable (defined in ```ComparisonTestFramework```) to start up
-1 or more nodes. If using 1 node, then ```--testbinary``` can be used as a command line
+- Set the `num_nodes` variable (defined in `ComparisonTestFramework`) to start up
+1 or more nodes. If using 1 node, then `--testbinary` can be used as a command line
option to change the bitcoind binary used by the test. If using 2 or more nodes,
-then ```--refbinary``` can be optionally used to change the bitcoind that will be used
+then `--refbinary` can be optionally used to change the bitcoind that will be used
on nodes 2 and up.
-* Implement a (generator) function called ```get_tests()``` which yields ```TestInstance```s.
-Each ```TestInstance``` consists of:
- - a list of ```[object, outcome, hash]``` entries
- * ```object``` is a ```CBlock```, ```CTransaction```, or
- ```CBlockHeader```. ```CBlock```'s and ```CTransaction```'s are tested for
- acceptance. ```CBlockHeader```s can be used so that the test runner can deliver
+- Implement a (generator) function called `get_tests()` which yields `TestInstance`s.
+Each `TestInstance` consists of:
+ - a list of `[object, outcome, hash]` entries
+ * `object` is a `CBlock`, `CTransaction`, or
+ `CBlockHeader`. `CBlock`'s and `CTransaction`'s are tested for
+ acceptance. `CBlockHeader`s can be used so that the test runner can deliver
complete headers-chains when requested from the bitcoind, to allow writing
tests where blocks can be delivered out of order but still processed by
headers-first bitcoind's.
- * ```outcome``` is ```True```, ```False```, or ```None```. If ```True```
- or ```False```, the tip is compared with the expected tip -- either the
+ * `outcome` is `True`, `False`, or `None`. If `True`
+ or `False`, the tip is compared with the expected tip -- either the
block passed in, or the hash specified as the optional 3rd entry. If
- ```None``` is specified, then the test will compare all the bitcoind's
+ `None` is specified, then the test will compare all the bitcoind's
being tested to see if they all agree on what the best tip is.
- * ```hash``` is the block hash of the tip to compare against. Optional to
+ * `hash` is the block hash of the tip to compare against. Optional to
specify; if left out then the hash of the block passed in will be used as
the expected tip. This allows for specifying an expected tip while testing
the handling of either invalid blocks or blocks delivered out of order,
which complete a longer chain.
- - ```sync_every_block```: ```True/False```. If ```False```, then all blocks
+ - `sync_every_block`: `True/False`. If `False`, then all blocks
are inv'ed together, and the test runner waits until the node receives the
last one, and tests only the last block for tip acceptance using the
- outcome and specified tip. If ```True```, then each block is tested in
+ outcome and specified tip. If `True`, then each block is tested in
sequence and synced (this is slower when processing many blocks).
- - ```sync_every_transaction```: ```True/False```. Analogous to
- ```sync_every_block```, except if the outcome on the last tx is "None",
+ - `sync_every_transaction`: `True/False`. Analogous to
+ `sync_every_block`, except if the outcome on the last tx is "None",
then the contents of the entire mempool are compared across all bitcoind
- connections. If ```True``` or ```False```, then only the last tx's
+ connections. If `True` or `False`, then only the last tx's
acceptance is tested against the given outcome.
-* For examples of tests written in this framework, see
- ```invalidblockrequest.py``` and ```p2p-fullblocktest.py```.
+- For examples of tests written in this framework, see
+  `invalidblockrequest.py` and `p2p-fullblocktest.py`. A minimal `get_tests()` sketch follows below.
+
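
Putting the list above together, the core of a comptool test is a `get_tests()` generator. An illustrative fragment, assuming a `ComparisonTestFramework` subclass wired up as in the example tests named above:

```python
from test_framework.blocktools import create_block, create_coinbase
from test_framework.comptool import TestInstance

def get_tests(self):
    # Build one block on the current tip and expect it to become the new tip
    best_hash = self.nodes[0].getbestblockhash()
    tip = int(best_hash, 16)
    height = self.nodes[0].getblockcount() + 1
    block_time = self.nodes[0].getblockheader(best_hash)['time'] + 1
    block = create_block(tip, create_coinbase(height), block_time)
    block.solve()
    # Each entry is [object, outcome]; True means the block must be accepted
    yield TestInstance([[block, True]])
```
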
+### test-framework modules
+
+#### [test_framework/authproxy.py](test_framework/authproxy.py)
+Taken from the [python-bitcoinrpc repository](https://github.com/jgarzik/python-bitcoinrpc).
+
+#### [test_framework/test_framework.py](test_framework/test_framework.py)
+Base class for functional tests.
+
+#### [test_framework/util.py](test_framework/util.py)
+Generally useful functions.
+
+#### [test_framework/mininode.py](test_framework/mininode.py)
+Basic code to support P2P connectivity to a bitcoind.
+
+#### [test_framework/comptool.py](test_framework/comptool.py)
+Framework for comparison-tool style, P2P tests.
+
+#### [test_framework/script.py](test_framework/script.py)
+Utilities for manipulating transaction scripts (originally from python-bitcoinlib)
+
+#### [test_framework/blockstore.py](test_framework/blockstore.py)
+Implements disk-backed block and tx storage.
+
+#### [test_framework/key.py](test_framework/key.py)
+Wrapper around OpenSSL EC_Key (originally from python-bitcoinlib)
+
+#### [test_framework/bignum.py](test_framework/bignum.py)
+Helpers for script.py
+
+#### [test_framework/blocktools.py](test_framework/blocktools.py)
+Helper functions for creating blocks and transactions.
diff --git a/test/functional/bip9-softforks.py b/test/functional/bip9-softforks.py
index b90b0ca628..f00232c9ff 100755
--- a/test/functional/bip9-softforks.py
+++ b/test/functional/bip9-softforks.py
@@ -15,6 +15,10 @@ mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
+from io import BytesIO
+import shutil
+import time
+import itertools
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
@@ -22,9 +26,6 @@ from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
-from io import BytesIO
-import time
-import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
diff --git a/test/functional/blockchain.py b/test/functional/blockchain.py
index 6aef6d4489..a7034e6bcd 100755
--- a/test/functional/blockchain.py
+++ b/test/functional/blockchain.py
@@ -18,10 +18,13 @@ Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
+import http.client
+import subprocess
-from test_framework.test_framework import BitcoinTestFramework
+from test_framework.test_framework import (BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT)
from test_framework.util import (
assert_equal,
+ assert_raises,
assert_raises_jsonrpc,
assert_is_hex_string,
assert_is_hash_string,
@@ -34,6 +37,7 @@ class BlockchainTest(BitcoinTestFramework):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
+ self.extra_args = [['-stopatheight=207']]
def run_test(self):
self._test_getchaintxstats()
@@ -41,7 +45,8 @@ class BlockchainTest(BitcoinTestFramework):
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
- self.nodes[0].verifychain(4, 0)
+ self._test_stopatheight()
+ assert self.nodes[0].verifychain(4, 0)
def _test_getchaintxstats(self):
chaintxstats = self.nodes[0].getchaintxstats(1)
@@ -129,5 +134,21 @@ class BlockchainTest(BitcoinTestFramework):
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
+ def _test_stopatheight(self):
+ assert_equal(self.nodes[0].getblockcount(), 200)
+ self.nodes[0].generate(6)
+ assert_equal(self.nodes[0].getblockcount(), 206)
+ self.log.debug('Node should not stop at this height')
+ assert_raises(subprocess.TimeoutExpired, lambda: self.bitcoind_processes[0].wait(timeout=3))
+ try:
+ self.nodes[0].generate(1)
+ except (ConnectionError, http.client.BadStatusLine):
+ pass # The node already shut down before response
+ self.log.debug('Node should stop at this height...')
+ self.bitcoind_processes[0].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
+ self.nodes[0] = self.start_node(0, self.options.tmpdir)
+ assert_equal(self.nodes[0].getblockcount(), 207)
+
+
if __name__ == '__main__':
BlockchainTest().main()
diff --git a/test/functional/bumpfee.py b/test/functional/bumpfee.py
index d42bab6cbf..9237f09240 100755
--- a/test/functional/bumpfee.py
+++ b/test/functional/bumpfee.py
@@ -10,7 +10,7 @@ its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
-added in the the future, they should try to follow the same convention and not
+added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
@@ -42,7 +42,7 @@ class BumpFeeTest(BitcoinTestFramework):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
- bitcoind_processes[1].wait()
+ self.bitcoind_processes[1].wait()
self.nodes[1] = self.start_node(1, self.options.tmpdir, extra_args[1])
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
diff --git a/test/functional/dbcrash.py b/test/functional/dbcrash.py
new file mode 100755
index 0000000000..8339305f5e
--- /dev/null
+++ b/test/functional/dbcrash.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test recovery from a crash during chainstate writing.
+
+- 4 nodes
+ * node0, node1, and node2 will have different dbcrash ratios, and different
+ dbcache sizes
+ * node3 will be a regular node, with no crashing.
+ * The nodes will not connect to each other.
+
+- use default test framework starting chain. initialize starting_tip_height to
+ tip height.
+
+- Main loop:
+ * generate lots of transactions on node3, enough to fill up a block.
+ * uniformly randomly pick a tip height from starting_tip_height to
+ tip_height; with probability 1/(height_difference+4), invalidate this block.
+ * mine enough blocks to overtake tip_height at start of loop.
+ * for each node in [node0,node1,node2]:
+ - for each mined block:
+ * submit block to node
+ * if node crashed on/after submitting:
+ - restart until recovery succeeds
+ - check that utxo matches node3 using gettxoutsetinfo"""
+
+import errno
+import http.client
+import random
+import sys
+import time
+
+from test_framework.mininode import *
+from test_framework.script import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest]
+try:
+ HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected)
+except AttributeError:
+ pass
+
+class ChainstateWriteCrashTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
+ # Set -rpcservertimeout=900 to reduce socket disconnects in this
+ # long-running test
+ self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"]
+
+ # Set different crash ratios and cache sizes. Note that not all of
+ # -dbcache goes to pcoinsTip.
+ self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
+ self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
+ self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args
+
+ # Node3 is a normal node with default args, except will mine full blocks
+ self.node3_args = ["-blockmaxweight=4000000"]
+ self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
+
+ def setup_network(self):
+ self.setup_nodes()
+ # Leave them unconnected, we'll use submitblock directly in this test
+
+ def restart_node(self, node_index, expected_tip):
+ """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
+
+ Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
+ after 60 seconds. Returns the utxo hash of the given node."""
+
+ time_start = time.time()
+ while time.time() - time_start < 60:
+ try:
+ # Any of these RPC calls could throw due to node crash
+ self.nodes[node_index] = self.start_node(node_index, self.options.tmpdir, self.extra_args[node_index])
+ self.nodes[node_index].waitforblock(expected_tip)
+ utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
+ return utxo_hash
+ except:
+ # An exception here should mean the node is about to crash.
+ # If bitcoind exits, then try again. wait_for_node_exit()
+ # should raise an exception if bitcoind doesn't exit.
+ self.wait_for_node_exit(node_index, timeout=10)
+ self.crashed_on_restart += 1
+ time.sleep(1)
+
+ # If we got here, bitcoind isn't coming back up on restart. Could be a
+ # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio --
+ # perhaps we generated a test case that blew up our cache?
+ # TODO: If this happens a lot, we should try to restart without -dbcrashratio
+ # and make sure that recovery happens.
+        raise AssertionError("Unable to successfully restart node %d in allotted time" % node_index)
+
+ def submit_block_catch_error(self, node_index, block):
+ """Try submitting a block to the given node.
+
+ Catch any exceptions that indicate the node has crashed.
+ Returns true if the block was submitted successfully; false otherwise."""
+
+ try:
+ self.nodes[node_index].submitblock(block)
+ return True
+ except http.client.BadStatusLine as e:
+ # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
+ if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
+ self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+ return False
+ else:
+ raise
+ except tuple(HTTP_DISCONNECT_ERRORS) as e:
+ self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+ return False
+ except OSError as e:
+ self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+ if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
+ # The node has likely crashed
+ return False
+ else:
+ # Unexpected exception, raise
+ raise
+
+ def sync_node3blocks(self, block_hashes):
+ """Use submitblock to sync node3's chain with the other nodes
+
+ If submitblock fails, restart the node and get the new utxo hash.
+ If any nodes crash while updating, we'll compare utxo hashes to
+ ensure recovery was successful."""
+
+ node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
+
+ # Retrieve all the blocks from node3
+ blocks = []
+ for block_hash in block_hashes:
+ blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
+
+ # Deliver each block to each other node
+ for i in range(3):
+ nodei_utxo_hash = None
+ self.log.debug("Syncing blocks to node %d", i)
+ for (block_hash, block) in blocks:
+ # Get the block from node3, and submit to node_i
+ self.log.debug("submitting block %s", block_hash)
+ if not self.submit_block_catch_error(i, block):
+ # TODO: more carefully check that the crash is due to -dbcrashratio
+ # (change the exit code perhaps, and check that here?)
+ self.wait_for_node_exit(i, timeout=30)
+ self.log.debug("Restarting node %d after block hash %s", i, block_hash)
+ nodei_utxo_hash = self.restart_node(i, block_hash)
+ assert nodei_utxo_hash is not None
+ self.restart_counts[i] += 1
+ else:
+ # Clear it out after successful submitblock calls -- the cached
+ # utxo hash will no longer be correct
+ nodei_utxo_hash = None
+
+ # Check that the utxo hash matches node3's utxo set
+ # NOTE: we only check the utxo set if we had to restart the node
+ # after the last block submitted:
+ # - checking the utxo hash causes a cache flush, which we don't
+ # want to do every time; so
+ # - we only update the utxo cache after a node restart, since flushing
+ # the cache is a no-op at that point
+ if nodei_utxo_hash is not None:
+ self.log.debug("Checking txoutsetinfo matches for node %d", i)
+ assert_equal(nodei_utxo_hash, node3_utxo_hash)
+
+ def verify_utxo_hash(self):
+ """Verify that the utxo hash of each node matches node3.
+
+ Restart any nodes that crash while querying."""
+ node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
+ self.log.info("Verifying utxo hash matches for all nodes")
+
+ for i in range(3):
+ try:
+ nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
+ except OSError:
+ # probably a crash on db flushing
+ nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
+ assert_equal(nodei_utxo_hash, node3_utxo_hash)
+
+ def generate_small_transactions(self, node, count, utxo_list):
+ """Generate up to `count` small two-input, three-output transactions spending from utxo_list."""
+ FEE = 1000 # TODO: replace this with node relay fee based calculation
+ num_transactions = 0
+ random.shuffle(utxo_list)
+ while len(utxo_list) >= 2 and num_transactions < count:
+ tx = CTransaction()
+ input_amount = 0
+ for i in range(2):
+ utxo = utxo_list.pop()
+ tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
+ input_amount += int(utxo['amount'] * COIN)
+ output_amount = (input_amount - FEE) // 3
+
+ if output_amount <= 0:
+ # Sanity check -- if we chose inputs that are too small, skip
+ continue
+
+ for i in range(3):
+ tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
+
+ # Sign and send the transaction to get into the mempool
+ tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
+ node.sendrawtransaction(tx_signed_hex)
+ num_transactions += 1
+
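A worked example of the arithmetic in generate_small_transactions (input values chosen purely for illustration):

input_amount = 2 * 1000000            # two 0.01 BTC inputs, in satoshis
FEE = 1000
output_amount = (input_amount - FEE) // 3
assert output_amount == 666333
# The three outputs carry 3 * 666333 = 1998999 satoshis, so the realized fee is
# 2000000 - 1998999 = 1001 satoshis: the intended 1000 plus 1 lost to integer division.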
+ def run_test(self):
+ # Track test coverage statistics
+ self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
+ self.crashed_on_restart = 0 # Track count of crashes during recovery
+
+ # Start by creating a lot of utxos on node3
+ initial_height = self.nodes[3].getblockcount()
+ utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
+ self.log.info("Prepped %d utxo entries", len(utxo_list))
+
+ # Sync these blocks with the other nodes
+ block_hashes_to_sync = []
+ for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
+ block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
+
+ self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
+ # Syncing the blocks could cause nodes to crash, so the test begins here.
+ self.sync_node3blocks(block_hashes_to_sync)
+
+ starting_tip_height = self.nodes[3].getblockcount()
+
+ # Main test loop:
+ # each time through the loop, generate a bunch of transactions,
+ # and then either mine a single new block on the tip or perform a reorg of some size.
+ for i in range(40):
+ self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
+ # Generate a bunch of small-ish transactions
+ self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
+ # Pick a random block between current tip, and starting tip
+ current_height = self.nodes[3].getblockcount()
+ random_height = random.randint(starting_tip_height, current_height)
+ self.log.debug("At height %d, considering height %d", current_height, random_height)
+ if random_height > starting_tip_height:
+ # Randomly reorg from this point with some probability (1/4 for
+ # tip, 1/5 for tip-1, ...)
+ if random.random() < 1.0 / (current_height + 4 - random_height):
+ self.log.debug("Invalidating block at height %d", random_height)
+ self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
+
+ # Now generate new blocks until we pass the old tip height
+ self.log.debug("Mining longer tip")
+ block_hashes = []
+ while current_height + 1 > self.nodes[3].getblockcount():
+ block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
+ self.log.debug("Syncing %d new blocks...", len(block_hashes))
+ self.sync_node3blocks(block_hashes)
+ utxo_list = self.nodes[3].listunspent()
+ self.log.debug("Node3 utxo count: %d", len(utxo_list))
+
+ # Check that the utxo hashes agree with node3
+ # Useful side effect: each utxo cache gets flushed here, so that we
+ # won't get crashes on shutdown at the end of the test.
+ self.verify_utxo_hash()
+
+ # Check the test coverage
+ self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
+
+ # If no nodes were restarted, we didn't test anything.
+ assert self.restart_counts != [0, 0, 0]
+
+ # Make sure we tested the case of crash-during-recovery.
+ assert self.crashed_on_restart > 0
+
+ # Warn if any of the nodes escaped restart.
+ for i in range(3):
+ if self.restart_counts[i] == 0:
+ self.log.warning("Node %d never crashed during utxo flush!", i)
+
+if __name__ == "__main__":
+ ChainstateWriteCrashTest().main()
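To illustrate the reorg-probability comment in run_test(): a candidate height h is drawn uniformly from [starting_tip_height, current_height] and the block at that height is invalidated with probability 1 / (current_height + 4 - h), i.e. 1/4 at the tip, 1/5 one block back, and so on. A standalone sketch with placeholder heights:

current_height = 220       # placeholder values for illustration only
starting_tip_height = 210
for h in range(current_height, starting_tip_height, -1):
    depth = current_height - h
    print("reorg depth %d: invalidated with probability 1/%d" % (depth, depth + 4))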
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
new file mode 100755
index 0000000000..1ba5f756cd
--- /dev/null
+++ b/test/functional/example_test.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""An example functional test
+
+The module-level docstring should include a high-level description of
+what the test is doing. It's the first thing people see when they open
+the file and should give the reader information about *what* the test
+is testing and *how* it's being tested.
+"""
+# Imports should be in PEP8 ordering (std library first, then third party
+# libraries then local imports).
+from collections import defaultdict
+
+# Avoid wildcard * imports if possible
+from test_framework.blocktools import (create_block, create_coinbase)
+from test_framework.mininode import (
+ CInv,
+ NetworkThread,
+ NodeConn,
+ NodeConnCB,
+ mininode_lock,
+ msg_block,
+ msg_getdata,
+ wait_until,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ connect_nodes,
+ p2p_port,
+)
+
+# NodeConnCB is a class containing callbacks to be executed when a P2P
+# message is received from the node-under-test. Subclass NodeConnCB and
+# override the on_*() methods if you need custom behaviour.
+class BaseNode(NodeConnCB):
+ def __init__(self):
+ """Initialize the NodeConnCB
+
+ Used to initialize custom properties for the Node that aren't
+ included by default in the base class. Be aware that the NodeConnCB
+ base class already stores a counter for each P2P message type and the
+ last received message of each type, which should be sufficient for the
+ needs of most tests.
+
+ Call super().__init__() first for standard initialization and then
+ initialize custom properties."""
+ super().__init__()
+ # Stores a dictionary of all blocks received
+ self.block_receive_map = defaultdict(int)
+
+ def on_block(self, conn, message):
+ """Override the standard on_block callback
+
+ Store the hash of a received block in the dictionary."""
+ message.block.calc_sha256()
+ self.block_receive_map[message.block.sha256] += 1
+
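Any other on_*() callback can be overridden in the same way. A hedged sketch (it assumes mininode dispatches "inv" messages to on_inv(conn, message) with message.inv being a list of CInv objects, analogous to on_block above):

class InvCountingNode(BaseNode):
    def __init__(self):
        super().__init__()
        self.inv_count_by_type = defaultdict(int)

    def on_inv(self, conn, message):
        # Count announced inventory items by their CInv type
        for inv in message.inv:
            self.inv_count_by_type[inv.type] += 1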
+def custom_function():
+ """Do some custom behaviour
+
+ If this function is more generally useful for other tests, consider
+ moving it to a module in test_framework."""
+ # self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework
+ pass
+
+class ExampleTest(BitcoinTestFramework):
+ # Each functional test is a subclass of the BitcoinTestFramework class.
+
+ # Override the __init__(), add_options(), setup_chain(), setup_network()
+ # and setup_nodes() methods to customize the test setup as required.
+
+ def __init__(self):
+ """Initialize the test
+
+ Call super().__init__() first, and then override any test parameters
+ for your individual test."""
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+ # Use self.extra_args to change command-line arguments for the nodes
+ self.extra_args = [[], ["-logips"], []]
+
+ # self.log.info("I've finished __init__") # Oops! Can't run self.log before run_test()
+
+ # Use add_options() to add specific command-line options for your test.
+ # In practice this is not used very much, since the tests are mostly written
+ # to be run in automated environments without command-line options.
+ # def add_options()
+ # pass
+
+ # Use setup_chain() to customize the node data directories. In practice
+ # this is not used very much since the default behaviour is almost always
+ # fine
+ # def setup_chain():
+ # pass
+
+ def setup_network(self):
+ """Setup the test network topology
+
+ Often you won't need to override this, since the standard network topology
+ (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
+
+ If you do override this method, remember to start the nodes, assign
+ them to self.nodes, connect them and then sync."""
+
+ self.setup_nodes()
+
+ # In this test, we're not connecting node2 to node0 or node1. Calls to
+ # sync_all() should not include node2, since we're not expecting it to
+ # sync.
+ connect_nodes(self.nodes[0], 1)
+ self.sync_all([self.nodes[0:2]])
+
+ # Use setup_nodes() to customize the node start behaviour (for example if
+ # you don't want to start all nodes at the start of the test).
+ # def setup_nodes():
+ # pass
+
+ def custom_method(self):
+ """Do some custom behaviour for this test
+
+ Define it in a method here because you're going to use it repeatedly.
+ If you think it's useful in general, consider moving it to the base
+ BitcoinTestFramework class so other tests can use it."""
+
+ self.log.info("Running custom_method")
+
+ def run_test(self):
+ """Main test logic"""
+
+ # Create a P2P connection to one of the nodes
+ node0 = BaseNode()
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
+ node0.add_connection(connections[0])
+
+ # Start up network handling in another thread. This needs to be called
+ # after the P2P connections have been created.
+ NetworkThread().start()
+ # wait_for_verack ensures that the P2P connection is fully up.
+ node0.wait_for_verack()
+
+ # Generating a block on one of the nodes will get us out of IBD
+ blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
+ self.sync_all([self.nodes[0:2]])
+
+ # Notice above how we called an RPC by calling a method with the same
+ # name on the node object. Notice also how we used a keyword argument
+ # to specify a named RPC argument. Neither of those are defined on the
+ # node object. Instead there's some __getattr__() magic going on under
+ # the covers to dispatch unrecognised attribute calls to the RPC
+ # interface.
+
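For instance (a hedged illustration that would sit inside run_test; the named argument 'height' is taken from getblockhash's help text), the positional and named forms dispatch identically:

genesis_by_position = self.nodes[0].getblockhash(0)
genesis_by_name = self.nodes[0].getblockhash(height=0)
assert_equal(genesis_by_position, genesis_by_name)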
+ # Logs are nice. Do plenty of them. They can be used in place of comments for
+ # breaking the test into sub-sections.
+ self.log.info("Starting test!")
+
+ self.log.info("Calling a custom function")
+ custom_function()
+
+ self.log.info("Calling a custom method")
+ self.custom_method()
+
+ self.log.info("Create some blocks")
+ self.tip = int(self.nodes[0].getbestblockhash(), 16)
+ self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
+
+ height = 1
+
+ for i in range(10):
+ # Use the mininode and blocktools functionality to manually build a block
+ # Calling the generate() rpc is easier, but this allows us to exactly
+ # control the blocks and transactions.
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ block.solve()
+ block_message = msg_block(block)
+ # Send message is used to send a P2P message to the node over our NodeConn connection
+ node0.send_message(block_message)
+ self.tip = block.sha256
+ blocks.append(self.tip)
+ self.block_time += 1
+ height += 1
+
+ self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
+ self.nodes[1].waitforblockheight(11)
+
+ self.log.info("Connect node2 and node1")
+ connect_nodes(self.nodes[1], 2)
+
+ self.log.info("Add P2P connection to node2")
+ node2 = BaseNode()
+ connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
+ node2.add_connection(connections[1])
+ node2.wait_for_verack()
+
+ self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")
+
+ for block in blocks:
+ getdata_request = msg_getdata()
+ getdata_request.inv.append(CInv(2, block))
+ node2.send_message(getdata_request)
+
+ # wait_until() will loop until a predicate condition is met. Use it to test properties of the
+ # NodeConnCB objects.
+ assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)
+
+ self.log.info("Check that each block was received only once")
+ # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
+ # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
+ # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
+ with mininode_lock:
+ for block in node2.block_receive_map.values():
+ assert_equal(block, 1)
+
+if __name__ == '__main__':
+ ExampleTest().main()
diff --git a/test/functional/fundrawtransaction.py b/test/functional/fundrawtransaction.py
index 0a3166b89b..0baab6d01c 100755
--- a/test/functional/fundrawtransaction.py
+++ b/test/functional/fundrawtransaction.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
-from test_framework.test_framework import BitcoinTestFramework
+from test_framework.test_framework import BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT
from test_framework.util import *
@@ -452,7 +452,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.stop_node(2)
self.stop_node(3)
self.nodes[1].encryptwallet("test")
- bitcoind_processes[1].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
+ self.bitcoind_processes[1].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
diff --git a/test/functional/keypool.py b/test/functional/keypool.py
index f23a427d1f..e8be559918 100755
--- a/test/functional/keypool.py
+++ b/test/functional/keypool.py
@@ -18,7 +18,7 @@ class KeyPoolTest(BitcoinTestFramework):
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
- bitcoind_processes[0].wait()
+ self.bitcoind_processes[0].wait()
# Restart node 0
nodes[0] = self.start_node(0, self.options.tmpdir)
# Keep creating keys
diff --git a/test/functional/listtransactions.py b/test/functional/listtransactions.py
index f69f1c5724..f75a8e29cc 100755
--- a/test/functional/listtransactions.py
+++ b/test/functional/listtransactions.py
@@ -23,7 +23,7 @@ class ListTransactionsTest(BitcoinTestFramework):
def setup_nodes(self):
#This test requires mocktime
- enable_mocktime()
+ self.enable_mocktime()
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
diff --git a/test/functional/merkle_blocks.py b/test/functional/merkle_blocks.py
index 06af72ef10..bcc65c8408 100755
--- a/test/functional/merkle_blocks.py
+++ b/test/functional/merkle_blocks.py
@@ -39,7 +39,8 @@ class MerkleBlockTest(BitcoinTestFramework):
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
- assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
+ # This will raise an exception because the transaction is not yet in a block
+ assert_raises_jsonrpc(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
@@ -56,7 +57,7 @@ class MerkleBlockTest(BitcoinTestFramework):
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
- self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
+ txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
@@ -64,17 +65,21 @@ class MerkleBlockTest(BitcoinTestFramework):
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
- assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
- # ...but we can if we specify the block
+ assert_raises_jsonrpc(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
+ # We can get the proof if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
- # ...or if the first tx is not fully-spent
+ # We can't get the proof if we specify a non-existent block
+ assert_raises_jsonrpc(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
+ # We can get the proof if the transaction is unspent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
- try:
- assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
- except JSONRPCException:
- assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
- # ...or if we have a -txindex
+ # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
+ assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
+ assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist))
+ # We can always get a proof if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
+ # We can't get a proof if we specify transactions from different blocks
+ assert_raises_jsonrpc(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
+
if __name__ == '__main__':
MerkleBlockTest().main()
diff --git a/test/functional/multi_rpc.py b/test/functional/multi_rpc.py
index 6ff91a960b..a30e15ace9 100755
--- a/test/functional/multi_rpc.py
+++ b/test/functional/multi_rpc.py
@@ -16,16 +16,21 @@ class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
- self.num_nodes = 1
+ self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
+ rpcuser = "rpcuser=rpcuser💻"
+ rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
+ with open(os.path.join(self.options.tmpdir+"/node1", "bitcoin.conf"), 'a', encoding='utf8') as f:
+ f.write(rpcuser+"\n")
+ f.write(rpcpassword+"\n")
def run_test(self):
@@ -50,7 +55,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
- assert_equal(resp.status==401, False)
+ assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
@@ -60,7 +65,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
- assert_equal(resp.status==401, False)
+ assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
@@ -71,7 +76,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
- assert_equal(resp.status==401, True)
+ assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
@@ -82,7 +87,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
- assert_equal(resp.status==401, True)
+ assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
@@ -93,7 +98,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
- assert_equal(resp.status==401, False)
+ assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
@@ -104,7 +109,46 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
- assert_equal(resp.status==401, True)
+ assert_equal(resp.status, 401)
+ conn.close()
+
+ ###############################################################
+ # Check correctness of the rpcuser/rpcpassword config options #
+ ###############################################################
+ url = urllib.parse.urlparse(self.nodes[1].url)
+
+ # rpcuser and rpcpassword authpair
+ rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
+
+ headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status, 200)
+ conn.close()
+
+ #Wrong login name with rpcuser's password
+ rpcuserauthpair = "rpcuserwrong:rpcpassword"
+ headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status, 401)
+ conn.close()
+
+ #Wrong password for rpcuser
+ rpcuserauthpair = "rpcuser:rpcpasswordwrong"
+ headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status, 401)
conn.close()
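The rpcauth= lines written to bitcoin.conf above encode a username, a salt, and an HMAC-SHA256 of the password. A hedged sketch of how such an entry can be generated (mirroring the rpcuser helper script shipped with Bitcoin Core; the username and password below are placeholders):

import hmac
import os
from binascii import hexlify

def gen_rpcauth(username, password):
    salt = hexlify(os.urandom(16)).decode('ascii')
    digest = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256').hexdigest()
    return "rpcauth=%s:%s$%s" % (username, salt, digest)

print(gen_rpcauth("rt", "example-password"))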
diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py
index dbc61d21fc..63dfbb8ae6 100755
--- a/test/functional/p2p-segwit.py
+++ b/test/functional/p2p-segwit.py
@@ -1486,7 +1486,7 @@ class SegWitTest(BitcoinTestFramework):
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
- def test_upgrade_after_activation(self, node, node_id):
+ def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
@@ -1502,14 +1502,14 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
- assert(get_bip9_status(node, 'segwit')['status'] == "active")
+ assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peers blocks match those of node0.
- height = node.getblockcount()
+ height = self.nodes[node_id].getblockcount()
while height >= 0:
- block_hash = node.getblockhash(height)
+ block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
- assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
+ assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
@@ -1944,7 +1944,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
- self.test_upgrade_after_activation(self.nodes[2], 2)
+ self.test_upgrade_after_activation(node_id=2)
self.test_witness_sigops()
diff --git a/test/functional/pruning.py b/test/functional/pruning.py
index 4c3501ad77..0af91e0658 100755
--- a/test/functional/pruning.py
+++ b/test/functional/pruning.py
@@ -315,17 +315,17 @@ class PruneTest(BitcoinTestFramework):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
- self.start_node(2, self.options.tmpdir, ["-prune=550"])
+ self.nodes[2] = self.start_node(2, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
- # check that wallet loads loads successfully when restarting a pruned node after IBD.
+ # check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
- self.start_node(5, self.options.tmpdir, ["-prune=550"])
+ self.nodes[5] = self.start_node(5, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
def run_test(self):
diff --git a/test/functional/receivedby.py b/test/functional/receivedby.py
index 2cad6269ac..19d99c9c9e 100755
--- a/test/functional/receivedby.py
+++ b/test/functional/receivedby.py
@@ -31,7 +31,7 @@ class ReceivedByTest(BitcoinTestFramework):
def setup_nodes(self):
#This test requires mocktime
- enable_mocktime()
+ self.enable_mocktime()
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
diff --git a/test/functional/rest.py b/test/functional/rest.py
index fbcceba0fa..a69dbb5013 100755
--- a/test/functional/rest.py
+++ b/test/functional/rest.py
@@ -82,9 +82,9 @@ class RESTTest (BitcoinTestFramework):
n = vout['n']
- ######################################
- # GETUTXOS: query a unspent outpoint #
- ######################################
+ #######################################
+ # GETUTXOS: query an unspent outpoint #
+ #######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
@@ -97,9 +97,9 @@ class RESTTest (BitcoinTestFramework):
assert_equal(json_obj['utxos'][0]['value'], 0.1)
- ################################################
- # GETUTXOS: now query a already spent outpoint #
- ################################################
+ #################################################
+ # GETUTXOS: now query an already spent outpoint #
+ #################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
@@ -161,24 +161,24 @@ class RESTTest (BitcoinTestFramework):
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
- assert_equal(len(json_obj['utxos']), 0) #there should be a outpoint because it has just added to the mempool
+ assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint: the tx is unconfirmed and /checkmempool was not used
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
- assert_equal(len(json_obj['utxos']), 1) #there should be a outpoint because it has just added to the mempool
+ assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because the tx has just been added to the mempool and /checkmempool was used
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
- assert_equal(response.status, 400) #must be a 400 because we send a invalid json request
+ assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
- assert_equal(response.status, 400) #must be a 400 because we send a invalid bin request
+ assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
- assert_equal(response.status, 400) #must be a 400 because we send a invalid bin request
+ assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
diff --git a/test/functional/rpcbind_test.py b/test/functional/rpcbind_test.py
index 5336cf2ec8..951685aa76 100755
--- a/test/functional/rpcbind_test.py
+++ b/test/functional/rpcbind_test.py
@@ -37,7 +37,7 @@ class RPCBindTest(BitcoinTestFramework):
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
- pid = bitcoind_processes[0].pid
+ pid = self.bitcoind_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
@@ -49,7 +49,7 @@ class RPCBindTest(BitcoinTestFramework):
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
# connect to node through non-loopback interface
- node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
+ node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0)
node.getnetworkinfo()
self.stop_nodes()
diff --git a/test/functional/smartfees.py b/test/functional/smartfees.py
index 482c77863f..bc42a319df 100755
--- a/test/functional/smartfees.py
+++ b/test/functional/smartfees.py
@@ -10,7 +10,7 @@ from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_E
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
-# So we can create many many transactions without needing to spend
+# So we can create many transactions without needing to spend
# time signing.
redeem_script_1 = CScript([OP_1, OP_DROP])
redeem_script_2 = CScript([OP_2, OP_DROP])
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index fb3ed1473a..688347a68f 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -61,7 +61,7 @@ mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
-# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB,
+# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 67abf35687..8d698a7327 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -5,7 +5,9 @@
"""Base class for RPC testing."""
from collections import deque
+import errno
from enum import Enum
+import http.client
import logging
import optparse
import os
@@ -14,33 +16,27 @@ import subprocess
import sys
import tempfile
import time
+import traceback
+from .authproxy import JSONRPCException
+from . import coverage
from .util import (
- PortSeed,
MAX_NODES,
- bitcoind_processes,
+ PortSeed,
+ assert_equal,
check_json_precision,
connect_nodes_bi,
- disable_mocktime,
disconnect_nodes,
- enable_coverage,
- enable_mocktime,
- get_mocktime,
get_rpc_proxy,
initialize_datadir,
+ get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_node_times,
- _start_node,
- _start_nodes,
- _stop_node,
- _stop_nodes,
sync_blocks,
sync_mempools,
- wait_for_bitcoind_start,
)
-from .authproxy import JSONRPCException
class TestStatus(Enum):
PASSED = 1
@@ -51,6 +47,8 @@ TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
+BITCOIND_PROC_WAIT_TIMEOUT = 60
+
class BitcoinTestFramework(object):
"""Base class for a bitcoin test script.
@@ -70,13 +68,15 @@ class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
- self.nodes = None
+ self.nodes = []
+ self.bitcoind_processes = {}
+ self.mocktime = 0
def add_options(self, parser):
pass
def setup_chain(self):
- self.log.info("Initializing test directory "+self.options.tmpdir)
+ self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
@@ -96,7 +96,7 @@ class BitcoinTestFramework(object):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
- self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
raise NotImplementedError
@@ -110,9 +110,9 @@ class BitcoinTestFramework(object):
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
- parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
+ parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
- parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
+ parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
@@ -128,12 +128,9 @@ class BitcoinTestFramework(object):
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
- if self.options.coveragedir:
- enable_coverage(self.options.coveragedir)
-
PortSeed.n = self.options.port_seed
- os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
+ os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
@@ -187,7 +184,7 @@ class BitcoinTestFramework(object):
for fn in filenames:
try:
with open(fn, 'r') as f:
- print("From" , fn, ":")
+ print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
@@ -207,16 +204,88 @@ class BitcoinTestFramework(object):
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
- return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)
+ """Start a bitcoind and return RPC connection to it"""
+
+ datadir = os.path.join(dirname, "node" + str(i))
+ if binary is None:
+ binary = os.getenv("BITCOIND", "bitcoind")
+ args = [binary, "-datadir=" + datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(self.mocktime), "-uacomment=testnode%d" % i]
+ if extra_args is not None:
+ args.extend(extra_args)
+ self.bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
+ self.log.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
+ self._wait_for_bitcoind_start(self.bitcoind_processes[i], datadir, i, rpchost)
+ self.log.debug("initialize_chain: RPC successfully started")
+ proxy = get_rpc_proxy(rpc_url(datadir, i, rpchost), i, timeout=timewait)
+
+ if self.options.coveragedir:
+ coverage.write_all_rpc_commands(self.options.coveragedir, proxy)
+
+ return proxy
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
- return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary)
+ """Start multiple bitcoinds, return RPC connections to them"""
+
+ if extra_args is None:
+ extra_args = [None] * num_nodes
+ if binary is None:
+ binary = [None] * num_nodes
+ assert_equal(len(extra_args), num_nodes)
+ assert_equal(len(binary), num_nodes)
+ rpcs = []
+ try:
+ for i in range(num_nodes):
+ rpcs.append(self.start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
+ except:
+ # If one node failed to start, stop the others
+ # TODO: abusing self.nodes in this way is a little hacky.
+ # Eventually we should do a better job of tracking nodes
+ self.nodes.extend(rpcs)
+ self.stop_nodes()
+ self.nodes = []
+ raise
+ return rpcs
+
+ def stop_node(self, i):
+ """Stop a bitcoind test node"""
- def stop_node(self, num_node):
- _stop_node(self.nodes[num_node], num_node)
+ self.log.debug("Stopping node %d" % i)
+ try:
+ self.nodes[i].stop()
+ except http.client.CannotSendRequest as e:
+ self.log.exception("Unable to stop node")
+ return_code = self.bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
+ del self.bitcoind_processes[i]
+ assert_equal(return_code, 0)
def stop_nodes(self):
- _stop_nodes(self.nodes)
+ """Stop multiple bitcoind test nodes"""
+
+ for i in range(len(self.nodes)):
+ self.stop_node(i)
+ assert not self.bitcoind_processes.values() # All connections must be gone now
+
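Typical use of these helpers from a test, matching the pruning.py change earlier in this diff, is to stop a node and start it again with different arguments, reassigning the returned proxy (a hedged sketch; the node index and flag are illustrative):

self.stop_node(0)
self.nodes[0] = self.start_node(0, self.options.tmpdir, ["-txindex"])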
+ def assert_start_raises_init_error(self, i, dirname, extra_args=None, expected_msg=None):
+ with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
+ try:
+ self.start_node(i, dirname, extra_args, stderr=log_stderr)
+ self.stop_node(i)
+ except Exception as e:
+ assert 'bitcoind exited' in str(e) # node must have shut down
+ if expected_msg is not None:
+ log_stderr.seek(0)
+ stderr = log_stderr.read().decode('utf-8')
+ if expected_msg not in stderr:
+ raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
+ else:
+ if expected_msg is None:
+ assert_msg = "bitcoind should have exited with an error"
+ else:
+ assert_msg = "bitcoind should have exited with expected error " + expected_msg
+ raise AssertionError(assert_msg)
+
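A hedged usage sketch: start node 0 with an option combination that bitcoind rejects during init (prune mode is incompatible with -txindex) and assert that it exits with an error; passing expected_msg additionally checks the stderr wording:

self.assert_start_raises_init_error(0, self.options.tmpdir, ['-prune=550', '-txindex'])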
+ def wait_for_node_exit(self, i, timeout):
+ self.bitcoind_processes[i].wait(timeout)
def split_network(self):
"""
@@ -241,6 +310,21 @@ class BitcoinTestFramework(object):
sync_blocks(group)
sync_mempools(group)
+ def enable_mocktime(self):
+ """Enable mocktime for the script.
+
+ mocktime may be needed for scripts that use the cached version of the
+ blockchain. If the cached version of the blockchain is used without
+ mocktime then the mempools will not sync due to IBD.
+
+ For backward compatibility of the python scripts with previous
+ versions of the cache, this helper function sets mocktime to Jan 1,
+ 2014 + (201 * 10 * 60)"""
+ self.mocktime = 1388534400 + (201 * 10 * 60)
+
+ def disable_mocktime(self):
+ self.mocktime = 0
+
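A worked check of the numbers used here and in _initialize_chain() below: the starting block_time of the cached chain works out to exactly Jan 1, 2014.

mocktime = 1388534400 + (201 * 10 * 60)    # 1388655000, what enable_mocktime() sets
first_block_time = mocktime - (201 * 10 * 60)
assert first_block_time == 1388534400      # Jan 1 2014, 00:00:00 UTC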
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
@@ -256,7 +340,7 @@ class BitcoinTestFramework(object):
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
- formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+ formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
@@ -298,15 +382,15 @@ class BitcoinTestFramework(object):
args = [os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
- bitcoind_processes[i] = subprocess.Popen(args)
+ self.bitcoind_processes[i] = subprocess.Popen(args)
self.log.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
- wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
+ self._wait_for_bitcoind_start(self.bitcoind_processes[i], datadir, i)
self.log.debug("initialize_chain: RPC successfully started")
self.nodes = []
for i in range(MAX_NODES):
try:
- self.nodes.append(get_rpc_proxy(rpc_url(i), i))
+ self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
except:
self.log.exception("Error connecting to node %d" % i)
sys.exit(1)
@@ -318,8 +402,8 @@ class BitcoinTestFramework(object):
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
- enable_mocktime()
- block_time = get_mocktime() - (201 * 10 * 60)
+ self.enable_mocktime()
+ block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
@@ -332,7 +416,7 @@ class BitcoinTestFramework(object):
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
- disable_mocktime()
+ self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
@@ -353,18 +437,37 @@ class BitcoinTestFramework(object):
for i in range(num_nodes):
initialize_datadir(test_dir, i)
-# Test framework for doing p2p comparison testing, which sets up some bitcoind
-# binaries:
-# 1 binary: test binary
-# 2 binaries: 1 test binary, 1 ref binary
-# n>2 binaries: 1 test binary, n-1 ref binaries
-
-class SkipTest(Exception):
- """This exception is raised to skip a test"""
- def __init__(self, message):
- self.message = message
+ def _wait_for_bitcoind_start(self, process, datadir, i, rpchost=None):
+ """Wait for bitcoind to start.
+
+ This means that RPC is accessible and fully initialized.
+ Raise an exception if bitcoind exits during initialization."""
+ while True:
+ if process.poll() is not None:
+ raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
+ try:
+ # The .cookie file may not exist yet; rpc_url() raises ValueError until bitcoind writes it
+ rpc = get_rpc_proxy(rpc_url(datadir, i, rpchost), i, coveragedir=self.options.coveragedir)
+ rpc.getblockcount()
+ break # break out of loop on success
+ except IOError as e:
+ if e.errno != errno.ECONNREFUSED: # Port not yet open?
+ raise # unknown IO error
+ except JSONRPCException as e: # Initialization phase
+ if e.error['code'] != -28: # RPC in warmup?
+ raise # unknown JSON RPC exception
+ except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
+ if "No RPC credentials" not in str(e):
+ raise
+ time.sleep(0.25)
class ComparisonTestFramework(BitcoinTestFramework):
+ """Test framework for doing p2p comparison testing
+
+ Sets up some bitcoind binaries:
+ - 1 binary: test binary
+ - 2 binaries: 1 test binary, 1 ref binary
+ - n>2 binaries: 1 test binary, n-1 ref binaries"""
def __init__(self):
super().__init__()
@@ -380,8 +483,15 @@ class ComparisonTestFramework(BitcoinTestFramework):
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
+ extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
+ if hasattr(self, "extra_args"):
+ extra_args = self.extra_args
self.nodes = self.start_nodes(
- self.num_nodes, self.options.tmpdir,
- extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes,
+ self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
- [self.options.refbinary]*(self.num_nodes-1))
+ [self.options.refbinary] * (self.num_nodes - 1))
+
+class SkipTest(Exception):
+ """This exception is raised to skip a test"""
+ def __init__(self, message):
+ self.message = message
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 2b0f32c2b6..8a2d8de50e 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -4,30 +4,162 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
-import os
-import sys
-
-from binascii import hexlify, unhexlify
from base64 import b64encode
+from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import json
-import http.client
+import logging
+import os
import random
-import shutil
-import subprocess
-import tempfile
-import time
import re
-import errno
-import logging
+import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
-COVERAGE_DIR = None
-
logger = logging.getLogger("TestFramework.utils")
+# Assert functions
+##################
+
+def assert_fee_amount(fee, tx_size, fee_per_kB):
+ """Assert the fee was in range"""
+ target_fee = tx_size * fee_per_kB / 1000
+ if fee < target_fee:
+ raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
+ # allow the wallet's estimation to be at most 2 bytes off
+ if fee > (tx_size + 2) * fee_per_kB / 1000:
+ raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
+
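A worked example of the accepted range (values illustrative): for a 250-byte transaction at 0.0001 BTC/kB the fee must lie between the target of 0.000025 BTC and the upper bound of 0.0000252 BTC.

from decimal import Decimal

tx_size = 250                     # bytes, illustrative
fee_per_kB = Decimal("0.0001")    # BTC per kB, illustrative
target_fee = tx_size * fee_per_kB / 1000             # Decimal('0.000025')
upper_bound = (tx_size + 2) * fee_per_kB / 1000      # Decimal('0.0000252')
assert_fee_amount(target_fee, tx_size, fee_per_kB)   # passes: exactly on target
assert_fee_amount(upper_bound, tx_size, fee_per_kB)  # passes: at the upper bound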
+def assert_equal(thing1, thing2, *args):
+ if thing1 != thing2 or any(thing1 != arg for arg in args):
+ raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
+
+def assert_greater_than(thing1, thing2):
+ if thing1 <= thing2:
+ raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
+
+def assert_greater_than_or_equal(thing1, thing2):
+ if thing1 < thing2:
+ raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
+
+def assert_raises(exc, fun, *args, **kwds):
+ assert_raises_message(exc, None, fun, *args, **kwds)
+
+def assert_raises_message(exc, message, fun, *args, **kwds):
+ try:
+ fun(*args, **kwds)
+ except exc as e:
+ if message is not None and message not in e.error['message']:
+ raise AssertionError("Expected substring not found:" + e.error['message'])
+ except Exception as e:
+ raise AssertionError("Unexpected exception raised: " + type(e).__name__)
+ else:
+ raise AssertionError("No exception raised")
+
+def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
+ """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
+
+ Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
+ and verifies that the error code and message are as expected. Throws AssertionError if
+ no JSONRPCException was returned or if the error code/message are not as expected.
+
+ Args:
+ code (int), optional: the error code returned by the RPC call (defined
+ in src/rpc/protocol.h). Set to None if checking the error code is not required.
+ message (string), optional: [a substring of] the error string returned by the
+ RPC call. Set to None if checking the error string is not required
+ fun (function): the function to call. This should be the name of an RPC.
+ args*: positional arguments for the function.
+ kwds**: named arguments for the function.
+ """
+ try:
+ fun(*args, **kwds)
+ except JSONRPCException as e:
+ # JSONRPCException was thrown as expected. Check the code and message values are correct.
+ if (code is not None) and (code != e.error["code"]):
+ raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
+ if (message is not None) and (message not in e.error['message']):
+ raise AssertionError("Expected substring not found:" + e.error['message'])
+ except Exception as e:
+ raise AssertionError("Unexpected exception raised: " + type(e).__name__)
+ else:
+ raise AssertionError("No exception raised")
+
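A hedged usage sketch (node stands for any RPC proxy; the code and message are what bitcoind returns for an out-of-range getblockhash at the time of writing):

# RPC_INVALID_PARAMETER is -8 in src/rpc/protocol.h
assert_raises_jsonrpc(-8, "Block height out of range", node.getblockhash, 1000000)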
+def assert_is_hex_string(string):
+ try:
+ int(string, 16)
+ except Exception as e:
+ raise AssertionError(
+ "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
+
+def assert_is_hash_string(string, length=64):
+ if not isinstance(string, str):
+ raise AssertionError("Expected a string, got type %r" % type(string))
+ elif length and len(string) != length:
+ raise AssertionError(
+ "String of length %d expected; got %d" % (length, len(string)))
+ elif not re.match('[abcdef0-9]+$', string):
+ raise AssertionError(
+ "String %r contains invalid characters for a hash." % string)
+
+def assert_array_result(object_array, to_match, expected, should_not_find=False):
+ """
+ Pass in array of JSON objects, a dictionary with key/value pairs
+ to match against, and another dictionary with expected key/value
+ pairs.
+ If the should_not_find flag is true, to_match should not be found
+ in object_array
+ """
+ if should_not_find:
+ assert_equal(expected, {})
+ num_matched = 0
+ for item in object_array:
+ all_match = True
+ for key, value in to_match.items():
+ if item[key] != value:
+ all_match = False
+ if not all_match:
+ continue
+ elif should_not_find:
+ num_matched = num_matched + 1
+ for key, value in expected.items():
+ if item[key] != value:
+ raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
+ num_matched = num_matched + 1
+ if num_matched == 0 and not should_not_find:
+ raise AssertionError("No objects matched %s" % (str(to_match)))
+ if num_matched > 0 and should_not_find:
+ raise AssertionError("Objects were found %s" % (str(to_match)))
+
+# Utility functions
+###################
+
+def check_json_precision():
+ """Make sure json library being used does not lose precision converting BTC values"""
+ n = Decimal("20000000.00000003")
+ satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
+ if satoshis != 2000000000000003:
+ raise RuntimeError("JSON encode/decode loses precision")
+
+def count_bytes(hex_string):
+ return len(bytearray.fromhex(hex_string))
+
+def bytes_to_hex_str(byte_str):
+ return hexlify(byte_str).decode('ascii')
+
+def hex_str_to_bytes(hex_str):
+ return unhexlify(hex_str.encode('ascii'))
+
+def str_to_b64str(string):
+ return b64encode(string.encode('utf-8')).decode('ascii')
+
+def satoshi_round(amount):
+ return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+
+# RPC/P2P connection constants and functions
+############################################
+
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
@@ -35,41 +167,11 @@ PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
-BITCOIND_PROC_WAIT_TIMEOUT = 60
-
-
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
-#Set Mocktime default to OFF.
-#MOCKTIME is only needed for scripts that use the
-#cached version of the blockchain. If the cached
-#version of the blockchain is used without MOCKTIME
-#then the mempools will not sync due to IBD.
-MOCKTIME = 0
-
-def enable_mocktime():
- #For backwared compatibility of the python scripts
- #with previous versions of the cache, set MOCKTIME
- #to Jan 1, 2014 + (201 * 10 * 60)
- global MOCKTIME
- MOCKTIME = 1388534400 + (201 * 10 * 60)
-
-def disable_mocktime():
- global MOCKTIME
- MOCKTIME = 0
-
-def get_mocktime():
- return MOCKTIME
-
-def enable_coverage(dirname):
- """Maintain a log of which RPC calls are made during testing."""
- global COVERAGE_DIR
- COVERAGE_DIR = dirname
-
-
-def get_rpc_proxy(url, node_number, timeout=None):
+def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
@@ -90,11 +192,10 @@ def get_rpc_proxy(url, node_number, timeout=None):
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
- COVERAGE_DIR, node_number) if COVERAGE_DIR else None
+ coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
-
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
@@ -102,24 +203,90 @@ def p2p_port(n):
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
-def check_json_precision():
- """Make sure json library being used does not lose precision converting BTC values"""
- n = Decimal("20000000.00000003")
- satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
- if satoshis != 2000000000000003:
- raise RuntimeError("JSON encode/decode loses precision")
+def rpc_url(datadir, i, rpchost=None):
+ rpc_u, rpc_p = get_auth_cookie(datadir, i)
+ host = '127.0.0.1'
+ port = rpc_port(i)
+ if rpchost:
+ parts = rpchost.split(':')
+ if len(parts) == 2:
+ host, port = parts
+ else:
+ host = rpchost
+ return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
-def count_bytes(hex_string):
- return len(bytearray.fromhex(hex_string))
+# Node functions
+################
-def bytes_to_hex_str(byte_str):
- return hexlify(byte_str).decode('ascii')
+def initialize_datadir(dirname, n):
+ datadir = os.path.join(dirname, "node" + str(n))
+ if not os.path.isdir(datadir):
+ os.makedirs(datadir)
+ with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
+ f.write("regtest=1\n")
+ f.write("port=" + str(p2p_port(n)) + "\n")
+ f.write("rpcport=" + str(rpc_port(n)) + "\n")
+ f.write("listenonion=0\n")
+ return datadir
-def hex_str_to_bytes(hex_str):
- return unhexlify(hex_str.encode('ascii'))
+def get_datadir_path(dirname, n):
+ return os.path.join(dirname, "node" + str(n))
+
+def get_auth_cookie(datadir, n):
+ user = None
+ password = None
+ if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
+ with open(os.path.join(datadir, "bitcoin.conf"), 'r') as f:
+ for line in f:
+ if line.startswith("rpcuser="):
+ assert user is None # Ensure that there is only one rpcuser line
+ user = line.split("=")[1].strip("\n")
+ if line.startswith("rpcpassword="):
+ assert password is None # Ensure that there is only one rpcpassword line
+ password = line.split("=")[1].strip("\n")
+ if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
+ with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
+ userpass = f.read()
+ split_userpass = userpass.split(':')
+ user = split_userpass[0]
+ password = split_userpass[1]
+ if user is None or password is None:
+ raise ValueError("No RPC credentials")
+ return user, password
-def str_to_b64str(string):
- return b64encode(string.encode('utf-8')).decode('ascii')
+def log_filename(dirname, n_node, logname):
+ return os.path.join(dirname, "node" + str(n_node), "regtest", logname)
+
+def get_bip9_status(node, key):
+ info = node.getblockchaininfo()
+ return info['bip9_softforks'][key]
+
+def set_node_times(nodes, t):
+ for node in nodes:
+ node.setmocktime(t)
+
+def disconnect_nodes(from_connection, node_num):
+ for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
+ from_connection.disconnectnode(nodeid=peer_id)
+
+ for _ in range(50):
+ if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
+ break
+ time.sleep(0.1)
+ else:
+ raise AssertionError("timed out waiting for disconnect")
+
+def connect_nodes(from_connection, node_num):
+ ip_port = "127.0.0.1:" + str(p2p_port(node_num))
+ from_connection.addnode(ip_port, "onetry")
+ # poll until version handshake complete to avoid race conditions
+ # with transaction relaying
+ while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
+ time.sleep(0.1)
+
+def connect_nodes_bi(nodes, a, b):
+ connect_nodes(nodes[a], b)
+ connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
@@ -152,7 +319,7 @@ def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
- if best_hash == [best_hash[0]]*len(best_hash):
+ if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
@@ -168,176 +335,15 @@ def sync_mempools(rpc_connections, *, wait=1, timeout=60):
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
- num_match = num_match+1
+ num_match = num_match + 1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
-bitcoind_processes = {}
-
-def initialize_datadir(dirname, n):
- datadir = os.path.join(dirname, "node"+str(n))
- if not os.path.isdir(datadir):
- os.makedirs(datadir)
- rpc_u, rpc_p = rpc_auth_pair(n)
- with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
- f.write("regtest=1\n")
- f.write("rpcuser=" + rpc_u + "\n")
- f.write("rpcpassword=" + rpc_p + "\n")
- f.write("port="+str(p2p_port(n))+"\n")
- f.write("rpcport="+str(rpc_port(n))+"\n")
- f.write("listenonion=0\n")
- return datadir
-
-def rpc_auth_pair(n):
- return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
-
-def rpc_url(i, rpchost=None):
- rpc_u, rpc_p = rpc_auth_pair(i)
- host = '127.0.0.1'
- port = rpc_port(i)
- if rpchost:
- parts = rpchost.split(':')
- if len(parts) == 2:
- host, port = parts
- else:
- host = rpchost
- return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
-
-def wait_for_bitcoind_start(process, url, i):
- '''
- Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
- Raise an exception if bitcoind exits during initialization.
- '''
- while True:
- if process.poll() is not None:
- raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
- try:
- rpc = get_rpc_proxy(url, i)
- blocks = rpc.getblockcount()
- break # break out of loop on success
- except IOError as e:
- if e.errno != errno.ECONNREFUSED: # Port not yet open?
- raise # unknown IO error
- except JSONRPCException as e: # Initialization phase
- if e.error['code'] != -28: # RPC in warmup?
- raise # unknown JSON RPC exception
- time.sleep(0.25)
-
-
-def _start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
- """Start a bitcoind and return RPC connection to it
-
- This function should only be called from within test_framework, not by individual test scripts."""
-
- datadir = os.path.join(dirname, "node"+str(i))
- if binary is None:
- binary = os.getenv("BITCOIND", "bitcoind")
- args = [binary, "-datadir=" + datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(get_mocktime()), "-uacomment=testnode%d" % i]
- if extra_args is not None: args.extend(extra_args)
- bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
- logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
- url = rpc_url(i, rpchost)
- wait_for_bitcoind_start(bitcoind_processes[i], url, i)
- logger.debug("initialize_chain: RPC successfully started")
- proxy = get_rpc_proxy(url, i, timeout=timewait)
-
- if COVERAGE_DIR:
- coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
-
- return proxy
-
-def assert_start_raises_init_error(i, dirname, extra_args=None, expected_msg=None):
- with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
- try:
- node = _start_node(i, dirname, extra_args, stderr=log_stderr)
- _stop_node(node, i)
- except Exception as e:
- assert 'bitcoind exited' in str(e) #node must have shutdown
- if expected_msg is not None:
- log_stderr.seek(0)
- stderr = log_stderr.read().decode('utf-8')
- if expected_msg not in stderr:
- raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
- else:
- if expected_msg is None:
- assert_msg = "bitcoind should have exited with an error"
- else:
- assert_msg = "bitcoind should have exited with expected error " + expected_msg
- raise AssertionError(assert_msg)
-
-def _start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
- """Start multiple bitcoinds, return RPC connections to them
-
- This function should only be called from within test_framework, not by individual test scripts."""
-
- if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
- if binary is None: binary = [ None for _ in range(num_nodes) ]
- assert_equal(len(extra_args), num_nodes)
- assert_equal(len(binary), num_nodes)
- rpcs = []
- try:
- for i in range(num_nodes):
- rpcs.append(_start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
- except: # If one node failed to start, stop the others
- _stop_nodes(rpcs)
- raise
- return rpcs
-
-def log_filename(dirname, n_node, logname):
- return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
-
-def _stop_node(node, i):
- """Stop a bitcoind test node
-
- This function should only be called from within test_framework, not by individual test scripts."""
-
- logger.debug("Stopping node %d" % i)
- try:
- node.stop()
- except http.client.CannotSendRequest as e:
- logger.exception("Unable to stop node")
- return_code = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
- assert_equal(return_code, 0)
- del bitcoind_processes[i]
-
-def _stop_nodes(nodes):
- """Stop multiple bitcoind test nodes
-
- This function should only be called from within test_framework, not by individual test scripts."""
-
- for i, node in enumerate(nodes):
- _stop_node(node, i)
- assert not bitcoind_processes.values() # All connections must be gone now
-
-def set_node_times(nodes, t):
- for node in nodes:
- node.setmocktime(t)
-
-def disconnect_nodes(from_connection, node_num):
- for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
- from_connection.disconnectnode(nodeid=peer_id)
-
- for _ in range(50):
- if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
- break
- time.sleep(0.1)
- else:
- raise AssertionError("timed out waiting for disconnect")
-
-def connect_nodes(from_connection, node_num):
- ip_port = "127.0.0.1:"+str(p2p_port(node_num))
- from_connection.addnode(ip_port, "onetry")
- # poll until version handshake complete to avoid race conditions
- # with transaction relaying
- while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
- time.sleep(0.1)
-
-def connect_nodes_bi(nodes, a, b):
- connect_nodes(nodes[a], b)
- connect_nodes(nodes[b], a)
+# Transaction/Block functions
+#############################
def find_output(node, txid, amount):
"""
@@ -348,14 +354,13 @@ def find_output(node, txid, amount):
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
- raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
-
+ raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
- assert(confirmations_required >=0)
+ assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
@@ -363,9 +368,9 @@ def gather_inputs(from_node, amount_needed, confirmations_required=1):
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
- inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
+ inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
- raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
+ raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
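As random_transaction below shows, this pairs with make_change (defined next): gather enough confirmed coins for the amount plus fee, then build the change outputs. A short sketch, where node stands in for any RPC proxy and the amounts are arbitrary:

    # Sketch: fund a 1.0 BTC payment with a flat illustrative fee.
    from decimal import Decimal
    amount, fee = Decimal("1.0"), Decimal("0.0001")
    total_in, inputs = gather_inputs(node, amount + fee)   # node: placeholder RPC proxy
    outputs = make_change(node, total_in, amount, fee)     # change back to the sender
    outputs[node.getnewaddress()] = float(amount)          # the actual payment output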
def make_change(from_node, amount_in, amount_out, fee):
@@ -373,13 +378,13 @@ def make_change(from_node, amount_in, amount_out, fee):
Create change output(s), return them
"""
outputs = {}
- amount = amount_out+fee
+ amount = amount_out + fee
change = amount_in - amount
- if change > amount*2:
+ if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
- outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+ outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
@@ -392,9 +397,9 @@ def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
- fee = min_fee + fee_increment*random.randint(0,fee_variants)
+ fee = min_fee + fee_increment * random.randint(0, fee_variants)
- (total_in, inputs) = gather_inputs(from_node, amount+fee)
+ (total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
@@ -404,123 +409,13 @@ def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
return (txid, signresult["hex"], fee)
-def assert_fee_amount(fee, tx_size, fee_per_kB):
- """Assert the fee was in range"""
- target_fee = tx_size * fee_per_kB / 1000
- if fee < target_fee:
- raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
- # allow the wallet's estimation to be at most 2 bytes off
- if fee > (tx_size + 2) * fee_per_kB / 1000:
- raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
-
-def assert_equal(thing1, thing2, *args):
- if thing1 != thing2 or any(thing1 != arg for arg in args):
- raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
-
-def assert_greater_than(thing1, thing2):
- if thing1 <= thing2:
- raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
-
-def assert_greater_than_or_equal(thing1, thing2):
- if thing1 < thing2:
- raise AssertionError("%s < %s"%(str(thing1),str(thing2)))
-
-def assert_raises(exc, fun, *args, **kwds):
- assert_raises_message(exc, None, fun, *args, **kwds)
-
-def assert_raises_message(exc, message, fun, *args, **kwds):
- try:
- fun(*args, **kwds)
- except exc as e:
- if message is not None and message not in e.error['message']:
- raise AssertionError("Expected substring not found:"+e.error['message'])
- except Exception as e:
- raise AssertionError("Unexpected exception raised: "+type(e).__name__)
- else:
- raise AssertionError("No exception raised")
-
-def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
- """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
-
- Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
- and verifies that the error code and message are as expected. Throws AssertionError if
- no JSONRPCException was returned or if the error code/message are not as expected.
-
- Args:
- code (int), optional: the error code returned by the RPC call (defined
- in src/rpc/protocol.h). Set to None if checking the error code is not required.
- message (string), optional: [a substring of] the error string returned by the
- RPC call. Set to None if checking the error string is not required
- fun (function): the function to call. This should be the name of an RPC.
- args*: positional arguments for the function.
- kwds**: named arguments for the function.
- """
- try:
- fun(*args, **kwds)
- except JSONRPCException as e:
- # JSONRPCException was thrown as expected. Check the code and message values are correct.
- if (code is not None) and (code != e.error["code"]):
- raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
- if (message is not None) and (message not in e.error['message']):
- raise AssertionError("Expected substring not found:"+e.error['message'])
- except Exception as e:
- raise AssertionError("Unexpected exception raised: "+type(e).__name__)
- else:
- raise AssertionError("No exception raised")
-
-def assert_is_hex_string(string):
- try:
- int(string, 16)
- except Exception as e:
- raise AssertionError(
- "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
-
-def assert_is_hash_string(string, length=64):
- if not isinstance(string, str):
- raise AssertionError("Expected a string, got type %r" % type(string))
- elif length and len(string) != length:
- raise AssertionError(
- "String of length %d expected; got %d" % (length, len(string)))
- elif not re.match('[abcdef0-9]+$', string):
- raise AssertionError(
- "String %r contains invalid characters for a hash." % string)
-
-def assert_array_result(object_array, to_match, expected, should_not_find = False):
- """
- Pass in array of JSON objects, a dictionary with key/value pairs
- to match against, and another dictionary with expected key/value
- pairs.
- If the should_not_find flag is true, to_match should not be found
- in object_array
- """
- if should_not_find == True:
- assert_equal(expected, { })
- num_matched = 0
- for item in object_array:
- all_match = True
- for key,value in to_match.items():
- if item[key] != value:
- all_match = False
- if not all_match:
- continue
- elif should_not_find == True:
- num_matched = num_matched+1
- for key,value in expected.items():
- if item[key] != value:
- raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
- num_matched = num_matched+1
- if num_matched == 0 and should_not_find != True:
- raise AssertionError("No objects matched %s"%(str(to_match)))
- if num_matched > 0 and should_not_find == True:
- raise AssertionError("Objects were found %s"%(str(to_match)))
-
-def satoshi_round(amount):
- return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
-
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
- node.generate(int(0.5*count)+101)
+ to_generate = int(0.5 * count) + 101
+ while to_generate > 0:
+ node.generate(min(25, to_generate))
+ to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
@@ -530,14 +425,14 @@ def create_confirmed_utxos(fee, node, count):
for i in range(iterations):
t = utxos.pop()
inputs = []
- inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
+ inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
- outputs[addr1] = satoshi_round(send_value/2)
- outputs[addr2] = satoshi_round(send_value/2)
+ outputs[addr1] = satoshi_round(send_value / 2)
+ outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
- txid = node.sendrawtransaction(signed_tx)
+ node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
@@ -552,8 +447,8 @@ def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
- script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
- for i in range (512):
+ script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
+ for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
@@ -567,8 +462,8 @@ def gen_return_txouts():
return txouts
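The string built here is only meant to bloat transactions: a 516-byte scriptPubKey ("6a" OP_RETURN, "4d" OP_PUSHDATA2, "0200" as its length bytes, then 512 repeated "01" bytes) concatenated 128 times, prefixed with "81", which appears to be the new output count of 0x81 = 129 (the 128 padding outputs plus the change output spliced in later). A small standalone size check of those constants:

    # Sanity sketch of the constants above (sizes only; the script is never executed):
    script_pubkey = "6a4d0200" + "01" * 512
    assert len(script_pubkey) == 2 * (4 + 512)   # 516-byte script, hex-encoded
    assert int("81", 16) == 128 + 1              # padding outputs + change output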
def create_tx(node, coinbase, to_address, amount):
- inputs = [{ "txid" : coinbase, "vout" : 0}]
- outputs = { to_address : amount }
+ inputs = [{"txid": coinbase, "vout": 0}]
+ outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
@@ -581,7 +476,7 @@ def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
txids = []
for _ in range(num):
t = utxos.pop()
- inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
+ inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
@@ -606,7 +501,3 @@ def mine_large_block(node, utxos=None):
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
-
-def get_bip9_status(node, key):
- info = node.getblockchaininfo()
- return info['bip9_softforks'][key]
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 4702f2d773..54f625514b 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -20,6 +20,7 @@ import datetime
import os
import time
import shutil
+import signal
import sys
import subprocess
import tempfile
@@ -78,7 +79,7 @@ BASE_SCRIPTS= [
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
- "zmq_test.py",
+ 'zmq_test.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
@@ -111,6 +112,8 @@ BASE_SCRIPTS= [
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
+ 'wallet-encryption.py',
+ 'uptime.py',
]
EXTENDED_SCRIPTS = [
@@ -122,6 +125,7 @@ EXTENDED_SCRIPTS = [
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
+ 'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
@@ -136,6 +140,7 @@ EXTENDED_SCRIPTS = [
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
+ 'example_test.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
@@ -389,6 +394,10 @@ class TestHandler:
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
+ if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
+ # On Travis, time out individual tests after 20 minutes (to stop tests
+ # hanging and not providing useful output).
+ proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
diff --git a/test/functional/uptime.py b/test/functional/uptime.py
new file mode 100755
index 0000000000..b20d6f5cb6
--- /dev/null
+++ b/test/functional/uptime.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the RPC call related to the uptime command.
+
+Test corresponds to code in rpc/server.cpp.
+"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+
+
+class UptimeTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+
+ self.num_nodes = 1
+ self.setup_clean_chain = True
+
+ def run_test(self):
+ self._test_uptime()
+
+ def _test_uptime(self):
+ wait_time = 10
+ self.nodes[0].setmocktime(int(time.time() + wait_time))
+ assert(self.nodes[0].uptime() >= wait_time)
+
+
+if __name__ == '__main__':
+ UptimeTest().main()
diff --git a/test/functional/wallet-dump.py b/test/functional/wallet-dump.py
index 9cb32d4650..569cc46e6c 100755
--- a/test/functional/wallet-dump.py
+++ b/test/functional/wallet-dump.py
@@ -7,7 +7,7 @@
import os
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (assert_equal, bitcoind_processes)
+from test_framework.util import assert_equal
def read_dump(file_name, addrs, hd_master_addr_old):
@@ -95,7 +95,7 @@ class WalletDumpTest(BitcoinTestFramework):
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
- bitcoind_processes[0].wait()
+ self.bitcoind_processes[0].wait()
self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
diff --git a/test/functional/wallet-encryption.py b/test/functional/wallet-encryption.py
new file mode 100755
index 0000000000..ba72918fe1
--- /dev/null
+++ b/test/functional/wallet-encryption.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test Wallet encryption"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT
+from test_framework.util import (
+ assert_equal,
+ assert_raises_jsonrpc,
+)
+
+class WalletEncryptionTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def run_test(self):
+ passphrase = "WalletPassphrase"
+ passphrase2 = "SecondWalletPassphrase"
+
+ # Make sure the wallet isn't encrypted first
+ address = self.nodes[0].getnewaddress()
+ privkey = self.nodes[0].dumpprivkey(address)
+ assert_equal(privkey[:1], "c")
+ assert_equal(len(privkey), 52)
+
+ # Encrypt the wallet
+ self.nodes[0].encryptwallet(passphrase)
+ self.bitcoind_processes[0].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
+ self.nodes[0] = self.start_node(0, self.options.tmpdir)
+
+ # Test that the wallet is encrypted
+ assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
+
+ # Check that walletpassphrase works
+ self.nodes[0].walletpassphrase(passphrase, 2)
+ assert_equal(privkey, self.nodes[0].dumpprivkey(address))
+
+ # Check that the timeout is right
+ time.sleep(2)
+ assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
+
+ # Test wrong passphrase
+ assert_raises_jsonrpc(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
+
+ # Test walletlock
+ self.nodes[0].walletpassphrase(passphrase, 84600)
+ assert_equal(privkey, self.nodes[0].dumpprivkey(address))
+ self.nodes[0].walletlock()
+ assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
+
+ # Test passphrase changes
+ self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
+ assert_raises_jsonrpc(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
+ self.nodes[0].walletpassphrase(passphrase2, 10)
+ assert_equal(privkey, self.nodes[0].dumpprivkey(address))
+
+if __name__ == '__main__':
+ WalletEncryptionTest().main()
diff --git a/test/functional/wallet-hd.py b/test/functional/wallet-hd.py
index e7ec72a248..dfd3dc83c5 100755
--- a/test/functional/wallet-hd.py
+++ b/test/functional/wallet-hd.py
@@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
- assert_start_raises_init_error
)
import os
import shutil
@@ -27,7 +26,7 @@ class WalletHDTest(BitcoinTestFramework):
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
- assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
+ self.assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
self.nodes[1] = self.start_node(1, self.options.tmpdir, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
diff --git a/test/functional/walletbackup.py b/test/functional/walletbackup.py
index a4507182a2..ff51cba4b3 100755
--- a/test/functional/walletbackup.py
+++ b/test/functional/walletbackup.py
@@ -30,10 +30,11 @@ confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
+from random import randint
+import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
-from random import randint
class WalletBackupTest(BitcoinTestFramework):
diff --git a/test/functional/zapwallettxes.py b/test/functional/zapwallettxes.py
index a8600e82f6..e4d40520ef 100755
--- a/test/functional/zapwallettxes.py
+++ b/test/functional/zapwallettxes.py
@@ -70,7 +70,7 @@ class ZapWalletTXesTest (BitcoinTestFramework):
self.nodes[0] = self.start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
- #there must be a expection because the unconfirmed wallettx0 must be gone by now
+ #there must be an exception because the unconfirmed wallettx0 must be gone by now
tx0 = self.nodes[0].gettransaction(txid0)
assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
diff --git a/test/functional/zmq_test.py b/test/functional/zmq_test.py
index ce39cfefdc..26c946d215 100755
--- a/test/functional/zmq_test.py
+++ b/test/functional/zmq_test.py
@@ -8,15 +8,15 @@ import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
-from test_framework.util import *
+from test_framework.util import (assert_equal,
+ bytes_to_hex_str,
+ )
class ZMQTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
- self.num_nodes = 4
-
- port = 28332
+ self.num_nodes = 2
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
@@ -28,7 +28,7 @@ class ZMQTest (BitcoinTestFramework):
# Check that bitcoin has been built with ZMQ enabled
config = configparser.ConfigParser()
if not self.options.configfile:
- self.options.configfile = os.path.dirname(__file__) + "/config.ini"
+ self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
@@ -36,59 +36,66 @@ class ZMQTest (BitcoinTestFramework):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
+ self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
- self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
- self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
- ['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
- [],
- [],
- []
- ])
+ ip_address = "tcp://127.0.0.1:28332"
+ self.zmqSubSocket.connect(ip_address)
+ extra_args = [['-zmqpubhashtx=%s' % ip_address, '-zmqpubhashblock=%s' % ip_address], []]
+ self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
- self.sync_all()
+ try:
+ self._zmq_test()
+ finally:
+ # Destroy the zmq context
+ self.log.debug("Destroying zmq context")
+ self.zmqContext.destroy(linger=None)
+ def _zmq_test(self):
genhashes = self.nodes[0].generate(1)
self.sync_all()
- self.log.info("listen...")
+ self.log.info("Wait for tx")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
- assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
+ assert_equal(msgSequence, 0) # must be sequence 0 on hashtx
+ self.log.info("Wait for block")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
- assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
+ assert_equal(msgSequence, 0) # must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
- assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
+ assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq
+ self.log.info("Generate 10 blocks (and 10 coinbase txes)")
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
- for x in range(0,n*2):
+ for x in range(n * 2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
- assert_equal(msgSequence, blockcount+1)
+ assert_equal(msgSequence, blockcount + 1)
blockcount += 1
- for x in range(0,n):
- assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
+ for x in range(n):
+ assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq
- #test tx from a second node
+ self.log.info("Wait for tx from second node")
+ # test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
@@ -96,14 +103,12 @@ class ZMQTest (BitcoinTestFramework):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
- hashZMQ = ""
- if topic == b"hashtx":
- hashZMQ = bytes_to_hex_str(body)
- msgSequence = struct.unpack('<I', msg[-1])[-1]
- assert_equal(msgSequence, blockcount+1)
-
- assert_equal(hashRPC, hashZMQ) #blockhash from generate must be equal to the hash received over zmq
+ assert_equal(topic, b"hashtx")
+ hashZMQ = bytes_to_hex_str(body)
+ msgSequence = struct.unpack('<I', msg[-1])[-1]
+ assert_equal(msgSequence, blockcount + 1)
+ assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
- ZMQTest ().main ()
+ ZMQTest().main()
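Outside the framework, the same notifications can be watched with a few lines of pyzmq; a standalone sketch assuming a local bitcoind started with -zmqpubhashblock=tcp://127.0.0.1:28332 and -zmqpubhashtx on the same endpoint:

    # Sketch: subscribe the way ZMQTest does and print the first notification.
    import struct
    import zmq

    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.set(zmq.RCVTIMEO, 60000)                  # don't block forever
    sub.setsockopt(zmq.SUBSCRIBE, b"hashblock")
    sub.setsockopt(zmq.SUBSCRIBE, b"hashtx")
    sub.connect("tcp://127.0.0.1:28332")

    msg = sub.recv_multipart()                    # [topic, 32-byte hash, 4-byte LE sequence]
    print(msg[0], msg[1].hex(), struct.unpack('<I', msg[-1])[0])
    ctx.destroy(linger=0)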