-rw-r--r--  .gitignore | 1
-rw-r--r--  .travis.yml | 69
-rw-r--r--  Makefile.am | 16
-rw-r--r--  build-aux/m4/ax_pthread.m4 | 550
-rw-r--r--  configure.ac | 3
-rw-r--r--  contrib/devtools/split-debug.sh.in | 10
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml | 69
-rw-r--r--  contrib/gitian-descriptors/gitian-osx-signer.yml | 3
-rw-r--r--  contrib/gitian-descriptors/gitian-osx.yml | 24
-rw-r--r--  contrib/gitian-descriptors/gitian-win-signer.yml | 1
-rw-r--r--  contrib/gitian-descriptors/gitian-win.yml | 30
-rw-r--r--  contrib/gitian-keys/luke-jr-key.pgp | bin 6518 -> 6518 bytes
-rw-r--r--  depends/Makefile | 10
-rw-r--r--  depends/packages/bdb.mk | 3
-rw-r--r--  depends/packages/packages.mk | 4
-rw-r--r--  doc/bips.md | 1
-rw-r--r--  doc/developer-notes.md | 57
-rw-r--r--  doc/gitian-building.md | 12
-rw-r--r--  doc/release-notes.md | 25
-rwxr-xr-x  qa/pull-tester/rpc-tests.py | 3
-rw-r--r--  qa/pull-tester/tests_config.py.in | 1
-rwxr-xr-x  qa/rpc-tests/fundrawtransaction.py | 18
-rwxr-xr-x  qa/rpc-tests/mempool_packages.py | 28
-rwxr-xr-x  qa/rpc-tests/p2p-fullblocktest.py | 1105
-rwxr-xr-x  qa/rpc-tests/rawtransactions.py | 14
-rw-r--r--  qa/rpc-tests/test_framework/blockstore.py | 59
-rw-r--r--  qa/rpc-tests/test_framework/blocktools.py | 23
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py | 12
-rw-r--r--  qa/rpc-tests/test_framework/util.py | 9
-rwxr-xr-x  qa/rpc-tests/wallet.py | 7
-rwxr-xr-x  share/genbuild.sh | 7
-rw-r--r--  src/Makefile.am | 8
-rw-r--r--  src/Makefile.leveldb.include | 56
-rw-r--r--  src/Makefile.qt.include | 11
-rw-r--r--  src/Makefile.test.include | 3
-rw-r--r--  src/addrman.cpp | 20
-rw-r--r--  src/addrman.h | 11
-rw-r--r--  src/chainparams.cpp | 8
-rw-r--r--  src/chainparams.h | 4
-rw-r--r--  src/clientversion.cpp | 9
-rw-r--r--  src/clientversion.h | 1
-rw-r--r--  src/init.cpp | 17
-rw-r--r--  src/main.cpp | 41
-rw-r--r--  src/main.h | 2
-rw-r--r--  src/miner.cpp | 449
-rw-r--r--  src/miner.h | 56
-rw-r--r--  src/net.cpp | 222
-rw-r--r--  src/net.h | 24
-rw-r--r--  src/netbase.cpp | 8
-rw-r--r--  src/protocol.cpp | 4
-rw-r--r--  src/protocol.h | 12
-rw-r--r--  src/qt/bitcoinstrings.cpp | 2
-rw-r--r--  src/qt/forms/receiverequestdialog.ui | 2
-rw-r--r--  src/qt/guiconstants.h | 2
-rw-r--r--  src/qt/locale/bitcoin_en.ts | 25
-rw-r--r--  src/qt/receiverequestdialog.cpp | 22
-rw-r--r--  src/qt/receiverequestdialog.h | 1
-rw-r--r--  src/rpc/blockchain.cpp | 260
-rw-r--r--  src/rpc/client.cpp | 2
-rw-r--r--  src/rpc/mining.cpp | 4
-rw-r--r--  src/rpc/net.cpp | 95
-rw-r--r--  src/rpc/rawtransaction.cpp | 9
-rw-r--r--  src/test/DoS_tests.cpp | 8
-rw-r--r--  src/test/addrman_tests.cpp | 92
-rw-r--r--  src/test/base58_tests.cpp | 4
-rw-r--r--  src/test/miner_tests.cpp | 32
-rw-r--r--  src/test/net_tests.cpp | 8
-rw-r--r--  src/test/pmt_tests.cpp | 4
-rw-r--r--  src/test/prevector_tests.cpp | 4
-rw-r--r--  src/test/test_bitcoin.cpp | 2
-rw-r--r--  src/wallet/rpcdump.cpp | 2
-rw-r--r--  src/wallet/wallet.cpp | 97
-rw-r--r--  src/wallet/wallet.h | 12
-rw-r--r--  src/wallet/walletdb.cpp | 17
-rw-r--r--  src/wallet/walletdb.h | 32
75 files changed, 2817 insertions, 1061 deletions
diff --git a/.gitignore b/.gitignore
index a8722aa593..ce40019dc3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,3 +114,4 @@ share/BitcoindComparisonTool.jar
/doc/doxygen/
libbitcoinconsensus.pc
+contrib/devtools/split-debug.sh
diff --git a/.travis.yml b/.travis.yml
index bc2c7faf7e..64227ac2a8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,17 +1,17 @@
-# errata:
-# - A travis bug causes caches to trample eachother when using the same
-# compiler key (which we don't use anyway). This is worked around for now by
-# replacing the "compilers" with a build name prefixed by the no-op ":"
-# command. See: https://github.com/travis-ci/travis-ci/issues/4393
-# - sudo/dist/group are set so as to get Blue Box VMs, necessary for [loopback]
-# IPv6 support
-
sudo: required
dist: trusty
+#workaround for https://github.com/travis-ci/travis-ci/issues/5227
+addons:
+ hostname: bitcoin-tester
+
os: linux
-language: cpp
-compiler: gcc
+language: generic
+cache:
+ directories:
+ - depends/built
+ - depends/sdk-sources
+ - $HOME/.ccache
env:
global:
- MAKEJOBS=-j3
@@ -25,35 +25,25 @@ env:
- SDK_URL=https://bitcoincore.org/depends-sources/sdks
- PYTHON_DEBUG=1
- WINEDEBUG=fixme-all
-cache:
- apt: true
- directories:
- - depends/built
- - depends/sdk-sources
- - $HOME/.ccache
-matrix:
- fast_finish: true
- include:
- - compiler: ": ARM"
- env: HOST=arm-linux-gnueabihf PACKAGES="g++-arm-linux-gnueabihf" DEP_OPTS="NO_QT=1" CHECK_DOC=1 GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
- - compiler: ": Win32"
- env: HOST=i686-w64-mingw32 DPKG_ADD_ARCH="i386" DEP_OPTS="NO_QT=1" PACKAGES="python3 nsis g++-mingw-w64-i686 wine1.6 bc" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-reduce-exports"
- - compiler: ": 32-bit + dash"
- env: HOST=i686-pc-linux-gnu PACKAGES="g++-multilib bc python3-zmq" DEP_OPTS="NO_QT=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-zmq --enable-glibc-back-compat --enable-reduce-exports LDFLAGS=-static-libstdc++" USE_SHELL="/bin/dash"
- - compiler: ": Win64"
- env: HOST=x86_64-w64-mingw32 DPKG_ADD_ARCH="i386" DEP_OPTS="NO_QT=1" PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine1.6 bc" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-reduce-exports"
- - compiler: ": bitcoind"
- env: HOST=x86_64-unknown-linux-gnu PACKAGES="bc python3-zmq" DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-zmq --enable-glibc-back-compat --enable-reduce-exports CPPFLAGS=-DDEBUG_LOCKORDER"
- - compiler: ": No wallet"
- env: HOST=x86_64-unknown-linux-gnu PACKAGES="python3" DEP_OPTS="NO_WALLET=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
- - compiler: ": Cross-Mac"
- env: HOST=x86_64-apple-darwin11 PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev" BITCOIN_CONFIG="--enable-reduce-exports" OSX_SDK=10.9 GOAL="deploy"
- exclude:
- - compiler: gcc
+ matrix:
+# ARM
+ - HOST=arm-linux-gnueabihf PACKAGES="g++-arm-linux-gnueabihf" DEP_OPTS="NO_QT=1" CHECK_DOC=1 GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
+# Win32
+ - HOST=i686-w64-mingw32 DPKG_ADD_ARCH="i386" DEP_OPTS="NO_QT=1" PACKAGES="python3 nsis g++-mingw-w64-i686 wine1.6 bc openjdk-7-jre-headless" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-reduce-exports"
+# 32-bit + dash
+ - HOST=i686-pc-linux-gnu PACKAGES="g++-multilib bc python3-zmq openjdk-7-jre-headless" DEP_OPTS="NO_QT=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-zmq --enable-glibc-back-compat --enable-reduce-exports LDFLAGS=-static-libstdc++" USE_SHELL="/bin/dash"
+# Win64
+ - HOST=x86_64-w64-mingw32 DPKG_ADD_ARCH="i386" DEP_OPTS="NO_QT=1" PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine1.6 bc openjdk-7-jre-headless" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-reduce-exports"
+# bitcoind
+ - HOST=x86_64-unknown-linux-gnu PACKAGES="bc python3-zmq openjdk-7-jre-headless" DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-zmq --enable-glibc-back-compat --enable-reduce-exports CPPFLAGS=-DDEBUG_LOCKORDER"
+# No wallet
+ - HOST=x86_64-unknown-linux-gnu PACKAGES=" openjdk-7-jre-headless python3" DEP_OPTS="NO_WALLET=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
+# Cross-Mac
+ - HOST=x86_64-apple-darwin11 PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev" BITCOIN_CONFIG="--enable-reduce-exports" OSX_SDK=10.9 GOAL="deploy"
+
before_install:
- export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g")
install:
- - if [ -n "$PACKAGES" ]; then sudo rm -f /etc/apt/sources.list.d/google-chrome.list; fi
- if [ -n "$PPA" ]; then travis_retry sudo add-apt-repository "$PPA" -y; fi
- if [ -n "$DPKG_ADD_ARCH" ]; then sudo dpkg --add-architecture "$DPKG_ADD_ARCH" ; fi
- if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get update; fi
@@ -72,10 +62,8 @@ script:
- BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$TRAVIS_BUILD_DIR/depends/$HOST --bindir=$OUTDIR/bin --libdir=$OUTDIR/lib"
- depends/$HOST/native/bin/ccache --max-size=$CCACHE_SIZE
- test -n "$USE_SHELL" && eval '"$USE_SHELL" -c "./autogen.sh"' || ./autogen.sh
- - ./configure --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
- - make distdir PACKAGE=bitcoin VERSION=$HOST
- - cd bitcoin-$HOST
- - ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
+ - mkdir build && cd build
+ - ../configure $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
- make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL V=1 ; false )
- export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib
- if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS check VERBOSE=1; fi
@@ -83,4 +71,3 @@ script:
after_script:
- echo $TRAVIS_COMMIT_RANGE
- echo $TRAVIS_COMMIT_LOG
- - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then (echo "Upload goes here. Something like: scp -r $BASE_OUTDIR server" || echo "upload failed"); fi
diff --git a/Makefile.am b/Makefile.am
index 5783c1fdd8..b10d085066 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -28,7 +28,7 @@ OSX_DSSTORE_GEN=$(top_srcdir)/contrib/macdeploy/custom_dsstore.py
OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus
OSX_FANCY_PLIST=$(top_srcdir)/contrib/macdeploy/fancy.plist
OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/bitcoin.icns
-OSX_PLIST=$(top_srcdir)/share/qt/Info.plist #not installed
+OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
OSX_QT_TRANSLATIONS = da,de,es,hu,ru,uk,zh_CN,zh_TW
DIST_DOCS = $(wildcard doc/*.md) $(wildcard doc/release-notes/*.md)
@@ -53,18 +53,8 @@ COVERAGE_INFO = baseline_filtered_combined.info baseline.info block_test.info \
leveldb_baseline_filtered.info test_bitcoin_coverage.info test_bitcoin.info
dist-hook:
- -$(MAKE) -C $(top_distdir)/src/leveldb clean
- -$(MAKE) -C $(top_distdir)/src/secp256k1 distclean
-$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf -
-distcheck-hook:
- $(MKDIR_P) $(top_distdir)/_build/src/leveldb
- cp -rf $(top_srcdir)/src/leveldb/* $(top_distdir)/_build/src/leveldb/
- -$(MAKE) -C $(top_distdir)/_build/src/leveldb clean
-
-distcleancheck:
- @:
-
$(BITCOIN_WIN_INSTALLER): all-recursive
$(MKDIR_P) $(top_builddir)/release
STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIND_BIN) $(top_builddir)/release
@@ -234,7 +224,11 @@ EXTRA_DIST = $(top_srcdir)/share/genbuild.sh qa/pull-tester/rpc-tests.py qa/rpc-
CLEANFILES = $(OSX_DMG) $(BITCOIN_WIN_INSTALLER)
+# This file is problematic for out-of-tree builds if it exists.
+DISTCLEANFILES = qa/pull-tester/tests_config.pyc
+
.INTERMEDIATE: $(COVERAGE_INFO)
clean-local:
rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ qa/tmp/ cache/ $(OSX_APP)
+ rm -rf qa/pull-tester/__pycache__
diff --git a/build-aux/m4/ax_pthread.m4 b/build-aux/m4/ax_pthread.m4
index d218d1af73..4c4051ea37 100644
--- a/build-aux/m4/ax_pthread.m4
+++ b/build-aux/m4/ax_pthread.m4
@@ -82,7 +82,7 @@
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
-#serial 22
+#serial 23
AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
AC_DEFUN([AX_PTHREAD], [
@@ -100,22 +100,22 @@ ax_pthread_ok=no
# etcetera environment variables, and if threads linking works using
# them:
if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
- ax_pthread_save_CC="$CC"
- ax_pthread_save_CFLAGS="$CFLAGS"
- ax_pthread_save_LIBS="$LIBS"
- AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- LIBS="$PTHREAD_LIBS $LIBS"
- AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
- AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes])
- AC_MSG_RESULT([$ax_pthread_ok])
- if test "x$ax_pthread_ok" = "xno"; then
- PTHREAD_LIBS=""
- PTHREAD_CFLAGS=""
- fi
- CC="$ax_pthread_save_CC"
- CFLAGS="$ax_pthread_save_CFLAGS"
- LIBS="$ax_pthread_save_LIBS"
+ ax_pthread_save_CC="$CC"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
+ AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes])
+ AC_MSG_RESULT([$ax_pthread_ok])
+ if test "x$ax_pthread_ok" = "xno"; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ CC="$ax_pthread_save_CC"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
fi
# We must check for the threads library under a number of different
@@ -152,50 +152,50 @@ ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --
case $host_os in
- freebsd*)
+ freebsd*)
- # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
- # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+ # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+ # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
- ax_pthread_flags="-kthread lthread $ax_pthread_flags"
- ;;
+ ax_pthread_flags="-kthread lthread $ax_pthread_flags"
+ ;;
- hpux*)
+ hpux*)
- # From the cc(1) man page: "[-mt] Sets various -D flags to enable
- # multi-threading and also sets -lpthread."
+ # From the cc(1) man page: "[-mt] Sets various -D flags to enable
+ # multi-threading and also sets -lpthread."
- ax_pthread_flags="-mt -pthread pthread $ax_pthread_flags"
- ;;
+ ax_pthread_flags="-mt -pthread pthread $ax_pthread_flags"
+ ;;
- openedition*)
+ openedition*)
- # IBM z/OS requires a feature-test macro to be defined in order to
- # enable POSIX threads at all, so give the user a hint if this is
- # not set. (We don't define these ourselves, as they can affect
- # other portions of the system API in unpredictable ways.)
+ # IBM z/OS requires a feature-test macro to be defined in order to
+ # enable POSIX threads at all, so give the user a hint if this is
+ # not set. (We don't define these ourselves, as they can affect
+ # other portions of the system API in unpredictable ways.)
- AC_EGREP_CPP([AX_PTHREAD_ZOS_MISSING],
- [
-# if !defined(_OPEN_THREADS) && !defined(_UNIX03_THREADS)
- AX_PTHREAD_ZOS_MISSING
-# endif
- ],
- [AC_MSG_WARN([IBM z/OS requires -D_OPEN_THREADS or -D_UNIX03_THREADS to enable pthreads support.])])
- ;;
+ AC_EGREP_CPP([AX_PTHREAD_ZOS_MISSING],
+ [
+# if !defined(_OPEN_THREADS) && !defined(_UNIX03_THREADS)
+ AX_PTHREAD_ZOS_MISSING
+# endif
+ ],
+ [AC_MSG_WARN([IBM z/OS requires -D_OPEN_THREADS or -D_UNIX03_THREADS to enable pthreads support.])])
+ ;;
- solaris*)
+ solaris*)
- # On Solaris (at least, for some versions), libc contains stubbed
- # (non-functional) versions of the pthreads routines, so link-based
- # tests will erroneously succeed. (N.B.: The stubs are missing
- # pthread_cleanup_push, or rather a function called by this macro,
- # so we could check for that, but who knows whether they'll stub
- # that too in a future libc.) So we'll check first for the
- # standard Solaris way of linking pthreads (-mt -lpthread).
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (N.B.: The stubs are missing
+ # pthread_cleanup_push, or rather a function called by this macro,
+ # so we could check for that, but who knows whether they'll stub
+ # that too in a future libc.) So we'll check first for the
+ # standard Solaris way of linking pthreads (-mt -lpthread).
- ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
- ;;
+ ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
+ ;;
esac
# GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC)
@@ -208,17 +208,17 @@ AS_IF([test "x$GCC" = "xyes"],
# correctly enabled
case $host_os in
- darwin* | hpux* | linux* | osf* | solaris*)
- ax_pthread_check_macro="_REENTRANT"
- ;;
+ darwin* | hpux* | linux* | osf* | solaris*)
+ ax_pthread_check_macro="_REENTRANT"
+ ;;
- aix* | freebsd*)
- ax_pthread_check_macro="_THREAD_SAFE"
- ;;
+ aix*)
+ ax_pthread_check_macro="_THREAD_SAFE"
+ ;;
- *)
- ax_pthread_check_macro="--"
- ;;
+ *)
+ ax_pthread_check_macro="--"
+ ;;
esac
AS_IF([test "x$ax_pthread_check_macro" = "x--"],
[ax_pthread_check_cond=0],
@@ -231,13 +231,13 @@ AC_CACHE_CHECK([whether $CC is Clang],
[ax_cv_PTHREAD_CLANG=no
# Note that Autoconf sets GCC=yes for Clang as well as GCC
if test "x$GCC" = "xyes"; then
- AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
- [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
-# if defined(__clang__) && defined(__llvm__)
- AX_PTHREAD_CC_IS_CLANG
-# endif
- ],
- [ax_cv_PTHREAD_CLANG=yes])
+ AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
+ [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
+# if defined(__clang__) && defined(__llvm__)
+ AX_PTHREAD_CC_IS_CLANG
+# endif
+ ],
+ [ax_cv_PTHREAD_CLANG=yes])
fi
])
ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
@@ -249,222 +249,222 @@ ax_pthread_clang_warning=no
if test "x$ax_pthread_clang" = "xyes"; then
- # Clang takes -pthread; it has never supported any other flag
-
- # (Note 1: This will need to be revisited if a system that Clang
- # supports has POSIX threads in a separate library. This tends not
- # to be the way of modern systems, but it's conceivable.)
-
- # (Note 2: On some systems, notably Darwin, -pthread is not needed
- # to get POSIX threads support; the API is always present and
- # active. We could reasonably leave PTHREAD_CFLAGS empty. But
- # -pthread does define _REENTRANT, and while the Darwin headers
- # ignore this macro, third-party headers might not.)
-
- PTHREAD_CFLAGS="-pthread"
- PTHREAD_LIBS=
-
- ax_pthread_ok=yes
-
- # However, older versions of Clang make a point of warning the user
- # that, in an invocation where only linking and no compilation is
- # taking place, the -pthread option has no effect ("argument unused
- # during compilation"). They expect -pthread to be passed in only
- # when source code is being compiled.
- #
- # Problem is, this is at odds with the way Automake and most other
- # C build frameworks function, which is that the same flags used in
- # compilation (CFLAGS) are also used in linking. Many systems
- # supported by AX_PTHREAD require exactly this for POSIX threads
- # support, and in fact it is often not straightforward to specify a
- # flag that is used only in the compilation phase and not in
- # linking. Such a scenario is extremely rare in practice.
- #
- # Even though use of the -pthread flag in linking would only print
- # a warning, this can be a nuisance for well-run software projects
- # that build with -Werror. So if the active version of Clang has
- # this misfeature, we search for an option to squash it.
-
- AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread],
- [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG],
- [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
- # Create an alternate version of $ac_link that compiles and
- # links in two steps (.c -> .o, .o -> exe) instead of one
- # (.c -> exe), because the warning occurs only in the second
- # step
- ax_pthread_save_ac_link="$ac_link"
- ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
- ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
- ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
- ax_pthread_save_CFLAGS="$CFLAGS"
- for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
- AS_IF([test "x$ax_pthread_try" = "xunknown"], [break])
- CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
- ac_link="$ax_pthread_save_ac_link"
- AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
- [ac_link="$ax_pthread_2step_ac_link"
- AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
- [break])
- ])
- done
- ac_link="$ax_pthread_save_ac_link"
- CFLAGS="$ax_pthread_save_CFLAGS"
- AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no])
- ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
- ])
-
- case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
- no | unknown) ;;
- *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
- esac
+ # Clang takes -pthread; it has never supported any other flag
+
+ # (Note 1: This will need to be revisited if a system that Clang
+ # supports has POSIX threads in a separate library. This tends not
+ # to be the way of modern systems, but it's conceivable.)
+
+ # (Note 2: On some systems, notably Darwin, -pthread is not needed
+ # to get POSIX threads support; the API is always present and
+ # active. We could reasonably leave PTHREAD_CFLAGS empty. But
+ # -pthread does define _REENTRANT, and while the Darwin headers
+ # ignore this macro, third-party headers might not.)
+
+ PTHREAD_CFLAGS="-pthread"
+ PTHREAD_LIBS=
+
+ ax_pthread_ok=yes
+
+ # However, older versions of Clang make a point of warning the user
+ # that, in an invocation where only linking and no compilation is
+ # taking place, the -pthread option has no effect ("argument unused
+ # during compilation"). They expect -pthread to be passed in only
+ # when source code is being compiled.
+ #
+ # Problem is, this is at odds with the way Automake and most other
+ # C build frameworks function, which is that the same flags used in
+ # compilation (CFLAGS) are also used in linking. Many systems
+ # supported by AX_PTHREAD require exactly this for POSIX threads
+ # support, and in fact it is often not straightforward to specify a
+ # flag that is used only in the compilation phase and not in
+ # linking. Such a scenario is extremely rare in practice.
+ #
+ # Even though use of the -pthread flag in linking would only print
+ # a warning, this can be a nuisance for well-run software projects
+ # that build with -Werror. So if the active version of Clang has
+ # this misfeature, we search for an option to squash it.
+
+ AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
+ # Create an alternate version of $ac_link that compiles and
+ # links in two steps (.c -> .o, .o -> exe) instead of one
+ # (.c -> exe), because the warning occurs only in the second
+ # step
+ ax_pthread_save_ac_link="$ac_link"
+ ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
+ ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+ ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
+ AS_IF([test "x$ax_pthread_try" = "xunknown"], [break])
+ CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
+ ac_link="$ax_pthread_save_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [ac_link="$ax_pthread_2step_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [break])
+ ])
+ done
+ ac_link="$ax_pthread_save_ac_link"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no])
+ ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
+ ])
+
+ case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
+ no | unknown) ;;
+ *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
+ esac
fi # $ax_pthread_clang = yes
if test "x$ax_pthread_ok" = "xno"; then
for ax_pthread_try_flag in $ax_pthread_flags; do
- case $ax_pthread_try_flag in
- none)
- AC_MSG_CHECKING([whether pthreads work without any flags])
- ;;
-
- -mt,pthread)
- AC_MSG_CHECKING([whether pthreads work with -mt -lpthread])
- PTHREAD_CFLAGS="-mt"
- PTHREAD_LIBS="-lpthread"
- ;;
-
- -*)
- AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
- PTHREAD_CFLAGS="$ax_pthread_try_flag"
- ;;
-
- pthread-config)
- AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
- AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
- PTHREAD_CFLAGS="`pthread-config --cflags`"
- PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
- ;;
-
- *)
- AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
- PTHREAD_LIBS="-l$ax_pthread_try_flag"
- ;;
- esac
-
- ax_pthread_save_CFLAGS="$CFLAGS"
- ax_pthread_save_LIBS="$LIBS"
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- LIBS="$PTHREAD_LIBS $LIBS"
-
- # Check for various functions. We must include pthread.h,
- # since some functions may be macros. (On the Sequent, we
- # need a special flag -Kthread to make this header compile.)
- # We check for pthread_join because it is in -lpthread on IRIX
- # while pthread_create is in libc. We check for pthread_attr_init
- # due to DEC craziness with -lpthreads. We check for
- # pthread_cleanup_push because it is one of the few pthread
- # functions on Solaris that doesn't have a non-functional libc stub.
- # We try pthread_create on general principles.
-
- AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
-# if $ax_pthread_check_cond
-# error "$ax_pthread_check_macro must be defined"
-# endif
- static void routine(void *a) { a = 0; }
- static void *start_routine(void *a) { return a; }],
- [pthread_t th; pthread_attr_t attr;
- pthread_create(&th, 0, start_routine, 0);
- pthread_join(th, 0);
- pthread_attr_init(&attr);
- pthread_cleanup_push(routine, 0);
- pthread_cleanup_pop(0) /* ; */])],
- [ax_pthread_ok=yes],
- [])
-
- CFLAGS="$ax_pthread_save_CFLAGS"
- LIBS="$ax_pthread_save_LIBS"
-
- AC_MSG_RESULT([$ax_pthread_ok])
- AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
-
- PTHREAD_LIBS=""
- PTHREAD_CFLAGS=""
+ case $ax_pthread_try_flag in
+ none)
+ AC_MSG_CHECKING([whether pthreads work without any flags])
+ ;;
+
+ -mt,pthread)
+ AC_MSG_CHECKING([whether pthreads work with -mt -lpthread])
+ PTHREAD_CFLAGS="-mt"
+ PTHREAD_LIBS="-lpthread"
+ ;;
+
+ -*)
+ AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
+ PTHREAD_CFLAGS="$ax_pthread_try_flag"
+ ;;
+
+ pthread-config)
+ AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
+ AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
+ PTHREAD_LIBS="-l$ax_pthread_try_flag"
+ ;;
+ esac
+
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+# if $ax_pthread_check_cond
+# error "$ax_pthread_check_macro must be defined"
+# endif
+ static void routine(void *a) { a = 0; }
+ static void *start_routine(void *a) { return a; }],
+ [pthread_t th; pthread_attr_t attr;
+ pthread_create(&th, 0, start_routine, 0);
+ pthread_join(th, 0);
+ pthread_attr_init(&attr);
+ pthread_cleanup_push(routine, 0);
+ pthread_cleanup_pop(0) /* ; */])],
+ [ax_pthread_ok=yes],
+ [])
+
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+
+ AC_MSG_RESULT([$ax_pthread_ok])
+ AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
done
fi
# Various other checks:
if test "x$ax_pthread_ok" = "xyes"; then
- ax_pthread_save_CFLAGS="$CFLAGS"
- ax_pthread_save_LIBS="$LIBS"
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- LIBS="$PTHREAD_LIBS $LIBS"
-
- # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
- AC_CACHE_CHECK([for joinable pthread attribute],
- [ax_cv_PTHREAD_JOINABLE_ATTR],
- [ax_cv_PTHREAD_JOINABLE_ATTR=unknown
- for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
- AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
- [int attr = $ax_pthread_attr; return attr /* ; */])],
- [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break],
- [])
- done
- ])
- AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
- test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
- test "x$ax_pthread_joinable_attr_defined" != "xyes"],
- [AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE],
- [$ax_cv_PTHREAD_JOINABLE_ATTR],
- [Define to necessary symbol if this constant
- uses a non-standard name on your system.])
- ax_pthread_joinable_attr_defined=yes
- ])
-
- AC_CACHE_CHECK([whether more special flags are required for pthreads],
- [ax_cv_PTHREAD_SPECIAL_FLAGS],
- [ax_cv_PTHREAD_SPECIAL_FLAGS=no
- case $host_os in
- solaris*)
- ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
- ;;
- esac
- ])
- AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \
- test "x$ax_pthread_special_flags_added" != "xyes"],
- [PTHREAD_CFLAGS="$ax_cv_PTHREAD_SPECIAL_FLAGS $PTHREAD_CFLAGS"
- ax_pthread_special_flags_added=yes])
-
- AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
- [ax_cv_PTHREAD_PRIO_INHERIT],
- [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
- [[int i = PTHREAD_PRIO_INHERIT;]])],
- [ax_cv_PTHREAD_PRIO_INHERIT=yes],
- [ax_cv_PTHREAD_PRIO_INHERIT=no])
- ])
- AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
- test "x$ax_pthread_prio_inherit_defined" != "xyes"],
- [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])
- ax_pthread_prio_inherit_defined=yes
- ])
-
- CFLAGS="$ax_pthread_save_CFLAGS"
- LIBS="$ax_pthread_save_LIBS"
-
- # More AIX lossage: compile with *_r variant
- if test "x$GCC" != "xyes"; then
- case $host_os in
- aix*)
- AS_CASE(["x/$CC"],
- [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
- [#handle absolute path differently from PATH based program lookup
- AS_CASE(["x$CC"],
- [x/*],
- [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
- [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
- ;;
- esac
- fi
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ AC_CACHE_CHECK([for joinable pthread attribute],
+ [ax_cv_PTHREAD_JOINABLE_ATTR],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=unknown
+ for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
+ [int attr = $ax_pthread_attr; return attr /* ; */])],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break],
+ [])
+ done
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
+ test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
+ test "x$ax_pthread_joinable_attr_defined" != "xyes"],
+ [AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE],
+ [$ax_cv_PTHREAD_JOINABLE_ATTR],
+ [Define to necessary symbol if this constant
+ uses a non-standard name on your system.])
+ ax_pthread_joinable_attr_defined=yes
+ ])
+
+ AC_CACHE_CHECK([whether more special flags are required for pthreads],
+ [ax_cv_PTHREAD_SPECIAL_FLAGS],
+ [ax_cv_PTHREAD_SPECIAL_FLAGS=no
+ case $host_os in
+ solaris*)
+ ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
+ ;;
+ esac
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \
+ test "x$ax_pthread_special_flags_added" != "xyes"],
+ [PTHREAD_CFLAGS="$ax_cv_PTHREAD_SPECIAL_FLAGS $PTHREAD_CFLAGS"
+ ax_pthread_special_flags_added=yes])
+
+ AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
+ [ax_cv_PTHREAD_PRIO_INHERIT],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
+ [[int i = PTHREAD_PRIO_INHERIT;]])],
+ [ax_cv_PTHREAD_PRIO_INHERIT=yes],
+ [ax_cv_PTHREAD_PRIO_INHERIT=no])
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
+ test "x$ax_pthread_prio_inherit_defined" != "xyes"],
+ [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])
+ ax_pthread_prio_inherit_defined=yes
+ ])
+
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+
+ # More AIX lossage: compile with *_r variant
+ if test "x$GCC" != "xyes"; then
+ case $host_os in
+ aix*)
+ AS_CASE(["x/$CC"],
+ [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
+ [#handle absolute path differently from PATH based program lookup
+ AS_CASE(["x$CC"],
+ [x/*],
+ [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
+ [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+ ;;
+ esac
+ fi
fi
test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
@@ -475,11 +475,11 @@ AC_SUBST([PTHREAD_CC])
# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
if test "x$ax_pthread_ok" = "xyes"; then
- ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
- :
+ ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
+ :
else
- ax_pthread_ok=no
- $2
+ ax_pthread_ok=no
+ $2
fi
AC_LANG_POP
])dnl AX_PTHREAD
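
The re-indented probe above boils down to compiling a tiny pthread program with each candidate flag until one links. A hand-rolled shell sketch of that check for a single flag, with a test program modeled on the one embedded in the macro (not a substitute for running the macro itself):

    # rough equivalent of the macro's link test for -pthread
    printf '%s\n' '#include <pthread.h>' \
      'static void *start_routine(void *a) { return a; }' \
      'int main(void) { pthread_t th; pthread_create(&th, 0, start_routine, 0); pthread_join(th, 0); return 0; }' > conftest.c
    cc -pthread conftest.c -o conftest && echo "pthreads work with -pthread"
    rm -f conftest conftest.c
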
diff --git a/configure.ac b/configure.ac
index a1c04daf53..97af58bd7c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -75,6 +75,7 @@ AC_PATH_PROG(XGETTEXT,xgettext)
AC_PATH_PROG(HEXDUMP,hexdump)
AC_PATH_TOOL(READELF, readelf)
AC_PATH_TOOL(CPPFILT, c++filt)
+AC_PATH_TOOL(OBJCOPY, objcopy)
AC_ARG_VAR(PYTHONPATH, Augments the default search path for python module files)
@@ -1060,6 +1061,8 @@ AC_SUBST(MINIUPNPC_LIBS)
AC_CONFIG_FILES([Makefile src/Makefile share/setup.nsi share/qt/Info.plist src/test/buildenv.py])
AC_CONFIG_FILES([qa/pull-tester/run-bitcoind-for-test.sh],[chmod +x qa/pull-tester/run-bitcoind-for-test.sh])
AC_CONFIG_FILES([qa/pull-tester/tests_config.py],[chmod +x qa/pull-tester/tests_config.py])
+AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh])
+AC_CONFIG_LINKS([qa/pull-tester/rpc-tests.py:qa/pull-tester/rpc-tests.py])
dnl boost's m4 checks do something really nasty: they export these vars. As a
dnl result, they leak into secp256k1's configure and crazy things happen.
diff --git a/contrib/devtools/split-debug.sh.in b/contrib/devtools/split-debug.sh.in
new file mode 100644
index 0000000000..deda49cc54
--- /dev/null
+++ b/contrib/devtools/split-debug.sh.in
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+if [ $# -ne 3 ];
+ then echo "usage: $0 <input> <stripped-binary> <debug-binary>"
+fi
+
+@OBJCOPY@ --enable-deterministic-archives -p --only-keep-debug $1 $3
+@OBJCOPY@ --enable-deterministic-archives -p --strip-debug $1 $2
+@STRIP@ --enable-deterministic-archives -p -s $2
+@OBJCOPY@ --enable-deterministic-archives -p --add-gnu-debuglink=$3 $2
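
The new helper copies the debug info into a detached file, strips the binary, and ties the two back together via a `.gnu_debuglink` section; `@OBJCOPY@` and `@STRIP@` are filled in by configure (see the `AC_CONFIG_FILES` line added to configure.ac). A hedged usage sketch with illustrative paths, matching how the gitian descriptor below invokes it with input and stripped output as the same file; note the usage check above only prints a message, a stricter variant would also `exit 1`:

    # split bitcoind into a stripped binary plus detached symbols (paths are examples)
    contrib/devtools/split-debug.sh src/bitcoind src/bitcoind src/bitcoind.dbg
    # gdb and similar tools can then locate src/bitcoind.dbg via the .gnu_debuglink section
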
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index cfd254cf15..a2788c9d76 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -5,9 +5,19 @@ suites:
- "trusty"
architectures:
- "amd64"
-packages:
+packages:
- "curl"
-- "g++-multilib"
+- "g++-aarch64-linux-gnu"
+- "g++-4.8-aarch64-linux-gnu"
+- "gcc-4.8-aarch64-linux-gnu"
+- "binutils-aarch64-linux-gnu"
+- "g++-arm-linux-gnueabihf"
+- "g++-4.8-arm-linux-gnueabihf"
+- "gcc-4.8-arm-linux-gnueabihf"
+- "binutils-arm-linux-gnueabihf"
+- "g++-4.8-multilib"
+- "gcc-4.8-multilib"
+- "binutils-gold"
- "git-core"
- "pkg-config"
- "autoconf"
@@ -15,20 +25,25 @@ packages:
- "automake"
- "faketime"
- "bsdmainutils"
-- "binutils-gold"
- "ca-certificates"
- "python"
-reference_datetime: "2016-01-01 00:00:00"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
files: []
script: |
+
+ #unlock sudo
+ echo "ubuntu" | sudo -S true
+
+ sudo mkdir -p /usr/include/i386-linux-gnu/
+ sudo ln -s /usr/include/x86_64-linux-gnu/asm /usr/include/i386-linux-gnu/asm
+
WRAP_DIR=$HOME/wrapped
- HOSTS="i686-pc-linux-gnu x86_64-unknown-linux-gnu"
+ HOSTS="i686-pc-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu"
CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests"
FAKETIME_HOST_PROGS=""
- FAKETIME_PROGS="date ar ranlib nm strip objcopy"
+ FAKETIME_PROGS="date ar ranlib nm"
HOST_CFLAGS="-O2 -g"
HOST_CXXFLAGS="-O2 -g"
HOST_LDFLAGS=-static-libstdc++
@@ -45,27 +60,34 @@ script: |
mkdir -p ${BASE_CACHE} ${SOURCES_PATH}
fi
- # Create global faketime wrappers
+ function create_global_faketime_wrappers {
for prog in ${FAKETIME_PROGS}; do
echo '#!/bin/bash' > ${WRAP_DIR}/${prog}
echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${prog}
chmod +x ${WRAP_DIR}/${prog}
done
+ }
- # Create per-host faketime wrappers
+ function create_per-host_faketime_wrappers {
for i in $HOSTS; do
for prog in ${FAKETIME_HOST_PROGS}; do
echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog}
echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog}
chmod +x ${WRAP_DIR}/${i}-${prog}
done
done
+ }
+
+ # Faketime for depends so intermediate results are comparable
+ export PATH_orig=${PATH}
+ create_global_faketime_wrappers "2000-01-01 12:00:00"
+ create_per-host_faketime_wrappers "2000-01-01 12:00:00"
export PATH=${WRAP_DIR}:${PATH}
cd bitcoin
@@ -75,6 +97,12 @@ script: |
make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
done
+ # Faketime for binaries
+ export PATH=${PATH_orig}
+ create_global_faketime_wrappers "${REFERENCE_DATETIME}"
+ create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
+ export PATH=${WRAP_DIR}:${PATH}
+
# Create the release tarball using (arbitrarily) the first host
./autogen.sh
CONFIG_SITE=${BASEPREFIX}/`echo "${HOSTS}" | awk '{print $1;}'`/share/config.site ./configure --prefix=/
@@ -101,14 +129,24 @@ script: |
CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}"
make ${MAKEOPTS}
make ${MAKEOPTS} -C src check-security
- make ${MAKEOPTS} -C src check-symbols
+
+ #TODO: This is a quick hack that disables symbol checking for arm.
+ # Instead, we should investigate why these are popping up.
+ # For aarch64, we'll need to bump up the min GLIBC version, as the abi
+ # support wasn't introduced until 2.17.
+ case $i in
+ aarch64-*) : ;;
+ arm-*) : ;;
+ *) make ${MAKEOPTS} -C src check-symbols ;;
+ esac
+
make install DESTDIR=${INSTALLPATH}
cd installed
find . -name "lib*.la" -delete
find . -name "lib*.a" -delete
rm -rf ${DISTNAME}/lib/pkgconfig
- find ${DISTNAME}/bin -type f -executable -exec objcopy --only-keep-debug {} {}.dbg \; -exec strip -s {} \; -exec objcopy --add-gnu-debuglink={}.dbg {} \;
- find ${DISTNAME}/lib -type f -exec objcopy --only-keep-debug {} {}.dbg \; -exec strip -s {} \; -exec objcopy --add-gnu-debuglink={}.dbg {} \;
+ find ${DISTNAME}/bin -type f -executable -exec ../contrib/devtools/split-debug.sh {} {} {}.dbg \;
+ find ${DISTNAME}/lib -type f -exec ../contrib/devtools/split-debug.sh {} {} {}.dbg \;
find ${DISTNAME} -not -name "*.dbg" | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz
find ${DISTNAME} -name "*.dbg" | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}-debug.tar.gz
cd ../../
@@ -116,8 +154,3 @@ script: |
done
mkdir -p $OUTDIR/src
mv $SOURCEDIST $OUTDIR/src
- mv ${OUTDIR}/${DISTNAME}-x86_64-*-debug.tar.gz ${OUTDIR}/${DISTNAME}-linux64-debug.tar.gz
- mv ${OUTDIR}/${DISTNAME}-i686-*-debug.tar.gz ${OUTDIR}/${DISTNAME}-linux32-debug.tar.gz
- mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-linux64.tar.gz
- mv ${OUTDIR}/${DISTNAME}-i686-*.tar.gz ${OUTDIR}/${DISTNAME}-linux32.tar.gz
-
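
The two wrapper-generation passes above first pin the tools to a fixed "2000-01-01 12:00:00" timestamp so cached depends output stays comparable, then rewrite the wrappers with ${REFERENCE_DATETIME} before the release binaries are built. Expanded, each generated shim is only a few lines; a depends-phase wrapper for `ar` would look roughly like this (the wrapped path is a placeholder, since ${WRAP_DIR} is expanded at generation time):

    #!/bin/bash
    REAL=`which -a ar | grep -v /path/to/wrapped/ar | head -1`
    export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1
    export FAKETIME="2000-01-01 12:00:00"
    $REAL $@
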
diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml
index fac61aa3de..f6e9414ab1 100644
--- a/contrib/gitian-descriptors/gitian-osx-signer.yml
+++ b/contrib/gitian-descriptors/gitian-osx-signer.yml
@@ -6,7 +6,6 @@ architectures:
- "amd64"
packages:
- "faketime"
-reference_datetime: "2016-01-01 00:00:00"
remotes:
- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git"
"dir": "signature"
@@ -34,5 +33,5 @@ script: |
tar -xf ${UNSIGNED}
OSX_VOLNAME="$(cat osx_volname)"
./detached-sig-apply.sh ${UNSIGNED} signature/osx
- ${WRAP_DIR}/genisoimage -no-cache-inodes -D -l -probe -V "${OSX_VOLNAME}" -no-pad -r -apple -o uncompressed.dmg signed-app
+ ${WRAP_DIR}/genisoimage -no-cache-inodes -D -l -probe -V "${OSX_VOLNAME}" -no-pad -r -dir-mode 0755 -apple -o uncompressed.dmg signed-app
${WRAP_DIR}/dmg dmg uncompressed.dmg ${OUTDIR}/${SIGNED}
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
index b37b35d76b..536fcfb105 100644
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
@@ -5,7 +5,7 @@ suites:
- "trusty"
architectures:
- "amd64"
-packages:
+packages:
- "ca-certificates"
- "curl"
- "g++"
@@ -27,7 +27,6 @@ packages:
- "python-dev"
- "python-setuptools"
- "fonts-tuffy"
-reference_datetime: "2016-01-01 00:00:00"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
@@ -54,27 +53,34 @@ script: |
export ZERO_AR_DATE=1
- # Create global faketime wrappers
+ function create_global_faketime_wrappers {
for prog in ${FAKETIME_PROGS}; do
echo '#!/bin/bash' > ${WRAP_DIR}/${prog}
echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${prog}
chmod +x ${WRAP_DIR}/${prog}
done
+ }
- # Create per-host faketime wrappers
+ function create_per-host_faketime_wrappers {
for i in $HOSTS; do
for prog in ${FAKETIME_HOST_PROGS}; do
echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog}
echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog}
chmod +x ${WRAP_DIR}/${i}-${prog}
done
done
+ }
+
+ # Faketime for depends so intermediate results are comparable
+ export PATH_orig=${PATH}
+ create_global_faketime_wrappers "2000-01-01 12:00:00"
+ create_per-host_faketime_wrappers "2000-01-01 12:00:00"
export PATH=${WRAP_DIR}:${PATH}
cd bitcoin
@@ -88,6 +94,12 @@ script: |
make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
done
+ # Faketime for binaries
+ export PATH=${PATH_orig}
+ create_global_faketime_wrappers "${REFERENCE_DATETIME}"
+ create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
+ export PATH=${WRAP_DIR}:${PATH}
+
# Create the release tarball using (arbitrarily) the first host
./autogen.sh
CONFIG_SITE=${BASEPREFIX}/`echo "${HOSTS}" | awk '{print $1;}'`/share/config.site ./configure --prefix=/
diff --git a/contrib/gitian-descriptors/gitian-win-signer.yml b/contrib/gitian-descriptors/gitian-win-signer.yml
index 88edb96627..3c1e0214a0 100644
--- a/contrib/gitian-descriptors/gitian-win-signer.yml
+++ b/contrib/gitian-descriptors/gitian-win-signer.yml
@@ -7,7 +7,6 @@ architectures:
packages:
- "libssl-dev"
- "autoconf"
-reference_datetime: "2016-01-01 00:00:00"
remotes:
- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git"
"dir": "signature"
diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml
index 65f76e8df8..32b57b3160 100644
--- a/contrib/gitian-descriptors/gitian-win.yml
+++ b/contrib/gitian-descriptors/gitian-win.yml
@@ -5,7 +5,7 @@ suites:
- "trusty"
architectures:
- "amd64"
-packages:
+packages:
- "curl"
- "g++"
- "git-core"
@@ -21,7 +21,6 @@ packages:
- "zip"
- "ca-certificates"
- "python"
-reference_datetime: "2016-01-01 00:00:00"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
@@ -47,29 +46,31 @@ script: |
mkdir -p ${BASE_CACHE} ${SOURCES_PATH}
fi
- # Create global faketime wrappers
+ function create_global_faketime_wrappers {
for prog in ${FAKETIME_PROGS}; do
echo '#!/bin/bash' > ${WRAP_DIR}/${prog}
echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${prog}
chmod +x ${WRAP_DIR}/${prog}
done
+ }
- # Create per-host faketime wrappers
+ function create_per-host_faketime_wrappers {
for i in $HOSTS; do
for prog in ${FAKETIME_HOST_PROGS}; do
echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog}
echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog}
chmod +x ${WRAP_DIR}/${i}-${prog}
done
done
+ }
- # Create per-host linker wrapper
+ function create_per-host_linker_wrapper {
# This is only needed for trusty, as the mingw linker leaks a few bytes of
# heap, causing non-determinism. See discussion in https://github.com/bitcoin/bitcoin/pull/6900
for i in $HOSTS; do
@@ -85,13 +86,19 @@ script: |
echo '#!/bin/bash' > ${WRAP_DIR}/${i}-${prog}
echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1' >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${i}-${prog}
+ echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
echo "export COMPILER_PATH=${WRAP_DIR}/${i}" >> ${WRAP_DIR}/${i}-${prog}
echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog}
chmod +x ${WRAP_DIR}/${i}-${prog}
done
done
+ }
+ # Faketime for depends so intermediate results are comparable
+ export PATH_orig=${PATH}
+ create_global_faketime_wrappers "2000-01-01 12:00:00"
+ create_per-host_faketime_wrappers "2000-01-01 12:00:00"
+ create_per-host_linker_wrapper "2000-01-01 12:00:00"
export PATH=${WRAP_DIR}:${PATH}
cd bitcoin
@@ -101,6 +108,13 @@ script: |
make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
done
+ # Faketime for binaries
+ export PATH=${PATH_orig}
+ create_global_faketime_wrappers "${REFERENCE_DATETIME}"
+ create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
+ create_per-host_linker_wrapper "${REFERENCE_DATETIME}"
+ export PATH=${WRAP_DIR}:${PATH}
+
# Create the release tarball using (arbitrarily) the first host
./autogen.sh
CONFIG_SITE=${BASEPREFIX}/`echo "${HOSTS}" | awk '{print $1;}'`/share/config.site ./configure --prefix=/
diff --git a/contrib/gitian-keys/luke-jr-key.pgp b/contrib/gitian-keys/luke-jr-key.pgp
index 4406e6d5be..a2d34e75e1 100644
--- a/contrib/gitian-keys/luke-jr-key.pgp
+++ b/contrib/gitian-keys/luke-jr-key.pgp
Binary files differ
diff --git a/depends/Makefile b/depends/Makefile
index 3ddfc85a45..dedb0674cf 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -89,13 +89,17 @@ $(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null)
$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null)
$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null)
-qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages)
-qt_native_packages_$(NO_QT) = $(qt_native_packages)
+qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages)
wallet_packages_$(NO_WALLET) = $(wallet_packages)
upnp_packages_$(NO_UPNP) = $(upnp_packages)
packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(qt_packages_) $(wallet_packages_) $(upnp_packages_)
-native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages) $(qt_native_packages_)
+native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages)
+
+ifneq ($(qt_packages_),)
+native_packages += $(qt_native_packages)
+endif
+
all_packages = $(packages) $(native_packages)
meta_depends = Makefile funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk
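
With this change the Qt native packages are only pulled in when some Qt package is actually selected, so a no-GUI depends build skips them entirely. A sketch, with an illustrative host triplet:

    # NO_QT=1 leaves qt_packages_ empty, so native_protobuf is no longer built
    make -C depends HOST=arm-linux-gnueabihf NO_QT=1
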
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index 200d57314e..6c9876c2c7 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -14,7 +14,8 @@ endef
define $(package)_preprocess_cmds
sed -i.old 's/__atomic_compare_exchange/__atomic_compare_exchange_db/' dbinc/atomic.h && \
- sed -i.old 's/atomic_init/atomic_init_db/' dbinc/atomic.h mp/mp_region.c mp/mp_mvcc.c mp/mp_fget.c mutex/mut_method.c mutex/mut_tas.c
+ sed -i.old 's/atomic_init/atomic_init_db/' dbinc/atomic.h mp/mp_region.c mp/mp_mvcc.c mp/mp_fget.c mutex/mut_method.c mutex/mut_tas.c && \
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub dist
endef
define $(package)_config_cmds
diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk
index 59b009b66a..ac43ef4a2e 100644
--- a/depends/packages/packages.mk
+++ b/depends/packages/packages.mk
@@ -6,7 +6,9 @@ native_packages := native_ccache native_comparisontool
qt_native_packages = native_protobuf
qt_packages = qrencode protobuf
-qt_linux_packages= qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libX11 xextproto libXext xtrans
+qt_x86_64_linux_packages:=qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libX11 xextproto libXext xtrans
+qt_i686_linux_packages:=$(qt_x86_64_linux_packages)
+
qt_darwin_packages=qt
qt_mingw32_packages=qt
diff --git a/doc/bips.md b/doc/bips.md
index b4b62e781e..1ec03d2fb1 100644
--- a/doc/bips.md
+++ b/doc/bips.md
@@ -10,6 +10,7 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.13.0**):
* [`BIP 23`](https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki): Some extensions to GBT have been implemented since **v0.10.0rc1**, including longpolling and block proposals ([PR #1816](https://github.com/bitcoin/bitcoin/pull/1816)).
* [`BIP 30`](https://github.com/bitcoin/bips/blob/master/bip-0030.mediawiki): The evaluation rules to forbid creating new transactions with the same txid as previous not-fully-spent transactions were implemented since **v0.6.0**, and the rule took effect on *March 15th 2012* ([PR #915](https://github.com/bitcoin/bitcoin/pull/915)).
* [`BIP 31`](https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki): The 'pong' protocol message (and the protocol version bump to 60001) has been implemented since **v0.6.1** ([PR #1081](https://github.com/bitcoin/bitcoin/pull/1081)).
+* [`BIP 32`](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki): Hierarchical Deterministic Wallets has been implemented since **v0.13.0** ([PR #8035](https://github.com/bitcoin/bitcoin/pull/8035)).
* [`BIP 34`](https://github.com/bitcoin/bips/blob/master/bip-0034.mediawiki): The rule that requires blocks to contain their height (number) in the coinbase input, and the introduction of version 2 blocks has been implemented since **v0.7.0**. The rule took effect for version 2 blocks as of *block 224413* (March 5th 2013), and version 1 blocks are no longer allowed since *block 227931* (March 25th 2013) ([PR #1526](https://github.com/bitcoin/bitcoin/pull/1526)).
* [`BIP 35`](https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki): The 'mempool' protocol message (and the protocol version bump to 60002) has been implemented since **v0.7.0** ([PR #1641](https://github.com/bitcoin/bitcoin/pull/1641)).
* [`BIP 37`](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki): The bloom filtering for transaction relaying, partial merkle trees for blocks, and the protocol version bump to 70001 (enabling low-bandwidth SPV clients) has been implemented since **v0.8.0** ([PR #1795](https://github.com/bitcoin/bitcoin/pull/1795)).
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index e40b73ffa7..95c46b05fe 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -265,7 +265,7 @@ General C++
the `.h` to the `.cpp` should not result in build errors
- Use the RAII (Resource Acquisition Is Initialization) paradigm where possible. For example by using
- `scoped_pointer` for allocations in a function.
+ `unique_ptr` for allocations in a function.
- *Rationale*: This avoids memory and resource leaks, and ensures exception safety
@@ -284,10 +284,9 @@ C++ data structures
- *Rationale*: Behavior is undefined. In C++ parlor this means "may reformat
the universe", in practice this has resulted in at least one hard-to-debug crash bug
-- Watch out for vector out-of-bounds exceptions. `&vch[0]` is illegal for an
- empty vector, `&vch[vch.size()]` is always illegal. Use `begin_ptr(vch)` and
- `end_ptr(vch)` to get the begin and end pointer instead (defined in
- `serialize.h`)
+- Watch out for out-of-bounds vector access. `&vch[vch.size()]` is illegal,
+ including `&vch[0]` for an empty vector. Use `vch.data()` and `vch.data() +
+ vch.size()` instead.
- Vector bounds checking is only enabled in debug mode. Do not rely on it
@@ -381,3 +380,51 @@ GUI
- *Rationale*: Model classes pass through events and data from the core, they
should not interact with the user. That's where View classes come in. The converse also
holds: try to not directly access core data structures from Views.
+
+Git and github tips
+---------------------
+
+- For resolving merge/rebase conflicts, it can be useful to enable diff3 style using
+ `git config merge.conflictstyle diff3`. Instead of
+
+ <<<
+ yours
+ ===
+ theirs
+ >>>
+
+ you will see
+
+ <<<
+ yours
+ |||
+ original
+ ===
+ theirs
+ >>>
+
+ This may make it much clearer what caused the conflict. In this style, you can often just look
+ at what changed between *original* and *theirs*, and mechanically apply that to *yours* (or the other way around).
+
+- When reviewing patches which change indentation in C++ files, use `git diff -w` and `git show -w`. This makes
+ the diff algorithm ignore whitespace changes. This feature is also available on github.com, by adding `?w=1`
+ at the end of any URL which shows a diff.
+
+- When reviewing patches that change symbol names in many places, use `git diff --word-diff`. This will instead
+ of showing the patch as deleted/added *lines*, show deleted/added *words*.
+
+- When reviewing patches that move code around, try using
+ `git diff --patience commit~:old/file.cpp commit:new/file/name.cpp`, and ignoring everything except the
+ moved body of code which should show up as neither `+` or `-` lines. In case it was not a pure move, this may
+ even work when combined with the `-w` or `--word-diff` options described above.
+
+- When looking at other's pull requests, it may make sense to add the following section to your `.git/config`
+ file:
+
+ [remote "upstream-pull"]
+ fetch = +refs/pull/*:refs/remotes/upstream-pull/*
+ url = git@github.com:bitcoin/bitcoin.git
+
+ This will add an `upstream-pull` remote to your git repository, which can be fetched using `git fetch --all`
+ or `git fetch upstream-pull`. Afterwards, you can use `upstream-pull/NUMBER/head` in arguments to `git show`,
+ `git checkout` and anywhere a commit id would be acceptable to see the changes from pull request NUMBER.
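
Putting the last tip together with the earlier ones, a typical review session might look like the following sketch (the pull request number is illustrative):

    git fetch upstream-pull                  # or: git fetch --all
    git checkout upstream-pull/8035/head     # check out pull request 8035 (detached HEAD)
    git show -w upstream-pull/8035/head      # view the top commit, ignoring whitespace changes
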
diff --git a/doc/gitian-building.md b/doc/gitian-building.md
index 791f209bb1..7796a5fc9c 100644
--- a/doc/gitian-building.md
+++ b/doc/gitian-building.md
@@ -1,7 +1,7 @@
Gitian building
================
-*Setup instructions for a Gitian build of Bitcoin using a Debian VM or physical system.*
+*Setup instructions for a Gitian build of Bitcoin Core using a Debian VM or physical system.*
Gitian is the deterministic build process that is used to build the Bitcoin
Core executables. It provides a way to be reasonably sure that the
@@ -26,7 +26,7 @@ Table of Contents
- [Installing Gitian](#installing-gitian)
- [Setting up the Gitian image](#setting-up-the-gitian-image)
- [Getting and building the inputs](#getting-and-building-the-inputs)
-- [Building Bitcoin](#building-bitcoin)
+- [Building Bitcoin Core](#building-bitcoin-core)
- [Building an alternative repository](#building-an-alternative-repository)
- [Signing externally](#signing-externally)
- [Uploading signatures](#uploading-signatures)
@@ -95,11 +95,11 @@ After creating the VM, we need to configure it.
- Click `Ok` twice to save.
-Get the [Debian 8.x net installer](http://cdimage.debian.org/debian-cd/8.4.0/amd64/iso-cd/debian-8.4.0-amd64-netinst.iso) (a more recent minor version should also work, see also [Debian Network installation](https://www.debian.org/CD/netinst/)).
+Get the [Debian 8.x net installer](http://cdimage.debian.org/debian-cd/8.5.0/amd64/iso-cd/debian-8.5.0-amd64-netinst.iso) (a more recent minor version should also work, see also [Debian Network installation](https://www.debian.org/CD/netinst/)).
This DVD image can be validated using a SHA256 hashing tool, for example on
Unixy OSes by entering the following in a terminal:
- echo "7a6b418e6a4ee3ca75dda04d79ed96c9e2c33bb0c703ca7e40c6374ab4590748 debian-8.4.0-amd64-netinst.iso" | sha256sum -c
+ echo "ad4e8c27c561ad8248d5ebc1d36eb172f884057bfeb2c22ead823f59fa8c3dff debian-8.5.0-amd64-netinst.iso" | sha256sum -c
# (must return OK)
Then start the VM. On the first launch you will be asked for a CD or DVD image. Choose the downloaded iso.
@@ -342,10 +342,10 @@ manual intervention. Also optionally follow the next step: 'Seed the Gitian sour
and offline git repositories' which will fetch the remaining files required for building
offline.
-Building Bitcoin
+Building Bitcoin Core
----------------
-To build Bitcoin (for Linux, OS X and Windows) just follow the steps under 'perform
+To build Bitcoin Core (for Linux, OS X and Windows) just follow the steps under 'perform
Gitian builds' in [doc/release-process.md](release-process.md#perform-gitian-builds) in the bitcoin repository.
This may take some time as it will build all the dependencies needed for each descriptor.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 0d457714d5..6cc05989db 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -85,6 +85,13 @@ The following outputs are affected by this change:
- REST `/rest/block/` (JSON format when including extended tx details)
- `bitcoin-tx -json`
+New mempool information RPC calls
+---------------------------------
+
+RPC calls have been added to output detailed statistics for individual mempool
+entries, as well as to calculate the in-mempool ancestors or descendants of a
+transaction: see `getmempoolentry`, `getmempoolancestors`, `getmempooldescendants`.
+
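As a rough sketch (an annotation, not part of the release notes), the new calls can be exercised through the Python test framework's RPC proxy; `node` and `txid` below are placeholders for a regtest node and a transaction currently in its mempool:

    def check_mempool_entry(node, txid):
        # detailed per-entry statistics for one mempool transaction
        entry = node.getmempoolentry(txid)
        # in-mempool ancestors/descendants; the second argument switches the
        # result from a list of txids to a txid -> entry mapping
        ancestors = node.getmempoolancestors(txid)
        descendants = node.getmempooldescendants(txid, True)
        assert txid not in descendants  # a transaction is not its own descendant
        return entry, ancestors, descendants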
### ZMQ
Each ZMQ notification now contains an up-counting sequence number that allows
@@ -117,6 +124,24 @@ feerate. [BIP 133](https://github.com/bitcoin/bips/blob/master/bip-0133.mediawik
### Wallet
+Hierarchical Deterministic Key Generation
+-----------------------------------------
+Newly created wallets will use hierarchical deterministic key generation
+according to BIP32 (keypath m/0'/0'/k').
+Existing wallets will still use traditional key generation.
+
+Backups of HD wallets, regardless of when they have been created, can
+therefore be used to re-generate all possible private keys, even the
+ones which had not yet been generated at the time of the backup.
+
+HD key generation for new wallets can be disabled by `-usehd=0`. Keep in
+mind that this flag only affects newly created wallets.
+You can't disable HD key generation once you have created an HD wallet.
+
+There is no distinction between internal (change) and external keys.
+
+[Pull request](https://github.com/bitcoin/bitcoin/pull/8035/files), [BIP 32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+
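As an illustrative sketch only (the paths are placeholders, not part of these notes), disabling HD generation for a brand-new regtest wallet could look like this; the flag does nothing for wallets that already exist:

    import os
    import subprocess

    datadir = "/tmp/hd-test"  # placeholder datadir
    os.makedirs(datadir, exist_ok=True)
    # the wallet created on this first start will use traditional key generation
    subprocess.check_call(["bitcoind", "-regtest", "-daemon",
                           "-datadir=" + datadir, "-usehd=0"])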
### GUI
### Tests
diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index f810f89a59..57a576f1c7 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -29,6 +29,7 @@ import subprocess
import tempfile
import re
+sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
@@ -37,7 +38,7 @@ if os.name == 'posix':
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
-RPC_TESTS_DIR = BUILDDIR + '/qa/rpc-tests/'
+RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
diff --git a/qa/pull-tester/tests_config.py.in b/qa/pull-tester/tests_config.py.in
index 2356b5200e..a0d0a3d98a 100644
--- a/qa/pull-tester/tests_config.py.in
+++ b/qa/pull-tester/tests_config.py.in
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+SRCDIR="@abs_top_srcdir@"
BUILDDIR="@abs_top_builddir@"
EXEEXT="@EXEEXT@"
diff --git a/qa/rpc-tests/fundrawtransaction.py b/qa/rpc-tests/fundrawtransaction.py
index 998f822afe..228574e671 100755
--- a/qa/rpc-tests/fundrawtransaction.py
+++ b/qa/rpc-tests/fundrawtransaction.py
@@ -58,7 +58,6 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
- self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@@ -552,7 +551,6 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
- self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
@@ -572,7 +570,6 @@ class RawTransactionsTest(BitcoinTestFramework):
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
- self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@@ -603,7 +600,6 @@ class RawTransactionsTest(BitcoinTestFramework):
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
- self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@@ -677,6 +673,15 @@ class RawTransactionsTest(BitcoinTestFramework):
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ #######################
+ # Test feeRate option #
+ #######################
+
+ # Make sure there is exactly one input so coin selection can't skew the result
+ assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress() : 1}
@@ -684,8 +689,9 @@ class RawTransactionsTest(BitcoinTestFramework):
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
- assert_equal(result['fee']*2, result2['fee'])
- assert_equal(result['fee']*10, result3['fee'])
+ result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
+ assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
+ assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
if __name__ == '__main__':
RawTransactionsTest().main()
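The reworked assertions above compare fee *rates* rather than exact fee multiples, which tolerates small size differences between the funded transactions. A sketch of the kind of check `assert_fee_amount` performs (the actual helper lives in `qa/rpc-tests/test_framework/util.py`; the 2-byte slack used here is an assumption):

    def assert_fee_amount(fee, tx_size, fee_per_kB):
        # the paid fee must match the target feerate for this transaction size,
        # allowing the estimate to overshoot by roughly two bytes' worth of fee
        target_fee = tx_size * fee_per_kB / 1000
        if fee < target_fee:
            raise AssertionError("Fee of %s BTC too low (expected %s BTC)" % (fee, target_fee))
        if fee > (tx_size + 2) * fee_per_kB / 1000:
            raise AssertionError("Fee of %s BTC too high (expected %s BTC)" % (fee, target_fee))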
diff --git a/qa/rpc-tests/mempool_packages.py b/qa/rpc-tests/mempool_packages.py
index 693ff593b3..45dc0e65c4 100755
--- a/qa/rpc-tests/mempool_packages.py
+++ b/qa/rpc-tests/mempool_packages.py
@@ -65,7 +65,14 @@ class MempoolPackagesTest(BitcoinTestFramework):
descendant_fees = 0
descendant_size = 0
+ descendants = []
+ ancestors = list(chain)
for x in reversed(chain):
+ # Check that getmempoolentry is consistent with getrawmempool
+ entry = self.nodes[0].getmempoolentry(x)
+ assert_equal(entry, mempool[x])
+
+ # Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
@@ -74,6 +81,27 @@ class MempoolPackagesTest(BitcoinTestFramework):
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
+ # Check that getmempooldescendants is correct
+ assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
+ descendants.append(x)
+
+ # Check that getmempoolancestors is correct
+ ancestors.remove(x)
+ assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
+
+ # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
+ v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
+ assert_equal(len(v_ancestors), len(chain)-1)
+ for x in v_ancestors.keys():
+ assert_equal(mempool[x], v_ancestors[x])
+ assert(chain[-1] not in v_ancestors.keys())
+
+ v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
+ assert_equal(len(v_descendants), len(chain)-1)
+ for x in v_descendants.keys():
+ assert_equal(mempool[x], v_descendants[x])
+ assert(chain[0] not in v_descendants.keys())
+
# Check that descendant modified fees include fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(chain[-1], 0, 1000)
diff --git a/qa/rpc-tests/p2p-fullblocktest.py b/qa/rpc-tests/p2p-fullblocktest.py
index aa0501c5e9..17fd40ef1d 100755
--- a/qa/rpc-tests/p2p-fullblocktest.py
+++ b/qa/rpc-tests/p2p-fullblocktest.py
@@ -9,7 +9,8 @@ from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
-from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
+from test_framework.script import *
+import struct
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
@@ -24,10 +25,36 @@ We use the testing framework in which we expect a particular answer from
each test.
'''
+def hash160(s):
+ return hashlib.new('ripemd160', sha256(s)).digest()
+
+# Use this class for tests that require behavior other than normal "mininode" behavior.
+# For now, it is used to serialize a bloated varint (b64).
+class CBrokenBlock(CBlock):
+ def __init__(self, header=None):
+ super(CBrokenBlock, self).__init__(header)
+
+ def initialize(self, base_block):
+ self.vtx = copy.deepcopy(base_block.vtx)
+ self.hashMerkleRoot = self.calc_merkle_root()
+
+ def serialize(self):
+ r = b""
+ r += super(CBlock, self).serialize()
+ r += struct.pack("<BQ", 255, len(self.vtx))
+ for tx in self.vtx:
+ r += tx.serialize()
+ return r
+
+ def normal_serialize(self):
+ r = b""
+ r += super(CBrokenBlock, self).serialize()
+ return r
+
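# Sketch (an annotation, not part of the test): why struct.pack("<BQ", 255, len(self.vtx))
# above is a "bloated" encoding. Bitcoin's CompactSize normally uses a single byte
# for counts below 253; prefixing 0xff forces a 9-byte encoding of the same value,
# so the serialized block grows by 8 bytes while the block hash, which covers only
# the header, stays the same.
import struct
canonical = struct.pack("<B", 3)       # 1-byte CompactSize for a small count
bloated = struct.pack("<BQ", 255, 3)   # 0xff marker followed by a uint64
assert len(bloated) - len(canonical) == 8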
class FullBlockTest(ComparisonTestFramework):
- ''' Can either run this test as 1 node with expected answers, or two and compare them.
- Change the "outcome" variable from each TestInstance object to only do the comparison. '''
+ # Can either run this test as 1 node with expected answers, or two and compare them.
+ # Change the "outcome" variable from each TestInstance object to only do the comparison.
def __init__(self):
super().__init__()
self.num_nodes = 1
@@ -35,66 +62,70 @@ class FullBlockTest(ComparisonTestFramework):
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
- self.block_time = int(time.time())+1
self.tip = None
self.blocks = {}
+ def add_options(self, parser):
+ super().add_options(parser)
+ parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
+
def run_test(self):
- test = TestManager(self, self.options.tmpdir)
- test.add_all_connections(self.nodes)
+ self.test = TestManager(self, self.options.tmpdir)
+ self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
- test.run()
+ self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- return block
-
- # Create a block on top of self.tip, and advance self.tip to point to the new block
- # if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
- # and rest will go to fees.
- def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
+
+ # this is a little handier to use than the version in blocktools.py
+ def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ tx = create_transaction(spend_tx, n, b"", value, script)
+ return tx
+
+ # sign a transaction, using the key we know about
+ # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
+ def sign_tx(self, tx, spend_tx, n):
+ scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
+ if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
+ tx.vin[0].scriptSig = CScript()
+ return
+ (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
+ tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
+
+ def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ tx = self.create_tx(spend_tx, n, value, script)
+ self.sign_tx(tx, spend_tx, n)
+ tx.rehash()
+ return tx
+
+ def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
+ block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
+ block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
- if (spend != None):
- coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
- block = create_block(base_block_hash, coinbase, self.block_time)
- if (spend != None):
- tx = CTransaction()
- tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet
- # This copies the java comparison tool testing behavior: the first
- # txout has a garbage scriptPubKey, "to make sure we're not
- # pre-verifying too much" (?)
- tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
- if script == None:
- tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
- else:
- tx.vout.append(CTxOut(1, script))
- # Now sign it if necessary
- scriptSig = b""
- scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
- if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
- scriptSig = CScript([OP_TRUE])
- else:
- # We have to actually sign it
- (sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
- scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
- tx.vin[0].scriptSig = scriptSig
- # Now add the transaction to the block
- block = self.add_transactions_to_block(block, [tx])
- block.solve()
+ if spend == None:
+ block = create_block(base_block_hash, coinbase, block_time)
+ else:
+ coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
+ coinbase.rehash()
+ block = create_block(base_block_hash, coinbase, block_time)
+ tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
+ self.sign_tx(tx, spend.tx, spend.n)
+ self.add_transactions_to_block(block, [tx])
+ block.hashMerkleRoot = block.calc_merkle_root()
+ if solve:
+ block.solve()
self.tip = block
self.block_heights[block.sha256] = height
- self.block_time += 1
assert number not in self.blocks
self.blocks[number] = block
return block
@@ -108,7 +139,7 @@ class FullBlockTest(ComparisonTestFramework):
def save_spendable_output():
spendable_outputs.append(self.tip)
- # get an output that we previous marked as spendable
+ # get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
@@ -122,26 +153,33 @@ class FullBlockTest(ComparisonTestFramework):
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
-
+
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
- # add transactions to a block produced by next_block
+ # adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
- old_hash = block.sha256
self.add_transactions_to_block(block, new_transactions)
+ old_sha256 = block.sha256
+ block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
- self.block_heights[block.sha256] = self.block_heights[old_hash]
- del self.block_heights[old_hash]
+ if block.sha256 != old_sha256:
+ self.block_heights[block.sha256] = self.block_heights[old_sha256]
+ del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
- # creates a new block and advances the tip to that block
+ # shorthand for functions
block = self.next_block
+ create_tx = self.create_tx
+ create_and_sign_tx = self.create_and_sign_transaction
+
+ # these must be updated if consensus changes
+ MAX_BLOCK_SIGOPS = 20000
# Create a new block
@@ -153,43 +191,44 @@ class FullBlockTest(ComparisonTestFramework):
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
- block(1000 + i)
+ block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
+ # collect spendable outputs now to avoid cluttering the code later on
+ out = []
+ for i in range(33):
+ out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
- out0 = get_spendable_output()
- block(1, spend=out0)
+ block(1, spend=out[0])
save_spendable_output()
yield accepted()
- out1 = get_spendable_output()
- b2 = block(2, spend=out1)
+ block(2, spend=out[1])
yield accepted()
-
+ save_spendable_output()
# so fork like this:
- #
+ #
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
- #
+ #
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
- b3 = block(3, spend=out1)
- txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
+ b3 = block(3, spend=out[1])
+ txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
- #
+ #
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
- out2 = get_spendable_output()
- block(4, spend=out2)
+ block(4, spend=out[2])
yield accepted()
@@ -197,46 +236,41 @@ class FullBlockTest(ComparisonTestFramework):
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
- block(5, spend=out2)
+ block(5, spend=out[2])
save_spendable_output()
yield rejected()
- out3 = get_spendable_output()
- block(6, spend=out3)
+ block(6, spend=out[3])
yield accepted()
-
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
- block(7, spend=out2)
+ block(7, spend=out[2])
yield rejected()
- out4 = get_spendable_output()
- block(8, spend=out4)
+ block(8, spend=out[4])
yield rejected()
-
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
- block(9, spend=out4, additional_coinbase_value=1)
+ block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
-
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
- block(10, spend=out3)
+ block(10, spend=out[3])
yield rejected()
- block(11, spend=out4, additional_coinbase_value=1)
+ block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
@@ -246,19 +280,17 @@ class FullBlockTest(ComparisonTestFramework):
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
- b12 = block(12, spend=out3)
+ b12 = block(12, spend=out[3])
save_spendable_output()
- #yield TestInstance([[b12, False]])
- b13 = block(13, spend=out4)
+ b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
- out5 = get_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
- block(14, spend=out5, additional_coinbase_value=1)
+ block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
@@ -267,18 +299,18 @@ class FullBlockTest(ComparisonTestFramework):
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
-
+
# Test that a block with a lot of checksigs is okay
- lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
+ lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
- block(15, spend=out5, script=lots_of_checksigs)
+ block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
+ save_spendable_output()
# Test that a block with too many checksigs is rejected
- out6 = get_spendable_output()
- too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
- block(16, spend=out6, script=too_many_checksigs)
+ too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
+ block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
@@ -299,7 +331,7 @@ class FullBlockTest(ComparisonTestFramework):
block(18, spend=txout_b3)
yield rejected()
- block(19, spend=out6)
+ block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
@@ -307,8 +339,7 @@ class FullBlockTest(ComparisonTestFramework):
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
- out7 = get_spendable_output()
- block(20, spend=out7)
+ block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
@@ -317,10 +348,10 @@ class FullBlockTest(ComparisonTestFramework):
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
- block(21, spend=out6)
+ block(21, spend=out[6])
yield rejected()
- block(22, spend=out5)
+ block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
@@ -329,21 +360,21 @@ class FullBlockTest(ComparisonTestFramework):
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
- b23 = block(23, spend=out6)
- old_hash = b23.sha256
+ b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
- tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
+ tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
yield accepted()
+ save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
- b24 = block(24, spend=out6)
+ b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
@@ -351,7 +382,7 @@ class FullBlockTest(ComparisonTestFramework):
assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
- b25 = block(25, spend=out7)
+ block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
@@ -360,7 +391,7 @@ class FullBlockTest(ComparisonTestFramework):
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
- b26 = block(26, spend=out6)
+ b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
@@ -369,23 +400,20 @@ class FullBlockTest(ComparisonTestFramework):
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
- b27 = block(27, spend=out7)
- yield rejected()
+ b27 = block(27, spend=out[7])
+ yield rejected(RejectResult(16, b'bad-prevblk'))
# Now try a too-large-coinbase script
tip(15)
- b28 = block(28, spend=out6)
+ b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
- # Extend the b28 chain to make sure bitcoind isn't accepted b28
- b29 = block(29, spend=out7)
- # TODO: Should get a reject message back with "bad-prevblk", except
- # there's a bug that prevents this from being detected. Just note
- # failure for now, and add the reject result later.
- yield rejected()
+ # Extend the b28 chain to make sure bitcoind isn't accepting b28
+ b29 = block(29, spend=out[7])
+ yield rejected(RejectResult(16, b'bad-prevblk'))
# b30 has a max-sized coinbase scriptSig.
tip(23)
@@ -394,6 +422,871 @@ class FullBlockTest(ComparisonTestFramework):
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
+ save_spendable_output()
+
+ # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
+ #
+ # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+ # \-> b36 (11)
+ # \-> b34 (10)
+ # \-> b32 (9)
+ #
+
+ # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
+ lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+ b31 = block(31, spend=out[8], script=lots_of_multisigs)
+ assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
+ yield accepted()
+ save_spendable_output()
+
+ # this goes over the limit because the coinbase has one sigop
+ too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
+ b32 = block(32, spend=out[9], script=too_many_multisigs)
+ assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # CHECKMULTISIGVERIFY
+ tip(31)
+ lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+ block(33, spend=out[9], script=lots_of_multisigs)
+ yield accepted()
+ save_spendable_output()
+
+ too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
+ block(34, spend=out[10], script=too_many_multisigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # CHECKSIGVERIFY
+ tip(33)
+ lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
+ b35 = block(35, spend=out[10], script=lots_of_checksigs)
+ yield accepted()
+ save_spendable_output()
+
+ too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
+ block(36, spend=out[11], script=too_many_checksigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # Check spending of a transaction in a block which failed to connect
+ #
+ # b6 (3)
+ # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+ # \-> b37 (11)
+ # \-> b38 (11/37)
+ #
+
+ # save 37's spendable output, but then double-spend out11 to invalidate the block
+ tip(35)
+ b37 = block(37, spend=out[11])
+ txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
+ tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
+ b37 = update_block(37, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
+ tip(35)
+ block(38, spend=txout_b37)
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Check P2SH SigOp counting
+ #
+ #
+ # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
+ # \-> b40 (12)
+ #
+ # b39 - create some P2SH outputs that will require 6 sigops to spend:
+ #
+ # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
+ # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
+ #
+ tip(35)
+ b39 = block(39)
+ b39_outputs = 0
+ b39_sigops_per_output = 6
+
+ # Build the redeem script, hash it, use hash to create the p2sh script
+ redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
+ redeem_script_hash = hash160(redeem_script)
+ p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
+
+ # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
+ # This must be signed because it is spending a coinbase
+ spend = out[11]
+ tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
+ tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
+ self.sign_tx(tx, spend.tx, spend.n)
+ tx.rehash()
+ b39 = update_block(39, [tx])
+ b39_outputs += 1
+
+ # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
+ tx_new = None
+ tx_last = tx
+ total_size=len(b39.serialize())
+ while(total_size < MAX_BLOCK_SIZE):
+ tx_new = create_tx(tx_last, 1, 1, p2sh_script)
+ tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
+ tx_new.rehash()
+ total_size += len(tx_new.serialize())
+ if total_size >= MAX_BLOCK_SIZE:
+ break
+ b39.vtx.append(tx_new) # add tx to block
+ tx_last = tx_new
+ b39_outputs += 1
+
+ b39 = update_block(39, [])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test sigops in P2SH redeem scripts
+ #
+ # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
+ # The first tx has one sigop and then at the end we add 2 more to put us just over the max.
+ #
+ # b41 does the same, less one, so it has the maximum sigops permitted.
+ #
+ tip(39)
+ b40 = block(40, spend=out[12])
+ sigops = get_legacy_sigopcount_block(b40)
+ numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
+ assert_equal(numTxes <= b39_outputs, True)
+
+ lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
+ new_txs = []
+ for i in range(1, numTxes+1):
+ tx = CTransaction()
+ tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ # second input is corresponding P2SH output from b39
+ tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
+ # Note: must pass the redeem_script (not p2sh_script) to the signature hash function
+ (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
+ sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
+ scriptSig = CScript([sig, redeem_script])
+
+ tx.vin[1].scriptSig = scriptSig
+ tx.rehash()
+ new_txs.append(tx)
+ lastOutpoint = COutPoint(tx.sha256, 0)
+
+ b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
+ tx = CTransaction()
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
+ tx.rehash()
+ new_txs.append(tx)
+ update_block(40, new_txs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ # same as b40, but one less sigop
+ tip(39)
+ b41 = block(41, spend=None)
+ update_block(41, b40.vtx[1:-1])
+ b41_sigops_to_fill = b40_sigops_to_fill - 1
+ tx = CTransaction()
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
+ tx.rehash()
+ update_block(41, [tx])
+ yield accepted()
+
+ # Fork off of b39 to create a constant base again
+ #
+ # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
+ # \-> b41 (12)
+ #
+ tip(39)
+ block(42, spend=out[12])
+ yield rejected()
+ save_spendable_output()
+
+ block(43, spend=out[13])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test a number of really invalid scenarios
+ #
+ # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
+ # \-> ??? (15)
+
+ # The next few blocks are going to be created "by hand" since they'll do funky things, such as having
+ # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
+ height = self.block_heights[self.tip.sha256] + 1
+ coinbase = create_coinbase(height, self.coinbase_pubkey)
+ b44 = CBlock()
+ b44.nTime = self.tip.nTime + 1
+ b44.hashPrevBlock = self.tip.sha256
+ b44.nBits = 0x207fffff
+ b44.vtx.append(coinbase)
+ b44.hashMerkleRoot = b44.calc_merkle_root()
+ b44.solve()
+ self.tip = b44
+ self.block_heights[b44.sha256] = height
+ self.blocks[44] = b44
+ yield accepted()
+
+ # A block with a non-coinbase as the first tx
+ non_coinbase = create_tx(out[15].tx, out[15].n, 1)
+ b45 = CBlock()
+ b45.nTime = self.tip.nTime + 1
+ b45.hashPrevBlock = self.tip.sha256
+ b45.nBits = 0x207fffff
+ b45.vtx.append(non_coinbase)
+ b45.hashMerkleRoot = b45.calc_merkle_root()
+ b45.calc_sha256()
+ b45.solve()
+ self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
+ self.tip = b45
+ self.blocks[45] = b45
+ yield rejected(RejectResult(16, b'bad-cb-missing'))
+
+ # A block with no txns
+ tip(44)
+ b46 = CBlock()
+ b46.nTime = b44.nTime+1
+ b46.hashPrevBlock = b44.sha256
+ b46.nBits = 0x207fffff
+ b46.vtx = []
+ b46.hashMerkleRoot = 0
+ b46.solve()
+ self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
+ self.tip = b46
+ assert 46 not in self.blocks
+ self.blocks[46] = b46
+ s = ser_uint256(b46.hashMerkleRoot)
+ yield rejected(RejectResult(16, b'bad-blk-length'))
+
+ # A block with invalid work
+ tip(44)
+ b47 = block(47, solve=False)
+ target = uint256_from_compact(b47.nBits)
+ while b47.sha256 < target: # solve() with its comparison flipped from > to <, so the PoW ends up invalid
+ b47.nNonce += 1
+ b47.rehash()
+ yield rejected(RejectResult(16, b'high-hash'))
+
+ # A block with timestamp > 2 hrs in the future
+ tip(44)
+ b48 = block(48, solve=False)
+ b48.nTime = int(time.time()) + 60 * 60 * 3
+ b48.solve()
+ yield rejected(RejectResult(16, b'time-too-new'))
+
+ # A block with an invalid merkle hash
+ tip(44)
+ b49 = block(49)
+ b49.hashMerkleRoot += 1
+ b49.solve()
+ yield rejected(RejectResult(16, b'bad-txnmrklroot'))
+
+ # A block with an incorrect POW limit
+ tip(44)
+ b50 = block(50)
+ b50.nBits = b50.nBits - 1
+ b50.solve()
+ yield rejected(RejectResult(16, b'bad-diffbits'))
+
+ # A block with two coinbase txns
+ tip(44)
+ b51 = block(51)
+ cb2 = create_coinbase(51, self.coinbase_pubkey)
+ b51 = update_block(51, [cb2])
+ yield rejected(RejectResult(16, b'bad-cb-multiple'))
+
+ # A block w/ duplicate txns
+ # Note: txns have to be in the right position in the merkle tree to trigger this error
+ tip(44)
+ b52 = block(52, spend=out[15])
+ tx = create_tx(b52.vtx[1], 0, 1)
+ b52 = update_block(52, [tx, tx])
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ # Test block timestamps
+ # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
+ # \-> b54 (15)
+ #
+ tip(43)
+ block(53, spend=out[14])
+ yield rejected() # rejected since b44 is at same height
+ save_spendable_output()
+
+ # invalid timestamp: b35.nTime - 1 is no greater than the median time of the previous 11 blocks
+ b54 = block(54, spend=out[15])
+ b54.nTime = b35.nTime - 1
+ b54.solve()
+ yield rejected(RejectResult(16, b'time-too-old'))
+
+ # valid timestamp
+ tip(53)
+ b55 = block(55, spend=out[15])
+ b55.nTime = b35.nTime
+ update_block(55, [])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test CVE-2012-2459
+ #
+ # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
+ # \-> b57 (16)
+ # \-> b56p2 (16)
+ # \-> b56 (16)
+ #
+ # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
+ # affecting the merkle root of a block, while still invalidating it.
+ # See: src/consensus/merkle.h
+ #
+ # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
+ # Result: OK
+ #
+ # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
+ # root but duplicate transactions.
+ # Result: Fails
+ #
+ # b57p2 has six transactions in its merkle tree:
+ # - coinbase, tx, tx1, tx2, tx3, tx4
+ # Merkle root calculation will duplicate as necessary.
+ # Result: OK.
+ #
+ # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
+ # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
+ # that the error was caught early, avoiding a DoS vulnerability).
+
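# Sketch (an annotation, not part of the test): the malleability exploited by
# CVE-2012-2459 comes from duplicating the last hash on any merkle level with an
# odd number of entries, so [cb, tx, tx1] and [cb, tx, tx1, tx1] share a root.
# Leaves here are arbitrary byte strings rather than real transactions.
from hashlib import sha256

def dsha256(data):
    return sha256(sha256(data).digest()).digest()

def merkle_root(leaves):
    level = [dsha256(leaf) for leaf in leaves]
    while len(level) > 1:
        if len(level) % 2:
            level.append(level[-1])  # duplicate the last entry on odd levels
        level = [dsha256(level[i] + level[i + 1]) for i in range(0, len(level), 2)]
    return level[0]

assert merkle_root([b"cb", b"tx", b"tx1"]) == merkle_root([b"cb", b"tx", b"tx1", b"tx1"])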
+ # b57 - a good block with 2 txs, don't submit until end
+ tip(55)
+ b57 = block(57)
+ tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+ tx1 = create_tx(tx, 0, 1)
+ b57 = update_block(57, [tx, tx1])
+
+ # b56 - copy b57, add a duplicate tx
+ tip(55)
+ b56 = copy.deepcopy(b57)
+ self.blocks[56] = b56
+ assert_equal(len(b56.vtx),3)
+ b56 = update_block(56, [tx1])
+ assert_equal(b56.hash, b57.hash)
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ # b57p2 - a good block with 6 tx'es, don't submit until end
+ tip(55)
+ b57p2 = block("57p2")
+ tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+ tx1 = create_tx(tx, 0, 1)
+ tx2 = create_tx(tx1, 0, 1)
+ tx3 = create_tx(tx2, 0, 1)
+ tx4 = create_tx(tx3, 0, 1)
+ b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
+
+ # b56p2 - copy b57p2, duplicate two non-consecutive tx's
+ tip(55)
+ b56p2 = copy.deepcopy(b57p2)
+ self.blocks["b56p2"] = b56p2
+ assert_equal(b56p2.hash, b57p2.hash)
+ assert_equal(len(b56p2.vtx),6)
+ b56p2 = update_block("b56p2", [tx3, tx4])
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ tip("57p2")
+ yield accepted()
+
+ tip(57)
+ yield rejected() #rejected because 57p2 seen first
+ save_spendable_output()
+
+ # Test a few invalid tx types
+ #
+ # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> ??? (17)
+ #
+
+ # tx with prevout.n out of range
+ tip(57)
+ b58 = block(58, spend=out[17])
+ tx = CTransaction()
+ assert(len(out[17].tx.vout) < 42)
+ tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
+ tx.vout.append(CTxOut(0, b""))
+ tx.calc_sha256()
+ b58 = update_block(58, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # tx with output value greater than its input value
+ tip(57)
+ b59 = block(59)
+ tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
+ b59 = update_block(59, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
+
+ # reset to good chain
+ tip(57)
+ b60 = block(60, spend=out[17])
+ yield accepted()
+ save_spendable_output()
+
+ # Test BIP30
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b61 (18)
+ #
+ # Blocks are not allowed to contain a transaction whose id matches that of an earlier,
+ # not-fully-spent transaction in the same chain. To test, make identical coinbases;
+ # the second one should be rejected.
+ #
+ tip(60)
+ b61 = block(61, spend=out[18])
+ b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
+ b61.vtx[0].rehash()
+ b61 = update_block(61, [])
+ assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
+ yield rejected(RejectResult(16, b'bad-txns-BIP30'))
+
+
+ # Test that a non-final transaction is properly rejected (not an exhaustive tx.isFinal test; that belongs in data-driven transaction tests)
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b62 (18)
+ #
+ tip(60)
+ b62 = block(62)
+ tx = CTransaction()
+ tx.nLockTime = 0xffffffff #this locktime is non-final
+ assert(out[18].n < len(out[18].tx.vout))
+ tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
+ tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ assert(tx.vin[0].nSequence < 0xffffffff)
+ tx.calc_sha256()
+ b62 = update_block(62, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+
+ # Test a non-final coinbase is also rejected
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b63 (-)
+ #
+ tip(60)
+ b63 = block(63)
+ b63.vtx[0].nLockTime = 0xffffffff
+ b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
+ b63.vtx[0].rehash()
+ b63 = update_block(63, [])
+ yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+
+ # This checks that a block with a bloated VARINT between the block_header and the array of tx such that
+ # the block is > MAX_BLOCK_SIZE with the bloated varint, but <= MAX_BLOCK_SIZE without the bloated varint,
+ # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
+ # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
+ #
+ # What matters is that the receiving node should not reject the bloated block, and then reject the canonical
+ # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
+ # \
+ # b64a (18)
+ # b64a is a bloated block (non-canonical varint)
+ # b64 is a good block (same as b64a but with a canonical varint)
+ #
+ tip(60)
+ regular_block = block("64a", spend=out[18])
+
+ # make it a "broken_block," with non-canonical serialization
+ b64a = CBrokenBlock(regular_block)
+ b64a.initialize(regular_block)
+ self.blocks["64a"] = b64a
+ self.tip = b64a
+ tx = CTransaction()
+
+ # use canonical serialization to calculate size
+ script_length = MAX_BLOCK_SIZE - len(b64a.normal_serialize()) - 69
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
+ b64a = update_block("64a", [tx])
+ assert_equal(len(b64a.serialize()), MAX_BLOCK_SIZE + 8)
+ yield TestInstance([[self.tip, None]])
+
+ # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
+ self.test.block_store.erase(b64a.sha256)
+
+ tip(60)
+ b64 = CBlock(b64a)
+ b64.vtx = copy.deepcopy(b64a.vtx)
+ assert_equal(b64.hash, b64a.hash)
+ assert_equal(len(b64.serialize()), MAX_BLOCK_SIZE)
+ self.blocks[64] = b64
+ update_block(64, [])
+ yield accepted()
+ save_spendable_output()
+
+ # Spend an output created in the block itself
+ #
+ # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ #
+ tip(64)
+ b65 = block(65)
+ tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 0)
+ update_block(65, [tx1, tx2])
+ yield accepted()
+ save_spendable_output()
+
+ # Attempt to spend an output created later in the same block
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ # \-> b66 (20)
+ tip(65)
+ b66 = block(66)
+ tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ update_block(66, [tx2, tx1])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Attempt to double-spend a transaction created in a block
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ # \-> b67 (20)
+ #
+ #
+ tip(65)
+ b67 = block(67)
+ tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ tx3 = create_and_sign_tx(tx1, 0, 2)
+ update_block(67, [tx1, tx2, tx3])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # More tests of block subsidy
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+ # \-> b68 (20)
+ #
+ # b68 - coinbase with an extra 10 satoshis,
+ # creates a tx that has 9 satoshis from out[20] go to fees
+ # this fails because the coinbase is trying to claim 1 satoshi too much in fees
+ #
+ # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
+ # this succeeds
+ #
+ tip(65)
+ b68 = block(68, additional_coinbase_value=10)
+ tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
+ update_block(68, [tx])
+ yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+ tip(65)
+ b69 = block(69, additional_coinbase_value=10)
+ tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
+ update_block(69, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Test spending the outpoint of a non-existent transaction
+ #
+ # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+ # \-> b70 (21)
+ #
+ tip(69)
+ block(70, spend=out[21])
+ bogus_tx = CTransaction()
+ bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
+ tx.vout.append(CTxOut(1, b""))
+ update_block(70, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+
+ # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
+ #
+ # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+ # \-> b71 (21)
+ #
+ # b72 is a good block.
+ # b71 is a copy of b72, but re-adds one of its transactions. However, it has the same hash as b72.
+ #
+ tip(69)
+ b72 = block(72)
+ tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ b72 = update_block(72, [tx1, tx2]) # now tip is 72
+ b71 = copy.deepcopy(b72)
+ b71.vtx.append(tx2) # add duplicate tx2
+ self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
+ self.blocks[71] = b71
+
+ assert_equal(len(b71.vtx), 4)
+ assert_equal(len(b72.vtx), 3)
+ assert_equal(b72.sha256, b71.sha256)
+
+ tip(71)
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+ tip(72)
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test some invalid scripts and MAX_BLOCK_SIGOPS
+ #
+ # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+ # \-> b** (22)
+ #
+
+ # b73 - tx with excessive sigops that are placed after an excessively large script element.
+ # The purpose of the test is to make sure those sigops are counted.
+ #
+ # script is a bytearray of size 20,526
+ #
+ # bytearray[0-19,998] : OP_CHECKSIG
+ # bytearray[19,999] : OP_PUSHDATA4
+ # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
+ # bytearray[20,004-20,524]: unread data (script_element)
+ # bytearray[20,525]       : OP_CHECKSIG (this puts us over the limit)
+ #
+ tip(72)
+ b73 = block(73)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
+
+ element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
+ a[MAX_BLOCK_SIGOPS] = element_size % 256
+ a[MAX_BLOCK_SIGOPS+1] = element_size // 256
+ a[MAX_BLOCK_SIGOPS+2] = 0
+ a[MAX_BLOCK_SIGOPS+3] = 0
+
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b73 = update_block(73, [tx])
+ assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ # b74/75 - if we push an invalid script element, all previous sigops are counted,
+ # but sigops after the element are not counted.
+ #
+ # The script element is invalid because its push_data field claims that far
+ # more data follows (on the order of 0xffffffff bytes) than we actually
+ # provide. The bytes that follow are CHECKSIGs, so they would
+ # cause b75 to fail for excessive sigops if they were counted.
+ #
+ # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
+ # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
+ #
+ #
+ tip(72)
+ b74 = block(74)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS] = 0x4e
+ a[MAX_BLOCK_SIGOPS+1] = 0xfe
+ a[MAX_BLOCK_SIGOPS+2] = 0xff
+ a[MAX_BLOCK_SIGOPS+3] = 0xff
+ a[MAX_BLOCK_SIGOPS+4] = 0xff
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b74 = update_block(74, [tx])
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ tip(72)
+ b75 = block(75)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS-1] = 0x4e
+ a[MAX_BLOCK_SIGOPS] = 0xff
+ a[MAX_BLOCK_SIGOPS+1] = 0xff
+ a[MAX_BLOCK_SIGOPS+2] = 0xff
+ a[MAX_BLOCK_SIGOPS+3] = 0xff
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b75 = update_block(75, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Check that if we push an element filled with CHECKSIGs, they are not counted
+ tip(75)
+ b76 = block(76)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
+ tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
+ b76 = update_block(76, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Test transaction resurrection
+ #
+ # -> b77 (24) -> b78 (25) -> b79 (26)
+ # \-> b80 (25) -> b81 (26) -> b82 (27)
+ #
+ # b78 creates a tx, which is spent in b79. After b82, both should be in mempool
+ #
+ # The txes must be unsigned and pass the node's mempool policy. They are unsigned for the
+ # rather obscure reason that the Python signature code does not distinguish between
+ # Low-S and High-S values (whereas the bitcoin code has custom code which does so);
+ # as a result, the odds are only 50% that a signature produced by the Python code
+ # will be accepted into the mempool. Until we modify the test framework to support
+ # low-S signing, we are out of luck.
+ #
+ # To get around this issue, we construct transactions which are not signed and which
+ # spend to OP_TRUE. If the standardness rules change, this test would need to be
+ # updated (perhaps to spend to a P2SH OP_TRUE script).
+ #
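# Sketch (an annotation, not part of the test): the low-S normalization the C++
# signing code performs and the framework's signer omits, which is why roughly
# half of its signatures would be rejected by mempool standardness rules.
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

def normalize_s(s):
    # flip a high-S value into its canonical low-S equivalent (s <= order/2)
    return SECP256K1_ORDER - s if s > SECP256K1_ORDER // 2 else s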
+ tip(76)
+ block(77)
+ tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
+ update_block(77, [tx77])
+ yield accepted()
+ save_spendable_output()
+
+ block(78)
+ tx78 = create_tx(tx77, 0, 9*COIN)
+ update_block(78, [tx78])
+ yield accepted()
+
+ block(79)
+ tx79 = create_tx(tx78, 0, 8*COIN)
+ update_block(79, [tx79])
+ yield accepted()
+
+ # mempool should be empty
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+ tip(77)
+ block(80, spend=out[25])
+ yield rejected()
+ save_spendable_output()
+
+ block(81, spend=out[26])
+ yield rejected() # other chain is same length
+ save_spendable_output()
+
+ block(82, spend=out[27])
+ yield accepted() # now this chain is longer, triggers re-org
+ save_spendable_output()
+
+ # now check that tx78 and tx79 have been put back into the peer's mempool
+ mempool = self.nodes[0].getrawmempool()
+ assert_equal(len(mempool), 2)
+ assert(tx78.hash in mempool)
+ assert(tx79.hash in mempool)
+
+
+ # Test invalid opcodes in dead execution paths.
+ #
+ # -> b81 (26) -> b82 (27) -> b83 (28)
+ #
+ b83 = block(83)
+ op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
+ script = CScript(op_codes)
+ tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
+
+ tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
+ tx2.vin[0].scriptSig = CScript([OP_FALSE])
+ tx2.rehash()
+
+ update_block(83, [tx1, tx2])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
+ #
+ # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
+ # \-> b85 (29) -> b86 (30) \-> b89a (32)
+ #
+ #
+ b84 = block(84)
+ tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.calc_sha256()
+ self.sign_tx(tx1, out[29].tx, out[29].n)
+ tx1.rehash()
+ tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
+ tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
+ tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
+ tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
+ tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
+ tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
+
+ update_block(84, [tx1,tx2,tx3,tx4,tx5])
+ yield accepted()
+ save_spendable_output()
+
+ tip(83)
+ block(85, spend=out[29])
+ yield rejected()
+
+ block(86, spend=out[30])
+ yield accepted()
+
+ tip(84)
+ block(87, spend=out[30])
+ yield rejected()
+ save_spendable_output()
+
+ block(88, spend=out[31])
+ yield accepted()
+ save_spendable_output()
+
+ # trying to spend the OP_RETURN output is rejected
+ block("89a", spend=out[32])
+ tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
+ update_block("89a", [tx])
+ yield rejected()
+
+
+ # Test re-org of a week's worth of blocks (1088 blocks)
+ # This test takes a minute or two and can be accomplished in memory
+ #
+ if self.options.runbarelyexpensive:
+ tip(88)
+ LARGE_REORG_SIZE = 1088
+ test1 = TestInstance(sync_every_block=False)
+ spend=out[32]
+ for i in range(89, LARGE_REORG_SIZE + 89):
+ b = block(i, spend)
+ tx = CTransaction()
+ script_length = MAX_BLOCK_SIZE - len(b.serialize()) - 69
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
+ b = update_block(i, [tx])
+ assert_equal(len(b.serialize()), MAX_BLOCK_SIZE)
+ test1.blocks_and_transactions.append([self.tip, True])
+ save_spendable_output()
+ spend = get_spendable_output()
+
+ yield test1
+ chain1_tip = i
+
+ # now create alt chain of same length
+ tip(88)
+ test2 = TestInstance(sync_every_block=False)
+ for i in range(89, LARGE_REORG_SIZE + 89):
+ block("alt"+str(i))
+ test2.blocks_and_transactions.append([self.tip, False])
+ yield test2
+
+ # extend alt chain to trigger re-org
+ block("alt" + str(chain1_tip + 1))
+ yield accepted()
+
+ # ... and re-org back to the first chain
+ tip(chain1_tip)
+ block(chain1_tip + 1)
+ yield rejected()
+ block(chain1_tip + 2)
+ yield accepted()
+
+ chain1_tip += 2
+
if __name__ == '__main__':
diff --git a/qa/rpc-tests/rawtransactions.py b/qa/rpc-tests/rawtransactions.py
index aa403f058c..ab6d2e8def 100755
--- a/qa/rpc-tests/rawtransactions.py
+++ b/qa/rpc-tests/rawtransactions.py
@@ -143,6 +143,20 @@ class RawTransactionsTest(BitcoinTestFramework):
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ decrawtx= self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
diff --git a/qa/rpc-tests/test_framework/blockstore.py b/qa/rpc-tests/test_framework/blockstore.py
index 4bc279032b..6120dd574b 100644
--- a/qa/rpc-tests/test_framework/blockstore.py
+++ b/qa/rpc-tests/test_framework/blockstore.py
@@ -13,20 +13,31 @@ class BlockStore(object):
self.blockDB = dbm.ndbm.open(datadir + "/blocks", 'c')
self.currentBlock = 0
self.headers_map = dict()
-
+
def close(self):
self.blockDB.close()
+ def erase(self, blockhash):
+ del self.blockDB[repr(blockhash)]
+
+ # lookup an entry and return the item as raw bytes
def get(self, blockhash):
- serialized_block = None
+ value = None
try:
- serialized_block = self.blockDB[repr(blockhash)]
+ value = self.blockDB[repr(blockhash)]
except KeyError:
return None
- f = BytesIO(serialized_block)
- ret = CBlock()
- ret.deserialize(f)
- ret.calc_sha256()
+ return value
+
+ # lookup an entry and return it as a CBlock
+ def get_block(self, blockhash):
+ ret = None
+ serialized_block = self.get(blockhash)
+ if serialized_block is not None:
+ f = BytesIO(serialized_block)
+ ret = CBlock()
+ ret.deserialize(f)
+ ret.calc_sha256()
return ret
def get_header(self, blockhash):
@@ -75,13 +86,16 @@ class BlockStore(object):
def add_header(self, header):
self.headers_map[header.sha256] = header
+ # look up the hashes in "inv" and return p2p messages for
+ # delivering any blocks found
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
- block = self.get(i.hash)
- if block is not None:
- responses.append(msg_block(block))
+ data = self.get(i.hash)
+ if data is not None:
+ # Use msg_generic to avoid re-serialization
+ responses.append(msg_generic(b"block", data))
return responses
def get_locator(self, current_tip=None):
@@ -90,11 +104,11 @@ class BlockStore(object):
r = []
counter = 0
step = 1
- lastBlock = self.get(current_tip)
+ lastBlock = self.get_block(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
- lastBlock = self.get(lastBlock.hashPrevBlock)
+ lastBlock = self.get_block(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
@@ -111,16 +125,23 @@ class TxStore(object):
def close(self):
self.txDB.close()
+ # lookup an entry and return the item as raw bytes
def get(self, txhash):
- serialized_tx = None
+ value = None
try:
- serialized_tx = self.txDB[repr(txhash)]
+ value = self.txDB[repr(txhash)]
except KeyError:
return None
- f = BytesIO(serialized_tx)
- ret = CTransaction()
- ret.deserialize(f)
- ret.calc_sha256()
+ return value
+
+ def get_transaction(self, txhash):
+ ret = None
+ serialized_tx = self.get(txhash)
+ if serialized_tx is not None:
+ f = BytesIO(serialized_tx)
+ ret = CTransaction()
+ ret.deserialize(f)
+ ret.calc_sha256()
return ret
def add_transaction(self, tx):
@@ -136,5 +157,5 @@ class TxStore(object):
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
- responses.append(msg_tx(tx))
+ responses.append(msg_generic(b"tx", tx))
return responses
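The refactor above splits raw-byte lookup (get) from deserialization (get_block/get_transaction) so that get_blocks/get_transactions can hand the stored bytes straight to msg_generic without re-serializing them. A rough standalone model of that split, using a plain dict in place of dbm (ToyStore and DummyObj are illustrative only):

from io import BytesIO

class ToyStore:
    def __init__(self):
        self.db = {}

    def add(self, key, serialized_bytes):
        self.db[repr(key)] = serialized_bytes

    def get(self, key):
        # raw bytes, suitable for forwarding verbatim (e.g. via msg_generic)
        return self.db.get(repr(key))

    def get_object(self, key, cls):
        # deserialize only when the caller actually needs the object
        data = self.get(key)
        if data is None:
            return None
        obj = cls()
        obj.deserialize(BytesIO(data))
        return obj

class DummyObj:
    def deserialize(self, f):
        self.payload = f.read()

store = ToyStore()
store.add(0xabcd, b"\x01\x02")
assert store.get(0xabcd) == b"\x01\x02"
assert store.get_object(0xabcd, DummyObj).payload == b"\x01\x02"
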
diff --git a/qa/rpc-tests/test_framework/blocktools.py b/qa/rpc-tests/test_framework/blocktools.py
index 44232153ac..26cc396315 100644
--- a/qa/rpc-tests/test_framework/blocktools.py
+++ b/qa/rpc-tests/test_framework/blocktools.py
@@ -56,12 +56,27 @@ def create_coinbase(height, pubkey = None):
coinbase.calc_sha256()
return coinbase
-# Create a transaction with an anyone-can-spend output, that spends the
-# nth output of prevtx.
-def create_transaction(prevtx, n, sig, value):
+# Create a transaction.
+# If the scriptPubKey is not specified, make it anyone-can-spend.
+def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
- tx.vout.append(CTxOut(value, b""))
+ tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
+
+def get_legacy_sigopcount_block(block, fAccurate=True):
+ count = 0
+ for tx in block.vtx:
+ count += get_legacy_sigopcount_tx(tx, fAccurate)
+ return count
+
+def get_legacy_sigopcount_tx(tx, fAccurate=True):
+ count = 0
+ for i in tx.vout:
+ count += i.scriptPubKey.GetSigOpCount(fAccurate)
+ for j in tx.vin:
+ # scriptSig might be of type bytes, so convert to CScript for the moment
+ count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
+ return count
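get_legacy_sigopcount_tx above sums GetSigOpCount over every output scriptPubKey and every input scriptSig. As a rough illustration of the legacy counting rule it relies on (CHECKSIG counts as 1; CHECKMULTISIG counts as 20, or as n when "accurate" counting sees a preceding OP_n push), here is a standalone toy counter over opcode names; it is a simplified model, not the framework's CScript implementation:

def toy_sigop_count(opcodes, accurate=True):
    # opcodes: list of opcode names in script order
    count = 0
    prev = None
    for op in opcodes:
        if op in ("OP_CHECKSIG", "OP_CHECKSIGVERIFY"):
            count += 1
        elif op in ("OP_CHECKMULTISIG", "OP_CHECKMULTISIGVERIFY"):
            n = None
            if accurate and prev is not None and prev.startswith("OP_") and prev[3:].isdigit():
                n = int(prev[3:])
            count += n if n is not None and 1 <= n <= 16 else 20
        prev = op
    return count

assert toy_sigop_count(["OP_CHECKSIG"]) == 1
assert toy_sigop_count(["OP_2", "OP_CHECKMULTISIG"], accurate=True) == 2
assert toy_sigop_count(["OP_2", "OP_CHECKMULTISIG"], accurate=False) == 20
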
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index 1617daa200..c0b59f3857 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -836,6 +836,18 @@ class msg_block(object):
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
+# For cases where a user needs tighter control over what is sent over the wire.
+# Note that the user must supply the command name and the raw serialized data.
+class msg_generic(object):
+ def __init__(self, command, data=None):
+ self.command = command
+ self.data = data
+
+ def serialize(self):
+ return self.data
+
+ def __repr__(self):
+ return "msg_generic()"
class msg_getaddr(object):
command = b"getaddr"
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index fc66ef287d..782df52d62 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -477,6 +477,15 @@ def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
return (txid, signresult["hex"], fee)
+def assert_fee_amount(fee, tx_size, fee_per_kB):
+ """Assert the fee was in range"""
+ target_fee = tx_size * fee_per_kB / 1000
+ if fee < target_fee:
+ raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
+ # allow the wallet's estimation to be at most 2 bytes off
+ if fee > (tx_size + 2) * fee_per_kB / 1000:
+ raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
+
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
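assert_fee_amount above accepts any fee between tx_size * fee_per_kB / 1000 and (tx_size + 2) * fee_per_kB / 1000, i.e. the wallet's size estimate may be up to 2 bytes high. A worked example with illustrative numbers (the fee_window helper below only exists to show the arithmetic): a 250-byte tx at 0.0002 BTC/kB gives a window of 0.00005 to 0.0000504 BTC.

def fee_window(tx_size, fee_per_kB):
    # lower bound: exact fee for tx_size; upper bound: allow the size
    # estimate to be up to 2 bytes high
    return tx_size * fee_per_kB / 1000, (tx_size + 2) * fee_per_kB / 1000

low, high = fee_window(250, 0.0002)
assert abs(low - 0.00005) < 1e-12
assert abs(high - 0.0000504) < 1e-12
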
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index ba84f0d568..5d96e7a6e5 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -11,12 +11,7 @@ class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
- target_fee = fee_per_byte * tx_size
- if fee < target_fee:
- raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
- # allow the node's estimation to be at most 2 bytes off
- if fee > fee_per_byte * (tx_size + 2):
- raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
+ assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def __init__(self):
diff --git a/share/genbuild.sh b/share/genbuild.sh
index a15cb34e47..1ef77d706f 100755
--- a/share/genbuild.sh
+++ b/share/genbuild.sh
@@ -15,7 +15,6 @@ fi
DESC=""
SUFFIX=""
-LAST_COMMIT_DATE=""
if [ -e "$(which git 2>/dev/null)" -a "$(git rev-parse --is-inside-work-tree 2>/dev/null)" = "true" ]; then
# clean 'dirty' status of touched files that haven't been modified
git diff >/dev/null 2>/dev/null
@@ -29,9 +28,6 @@ if [ -e "$(which git 2>/dev/null)" -a "$(git rev-parse --is-inside-work-tree 2>/
# otherwise generate suffix from git, i.e. string like "59887e8-dirty"
SUFFIX=$(git rev-parse --short HEAD)
git diff-index --quiet HEAD -- || SUFFIX="$SUFFIX-dirty"
-
- # get a string like "2012-04-10 16:27:19 +0200"
- LAST_COMMIT_DATE="$(git log -n 1 --format="%ci")"
fi
if [ -n "$DESC" ]; then
@@ -45,7 +41,4 @@ fi
# only update build.h if necessary
if [ "$INFO" != "$NEWINFO" ]; then
echo "$NEWINFO" >"$FILE"
- if [ -n "$LAST_COMMIT_DATE" ]; then
- echo "#define BUILD_DATE \"$LAST_COMMIT_DATE\"" >> "$FILE"
- fi
fi
diff --git a/src/Makefile.am b/src/Makefile.am
index 2f38ecde02..3df8e267bb 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -418,8 +418,8 @@ CTAES_DIST += crypto/ctaes/ctaes.h
CTAES_DIST += crypto/ctaes/README.md
CTAES_DIST += crypto/ctaes/test.c
-CLEANFILES = leveldb/libleveldb.a leveldb/libmemenv.a
-CLEANFILES += $(EXTRA_LIBRARIES)
+CLEANFILES = $(EXTRA_LIBRARIES)
+
CLEANFILES += *.gcda *.gcno
CLEANFILES += compat/*.gcda compat/*.gcno
CLEANFILES += consensus/*.gcda consensus/*.gcno
@@ -435,14 +435,14 @@ CLEANFILES += zmq/*.gcda zmq/*.gcno
DISTCLEANFILES = obj/build.h
-EXTRA_DIST = leveldb $(CTAES_DIST)
+EXTRA_DIST = $(CTAES_DIST)
clean-local:
- -$(MAKE) -C leveldb clean
-$(MAKE) -C secp256k1 clean
-$(MAKE) -C univalue clean
-rm -f leveldb/*/*.gcda leveldb/*/*.gcno leveldb/helpers/memenv/*.gcda leveldb/helpers/memenv/*.gcno
-rm -f config.h
+ -rm -rf test/__pycache__
.rc.o:
@test -f $(WINDRES)
diff --git a/src/Makefile.leveldb.include b/src/Makefile.leveldb.include
index 88bb0c1932..4b3cd6364a 100644
--- a/src/Makefile.leveldb.include
+++ b/src/Makefile.leveldb.include
@@ -26,6 +26,61 @@ leveldb_libleveldb_a_CPPFLAGS = $(AM_CPPFLAGS) $(LEVELDB_CPPFLAGS_INT) $(LEVELDB
leveldb_libleveldb_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
leveldb_libleveldb_a_SOURCES=
+leveldb_libleveldb_a_SOURCES += leveldb/port/atomic_pointer.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/port_example.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/win/stdint.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/port.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/thread_annotations.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/db.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/options.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/comparator.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/filter_policy.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/slice.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/table_builder.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/env.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/c.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/iterator.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/cache.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/dumpfile.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/table.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/write_batch.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/status.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/log_format.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/memtable.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/version_set.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/write_batch_internal.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/filename.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/version_edit.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/dbformat.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/builder.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/log_writer.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/db_iter.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/skiplist.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/db_impl.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/table_cache.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/snapshot.h
+leveldb_libleveldb_a_SOURCES += leveldb/db/log_reader.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/filter_block.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/block_builder.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/block.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/two_level_iterator.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/merger.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/format.h
+leveldb_libleveldb_a_SOURCES += leveldb/table/iterator_wrapper.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/arena.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/random.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/posix_logger.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/hash.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/histogram.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/coding.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/testutil.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/mutexlock.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/logging.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/testharness.h
+
leveldb_libleveldb_a_SOURCES += leveldb/db/builder.cc
leveldb_libleveldb_a_SOURCES += leveldb/db/c.cc
leveldb_libleveldb_a_SOURCES += leveldb/db/dbformat.cc
@@ -76,3 +131,4 @@ endif
leveldb_libmemenv_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
leveldb_libmemenv_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
leveldb_libmemenv_a_SOURCES = leveldb/helpers/memenv/memenv.cc
+leveldb_libmemenv_a_SOURCES += leveldb/helpers/memenv/memenv.h
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index 3b39919441..9381cca9f2 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -390,19 +390,20 @@ QT_QM=$(QT_TS:.ts=.qm)
.SECONDARY: $(QT_QM)
-qt/bitcoinstrings.cpp: $(libbitcoin_server_a_SOURCES) $(libbitcoin_wallet_a_SOURCES)
+$(srcdir)/qt/bitcoinstrings.cpp: $(libbitcoin_server_a_SOURCES) $(libbitcoin_wallet_a_SOURCES)
@test -n $(XGETTEXT) || echo "xgettext is required for updating translations"
$(AM_V_GEN) cd $(srcdir); XGETTEXT=$(XGETTEXT) PACKAGE_NAME="$(PACKAGE_NAME)" COPYRIGHT_HOLDERS="$(COPYRIGHT_HOLDERS)" COPYRIGHT_HOLDERS_SUBSTITUTION="$(COPYRIGHT_HOLDERS_SUBSTITUTION)" $(PYTHON) ../share/qt/extract_strings_qt.py $^
-translate: qt/bitcoinstrings.cpp $(QT_FORMS_UI) $(QT_FORMS_UI) $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(BITCOIN_MM)
+translate: $(srcdir)/qt/bitcoinstrings.cpp $(QT_FORMS_UI) $(QT_FORMS_UI) $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(BITCOIN_MM)
@test -n $(LUPDATE) || echo "lupdate is required for updating translations"
- $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(LUPDATE) $^ -locations relative -no-obsolete -ts qt/locale/bitcoin_en.ts
+ $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(LUPDATE) $^ -locations relative -no-obsolete -ts $(srcdir)/qt/locale/bitcoin_en.ts
$(QT_QRC_LOCALE_CPP): $(QT_QRC_LOCALE) $(QT_QM)
@test -f $(RCC)
- @test -f $(@D)/$(<F) || cp -f $< $(@D)
- $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin_locale $(@D)/$(<F) | \
+ @cp -f $< $(@D)/temp_$(<F)
+ $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin_locale $(@D)/temp_$(<F) | \
$(SED) -e '/^\*\*.*Created:/d' -e '/^\*\*.*by:/d' > $@
+ @rm $(@D)/temp_$(<F)
$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_ICONS) $(RES_IMAGES) $(RES_MOVIES) $(PROTOBUF_H)
@test -f $(RCC)
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 2d7791232d..41d811fb54 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -125,6 +125,9 @@ CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES)
CLEANFILES += $(CLEAN_BITCOIN_TEST)
+# This file is problematic for out-of-tree builds if it exists.
+DISTCLEANFILES += test/buildenv.pyc
+
bitcoin_test: $(TEST_BINARY)
bitcoin_test_check: $(TEST_BINARY) FORCE
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 00f6fe99e0..cebb1c8e5e 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -263,7 +263,7 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP
pinfo->nTime = std::max((int64_t)0, addr.nTime - nTimePenalty);
// add services
- pinfo->nServices |= addr.nServices;
+ pinfo->nServices = ServiceFlags(pinfo->nServices | addr.nServices);
// do not update if no new information is present
if (!addr.nTime || (pinfo->nTime && addr.nTime <= pinfo->nTime))
@@ -502,6 +502,24 @@ void CAddrMan::Connected_(const CService& addr, int64_t nTime)
info.nTime = nTime;
}
+void CAddrMan::SetServices_(const CService& addr, ServiceFlags nServices)
+{
+ CAddrInfo* pinfo = Find(addr);
+
+ // if not found, bail out
+ if (!pinfo)
+ return;
+
+ CAddrInfo& info = *pinfo;
+
+ // check whether we are talking about the exact same CService (including same port)
+ if (info != addr)
+ return;
+
+ // update info
+ info.nServices = nServices;
+}
+
int CAddrMan::RandomInt(int nMax){
return GetRandInt(nMax);
}
diff --git a/src/addrman.h b/src/addrman.h
index c5923e9417..1caf540758 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -256,6 +256,9 @@ protected:
//! Mark an entry as currently-connected-to.
void Connected_(const CService &addr, int64_t nTime);
+ //! Update an entry's service bits.
+ void SetServices_(const CService &addr, ServiceFlags nServices);
+
public:
/**
* serialized format:
@@ -589,6 +592,14 @@ public:
}
}
+ void SetServices(const CService &addr, ServiceFlags nServices)
+ {
+ LOCK(cs);
+ Check();
+ SetServices_(addr, nServices);
+ Check();
+ }
+
};
#endif // BITCOIN_ADDRMAN_H
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 0005115671..8c27a578bb 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -16,14 +16,6 @@
#include "chainparamsseeds.h"
-std::string CDNSSeedData::getHost(uint64_t requiredServiceBits) const {
- //use default host for non-filter-capable seeds or if we use the default service bits (NODE_NETWORK)
- if (!supportsServiceBitsFiltering || requiredServiceBits == NODE_NETWORK)
- return host;
-
- return strprintf("x%x.%s", requiredServiceBits, host);
-}
-
static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesisOutputScript, uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
CMutableTransaction txNew;
diff --git a/src/chainparams.h b/src/chainparams.h
index 7168daaf43..638893e9ad 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -13,11 +13,9 @@
#include <vector>
-class CDNSSeedData {
-public:
+struct CDNSSeedData {
std::string name, host;
bool supportsServiceBitsFiltering;
- std::string getHost(uint64_t requiredServiceBits) const;
CDNSSeedData(const std::string &strName, const std::string &strHost, bool supportsServiceBitsFilteringIn = false) : name(strName), host(strHost), supportsServiceBitsFiltering(supportsServiceBitsFilteringIn) {}
};
diff --git a/src/clientversion.cpp b/src/clientversion.cpp
index aae0569bba..bfe9e16f80 100644
--- a/src/clientversion.cpp
+++ b/src/clientversion.cpp
@@ -67,16 +67,7 @@ const std::string CLIENT_NAME("Satoshi");
#endif
#endif
-#ifndef BUILD_DATE
-#ifdef GIT_COMMIT_DATE
-#define BUILD_DATE GIT_COMMIT_DATE
-#else
-#define BUILD_DATE __DATE__ ", " __TIME__
-#endif
-#endif
-
const std::string CLIENT_BUILD(BUILD_DESC CLIENT_VERSION_SUFFIX);
-const std::string CLIENT_DATE(BUILD_DATE);
static std::string FormatVersion(int nVersion)
{
diff --git a/src/clientversion.h b/src/clientversion.h
index 6f255d69c9..47263d5344 100644
--- a/src/clientversion.h
+++ b/src/clientversion.h
@@ -59,7 +59,6 @@ static const int CLIENT_VERSION =
extern const std::string CLIENT_NAME;
extern const std::string CLIENT_BUILD;
-extern const std::string CLIENT_DATE;
std::string FormatFullVersion();
diff --git a/src/init.cpp b/src/init.cpp
index 3a260d16db..b572bfc327 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -479,11 +479,20 @@ std::string HelpMessage(HelpMessageMode mode)
std::string LicenseInfo()
{
+ const std::string URL_SOURCE_CODE = "<https://github.com/bitcoin/bitcoin>";
+ const std::string URL_WEBSITE = "<https://bitcoincore.org>";
// todo: remove urls from translations on next change
return CopyrightHolders(strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" +
"\n" +
- _("This is experimental software.") + "\n" +
+ strprintf(_("Please contribute if you find %s useful. "
+ "Visit %s for further information about the software."),
+ PACKAGE_NAME, URL_WEBSITE) +
+ "\n" +
+ strprintf(_("The source code is available from %s."),
+ URL_SOURCE_CODE) +
"\n" +
+ "\n" +
+ _("This is experimental software.") + "\n" +
_("Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.") + "\n" +
"\n" +
_("This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.") +
@@ -748,7 +757,7 @@ void InitLogging()
fLogIPs = GetBoolArg("-logips", DEFAULT_LOGIPS);
LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
- LogPrintf("Bitcoin version %s (%s)\n", FormatFullVersion(), CLIENT_DATE);
+ LogPrintf("Bitcoin version %s\n", FormatFullVersion());
}
/** Initialize bitcoin.
@@ -950,7 +959,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
- nLocalServices |= NODE_BLOOM;
+ nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
@@ -1361,7 +1370,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
// after any wallet rescanning has taken place.
if (fPruneMode) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
- nLocalServices &= ~NODE_NETWORK;
+ nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
uiInterface.InitMessage(_("Pruning blockstore..."));
PruneAndFlush();
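The init.cpp hunks above follow from nLocalServices becoming a typed ServiceFlags enum instead of a plain uint64_t, so every bit operation is cast back to ServiceFlags explicitly. A rough Python analogue using IntFlag, with bit values matching the protocol's NODE_NETWORK=1 and NODE_BLOOM=4 (the enum here is an illustrative stand-in, not the real protocol definition):

from enum import IntFlag

class ServiceFlags(IntFlag):
    NODE_NONE = 0
    NODE_NETWORK = 1 << 0
    NODE_BLOOM = 1 << 2

local = ServiceFlags.NODE_NETWORK
# -peerbloomfilters: nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM)
local = ServiceFlags(local | ServiceFlags.NODE_BLOOM)
assert ServiceFlags.NODE_BLOOM in local
# prune mode: nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK)
local = ServiceFlags(local & ~ServiceFlags.NODE_NETWORK)
assert ServiceFlags.NODE_NETWORK not in local
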
diff --git a/src/main.cpp b/src/main.cpp
index 6d006e8789..bdb3457f8e 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -3449,8 +3449,9 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
-static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp)
+static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
+ if (fNewBlock) *fNewBlock = false;
AssertLockHeld(cs_main);
CBlockIndex *pindexDummy = NULL;
@@ -3479,6 +3480,7 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
if (!fHasMoreWork) return true; // Don't process less-work chains
if (fTooFarAhead) return true; // Block height is too high
}
+ if (fNewBlock) *fNewBlock = true;
if ((!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime())) || !ContextualCheckBlock(block, state, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
@@ -3526,7 +3528,7 @@ static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned
}
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
{
{
LOCK(cs_main);
@@ -3535,9 +3537,11 @@ bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, c
// Store to disk
CBlockIndex *pindex = NULL;
- bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp);
+ bool fNewBlock = false;
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
+ if (fNewBlock) pfrom->nLastBlockTime = GetTime();
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret)
@@ -4107,7 +4111,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
LOCK(cs_main);
CValidationState state;
- if (AcceptBlock(block, state, chainparams, NULL, true, dbp))
+ if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
nLoaded++;
if (state.IsError())
break;
@@ -4140,7 +4144,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
head.ToString());
LOCK(cs_main);
CValidationState dummy;
- if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second))
+ if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
{
nLoaded++;
queue.push_back(block.GetHash());
@@ -4611,7 +4615,22 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
- vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
+ uint64_t nServiceInt;
+ vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
+ pfrom->nServices = ServiceFlags(nServiceInt);
+ if (!pfrom->fInbound)
+ {
+ addrman.SetServices(pfrom->addr, pfrom->nServices);
+ }
+ if (pfrom->nServicesExpected & ~pfrom->nServices)
+ {
+ LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
+ pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
+ strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
+ pfrom->fDisconnect = true;
+ return false;
+ }
+
if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
@@ -4772,6 +4791,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
{
boost::this_thread::interruption_point();
+ if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+ continue;
+
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
@@ -5040,6 +5062,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
RelayTransaction(tx);
vWorkQueue.push_back(inv.hash);
+ pfrom->nLastTXTime = GetTime();
+
LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->id,
tx.GetHash().ToString(),
@@ -5609,6 +5633,11 @@ bool ProcessMessages(CNode* pfrom)
// Allow exceptions from over-long size
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
+ else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
+ {
+ // Allow exceptions from non-canonical encoding
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
+ }
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
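The new version-message handling above disconnects a peer when any expected service bit is missing from what it offers, i.e. when expected & ~offered is non-zero, and the same idea filters relayed addr entries against REQUIRED_SERVICES. A compact model of that check, assuming REQUIRED_SERVICES is NODE_NETWORK for illustration (missing_services is a stand-in helper):

NODE_NETWORK = 1 << 0
NODE_BLOOM = 1 << 2
REQUIRED_SERVICES = NODE_NETWORK   # assumption for this sketch

def missing_services(expected, offered):
    # non-zero result means the peer lacks something we required when connecting
    return expected & ~offered

# peer only offers NODE_BLOOM: NODE_NETWORK is missing, so it would be disconnected
assert missing_services(REQUIRED_SERVICES, NODE_BLOOM) == NODE_NETWORK
# peer offers everything expected (and more): the connection is kept
assert missing_services(REQUIRED_SERVICES, NODE_NETWORK | NODE_BLOOM) == 0
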
diff --git a/src/main.h b/src/main.h
index 9b99ae7c87..e2bfdfdf6e 100644
--- a/src/main.h
+++ b/src/main.h
@@ -215,7 +215,7 @@ void UnregisterNodeSignals(CNodeSignals& nodeSignals);
* @param[out] dbp The already known disk position of pblock, or NULL if not yet stored.
* @return True if state.IsValid()
*/
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp);
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp);
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
/** Open a block file (blk?????.dat) */
diff --git a/src/miner.cpp b/src/miner.cpp
index eaf29a767b..99eb0a2ebd 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -71,231 +71,304 @@ int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParam
return nNewTime - nOldTime;
}
-CBlockTemplate* CreateNewBlock(const CChainParams& chainparams, const CScript& scriptPubKeyIn)
+BlockAssembler::BlockAssembler(const CChainParams& _chainparams)
+ : chainparams(_chainparams)
{
- // Create new block
- std::unique_ptr<CBlockTemplate> pblocktemplate(new CBlockTemplate());
+ // Largest block you're willing to create:
+ nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
+ // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity:
+ nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
+
+ // Minimum block size you want to create; block will be filled with free transactions
+ // until there are no more or the block reaches this size:
+ nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE);
+ nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
+}
+
+void BlockAssembler::resetBlock()
+{
+ inBlock.clear();
+
+ // Reserve space for coinbase tx
+ nBlockSize = 1000;
+ nBlockSigOps = 100;
+
+ // These counters do not include coinbase tx
+ nBlockTx = 0;
+ nFees = 0;
+
+ lastFewTxs = 0;
+ blockFinished = false;
+}
+
+CBlockTemplate* BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
+{
+ resetBlock();
+
+ pblocktemplate.reset(new CBlockTemplate());
+
if(!pblocktemplate.get())
return NULL;
- CBlock *pblock = &pblocktemplate->block; // pointer for convenience
-
- // Create coinbase tx
- CMutableTransaction txNew;
- txNew.vin.resize(1);
- txNew.vin[0].prevout.SetNull();
- txNew.vout.resize(1);
- txNew.vout[0].scriptPubKey = scriptPubKeyIn;
+ pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
pblock->vtx.push_back(CTransaction());
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOps.push_back(-1); // updated at end
- // Largest block you're willing to create:
- unsigned int nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
- // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity:
- nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
+ LOCK2(cs_main, mempool.cs);
+ CBlockIndex* pindexPrev = chainActive.Tip();
+ nHeight = pindexPrev->nHeight + 1;
+
+ pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
+ // -regtest only: allow overriding block.nVersion with
+ // -blockversion=N to test forking scenarios
+ if (chainparams.MineBlocksOnDemand())
+ pblock->nVersion = GetArg("-blockversion", pblock->nVersion);
+
+ pblock->nTime = GetAdjustedTime();
+ const int64_t nMedianTimePast = pindexPrev->GetMedianTimePast();
+
+ nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
+ ? nMedianTimePast
+ : pblock->GetBlockTime();
+
+ addPriorityTxs();
+ addScoreTxs();
+
+ nLastBlockTx = nBlockTx;
+ nLastBlockSize = nBlockSize;
+ LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOps);
+
+ // Create coinbase transaction.
+ CMutableTransaction coinbaseTx;
+ coinbaseTx.vin.resize(1);
+ coinbaseTx.vin[0].prevout.SetNull();
+ coinbaseTx.vout.resize(1);
+ coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
+ coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
+ coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
+ pblock->vtx[0] = coinbaseTx;
+ pblocktemplate->vTxFees[0] = -nFees;
+
+ // Fill in header
+ pblock->hashPrevBlock = pindexPrev->GetBlockHash();
+ UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
+ pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
+ pblock->nNonce = 0;
+ pblocktemplate->vTxSigOps[0] = GetLegacySigOpCount(pblock->vtx[0]);
+
+ CValidationState state;
+ if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
+ throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
+ }
- // How much of the block should be dedicated to high-priority transactions,
- // included regardless of the fees they pay
- unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE);
- nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);
+ return pblocktemplate.release();
+}
- // Minimum block size you want to create; block will be filled with free transactions
- // until there are no more or the block reaches this size:
- unsigned int nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE);
- nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
+bool BlockAssembler::isStillDependent(CTxMemPool::txiter iter)
+{
+ BOOST_FOREACH(CTxMemPool::txiter parent, mempool.GetMemPoolParents(iter))
+ {
+ if (!inBlock.count(parent)) {
+ return true;
+ }
+ }
+ return false;
+}
- // Collect memory pool transactions into the block
- CTxMemPool::setEntries inBlock;
- CTxMemPool::setEntries waitSet;
- // This vector will be sorted into a priority queue:
- vector<TxCoinAgePriority> vecPriority;
- TxCoinAgePriorityCompare pricomparer;
- std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash> waitPriMap;
- typedef std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash>::iterator waitPriIter;
- double actualPriority = -1;
- std::priority_queue<CTxMemPool::txiter, std::vector<CTxMemPool::txiter>, ScoreCompare> clearedTxs;
+bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter)
+{
+ if (nBlockSize + iter->GetTxSize() >= nBlockMaxSize) {
+ // If the block is so close to full that no more txs will fit
+ // or if we've tried more than 50 times to fill remaining space
+ // then flag that the block is finished
+ if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
+ blockFinished = true;
+ return false;
+ }
+ // Once we're within 1000 bytes of a full block, only look at 50 more txs
+ // to try to fill the remaining space.
+ if (nBlockSize > nBlockMaxSize - 1000) {
+ lastFewTxs++;
+ }
+ return false;
+ }
+
+ if (nBlockSigOps + iter->GetSigOpCount() >= MAX_BLOCK_SIGOPS) {
+ // If the block has room for no more sig ops then
+ // flag that the block is finished
+ if (nBlockSigOps > MAX_BLOCK_SIGOPS - 2) {
+ blockFinished = true;
+ return false;
+ }
+ // Otherwise attempt to find another tx with fewer sigops
+ // to put in the block.
+ return false;
+ }
+
+ // Must check that lock times are still valid
+ // This can be removed once MTP is always enforced
+ // as long as reorgs keep the mempool consistent.
+ if (!IsFinalTx(iter->GetTx(), nHeight, nLockTimeCutoff))
+ return false;
+
+ return true;
+}
+
+void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
+{
+ pblock->vtx.push_back(iter->GetTx());
+ pblocktemplate->vTxFees.push_back(iter->GetFee());
+ pblocktemplate->vTxSigOps.push_back(iter->GetSigOpCount());
+ nBlockSize += iter->GetTxSize();
+ ++nBlockTx;
+ nBlockSigOps += iter->GetSigOpCount();
+ nFees += iter->GetFee();
+ inBlock.insert(iter);
+
bool fPrintPriority = GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
- uint64_t nBlockSize = 1000;
- uint64_t nBlockTx = 0;
- unsigned int nBlockSigOps = 100;
- int lastFewTxs = 0;
- CAmount nFees = 0;
+ if (fPrintPriority) {
+ double dPriority = iter->GetPriority(nHeight);
+ CAmount dummy;
+ mempool.ApplyDeltas(iter->GetTx().GetHash(), dPriority, dummy);
+ LogPrintf("priority %.1f fee %s txid %s\n",
+ dPriority,
+ CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
+ iter->GetTx().GetHash().ToString());
+ }
+}
+void BlockAssembler::addScoreTxs()
+{
+ std::priority_queue<CTxMemPool::txiter, std::vector<CTxMemPool::txiter>, ScoreCompare> clearedTxs;
+ CTxMemPool::setEntries waitSet;
+ CTxMemPool::indexed_transaction_set::index<mining_score>::type::iterator mi = mempool.mapTx.get<mining_score>().begin();
+ CTxMemPool::txiter iter;
+ while (!blockFinished && (mi != mempool.mapTx.get<mining_score>().end() || !clearedTxs.empty()))
{
- LOCK2(cs_main, mempool.cs);
- CBlockIndex* pindexPrev = chainActive.Tip();
- const int nHeight = pindexPrev->nHeight + 1;
- pblock->nTime = GetAdjustedTime();
- const int64_t nMedianTimePast = pindexPrev->GetMedianTimePast();
-
- pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
- // -regtest only: allow overriding block.nVersion with
- // -blockversion=N to test forking scenarios
- if (chainparams.MineBlocksOnDemand())
- pblock->nVersion = GetArg("-blockversion", pblock->nVersion);
-
- int64_t nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
- ? nMedianTimePast
- : pblock->GetBlockTime();
-
- bool fPriorityBlock = nBlockPrioritySize > 0;
- if (fPriorityBlock) {
- vecPriority.reserve(mempool.mapTx.size());
- for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin();
- mi != mempool.mapTx.end(); ++mi)
- {
- double dPriority = mi->GetPriority(nHeight);
- CAmount dummy;
- mempool.ApplyDeltas(mi->GetTx().GetHash(), dPriority, dummy);
- vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
- }
- std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+ // If no txs that were previously postponed are available to try
+ // again, then try the next highest score tx
+ if (clearedTxs.empty()) {
+ iter = mempool.mapTx.project<0>(mi);
+ mi++;
+ }
+ // If a previously postponed tx is available to try again, then it
+ // has a higher score than any tx that has not yet been tried
+ else {
+ iter = clearedTxs.top();
+ clearedTxs.pop();
}
- CTxMemPool::indexed_transaction_set::index<mining_score>::type::iterator mi = mempool.mapTx.get<mining_score>().begin();
- CTxMemPool::txiter iter;
-
- while (mi != mempool.mapTx.get<mining_score>().end() || !clearedTxs.empty())
- {
- bool priorityTx = false;
- if (fPriorityBlock && !vecPriority.empty()) { // add a tx from priority queue to fill the blockprioritysize
- priorityTx = true;
- iter = vecPriority.front().second;
- actualPriority = vecPriority.front().first;
- std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
- vecPriority.pop_back();
- }
- else if (clearedTxs.empty()) { // add tx with next highest score
- iter = mempool.mapTx.project<0>(mi);
- mi++;
- }
- else { // try to add a previously postponed child tx
- iter = clearedTxs.top();
- clearedTxs.pop();
- }
+ // If tx already in block, skip (added by addPriorityTxs)
+ if (inBlock.count(iter)) {
+ continue;
+ }
+
+ // If tx is dependent on other mempool txs which haven't yet been included
+ // then put it in the waitSet
+ if (isStillDependent(iter)) {
+ waitSet.insert(iter);
+ continue;
+ }
- if (inBlock.count(iter))
- continue; // could have been added to the priorityBlock
+ // If the fee rate is below the min fee rate for mining, then we're done
+ // adding txs based on score (fee rate)
+ if (iter->GetModifiedFee() < ::minRelayTxFee.GetFee(iter->GetTxSize()) && nBlockSize >= nBlockMinSize) {
+ return;
+ }
- const CTransaction& tx = iter->GetTx();
+ // If this tx fits in the block add it, otherwise keep looping
+ if (TestForBlock(iter)) {
+ AddToBlock(iter);
- bool fOrphan = false;
- BOOST_FOREACH(CTxMemPool::txiter parent, mempool.GetMemPoolParents(iter))
+ // This tx was successfully added, so
+ // add transactions that depend on this one to the priority queue to try again
+ BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
{
- if (!inBlock.count(parent)) {
- fOrphan = true;
- break;
+ if (waitSet.count(child)) {
+ clearedTxs.push(child);
+ waitSet.erase(child);
}
}
- if (fOrphan) {
- if (priorityTx)
- waitPriMap.insert(std::make_pair(iter,actualPriority));
- else
- waitSet.insert(iter);
- continue;
- }
+ }
+ }
+}
- unsigned int nTxSize = iter->GetTxSize();
- if (fPriorityBlock &&
- (nBlockSize + nTxSize >= nBlockPrioritySize || !AllowFree(actualPriority))) {
- fPriorityBlock = false;
- waitPriMap.clear();
- }
- if (!priorityTx &&
- (iter->GetModifiedFee() < ::minRelayTxFee.GetFee(nTxSize) && nBlockSize >= nBlockMinSize)) {
- break;
- }
- if (nBlockSize + nTxSize >= nBlockMaxSize) {
- if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
- break;
- }
- // Once we're within 1000 bytes of a full block, only look at 50 more txs
- // to try to fill the remaining space.
- if (nBlockSize > nBlockMaxSize - 1000) {
- lastFewTxs++;
- }
- continue;
- }
+void BlockAssembler::addPriorityTxs()
+{
+ // How much of the block should be dedicated to high-priority transactions,
+ // included regardless of the fees they pay
+ unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE);
+ nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);
- if (!IsFinalTx(tx, nHeight, nLockTimeCutoff))
- continue;
+ if (nBlockPrioritySize == 0) {
+ return;
+ }
- unsigned int nTxSigOps = iter->GetSigOpCount();
- if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS) {
- if (nBlockSigOps > MAX_BLOCK_SIGOPS - 2) {
- break;
- }
- continue;
- }
+ // This vector will be sorted into a priority queue:
+ vector<TxCoinAgePriority> vecPriority;
+ TxCoinAgePriorityCompare pricomparer;
+ std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash> waitPriMap;
+ typedef std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash>::iterator waitPriIter;
+ double actualPriority = -1;
- CAmount nTxFees = iter->GetFee();
- // Added
- pblock->vtx.push_back(tx);
- pblocktemplate->vTxFees.push_back(nTxFees);
- pblocktemplate->vTxSigOps.push_back(nTxSigOps);
- nBlockSize += nTxSize;
- ++nBlockTx;
- nBlockSigOps += nTxSigOps;
- nFees += nTxFees;
-
- if (fPrintPriority)
- {
- double dPriority = iter->GetPriority(nHeight);
- CAmount dummy;
- mempool.ApplyDeltas(tx.GetHash(), dPriority, dummy);
- LogPrintf("priority %.1f fee %s txid %s\n",
- dPriority , CFeeRate(iter->GetModifiedFee(), nTxSize).ToString(), tx.GetHash().ToString());
- }
+ vecPriority.reserve(mempool.mapTx.size());
+ for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin();
+ mi != mempool.mapTx.end(); ++mi)
+ {
+ double dPriority = mi->GetPriority(nHeight);
+ CAmount dummy;
+ mempool.ApplyDeltas(mi->GetTx().GetHash(), dPriority, dummy);
+ vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
+ }
+ std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+
+ CTxMemPool::txiter iter;
+ while (!vecPriority.empty() && !blockFinished) { // add a tx from priority queue to fill the blockprioritysize
+ iter = vecPriority.front().second;
+ actualPriority = vecPriority.front().first;
+ std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+ vecPriority.pop_back();
+
+ // If tx already in block, skip
+ if (inBlock.count(iter)) {
+ assert(false); // shouldn't happen for priority txs
+ continue;
+ }
+
+ // If tx is dependent on other mempool txs which haven't yet been included
+ // then put it in waitPriMap to retry once a parent has been added
+ if (isStillDependent(iter)) {
+ waitPriMap.insert(std::make_pair(iter, actualPriority));
+ continue;
+ }
- inBlock.insert(iter);
+ // If this tx fits in the block add it, otherwise keep looping
+ if (TestForBlock(iter)) {
+ AddToBlock(iter);
- // Add transactions that depend on this one to the priority queue
+ // Now that this tx has been added, if we've surpassed our desired priority size
+ // or dropped below the AllowFreeThreshold, then we're done adding priority txs
+ if (nBlockSize >= nBlockPrioritySize || !AllowFree(actualPriority)) {
+ return;
+ }
+
+ // This tx was successfully added, so
+ // add transactions that depend on this one to the priority queue to try again
BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
{
- if (fPriorityBlock) {
- waitPriIter wpiter = waitPriMap.find(child);
- if (wpiter != waitPriMap.end()) {
- vecPriority.push_back(TxCoinAgePriority(wpiter->second,child));
- std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
- waitPriMap.erase(wpiter);
- }
- }
- else {
- if (waitSet.count(child)) {
- clearedTxs.push(child);
- waitSet.erase(child);
- }
+ waitPriIter wpiter = waitPriMap.find(child);
+ if (wpiter != waitPriMap.end()) {
+ vecPriority.push_back(TxCoinAgePriority(wpiter->second,child));
+ std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+ waitPriMap.erase(wpiter);
}
}
}
- nLastBlockTx = nBlockTx;
- nLastBlockSize = nBlockSize;
- LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOps);
-
- // Compute final coinbase transaction.
- txNew.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
- txNew.vin[0].scriptSig = CScript() << nHeight << OP_0;
- pblock->vtx[0] = txNew;
- pblocktemplate->vTxFees[0] = -nFees;
-
- // Fill in header
- pblock->hashPrevBlock = pindexPrev->GetBlockHash();
- UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
- pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
- pblock->nNonce = 0;
- pblocktemplate->vTxSigOps[0] = GetLegacySigOpCount(pblock->vtx[0]);
-
- CValidationState state;
- if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
- throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
- }
}
-
- return pblocktemplate.release();
}
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
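The BlockAssembler refactor above keeps running size and sigop totals, seeded with space reserved for the coinbase, and TestForBlock rejects a candidate tx once either budget would be exceeded. A stripped-down Python sketch of that bookkeeping with illustrative sizes (ToyAssembler is not the real class; priority/score ordering and the mempool are omitted):

MAX_BLOCK_SIZE = 1000000
MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE // 50   # 20000

class ToyAssembler:
    def __init__(self, block_max_size=750000):
        # clamp -blockmaxsize to [1000, MAX_BLOCK_SIZE - 1000], as in the constructor above
        self.block_max_size = max(1000, min(MAX_BLOCK_SIZE - 1000, block_max_size))
        self.block_size = 1000    # reserved for the coinbase tx
        self.block_sigops = 100   # reserved for the coinbase tx
        self.txs = []

    def test_for_block(self, tx_size, tx_sigops):
        if self.block_size + tx_size >= self.block_max_size:
            return False
        if self.block_sigops + tx_sigops >= MAX_BLOCK_SIGOPS:
            return False
        return True

    def add(self, txid, tx_size, tx_sigops):
        if not self.test_for_block(tx_size, tx_sigops):
            return False
        self.txs.append(txid)
        self.block_size += tx_size
        self.block_sigops += tx_sigops
        return True

asm = ToyAssembler()
assert asm.add("tx1", tx_size=250, tx_sigops=2)
assert not asm.add("too_big", tx_size=800000, tx_sigops=2)   # would blow the size budget
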
diff --git a/src/miner.h b/src/miner.h
index cd0f136625..74f19693c4 100644
--- a/src/miner.h
+++ b/src/miner.h
@@ -7,14 +7,17 @@
#define BITCOIN_MINER_H
#include "primitives/block.h"
+#include "txmempool.h"
#include <stdint.h>
+#include <memory>
class CBlockIndex;
class CChainParams;
class CReserveKey;
class CScript;
class CWallet;
+
namespace Consensus { struct Params; };
static const bool DEFAULT_PRINTPRIORITY = false;
@@ -27,7 +30,58 @@ struct CBlockTemplate
};
/** Generate a new block, without valid proof-of-work */
-CBlockTemplate* CreateNewBlock(const CChainParams& chainparams, const CScript& scriptPubKeyIn);
+class BlockAssembler
+{
+private:
+ // The constructed block template
+ std::unique_ptr<CBlockTemplate> pblocktemplate;
+ // A convenience pointer that always refers to the CBlock in pblocktemplate
+ CBlock* pblock;
+
+ // Configuration parameters for the block size
+ unsigned int nBlockMaxSize, nBlockMinSize;
+
+ // Information on the current status of the block
+ uint64_t nBlockSize;
+ uint64_t nBlockTx;
+ unsigned int nBlockSigOps;
+ CAmount nFees;
+ CTxMemPool::setEntries inBlock;
+
+ // Chain context for the block
+ int nHeight;
+ int64_t nLockTimeCutoff;
+ const CChainParams& chainparams;
+
+ // Variables used for addScoreTxs and addPriorityTxs
+ int lastFewTxs;
+ bool blockFinished;
+
+public:
+ BlockAssembler(const CChainParams& chainparams);
+ /** Construct a new block template with coinbase to scriptPubKeyIn */
+ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn);
+
+private:
+ // utility functions
+ /** Clear the block's state and prepare for assembling a new block */
+ void resetBlock();
+ /** Add a tx to the block */
+ void AddToBlock(CTxMemPool::txiter iter);
+
+ // Methods for how to add transactions to a block.
+ /** Add transactions based on modified feerate */
+ void addScoreTxs();
+ /** Add transactions based on tx "priority" */
+ void addPriorityTxs();
+
+ // helper function for addScoreTxs and addPriorityTxs
+ /** Test if tx will still "fit" in the block */
+ bool TestForBlock(CTxMemPool::txiter iter);
+ /** Test if tx still has unconfirmed parents not yet in block */
+ bool isStillDependent(CTxMemPool::txiter iter);
+};
+
/** Modify the extranonce in a block */
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce);
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);
diff --git a/src/net.cpp b/src/net.cpp
index 173eba57c8..4eca3d75cc 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -71,12 +71,15 @@ namespace {
const static std::string NET_MESSAGE_COMMAND_OTHER = "*other*";
+/** Services this node implementation cares about */
+static const ServiceFlags nRelevantServices = NODE_NETWORK;
+
//
// Global state variables
//
bool fDiscover = true;
bool fListen = true;
-uint64_t nLocalServices = NODE_NETWORK;
+ServiceFlags nLocalServices = NODE_NETWORK;
bool fRelayTxes = true;
CCriticalSection cs_mapLocalHost;
std::map<CNetAddr, LocalServiceInfo> mapLocalHost;
@@ -159,7 +162,7 @@ static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn
{
struct in6_addr ip;
memcpy(&ip, i->addr, sizeof(ip));
- CAddress addr(CService(ip, i->port));
+ CAddress addr(CService(ip, i->port), NODE_NETWORK);
addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
vSeedsOut.push_back(addr);
}
@@ -172,13 +175,12 @@ static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn
// one by discovery.
CAddress GetLocalAddress(const CNetAddr *paddrPeer)
{
- CAddress ret(CService("0.0.0.0",GetListenPort()),0);
+ CAddress ret(CService("0.0.0.0",GetListenPort()), NODE_NONE);
CService addr;
if (GetLocal(addr, paddrPeer))
{
- ret = CAddress(addr);
+ ret = CAddress(addr, nLocalServices);
}
- ret.nServices = nLocalServices;
ret.nTime = GetAdjustedTime();
return ret;
}
@@ -398,6 +400,26 @@ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure
return NULL;
}
+ if (pszDest && addrConnect.IsValid()) {
+ // It is possible that we already have a connection to the IP/port pszDest resolved to.
+ // In that case, drop the connection that was just created, and return the existing CNode instead.
+ // Also store the name we used to connect in that CNode, so that future FindNode() calls to that
+ // name catch this early.
+ CNode* pnode = FindNode((CService)addrConnect);
+ if (pnode)
+ {
+ pnode->AddRef();
+ {
+ LOCK(cs_vNodes);
+ if (pnode->addrName.empty()) {
+ pnode->addrName = std::string(pszDest);
+ }
+ }
+ CloseSocket(hSocket);
+ return pnode;
+ }
+ }
+
addrman.Attempt(addrConnect, fCountFailure);
// Add node
@@ -409,6 +431,7 @@ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure
vNodes.push_back(pnode);
}
+ pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices);
pnode->nTimeConnected = GetTime();
return pnode;
@@ -461,14 +484,14 @@ void CNode::PushVersion()
int nBestHeight = GetNodeSignals().GetHeight().get_value_or(0);
int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime());
- CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0",0)));
+ CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0", 0), addr.nServices));
CAddress addrMe = GetLocalAddress(&addr);
GetRandBytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
if (fLogIPs)
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), addrYou.ToString(), id);
else
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), id);
- PushMessage(NetMsgType::VERSION, PROTOCOL_VERSION, nLocalServices, nTime, addrYou, addrMe,
+ PushMessage(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalServices, nTime, addrYou, addrMe,
nLocalHostNonce, strSubVersion, nBestHeight, ::fRelayTxes);
}
@@ -838,6 +861,11 @@ struct NodeEvictionCandidate
NodeId id;
int64_t nTimeConnected;
int64_t nMinPingUsecTime;
+ int64_t nLastBlockTime;
+ int64_t nLastTXTime;
+ bool fNetworkNode;
+ bool fRelayTxes;
+ bool fBloomFilter;
CAddress addr;
uint64_t nKeyedNetGroup;
};
@@ -854,7 +882,24 @@ static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, cons
static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) {
return a.nKeyedNetGroup < b.nKeyedNetGroup;
-};
+}
+
+static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block.
+ if (a.nLastBlockTime != b.nLastBlockTime) return a.nLastBlockTime < b.nLastBlockTime;
+ if (a.fNetworkNode != b.fNetworkNode) return b.fNetworkNode;
+ return a.nTimeConnected > b.nTimeConnected;
+}
+
+static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
+ if (a.nLastTXTime != b.nLastTXTime) return a.nLastTXTime < b.nLastTXTime;
+ if (a.fRelayTxes != b.fRelayTxes) return b.fRelayTxes;
+ if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
+ return a.nTimeConnected > b.nTimeConnected;
+}
/** Try to find a connection to evict when the node is full.
* Extreme care must be taken to avoid opening the node to attacker
@@ -864,7 +909,7 @@ static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvict
* to forge. In order to partition a node the attacker must be
* simultaneously better at all of them than honest peers.
*/
-static bool AttemptToEvictConnection(bool fPreferNewConnection) {
+static bool AttemptToEvictConnection() {
std::vector<NodeEvictionCandidate> vEvictionCandidates;
{
LOCK(cs_vNodes);
@@ -876,7 +921,9 @@ static bool AttemptToEvictConnection(bool fPreferNewConnection) {
continue;
if (node->fDisconnect)
continue;
- NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime, node->addr, node->nKeyedNetGroup};
+ NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime,
+ node->nLastBlockTime, node->nLastTXTime, node->fNetworkNode,
+ node->fRelayTxes, node->pfilter != NULL, node->addr, node->nKeyedNetGroup};
vEvictionCandidates.push_back(candidate);
}
}
@@ -899,6 +946,20 @@ static bool AttemptToEvictConnection(bool fPreferNewConnection) {
if (vEvictionCandidates.empty()) return false;
+ // Protect 4 nodes that most recently sent us transactions.
+ // An attacker cannot manipulate this metric without performing useful work.
+ std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeTXTime);
+ vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end());
+
+ if (vEvictionCandidates.empty()) return false;
+
+ // Protect 4 nodes that most recently sent us blocks.
+ // An attacker cannot manipulate this metric without performing useful work.
+ std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeBlockTime);
+ vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end());
+
+ if (vEvictionCandidates.empty()) return false;
+
// Protect the half of the remaining nodes which have been connected the longest.
// This replicates the non-eviction implicit behavior, and precludes attacks that start later.
std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeTimeConnected);
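The added protection rounds above each sort the remaining candidates by one metric an attacker cannot cheaply forge (recent tx relay, recent block relay, connection age) and exempt the best few before the netgroup-based eviction choice, so partitioning a node requires beating honest peers on every metric at once. A simplified model of that filtering (the peer records and the protect() helper are illustrative):

def protect(candidates, key, n):
    # drop (i.e. protect from eviction) the n candidates ranked best by `key`
    ordered = sorted(candidates, key=key)
    return ordered[:-n] if n < len(ordered) else []

peers = [
    {"id": 0, "last_tx": 50, "last_block": 0,  "connected": 900},
    {"id": 1, "last_tx": 0,  "last_block": 80, "connected": 500},
    {"id": 2, "last_tx": 10, "last_block": 10, "connected": 100},
    {"id": 3, "last_tx": 0,  "last_block": 0,  "connected": 999},
]
remaining = protect(peers, key=lambda p: p["last_tx"], n=1)        # keep the freshest tx relayer
remaining = protect(remaining, key=lambda p: p["last_block"], n=1) # keep the freshest block relayer
remaining = protect(remaining, key=lambda p: p["connected"], n=1)  # keep the longest-connected peer
assert [p["id"] for p in remaining] == [2]   # peer 2 is the one left exposed to eviction
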
@@ -927,13 +988,6 @@ static bool AttemptToEvictConnection(bool fPreferNewConnection) {
// Reduce to the network group with the most connections
vEvictionCandidates = std::move(mapAddrCounts[naMostConnections]);
- // Do not disconnect peers if there is only one unprotected connection from their network group.
- // This step excessively favors netgroup diversity, and should be removed once more protective criteria are established.
- if (vEvictionCandidates.size() <= 1)
- // unless we prefer the new connection (for whitelisted peers)
- if (!fPreferNewConnection)
- return false;
-
// Disconnect from the network group with the most connections
NodeId evicted = vEvictionCandidates.front().id;
LOCK(cs_vNodes);
@@ -999,7 +1053,7 @@ static void AcceptConnection(const ListenSocket& hListenSocket) {
if (nInbound >= nMaxInbound)
{
- if (!AttemptToEvictConnection(whitelisted)) {
+ if (!AttemptToEvictConnection()) {
// No connection to evict, disconnect the new connection
LogPrint("net", "failed to find an eviction candidate - connection dropped (full)\n");
CloseSocket(hSocket);
@@ -1412,6 +1466,18 @@ void MapPort(bool)
+static std::string GetDNSHost(const CDNSSeedData& data, ServiceFlags* requiredServiceBits)
+{
+ //use default host for non-filter-capable seeds or if we use the default service bits (NODE_NETWORK)
+ if (!data.supportsServiceBitsFiltering || *requiredServiceBits == NODE_NETWORK) {
+ *requiredServiceBits = NODE_NETWORK;
+ return data.host;
+ }
+
+ return strprintf("x%x.%s", *requiredServiceBits, data.host);
+}
+
+
void ThreadDNSAddressSeed()
{
// goal: only query DNS seeds if address need is acute
@@ -1437,8 +1503,8 @@ void ThreadDNSAddressSeed()
} else {
std::vector<CNetAddr> vIPs;
std::vector<CAddress> vAdd;
- uint64_t requiredServiceBits = NODE_NETWORK;
- if (LookupHost(seed.getHost(requiredServiceBits).c_str(), vIPs, 0, true))
+ ServiceFlags requiredServiceBits = nRelevantServices;
+ if (LookupHost(GetDNSHost(seed, &requiredServiceBits).c_str(), vIPs, 0, true))
{
BOOST_FOREACH(const CNetAddr& ip, vIPs)
{
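GetDNSHost above encodes the required service bits into the seed hostname as x<hex>.<host> when the seed supports service-bit filtering, and falls back to the plain host (treated as NODE_NETWORK) otherwise. A small sketch of that naming convention (the seed hostname is illustrative):

NODE_NETWORK = 1 << 0

def dns_seed_host(host, supports_filtering, required_bits):
    # non-filtering seeds (or a plain NODE_NETWORK requirement) use the bare hostname
    if not supports_filtering or required_bits == NODE_NETWORK:
        return host, NODE_NETWORK
    return "x%x.%s" % (required_bits, host), required_bits

assert dns_seed_host("seed.example.org", False, 5) == ("seed.example.org", NODE_NETWORK)
assert dns_seed_host("seed.example.org", True, NODE_NETWORK) == ("seed.example.org", NODE_NETWORK)
assert dns_seed_host("seed.example.org", True, 5) == ("x5.seed.example.org", 5)
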
@@ -1520,7 +1586,7 @@ void ThreadOpenConnections()
ProcessOneShot();
BOOST_FOREACH(const std::string& strAddr, mapMultiArgs["-connect"])
{
- CAddress addr;
+ CAddress addr(CService(), NODE_NONE);
OpenNetworkConnection(addr, false, NULL, strAddr.c_str());
for (int i = 0; i < 10 && i < nLoop; i++)
{
@@ -1592,6 +1658,10 @@ void ThreadOpenConnections()
if (IsLimited(addr))
continue;
+ // only connect to full nodes
+ if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+ continue;
+
// only consider very recently tried nodes after 30 failed attempts
if (nANow - addr.nLastTry < 600 && nTries < 30)
continue;
@@ -1609,66 +1679,79 @@ void ThreadOpenConnections()
}
}
-void ThreadOpenAddedConnections()
+std::vector<AddedNodeInfo> GetAddedNodeInfo()
{
+ std::vector<AddedNodeInfo> ret;
+
+ std::list<std::string> lAddresses(0);
{
LOCK(cs_vAddedNodes);
- vAddedNodes = mapMultiArgs["-addnode"];
+ ret.reserve(vAddedNodes.size());
+ BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
+ lAddresses.push_back(strAddNode);
}
- if (HaveNameProxy()) {
- while(true) {
- std::list<std::string> lAddresses(0);
- {
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
- lAddresses.push_back(strAddNode);
+
+ // Build a map of all already connected addresses (by IP:port and by name) to inbound/outbound and resolved CService
+ std::map<CService, bool> mapConnected;
+ std::map<std::string, std::pair<bool, CService>> mapConnectedByName;
+ {
+ LOCK(cs_vNodes);
+ for (const CNode* pnode : vNodes) {
+ if (pnode->addr.IsValid()) {
+ mapConnected[pnode->addr] = pnode->fInbound;
}
- BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
- CAddress addr;
- CSemaphoreGrant grant(*semOutbound);
- OpenNetworkConnection(addr, false, &grant, strAddNode.c_str());
- MilliSleep(500);
+ if (!pnode->addrName.empty()) {
+ mapConnectedByName[pnode->addrName] = std::make_pair(pnode->fInbound, static_cast<const CService&>(pnode->addr));
+ }
+ }
+ }
+
+ BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
+ CService service(strAddNode, Params().GetDefaultPort());
+ if (service.IsValid()) {
+ // strAddNode is an IP:port
+ auto it = mapConnected.find(service);
+ if (it != mapConnected.end()) {
+ ret.push_back(AddedNodeInfo{strAddNode, service, true, it->second});
+ } else {
+ ret.push_back(AddedNodeInfo{strAddNode, CService(), false, false});
+ }
+ } else {
+ // strAddNode is a name
+ auto it = mapConnectedByName.find(strAddNode);
+ if (it != mapConnectedByName.end()) {
+ ret.push_back(AddedNodeInfo{strAddNode, it->second.second, true, it->second.first});
+ } else {
+ ret.push_back(AddedNodeInfo{strAddNode, CService(), false, false});
}
- MilliSleep(120000); // Retry every 2 minutes
}
}
+ return ret;
+}
+
+void ThreadOpenAddedConnections()
+{
+ {
+ LOCK(cs_vAddedNodes);
+ vAddedNodes = mapMultiArgs["-addnode"];
+ }
+
for (unsigned int i = 0; true; i++)
{
- std::list<std::string> lAddresses(0);
- {
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
- lAddresses.push_back(strAddNode);
+ std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo();
+ for (const AddedNodeInfo& info : vInfo) {
+ if (!info.fConnected) {
+ CSemaphoreGrant grant(*semOutbound);
+ // If strAddedNode is an IP/port, decode it immediately, so
+ // OpenNetworkConnection can detect existing connections to that IP/port.
+ CService service(info.strAddedNode, Params().GetDefaultPort());
+ OpenNetworkConnection(CAddress(service, NODE_NONE), false, &grant, info.strAddedNode.c_str(), false);
+ MilliSleep(500);
+ }
}
- std::list<std::vector<CService> > lservAddressesToAdd(0);
- BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
- std::vector<CService> vservNode(0);
- if(Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0))
- lservAddressesToAdd.push_back(vservNode);
- }
- // Attempt to connect to each IP for each addnode entry until at least one is successful per addnode entry
- // (keeping in mind that addnode entries can have many IPs if fNameLookup)
- {
- LOCK(cs_vNodes);
- BOOST_FOREACH(CNode* pnode, vNodes)
- for (std::list<std::vector<CService> >::iterator it = lservAddressesToAdd.begin(); it != lservAddressesToAdd.end(); it++)
- BOOST_FOREACH(const CService& addrNode, *(it))
- if (pnode->addr == addrNode)
- {
- it = lservAddressesToAdd.erase(it);
- it--;
- break;
- }
- }
- BOOST_FOREACH(std::vector<CService>& vserv, lservAddressesToAdd)
- {
- CSemaphoreGrant grant(*semOutbound);
- OpenNetworkConnection(CAddress(vserv[i % vserv.size()]), false, &grant);
- MilliSleep(500);
- }
MilliSleep(120000); // Retry every 2 minutes
}
}
@@ -2324,7 +2407,8 @@ CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNa
addrKnown(5000, 0.001),
filterInventoryKnown(50000, 0.000001)
{
- nServices = 0;
+ nServices = NODE_NONE;
+ nServicesExpected = NODE_NONE;
hSocket = hSocketIn;
nRecvVersion = INIT_PROTO_VERSION;
nLastSend = 0;
@@ -2358,6 +2442,8 @@ CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNa
fSentAddr = false;
pfilter = new CBloomFilter();
timeLastMempoolReq = 0;
+ nLastBlockTime = 0;
+ nLastTXTime = 0;
nPingNonceSent = 0;
nPingUsecStart = 0;
nPingUsecTime = 0;
diff --git a/src/net.h b/src/net.h
index 5c1f7e3e89..67b95fe0e4 100644
--- a/src/net.h
+++ b/src/net.h
@@ -72,6 +72,8 @@ static const bool DEFAULT_FORCEDNSSEED = false;
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
+static const ServiceFlags REQUIRED_SERVICES = NODE_NETWORK;
+
// NOTE: When adjusting this, update rpcnet:setban's help ("24h")
static const unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban
@@ -152,7 +154,7 @@ CAddress GetLocalAddress(const CNetAddr *paddrPeer = NULL);
extern bool fDiscover;
extern bool fListen;
-extern uint64_t nLocalServices;
+extern ServiceFlags nLocalServices;
extern bool fRelayTxes;
extern uint64_t nLocalHostNonce;
extern CAddrMan addrman;
@@ -186,7 +188,7 @@ class CNodeStats
{
public:
NodeId nodeid;
- uint64_t nServices;
+ ServiceFlags nServices;
bool fRelayTxes;
int64_t nLastSend;
int64_t nLastRecv;
@@ -316,7 +318,8 @@ class CNode
{
public:
// socket
- uint64_t nServices;
+ ServiceFlags nServices;
+ ServiceFlags nServicesExpected;
SOCKET hSocket;
CDataStream ssSend;
size_t nSendSize; // total size of all vSendMsg entries
@@ -416,6 +419,11 @@ public:
// Last time a "MEMPOOL" request was serviced.
std::atomic<int64_t> timeLastMempoolReq;
+
+ // Block and TXN accept times
+ std::atomic<int64_t> nLastBlockTime;
+ std::atomic<int64_t> nLastTXTime;
+
// Ping time measurement:
// The pong reply we're expecting, or 0 if no pong expected.
uint64_t nPingNonceSent;
@@ -815,4 +823,14 @@ public:
/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */
int64_t PoissonNextSend(int64_t nNow, int average_interval_seconds);
+struct AddedNodeInfo
+{
+ std::string strAddedNode;
+ CService resolvedAddress;
+ bool fConnected;
+ bool fInbound;
+};
+
+std::vector<AddedNodeInfo> GetAddedNodeInfo();
+
#endif // BITCOIN_NET_H
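REQUIRED_SERVICES is consumed in ThreadOpenConnections above as a subset test: an address-book candidate is skipped unless every required bit appears in its advertised nServices. A small sketch of that mask check, using plain integers instead of the ServiceFlags type:

#include <cstdint>

// "Only connect to full nodes": every bit of `required` must be set in
// `advertised`, i.e. (advertised & required) == required.
bool HasAllRequiredServices(uint64_t advertised, uint64_t required)
{
    return (advertised & required) == required;
}

// Example: with required == NODE_NETWORK (bit 0), an address advertising 0x0
// (NODE_NONE) is skipped, while 0x5 (NODE_NETWORK plus another bit) passes.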
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 572ae70871..e2a516986c 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -621,10 +621,10 @@ bool ConnectSocketByName(CService &addr, SOCKET& hSocketRet, const char *pszDest
proxyType nameProxy;
GetNameProxy(nameProxy);
- CService addrResolved;
- if (Lookup(strDest.c_str(), addrResolved, port, fNameLookup && !HaveNameProxy())) {
- if (addrResolved.IsValid()) {
- addr = addrResolved;
+ std::vector<CService> addrResolved;
+ if (Lookup(strDest.c_str(), addrResolved, port, fNameLookup && !HaveNameProxy(), 256)) {
+ if (addrResolved.size() > 0) {
+ addr = addrResolved[GetRand(addrResolved.size())];
return ConnectSocket(addr, hSocketRet, nTimeout);
}
}
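The netbase change resolves a destination to up to 256 addresses and connects to one chosen at random instead of always taking the first result, spreading outbound connections across a name's records. A hedged sketch of just the selection step, using <random> in place of Core's GetRand:

#include <cstddef>
#include <random>
#include <string>
#include <vector>

// Pick one resolved address uniformly at random; an empty list yields "".
std::string PickResolvedAddress(const std::vector<std::string>& resolved)
{
    if (resolved.empty())
        return std::string();
    static std::mt19937 rng{std::random_device{}()};
    std::uniform_int_distribution<std::size_t> dist(0, resolved.size() - 1);
    return resolved[dist(rng)];
}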
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 8c4bd05725..422ef6f636 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -133,7 +133,7 @@ CAddress::CAddress() : CService()
Init();
}
-CAddress::CAddress(CService ipIn, uint64_t nServicesIn) : CService(ipIn)
+CAddress::CAddress(CService ipIn, ServiceFlags nServicesIn) : CService(ipIn)
{
Init();
nServices = nServicesIn;
@@ -141,7 +141,7 @@ CAddress::CAddress(CService ipIn, uint64_t nServicesIn) : CService(ipIn)
void CAddress::Init()
{
- nServices = NODE_NETWORK;
+ nServices = NODE_NONE;
nTime = 100000000;
}
diff --git a/src/protocol.h b/src/protocol.h
index 1b049e52af..ab0a581783 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -223,7 +223,9 @@ extern const char *FEEFILTER;
const std::vector<std::string> &getAllNetMessageTypes();
/** nServices flags */
-enum {
+enum ServiceFlags : uint64_t {
+ // Nothing
+ NODE_NONE = 0,
// NODE_NETWORK means that the node is capable of serving the block chain. It is currently
// set by all Bitcoin Core nodes, and is unset by SPV clients or other peers that just want
// network services but don't provide them.
@@ -251,7 +253,7 @@ class CAddress : public CService
{
public:
CAddress();
- explicit CAddress(CService ipIn, uint64_t nServicesIn = NODE_NETWORK);
+ explicit CAddress(CService ipIn, ServiceFlags nServicesIn);
void Init();
@@ -267,13 +269,15 @@ public:
if ((nType & SER_DISK) ||
(nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH)))
READWRITE(nTime);
- READWRITE(nServices);
+ uint64_t nServicesInt = nServices;
+ READWRITE(nServicesInt);
+ nServices = (ServiceFlags)nServicesInt;
READWRITE(*(CService*)this);
}
// TODO: make private (improves encapsulation)
public:
- uint64_t nServices;
+ ServiceFlags nServices;
// disk and network only
unsigned int nTime;
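Giving the flags enum a name and a fixed uint64_t underlying type lets nServices, nLocalServices and friends carry a real ServiceFlags type while the wire format stays an 8-byte integer; the CAddress serializer simply round-trips through uint64_t and casts back. A reduced sketch of that round trip:

#include <cstdint>

// Unscoped enum with a fixed 64-bit underlying type: values convert to
// uint64_t implicitly, but converting back needs an explicit cast, which is
// exactly what the CAddress serialization above does.
enum ServiceFlags : uint64_t {
    NODE_NONE    = 0,
    NODE_NETWORK = (1 << 0),
};

int main()
{
    ServiceFlags nServices = NODE_NETWORK;
    uint64_t nServicesInt = nServices;                      // write path
    ServiceFlags roundTripped = (ServiceFlags)nServicesInt; // read path
    return roundTripped == nServices ? 0 : 1;
}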
diff --git a/src/qt/bitcoinstrings.cpp b/src/qt/bitcoinstrings.cpp
index 23be8e016b..9e53f19591 100644
--- a/src/qt/bitcoinstrings.cpp
+++ b/src/qt/bitcoinstrings.cpp
@@ -140,8 +140,6 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Support filtering of blocks and transaction with bloom filters (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Tell other nodes to filter invs to us by our mempool min fee (default: %u)"),
-QT_TRANSLATE_NOOP("bitcoin-core", ""
"The block database contains a block which appears to be from the future. "
"This may be due to your computer's date and time being set incorrectly. Only "
"rebuild the block database if you are sure that your computer's date and "
diff --git a/src/qt/forms/receiverequestdialog.ui b/src/qt/forms/receiverequestdialog.ui
index 1e484dd9a0..4163f4189c 100644
--- a/src/qt/forms/receiverequestdialog.ui
+++ b/src/qt/forms/receiverequestdialog.ui
@@ -22,7 +22,7 @@
<property name="minimumSize">
<size>
<width>300</width>
- <height>300</height>
+ <height>320</height>
</size>
</property>
<property name="toolTip">
diff --git a/src/qt/guiconstants.h b/src/qt/guiconstants.h
index 4b2c10dd48..bab9923d20 100644
--- a/src/qt/guiconstants.h
+++ b/src/qt/guiconstants.h
@@ -43,7 +43,7 @@ static const int TOOLTIP_WRAP_THRESHOLD = 80;
static const int MAX_URI_LENGTH = 255;
/* QRCodeDialog -- size of exported QR Code image */
-#define EXPORT_IMAGE_SIZE 256
+#define QR_IMAGE_SIZE 300
/* Number of frames in spinner animation */
#define SPINNER_FRAMES 36
diff --git a/src/qt/locale/bitcoin_en.ts b/src/qt/locale/bitcoin_en.ts
index 5549ccd4f8..b90221f2c2 100644
--- a/src/qt/locale/bitcoin_en.ts
+++ b/src/qt/locale/bitcoin_en.ts
@@ -3785,7 +3785,7 @@
<context>
<name>bitcoin-core</name>
<message>
- <location filename="../bitcoinstrings.cpp" line="+288"/>
+ <location filename="../bitcoinstrings.cpp" line="+286"/>
<source>Options:</source>
<translation>Options:</translation>
</message>
@@ -3810,7 +3810,7 @@
<translation>Accept command line and JSON-RPC commands</translation>
</message>
<message>
- <location line="-127"/>
+ <location line="-125"/>
<source>If &lt;category&gt; is not supplied or if &lt;category&gt; = 1, output all debugging information.</source>
<translation type="unfinished"></translation>
</message>
@@ -3835,7 +3835,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+120"/>
+ <location line="+118"/>
<source>Error: A fatal internal error occurred, see debug.log for details</source>
<translation type="unfinished"></translation>
</message>
@@ -3865,7 +3865,7 @@
<translation>Accept connections from outside (default: 1 if no -proxy or -connect)</translation>
</message>
<message>
- <location line="-203"/>
+ <location line="-201"/>
<source>Bitcoin Core</source>
<translation type="unfinished">Bitcoin Core</translation>
</message>
@@ -3946,11 +3946,6 @@
</message>
<message>
<location line="+5"/>
- <source>Tell other nodes to filter invs to us by our mempool min fee (default: %u)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+2"/>
<source>The block database contains a block which appears to be from the future. This may be due to your computer&apos;s date and time being set incorrectly. Only rebuild the block database if you are sure that your computer&apos;s date and time are correct</source>
<translation type="unfinished"></translation>
</message>
@@ -4305,7 +4300,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-315"/>
+ <location line="-313"/>
<source>Allow JSON-RPC connections from specified source. Valid for &lt;ip&gt; are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times</source>
<translation type="unfinished"></translation>
</message>
@@ -4375,7 +4370,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+14"/>
+ <location line="+12"/>
<source>The transaction amount is too small to send after the fee has been deducted</source>
<translation type="unfinished"></translation>
</message>
@@ -4580,12 +4575,12 @@
<translation>Password for JSON-RPC connections</translation>
</message>
<message>
- <location line="-216"/>
+ <location line="-214"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Execute command when the best block changes (%s in cmd is replaced by block hash)</translation>
</message>
<message>
- <location line="+145"/>
+ <location line="+143"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Allow DNS lookups for -addnode, -seednode and -connect</translation>
</message>
@@ -4595,7 +4590,7 @@
<translation>Loading addresses...</translation>
</message>
<message>
- <location line="-260"/>
+ <location line="-258"/>
<source>(1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)</source>
<translation type="unfinished"></translation>
</message>
@@ -4645,7 +4640,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+18"/>
+ <location line="+16"/>
<source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source>
<translation type="unfinished"></translation>
</message>
diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp
index a1e9156eea..b13ea3df70 100644
--- a/src/qt/receiverequestdialog.cpp
+++ b/src/qt/receiverequestdialog.cpp
@@ -45,7 +45,7 @@ QImage QRImageWidget::exportImage()
{
if(!pixmap())
return QImage();
- return pixmap()->toImage().scaled(EXPORT_IMAGE_SIZE, EXPORT_IMAGE_SIZE);
+ return pixmap()->toImage();
}
void QRImageWidget::mousePressEvent(QMouseEvent *event)
@@ -166,20 +166,32 @@ void ReceiveRequestDialog::update()
ui->lblQRCode->setText(tr("Error encoding URI into QR Code."));
return;
}
- QImage myImage = QImage(code->width + 8, code->width + 8, QImage::Format_RGB32);
- myImage.fill(0xffffff);
+ QImage qrImage = QImage(code->width + 8, code->width + 8, QImage::Format_RGB32);
+ qrImage.fill(0xffffff);
unsigned char *p = code->data;
for (int y = 0; y < code->width; y++)
{
for (int x = 0; x < code->width; x++)
{
- myImage.setPixel(x + 4, y + 4, ((*p & 1) ? 0x0 : 0xffffff));
+ qrImage.setPixel(x + 4, y + 4, ((*p & 1) ? 0x0 : 0xffffff));
p++;
}
}
QRcode_free(code);
- ui->lblQRCode->setPixmap(QPixmap::fromImage(myImage).scaled(300, 300));
+ QImage qrAddrImage = QImage(QR_IMAGE_SIZE, QR_IMAGE_SIZE+20, QImage::Format_RGB32);
+ qrAddrImage.fill(0xffffff);
+ QPainter painter(&qrAddrImage);
+ painter.drawImage(0, 0, qrImage.scaled(QR_IMAGE_SIZE, QR_IMAGE_SIZE));
+ QFont font = GUIUtil::fixedPitchFont();
+ font.setPixelSize(12);
+ painter.setFont(font);
+ QRect paddedRect = qrAddrImage.rect();
+ paddedRect.setHeight(QR_IMAGE_SIZE+12);
+ painter.drawText(paddedRect, Qt::AlignBottom|Qt::AlignCenter, info.address);
+ painter.end();
+
+ ui->lblQRCode->setPixmap(QPixmap::fromImage(qrAddrImage));
ui->btnSaveAs->setEnabled(true);
}
}
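Instead of exporting a rescaled pixmap, the dialog now paints the QR code into a QR_IMAGE_SIZE square with a 20-pixel strip underneath and draws the address there as a caption. A minimal Qt sketch of the same compositing, written as a free function that uses the painter's current font rather than GUIUtil::fixedPitchFont():

#include <QFont>
#include <QImage>
#include <QPainter>
#include <QString>

// Compose a QR image plus a caption strip of `extra` pixels below it, roughly
// as ReceiveRequestDialog::update() does; `qr` is an already-rendered QR bitmap.
QImage ComposeQrWithCaption(const QImage& qr, const QString& caption,
                            int size = 300, int extra = 20)
{
    QImage out(size, size + extra, QImage::Format_RGB32);
    out.fill(0xffffff);

    QPainter painter(&out);
    painter.drawImage(0, 0, qr.scaled(size, size));

    QFont font = painter.font();
    font.setPixelSize(12);
    painter.setFont(font);

    QRect textRect = out.rect();
    textRect.setHeight(size + 12);
    painter.drawText(textRect, Qt::AlignBottom | Qt::AlignHCenter, caption);
    painter.end();
    return out;
}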
diff --git a/src/qt/receiverequestdialog.h b/src/qt/receiverequestdialog.h
index 4cab4caff1..676745a858 100644
--- a/src/qt/receiverequestdialog.h
+++ b/src/qt/receiverequestdialog.h
@@ -10,6 +10,7 @@
#include <QDialog>
#include <QImage>
#include <QLabel>
+#include <QPainter>
class OptionsModel;
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index cf3c73c4df..1bb365d36c 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -183,6 +183,60 @@ UniValue getdifficulty(const UniValue& params, bool fHelp)
return GetDifficulty();
}
+std::string EntryDescriptionString()
+{
+ return " \"size\" : n, (numeric) transaction size in bytes\n"
+ " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n"
+ " \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority\n"
+ " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n"
+ " \"height\" : n, (numeric) block height when transaction entered pool\n"
+ " \"startingpriority\" : n, (numeric) priority when transaction entered pool\n"
+ " \"currentpriority\" : n, (numeric) transaction priority now\n"
+ " \"descendantcount\" : n, (numeric) number of in-mempool descendant transactions (including this one)\n"
+ " \"descendantsize\" : n, (numeric) size of in-mempool descendants (including this one)\n"
+ " \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one)\n"
+ " \"ancestorcount\" : n, (numeric) number of in-mempool ancestor transactions (including this one)\n"
+ " \"ancestorsize\" : n, (numeric) size of in-mempool ancestors (including this one)\n"
+ " \"ancestorfees\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one)\n"
+ " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n"
+ " \"transactionid\", (string) parent transaction id\n"
+ " ... ]\n";
+}
+
+void entryToJSON(UniValue &info, const CTxMemPoolEntry &e)
+{
+ AssertLockHeld(mempool.cs);
+
+ info.push_back(Pair("size", (int)e.GetTxSize()));
+ info.push_back(Pair("fee", ValueFromAmount(e.GetFee())));
+ info.push_back(Pair("modifiedfee", ValueFromAmount(e.GetModifiedFee())));
+ info.push_back(Pair("time", e.GetTime()));
+ info.push_back(Pair("height", (int)e.GetHeight()));
+ info.push_back(Pair("startingpriority", e.GetPriority(e.GetHeight())));
+ info.push_back(Pair("currentpriority", e.GetPriority(chainActive.Height())));
+ info.push_back(Pair("descendantcount", e.GetCountWithDescendants()));
+ info.push_back(Pair("descendantsize", e.GetSizeWithDescendants()));
+ info.push_back(Pair("descendantfees", e.GetModFeesWithDescendants()));
+ info.push_back(Pair("ancestorcount", e.GetCountWithAncestors()));
+ info.push_back(Pair("ancestorsize", e.GetSizeWithAncestors()));
+ info.push_back(Pair("ancestorfees", e.GetModFeesWithAncestors()));
+ const CTransaction& tx = e.GetTx();
+ set<string> setDepends;
+ BOOST_FOREACH(const CTxIn& txin, tx.vin)
+ {
+ if (mempool.exists(txin.prevout.hash))
+ setDepends.insert(txin.prevout.hash.ToString());
+ }
+
+ UniValue depends(UniValue::VARR);
+ BOOST_FOREACH(const string& dep, setDepends)
+ {
+ depends.push_back(dep);
+ }
+
+ info.push_back(Pair("depends", depends));
+}
+
UniValue mempoolToJSON(bool fVerbose = false)
{
if (fVerbose)
@@ -193,31 +247,7 @@ UniValue mempoolToJSON(bool fVerbose = false)
{
const uint256& hash = e.GetTx().GetHash();
UniValue info(UniValue::VOBJ);
- info.push_back(Pair("size", (int)e.GetTxSize()));
- info.push_back(Pair("fee", ValueFromAmount(e.GetFee())));
- info.push_back(Pair("modifiedfee", ValueFromAmount(e.GetModifiedFee())));
- info.push_back(Pair("time", e.GetTime()));
- info.push_back(Pair("height", (int)e.GetHeight()));
- info.push_back(Pair("startingpriority", e.GetPriority(e.GetHeight())));
- info.push_back(Pair("currentpriority", e.GetPriority(chainActive.Height())));
- info.push_back(Pair("descendantcount", e.GetCountWithDescendants()));
- info.push_back(Pair("descendantsize", e.GetSizeWithDescendants()));
- info.push_back(Pair("descendantfees", e.GetModFeesWithDescendants()));
- const CTransaction& tx = e.GetTx();
- set<string> setDepends;
- BOOST_FOREACH(const CTxIn& txin, tx.vin)
- {
- if (mempool.exists(txin.prevout.hash))
- setDepends.insert(txin.prevout.hash.ToString());
- }
-
- UniValue depends(UniValue::VARR);
- BOOST_FOREACH(const string& dep, setDepends)
- {
- depends.push_back(dep);
- }
-
- info.push_back(Pair("depends", depends));
+ entryToJSON(info, e);
o.push_back(Pair(hash.ToString(), info));
}
return o;
@@ -251,20 +281,8 @@ UniValue getrawmempool(const UniValue& params, bool fHelp)
"\nResult: (for verbose = true):\n"
"{ (json object)\n"
" \"transactionid\" : { (json object)\n"
- " \"size\" : n, (numeric) transaction size in bytes\n"
- " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n"
- " \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority\n"
- " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n"
- " \"height\" : n, (numeric) block height when transaction entered pool\n"
- " \"startingpriority\" : n, (numeric) priority when transaction entered pool\n"
- " \"currentpriority\" : n, (numeric) transaction priority now\n"
- " \"descendantcount\" : n, (numeric) number of in-mempool descendant transactions (including this one)\n"
- " \"descendantsize\" : n, (numeric) size of in-mempool descendants (including this one)\n"
- " \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one)\n"
- " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n"
- " \"transactionid\", (string) parent transaction id\n"
- " ... ]\n"
- " }, ...\n"
+ + EntryDescriptionString()
+ + " }, ...\n"
"}\n"
"\nExamples\n"
+ HelpExampleCli("getrawmempool", "true")
@@ -280,6 +298,167 @@ UniValue getrawmempool(const UniValue& params, bool fHelp)
return mempoolToJSON(fVerbose);
}
+UniValue getmempoolancestors(const UniValue& params, bool fHelp)
+{
+ if (fHelp || params.size() < 1 || params.size() > 2) {
+ throw runtime_error(
+ "getmempoolancestors txid (verbose)\n"
+ "\nIf txid is in the mempool, returns all in-mempool ancestors.\n"
+ "\nArguments:\n"
+ "1. \"txid\" (string, required) The transaction id (must be in mempool)\n"
+ "2. verbose (boolean, optional, default=false) true for a json object, false for array of transaction ids\n"
+ "\nResult (for verbose=false):\n"
+ "[ (json array of strings)\n"
+ " \"transactionid\" (string) The transaction id of an in-mempool ancestor transaction\n"
+ " ,...\n"
+ "]\n"
+ "\nResult (for verbose=true):\n"
+ "{ (json object)\n"
+ " \"transactionid\" : { (json object)\n"
+ + EntryDescriptionString()
+ + " }, ...\n"
+ "}\n"
+ "\nExamples\n"
+ + HelpExampleCli("getmempoolancestors", "\"mytxid\"")
+ + HelpExampleRpc("getmempoolancestors", "\"mytxid\"")
+ );
+ }
+
+ bool fVerbose = false;
+ if (params.size() > 1)
+ fVerbose = params[1].get_bool();
+
+ uint256 hash = ParseHashV(params[0], "parameter 1");
+
+ LOCK(mempool.cs);
+
+ CTxMemPool::txiter it = mempool.mapTx.find(hash);
+ if (it == mempool.mapTx.end()) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool");
+ }
+
+ CTxMemPool::setEntries setAncestors;
+ uint64_t noLimit = std::numeric_limits<uint64_t>::max();
+ std::string dummy;
+ mempool.CalculateMemPoolAncestors(*it, setAncestors, noLimit, noLimit, noLimit, noLimit, dummy, false);
+
+ if (!fVerbose) {
+ UniValue o(UniValue::VARR);
+ BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors) {
+ o.push_back(ancestorIt->GetTx().GetHash().ToString());
+ }
+
+ return o;
+ } else {
+ UniValue o(UniValue::VOBJ);
+ BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors) {
+ const CTxMemPoolEntry &e = *ancestorIt;
+ const uint256& hash = e.GetTx().GetHash();
+ UniValue info(UniValue::VOBJ);
+ entryToJSON(info, e);
+ o.push_back(Pair(hash.ToString(), info));
+ }
+ return o;
+ }
+}
+
+UniValue getmempooldescendants(const UniValue& params, bool fHelp)
+{
+ if (fHelp || params.size() < 1 || params.size() > 2) {
+ throw runtime_error(
+ "getmempooldescendants txid (verbose)\n"
+ "\nIf txid is in the mempool, returns all in-mempool descendants.\n"
+ "\nArguments:\n"
+ "1. \"txid\" (string, required) The transaction id (must be in mempool)\n"
+ "2. verbose (boolean, optional, default=false) true for a json object, false for array of transaction ids\n"
+ "\nResult (for verbose=false):\n"
+ "[ (json array of strings)\n"
+ " \"transactionid\" (string) The transaction id of an in-mempool descendant transaction\n"
+ " ,...\n"
+ "]\n"
+ "\nResult (for verbose=true):\n"
+ "{ (json object)\n"
+ " \"transactionid\" : { (json object)\n"
+ + EntryDescriptionString()
+ + " }, ...\n"
+ "}\n"
+ "\nExamples\n"
+ + HelpExampleCli("getmempooldescendants", "\"mytxid\"")
+ + HelpExampleRpc("getmempooldescendants", "\"mytxid\"")
+ );
+ }
+
+ bool fVerbose = false;
+ if (params.size() > 1)
+ fVerbose = params[1].get_bool();
+
+ uint256 hash = ParseHashV(params[0], "parameter 1");
+
+ LOCK(mempool.cs);
+
+ CTxMemPool::txiter it = mempool.mapTx.find(hash);
+ if (it == mempool.mapTx.end()) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool");
+ }
+
+ CTxMemPool::setEntries setDescendants;
+ mempool.CalculateDescendants(it, setDescendants);
+ // CTxMemPool::CalculateDescendants will include the given tx
+ setDescendants.erase(it);
+
+ if (!fVerbose) {
+ UniValue o(UniValue::VARR);
+ BOOST_FOREACH(CTxMemPool::txiter descendantIt, setDescendants) {
+ o.push_back(descendantIt->GetTx().GetHash().ToString());
+ }
+
+ return o;
+ } else {
+ UniValue o(UniValue::VOBJ);
+ BOOST_FOREACH(CTxMemPool::txiter descendantIt, setDescendants) {
+ const CTxMemPoolEntry &e = *descendantIt;
+ const uint256& hash = e.GetTx().GetHash();
+ UniValue info(UniValue::VOBJ);
+ entryToJSON(info, e);
+ o.push_back(Pair(hash.ToString(), info));
+ }
+ return o;
+ }
+}
+
+UniValue getmempoolentry(const UniValue& params, bool fHelp)
+{
+ if (fHelp || params.size() != 1) {
+ throw runtime_error(
+ "getmempoolentry txid\n"
+ "\nReturns mempool data for given transaction\n"
+ "\nArguments:\n"
+ "1. \"txid\" (string, required) The transaction id (must be in mempool)\n"
+ "\nResult:\n"
+ "{ (json object)\n"
+ + EntryDescriptionString()
+ + "}\n"
+ "\nExamples\n"
+ + HelpExampleCli("getmempoolentry", "\"mytxid\"")
+ + HelpExampleRpc("getmempoolentry", "\"mytxid\"")
+ );
+ }
+
+ uint256 hash = ParseHashV(params[0], "parameter 1");
+
+ LOCK(mempool.cs);
+
+ CTxMemPool::txiter it = mempool.mapTx.find(hash);
+ if (it == mempool.mapTx.end()) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool");
+ }
+
+ const CTxMemPoolEntry &e = *it;
+ UniValue info(UniValue::VOBJ);
+ entryToJSON(info, e);
+ return info;
+}
+
UniValue getblockhash(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() != 1)
@@ -1004,6 +1183,9 @@ static const CRPCCommand commands[] =
{ "blockchain", "getblockheader", &getblockheader, true },
{ "blockchain", "getchaintips", &getchaintips, true },
{ "blockchain", "getdifficulty", &getdifficulty, true },
+ { "blockchain", "getmempoolancestors", &getmempoolancestors, true },
+ { "blockchain", "getmempooldescendants", &getmempooldescendants, true },
+ { "blockchain", "getmempoolentry", &getmempoolentry, true },
{ "blockchain", "getmempoolinfo", &getmempoolinfo, true },
{ "blockchain", "getrawmempool", &getrawmempool, true },
{ "blockchain", "gettxout", &gettxout, true },
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index c89af6bfa7..d0675fdb49 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -102,6 +102,8 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "prioritisetransaction", 2 },
{ "setban", 2 },
{ "setban", 3 },
+ { "getmempoolancestors", 1 },
+ { "getmempooldescendants", 1 },
};
class CRPCConvertTable
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 2bd52eadbc..94eeea91f3 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -112,7 +112,7 @@ UniValue generateBlocks(boost::shared_ptr<CReserveScript> coinbaseScript, int nG
UniValue blockHashes(UniValue::VARR);
while (nHeight < nHeightEnd)
{
- std::unique_ptr<CBlockTemplate> pblocktemplate(CreateNewBlock(Params(), coinbaseScript->reserveScript));
+ std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler(Params()).CreateNewBlock(coinbaseScript->reserveScript));
if (!pblocktemplate.get())
throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
CBlock *pblock = &pblocktemplate->block;
@@ -527,7 +527,7 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
pblocktemplate = NULL;
}
CScript scriptDummy = CScript() << OP_TRUE;
- pblocktemplate = CreateNewBlock(Params(), scriptDummy);
+ pblocktemplate = BlockAssembler(Params()).CreateNewBlock(scriptDummy);
if (!pblocktemplate)
throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index cae964e46d..b85c7b2e1a 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -271,25 +271,22 @@ UniValue getaddednodeinfo(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() < 1 || params.size() > 2)
throw runtime_error(
- "getaddednodeinfo dns ( \"node\" )\n"
+ "getaddednodeinfo dummy ( \"node\" )\n"
"\nReturns information about the given added node, or all added nodes\n"
"(note that onetry addnodes are not listed here)\n"
- "If dns is false, only a list of added nodes will be provided,\n"
- "otherwise connected information will also be available.\n"
"\nArguments:\n"
- "1. dns (boolean, required) If false, only a list of added nodes will be provided, otherwise connected information will also be available.\n"
+ "1. dummy (boolean, required) Kept for historical purposes but ignored\n"
"2. \"node\" (string, optional) If provided, return information about this specific node, otherwise all nodes are returned.\n"
"\nResult:\n"
"[\n"
" {\n"
- " \"addednode\" : \"192.168.0.201\", (string) The node ip address\n"
+ " \"addednode\" : \"192.168.0.201\", (string) The node ip address or name (as provided to addnode)\n"
" \"connected\" : true|false, (boolean) If connected\n"
- " \"addresses\" : [\n"
+ " \"addresses\" : [ (list of objects) Only when connected = true\n"
" {\n"
- " \"address\" : \"192.168.0.201:8333\", (string) The bitcoin server host and port\n"
+ " \"address\" : \"192.168.0.201:8333\", (string) The bitcoin server IP and port we're connected to\n"
" \"connected\" : \"outbound\" (string) connection, inbound or outbound\n"
" }\n"
- " ,...\n"
" ]\n"
" }\n"
" ,...\n"
@@ -300,83 +297,35 @@ UniValue getaddednodeinfo(const UniValue& params, bool fHelp)
+ HelpExampleRpc("getaddednodeinfo", "true, \"192.168.0.201\"")
);
- bool fDns = params[0].get_bool();
+ std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo();
- list<string> laddedNodes(0);
- if (params.size() == 1)
- {
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
- laddedNodes.push_back(strAddNode);
- }
- else
- {
- string strNode = params[1].get_str();
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes) {
- if (strAddNode == strNode)
- {
- laddedNodes.push_back(strAddNode);
+ if (params.size() == 2) {
+ bool found = false;
+ for (const AddedNodeInfo& info : vInfo) {
+ if (info.strAddedNode == params[1].get_str()) {
+ vInfo.assign(1, info);
+ found = true;
break;
}
}
- if (laddedNodes.size() == 0)
+ if (!found) {
throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added.");
- }
-
- UniValue ret(UniValue::VARR);
- if (!fDns)
- {
- BOOST_FOREACH (const std::string& strAddNode, laddedNodes) {
- UniValue obj(UniValue::VOBJ);
- obj.push_back(Pair("addednode", strAddNode));
- ret.push_back(obj);
}
- return ret;
}
- list<pair<string, vector<CService> > > laddedAddreses(0);
- BOOST_FOREACH(const std::string& strAddNode, laddedNodes) {
- vector<CService> vservNode(0);
- if(Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0))
- laddedAddreses.push_back(make_pair(strAddNode, vservNode));
- else
- {
- UniValue obj(UniValue::VOBJ);
- obj.push_back(Pair("addednode", strAddNode));
- obj.push_back(Pair("connected", false));
- UniValue addresses(UniValue::VARR);
- obj.push_back(Pair("addresses", addresses));
- ret.push_back(obj);
- }
- }
+ UniValue ret(UniValue::VARR);
- LOCK(cs_vNodes);
- for (list<pair<string, vector<CService> > >::iterator it = laddedAddreses.begin(); it != laddedAddreses.end(); it++)
- {
+ for (const AddedNodeInfo& info : vInfo) {
UniValue obj(UniValue::VOBJ);
- obj.push_back(Pair("addednode", it->first));
-
+ obj.push_back(Pair("addednode", info.strAddedNode));
+ obj.push_back(Pair("connected", info.fConnected));
UniValue addresses(UniValue::VARR);
- bool fConnected = false;
- BOOST_FOREACH(const CService& addrNode, it->second) {
- bool fFound = false;
- UniValue node(UniValue::VOBJ);
- node.push_back(Pair("address", addrNode.ToString()));
- BOOST_FOREACH(CNode* pnode, vNodes) {
- if (pnode->addr == addrNode)
- {
- fFound = true;
- fConnected = true;
- node.push_back(Pair("connected", pnode->fInbound ? "inbound" : "outbound"));
- break;
- }
- }
- if (!fFound)
- node.push_back(Pair("connected", "false"));
- addresses.push_back(node);
+ if (info.fConnected) {
+ UniValue address(UniValue::VOBJ);
+ address.push_back(Pair("address", info.resolvedAddress.ToString()));
+ address.push_back(Pair("connected", info.fInbound ? "inbound" : "outbound"));
+ addresses.push_back(address);
}
- obj.push_back(Pair("connected", fConnected));
obj.push_back(Pair("addresses", addresses));
ret.push_back(obj);
}
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 992914f88c..9723e394d6 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -388,8 +388,13 @@ UniValue createrawtransaction(const UniValue& params, bool fHelp)
// set the sequence number if passed in the parameters object
const UniValue& sequenceObj = find_value(o, "sequence");
- if (sequenceObj.isNum())
- nSequence = sequenceObj.get_int();
+ if (sequenceObj.isNum()) {
+ int64_t seqNr64 = sequenceObj.get_int64();
+ if (seqNr64 < 0 || seqNr64 > std::numeric_limits<uint32_t>::max())
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, sequence number is out of range");
+ else
+ nSequence = (uint32_t)seqNr64;
+ }
CTxIn in(COutPoint(txid, nOutput), CScript(), nSequence);
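createrawtransaction now reads the optional sequence through get_int64 and range-checks it before narrowing, so the full unsigned 32-bit range (including the 0xffffffff default) is accepted, which a plain int cannot represent. A small sketch of that validation; SequenceFromInt64 is a hypothetical helper name, not part of the patch:

#include <cstdint>
#include <limits>
#include <stdexcept>

// Accept the JSON number as a 64-bit value and reject anything outside
// [0, 0xffffffff] before narrowing to the uint32_t sequence field.
uint32_t SequenceFromInt64(int64_t seqNr64)
{
    if (seqNr64 < 0 || seqNr64 > std::numeric_limits<uint32_t>::max())
        throw std::invalid_argument("sequence number is out of range");
    return static_cast<uint32_t>(seqNr64);
}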
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 95342498fa..4a373fc60b 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -45,7 +45,7 @@ BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(DoS_banning)
{
CNode::ClearBanned();
- CAddress addr1(ip(0xa0b0c001));
+ CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(INVALID_SOCKET, addr1, "", true);
dummyNode1.nVersion = 1;
Misbehaving(dummyNode1.GetId(), 100); // Should get banned
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
BOOST_CHECK(CNode::IsBanned(addr1));
BOOST_CHECK(!CNode::IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned
- CAddress addr2(ip(0xa0b0c002));
+ CAddress addr2(ip(0xa0b0c002), NODE_NONE);
CNode dummyNode2(INVALID_SOCKET, addr2, "", true);
dummyNode2.nVersion = 1;
Misbehaving(dummyNode2.GetId(), 50);
@@ -69,7 +69,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
{
CNode::ClearBanned();
mapArgs["-banscore"] = "111"; // because 11 is my favorite number
- CAddress addr1(ip(0xa0b0c001));
+ CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(INVALID_SOCKET, addr1, "", true);
dummyNode1.nVersion = 1;
Misbehaving(dummyNode1.GetId(), 100);
@@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
int64_t nStartTime = GetTime();
SetMockTime(nStartTime); // Overrides future calls to GetTime()
- CAddress addr(ip(0xa0b0c001));
+ CAddress addr(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode(INVALID_SOCKET, addr, "", true);
dummyNode.nVersion = 1;
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index 767b653e47..b6cec24b57 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -68,7 +68,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
// Test 2: Does Addrman::Add work as expected.
CService addr1 = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
CAddrInfo addr_ret1 = addrman.Select();
BOOST_CHECK(addr_ret1.ToString() == "250.1.1.1:8333");
@@ -76,14 +76,14 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
// Test 3: Does IP address deduplication work correctly.
// Expected dup IP should not be added.
CService addr1_dup = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1_dup), source);
+ addrman.Add(CAddress(addr1_dup, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
// Test 5: New table has one addr and we add a diff addr we should
// have two addrs.
CService addr2 = CService("250.1.1.2", 8333);
- addrman.Add(CAddress(addr2), source);
+ addrman.Add(CAddress(addr2, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 2);
// Test 6: AddrMan::Clear() should empty the new table.
@@ -106,18 +106,18 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
// Test 7; Addr with same IP but diff port does not replace existing addr.
CService addr1 = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
CService addr1_port = CService("250.1.1.1", 8334);
- addrman.Add(CAddress(addr1_port), source);
+ addrman.Add(CAddress(addr1_port, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
CAddrInfo addr_ret2 = addrman.Select();
BOOST_CHECK(addr_ret2.ToString() == "250.1.1.1:8333");
// Test 8: Add same IP but diff port to tried table, it doesn't get added.
// Perhaps this is not ideal behavior but it is the current behavior.
- addrman.Good(CAddress(addr1_port));
+ addrman.Good(CAddress(addr1_port, NODE_NONE));
BOOST_CHECK(addrman.size() == 1);
bool newOnly = true;
CAddrInfo addr_ret3 = addrman.Select(newOnly);
@@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
// Test 9: Select from new with 1 addr in new.
CService addr1 = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
bool newOnly = true;
@@ -144,7 +144,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_CHECK(addr_ret1.ToString() == "250.1.1.1:8333");
// Test 10: move addr to tried, select from new expected nothing returned.
- addrman.Good(CAddress(addr1));
+ addrman.Good(CAddress(addr1, NODE_NONE));
BOOST_CHECK(addrman.size() == 1);
CAddrInfo addr_ret2 = addrman.Select(newOnly);
BOOST_CHECK(addr_ret2.ToString() == "[::]:0");
@@ -160,21 +160,21 @@ BOOST_AUTO_TEST_CASE(addrman_select)
CService addr3 = CService("250.3.2.2", 9999);
CService addr4 = CService("250.3.3.3", 9999);
- addrman.Add(CAddress(addr2), CService("250.3.1.1", 8333));
- addrman.Add(CAddress(addr3), CService("250.3.1.1", 8333));
- addrman.Add(CAddress(addr4), CService("250.4.1.1", 8333));
+ addrman.Add(CAddress(addr2, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Add(CAddress(addr3, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Add(CAddress(addr4, NODE_NONE), CService("250.4.1.1", 8333));
// Add three addresses to tried table.
CService addr5 = CService("250.4.4.4", 8333);
CService addr6 = CService("250.4.5.5", 7777);
CService addr7 = CService("250.4.6.6", 8333);
- addrman.Add(CAddress(addr5), CService("250.3.1.1", 8333));
- addrman.Good(CAddress(addr5));
- addrman.Add(CAddress(addr6), CService("250.3.1.1", 8333));
- addrman.Good(CAddress(addr6));
- addrman.Add(CAddress(addr7), CService("250.1.1.3", 8333));
- addrman.Good(CAddress(addr7));
+ addrman.Add(CAddress(addr5, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Good(CAddress(addr5, NODE_NONE));
+ addrman.Add(CAddress(addr6, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Good(CAddress(addr6, NODE_NONE));
+ addrman.Add(CAddress(addr7, NODE_NONE), CService("250.1.1.3", 8333));
+ addrman.Good(CAddress(addr7, NODE_NONE));
// Test 11: 6 addrs + 1 addr from last test = 7.
BOOST_CHECK(addrman.size() == 7);
@@ -199,7 +199,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
for (unsigned int i = 1; i < 18; i++) {
CService addr = CService("250.1.1." + boost::to_string(i));
- addrman.Add(CAddress(addr), source);
+ addrman.Add(CAddress(addr, NODE_NONE), source);
//Test 13: No collision in new table yet.
BOOST_CHECK(addrman.size() == i);
@@ -207,11 +207,11 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
//Test 14: new table collision!
CService addr1 = CService("250.1.1.18");
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 17);
CService addr2 = CService("250.1.1.19");
- addrman.Add(CAddress(addr2), source);
+ addrman.Add(CAddress(addr2, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 18);
}
@@ -228,8 +228,8 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
for (unsigned int i = 1; i < 80; i++) {
CService addr = CService("250.1.1." + boost::to_string(i));
- addrman.Add(CAddress(addr), source);
- addrman.Good(CAddress(addr));
+ addrman.Add(CAddress(addr, NODE_NONE), source);
+ addrman.Good(CAddress(addr, NODE_NONE));
//Test 15: No collision in tried table yet.
BOOST_TEST_MESSAGE(addrman.size());
@@ -238,11 +238,11 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
//Test 16: tried table collision!
CService addr1 = CService("250.1.1.80");
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 79);
CService addr2 = CService("250.1.1.81");
- addrman.Add(CAddress(addr2), source);
+ addrman.Add(CAddress(addr2, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 80);
}
@@ -255,9 +255,9 @@ BOOST_AUTO_TEST_CASE(addrman_find)
BOOST_CHECK(addrman.size() == 0);
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
- CAddress addr2 = CAddress(CService("250.1.2.1", 9999));
- CAddress addr3 = CAddress(CService("251.255.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(CService("250.1.2.1", 9999), NODE_NONE);
+ CAddress addr3 = CAddress(CService("251.255.2.1", 8333), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
CNetAddr source2 = CNetAddr("250.1.2.2");
@@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(addrman_create)
BOOST_CHECK(addrman.size() == 0);
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
int nId;
@@ -317,7 +317,7 @@ BOOST_AUTO_TEST_CASE(addrman_delete)
BOOST_CHECK(addrman.size() == 0);
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
int nId;
@@ -344,15 +344,15 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
vector<CAddress> vAddr1 = addrman.GetAddr();
BOOST_CHECK(vAddr1.size() == 0);
- CAddress addr1 = CAddress(CService("250.250.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.250.2.1", 8333), NODE_NONE);
addr1.nTime = GetAdjustedTime(); // Set time so isTerrible = false
- CAddress addr2 = CAddress(CService("250.251.2.2", 9999));
+ CAddress addr2 = CAddress(CService("250.251.2.2", 9999), NODE_NONE);
addr2.nTime = GetAdjustedTime();
- CAddress addr3 = CAddress(CService("251.252.2.3", 8333));
+ CAddress addr3 = CAddress(CService("251.252.2.3", 8333), NODE_NONE);
addr3.nTime = GetAdjustedTime();
- CAddress addr4 = CAddress(CService("252.253.3.4", 8333));
+ CAddress addr4 = CAddress(CService("252.253.3.4", 8333), NODE_NONE);
addr4.nTime = GetAdjustedTime();
- CAddress addr5 = CAddress(CService("252.254.4.5", 8333));
+ CAddress addr5 = CAddress(CService("252.254.4.5", 8333), NODE_NONE);
addr5.nTime = GetAdjustedTime();
CNetAddr source1 = CNetAddr("250.1.2.1");
CNetAddr source2 = CNetAddr("250.2.3.3");
@@ -368,8 +368,8 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
BOOST_CHECK(addrman.GetAddr().size() == 1);
// Test 24: Ensure GetAddr works with new and tried addresses.
- addrman.Good(CAddress(addr1));
- addrman.Good(CAddress(addr2));
+ addrman.Good(CAddress(addr1, NODE_NONE));
+ addrman.Good(CAddress(addr2, NODE_NONE));
BOOST_CHECK(addrman.GetAddr().size() == 1);
// Test 25: Ensure GetAddr still returns 23% when addrman has many addrs.
@@ -378,7 +378,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
int octet2 = (i / 256) % 256;
int octet3 = (i / (256 * 2)) % 256;
string strAddr = boost::to_string(octet1) + "." + boost::to_string(octet2) + "." + boost::to_string(octet3) + ".23";
- CAddress addr = CAddress(CService(strAddr));
+ CAddress addr = CAddress(CService(strAddr), NODE_NONE);
// Ensure that for all addrs in addrman, isTerrible == false.
addr.nTime = GetAdjustedTime();
@@ -403,8 +403,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
// Set addrman addr placement to be deterministic.
addrman.MakeDeterministic();
- CAddress addr1 = CAddress(CService("250.1.1.1", 8333));
- CAddress addr2 = CAddress(CService("250.1.1.1", 9999));
+ CAddress addr1 = CAddress(CService("250.1.1.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(CService("250.1.1.1", 9999), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.1.1");
@@ -431,7 +431,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(CService("250.1.1." + boost::to_string(i))),
+ CAddress(CService("250.1.1." + boost::to_string(i)), NODE_NONE),
CNetAddr("250.1.1." + boost::to_string(i)));
int bucket = infoi.GetTriedBucket(nKey1);
buckets.insert(bucket);
@@ -443,7 +443,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
buckets.clear();
for (int j = 0; j < 255; j++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(CService("250." + boost::to_string(j) + ".1.1")),
+ CAddress(CService("250." + boost::to_string(j) + ".1.1"), NODE_NONE),
CNetAddr("250." + boost::to_string(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1);
buckets.insert(bucket);
@@ -460,8 +460,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
// Set addrman addr placement to be deterministic.
addrman.MakeDeterministic();
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
- CAddress addr2 = CAddress(CService("250.1.2.1", 9999));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(CService("250.1.2.1", 9999), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
@@ -484,7 +484,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(CService("250.1.1." + boost::to_string(i))),
+ CAddress(CService("250.1.1." + boost::to_string(i)), NODE_NONE),
CNetAddr("250.1.1." + boost::to_string(i)));
int bucket = infoi.GetNewBucket(nKey1);
buckets.insert(bucket);
@@ -497,7 +497,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
for (int j = 0; j < 4 * 255; j++) {
CAddrInfo infoj = CAddrInfo(CAddress(
CService(
- boost::to_string(250 + (j / 255)) + "." + boost::to_string(j % 256) + ".1.1")),
+ boost::to_string(250 + (j / 255)) + "." + boost::to_string(j % 256) + ".1.1"), NODE_NONE),
CNetAddr("251.4.1.1"));
int bucket = infoj.GetNewBucket(nKey1);
buckets.insert(bucket);
@@ -509,7 +509,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
buckets.clear();
for (int p = 0; p < 255; p++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(CService("250.1.1.1")),
+ CAddress(CService("250.1.1.1"), NODE_NONE),
CNetAddr("250." + boost::to_string(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1);
buckets.insert(bucket);
diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp
index e5a2e28b2e..01eb2aee9e 100644
--- a/src/test/base58_tests.cpp
+++ b/src/test/base58_tests.cpp
@@ -79,7 +79,7 @@ class TestAddrTypeVisitor : public boost::static_visitor<bool>
private:
std::string exp_addrType;
public:
- TestAddrTypeVisitor(const std::string &exp_addrType) : exp_addrType(exp_addrType) { }
+ TestAddrTypeVisitor(const std::string &_exp_addrType) : exp_addrType(_exp_addrType) { }
bool operator()(const CKeyID &id) const
{
return (exp_addrType == "pubkey");
@@ -100,7 +100,7 @@ class TestPayloadVisitor : public boost::static_visitor<bool>
private:
std::vector<unsigned char> exp_payload;
public:
- TestPayloadVisitor(std::vector<unsigned char> &exp_payload) : exp_payload(exp_payload) { }
+ TestPayloadVisitor(std::vector<unsigned char> &_exp_payload) : exp_payload(_exp_payload) { }
bool operator()(const CKeyID &id) const
{
uint160 exp_key(exp_payload);
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 469862518c..3fb7967881 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -89,7 +89,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
fCheckpointsEnabled = false;
// Simple block creation, nothing special yet:
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
// We can't make transactions until we have inputs
// Therefore, load 100 blocks :)
@@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
delete pblocktemplate;
// Just to make sure we can still make simple blocks
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
const CAmount BLOCKSUBSIDY = 50*COIN;
@@ -146,7 +146,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(spendsCoinbase).FromTx(tx));
tx.vin[0].prevout.hash = hash;
}
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
tx.vin[0].prevout.hash = txFirst[0]->GetHash();
@@ -160,7 +160,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(spendsCoinbase).SigOps(20).FromTx(tx));
tx.vin[0].prevout.hash = hash;
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
mempool.clear();
@@ -181,14 +181,14 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(spendsCoinbase).FromTx(tx));
tx.vin[0].prevout.hash = hash;
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
mempool.clear();
// orphan in mempool, template creation fails
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// child with higher priority than parent
@@ -205,7 +205,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vout[0].nValue = tx.vout[0].nValue+BLOCKSUBSIDY-HIGHERFEE; //First txn output + fresh coinbase - new txn fee
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(HIGHERFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx));
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
mempool.clear();
@@ -217,7 +217,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
hash = tx.GetHash();
// give it a fee so it'll get mined
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// invalid (pre-p2sh) txn in mempool, template creation fails
@@ -234,7 +234,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vout[0].nValue -= LOWFEE;
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// double spend txn pair in mempool, template creation fails
@@ -247,7 +247,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vout[0].scriptPubKey = CScript() << OP_2;
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// subsidy changing
@@ -263,7 +263,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
next->BuildSkip();
chainActive.SetTip(next);
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
// Extend to a 210000-long block chain.
while (chainActive.Tip()->nHeight < 210000) {
@@ -276,7 +276,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
next->BuildSkip();
chainActive.SetTip(next);
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
// Delete the dummy blocks again.
while (chainActive.Tip()->nHeight > nHeight) {
@@ -363,7 +363,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 1;
BOOST_CHECK(!TestSequenceLocks(tx, flags)); // Sequence locks fail
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
// None of the of the absolute height/time locked tx should have made
// it into the template because we still check IsFinalTx in CreateNewBlock,
@@ -377,7 +377,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
chainActive.Tip()->nHeight++;
SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1);
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5);
delete pblocktemplate;
@@ -385,8 +385,8 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
SetMockTime(0);
mempool.clear();
- BOOST_FOREACH(CTransaction *tx, txFirst)
- delete tx;
+ BOOST_FOREACH(CTransaction *_tx, txFirst)
+ delete _tx;
fCheckpointsEnabled = true;
}
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index b38d61f330..d005d6a163 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -51,7 +51,7 @@ public:
int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
s << nUBuckets;
- CAddress addr = CAddress(CService("252.1.1.1", 7777));
+ CAddress addr = CAddress(CService("252.1.1.1", 7777), NODE_NONE);
CAddrInfo info = CAddrInfo(addr, CNetAddr("252.2.2.2"));
s << info;
}
@@ -79,9 +79,9 @@ BOOST_AUTO_TEST_CASE(caddrdb_read)
CService addr3 = CService("250.7.3.3", 9999);
// Add three addresses to new table.
- addrmanUncorrupted.Add(CAddress(addr1), CService("252.5.1.1", 8333));
- addrmanUncorrupted.Add(CAddress(addr2), CService("252.5.1.1", 8333));
- addrmanUncorrupted.Add(CAddress(addr3), CService("252.5.1.1", 8333));
+ addrmanUncorrupted.Add(CAddress(addr1, NODE_NONE), CService("252.5.1.1", 8333));
+ addrmanUncorrupted.Add(CAddress(addr2, NODE_NONE), CService("252.5.1.1", 8333));
+ addrmanUncorrupted.Add(CAddress(addr3, NODE_NONE), CService("252.5.1.1", 8333));
// Test that the de-serialization does not throw an exception.
CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted);
diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp
index 2f3f607889..74ffe0cc74 100644
--- a/src/test/pmt_tests.cpp
+++ b/src/test/pmt_tests.cpp
@@ -37,8 +37,8 @@ BOOST_AUTO_TEST_CASE(pmt_test1)
seed_insecure_rand(false);
static const unsigned int nTxCounts[] = {1, 4, 7, 17, 56, 100, 127, 256, 312, 513, 1000, 4095};
- for (int n = 0; n < 12; n++) {
- unsigned int nTx = nTxCounts[n];
+ for (int i = 0; i < 12; i++) {
+ unsigned int nTx = nTxCounts[i];
// build a block with some dummy transactions
CBlock block;
diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp
index b39b903530..d1407c1da9 100644
--- a/src/test/prevector_tests.cpp
+++ b/src/test/prevector_tests.cpp
@@ -192,8 +192,8 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
if (((r >> 21) % 32) == 7) {
int values[4];
int num = 1 + (insecure_rand() % 4);
- for (int i = 0; i < num; i++) {
- values[i] = insecure_rand();
+ for (int k = 0; k < num; k++) {
+ values[k] = insecure_rand();
}
test.insert_range(insecure_rand() % (test.size() + 1), values, values + num);
}
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 9bcb07626a..c68320ba8b 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -98,7 +98,7 @@ CBlock
TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& txns, const CScript& scriptPubKey)
{
const CChainParams& chainparams = Params();
- CBlockTemplate *pblocktemplate = CreateNewBlock(chainparams, scriptPubKey);
+ CBlockTemplate *pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
CBlock& block = pblocktemplate->block;
// Replace mempool-selected txns with just coinbase plus passed-in txns:
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index bb40cf7245..14c2e31d95 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -590,7 +590,7 @@ UniValue dumpwallet(const UniValue& params, bool fHelp)
std::sort(vKeyBirth.begin(), vKeyBirth.end());
// produce output
- file << strprintf("# Wallet dump created by Bitcoin %s (%s)\n", CLIENT_BUILD, CLIENT_DATE);
+ file << strprintf("# Wallet dump created by Bitcoin %s\n", CLIENT_BUILD);
file << strprintf("# * Created on %s\n", EncodeDumpTime(GetTime()));
file << strprintf("# * Best block at time of backup was %i (%s),\n", chainActive.Height(), chainActive.Tip()->GetBlockHash().ToString());
file << strprintf("# mined on %s\n", EncodeDumpTime(chainActive.Tip()->GetBlockTime()));
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 9faf21591f..723b2eceff 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -42,6 +42,7 @@ bool bSpendZeroConfChange = DEFAULT_SPEND_ZEROCONF_CHANGE;
bool fSendFreeTransactions = DEFAULT_SEND_FREE_TRANSACTIONS;
const char * DEFAULT_WALLET_DAT = "wallet.dat";
+const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
/**
* Fees smaller than this (in satoshi) are considered zero fee (for transaction creation)
@@ -91,7 +92,51 @@ CPubKey CWallet::GenerateNewKey()
bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets
CKey secret;
- secret.MakeNewKey(fCompressed);
+
+ // Create new metadata
+ int64_t nCreationTime = GetTime();
+ CKeyMetadata metadata(nCreationTime);
+
+ // use HD key derivation if HD was enabled during wallet creation
+ if (!hdChain.masterKeyID.IsNull()) {
+ // for now we use a fixed keypath scheme of m/0'/0'/k
+ CKey key; //master key seed (256bit)
+ CExtKey masterKey; //hd master key
+ CExtKey accountKey; //key at m/0'
+ CExtKey externalChainChildKey; //key at m/0'/0'
+ CExtKey childKey; //key at m/0'/0'/<n>'
+
+ // try to get the master key
+ if (!GetKey(hdChain.masterKeyID, key))
+ throw std::runtime_error("CWallet::GenerateNewKey(): Master key not found");
+
+ masterKey.SetMaster(key.begin(), key.size());
+
+ // derive m/0'
+ // use hardened derivation (child keys >= 0x80000000 are hardened after bip32)
+ masterKey.Derive(accountKey, BIP32_HARDENED_KEY_LIMIT);
+
+ // derive m/0'/0'
+ accountKey.Derive(externalChainChildKey, BIP32_HARDENED_KEY_LIMIT);
+
+ // derive child key at next index, skip keys already known to the wallet
+ do
+ {
+ // always derive hardened keys
+ // childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened child-index-range
+ // example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
+ externalChainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ // increment childkey index
+ hdChain.nExternalChainCounter++;
+ } while(HaveKey(childKey.key.GetPubKey().GetID()));
+ secret = childKey.key;
+
+ // update the chain model in the database
+ if (!CWalletDB(strWalletFile).WriteHDChain(hdChain))
+ throw std::runtime_error("CWallet::GenerateNewKey(): Writing HD chain model failed");
+ } else {
+ secret.MakeNewKey(fCompressed);
+ }
// Compressed public keys were introduced in version 0.6.0
if (fCompressed)
@@ -100,9 +145,7 @@ CPubKey CWallet::GenerateNewKey()
CPubKey pubkey = secret.GetPubKey();
assert(secret.VerifyPubKey(pubkey));
- // Create new metadata
- int64_t nCreationTime = GetTime();
- mapKeyMetadata[pubkey.GetID()] = CKeyMetadata(nCreationTime);
+ mapKeyMetadata[pubkey.GetID()] = metadata;
if (!nTimeFirstKey || nCreationTime < nTimeFirstKey)
nTimeFirstKey = nCreationTime;
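The derivation loop above walks a fixed m/0'/0'/k' path and makes every child hardened by OR-ing the index with BIP32_HARDENED_KEY_LIMIT (0x80000000). A self-contained sketch of that index arithmetic, independent of the wallet code:

#include <cstdint>
#include <cstdio>

// Indices >= 0x80000000 select hardened derivation under BIP32.
static const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;

int main()
{
    for (uint32_t k = 0; k < 3; ++k) {
        uint32_t hardened = k | BIP32_HARDENED_KEY_LIMIT;  // e.g. 1 -> 0x80000001 == 2147483649
        std::printf("m/0'/0'/%u' -> child index 0x%08x (%u)\n", k, hardened, hardened);
    }
    return 0;
}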
@@ -1121,6 +1164,37 @@ CAmount CWallet::GetChange(const CTransaction& tx) const
return nChange;
}
+bool CWallet::SetHDMasterKey(const CKey& key)
+{
+ LOCK(cs_wallet);
+
+ // store the key as normal "key"/"ckey" object
+ // in the database
+ // key metadata is not required
+ CPubKey pubkey = key.GetPubKey();
+ if (!AddKeyPubKey(key, pubkey))
+ throw std::runtime_error("CWallet::GenerateNewKey(): AddKey failed");
+
+ // store the keyid (hash160) together with
+ // the child index counter in the database
+ // as a hdchain object
+ CHDChain newHdChain;
+ newHdChain.masterKeyID = pubkey.GetID();
+ SetHDChain(newHdChain, false);
+
+ return true;
+}
+
+bool CWallet::SetHDChain(const CHDChain& chain, bool memonly)
+{
+ LOCK(cs_wallet);
+ if (!memonly && !CWalletDB(strWalletFile).WriteHDChain(chain))
+ throw runtime_error("AddHDChain(): writing chain failed");
+
+ hdChain = chain;
+ return true;
+}
+
int64_t CWalletTx::GetTxTime() const
{
int64_t n = nTimeSmart;
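SetHDMasterKey above stores the raw master key like any other wallet key and then records only its hash160, plus a zeroed external-chain counter, through SetHDChain. The memonly flag separates persisting from loading; a rough sketch of the two call sites as they appear elsewhere in this patch (walletInstance is the CWallet* from InitLoadWallet below):

// Creating and persisting a new master key, as InitLoadWallet does on first run:
CKey masterKey;
masterKey.MakeNewKey(true);                   // fresh random key, compressed pubkey
walletInstance->SetHDMasterKey(masterKey);    // writes the key record plus the "hdchain" record

// Restoring state from disk, as ReadKeyValue does while loading wallet.dat:
// SetHDChain(chain, true) -- memonly, so the record being read is not rewritten.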
@@ -3135,6 +3209,7 @@ std::string CWallet::GetWalletHelpString(bool showDebug)
strUsage += HelpMessageOpt("-sendfreetransactions", strprintf(_("Send transactions as zero-fee transactions if possible (default: %u)"), DEFAULT_SEND_FREE_TRANSACTIONS));
strUsage += HelpMessageOpt("-spendzeroconfchange", strprintf(_("Spend unconfirmed change when sending transactions (default: %u)"), DEFAULT_SPEND_ZEROCONF_CHANGE));
strUsage += HelpMessageOpt("-txconfirmtarget=<n>", strprintf(_("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)"), DEFAULT_TX_CONFIRM_TARGET));
+ strUsage += HelpMessageOpt("-usehd", _("Use hierarchical deterministic key generation (HD) after bip32. Only has effect during wallet creation/first start") + " " + strprintf(_("(default: %u)"), DEFAULT_USE_HD_WALLET));
strUsage += HelpMessageOpt("-upgradewallet", _("Upgrade wallet to latest format on startup"));
strUsage += HelpMessageOpt("-wallet=<file>", _("Specify wallet file (within data directory)") + " " + strprintf(_("(default: %s)"), DEFAULT_WALLET_DAT));
strUsage += HelpMessageOpt("-walletbroadcast", _("Make the wallet broadcast transactions") + " " + strprintf(_("(default: %u)"), DEFAULT_WALLETBROADCAST));
@@ -3222,6 +3297,13 @@ bool CWallet::InitLoadWallet()
if (fFirstRun)
{
// Create new keyUser and set as default key
+ if (GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET)) {
+ // generate a new master key
+ CKey key;
+ key.MakeNewKey(true);
+ if (!walletInstance->SetHDMasterKey(key))
+ throw std::runtime_error("CWallet::GenerateNewKey(): Storing master key failed");
+ }
CPubKey newDefaultKey;
if (walletInstance->GetKeyFromPool(newDefaultKey)) {
walletInstance->SetDefaultKey(newDefaultKey);
@@ -3231,6 +3313,13 @@ bool CWallet::InitLoadWallet()
walletInstance->SetBestChain(chainActive.GetLocator());
}
+ else if (mapArgs.count("-usehd")) {
+ bool useHD = GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET);
+ if (!walletInstance->hdChain.masterKeyID.IsNull() && !useHD)
+ return InitError(strprintf(_("Error loading %s: You can't disable HD on a already existing HD wallet"), walletFile));
+ if (walletInstance->hdChain.masterKeyID.IsNull() && useHD)
+ return InitError(strprintf(_("Error loading %s: You can't enable HD on a already existing non-HD wallet"), walletFile));
+ }
LogPrintf(" wallet %15dms\n", GetTimeMillis() - nStart);
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 683c901444..7fc6ce5de5 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -57,6 +57,9 @@ static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 2;
static const unsigned int MAX_FREE_TRANSACTION_CREATE_SIZE = 1000;
static const bool DEFAULT_WALLETBROADCAST = true;
+//! if set, all keys will be derived by using BIP32
+static const bool DEFAULT_USE_HD_WALLET = true;
+
extern const char * DEFAULT_WALLET_DAT;
class CBlockIndex;
@@ -574,6 +577,9 @@ private:
void SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator>);
+ /* the hd chain data model (external chain counters) */
+ CHDChain hdChain;
+
public:
/*
* Main wallet lock.
@@ -889,6 +895,12 @@ public:
static bool ParameterInteraction();
bool BackupWallet(const std::string& strDest);
+
+ /* Set the hd chain model (chain child index counters) */
+ bool SetHDChain(const CHDChain& chain, bool memonly);
+
+ /* Set the current hd master key (will reset the chain child index counters) */
+ bool SetHDMasterKey(const CKey& key);
};
/** A key allocated from the key pool. */
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index b5037c9a65..7bfd490950 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -599,6 +599,16 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
return false;
}
}
+ else if (strType == "hdchain")
+ {
+ CHDChain chain;
+ ssValue >> chain;
+ if (!pwallet->SetHDChain(chain, true))
+ {
+ strErr = "Error reading wallet database: SetHDChain failed";
+ return false;
+ }
+ }
} catch (...)
{
return false;
@@ -1003,3 +1013,10 @@ bool CWalletDB::EraseDestData(const std::string &address, const std::string &key
nWalletDBUpdated++;
return Erase(std::make_pair(std::string("destdata"), std::make_pair(address, key)));
}
+
+
+bool CWalletDB::WriteHDChain(const CHDChain& chain)
+{
+ nWalletDBUpdated++;
+ return Write(std::string("hdchain"), chain);
+}
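The new record type is keyed by the literal string "hdchain": WriteHDChain serializes the CHDChain object under that key, and ReadKeyValue deserializes it and hands it to SetHDChain with memonly=true so the record is not rewritten while the database is still being read. A rough round-trip sketch using CDataStream, which the loader already uses for ssKey/ssValue (SER_DISK and CLIENT_VERSION are the usual wallet serialization parameters):

// Serialize a CHDChain the way WriteHDChain stores it ...
CHDChain chainOut;
chainOut.nExternalChainCounter = 5;             // example counter value
CDataStream ssValue(SER_DISK, CLIENT_VERSION);
ssValue << chainOut;

// ... and read it back as ReadKeyValue does for strType == "hdchain".
CHDChain chainIn;
ssValue >> chainIn;
// chainIn now carries the version, counter and masterKeyID that were written.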
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 00c10ea70f..71b0ff26db 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -40,6 +40,35 @@ enum DBErrors
DB_NEED_REWRITE
};
+/* simple hd chain data model */
+class CHDChain
+{
+public:
+ uint32_t nExternalChainCounter;
+ CKeyID masterKeyID; //!< master key hash160
+
+ static const int CURRENT_VERSION = 1;
+ int nVersion;
+
+ CHDChain() { SetNull(); }
+ ADD_SERIALIZE_METHODS;
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ {
+ READWRITE(this->nVersion);
+ nVersion = this->nVersion;
+ READWRITE(nExternalChainCounter);
+ READWRITE(masterKeyID);
+ }
+
+ void SetNull()
+ {
+ nVersion = CHDChain::CURRENT_VERSION;
+ nExternalChainCounter = 0;
+ masterKeyID.SetNull();
+ }
+};
+
class CKeyMetadata
{
public:
@@ -134,6 +163,9 @@ public:
static bool Recover(CDBEnv& dbenv, const std::string& filename, bool fOnlyKeys);
static bool Recover(CDBEnv& dbenv, const std::string& filename);
+ //! write the hdchain model (external chain child index counter)
+ bool WriteHDChain(const CHDChain& chain);
+
private:
CWalletDB(const CWalletDB&);
void operator=(const CWalletDB&);
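For reference, the READWRITE order in CHDChain above fixes the byte layout of the serialized "hdchain" record: a 4-byte little-endian version, a 4-byte little-endian external-chain counter, and the raw 20-byte hash160 of the master key. A standalone sketch of that layout, assuming the serializer's usual little-endian integer encoding (an illustration, not the wallet code itself):

#include <cstdint>
#include <vector>

// 28-byte layout of a serialized CHDChain record.
std::vector<unsigned char> SerializeHDChainSketch(int32_t nVersion,
                                                  uint32_t nExternalChainCounter,
                                                  const unsigned char masterKeyID[20])
{
    std::vector<unsigned char> out;
    for (int i = 0; i < 4; ++i) out.push_back((nVersion >> (8 * i)) & 0xff);              // version, LE
    for (int i = 0; i < 4; ++i) out.push_back((nExternalChainCounter >> (8 * i)) & 0xff); // counter, LE
    out.insert(out.end(), masterKeyID, masterKeyID + 20);                                 // key hash160
    return out;
}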