-rw-r--r--  .gitignore | 1
-rw-r--r--  .travis.yml | 4
-rw-r--r--  README.md | 2
-rw-r--r--  build-aux/m4/ax_pthread.m4 | 550
-rw-r--r--  build-aux/m4/bitcoin_qt.m4 | 4
-rw-r--r--  configure.ac | 2
-rw-r--r--  contrib/devtools/split-debug.sh.in | 10
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml | 51
-rw-r--r--  contrib/gitian-descriptors/gitian-osx.yml | 10
-rw-r--r--  contrib/gitian-descriptors/gitian-win.yml | 6
-rw-r--r--  contrib/verify-commits/README.md | 26
-rw-r--r--  contrib/verify-commits/allow-revsig-commits | 2
-rwxr-xr-x  contrib/verify-commits/gpg.sh | 10
-rw-r--r--  contrib/verify-commits/trusted-git-root | 2
-rw-r--r--  contrib/verify-commits/trusted-keys | 4
-rwxr-xr-x  contrib/verify-commits/verify-commits.sh | 19
-rw-r--r--  depends/Makefile | 10
-rw-r--r--  depends/hosts/darwin.mk | 4
-rw-r--r--  depends/packages/bdb.mk | 3
-rw-r--r--  depends/packages/native_cctools.mk | 20
-rw-r--r--  depends/packages/openssl.mk | 39
-rw-r--r--  depends/packages/packages.mk | 4
-rw-r--r--  depends/packages/qt.mk | 88
-rw-r--r--  depends/patches/qt/fix-xcb-include-order.patch | 28
-rw-r--r--  depends/patches/qt/fix_qt_pkgconfig.patch | 11
-rw-r--r--  doc/bips.md | 1
-rw-r--r--  doc/gitian-building.md | 12
-rw-r--r--  doc/release-notes.md | 23
-rw-r--r--  doc/tor.md | 18
-rwxr-xr-x  qa/rpc-tests/fundrawtransaction.py | 18
-rwxr-xr-x  qa/rpc-tests/p2p-fullblocktest.py | 1105
-rwxr-xr-x  qa/rpc-tests/rawtransactions.py | 14
-rw-r--r--  qa/rpc-tests/test_framework/authproxy.py | 16
-rw-r--r--  qa/rpc-tests/test_framework/blockstore.py | 59
-rw-r--r--  qa/rpc-tests/test_framework/blocktools.py | 23
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py | 14
-rw-r--r--  qa/rpc-tests/test_framework/util.py | 20
-rwxr-xr-x  qa/rpc-tests/wallet.py | 21
-rw-r--r--  src/addrman.cpp | 20
-rw-r--r--  src/addrman.h | 11
-rw-r--r--  src/chainparams.cpp | 8
-rw-r--r--  src/chainparams.h | 4
-rw-r--r--  src/init.cpp | 15
-rw-r--r--  src/main.cpp | 41
-rw-r--r--  src/main.h | 2
-rw-r--r--  src/miner.cpp | 643
-rw-r--r--  src/miner.h | 174
-rw-r--r--  src/net.cpp | 222
-rw-r--r--  src/net.h | 24
-rw-r--r--  src/netbase.cpp | 8
-rw-r--r--  src/protocol.cpp | 4
-rw-r--r--  src/protocol.h | 12
-rw-r--r--  src/qt/bitcoin.cpp | 3
-rw-r--r--  src/qt/forms/receiverequestdialog.ui | 2
-rw-r--r--  src/qt/guiconstants.h | 2
-rw-r--r--  src/qt/receiverequestdialog.cpp | 22
-rw-r--r--  src/qt/receiverequestdialog.h | 1
-rw-r--r--  src/rpc/mining.cpp | 4
-rw-r--r--  src/rpc/net.cpp | 95
-rw-r--r--  src/rpc/rawtransaction.cpp | 9
-rw-r--r--  src/test/DoS_tests.cpp | 8
-rw-r--r--  src/test/addrman_tests.cpp | 92
-rw-r--r--  src/test/miner_tests.cpp | 137
-rw-r--r--  src/test/net_tests.cpp | 8
-rw-r--r--  src/test/test_bitcoin.cpp | 2
-rw-r--r--  src/univalue/Makefile.am | 9
-rw-r--r--  src/univalue/configure.ac | 6
-rw-r--r--  src/univalue/lib/univalue_read.cpp | 37
-rw-r--r--  src/univalue/lib/univalue_utffilter.h | 119
-rw-r--r--  src/univalue/lib/univalue_write.cpp | 11
-rw-r--r--  src/univalue/test/fail38.json | 1
-rw-r--r--  src/univalue/test/fail39.json | 1
-rw-r--r--  src/univalue/test/fail40.json | 1
-rw-r--r--  src/univalue/test/fail41.json | 1
-rw-r--r--  src/univalue/test/round2.json | 1
-rw-r--r--  src/univalue/test/unitester.cpp | 31
-rw-r--r--  src/wallet/rpcdump.cpp | 7
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 46
-rw-r--r--  src/wallet/wallet.cpp | 97
-rw-r--r--  src/wallet/wallet.h | 12
-rw-r--r--  src/wallet/walletdb.cpp | 17
-rw-r--r--  src/wallet/walletdb.h | 32
82 files changed, 3198 insertions, 1058 deletions
diff --git a/.gitignore b/.gitignore
index a8722aa593..ce40019dc3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,3 +114,4 @@ share/BitcoindComparisonTool.jar
/doc/doxygen/
libbitcoinconsensus.pc
+contrib/devtools/split-debug.sh
diff --git a/.travis.yml b/.travis.yml
index 64227ac2a8..af9c476dc1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -22,7 +22,7 @@ env:
- CCACHE_TEMPDIR=/tmp/.ccache-temp
- CCACHE_COMPRESS=1
- BASE_OUTDIR=$TRAVIS_BUILD_DIR/out
- - SDK_URL=https://bitcoincore.org/depends-sources/sdks
+ - SDK_URL=https://bitcoin.jonasschnelli.ch/sdks
- PYTHON_DEBUG=1
- WINEDEBUG=fixme-all
matrix:
@@ -39,7 +39,7 @@ env:
# No wallet
- HOST=x86_64-unknown-linux-gnu PACKAGES=" openjdk-7-jre-headless python3" DEP_OPTS="NO_WALLET=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
# Cross-Mac
- - HOST=x86_64-apple-darwin11 PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev" BITCOIN_CONFIG="--enable-reduce-exports" OSX_SDK=10.9 GOAL="deploy"
+ - HOST=x86_64-apple-darwin11 PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev" BITCOIN_CONFIG="--enable-reduce-exports" OSX_SDK=10.11 GOAL="deploy"
before_install:
- export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g")
diff --git a/README.md b/README.md
index 8e816e7a43..3c41649c1b 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ https://bitcoincore.org
What is Bitcoin?
----------------
-Bitcoin is an experimental new digital currency that enables instant payments to
+Bitcoin is an experimental digital currency that enables instant payments to
anyone, anywhere in the world. Bitcoin uses peer-to-peer technology to operate
with no central authority: managing transactions and issuing money are carried
out collectively by the network. Bitcoin Core is the name of open source
diff --git a/build-aux/m4/ax_pthread.m4 b/build-aux/m4/ax_pthread.m4
index d218d1af73..4c4051ea37 100644
--- a/build-aux/m4/ax_pthread.m4
+++ b/build-aux/m4/ax_pthread.m4
@@ -82,7 +82,7 @@
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
-#serial 22
+#serial 23
AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
AC_DEFUN([AX_PTHREAD], [
@@ -100,22 +100,22 @@ ax_pthread_ok=no
# etcetera environment variables, and if threads linking works using
# them:
if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
- ax_pthread_save_CC="$CC"
- ax_pthread_save_CFLAGS="$CFLAGS"
- ax_pthread_save_LIBS="$LIBS"
- AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- LIBS="$PTHREAD_LIBS $LIBS"
- AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
- AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes])
- AC_MSG_RESULT([$ax_pthread_ok])
- if test "x$ax_pthread_ok" = "xno"; then
- PTHREAD_LIBS=""
- PTHREAD_CFLAGS=""
- fi
- CC="$ax_pthread_save_CC"
- CFLAGS="$ax_pthread_save_CFLAGS"
- LIBS="$ax_pthread_save_LIBS"
+ ax_pthread_save_CC="$CC"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
+ AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes])
+ AC_MSG_RESULT([$ax_pthread_ok])
+ if test "x$ax_pthread_ok" = "xno"; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ CC="$ax_pthread_save_CC"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
fi
# We must check for the threads library under a number of different
@@ -152,50 +152,50 @@ ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --
case $host_os in
- freebsd*)
+ freebsd*)
- # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
- # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+ # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+ # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
- ax_pthread_flags="-kthread lthread $ax_pthread_flags"
- ;;
+ ax_pthread_flags="-kthread lthread $ax_pthread_flags"
+ ;;
- hpux*)
+ hpux*)
- # From the cc(1) man page: "[-mt] Sets various -D flags to enable
- # multi-threading and also sets -lpthread."
+ # From the cc(1) man page: "[-mt] Sets various -D flags to enable
+ # multi-threading and also sets -lpthread."
- ax_pthread_flags="-mt -pthread pthread $ax_pthread_flags"
- ;;
+ ax_pthread_flags="-mt -pthread pthread $ax_pthread_flags"
+ ;;
- openedition*)
+ openedition*)
- # IBM z/OS requires a feature-test macro to be defined in order to
- # enable POSIX threads at all, so give the user a hint if this is
- # not set. (We don't define these ourselves, as they can affect
- # other portions of the system API in unpredictable ways.)
+ # IBM z/OS requires a feature-test macro to be defined in order to
+ # enable POSIX threads at all, so give the user a hint if this is
+ # not set. (We don't define these ourselves, as they can affect
+ # other portions of the system API in unpredictable ways.)
- AC_EGREP_CPP([AX_PTHREAD_ZOS_MISSING],
- [
-# if !defined(_OPEN_THREADS) && !defined(_UNIX03_THREADS)
- AX_PTHREAD_ZOS_MISSING
-# endif
- ],
- [AC_MSG_WARN([IBM z/OS requires -D_OPEN_THREADS or -D_UNIX03_THREADS to enable pthreads support.])])
- ;;
+ AC_EGREP_CPP([AX_PTHREAD_ZOS_MISSING],
+ [
+# if !defined(_OPEN_THREADS) && !defined(_UNIX03_THREADS)
+ AX_PTHREAD_ZOS_MISSING
+# endif
+ ],
+ [AC_MSG_WARN([IBM z/OS requires -D_OPEN_THREADS or -D_UNIX03_THREADS to enable pthreads support.])])
+ ;;
- solaris*)
+ solaris*)
- # On Solaris (at least, for some versions), libc contains stubbed
- # (non-functional) versions of the pthreads routines, so link-based
- # tests will erroneously succeed. (N.B.: The stubs are missing
- # pthread_cleanup_push, or rather a function called by this macro,
- # so we could check for that, but who knows whether they'll stub
- # that too in a future libc.) So we'll check first for the
- # standard Solaris way of linking pthreads (-mt -lpthread).
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (N.B.: The stubs are missing
+ # pthread_cleanup_push, or rather a function called by this macro,
+ # so we could check for that, but who knows whether they'll stub
+ # that too in a future libc.) So we'll check first for the
+ # standard Solaris way of linking pthreads (-mt -lpthread).
- ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
- ;;
+ ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
+ ;;
esac
# GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC)
@@ -208,17 +208,17 @@ AS_IF([test "x$GCC" = "xyes"],
# correctly enabled
case $host_os in
- darwin* | hpux* | linux* | osf* | solaris*)
- ax_pthread_check_macro="_REENTRANT"
- ;;
+ darwin* | hpux* | linux* | osf* | solaris*)
+ ax_pthread_check_macro="_REENTRANT"
+ ;;
- aix* | freebsd*)
- ax_pthread_check_macro="_THREAD_SAFE"
- ;;
+ aix*)
+ ax_pthread_check_macro="_THREAD_SAFE"
+ ;;
- *)
- ax_pthread_check_macro="--"
- ;;
+ *)
+ ax_pthread_check_macro="--"
+ ;;
esac
AS_IF([test "x$ax_pthread_check_macro" = "x--"],
[ax_pthread_check_cond=0],
@@ -231,13 +231,13 @@ AC_CACHE_CHECK([whether $CC is Clang],
[ax_cv_PTHREAD_CLANG=no
# Note that Autoconf sets GCC=yes for Clang as well as GCC
if test "x$GCC" = "xyes"; then
- AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
- [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
-# if defined(__clang__) && defined(__llvm__)
- AX_PTHREAD_CC_IS_CLANG
-# endif
- ],
- [ax_cv_PTHREAD_CLANG=yes])
+ AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
+ [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
+# if defined(__clang__) && defined(__llvm__)
+ AX_PTHREAD_CC_IS_CLANG
+# endif
+ ],
+ [ax_cv_PTHREAD_CLANG=yes])
fi
])
ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
@@ -249,222 +249,222 @@ ax_pthread_clang_warning=no
if test "x$ax_pthread_clang" = "xyes"; then
- # Clang takes -pthread; it has never supported any other flag
-
- # (Note 1: This will need to be revisited if a system that Clang
- # supports has POSIX threads in a separate library. This tends not
- # to be the way of modern systems, but it's conceivable.)
-
- # (Note 2: On some systems, notably Darwin, -pthread is not needed
- # to get POSIX threads support; the API is always present and
- # active. We could reasonably leave PTHREAD_CFLAGS empty. But
- # -pthread does define _REENTRANT, and while the Darwin headers
- # ignore this macro, third-party headers might not.)
-
- PTHREAD_CFLAGS="-pthread"
- PTHREAD_LIBS=
-
- ax_pthread_ok=yes
-
- # However, older versions of Clang make a point of warning the user
- # that, in an invocation where only linking and no compilation is
- # taking place, the -pthread option has no effect ("argument unused
- # during compilation"). They expect -pthread to be passed in only
- # when source code is being compiled.
- #
- # Problem is, this is at odds with the way Automake and most other
- # C build frameworks function, which is that the same flags used in
- # compilation (CFLAGS) are also used in linking. Many systems
- # supported by AX_PTHREAD require exactly this for POSIX threads
- # support, and in fact it is often not straightforward to specify a
- # flag that is used only in the compilation phase and not in
- # linking. Such a scenario is extremely rare in practice.
- #
- # Even though use of the -pthread flag in linking would only print
- # a warning, this can be a nuisance for well-run software projects
- # that build with -Werror. So if the active version of Clang has
- # this misfeature, we search for an option to squash it.
-
- AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread],
- [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG],
- [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
- # Create an alternate version of $ac_link that compiles and
- # links in two steps (.c -> .o, .o -> exe) instead of one
- # (.c -> exe), because the warning occurs only in the second
- # step
- ax_pthread_save_ac_link="$ac_link"
- ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
- ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
- ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
- ax_pthread_save_CFLAGS="$CFLAGS"
- for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
- AS_IF([test "x$ax_pthread_try" = "xunknown"], [break])
- CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
- ac_link="$ax_pthread_save_ac_link"
- AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
- [ac_link="$ax_pthread_2step_ac_link"
- AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
- [break])
- ])
- done
- ac_link="$ax_pthread_save_ac_link"
- CFLAGS="$ax_pthread_save_CFLAGS"
- AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no])
- ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
- ])
-
- case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
- no | unknown) ;;
- *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
- esac
+ # Clang takes -pthread; it has never supported any other flag
+
+ # (Note 1: This will need to be revisited if a system that Clang
+ # supports has POSIX threads in a separate library. This tends not
+ # to be the way of modern systems, but it's conceivable.)
+
+ # (Note 2: On some systems, notably Darwin, -pthread is not needed
+ # to get POSIX threads support; the API is always present and
+ # active. We could reasonably leave PTHREAD_CFLAGS empty. But
+ # -pthread does define _REENTRANT, and while the Darwin headers
+ # ignore this macro, third-party headers might not.)
+
+ PTHREAD_CFLAGS="-pthread"
+ PTHREAD_LIBS=
+
+ ax_pthread_ok=yes
+
+ # However, older versions of Clang make a point of warning the user
+ # that, in an invocation where only linking and no compilation is
+ # taking place, the -pthread option has no effect ("argument unused
+ # during compilation"). They expect -pthread to be passed in only
+ # when source code is being compiled.
+ #
+ # Problem is, this is at odds with the way Automake and most other
+ # C build frameworks function, which is that the same flags used in
+ # compilation (CFLAGS) are also used in linking. Many systems
+ # supported by AX_PTHREAD require exactly this for POSIX threads
+ # support, and in fact it is often not straightforward to specify a
+ # flag that is used only in the compilation phase and not in
+ # linking. Such a scenario is extremely rare in practice.
+ #
+ # Even though use of the -pthread flag in linking would only print
+ # a warning, this can be a nuisance for well-run software projects
+ # that build with -Werror. So if the active version of Clang has
+ # this misfeature, we search for an option to squash it.
+
+ AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
+ # Create an alternate version of $ac_link that compiles and
+ # links in two steps (.c -> .o, .o -> exe) instead of one
+ # (.c -> exe), because the warning occurs only in the second
+ # step
+ ax_pthread_save_ac_link="$ac_link"
+ ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
+ ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+ ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
+ AS_IF([test "x$ax_pthread_try" = "xunknown"], [break])
+ CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
+ ac_link="$ax_pthread_save_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [ac_link="$ax_pthread_2step_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [break])
+ ])
+ done
+ ac_link="$ax_pthread_save_ac_link"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no])
+ ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
+ ])
+
+ case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
+ no | unknown) ;;
+ *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
+ esac
fi # $ax_pthread_clang = yes
if test "x$ax_pthread_ok" = "xno"; then
for ax_pthread_try_flag in $ax_pthread_flags; do
- case $ax_pthread_try_flag in
- none)
- AC_MSG_CHECKING([whether pthreads work without any flags])
- ;;
-
- -mt,pthread)
- AC_MSG_CHECKING([whether pthreads work with -mt -lpthread])
- PTHREAD_CFLAGS="-mt"
- PTHREAD_LIBS="-lpthread"
- ;;
-
- -*)
- AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
- PTHREAD_CFLAGS="$ax_pthread_try_flag"
- ;;
-
- pthread-config)
- AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
- AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
- PTHREAD_CFLAGS="`pthread-config --cflags`"
- PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
- ;;
-
- *)
- AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
- PTHREAD_LIBS="-l$ax_pthread_try_flag"
- ;;
- esac
-
- ax_pthread_save_CFLAGS="$CFLAGS"
- ax_pthread_save_LIBS="$LIBS"
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- LIBS="$PTHREAD_LIBS $LIBS"
-
- # Check for various functions. We must include pthread.h,
- # since some functions may be macros. (On the Sequent, we
- # need a special flag -Kthread to make this header compile.)
- # We check for pthread_join because it is in -lpthread on IRIX
- # while pthread_create is in libc. We check for pthread_attr_init
- # due to DEC craziness with -lpthreads. We check for
- # pthread_cleanup_push because it is one of the few pthread
- # functions on Solaris that doesn't have a non-functional libc stub.
- # We try pthread_create on general principles.
-
- AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
-# if $ax_pthread_check_cond
-# error "$ax_pthread_check_macro must be defined"
-# endif
- static void routine(void *a) { a = 0; }
- static void *start_routine(void *a) { return a; }],
- [pthread_t th; pthread_attr_t attr;
- pthread_create(&th, 0, start_routine, 0);
- pthread_join(th, 0);
- pthread_attr_init(&attr);
- pthread_cleanup_push(routine, 0);
- pthread_cleanup_pop(0) /* ; */])],
- [ax_pthread_ok=yes],
- [])
-
- CFLAGS="$ax_pthread_save_CFLAGS"
- LIBS="$ax_pthread_save_LIBS"
-
- AC_MSG_RESULT([$ax_pthread_ok])
- AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
-
- PTHREAD_LIBS=""
- PTHREAD_CFLAGS=""
+ case $ax_pthread_try_flag in
+ none)
+ AC_MSG_CHECKING([whether pthreads work without any flags])
+ ;;
+
+ -mt,pthread)
+ AC_MSG_CHECKING([whether pthreads work with -mt -lpthread])
+ PTHREAD_CFLAGS="-mt"
+ PTHREAD_LIBS="-lpthread"
+ ;;
+
+ -*)
+ AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
+ PTHREAD_CFLAGS="$ax_pthread_try_flag"
+ ;;
+
+ pthread-config)
+ AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
+ AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
+ PTHREAD_LIBS="-l$ax_pthread_try_flag"
+ ;;
+ esac
+
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+# if $ax_pthread_check_cond
+# error "$ax_pthread_check_macro must be defined"
+# endif
+ static void routine(void *a) { a = 0; }
+ static void *start_routine(void *a) { return a; }],
+ [pthread_t th; pthread_attr_t attr;
+ pthread_create(&th, 0, start_routine, 0);
+ pthread_join(th, 0);
+ pthread_attr_init(&attr);
+ pthread_cleanup_push(routine, 0);
+ pthread_cleanup_pop(0) /* ; */])],
+ [ax_pthread_ok=yes],
+ [])
+
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+
+ AC_MSG_RESULT([$ax_pthread_ok])
+ AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
done
fi
# Various other checks:
if test "x$ax_pthread_ok" = "xyes"; then
- ax_pthread_save_CFLAGS="$CFLAGS"
- ax_pthread_save_LIBS="$LIBS"
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- LIBS="$PTHREAD_LIBS $LIBS"
-
- # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
- AC_CACHE_CHECK([for joinable pthread attribute],
- [ax_cv_PTHREAD_JOINABLE_ATTR],
- [ax_cv_PTHREAD_JOINABLE_ATTR=unknown
- for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
- AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
- [int attr = $ax_pthread_attr; return attr /* ; */])],
- [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break],
- [])
- done
- ])
- AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
- test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
- test "x$ax_pthread_joinable_attr_defined" != "xyes"],
- [AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE],
- [$ax_cv_PTHREAD_JOINABLE_ATTR],
- [Define to necessary symbol if this constant
- uses a non-standard name on your system.])
- ax_pthread_joinable_attr_defined=yes
- ])
-
- AC_CACHE_CHECK([whether more special flags are required for pthreads],
- [ax_cv_PTHREAD_SPECIAL_FLAGS],
- [ax_cv_PTHREAD_SPECIAL_FLAGS=no
- case $host_os in
- solaris*)
- ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
- ;;
- esac
- ])
- AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \
- test "x$ax_pthread_special_flags_added" != "xyes"],
- [PTHREAD_CFLAGS="$ax_cv_PTHREAD_SPECIAL_FLAGS $PTHREAD_CFLAGS"
- ax_pthread_special_flags_added=yes])
-
- AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
- [ax_cv_PTHREAD_PRIO_INHERIT],
- [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
- [[int i = PTHREAD_PRIO_INHERIT;]])],
- [ax_cv_PTHREAD_PRIO_INHERIT=yes],
- [ax_cv_PTHREAD_PRIO_INHERIT=no])
- ])
- AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
- test "x$ax_pthread_prio_inherit_defined" != "xyes"],
- [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])
- ax_pthread_prio_inherit_defined=yes
- ])
-
- CFLAGS="$ax_pthread_save_CFLAGS"
- LIBS="$ax_pthread_save_LIBS"
-
- # More AIX lossage: compile with *_r variant
- if test "x$GCC" != "xyes"; then
- case $host_os in
- aix*)
- AS_CASE(["x/$CC"],
- [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
- [#handle absolute path differently from PATH based program lookup
- AS_CASE(["x$CC"],
- [x/*],
- [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
- [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
- ;;
- esac
- fi
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ AC_CACHE_CHECK([for joinable pthread attribute],
+ [ax_cv_PTHREAD_JOINABLE_ATTR],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=unknown
+ for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
+ [int attr = $ax_pthread_attr; return attr /* ; */])],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break],
+ [])
+ done
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
+ test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
+ test "x$ax_pthread_joinable_attr_defined" != "xyes"],
+ [AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE],
+ [$ax_cv_PTHREAD_JOINABLE_ATTR],
+ [Define to necessary symbol if this constant
+ uses a non-standard name on your system.])
+ ax_pthread_joinable_attr_defined=yes
+ ])
+
+ AC_CACHE_CHECK([whether more special flags are required for pthreads],
+ [ax_cv_PTHREAD_SPECIAL_FLAGS],
+ [ax_cv_PTHREAD_SPECIAL_FLAGS=no
+ case $host_os in
+ solaris*)
+ ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
+ ;;
+ esac
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \
+ test "x$ax_pthread_special_flags_added" != "xyes"],
+ [PTHREAD_CFLAGS="$ax_cv_PTHREAD_SPECIAL_FLAGS $PTHREAD_CFLAGS"
+ ax_pthread_special_flags_added=yes])
+
+ AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
+ [ax_cv_PTHREAD_PRIO_INHERIT],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
+ [[int i = PTHREAD_PRIO_INHERIT;]])],
+ [ax_cv_PTHREAD_PRIO_INHERIT=yes],
+ [ax_cv_PTHREAD_PRIO_INHERIT=no])
+ ])
+ AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
+ test "x$ax_pthread_prio_inherit_defined" != "xyes"],
+ [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])
+ ax_pthread_prio_inherit_defined=yes
+ ])
+
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
+
+ # More AIX lossage: compile with *_r variant
+ if test "x$GCC" != "xyes"; then
+ case $host_os in
+ aix*)
+ AS_CASE(["x/$CC"],
+ [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
+ [#handle absolute path differently from PATH based program lookup
+ AS_CASE(["x$CC"],
+ [x/*],
+ [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
+ [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+ ;;
+ esac
+ fi
fi
test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
@@ -475,11 +475,11 @@ AC_SUBST([PTHREAD_CC])
# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
if test "x$ax_pthread_ok" = "xyes"; then
- ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
- :
+ ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
+ :
else
- ax_pthread_ok=no
- $2
+ ax_pthread_ok=no
+ $2
fi
AC_LANG_POP
])dnl AX_PTHREAD
diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4
index efffa4887d..74d9102674 100644
--- a/build-aux/m4/bitcoin_qt.m4
+++ b/build-aux/m4/bitcoin_qt.m4
@@ -342,6 +342,10 @@ AC_DEFUN([_BITCOIN_QT_FIND_STATIC_PLUGINS],[
elif test x$TARGET_OS = xdarwin; then
PKG_CHECK_MODULES([QTPRINT], [Qt5PrintSupport], [QT_LIBS="$QTPRINT_LIBS $QT_LIBS"])
fi
+ else
+ if ${PKG_CONFIG} --exists "Qt5Core >= 5.6" 2>/dev/null; then
+ QT_LIBS="-lQt5PlatformSupport $QT_LIBS"
+ fi
fi
])
else
diff --git a/configure.ac b/configure.ac
index 7f9ff20cd7..97af58bd7c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -75,6 +75,7 @@ AC_PATH_PROG(XGETTEXT,xgettext)
AC_PATH_PROG(HEXDUMP,hexdump)
AC_PATH_TOOL(READELF, readelf)
AC_PATH_TOOL(CPPFILT, c++filt)
+AC_PATH_TOOL(OBJCOPY, objcopy)
AC_ARG_VAR(PYTHONPATH, Augments the default search path for python module files)
@@ -1060,6 +1061,7 @@ AC_SUBST(MINIUPNPC_LIBS)
AC_CONFIG_FILES([Makefile src/Makefile share/setup.nsi share/qt/Info.plist src/test/buildenv.py])
AC_CONFIG_FILES([qa/pull-tester/run-bitcoind-for-test.sh],[chmod +x qa/pull-tester/run-bitcoind-for-test.sh])
AC_CONFIG_FILES([qa/pull-tester/tests_config.py],[chmod +x qa/pull-tester/tests_config.py])
+AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh])
AC_CONFIG_LINKS([qa/pull-tester/rpc-tests.py:qa/pull-tester/rpc-tests.py])
dnl boost's m4 checks do something really nasty: they export these vars. As a
diff --git a/contrib/devtools/split-debug.sh.in b/contrib/devtools/split-debug.sh.in
new file mode 100644
index 0000000000..deda49cc54
--- /dev/null
+++ b/contrib/devtools/split-debug.sh.in
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+if [ $# -ne 3 ];
+ then echo "usage: $0 <input> <stripped-binary> <debug-binary>"
+fi
+
+@OBJCOPY@ --enable-deterministic-archives -p --only-keep-debug $1 $3
+@OBJCOPY@ --enable-deterministic-archives -p --strip-debug $1 $2
+@STRIP@ --enable-deterministic-archives -p -s $2
+@OBJCOPY@ --enable-deterministic-archives -p --add-gnu-debuglink=$3 $2
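The file above is a template: configure substitutes @OBJCOPY@ and @STRIP@ and generates contrib/devtools/split-debug.sh (see the configure.ac and .gitignore hunks). The gitian-linux.yml hunk further down invokes it once per built binary, rewriting the stripped binary in place and leaving the debug info beside it. A hedged stand-alone sketch, with an illustrative path not taken from this patch:

    # split debug symbols out of one binary (example path only)
    ./contrib/devtools/split-debug.sh src/bitcoind src/bitcoind src/bitcoind.dbg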
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index cd289b2f6e..a2788c9d76 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -7,7 +7,17 @@ architectures:
- "amd64"
packages:
- "curl"
-- "g++-multilib"
+- "g++-aarch64-linux-gnu"
+- "g++-4.8-aarch64-linux-gnu"
+- "gcc-4.8-aarch64-linux-gnu"
+- "binutils-aarch64-linux-gnu"
+- "g++-arm-linux-gnueabihf"
+- "g++-4.8-arm-linux-gnueabihf"
+- "gcc-4.8-arm-linux-gnueabihf"
+- "binutils-arm-linux-gnueabihf"
+- "g++-4.8-multilib"
+- "gcc-4.8-multilib"
+- "binutils-gold"
- "git-core"
- "pkg-config"
- "autoconf"
@@ -15,7 +25,6 @@ packages:
- "automake"
- "faketime"
- "bsdmainutils"
-- "binutils-gold"
- "ca-certificates"
- "python"
remotes:
@@ -23,11 +32,18 @@ remotes:
"dir": "bitcoin"
files: []
script: |
+
+ #unlock sudo
+ echo "ubuntu" | sudo -S true
+
+ sudo mkdir -p /usr/include/i386-linux-gnu/
+ sudo ln -s /usr/include/x86_64-linux-gnu/asm /usr/include/i386-linux-gnu/asm
+
WRAP_DIR=$HOME/wrapped
- HOSTS="i686-pc-linux-gnu x86_64-unknown-linux-gnu"
+ HOSTS="i686-pc-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu"
CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests"
FAKETIME_HOST_PROGS=""
- FAKETIME_PROGS="date ar ranlib nm strip objcopy"
+ FAKETIME_PROGS="date ar ranlib nm"
HOST_CFLAGS="-O2 -g"
HOST_CXXFLAGS="-O2 -g"
HOST_LDFLAGS=-static-libstdc++
@@ -68,11 +84,11 @@ script: |
done
}
- export PATH=${WRAP_DIR}:${PATH}
-
# Faketime for depends so intermediate results are comparable
+ export PATH_orig=${PATH}
create_global_faketime_wrappers "2000-01-01 12:00:00"
create_per-host_faketime_wrappers "2000-01-01 12:00:00"
+ export PATH=${WRAP_DIR}:${PATH}
cd bitcoin
BASEPREFIX=`pwd`/depends
@@ -82,8 +98,10 @@ script: |
done
# Faketime for binaries
+ export PATH=${PATH_orig}
create_global_faketime_wrappers "${REFERENCE_DATETIME}"
create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
+ export PATH=${WRAP_DIR}:${PATH}
# Create the release tarball using (arbitrarily) the first host
./autogen.sh
@@ -111,14 +129,24 @@ script: |
CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}"
make ${MAKEOPTS}
make ${MAKEOPTS} -C src check-security
- make ${MAKEOPTS} -C src check-symbols
+
+ #TODO: This is a quick hack that disables symbol checking for arm.
+ # Instead, we should investigate why these are popping up.
+ # For aarch64, we'll need to bump up the min GLIBC version, as the abi
+ # support wasn't introduced until 2.17.
+ case $i in
+ aarch64-*) : ;;
+ arm-*) : ;;
+ *) make ${MAKEOPTS} -C src check-symbols ;;
+ esac
+
make install DESTDIR=${INSTALLPATH}
cd installed
find . -name "lib*.la" -delete
find . -name "lib*.a" -delete
rm -rf ${DISTNAME}/lib/pkgconfig
- find ${DISTNAME}/bin -type f -executable -exec objcopy --only-keep-debug {} {}.dbg \; -exec strip -s {} \; -exec objcopy --add-gnu-debuglink={}.dbg {} \;
- find ${DISTNAME}/lib -type f -exec objcopy --only-keep-debug {} {}.dbg \; -exec strip -s {} \; -exec objcopy --add-gnu-debuglink={}.dbg {} \;
+ find ${DISTNAME}/bin -type f -executable -exec ../contrib/devtools/split-debug.sh {} {} {}.dbg \;
+ find ${DISTNAME}/lib -type f -exec ../contrib/devtools/split-debug.sh {} {} {}.dbg \;
find ${DISTNAME} -not -name "*.dbg" | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz
find ${DISTNAME} -name "*.dbg" | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}-debug.tar.gz
cd ../../
@@ -126,8 +154,3 @@ script: |
done
mkdir -p $OUTDIR/src
mv $SOURCEDIST $OUTDIR/src
- mv ${OUTDIR}/${DISTNAME}-x86_64-*-debug.tar.gz ${OUTDIR}/${DISTNAME}-linux64-debug.tar.gz
- mv ${OUTDIR}/${DISTNAME}-i686-*-debug.tar.gz ${OUTDIR}/${DISTNAME}-linux32-debug.tar.gz
- mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-linux64.tar.gz
- mv ${OUTDIR}/${DISTNAME}-i686-*.tar.gz ${OUTDIR}/${DISTNAME}-linux32.tar.gz
-
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
index 8436cd612a..991976d59e 100644
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
@@ -31,7 +31,7 @@ remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
files:
-- "MacOSX10.9.sdk.tar.gz"
+- "MacOSX10.11.sdk.tar.gz"
script: |
WRAP_DIR=$HOME/wrapped
HOSTS="x86_64-apple-darwin11"
@@ -77,17 +77,17 @@ script: |
done
}
- export PATH=${WRAP_DIR}:${PATH}
-
# Faketime for depends so intermediate results are comparable
+ export PATH_orig=${PATH}
create_global_faketime_wrappers "2000-01-01 12:00:00"
create_per-host_faketime_wrappers "2000-01-01 12:00:00"
+ export PATH=${WRAP_DIR}:${PATH}
cd bitcoin
BASEPREFIX=`pwd`/depends
mkdir -p ${BASEPREFIX}/SDKs
- tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.9.sdk.tar.gz
+ tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.11.sdk.tar.gz
# Build dependencies for each host
for i in $HOSTS; do
@@ -95,8 +95,10 @@ script: |
done
# Faketime for binaries
+ export PATH=${PATH_orig}
create_global_faketime_wrappers "${REFERENCE_DATETIME}"
create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
+ export PATH=${WRAP_DIR}:${PATH}
# Create the release tarball using (arbitrarily) the first host
./autogen.sh
diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml
index 1d3a876dfb..32b57b3160 100644
--- a/contrib/gitian-descriptors/gitian-win.yml
+++ b/contrib/gitian-descriptors/gitian-win.yml
@@ -94,12 +94,12 @@ script: |
done
}
- export PATH=${WRAP_DIR}:${PATH}
-
# Faketime for depends so intermediate results are comparable
+ export PATH_orig=${PATH}
create_global_faketime_wrappers "2000-01-01 12:00:00"
create_per-host_faketime_wrappers "2000-01-01 12:00:00"
create_per-host_linker_wrapper "2000-01-01 12:00:00"
+ export PATH=${WRAP_DIR}:${PATH}
cd bitcoin
BASEPREFIX=`pwd`/depends
@@ -109,9 +109,11 @@ script: |
done
# Faketime for binaries
+ export PATH=${PATH_orig}
create_global_faketime_wrappers "${REFERENCE_DATETIME}"
create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
create_per-host_linker_wrapper "${REFERENCE_DATETIME}"
+ export PATH=${WRAP_DIR}:${PATH}
# Create the release tarball using (arbitrarily) the first host
./autogen.sh
diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md
new file mode 100644
index 0000000000..e9e3f65da2
--- /dev/null
+++ b/contrib/verify-commits/README.md
@@ -0,0 +1,26 @@
+Tooling for verification of PGP signed commits
+----------------------------------------------
+
+This is an incomplete work in progress, but currently includes a pre-push hook
+script (`pre-push-hook.sh`) for maintainers to ensure that their own commits
+are PGP signed (nearly always merge commits), as well as a script to verify
+commits against a trusted keys list.
+
+
+Using verify-commits.sh safely
+------------------------------
+
+Remember that you can't use an untrusted script to verify itself. This means
+that checking out code, then running `verify-commits.sh` against `HEAD` is
+_not_ safe, because the version of `verify-commits.sh` that you just ran could
+be backdoored. Instead, you need to use a trusted version of verify-commits
+prior to checkout to make sure you're checking out only code signed by trusted
+keys:
+
+ git fetch origin && \
+ ./contrib/verify-commits/verify-commits.sh origin/master && \
+ git checkout origin/master
+
+Note that the above isn't a good UI/UX yet, and needs significant improvements
+to make it more convenient and reduce the chance of errors; pull-reqs
+improving this process would be much appreciated.
diff --git a/contrib/verify-commits/allow-revsig-commits b/contrib/verify-commits/allow-revsig-commits
index 31aeb8f3d3..e69de29bb2 100644
--- a/contrib/verify-commits/allow-revsig-commits
+++ b/contrib/verify-commits/allow-revsig-commits
@@ -1,2 +0,0 @@
-586a29253dabec3ca0f1ccba9091daabd16b8411
-eddaba7b5692288087a926da5733e86b47274e4e
diff --git a/contrib/verify-commits/gpg.sh b/contrib/verify-commits/gpg.sh
index 0218b82e11..375d711725 100755
--- a/contrib/verify-commits/gpg.sh
+++ b/contrib/verify-commits/gpg.sh
@@ -1,8 +1,9 @@
#!/bin/sh
-INPUT=$(</dev/stdin)
+INPUT=$(cat /dev/stdin)
VALID=false
REVSIG=false
-IFS=$'\n'
+IFS='
+'
for LINE in $(echo "$INPUT" | gpg --trust-model always "$@" 2>/dev/null); do
case "$LINE" in
"[GNUPG:] VALIDSIG "*)
@@ -13,10 +14,9 @@ for LINE in $(echo "$INPUT" | gpg --trust-model always "$@" 2>/dev/null); do
"[GNUPG:] REVKEYSIG "*)
[ "$BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG" != 1 ] && exit 1
while read KEY; do
- case "$LINE" in "[GNUPG:] REVKEYSIG ${KEY:24:40} "*)
+ case "$LINE" in "[GNUPG:] REVKEYSIG ${KEY#????????????????????????} "*)
REVSIG=true
- GOODREVSIG="[GNUPG:] GOODSIG ${KEY:24:40} "
- ;;
+ GOODREVSIG="[GNUPG:] GOODSIG ${KEY#????????????????????????} "
esac
done < ./contrib/verify-commits/trusted-keys
;;
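The gpg.sh hunk replaces the bash-only substring expansion ${KEY:24:40} with the POSIX form ${KEY#????????????????????????}, which drops the first 24 characters of each 40-character fingerprint in trusted-keys, leaving the 16-character tail that matches the key ID gpg prints in its GOODSIG/REVKEYSIG status lines. A hedged illustration with a made-up fingerprint:

    # sample value only; both expansions yield the same 16-character tail
    KEY=0123456789ABCDEF0123456789ABCDEF01234567
    echo "${KEY#????????????????????????}"   # prints 89ABCDEF01234567, same as bash's ${KEY:24:40}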
diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root
index 838b8d1ea8..c60f8ab695 100644
--- a/contrib/verify-commits/trusted-git-root
+++ b/contrib/verify-commits/trusted-git-root
@@ -1 +1 @@
-165e323d851cc87213c7673c6f278e87a6f2e752
+82bcf405f6db1d55b684a1f63a4aabad376cdad7
diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys
index ad1b28be0c..75242c2a97 100644
--- a/contrib/verify-commits/trusted-keys
+++ b/contrib/verify-commits/trusted-keys
@@ -1,8 +1,4 @@
71A3B16735405025D447E8F274810B012346C9A6
-1F4410F6A89268CE3197A84C57896D2FF8F0B657
-01CDF4627A3B88AAE4A571C87588242FBE38D3A8
-AF8BE07C7049F3A26B239D5325B3083201782B2F
-81291FA67D2C379A006A053FEAB5AF94D9E9ABE7
3F1888C6DCA92A6499C4911FDBA1A67379A1A931
32EE5C4C3FA15CCADB46ABE529D4BCB6416F53EC
FE09B823E6D83A3BC7983EAA2D7F2372E50FE137
diff --git a/contrib/verify-commits/verify-commits.sh b/contrib/verify-commits/verify-commits.sh
index 9ba781008a..5219331e2e 100755
--- a/contrib/verify-commits/verify-commits.sh
+++ b/contrib/verify-commits/verify-commits.sh
@@ -1,25 +1,19 @@
#!/bin/sh
+# Not technically POSIX-compliant due to use of "local", but almost every
+# shell anyone uses today supports it, so its probably fine
DIR=$(dirname "$0")
-
-echo "Please verify all commits in the following list are not evil:"
-git log "$DIR"
+[ "/${DIR#/}" != "$DIR" ] && DIR=$(dirname "$(pwd)/$0")
VERIFIED_ROOT=$(cat "${DIR}/trusted-git-root")
-
-IS_REVSIG_ALLOWED () {
- while read LINE; do
- [ "$LINE" = "$1" ] && return 0
- done < "${DIR}/allow-revsig-commits"
- return 1
-}
+REVSIG_ALLOWED=$(cat "${DIR}/allow-revsig-commits")
HAVE_FAILED=false
IS_SIGNED () {
if [ $1 = $VERIFIED_ROOT ]; then
return 0;
fi
- if IS_REVSIG_ALLOWED "$1"; then
+ if [ "${REVSIG_ALLOWED#*$1}" != "$REVSIG_ALLOWED" ]; then
export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=1
else
export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=0
@@ -27,7 +21,8 @@ IS_SIGNED () {
if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit $1 > /dev/null 2>&1; then
return 1;
fi
- local PARENTS=$(git show -s --format=format:%P $1)
+ local PARENTS
+ PARENTS=$(git show -s --format=format:%P $1)
for PARENT in $PARENTS; do
if IS_SIGNED $PARENT > /dev/null; then
return 0;
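Rather than re-reading allow-revsig-commits for every commit, the rewritten verify-commits.sh loads the file once into REVSIG_ALLOWED and uses a POSIX parameter-expansion containment test: stripping the shortest prefix matching *$1 only changes the string when $1 actually occurs in it. A hedged sketch with invented values:

    # toy values, not from the patch
    REVSIG_ALLOWED="aaa111
    bbb222"
    if [ "${REVSIG_ALLOWED#*bbb222}" != "$REVSIG_ALLOWED" ]; then
        echo "revsig allowed for bbb222"   # the prefix up to and including bbb222 was stripped
    fi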
diff --git a/depends/Makefile b/depends/Makefile
index 3ddfc85a45..dedb0674cf 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -89,13 +89,17 @@ $(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null)
$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null)
$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null)
-qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages)
-qt_native_packages_$(NO_QT) = $(qt_native_packages)
+qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages)
wallet_packages_$(NO_WALLET) = $(wallet_packages)
upnp_packages_$(NO_UPNP) = $(upnp_packages)
packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(qt_packages_) $(wallet_packages_) $(upnp_packages_)
-native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages) $(qt_native_packages_)
+native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages)
+
+ifneq ($(qt_packages_),)
+native_packages += $(qt_native_packages)
+endif
+
all_packages = $(packages) $(native_packages)
meta_depends = Makefile funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index dbe6d00795..985649619f 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,7 +1,7 @@
OSX_MIN_VERSION=10.7
-OSX_SDK_VERSION=10.9
+OSX_SDK_VERSION=10.11
OSX_SDK=$(SDK_PATH)/MacOSX$(OSX_SDK_VERSION).sdk
-LD64_VERSION=241.9
+LD64_VERSION=253.9
darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION)
darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION) -stdlib=libc++
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index 200d57314e..6c9876c2c7 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -14,7 +14,8 @@ endef
define $(package)_preprocess_cmds
sed -i.old 's/__atomic_compare_exchange/__atomic_compare_exchange_db/' dbinc/atomic.h && \
- sed -i.old 's/atomic_init/atomic_init_db/' dbinc/atomic.h mp/mp_region.c mp/mp_mvcc.c mp/mp_fget.c mutex/mut_method.c mutex/mut_tas.c
+ sed -i.old 's/atomic_init/atomic_init_db/' dbinc/atomic.h mp/mp_region.c mp/mp_mvcc.c mp/mp_fget.c mutex/mut_method.c mutex/mut_tas.c && \
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub dist
endef
define $(package)_config_cmds
diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk
index b5603a8d48..797480c25e 100644
--- a/depends/packages/native_cctools.mk
+++ b/depends/packages/native_cctools.mk
@@ -1,14 +1,14 @@
package=native_cctools
-$(package)_version=ee31ae567931c426136c94aad457c7b51d844beb
+$(package)_version=807d6fd1be5d2224872e381870c0a75387fe05e6
$(package)_download_path=https://github.com/theuni/cctools-port/archive
$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=ef107e6ab1b3994cb22e14f4f5c59ea0c0b5a988e6b21d42ed9616b018bbcbf9
+$(package)_sha256_hash=a09c9ba4684670a0375e42d9d67e7f12c1f62581a27f28f7c825d6d7032ccc6a
$(package)_build_subdir=cctools
-$(package)_clang_version=3.3
+$(package)_clang_version=3.7.1
$(package)_clang_download_path=http://llvm.org/releases/$($(package)_clang_version)
-$(package)_clang_download_file=clang+llvm-$($(package)_clang_version)-amd64-Ubuntu-12.04.2.tar.gz
-$(package)_clang_file_name=clang-llvm-$($(package)_clang_version)-amd64-Ubuntu-12.04.2.tar.gz
-$(package)_clang_sha256_hash=60d8f69f032d62ef61bf527857ebb933741ec3352d4d328c5516aa520662dab7
+$(package)_clang_download_file=clang+llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz
+$(package)_clang_file_name=clang-llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz
+$(package)_clang_sha256_hash=99b28a6b48e793705228a390471991386daa33a9717cd9ca007fcdde69608fd9
$(package)_extra_sources=$($(package)_clang_file_name)
define $(package)_fetch_cmds
@@ -23,6 +23,7 @@ define $(package)_extract_cmds
$(build_SHA256SUM) -c $($(package)_extract_dir)/.$($(package)_file_name).hash && \
mkdir -p toolchain/bin toolchain/lib/clang/3.5/include && \
tar --strip-components=1 -C toolchain -xf $($(package)_source_dir)/$($(package)_clang_file_name) && \
+ rm -f toolchain/lib/libc++abi.so* && \
echo "#!/bin/sh" > toolchain/bin/$(host)-dsymutil && \
echo "exit 0" >> toolchain/bin/$(host)-dsymutil && \
chmod +x toolchain/bin/$(host)-dsymutil && \
@@ -30,7 +31,7 @@ define $(package)_extract_cmds
endef
define $(package)_set_vars
-$(package)_config_opts=--target=$(host) --disable-libuuid
+$(package)_config_opts=--target=$(host) --disable-lto-support
$(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib
$(package)_cc=$($(package)_extract_dir)/toolchain/bin/clang
$(package)_cxx=$($(package)_extract_dir)/toolchain/bin/clang++
@@ -53,10 +54,11 @@ define $(package)_stage_cmds
cd $($(package)_extract_dir)/toolchain && \
mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_clang_version)/include && \
mkdir -p $($(package)_staging_prefix_dir)/bin $($(package)_staging_prefix_dir)/include && \
- cp -P bin/clang bin/clang++ $($(package)_staging_prefix_dir)/bin/ &&\
+ cp bin/clang $($(package)_staging_prefix_dir)/bin/ &&\
+ cp -P bin/clang++ $($(package)_staging_prefix_dir)/bin/ &&\
cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \
cp -rf lib/clang/$($(package)_clang_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_clang_version)/include/ && \
- cp bin/$(host)-dsymutil $($(package)_staging_prefix_dir)/bin && \
+ cp bin/llvm-dsymutil $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil && \
if `test -d include/c++/`; then cp -rf include/c++/ $($(package)_staging_prefix_dir)/include/; fi && \
if `test -d lib/c++/`; then cp -rf lib/c++/ $($(package)_staging_prefix_dir)/lib/; fi
endef
diff --git a/depends/packages/openssl.mk b/depends/packages/openssl.mk
index c6452820a2..5ee9f17a63 100644
--- a/depends/packages/openssl.mk
+++ b/depends/packages/openssl.mk
@@ -6,9 +6,42 @@ $(package)_sha256_hash=8f9faeaebad088e772f4ef5e38252d472be4d878c6b3a2718c10a4fce
define $(package)_set_vars
$(package)_config_env=AR="$($(package)_ar)" RANLIB="$($(package)_ranlib)" CC="$($(package)_cc)"
-$(package)_config_opts=--prefix=$(host_prefix) --openssldir=$(host_prefix)/etc/openssl no-zlib no-shared no-dso
-$(package)_config_opts+=no-krb5 no-camellia no-capieng no-cast no-cms no-dtls1 no-gost no-gmp no-heartbeats no-idea no-jpake no-md2
-$(package)_config_opts+=no-mdc2 no-rc5 no-rdrand no-rfc3779 no-rsax no-sctp no-seed no-sha0 no-static_engine no-whirlpool no-rc2 no-rc4 no-ssl2 no-ssl3
+$(package)_config_opts=--prefix=$(host_prefix) --openssldir=$(host_prefix)/etc/openssl
+$(package)_config_opts+=no-camellia
+$(package)_config_opts+=no-capieng
+$(package)_config_opts+=no-cast
+$(package)_config_opts+=no-comp
+$(package)_config_opts+=no-dso
+$(package)_config_opts+=no-dtls1
+$(package)_config_opts+=no-ec_nistp_64_gcc_128
+$(package)_config_opts+=no-gost
+$(package)_config_opts+=no-gmp
+$(package)_config_opts+=no-heartbeats
+$(package)_config_opts+=no-idea
+$(package)_config_opts+=no-jpake
+$(package)_config_opts+=no-krb5
+$(package)_config_opts+=no-libunbound
+$(package)_config_opts+=no-md2
+$(package)_config_opts+=no-mdc2
+$(package)_config_opts+=no-rc4
+$(package)_config_opts+=no-rc5
+$(package)_config_opts+=no-rdrand
+$(package)_config_opts+=no-rfc3779
+$(package)_config_opts+=no-rsax
+$(package)_config_opts+=no-sctp
+$(package)_config_opts+=no-seed
+$(package)_config_opts+=no-sha0
+$(package)_config_opts+=no-shared
+$(package)_config_opts+=no-ssl-trace
+$(package)_config_opts+=no-ssl2
+$(package)_config_opts+=no-ssl3
+$(package)_config_opts+=no-static_engine
+$(package)_config_opts+=no-store
+$(package)_config_opts+=no-unit-test
+$(package)_config_opts+=no-weak-ssl-ciphers
+$(package)_config_opts+=no-whirlpool
+$(package)_config_opts+=no-zlib
+$(package)_config_opts+=no-zlib-dynamic
$(package)_config_opts+=$($(package)_cflags) $($(package)_cppflags)
$(package)_config_opts_linux=-fPIC -Wa,--noexecstack
$(package)_config_opts_x86_64_linux=linux-x86_64
diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk
index 59b009b66a..ac43ef4a2e 100644
--- a/depends/packages/packages.mk
+++ b/depends/packages/packages.mk
@@ -6,7 +6,9 @@ native_packages := native_ccache native_comparisontool
qt_native_packages = native_protobuf
qt_packages = qrencode protobuf
-qt_linux_packages= qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libX11 xextproto libXext xtrans
+qt_x86_64_linux_packages:=qt expat dbus libxcb xcb_proto libXau xproto freetype fontconfig libX11 xextproto libXext xtrans
+qt_i686_linux_packages:=$(qt_x86_64_linux_packages)
+
qt_darwin_packages=qt
qt_mingw32_packages=qt
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index c1fc8e3058..d41d0b9ea5 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -1,20 +1,21 @@
PACKAGE=qt
-$(package)_version=5.5.0
-$(package)_download_path=http://download.qt.io/official_releases/qt/5.5/$($(package)_version)/submodules
+$(package)_version=5.6.1
+$(package)_download_path=http://download.qt.io/official_releases/qt/5.6/$($(package)_version)/submodules
$(package)_suffix=opensource-src-$($(package)_version).tar.gz
$(package)_file_name=qtbase-$($(package)_suffix)
-$(package)_sha256_hash=7e82b1318f88e56a2a9376e069aa608d4fd96b48cb0e1b880ae658b0a1af0561
+$(package)_sha256_hash=0ac67cf8d66d52b995f96c31c4b48117a1afb3db99eaa93e20ccd8f7f55f7fde
$(package)_dependencies=openssl
-$(package)_linux_dependencies=freetype fontconfig dbus libxcb libX11 xproto libXext
+$(package)_linux_dependencies=freetype fontconfig libxcb libX11 xproto libXext
$(package)_build_subdir=qtbase
$(package)_qt_libs=corelib network widgets gui plugins testlib
-$(package)_patches=mac-qmake.conf fix-xcb-include-order.patch mingw-uuidof.patch pidlist_absolute.patch
+$(package)_patches=mac-qmake.conf mingw-uuidof.patch pidlist_absolute.patch fix-xcb-include-order.patch fix_qt_pkgconfig.patch
$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
-$(package)_qttranslations_sha256_hash=c4bd6db6e426965c6f8824c54e81f68bbd61e2bae1bcadc328c6e81c45902a0d
+$(package)_qttranslations_sha256_hash=dcc1534d247babca1840cb6d0a000671801a341ea352d0535474f86adadaf028
+
$(package)_qttools_file_name=qttools-$($(package)_suffix)
-$(package)_qttools_sha256_hash=d9e06bd19ecc86afba5e95d45a906d1bc1ad579aa70001e36143c1aaf695bdd6
+$(package)_qttools_sha256_hash=e0f845de28c31230dfa428f0190ccb3b91d1fc02481b1f064698ae4ef8376aa1
$(package)_extra_sources = $($(package)_qttranslations_file_name)
$(package)_extra_sources += $($(package)_qttools_file_name)
@@ -22,21 +23,34 @@ $(package)_extra_sources += $($(package)_qttools_file_name)
define $(package)_set_vars
$(package)_config_opts_release = -release
$(package)_config_opts_debug = -debug
-$(package)_config_opts += -opensource -confirm-license
+$(package)_config_opts += -bindir $(build_prefix)/bin
+$(package)_config_opts += -c++11
+$(package)_config_opts += -confirm-license
+$(package)_config_opts += -dbus-runtime
+$(package)_config_opts += -hostprefix $(build_prefix)
+$(package)_config_opts += -no-alsa
$(package)_config_opts += -no-audio-backend
+$(package)_config_opts += -no-cups
+$(package)_config_opts += -no-egl
+$(package)_config_opts += -no-eglfs
+$(package)_config_opts += -no-feature-style-windowsmobile
+$(package)_config_opts += -no-feature-style-windowsce
+$(package)_config_opts += -no-freetype
+$(package)_config_opts += -no-gif
$(package)_config_opts += -no-glib
+$(package)_config_opts += -no-gstreamer
$(package)_config_opts += -no-icu
-$(package)_config_opts += -no-cups
$(package)_config_opts += -no-iconv
-$(package)_config_opts += -no-gif
-$(package)_config_opts += -no-freetype
+$(package)_config_opts += -no-kms
+$(package)_config_opts += -no-linuxfb
+$(package)_config_opts += -no-libudev
+$(package)_config_opts += -no-mitshm
+$(package)_config_opts += -no-mtdev
$(package)_config_opts += -no-nis
-$(package)_config_opts += -pch
+$(package)_config_opts += -no-pulseaudio
+$(package)_config_opts += -no-openvg
+$(package)_config_opts += -no-reduce-relocations
$(package)_config_opts += -no-qml-debug
-$(package)_config_opts += -nomake examples
-$(package)_config_opts += -nomake tests
-$(package)_config_opts += -no-feature-style-windowsmobile
-$(package)_config_opts += -no-feature-style-windowsce
$(package)_config_opts += -no-sql-db2
$(package)_config_opts += -no-sql-ibase
$(package)_config_opts += -no-sql-oci
@@ -46,36 +60,25 @@ $(package)_config_opts += -no-sql-odbc
$(package)_config_opts += -no-sql-psql
$(package)_config_opts += -no-sql-sqlite
$(package)_config_opts += -no-sql-sqlite2
-$(package)_config_opts += -prefix $(host_prefix)
-$(package)_config_opts += -hostprefix $(build_prefix)
-$(package)_config_opts += -bindir $(build_prefix)/bin
-$(package)_config_opts += -c++11
+$(package)_config_opts += -no-use-gold-linker
+$(package)_config_opts += -no-xinput2
+$(package)_config_opts += -no-xrender
+$(package)_config_opts += -nomake examples
+$(package)_config_opts += -nomake tests
+$(package)_config_opts += -opensource
$(package)_config_opts += -openssl-linked
-$(package)_config_opts += -v
-$(package)_config_opts += -static
-$(package)_config_opts += -silent
+$(package)_config_opts += -optimized-qmake
+$(package)_config_opts += -pch
$(package)_config_opts += -pkg-config
+$(package)_config_opts += -prefix $(host_prefix)
$(package)_config_opts += -qt-libpng
$(package)_config_opts += -qt-libjpeg
-$(package)_config_opts += -qt-zlib
$(package)_config_opts += -qt-pcre
-$(package)_config_opts += -no-pulseaudio
-$(package)_config_opts += -no-openvg
-$(package)_config_opts += -no-xrender
-$(package)_config_opts += -no-alsa
-$(package)_config_opts += -no-mtdev
-$(package)_config_opts += -no-gstreamer
-$(package)_config_opts += -no-mitshm
-$(package)_config_opts += -no-kms
-$(package)_config_opts += -no-reduce-relocations
-$(package)_config_opts += -no-egl
-$(package)_config_opts += -no-eglfs
-$(package)_config_opts += -no-linuxfb
-$(package)_config_opts += -no-xinput2
-$(package)_config_opts += -no-libudev
-$(package)_config_opts += -no-use-gold-linker
+$(package)_config_opts += -qt-zlib
$(package)_config_opts += -reduce-exports
-$(package)_config_opts += -optimized-qmake
+$(package)_config_opts += -static
+$(package)_config_opts += -silent
+$(package)_config_opts += -v
ifneq ($(build_os),darwin)
$(package)_config_opts_darwin = -xplatform macx-clang-linux
@@ -119,19 +122,22 @@ define $(package)_extract_cmds
tar --strip-components=1 -xf $($(package)_source_dir)/$($(package)_qttools_file_name) -C qttools
endef
+
define $(package)_preprocess_cmds
sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \
sed -i.old "s/src_plugins.depends = src_sql src_xml src_network/src_plugins.depends = src_xml src_network/" qtbase/src/src.pro && \
sed -i.old "s|X11/extensions/XIproto.h|X11/X.h|" qtbase/src/plugins/platforms/xcb/qxcbxsettings.cpp && \
sed -i.old 's/if \[ "$$$$XPLATFORM_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/if \[ "$$$$BUILD_ON_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/' qtbase/configure && \
+ sed -i.old 's/CGEventCreateMouseEvent(0, kCGEventMouseMoved, pos, 0)/CGEventCreateMouseEvent(0, kCGEventMouseMoved, pos, kCGMouseButtonLeft)/' qtbase/src/plugins/platforms/cocoa/qcocoacursor.mm && \
mkdir -p qtbase/mkspecs/macx-clang-linux &&\
cp -f qtbase/mkspecs/macx-clang/Info.plist.lib qtbase/mkspecs/macx-clang-linux/ &&\
cp -f qtbase/mkspecs/macx-clang/Info.plist.app qtbase/mkspecs/macx-clang-linux/ &&\
cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\
cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \
- patch -p1 < $($(package)_patch_dir)/fix-xcb-include-order.patch && \
patch -p1 < $($(package)_patch_dir)/mingw-uuidof.patch && \
patch -p1 < $($(package)_patch_dir)/pidlist_absolute.patch && \
+ patch -p1 < $($(package)_patch_dir)/fix-xcb-include-order.patch && \
+ patch -p1 < $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \
echo "QMAKE_CFLAGS += $($(package)_cflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \
echo "QMAKE_CXXFLAGS += $($(package)_cxxflags) $($(package)_cppflags)" >> qtbase/mkspecs/common/gcc-base.conf && \
echo "QMAKE_LFLAGS += $($(package)_ldflags)" >> qtbase/mkspecs/common/gcc-base.conf && \
diff --git a/depends/patches/qt/fix-xcb-include-order.patch b/depends/patches/qt/fix-xcb-include-order.patch
index ae469ea94b..c7dbebedce 100644
--- a/depends/patches/qt/fix-xcb-include-order.patch
+++ b/depends/patches/qt/fix-xcb-include-order.patch
@@ -1,15 +1,15 @@
--- old/qtbase/src/plugins/platforms/xcb/xcb_qpa_lib.pro 2015-03-17 02:06:42.705930685 +0000
+++ new/qtbase/src/plugins/platforms/xcb/xcb_qpa_lib.pro 2015-03-17 02:08:41.281926351 +0000
-@@ -94,8 +94,6 @@
-
+@@ -74,8 +74,6 @@
+
DEFINES += $$QMAKE_DEFINES_XCB
LIBS += $$QMAKE_LIBS_XCB
-QMAKE_CXXFLAGS += $$QMAKE_CFLAGS_XCB
-QMAKE_CFLAGS += $$QMAKE_CFLAGS_XCB
-
+
CONFIG += qpa/genericunixfontdatabase
-
-@@ -104,7 +102,8 @@
+
+@@ -87,7 +85,8 @@
contains(QT_CONFIG, xcb-qt) {
DEFINES += XCB_USE_RENDER
XCB_DIR = ../../../3rdparty/xcb
@@ -18,28 +18,32 @@
+ QMAKE_CXXFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/sysinclude $$QMAKE_CFLAGS_XCB
LIBS += -lxcb -L$$OUT_PWD/xcb-static -lxcb-static
} else {
- LIBS += -lxcb -lxcb-image -lxcb-icccm -lxcb-sync -lxcb-xfixes -lxcb-shm -lxcb-randr -lxcb-shape -lxcb-keysyms
+ LIBS += -lxcb -lxcb-image -lxcb-icccm -lxcb-sync -lxcb-xfixes -lxcb-shm -lxcb-randr -lxcb-shape -lxcb-keysyms -lxcb-xinerama
--- old/qtbase/src/plugins/platforms/xcb/xcb-static/xcb-static.pro 2015-03-17 02:07:04.641929383 +0000
+++ new/qtbase/src/plugins/platforms/xcb/xcb-static/xcb-static.pro 2015-03-17 02:10:15.485922059 +0000
-@@ -8,7 +8,8 @@
-
+@@ -9,7 +9,8 @@
+
XCB_DIR = ../../../../3rdparty/xcb
-
+
-INCLUDEPATH += $$XCB_DIR/include $$XCB_DIR/include/xcb $$XCB_DIR/sysinclude
+QMAKE_CFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/include/xcb -I$$XCB_DIR/sysinclude
+QMAKE_CXXFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/include/xcb -I$$XCB_DIR/sysinclude
-
+
QMAKE_CXXFLAGS += $$QMAKE_CFLAGS_XCB
QMAKE_CFLAGS += $$QMAKE_CFLAGS_XCB
--- old/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro 2015-07-24 16:02:59.530038830 -0400
+++ new/qtbase/src/plugins/platforms/xcb/xcb-plugin.pro 2015-07-24 16:01:22.106037459 -0400
-@@ -11,3 +11,9 @@
+@@ -6,6 +6,13 @@
qxcbmain.cpp
OTHER_FILES += xcb.json README
-
+
+contains(QT_CONFIG, xcb-qt) {
+ DEFINES += XCB_USE_RENDER
+ XCB_DIR = ../../../3rdparty/xcb
+ QMAKE_CFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/sysinclude $$QMAKE_CFLAGS_XCB
+ QMAKE_CXXFLAGS += -I$$XCB_DIR/include -I$$XCB_DIR/sysinclude $$QMAKE_CFLAGS_XCB
+}
++
+ PLUGIN_TYPE = platforms
+ PLUGIN_CLASS_NAME = QXcbIntegrationPlugin
+ !equals(TARGET, $$QT_DEFAULT_QPA_PLUGIN): PLUGIN_EXTENDS = -
diff --git a/depends/patches/qt/fix_qt_pkgconfig.patch b/depends/patches/qt/fix_qt_pkgconfig.patch
new file mode 100644
index 0000000000..3772db4f8b
--- /dev/null
+++ b/depends/patches/qt/fix_qt_pkgconfig.patch
@@ -0,0 +1,11 @@
+--- old/qtbase/mkspecs/features/qt_module.prf 2016-03-17 02:06:42.705930685 +0000
++++ new/qtbase/mkspecs/features/qt_module.prf 2016-03-17 02:06:42.705930685 +0000
+@@ -244,7 +244,7 @@
+ load(qt_targets)
+
+ # this builds on top of qt_common
+-!internal_module:!lib_bundle:if(unix|mingw) {
++unix|mingw {
+ CONFIG += create_pc
+ QMAKE_PKGCONFIG_DESTDIR = pkgconfig
+ host_build: \
diff --git a/doc/bips.md b/doc/bips.md
index b4b62e781e..1ec03d2fb1 100644
--- a/doc/bips.md
+++ b/doc/bips.md
@@ -10,6 +10,7 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.13.0**):
* [`BIP 23`](https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki): Some extensions to GBT have been implemented since **v0.10.0rc1**, including longpolling and block proposals ([PR #1816](https://github.com/bitcoin/bitcoin/pull/1816)).
* [`BIP 30`](https://github.com/bitcoin/bips/blob/master/bip-0030.mediawiki): The evaluation rules to forbid creating new transactions with the same txid as previous not-fully-spent transactions were implemented since **v0.6.0**, and the rule took effect on *March 15th 2012* ([PR #915](https://github.com/bitcoin/bitcoin/pull/915)).
* [`BIP 31`](https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki): The 'pong' protocol message (and the protocol version bump to 60001) has been implemented since **v0.6.1** ([PR #1081](https://github.com/bitcoin/bitcoin/pull/1081)).
+* [`BIP 32`](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki): Hierarchical Deterministic Wallets has been implemented since **v0.13.0** ([PR #8035](https://github.com/bitcoin/bitcoin/pull/8035)).
* [`BIP 34`](https://github.com/bitcoin/bips/blob/master/bip-0034.mediawiki): The rule that requires blocks to contain their height (number) in the coinbase input, and the introduction of version 2 blocks has been implemented since **v0.7.0**. The rule took effect for version 2 blocks as of *block 224413* (March 5th 2013), and version 1 blocks are no longer allowed since *block 227931* (March 25th 2013) ([PR #1526](https://github.com/bitcoin/bitcoin/pull/1526)).
* [`BIP 35`](https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki): The 'mempool' protocol message (and the protocol version bump to 60002) has been implemented since **v0.7.0** ([PR #1641](https://github.com/bitcoin/bitcoin/pull/1641)).
* [`BIP 37`](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki): The bloom filtering for transaction relaying, partial merkle trees for blocks, and the protocol version bump to 70001 (enabling low-bandwidth SPV clients) has been implemented since **v0.8.0** ([PR #1795](https://github.com/bitcoin/bitcoin/pull/1795)).
diff --git a/doc/gitian-building.md b/doc/gitian-building.md
index 791f209bb1..7796a5fc9c 100644
--- a/doc/gitian-building.md
+++ b/doc/gitian-building.md
@@ -1,7 +1,7 @@
Gitian building
================
-*Setup instructions for a Gitian build of Bitcoin using a Debian VM or physical system.*
+*Setup instructions for a Gitian build of Bitcoin Core using a Debian VM or physical system.*
Gitian is the deterministic build process that is used to build the Bitcoin
Core executables. It provides a way to be reasonably sure that the
@@ -26,7 +26,7 @@ Table of Contents
- [Installing Gitian](#installing-gitian)
- [Setting up the Gitian image](#setting-up-the-gitian-image)
- [Getting and building the inputs](#getting-and-building-the-inputs)
-- [Building Bitcoin](#building-bitcoin)
+- [Building Bitcoin Core](#building-bitcoin-core)
- [Building an alternative repository](#building-an-alternative-repository)
- [Signing externally](#signing-externally)
- [Uploading signatures](#uploading-signatures)
@@ -95,11 +95,11 @@ After creating the VM, we need to configure it.
- Click `Ok` twice to save.
-Get the [Debian 8.x net installer](http://cdimage.debian.org/debian-cd/8.4.0/amd64/iso-cd/debian-8.4.0-amd64-netinst.iso) (a more recent minor version should also work, see also [Debian Network installation](https://www.debian.org/CD/netinst/)).
+Get the [Debian 8.x net installer](http://cdimage.debian.org/debian-cd/8.5.0/amd64/iso-cd/debian-8.5.0-amd64-netinst.iso) (a more recent minor version should also work, see also [Debian Network installation](https://www.debian.org/CD/netinst/)).
This DVD image can be validated using a SHA256 hashing tool, for example on
Unixy OSes by entering the following in a terminal:
- echo "7a6b418e6a4ee3ca75dda04d79ed96c9e2c33bb0c703ca7e40c6374ab4590748 debian-8.4.0-amd64-netinst.iso" | sha256sum -c
+ echo "ad4e8c27c561ad8248d5ebc1d36eb172f884057bfeb2c22ead823f59fa8c3dff debian-8.5.0-amd64-netinst.iso" | sha256sum -c
# (must return OK)
Then start the VM. On the first launch you will be asked for a CD or DVD image. Choose the downloaded iso.
@@ -342,10 +342,10 @@ manual intervention. Also optionally follow the next step: 'Seed the Gitian sour
and offline git repositories' which will fetch the remaining files required for building
offline.
-Building Bitcoin
+Building Bitcoin Core
----------------
-To build Bitcoin (for Linux, OS X and Windows) just follow the steps under 'perform
+To build Bitcoin Core (for Linux, OS X and Windows) just follow the steps under 'perform
Gitian builds' in [doc/release-process.md](release-process.md#perform-gitian-builds) in the bitcoin repository.
This may take some time as it will build all the dependencies needed for each descriptor.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index be619e41c6..6cc05989db 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -43,6 +43,11 @@ RPC low-level changes
32-bit and 64-bit platforms, and the txids were missing in the hashed data. This has been
fixed, but this means that the output will be different than from previous versions.
+- Full UTF-8 support in the RPC API. Non-ASCII characters in, for example,
+ wallet labels were previously mangled because they were not handled
+ properly in JSON RPC processing. This is no longer the case. This also affects
+ the GUI debug console.
+
C++11 and Python 3
-------------------
@@ -119,6 +124,24 @@ feerate. [BIP 133](https://github.com/bitcoin/bips/blob/master/bip-0133.mediawik
### Wallet
+Hierarchical Deterministic Key Generation
+-----------------------------------------
+Newly created wallets will use hierarchical deterministic key generation
+according to BIP32 (keypath m/0'/0'/k').
+Existing wallets will still use traditional key generation.
+
+Backups of HD wallets, regardless of when they have been created, can
+therefore be used to re-generate all possible private keys, even the
+ones which had not yet been generated at the time of the backup.
+
+HD key generation for new wallets can be disabled by `-usehd=0`. Keep in
+mind that this flag only has an effect on newly created wallets.
+You can't disable HD key generation once you have created an HD wallet.
+
+There is no distinction between internal (change) and external keys.
+
+[Pull request](https://github.com/bitcoin/bitcoin/pull/8035/files), [BIP 32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+
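For reference, hardened derivation along the m/0'/0'/k' keypath above needs only HMAC-SHA512 and scalar addition modulo the secp256k1 order. A minimal sketch using just the Python standard library; the helper names are illustrative and edge cases (IL >= n, zero child keys) are ignored:

import hmac, hashlib

SECP256K1_N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
HARDENED = 0x80000000

def ckd_priv_hardened(k_par, c_par, index):
    # BIP32 hardened child: I = HMAC-SHA512(c_par, 0x00 || ser256(k_par) || ser32(index + 2^31))
    data = b"\x00" + k_par.to_bytes(32, "big") + (index + HARDENED).to_bytes(4, "big")
    I = hmac.new(c_par, data, hashlib.sha512).digest()
    child_key = (int.from_bytes(I[:32], "big") + k_par) % SECP256K1_N
    return child_key, I[32:]  # (child private key, child chain code)

def derive_key(master_key, master_chaincode, k):
    # Walk m/0'/0'/k', the keypath new HD wallets use
    key, cc = master_key, master_chaincode
    for index in (0, 0, k):
        key, cc = ckd_priv_hardened(key, cc, index)
    return key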
### GUI
### Tests
diff --git a/doc/tor.md b/doc/tor.md
index 43e922718b..79f1563021 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -95,12 +95,22 @@ Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket
API, to create and destroy 'ephemeral' hidden services programmatically.
Bitcoin Core has been updated to make use of this.
-This means that if Tor is running (and proper authorization is available),
-Bitcoin Core automatically creates a hidden service to listen on, without
-manual configuration. This will positively affect the number of available
-.onion nodes.
+This means that if Tor is running (and proper authentication has been configured),
+Bitcoin Core automatically creates a hidden service to listen on. This will positively
+affect the number of available .onion nodes.
This new feature is enabled by default if Bitcoin Core is listening, and
a connection to Tor can be made. It can be configured with the `-listenonion`,
`-torcontrol` and `-torpassword` settings. To show verbose debugging
information, pass `-debug=tor`.
+
+Connecting to Tor's control socket API requires one of two authentication methods to be
+configured. For cookie authentication the user running bitcoind must have read access
+to the `CookieAuthFile` specified in Tor configuration. In some cases this is
+preconfigured and the creation of a hidden service is automatic. If permission problems
+are seen with `-debug=tor` they can be resolved by adding both the user running tor and
+the user running bitcoind to the same group and setting permissions appropriately. On
+Debian-based systems the user running bitcoind can be added to the debian-tor group,
+which has the appropriate permissions. An alternative authentication method is the use
+of the `-torpassword` flag and a `hash-password` which can be enabled and specified in
+Tor configuration.
\ No newline at end of file
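As a rough illustration of the cookie method described above, the control-port handshake is a single AUTHENTICATE line carrying the hex-encoded contents of the CookieAuthFile; this is why the bitcoind user needs to be able to read that file. A minimal Python sketch (the cookie path and port are assumptions; bitcoind performs this internally):

import binascii, socket

def tor_cookie_authenticate(cookie_path="/run/tor/control.authcookie",
                            host="127.0.0.1", port=9051):
    # Read the raw cookie Tor wrote to its CookieAuthFile
    with open(cookie_path, "rb") as f:
        cookie_hex = binascii.hexlify(f.read()).decode()
    sock = socket.create_connection((host, port))
    # The control protocol is line based; AUTHENTICATE takes the hex-encoded cookie
    sock.sendall(("AUTHENTICATE %s\r\n" % cookie_hex).encode())
    reply = sock.recv(1024).decode()
    if not reply.startswith("250"):
        raise RuntimeError("Tor control authentication failed: " + reply.strip())
    return sock  # authenticated control connection, e.g. for ADD_ONION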
diff --git a/qa/rpc-tests/fundrawtransaction.py b/qa/rpc-tests/fundrawtransaction.py
index 998f822afe..228574e671 100755
--- a/qa/rpc-tests/fundrawtransaction.py
+++ b/qa/rpc-tests/fundrawtransaction.py
@@ -58,7 +58,6 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
- self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@@ -552,7 +551,6 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
- self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
@@ -572,7 +570,6 @@ class RawTransactionsTest(BitcoinTestFramework):
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
- self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@@ -603,7 +600,6 @@ class RawTransactionsTest(BitcoinTestFramework):
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
- self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@@ -677,6 +673,15 @@ class RawTransactionsTest(BitcoinTestFramework):
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ #######################
+ # Test feeRate option #
+ #######################
+
+ # Make sure there is exactly one input so coin selection can't skew the result
+ assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress() : 1}
@@ -684,8 +689,9 @@ class RawTransactionsTest(BitcoinTestFramework):
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
- assert_equal(result['fee']*2, result2['fee'])
- assert_equal(result['fee']*10, result3['fee'])
+ result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
+ assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
+ assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
if __name__ == '__main__':
RawTransactionsTest().main()
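The reworked feeRate assertions above compare fee-per-kilobyte rather than absolute fees, since coin selection can change the transaction size between calls. A rough sketch of what a helper like assert_fee_amount checks; the tolerance shown is an assumption, not necessarily the framework's exact value:

from decimal import Decimal

def assert_fee_amount(fee, tx_size_bytes, fee_per_kB):
    # The paid fee should match the target feerate for this transaction size,
    # within a small allowance for estimation error (assumed: two bytes of slack).
    target = fee_per_kB * tx_size_bytes / Decimal(1000)
    tolerance = fee_per_kB * 2 / Decimal(1000)
    if not (target <= fee <= target + tolerance):
        raise AssertionError("fee %s not within [%s, %s]" % (fee, target, target + tolerance))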
diff --git a/qa/rpc-tests/p2p-fullblocktest.py b/qa/rpc-tests/p2p-fullblocktest.py
index aa0501c5e9..17fd40ef1d 100755
--- a/qa/rpc-tests/p2p-fullblocktest.py
+++ b/qa/rpc-tests/p2p-fullblocktest.py
@@ -9,7 +9,8 @@ from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
-from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
+from test_framework.script import *
+import struct
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
@@ -24,10 +25,36 @@ We use the testing framework in which we expect a particular answer from
each test.
'''
+def hash160(s):
+ return hashlib.new('ripemd160', sha256(s)).digest()
+
+# Use this class for tests that require behavior other than normal "mininode" behavior.
+# For now, it is used to serialize a bloated varint (b64).
+class CBrokenBlock(CBlock):
+ def __init__(self, header=None):
+ super(CBrokenBlock, self).__init__(header)
+
+ def initialize(self, base_block):
+ self.vtx = copy.deepcopy(base_block.vtx)
+ self.hashMerkleRoot = self.calc_merkle_root()
+
+ def serialize(self):
+ r = b""
+ r += super(CBlock, self).serialize()
+ r += struct.pack("<BQ", 255, len(self.vtx))
+ for tx in self.vtx:
+ r += tx.serialize()
+ return r
+
+ def normal_serialize(self):
+ r = b""
+ r += super(CBrokenBlock, self).serialize()
+ return r
+
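CBrokenBlock above overrides serialize() to emit the transaction count in the widest CompactSize form (a 0xFF prefix plus an 8-byte little-endian value) instead of the canonical shortest form. A small sketch of the two encodings, assuming the standard Bitcoin CompactSize rules:

import struct

def ser_compact_size(n):
    # Canonical encoding: the shortest form that fits the value
    if n < 253:
        return struct.pack("<B", n)
    elif n <= 0xFFFF:
        return struct.pack("<BH", 253, n)
    elif n <= 0xFFFFFFFF:
        return struct.pack("<BI", 254, n)
    return struct.pack("<BQ", 255, n)

def ser_bloated_size(n):
    # Non-canonical encoding: always the 9-byte 0xFF form, as CBrokenBlock.serialize() does
    return struct.pack("<BQ", 255, n)

assert len(ser_compact_size(3)) == 1   # canonical count for a 3-tx block: 1 byte
assert len(ser_bloated_size(3)) == 9   # bloated count: 9 bytes, i.e. 8 bytes larger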
class FullBlockTest(ComparisonTestFramework):
- ''' Can either run this test as 1 node with expected answers, or two and compare them.
- Change the "outcome" variable from each TestInstance object to only do the comparison. '''
+ # Can either run this test as 1 node with expected answers, or two and compare them.
+ # Change the "outcome" variable from each TestInstance object to only do the comparison.
def __init__(self):
super().__init__()
self.num_nodes = 1
@@ -35,66 +62,70 @@ class FullBlockTest(ComparisonTestFramework):
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
- self.block_time = int(time.time())+1
self.tip = None
self.blocks = {}
+ def add_options(self, parser):
+ super().add_options(parser)
+ parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
+
def run_test(self):
- test = TestManager(self, self.options.tmpdir)
- test.add_all_connections(self.nodes)
+ self.test = TestManager(self, self.options.tmpdir)
+ self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
- test.run()
+ self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- return block
-
- # Create a block on top of self.tip, and advance self.tip to point to the new block
- # if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
- # and rest will go to fees.
- def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
+
+ # this is a little handier to use than the version in blocktools.py
+ def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ tx = create_transaction(spend_tx, n, b"", value, script)
+ return tx
+
+ # sign a transaction, using the key we know about
+ # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
+ def sign_tx(self, tx, spend_tx, n):
+ scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
+ if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
+ tx.vin[0].scriptSig = CScript()
+ return
+ (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
+ tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
+
+ def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ tx = self.create_tx(spend_tx, n, value, script)
+ self.sign_tx(tx, spend_tx, n)
+ tx.rehash()
+ return tx
+
+ def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
+ block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
+ block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
- if (spend != None):
- coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
- block = create_block(base_block_hash, coinbase, self.block_time)
- if (spend != None):
- tx = CTransaction()
- tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet
- # This copies the java comparison tool testing behavior: the first
- # txout has a garbage scriptPubKey, "to make sure we're not
- # pre-verifying too much" (?)
- tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
- if script == None:
- tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
- else:
- tx.vout.append(CTxOut(1, script))
- # Now sign it if necessary
- scriptSig = b""
- scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
- if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
- scriptSig = CScript([OP_TRUE])
- else:
- # We have to actually sign it
- (sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
- scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
- tx.vin[0].scriptSig = scriptSig
- # Now add the transaction to the block
- block = self.add_transactions_to_block(block, [tx])
- block.solve()
+ if spend == None:
+ block = create_block(base_block_hash, coinbase, block_time)
+ else:
+ coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
+ coinbase.rehash()
+ block = create_block(base_block_hash, coinbase, block_time)
+ tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
+ self.sign_tx(tx, spend.tx, spend.n)
+ self.add_transactions_to_block(block, [tx])
+ block.hashMerkleRoot = block.calc_merkle_root()
+ if solve:
+ block.solve()
self.tip = block
self.block_heights[block.sha256] = height
- self.block_time += 1
assert number not in self.blocks
self.blocks[number] = block
return block
@@ -108,7 +139,7 @@ class FullBlockTest(ComparisonTestFramework):
def save_spendable_output():
spendable_outputs.append(self.tip)
- # get an output that we previous marked as spendable
+ # get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
@@ -122,26 +153,33 @@ class FullBlockTest(ComparisonTestFramework):
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
-
+
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
- # add transactions to a block produced by next_block
+ # adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
- old_hash = block.sha256
self.add_transactions_to_block(block, new_transactions)
+ old_sha256 = block.sha256
+ block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
- self.block_heights[block.sha256] = self.block_heights[old_hash]
- del self.block_heights[old_hash]
+ if block.sha256 != old_sha256:
+ self.block_heights[block.sha256] = self.block_heights[old_sha256]
+ del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
- # creates a new block and advances the tip to that block
+ # shorthand for functions
block = self.next_block
+ create_tx = self.create_tx
+ create_and_sign_tx = self.create_and_sign_transaction
+
+ # these must be updated if consensus changes
+ MAX_BLOCK_SIGOPS = 20000
# Create a new block
@@ -153,43 +191,44 @@ class FullBlockTest(ComparisonTestFramework):
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
- block(1000 + i)
+ block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
+ # collect spendable outputs now to avoid cluttering the code later on
+ out = []
+ for i in range(33):
+ out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
- out0 = get_spendable_output()
- block(1, spend=out0)
+ block(1, spend=out[0])
save_spendable_output()
yield accepted()
- out1 = get_spendable_output()
- b2 = block(2, spend=out1)
+ block(2, spend=out[1])
yield accepted()
-
+ save_spendable_output()
# so fork like this:
- #
+ #
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
- #
+ #
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
- b3 = block(3, spend=out1)
- txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
+ b3 = block(3, spend=out[1])
+ txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
- #
+ #
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
- out2 = get_spendable_output()
- block(4, spend=out2)
+ block(4, spend=out[2])
yield accepted()
@@ -197,46 +236,41 @@ class FullBlockTest(ComparisonTestFramework):
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
- block(5, spend=out2)
+ block(5, spend=out[2])
save_spendable_output()
yield rejected()
- out3 = get_spendable_output()
- block(6, spend=out3)
+ block(6, spend=out[3])
yield accepted()
-
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
- block(7, spend=out2)
+ block(7, spend=out[2])
yield rejected()
- out4 = get_spendable_output()
- block(8, spend=out4)
+ block(8, spend=out[4])
yield rejected()
-
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
- block(9, spend=out4, additional_coinbase_value=1)
+ block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
-
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
- block(10, spend=out3)
+ block(10, spend=out[3])
yield rejected()
- block(11, spend=out4, additional_coinbase_value=1)
+ block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
@@ -246,19 +280,17 @@ class FullBlockTest(ComparisonTestFramework):
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
- b12 = block(12, spend=out3)
+ b12 = block(12, spend=out[3])
save_spendable_output()
- #yield TestInstance([[b12, False]])
- b13 = block(13, spend=out4)
+ b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
- out5 = get_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
- block(14, spend=out5, additional_coinbase_value=1)
+ block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
@@ -267,18 +299,18 @@ class FullBlockTest(ComparisonTestFramework):
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
-
+
# Test that a block with a lot of checksigs is okay
- lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
+ lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
- block(15, spend=out5, script=lots_of_checksigs)
+ block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
+ save_spendable_output()
# Test that a block with too many checksigs is rejected
- out6 = get_spendable_output()
- too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
- block(16, spend=out6, script=too_many_checksigs)
+ too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
+ block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
@@ -299,7 +331,7 @@ class FullBlockTest(ComparisonTestFramework):
block(18, spend=txout_b3)
yield rejected()
- block(19, spend=out6)
+ block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
@@ -307,8 +339,7 @@ class FullBlockTest(ComparisonTestFramework):
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
- out7 = get_spendable_output()
- block(20, spend=out7)
+ block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
@@ -317,10 +348,10 @@ class FullBlockTest(ComparisonTestFramework):
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
- block(21, spend=out6)
+ block(21, spend=out[6])
yield rejected()
- block(22, spend=out5)
+ block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
@@ -329,21 +360,21 @@ class FullBlockTest(ComparisonTestFramework):
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
- b23 = block(23, spend=out6)
- old_hash = b23.sha256
+ b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
- tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
+ tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
yield accepted()
+ save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
- b24 = block(24, spend=out6)
+ b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
@@ -351,7 +382,7 @@ class FullBlockTest(ComparisonTestFramework):
assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
- b25 = block(25, spend=out7)
+ block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
@@ -360,7 +391,7 @@ class FullBlockTest(ComparisonTestFramework):
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
- b26 = block(26, spend=out6)
+ b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
@@ -369,23 +400,20 @@ class FullBlockTest(ComparisonTestFramework):
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
- b27 = block(27, spend=out7)
- yield rejected()
+ b27 = block(27, spend=out[7])
+ yield rejected(RejectResult(16, b'bad-prevblk'))
# Now try a too-large-coinbase script
tip(15)
- b28 = block(28, spend=out6)
+ b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
- # Extend the b28 chain to make sure bitcoind isn't accepted b28
- b29 = block(29, spend=out7)
- # TODO: Should get a reject message back with "bad-prevblk", except
- # there's a bug that prevents this from being detected. Just note
- # failure for now, and add the reject result later.
- yield rejected()
+ # Extend the b28 chain to make sure bitcoind isn't accepting b28
+ b29 = block(29, spend=out[7])
+ yield rejected(RejectResult(16, b'bad-prevblk'))
# b30 has a max-sized coinbase scriptSig.
tip(23)
@@ -394,6 +422,871 @@ class FullBlockTest(ComparisonTestFramework):
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
+ save_spendable_output()
+
+ # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
+ #
+ # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+ # \-> b36 (11)
+ # \-> b34 (10)
+ # \-> b32 (9)
+ #
+
+ # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
+ lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+ b31 = block(31, spend=out[8], script=lots_of_multisigs)
+ assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
+ yield accepted()
+ save_spendable_output()
+
+ # this goes over the limit because the coinbase has one sigop
+ too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
+ b32 = block(32, spend=out[9], script=too_many_multisigs)
+ assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # CHECKMULTISIGVERIFY
+ tip(31)
+ lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+ block(33, spend=out[9], script=lots_of_multisigs)
+ yield accepted()
+ save_spendable_output()
+
+ too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
+ block(34, spend=out[10], script=too_many_multisigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # CHECKSIGVERIFY
+ tip(33)
+ lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
+ b35 = block(35, spend=out[10], script=lots_of_checksigs)
+ yield accepted()
+ save_spendable_output()
+
+ too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
+ block(36, spend=out[11], script=too_many_checksigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
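The b31-b36 cases above rely on legacy sigop counting, where a CHECKSIG(VERIFY) contributes one sigop and a CHECKMULTISIG(VERIFY) contributes a flat twenty regardless of the key count. A simplified sketch of that rule over raw opcodes (real counting also walks push data; the opcode byte values are the standard ones):

OP_CHECKSIG = 0xAC
OP_CHECKSIGVERIFY = 0xAD
OP_CHECKMULTISIG = 0xAE
OP_CHECKMULTISIGVERIFY = 0xAF

def legacy_sigop_count(opcodes):
    # Legacy counting as used for the MAX_BLOCK_SIGOPS limit
    count = 0
    for op in opcodes:
        if op in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
            count += 1
        elif op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
            count += 20
    return count

# Mirrors b31: 999 multisigs (19980) + 19 checksigs = 19999, plus the coinbase's sigop = 20000
script_ops = [OP_CHECKMULTISIG] * ((20000 - 1) // 20) + [OP_CHECKSIG] * 19
assert legacy_sigop_count(script_ops) + 1 == 20000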
+
+ # Check spending of a transaction in a block which failed to connect
+ #
+ # b6 (3)
+ # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+ # \-> b37 (11)
+ # \-> b38 (11/37)
+ #
+
+ # save 37's spendable output, but then double-spend out11 to invalidate the block
+ tip(35)
+ b37 = block(37, spend=out[11])
+ txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
+ tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
+ b37 = update_block(37, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
+ tip(35)
+ block(38, spend=txout_b37)
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Check P2SH SigOp counting
+ #
+ #
+ # b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
+ # \-> b40 (12)
+ #
+ # b39 - create some P2SH outputs that will require 6 sigops to spend:
+ #
+ # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
+ # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
+ #
+ tip(35)
+ b39 = block(39)
+ b39_outputs = 0
+ b39_sigops_per_output = 6
+
+ # Build the redeem script, hash it, use hash to create the p2sh script
+ redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
+ redeem_script_hash = hash160(redeem_script)
+ p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
+
+ # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
+ # This must be signed because it is spending a coinbase
+ spend = out[11]
+ tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
+ tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
+ self.sign_tx(tx, spend.tx, spend.n)
+ tx.rehash()
+ b39 = update_block(39, [tx])
+ b39_outputs += 1
+
+ # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
+ tx_new = None
+ tx_last = tx
+ total_size=len(b39.serialize())
+ while(total_size < MAX_BLOCK_SIZE):
+ tx_new = create_tx(tx_last, 1, 1, p2sh_script)
+ tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
+ tx_new.rehash()
+ total_size += len(tx_new.serialize())
+ if total_size >= MAX_BLOCK_SIZE:
+ break
+ b39.vtx.append(tx_new) # add tx to block
+ tx_last = tx_new
+ b39_outputs += 1
+
+ b39 = update_block(39, [])
+ yield accepted()
+ save_spendable_output()
+
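For the b39 construction above, the P2SH output commits to hash160(redeem_script), and a spender must later reveal a redeem script that hashes to the same value. A minimal sketch of that pattern (the byte values are the standard opcode encodings):

import hashlib

OP_HASH160, OP_EQUAL = 0xA9, 0x87

def hash160(data):
    # RIPEMD160(SHA256(data)), as used for P2SH script hashes
    return hashlib.new("ripemd160", hashlib.sha256(data).digest()).digest()

def p2sh_script_pubkey(redeem_script):
    # OP_HASH160 <20-byte hash> OP_EQUAL
    h = hash160(redeem_script)
    return bytes([OP_HASH160, len(h)]) + h + bytes([OP_EQUAL])

def redeem_script_matches(script_pubkey, redeem_script):
    # What the node effectively checks when the spender reveals the redeem script
    return script_pubkey == p2sh_script_pubkey(redeem_script)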
+
+ # Test sigops in P2SH redeem scripts
+ #
+ # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
+ # The first tx has one sigop and then at the end we add 2 more to put us just over the max.
+ #
+ # b41 does the same, less one, so it has the maximum sigops permitted.
+ #
+ tip(39)
+ b40 = block(40, spend=out[12])
+ sigops = get_legacy_sigopcount_block(b40)
+ numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
+ assert_equal(numTxes <= b39_outputs, True)
+
+ lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
+ new_txs = []
+ for i in range(1, numTxes+1):
+ tx = CTransaction()
+ tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ # second input is corresponding P2SH output from b39
+ tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
+ # Note: must pass the redeem_script (not p2sh_script) to the signature hash function
+ (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
+ sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
+ scriptSig = CScript([sig, redeem_script])
+
+ tx.vin[1].scriptSig = scriptSig
+ tx.rehash()
+ new_txs.append(tx)
+ lastOutpoint = COutPoint(tx.sha256, 0)
+
+ b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
+ tx = CTransaction()
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
+ tx.rehash()
+ new_txs.append(tx)
+ update_block(40, new_txs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ # same as b40, but one less sigop
+ tip(39)
+ b41 = block(41, spend=None)
+ update_block(41, b40.vtx[1:-1])
+ b41_sigops_to_fill = b40_sigops_to_fill - 1
+ tx = CTransaction()
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
+ tx.rehash()
+ update_block(41, [tx])
+ yield accepted()
+
+ # Fork off of b39 to create a constant base again
+ #
+ # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
+ # \-> b41 (12)
+ #
+ tip(39)
+ block(42, spend=out[12])
+ yield rejected()
+ save_spendable_output()
+
+ block(43, spend=out[13])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test a number of really invalid scenarios
+ #
+ # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
+ # \-> ??? (15)
+
+ # The next few blocks are going to be created "by hand" since they'll do funky things, such as having
+ # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
+ height = self.block_heights[self.tip.sha256] + 1
+ coinbase = create_coinbase(height, self.coinbase_pubkey)
+ b44 = CBlock()
+ b44.nTime = self.tip.nTime + 1
+ b44.hashPrevBlock = self.tip.sha256
+ b44.nBits = 0x207fffff
+ b44.vtx.append(coinbase)
+ b44.hashMerkleRoot = b44.calc_merkle_root()
+ b44.solve()
+ self.tip = b44
+ self.block_heights[b44.sha256] = height
+ self.blocks[44] = b44
+ yield accepted()
+
+ # A block with a non-coinbase as the first tx
+ non_coinbase = create_tx(out[15].tx, out[15].n, 1)
+ b45 = CBlock()
+ b45.nTime = self.tip.nTime + 1
+ b45.hashPrevBlock = self.tip.sha256
+ b45.nBits = 0x207fffff
+ b45.vtx.append(non_coinbase)
+ b45.hashMerkleRoot = b45.calc_merkle_root()
+ b45.calc_sha256()
+ b45.solve()
+ self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
+ self.tip = b45
+ self.blocks[45] = b45
+ yield rejected(RejectResult(16, b'bad-cb-missing'))
+
+ # A block with no txns
+ tip(44)
+ b46 = CBlock()
+ b46.nTime = b44.nTime+1
+ b46.hashPrevBlock = b44.sha256
+ b46.nBits = 0x207fffff
+ b46.vtx = []
+ b46.hashMerkleRoot = 0
+ b46.solve()
+ self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
+ self.tip = b46
+ assert 46 not in self.blocks
+ self.blocks[46] = b46
+ s = ser_uint256(b46.hashMerkleRoot)
+ yield rejected(RejectResult(16, b'bad-blk-length'))
+
+ # A block with invalid work
+ tip(44)
+ b47 = block(47, solve=False)
+ target = uint256_from_compact(b47.nBits)
+ while b47.sha256 < target: #changed > to <
+ b47.nNonce += 1
+ b47.rehash()
+ yield rejected(RejectResult(16, b'high-hash'))
+
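b47's 'high-hash' rejection above is the basic proof-of-work check: the block hash, read as a 256-bit integer, must not exceed the target encoded in nBits. A minimal sketch of the compact decoding, with the sign bit and overflow handling omitted:

def compact_to_target(nbits):
    # Compact encoding: the top byte is a base-256 exponent, the low 3 bytes a mantissa
    exponent = nbits >> 24
    mantissa = nbits & 0x007FFFFF
    if exponent <= 3:
        return mantissa >> (8 * (3 - exponent))
    return mantissa << (8 * (exponent - 3))

def check_proof_of_work(block_hash_int, nbits):
    return block_hash_int <= compact_to_target(nbits)

# Regtest blocks in this test use nBits = 0x207fffff, an intentionally easy target
assert compact_to_target(0x207fffff) == 0x7FFFFF << (8 * 29)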
+ # A block with timestamp > 2 hrs in the future
+ tip(44)
+ b48 = block(48, solve=False)
+ b48.nTime = int(time.time()) + 60 * 60 * 3
+ b48.solve()
+ yield rejected(RejectResult(16, b'time-too-new'))
+
+ # A block with an invalid merkle hash
+ tip(44)
+ b49 = block(49)
+ b49.hashMerkleRoot += 1
+ b49.solve()
+ yield rejected(RejectResult(16, b'bad-txnmrklroot'))
+
+ # A block with an incorrect POW limit
+ tip(44)
+ b50 = block(50)
+ b50.nBits = b50.nBits - 1
+ b50.solve()
+ yield rejected(RejectResult(16, b'bad-diffbits'))
+
+ # A block with two coinbase txns
+ tip(44)
+ b51 = block(51)
+ cb2 = create_coinbase(51, self.coinbase_pubkey)
+ b51 = update_block(51, [cb2])
+ yield rejected(RejectResult(16, b'bad-cb-multiple'))
+
+ # A block w/ duplicate txns
+ # Note: txns have to be in the right position in the merkle tree to trigger this error
+ tip(44)
+ b52 = block(52, spend=out[15])
+ tx = create_tx(b52.vtx[1], 0, 1)
+ b52 = update_block(52, [tx, tx])
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ # Test block timestamps
+ # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
+ # \-> b54 (15)
+ #
+ tip(43)
+ block(53, spend=out[14])
+ yield rejected() # rejected since b44 is at same height
+ save_spendable_output()
+
+ # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
+ b54 = block(54, spend=out[15])
+ b54.nTime = b35.nTime - 1
+ b54.solve()
+ yield rejected(RejectResult(16, b'time-too-old'))
+
+ # valid timestamp
+ tip(53)
+ b55 = block(55, spend=out[15])
+ b55.nTime = b35.nTime
+ update_block(55, [])
+ yield accepted()
+ save_spendable_output()
+
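The time-too-old / valid-timestamp pair above exercises the median-time-past rule: a new block's nTime must be strictly greater than the median of the previous 11 block timestamps. A minimal sketch:

def median_time_past(prev_block_times, span=11):
    # Median of the timestamps of the last `span` blocks (11 in Bitcoin)
    times = sorted(prev_block_times[-span:])
    return times[len(times) // 2]

def timestamp_acceptable(new_block_time, prev_block_times):
    # Consensus rule: nTime must be strictly greater than the median-time-past
    return new_block_time > median_time_past(prev_block_times)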
+
+ # Test CVE-2012-2459
+ #
+ # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
+ # \-> b57 (16)
+ # \-> b56p2 (16)
+ # \-> b56 (16)
+ #
+ # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
+ # affecting the merkle root of a block, while still invalidating it.
+ # See: src/consensus/merkle.h
+ #
+ # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx1.
+ # Result: OK
+ #
+ # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
+ # root but duplicate transactions.
+ # Result: Fails
+ #
+ # b57p2 has six transactions in its merkle tree:
+ # - coinbase, tx, tx1, tx2, tx3, tx4
+ # Merkle root calculation will duplicate as necessary.
+ # Result: OK.
+ #
+ # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
+ # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
+ # that the error was caught early, avoiding a DOS vulnerability.)
+
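The duplication described above is easy to see from the merkle computation itself: an odd level duplicates its last node before pairing, so the transaction lists [a, b, c] and [a, b, c, c] yield the same root. A small self-contained sketch:

import hashlib

def double_sha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def merkle_root(leaves):
    # Bitcoin-style merkle tree: at each level, an odd number of nodes
    # duplicates the last one before pairing.
    level = [double_sha256(l) for l in leaves]
    while len(level) > 1:
        if len(level) % 2:
            level.append(level[-1])
        level = [double_sha256(level[i] + level[i + 1]) for i in range(0, len(level), 2)]
    return level[0]

a, b, c = b"coinbase", b"tx", b"tx1"
assert merkle_root([a, b, c]) == merkle_root([a, b, c, c])  # same root, different tx sets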
+ # b57 - a good block with 2 txs, don't submit until end
+ tip(55)
+ b57 = block(57)
+ tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+ tx1 = create_tx(tx, 0, 1)
+ b57 = update_block(57, [tx, tx1])
+
+ # b56 - copy b57, add a duplicate tx
+ tip(55)
+ b56 = copy.deepcopy(b57)
+ self.blocks[56] = b56
+ assert_equal(len(b56.vtx),3)
+ b56 = update_block(56, [tx1])
+ assert_equal(b56.hash, b57.hash)
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ # b57p2 - a good block with 6 tx'es, don't submit until end
+ tip(55)
+ b57p2 = block("57p2")
+ tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+ tx1 = create_tx(tx, 0, 1)
+ tx2 = create_tx(tx1, 0, 1)
+ tx3 = create_tx(tx2, 0, 1)
+ tx4 = create_tx(tx3, 0, 1)
+ b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
+
+ # b56p2 - copy b57p2, duplicate two non-consecutive tx's
+ tip(55)
+ b56p2 = copy.deepcopy(b57p2)
+ self.blocks["b56p2"] = b56p2
+ assert_equal(b56p2.hash, b57p2.hash)
+ assert_equal(len(b56p2.vtx),6)
+ b56p2 = update_block("b56p2", [tx3, tx4])
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ tip("57p2")
+ yield accepted()
+
+ tip(57)
+ yield rejected() #rejected because 57p2 seen first
+ save_spendable_output()
+
+ # Test a few invalid tx types
+ #
+ # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> ??? (17)
+ #
+
+ # tx with prevout.n out of range
+ tip(57)
+ b58 = block(58, spend=out[17])
+ tx = CTransaction()
+ assert(len(out[17].tx.vout) < 42)
+ tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
+ tx.vout.append(CTxOut(0, b""))
+ tx.calc_sha256()
+ b58 = update_block(58, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # tx with output value > input value
+ tip(57)
+ b59 = block(59)
+ tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
+ b59 = update_block(59, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
+
+ # reset to good chain
+ tip(57)
+ b60 = block(60, spend=out[17])
+ yield accepted()
+ save_spendable_output()
+
+ # Test BIP30
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b61 (18)
+ #
+ # Blocks are not allowed to contain a transaction whose id matches that of an earlier,
+ # not-fully-spent transaction in the same chain. To test, make identical coinbases;
+ # the second one should be rejected.
+ #
+ tip(60)
+ b61 = block(61, spend=out[18])
+ b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
+ b61.vtx[0].rehash()
+ b61 = update_block(61, [])
+ assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
+ yield rejected(RejectResult(16, b'bad-txns-BIP30'))
+
+
+ # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b62 (18)
+ #
+ tip(60)
+ b62 = block(62)
+ tx = CTransaction()
+ tx.nLockTime = 0xffffffff #this locktime is non-final
+ assert(out[18].n < len(out[18].tx.vout))
+ tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
+ tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ assert(tx.vin[0].nSequence < 0xffffffff)
+ tx.calc_sha256()
+ b62 = update_block(62, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
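The bad-txns-nonfinal rejections above follow the usual finality rule: a transaction with a future nLockTime is only final if every input carries the maximal nSequence. An approximate sketch of that check:

LOCKTIME_THRESHOLD = 500000000  # below this, nLockTime is a block height; otherwise a unix time
SEQUENCE_FINAL = 0xffffffff

def is_final_tx(nLockTime, vin_sequences, block_height, block_time):
    if nLockTime == 0:
        return True
    cutoff = block_height if nLockTime < LOCKTIME_THRESHOLD else block_time
    if nLockTime < cutoff:
        return True
    # A locked transaction is still final if every input opted out via max nSequence
    return all(seq == SEQUENCE_FINAL for seq in vin_sequences)

# b62's tx: nLockTime = 0xffffffff and a default (non-final) nSequence -> not final
assert not is_final_tx(0xffffffff, [0], block_height=100, block_time=1000000)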
+
+ # Test a non-final coinbase is also rejected
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b63 (-)
+ #
+ tip(60)
+ b63 = block(63)
+ b63.vtx[0].nLockTime = 0xffffffff
+ b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
+ b63.vtx[0].rehash()
+ b63 = update_block(63, [])
+ yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+
+ # This checks that a block with a bloated VARINT between the block_header and the array of tx such that
+ # the block is > MAX_BLOCK_SIZE with the bloated varint, but <= MAX_BLOCK_SIZE without the bloated varint,
+ # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
+ # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
+ #
+ # What matters is that the receiving node should not reject the bloated block, and then reject the canonical
+ # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
+ # \
+ # b64a (18)
+ # b64a is a bloated block (non-canonical varint)
+ # b64 is a good block (same as b64a but w/ canonical varint)
+ #
+ tip(60)
+ regular_block = block("64a", spend=out[18])
+
+ # make it a "broken_block," with non-canonical serialization
+ b64a = CBrokenBlock(regular_block)
+ b64a.initialize(regular_block)
+ self.blocks["64a"] = b64a
+ self.tip = b64a
+ tx = CTransaction()
+
+ # use canonical serialization to calculate size
+ script_length = MAX_BLOCK_SIZE - len(b64a.normal_serialize()) - 69
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
+ b64a = update_block("64a", [tx])
+ assert_equal(len(b64a.serialize()), MAX_BLOCK_SIZE + 8)
+ yield TestInstance([[self.tip, None]])
+
+ # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
+ self.test.block_store.erase(b64a.sha256)
+
+ tip(60)
+ b64 = CBlock(b64a)
+ b64.vtx = copy.deepcopy(b64a.vtx)
+ assert_equal(b64.hash, b64a.hash)
+ assert_equal(len(b64.serialize()), MAX_BLOCK_SIZE)
+ self.blocks[64] = b64
+ update_block(64, [])
+ yield accepted()
+ save_spendable_output()
+
+ # Spend an output created in the block itself
+ #
+ # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ #
+ tip(64)
+ b65 = block(65)
+ tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 0)
+ update_block(65, [tx1, tx2])
+ yield accepted()
+ save_spendable_output()
+
+ # Attempt to spend an output created later in the same block
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ # \-> b66 (20)
+ tip(65)
+ b66 = block(66)
+ tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ update_block(66, [tx2, tx1])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Attempt to double-spend a transaction created in a block
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ # \-> b67 (20)
+ #
+ #
+ tip(65)
+ b67 = block(67)
+ tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ tx3 = create_and_sign_tx(tx1, 0, 2)
+ update_block(67, [tx1, tx2, tx3])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # More tests of block subsidy
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+ # \-> b68 (20)
+ #
+ # b68 - coinbase with an extra 10 satoshis,
+ # creates a tx that has 9 satoshis from out[20] go to fees
+ # this fails because the coinbase is trying to claim 1 satoshi too much in fees
+ #
+ # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
+ # this succeeds
+ #
+ tip(65)
+ b68 = block(68, additional_coinbase_value=10)
+ tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
+ update_block(68, [tx])
+ yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+ tip(65)
+ b69 = block(69, additional_coinbase_value=10)
+ tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
+ update_block(69, [tx])
+ yield accepted()
+ save_spendable_output()
+
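The b68/b69 pair above comes down to simple arithmetic: the coinbase may claim at most the block subsidy plus the fees actually paid by the block's transactions. A small sketch of the check, assuming the 50 BTC regtest subsidy at these heights:

COIN = 100000000

def coinbase_value_ok(coinbase_claimed, tx_fees, subsidy=50 * COIN):
    # bad-cb-amount: the coinbase outputs may not exceed subsidy + total fees
    return coinbase_claimed <= subsidy + sum(tx_fees)

# b68: 10 extra satoshis claimed, but the spending tx only leaves 9 in fees -> invalid
assert not coinbase_value_ok(50 * COIN + 10, [9])
# b69: 10 extra satoshis claimed and a 10 satoshi fee -> valid
assert coinbase_value_ok(50 * COIN + 10, [10])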
+ # Test spending the outpoint of a non-existent transaction
+ #
+ # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+ # \-> b70 (21)
+ #
+ tip(69)
+ block(70, spend=out[21])
+ bogus_tx = CTransaction()
+ bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
+ tx.vout.append(CTxOut(1, b""))
+ update_block(70, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+
+ # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
+ #
+ # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+ # \-> b71 (21)
+ #
+ # b72 is a good block.
+ # b71 is a copy of b72, but re-adds one of its transactions. However, it has the same hash as b72.
+ #
+ tip(69)
+ b72 = block(72)
+ tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ b72 = update_block(72, [tx1, tx2]) # now tip is 72
+ b71 = copy.deepcopy(b72)
+ b71.vtx.append(tx2) # add duplicate tx2
+ self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
+ self.blocks[71] = b71
+
+ assert_equal(len(b71.vtx), 4)
+ assert_equal(len(b72.vtx), 3)
+ assert_equal(b72.sha256, b71.sha256)
+
+ tip(71)
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+ tip(72)
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test some invalid scripts and MAX_BLOCK_SIGOPS
+ #
+ # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+ # \-> b** (22)
+ #
+
+ # b73 - tx with excessive sigops that are placed after an excessively large script element.
+ # The purpose of the test is to make sure those sigops are counted.
+ #
+ # script is a bytearray of size 20,526
+ #
+ # bytearray[0-19,998] : OP_CHECKSIG
+ # bytearray[19,999] : OP_PUSHDATA4
+ # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
+ # bytearray[20,004-20,524]: unread data (script_element)
+ # bytearray[20,525] : OP_CHECKSIG (this puts us over the limit)
+ #
+ tip(72)
+ b73 = block(73)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
+
+ element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
+ a[MAX_BLOCK_SIGOPS] = element_size % 256
+ a[MAX_BLOCK_SIGOPS+1] = element_size // 256
+ a[MAX_BLOCK_SIGOPS+2] = 0
+ a[MAX_BLOCK_SIGOPS+3] = 0
+
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b73 = update_block(73, [tx])
+ assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ # b74/75 - if we push an invalid script element, all previous sigops are counted,
+ # but sigops after the element are not counted.
+ #
+ # The script element is invalid because its push_data length claims that
+ # a large amount of data (0xffffff bytes) will follow, but we only
+ # provide far fewer bytes. Those bytes are CHECKSIGs, so they would
+ # cause b75 to fail for excessive sigops if they were counted.
+ #
+ # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
+ # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
+ #
+ #
+ tip(72)
+ b74 = block(74)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS] = 0x4e
+ a[MAX_BLOCK_SIGOPS+1] = 0xfe
+ a[MAX_BLOCK_SIGOPS+2] = 0xff
+ a[MAX_BLOCK_SIGOPS+3] = 0xff
+ a[MAX_BLOCK_SIGOPS+4] = 0xff
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b74 = update_block(74, [tx])
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ tip(72)
+ b75 = block(75)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS-1] = 0x4e
+ a[MAX_BLOCK_SIGOPS] = 0xff
+ a[MAX_BLOCK_SIGOPS+1] = 0xff
+ a[MAX_BLOCK_SIGOPS+2] = 0xff
+ a[MAX_BLOCK_SIGOPS+3] = 0xff
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b75 = update_block(75, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Check that if we push an element filled with CHECKSIGs, they are not counted
+ tip(75)
+ b76 = block(76)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
+ tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
+ b76 = update_block(76, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Test transaction resurrection
+ #
+ # -> b77 (24) -> b78 (25) -> b79 (26)
+ # \-> b80 (25) -> b81 (26) -> b82 (27)
+ #
+ # b78 creates a tx, which is spent in b79. After b82, both should be in mempool
+ #
+ # The txs must be unsigned and must pass the node's mempool policy. They are unsigned for the
+ # rather obscure reason that the Python signature code does not distinguish between
+ # Low-S and High-S values (whereas the bitcoin code has custom code which does so);
+ # as a result, the odds are only 50% that the python code will produce the right
+ # value and the transaction will be accepted into the mempool. Until the
+ # test framework supports low-S signing, we are out of luck.
+ #
+ # To get around this issue, we construct transactions which are not signed and which
+ # spend to OP_TRUE. If the standard-ness rules change, this test would need to be
+ # updated. (Perhaps to spend to a P2SH OP_TRUE script)
+ #
+ tip(76)
+ block(77)
+ tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
+ update_block(77, [tx77])
+ yield accepted()
+ save_spendable_output()
+
+ block(78)
+ tx78 = create_tx(tx77, 0, 9*COIN)
+ update_block(78, [tx78])
+ yield accepted()
+
+ block(79)
+ tx79 = create_tx(tx78, 0, 8*COIN)
+ update_block(79, [tx79])
+ yield accepted()
+
+ # mempool should be empty
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+ tip(77)
+ block(80, spend=out[25])
+ yield rejected()
+ save_spendable_output()
+
+ block(81, spend=out[26])
+ yield rejected() # other chain is same length
+ save_spendable_output()
+
+ block(82, spend=out[27])
+ yield accepted() # now this chain is longer, triggers re-org
+ save_spendable_output()
+
+ # now check that tx78 and tx79 have been put back into the peer's mempool
+ mempool = self.nodes[0].getrawmempool()
+ assert_equal(len(mempool), 2)
+ assert(tx78.hash in mempool)
+ assert(tx79.hash in mempool)
+
+
+ # Test invalid opcodes in dead execution paths.
+ #
+ # -> b81 (26) -> b82 (27) -> b83 (28)
+ #
+ b83 = block(83)
+ op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
+ script = CScript(op_codes)
+ tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
+
+ tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
+ tx2.vin[0].scriptSig = CScript([OP_FALSE])
+ tx2.rehash()
+
+ update_block(83, [tx1, tx2])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
+ #
+ # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
+ # \-> b85 (29) -> b86 (30) \-> b89a (32)
+ #
+ #
+ b84 = block(84)
+ tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.calc_sha256()
+ self.sign_tx(tx1, out[29].tx, out[29].n)
+ tx1.rehash()
+ tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
+ tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
+ tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
+ tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
+ tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
+ tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
+
+ update_block(84, [tx1,tx2,tx3,tx4,tx5])
+ yield accepted()
+ save_spendable_output()
+
+ tip(83)
+ block(85, spend=out[29])
+ yield rejected()
+
+ block(86, spend=out[30])
+ yield accepted()
+
+ tip(84)
+ block(87, spend=out[30])
+ yield rejected()
+ save_spendable_output()
+
+ block(88, spend=out[31])
+ yield accepted()
+ save_spendable_output()
+
+ # trying to spend the OP_RETURN output is rejected
+ block("89a", spend=out[32])
+ tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
+ update_block("89a", [tx])
+ yield rejected()
+
+
+ # Test re-org of a week's worth of blocks (1088 blocks)
+ # This test takes a minute or two and can be accomplished in memory
+ #
+ if self.options.runbarelyexpensive:
+ tip(88)
+ LARGE_REORG_SIZE = 1088
+ test1 = TestInstance(sync_every_block=False)
+ spend=out[32]
+ for i in range(89, LARGE_REORG_SIZE + 89):
+ b = block(i, spend)
+ tx = CTransaction()
+ script_length = MAX_BLOCK_SIZE - len(b.serialize()) - 69
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
+ b = update_block(i, [tx])
+ assert_equal(len(b.serialize()), MAX_BLOCK_SIZE)
+ test1.blocks_and_transactions.append([self.tip, True])
+ save_spendable_output()
+ spend = get_spendable_output()
+
+ yield test1
+ chain1_tip = i
+
+ # now create alt chain of same length
+ tip(88)
+ test2 = TestInstance(sync_every_block=False)
+ for i in range(89, LARGE_REORG_SIZE + 89):
+ block("alt"+str(i))
+ test2.blocks_and_transactions.append([self.tip, False])
+ yield test2
+
+ # extend alt chain to trigger re-org
+ block("alt" + str(chain1_tip + 1))
+ yield accepted()
+
+ # ... and re-org back to the first chain
+ tip(chain1_tip)
+ block(chain1_tip + 1)
+ yield rejected()
+ block(chain1_tip + 2)
+ yield accepted()
+
+ chain1_tip += 2
+
if __name__ == '__main__':
diff --git a/qa/rpc-tests/rawtransactions.py b/qa/rpc-tests/rawtransactions.py
index aa403f058c..ab6d2e8def 100755
--- a/qa/rpc-tests/rawtransactions.py
+++ b/qa/rpc-tests/rawtransactions.py
@@ -143,6 +143,20 @@ class RawTransactionsTest(BitcoinTestFramework):
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ decrawtx= self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
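
The three new cases pin down the accepted range for the optional 'sequence' field: it must fit an unsigned 32-bit integer, so -1 and 2^32 are rejected while 2^32 - 2 round-trips through decoderawtransaction. The boundary being exercised, expressed directly (illustrative only):

    def is_valid_sequence(n):
        # nSequence is serialized as a uint32, so only 0 .. 0xFFFFFFFF fit.
        return 0 <= n <= 0xFFFFFFFF

    assert not is_valid_sequence(-1)
    assert not is_valid_sequence(4294967296)
    assert is_valid_sequence(4294967294)
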
diff --git a/qa/rpc-tests/test_framework/authproxy.py b/qa/rpc-tests/test_framework/authproxy.py
index 95b2be658c..d095a56ce7 100644
--- a/qa/rpc-tests/test_framework/authproxy.py
+++ b/qa/rpc-tests/test_framework/authproxy.py
@@ -67,9 +67,11 @@ def EncodeDecimal(o):
class AuthServiceProxy(object):
__id_count = 0
- def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
+ # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
+ def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
+ self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
@@ -134,12 +136,12 @@ class AuthServiceProxy(object):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name,
- json.dumps(args, default=EncodeDecimal)))
+ json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
postdata = json.dumps({'version': '1.1',
'method': self._service_name,
'params': args,
- 'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
- response = self._request('POST', self.__url.path, postdata)
+ 'id': AuthServiceProxy.__id_count}, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
+ response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
@@ -149,9 +151,9 @@ class AuthServiceProxy(object):
return response['result']
def _batch(self, rpc_call_list):
- postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal)
+ postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> "+postdata)
- return self._request('POST', self.__url.path, postdata)
+ return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
http_response = self.__conn.getresponse()
@@ -167,7 +169,7 @@ class AuthServiceProxy(object):
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
- log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal)))
+ log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- "+responsedata)
return response
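
ensure_ascii is handed straight to json.dumps: True (the default) escapes non-ASCII as \uXXXX, False emits raw UTF-8, which is why the request body is now explicitly encoded to bytes before being sent. A quick standalone illustration:

    import json

    payload = {'method': 'getaccount', 'params': [u'рыба']}
    print(json.dumps(payload, ensure_ascii=True))                    # ... "\u0440\u044b\u0431\u0430" ...
    print(json.dumps(payload, ensure_ascii=False).encode('utf-8'))   # raw UTF-8 bytes on the wire
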
diff --git a/qa/rpc-tests/test_framework/blockstore.py b/qa/rpc-tests/test_framework/blockstore.py
index 4bc279032b..6120dd574b 100644
--- a/qa/rpc-tests/test_framework/blockstore.py
+++ b/qa/rpc-tests/test_framework/blockstore.py
@@ -13,20 +13,31 @@ class BlockStore(object):
self.blockDB = dbm.ndbm.open(datadir + "/blocks", 'c')
self.currentBlock = 0
self.headers_map = dict()
-
+
def close(self):
self.blockDB.close()
+ def erase(self, blockhash):
+ del self.blockDB[repr(blockhash)]
+
+ # lookup an entry and return the item as raw bytes
def get(self, blockhash):
- serialized_block = None
+ value = None
try:
- serialized_block = self.blockDB[repr(blockhash)]
+ value = self.blockDB[repr(blockhash)]
except KeyError:
return None
- f = BytesIO(serialized_block)
- ret = CBlock()
- ret.deserialize(f)
- ret.calc_sha256()
+ return value
+
+ # lookup an entry and return it as a CBlock
+ def get_block(self, blockhash):
+ ret = None
+ serialized_block = self.get(blockhash)
+ if serialized_block is not None:
+ f = BytesIO(serialized_block)
+ ret = CBlock()
+ ret.deserialize(f)
+ ret.calc_sha256()
return ret
def get_header(self, blockhash):
@@ -75,13 +86,16 @@ class BlockStore(object):
def add_header(self, header):
self.headers_map[header.sha256] = header
+ # lookup the hashes in "inv", and return p2p messages for delivering
+ # blocks found.
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
- block = self.get(i.hash)
- if block is not None:
- responses.append(msg_block(block))
+ data = self.get(i.hash)
+ if data is not None:
+ # Use msg_generic to avoid re-serialization
+ responses.append(msg_generic(b"block", data))
return responses
def get_locator(self, current_tip=None):
@@ -90,11 +104,11 @@ class BlockStore(object):
r = []
counter = 0
step = 1
- lastBlock = self.get(current_tip)
+ lastBlock = self.get_block(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
- lastBlock = self.get(lastBlock.hashPrevBlock)
+ lastBlock = self.get_block(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
@@ -111,16 +125,23 @@ class TxStore(object):
def close(self):
self.txDB.close()
+ # lookup an entry and return the item as raw bytes
def get(self, txhash):
- serialized_tx = None
+ value = None
try:
- serialized_tx = self.txDB[repr(txhash)]
+ value = self.txDB[repr(txhash)]
except KeyError:
return None
- f = BytesIO(serialized_tx)
- ret = CTransaction()
- ret.deserialize(f)
- ret.calc_sha256()
+ return value
+
+ def get_transaction(self, txhash):
+ ret = None
+ serialized_tx = self.get(txhash)
+ if serialized_tx is not None:
+ f = BytesIO(serialized_tx)
+ ret = CTransaction()
+ ret.deserialize(f)
+ ret.calc_sha256()
return ret
def add_transaction(self, tx):
@@ -136,5 +157,5 @@ class TxStore(object):
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
- responses.append(msg_tx(tx))
+ responses.append(msg_generic(b"tx", tx))
return responses
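
The get()/get_block() split exists so that inv requests can be answered with the stored bytes verbatim (via msg_generic) while callers that need block fields still get a deserialized CBlock. A hedged usage sketch against the updated API:

    from test_framework.mininode import msg_generic

    def block_reply(store, blockhash):
        # 'store' is a BlockStore; answer a MSG_BLOCK inv entry with the stored
        # serialization verbatim (no deserialize/re-serialize); None when unknown.
        raw = store.get(blockhash)
        return msg_generic(b"block", raw) if raw is not None else None
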
diff --git a/qa/rpc-tests/test_framework/blocktools.py b/qa/rpc-tests/test_framework/blocktools.py
index 44232153ac..26cc396315 100644
--- a/qa/rpc-tests/test_framework/blocktools.py
+++ b/qa/rpc-tests/test_framework/blocktools.py
@@ -56,12 +56,27 @@ def create_coinbase(height, pubkey = None):
coinbase.calc_sha256()
return coinbase
-# Create a transaction with an anyone-can-spend output, that spends the
-# nth output of prevtx.
-def create_transaction(prevtx, n, sig, value):
+# Create a transaction.
+# If the scriptPubKey is not specified, make it anyone-can-spend.
+def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
- tx.vout.append(CTxOut(value, b""))
+ tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
+
+def get_legacy_sigopcount_block(block, fAccurate=True):
+ count = 0
+ for tx in block.vtx:
+ count += get_legacy_sigopcount_tx(tx, fAccurate)
+ return count
+
+def get_legacy_sigopcount_tx(tx, fAccurate=True):
+ count = 0
+ for i in tx.vout:
+ count += i.scriptPubKey.GetSigOpCount(fAccurate)
+ for j in tx.vin:
+ # scriptSig might be of type bytes, so convert to CScript for the moment
+ count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
+ return count
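
These helpers let a test budget a block against the sigop limit before submitting it. A small usage sketch (MAX_BLOCK_SIGOPS is assumed to be the usual 20000, and fAccurate=False is assumed to match the legacy consensus-style count):

    from test_framework.blocktools import get_legacy_sigopcount_block

    MAX_BLOCK_SIGOPS = 20000   # assumed consensus limit

    def block_within_sigop_limit(block):
        # Legacy counting: scriptSig and scriptPubKey of every tx in the block.
        return get_legacy_sigopcount_block(block, fAccurate=False) <= MAX_BLOCK_SIGOPS
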
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index 1617daa200..6612b99b84 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -836,6 +836,18 @@ class msg_block(object):
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
+# For cases where a user needs tighter control over what is sent over the wire.
+# Note that the user must supply the command name and the raw serialized data.
+class msg_generic(object):
+ def __init__(self, command, data=None):
+ self.command = command
+ self.data = data
+
+ def serialize(self):
+ return self.data
+
+ def __repr__(self):
+ return "msg_generic()"
class msg_getaddr(object):
command = b"getaddr"
@@ -1303,7 +1315,7 @@ class NodeConn(asyncore.dispatcher):
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
- return
+ raise IOError('Not connected, no pushbuf')
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index fc66ef287d..32fe79efc3 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -156,17 +156,22 @@ def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
+ rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
f.write("regtest=1\n")
- f.write("rpcuser=rt\n")
- f.write("rpcpassword=rt\n")
+ f.write("rpcuser=" + rpc_u + "\n")
+ f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
+def rpc_auth_pair(n):
+ return 'rpcuser๐Ÿ’ป' + str(n), 'rpcpass๐Ÿ”‘' + str(n)
+
def rpc_url(i, rpchost=None):
- return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
+ rpc_u, rpc_p = rpc_auth_pair(i)
+ return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
@@ -477,6 +482,15 @@ def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
return (txid, signresult["hex"], fee)
+def assert_fee_amount(fee, tx_size, fee_per_kB):
+ """Assert the fee was in range"""
+ target_fee = tx_size * fee_per_kB / 1000
+ if fee < target_fee:
+ raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
+ # allow the wallet's estimation to be at most 2 bytes off
+ if fee > (tx_size + 2) * fee_per_kB / 1000:
+ raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
+
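
assert_fee_amount() brackets the paid fee between what tx_size bytes and tx_size + 2 bytes would cost at fee_per_kB, the 2 bytes being slack for the wallet's size estimate. Worked through with made-up numbers:

    def fee_bounds(tx_size, fee_per_kB):
        # Same tolerance as assert_fee_amount(): at least tx_size bytes' worth
        # of fee, at most (tx_size + 2) bytes' worth.
        return tx_size * fee_per_kB / 1000, (tx_size + 2) * fee_per_kB / 1000

    low, high = fee_bounds(250, 0.0002)   # hypothetical 250-byte tx at 0.0002 BTC/kB
    # low == 0.00005 BTC, high == 0.0000504 BTC
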
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index 9dda712f4f..5d96e7a6e5 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -11,12 +11,7 @@ class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
- target_fee = fee_per_byte * tx_size
- if fee < target_fee:
- raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
- # allow the node's estimation to be at most 2 bytes off
- if fee > fee_per_byte * (tx_size + 2):
- raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
+ assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def __init__(self):
@@ -314,6 +309,20 @@ class WalletTest (BitcoinTestFramework):
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
+ # Check modes:
+ # - True: unicode escaped as \u....
+ # - False: unicode directly as UTF-8
+ for mode in [True, False]:
+ self.nodes[0].ensure_ascii = mode
+ # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
+ for s in [u'ั€ั‹ะฑะฐ', u'๐…ก']:
+ addr = self.nodes[0].getaccountaddress(s)
+ label = self.nodes[0].getaccount(addr)
+ assert_equal(label, s)
+ assert(s in self.nodes[0].listaccounts().keys())
+ self.nodes[0].ensure_ascii = True # restore to default
+
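
The two probe strings deliberately cover both Unicode planes: u'рыба' stays in the Basic Multilingual Plane (one \uXXXX escape per character), while the second label is a supplementary-plane character that escapes to a surrogate pair. Illustrated with json.dumps (the supplementary-plane character below is an arbitrary stand-in, not necessarily the one used in the test):

    import json

    print(json.dumps(u'рыба'))          # "\u0440\u044b\u0431\u0430"
    print(json.dumps(u'\U0001F600'))    # "\ud83d\ude00" - one char, two surrogate escapes
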
+ # maintenance tests
maintenance = [
'-rescan',
'-reindex',
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 00f6fe99e0..cebb1c8e5e 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -263,7 +263,7 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP
pinfo->nTime = std::max((int64_t)0, addr.nTime - nTimePenalty);
// add services
- pinfo->nServices |= addr.nServices;
+ pinfo->nServices = ServiceFlags(pinfo->nServices | addr.nServices);
// do not update if no new information is present
if (!addr.nTime || (pinfo->nTime && addr.nTime <= pinfo->nTime))
@@ -502,6 +502,24 @@ void CAddrMan::Connected_(const CService& addr, int64_t nTime)
info.nTime = nTime;
}
+void CAddrMan::SetServices_(const CService& addr, ServiceFlags nServices)
+{
+ CAddrInfo* pinfo = Find(addr);
+
+ // if not found, bail out
+ if (!pinfo)
+ return;
+
+ CAddrInfo& info = *pinfo;
+
+ // check whether we are talking about the exact same CService (including same port)
+ if (info != addr)
+ return;
+
+ // update info
+ info.nServices = nServices;
+}
+
int CAddrMan::RandomInt(int nMax){
return GetRandInt(nMax);
}
diff --git a/src/addrman.h b/src/addrman.h
index c5923e9417..1caf540758 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -256,6 +256,9 @@ protected:
//! Mark an entry as currently-connected-to.
void Connected_(const CService &addr, int64_t nTime);
+ //! Update an entry's service bits.
+ void SetServices_(const CService &addr, ServiceFlags nServices);
+
public:
/**
* serialized format:
@@ -589,6 +592,14 @@ public:
}
}
+ void SetServices(const CService &addr, ServiceFlags nServices)
+ {
+ LOCK(cs);
+ Check();
+ SetServices_(addr, nServices);
+ Check();
+ }
+
};
#endif // BITCOIN_ADDRMAN_H
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 0005115671..8c27a578bb 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -16,14 +16,6 @@
#include "chainparamsseeds.h"
-std::string CDNSSeedData::getHost(uint64_t requiredServiceBits) const {
- //use default host for non-filter-capable seeds or if we use the default service bits (NODE_NETWORK)
- if (!supportsServiceBitsFiltering || requiredServiceBits == NODE_NETWORK)
- return host;
-
- return strprintf("x%x.%s", requiredServiceBits, host);
-}
-
static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesisOutputScript, uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
CMutableTransaction txNew;
diff --git a/src/chainparams.h b/src/chainparams.h
index 7168daaf43..638893e9ad 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -13,11 +13,9 @@
#include <vector>
-class CDNSSeedData {
-public:
+struct CDNSSeedData {
std::string name, host;
bool supportsServiceBitsFiltering;
- std::string getHost(uint64_t requiredServiceBits) const;
CDNSSeedData(const std::string &strName, const std::string &strHost, bool supportsServiceBitsFilteringIn = false) : name(strName), host(strHost), supportsServiceBitsFiltering(supportsServiceBitsFilteringIn) {}
};
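
For context, the removed getHost() implemented the seed-naming convention for service-bit filtering: the default NODE_NETWORK-only query (or a seed that cannot filter) uses the bare hostname, while any other bit pattern is hex-encoded into an "x<bits>." prefix; that formatting presumably now happens at the DNS-seed call site instead. The convention, restated as a sketch (hostname is made up):

    NODE_NETWORK = 1

    def seed_host(host, required_service_bits, supports_filtering=True):
        # Mirrors the removed CDNSSeedData::getHost().
        if not supports_filtering or required_service_bits == NODE_NETWORK:
            return host
        return "x%x.%s" % (required_service_bits, host)

    assert seed_host("seed.example.org", NODE_NETWORK) == "seed.example.org"
    assert seed_host("seed.example.org", 0x5) == "x5.seed.example.org"
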
diff --git a/src/init.cpp b/src/init.cpp
index ec4ce6b6da..b572bfc327 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -479,11 +479,20 @@ std::string HelpMessage(HelpMessageMode mode)
std::string LicenseInfo()
{
+ const std::string URL_SOURCE_CODE = "<https://github.com/bitcoin/bitcoin>";
+ const std::string URL_WEBSITE = "<https://bitcoincore.org>";
// todo: remove urls from translations on next change
return CopyrightHolders(strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" +
"\n" +
- _("This is experimental software.") + "\n" +
+ strprintf(_("Please contribute if you find %s useful. "
+ "Visit %s for further information about the software."),
+ PACKAGE_NAME, URL_WEBSITE) +
+ "\n" +
+ strprintf(_("The source code is available from %s."),
+ URL_SOURCE_CODE) +
"\n" +
+ "\n" +
+ _("This is experimental software.") + "\n" +
_("Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.") + "\n" +
"\n" +
_("This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.") +
@@ -950,7 +959,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
- nLocalServices |= NODE_BLOOM;
+ nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
@@ -1361,7 +1370,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
// after any wallet rescanning has taken place.
if (fPruneMode) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
- nLocalServices &= ~NODE_NETWORK;
+ nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
uiInterface.InitMessage(_("Pruning blockstore..."));
PruneAndFlush();
diff --git a/src/main.cpp b/src/main.cpp
index d4ab32744f..361526f337 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -3501,8 +3501,9 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
-static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp)
+static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
+ if (fNewBlock) *fNewBlock = false;
AssertLockHeld(cs_main);
CBlockIndex *pindexDummy = NULL;
@@ -3531,6 +3532,7 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
if (!fHasMoreWork) return true; // Don't process less-work chains
if (fTooFarAhead) return true; // Block height is too high
}
+ if (fNewBlock) *fNewBlock = true;
if ((!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime())) || !ContextualCheckBlock(block, state, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
@@ -3578,7 +3580,7 @@ static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned
}
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
{
{
LOCK(cs_main);
@@ -3587,9 +3589,11 @@ bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, c
// Store to disk
CBlockIndex *pindex = NULL;
- bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp);
+ bool fNewBlock = false;
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
+ if (fNewBlock) pfrom->nLastBlockTime = GetTime();
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret)
@@ -4159,7 +4163,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
LOCK(cs_main);
CValidationState state;
- if (AcceptBlock(block, state, chainparams, NULL, true, dbp))
+ if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
nLoaded++;
if (state.IsError())
break;
@@ -4192,7 +4196,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
head.ToString());
LOCK(cs_main);
CValidationState dummy;
- if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second))
+ if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
{
nLoaded++;
queue.push_back(block.GetHash());
@@ -4663,7 +4667,22 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
- vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
+ uint64_t nServiceInt;
+ vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
+ pfrom->nServices = ServiceFlags(nServiceInt);
+ if (!pfrom->fInbound)
+ {
+ addrman.SetServices(pfrom->addr, pfrom->nServices);
+ }
+ if (pfrom->nServicesExpected & ~pfrom->nServices)
+ {
+ LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
+ pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
+ strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
+ pfrom->fDisconnect = true;
+ return false;
+ }
+
if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
@@ -4824,6 +4843,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
{
boost::this_thread::interruption_point();
+ if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+ continue;
+
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
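
The version-message change boils down to one bitmask test: every service bit the connection was opened for (nServicesExpected) must be present in what the peer advertises, and relayed addresses are likewise skipped unless they carry REQUIRED_SERVICES. In Python terms (the bit values shown are the standard service-bit assignments, included only for illustration):

    NODE_NETWORK = 1 << 0
    NODE_BLOOM = 1 << 2

    def missing_services(expected, offered):
        # Bits we require but the peer does not offer; non-zero means disconnect.
        return expected & ~offered

    assert missing_services(NODE_NETWORK, NODE_NETWORK | NODE_BLOOM) == 0
    assert missing_services(NODE_NETWORK | NODE_BLOOM, NODE_NETWORK) == NODE_BLOOM
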
@@ -5094,6 +5116,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
vWorkQueue.emplace_back(inv.hash, i);
}
+ pfrom->nLastTXTime = GetTime();
+
LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->id,
tx.GetHash().ToString(),
@@ -5681,6 +5705,11 @@ bool ProcessMessages(CNode* pfrom)
// Allow exceptions from over-long size
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
+ else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
+ {
+ // Allow exceptions from non-canonical encoding
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
+ }
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
diff --git a/src/main.h b/src/main.h
index 191aafe043..a39ffbf56d 100644
--- a/src/main.h
+++ b/src/main.h
@@ -219,7 +219,7 @@ void UnregisterNodeSignals(CNodeSignals& nodeSignals);
* @param[out] dbp The already known disk position of pblock, or NULL if not yet stored.
* @return True if state.IsValid()
*/
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp);
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp);
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
/** Open a block file (blk?????.dat) */
diff --git a/src/miner.cpp b/src/miner.cpp
index eaf29a767b..989ad11a26 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -25,6 +25,7 @@
#include "utilmoneystr.h"
#include "validationinterface.h"
+#include <algorithm>
#include <boost/thread.hpp>
#include <boost/tuple/tuple.hpp>
#include <queue>
@@ -71,231 +72,507 @@ int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParam
return nNewTime - nOldTime;
}
-CBlockTemplate* CreateNewBlock(const CChainParams& chainparams, const CScript& scriptPubKeyIn)
+BlockAssembler::BlockAssembler(const CChainParams& _chainparams)
+ : chainparams(_chainparams)
{
- // Create new block
- std::unique_ptr<CBlockTemplate> pblocktemplate(new CBlockTemplate());
+ // Largest block you're willing to create:
+ nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
+ // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity:
+ nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
+
+ // Minimum block size you want to create; block will be filled with free transactions
+ // until there are no more or the block reaches this size:
+ nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE);
+ nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
+}
+
+void BlockAssembler::resetBlock()
+{
+ inBlock.clear();
+
+ // Reserve space for coinbase tx
+ nBlockSize = 1000;
+ nBlockSigOps = 100;
+
+ // These counters do not include coinbase tx
+ nBlockTx = 0;
+ nFees = 0;
+
+ lastFewTxs = 0;
+ blockFinished = false;
+}
+
+CBlockTemplate* BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
+{
+ resetBlock();
+
+ pblocktemplate.reset(new CBlockTemplate());
+
if(!pblocktemplate.get())
return NULL;
- CBlock *pblock = &pblocktemplate->block; // pointer for convenience
-
- // Create coinbase tx
- CMutableTransaction txNew;
- txNew.vin.resize(1);
- txNew.vin[0].prevout.SetNull();
- txNew.vout.resize(1);
- txNew.vout[0].scriptPubKey = scriptPubKeyIn;
+ pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
pblock->vtx.push_back(CTransaction());
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOps.push_back(-1); // updated at end
- // Largest block you're willing to create:
- unsigned int nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
- // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity:
- nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
+ LOCK2(cs_main, mempool.cs);
+ CBlockIndex* pindexPrev = chainActive.Tip();
+ nHeight = pindexPrev->nHeight + 1;
+
+ pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
+ // -regtest only: allow overriding block.nVersion with
+ // -blockversion=N to test forking scenarios
+ if (chainparams.MineBlocksOnDemand())
+ pblock->nVersion = GetArg("-blockversion", pblock->nVersion);
+
+ pblock->nTime = GetAdjustedTime();
+ const int64_t nMedianTimePast = pindexPrev->GetMedianTimePast();
+
+ nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
+ ? nMedianTimePast
+ : pblock->GetBlockTime();
+
+ addPriorityTxs();
+ addPackageTxs();
+
+ nLastBlockTx = nBlockTx;
+ nLastBlockSize = nBlockSize;
+ LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOps);
+
+ // Create coinbase transaction.
+ CMutableTransaction coinbaseTx;
+ coinbaseTx.vin.resize(1);
+ coinbaseTx.vin[0].prevout.SetNull();
+ coinbaseTx.vout.resize(1);
+ coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
+ coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
+ coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
+ pblock->vtx[0] = coinbaseTx;
+ pblocktemplate->vTxFees[0] = -nFees;
+
+ // Fill in header
+ pblock->hashPrevBlock = pindexPrev->GetBlockHash();
+ UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
+ pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
+ pblock->nNonce = 0;
+ pblocktemplate->vTxSigOps[0] = GetLegacySigOpCount(pblock->vtx[0]);
+
+ CValidationState state;
+ if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
+ throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
+ }
- // How much of the block should be dedicated to high-priority transactions,
- // included regardless of the fees they pay
- unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE);
- nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);
+ return pblocktemplate.release();
+}
- // Minimum block size you want to create; block will be filled with free transactions
- // until there are no more or the block reaches this size:
- unsigned int nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE);
- nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
+bool BlockAssembler::isStillDependent(CTxMemPool::txiter iter)
+{
+ BOOST_FOREACH(CTxMemPool::txiter parent, mempool.GetMemPoolParents(iter))
+ {
+ if (!inBlock.count(parent)) {
+ return true;
+ }
+ }
+ return false;
+}
- // Collect memory pool transactions into the block
- CTxMemPool::setEntries inBlock;
- CTxMemPool::setEntries waitSet;
+void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
+{
+ for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
+ // Only test txs not already in the block
+ if (inBlock.count(*iit)) {
+ testSet.erase(iit++);
+ }
+ else {
+ iit++;
+ }
+ }
+}
- // This vector will be sorted into a priority queue:
- vector<TxCoinAgePriority> vecPriority;
- TxCoinAgePriorityCompare pricomparer;
- std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash> waitPriMap;
- typedef std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash>::iterator waitPriIter;
- double actualPriority = -1;
+bool BlockAssembler::TestPackage(uint64_t packageSize, unsigned int packageSigOps)
+{
+ if (nBlockSize + packageSize >= nBlockMaxSize)
+ return false;
+ if (nBlockSigOps + packageSigOps >= MAX_BLOCK_SIGOPS)
+ return false;
+ return true;
+}
+
+// Block size and sigops have already been tested. Check that all transactions
+// are final.
+bool BlockAssembler::TestPackageFinality(const CTxMemPool::setEntries& package)
+{
+ BOOST_FOREACH (const CTxMemPool::txiter it, package) {
+ if (!IsFinalTx(it->GetTx(), nHeight, nLockTimeCutoff))
+ return false;
+ }
+ return true;
+}
+
+bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter)
+{
+ if (nBlockSize + iter->GetTxSize() >= nBlockMaxSize) {
+ // If the block is so close to full that no more txs will fit
+ // or if we've tried more than 50 times to fill remaining space
+ // then flag that the block is finished
+ if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
+ blockFinished = true;
+ return false;
+ }
+ // Once we're within 1000 bytes of a full block, only look at 50 more txs
+ // to try to fill the remaining space.
+ if (nBlockSize > nBlockMaxSize - 1000) {
+ lastFewTxs++;
+ }
+ return false;
+ }
+
+ if (nBlockSigOps + iter->GetSigOpCount() >= MAX_BLOCK_SIGOPS) {
+ // If the block has room for no more sig ops then
+ // flag that the block is finished
+ if (nBlockSigOps > MAX_BLOCK_SIGOPS - 2) {
+ blockFinished = true;
+ return false;
+ }
+ // Otherwise attempt to find another tx with fewer sigops
+ // to put in the block.
+ return false;
+ }
+
+ // Must check that lock times are still valid
+ // This can be removed once MTP is always enforced
+ // as long as reorgs keep the mempool consistent.
+ if (!IsFinalTx(iter->GetTx(), nHeight, nLockTimeCutoff))
+ return false;
+
+ return true;
+}
+
+void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
+{
+ pblock->vtx.push_back(iter->GetTx());
+ pblocktemplate->vTxFees.push_back(iter->GetFee());
+ pblocktemplate->vTxSigOps.push_back(iter->GetSigOpCount());
+ nBlockSize += iter->GetTxSize();
+ ++nBlockTx;
+ nBlockSigOps += iter->GetSigOpCount();
+ nFees += iter->GetFee();
+ inBlock.insert(iter);
- std::priority_queue<CTxMemPool::txiter, std::vector<CTxMemPool::txiter>, ScoreCompare> clearedTxs;
bool fPrintPriority = GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
- uint64_t nBlockSize = 1000;
- uint64_t nBlockTx = 0;
- unsigned int nBlockSigOps = 100;
- int lastFewTxs = 0;
- CAmount nFees = 0;
+ if (fPrintPriority) {
+ double dPriority = iter->GetPriority(nHeight);
+ CAmount dummy;
+ mempool.ApplyDeltas(iter->GetTx().GetHash(), dPriority, dummy);
+ LogPrintf("priority %.1f fee %s txid %s\n",
+ dPriority,
+ CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
+ iter->GetTx().GetHash().ToString());
+ }
+}
+void BlockAssembler::addScoreTxs()
+{
+ std::priority_queue<CTxMemPool::txiter, std::vector<CTxMemPool::txiter>, ScoreCompare> clearedTxs;
+ CTxMemPool::setEntries waitSet;
+ CTxMemPool::indexed_transaction_set::index<mining_score>::type::iterator mi = mempool.mapTx.get<mining_score>().begin();
+ CTxMemPool::txiter iter;
+ while (!blockFinished && (mi != mempool.mapTx.get<mining_score>().end() || !clearedTxs.empty()))
{
- LOCK2(cs_main, mempool.cs);
- CBlockIndex* pindexPrev = chainActive.Tip();
- const int nHeight = pindexPrev->nHeight + 1;
- pblock->nTime = GetAdjustedTime();
- const int64_t nMedianTimePast = pindexPrev->GetMedianTimePast();
-
- pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
- // -regtest only: allow overriding block.nVersion with
- // -blockversion=N to test forking scenarios
- if (chainparams.MineBlocksOnDemand())
- pblock->nVersion = GetArg("-blockversion", pblock->nVersion);
-
- int64_t nLockTimeCutoff = (STANDARD_LOCKTIME_VERIFY_FLAGS & LOCKTIME_MEDIAN_TIME_PAST)
- ? nMedianTimePast
- : pblock->GetBlockTime();
-
- bool fPriorityBlock = nBlockPrioritySize > 0;
- if (fPriorityBlock) {
- vecPriority.reserve(mempool.mapTx.size());
- for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin();
- mi != mempool.mapTx.end(); ++mi)
- {
- double dPriority = mi->GetPriority(nHeight);
- CAmount dummy;
- mempool.ApplyDeltas(mi->GetTx().GetHash(), dPriority, dummy);
- vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
- }
- std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+ // If no txs that were previously postponed are available to try
+ // again, then try the next highest score tx
+ if (clearedTxs.empty()) {
+ iter = mempool.mapTx.project<0>(mi);
+ mi++;
+ }
+ // If a previously postponed tx is available to try again, then it
+ // has a higher score than any tx not yet tried
+ else {
+ iter = clearedTxs.top();
+ clearedTxs.pop();
}
- CTxMemPool::indexed_transaction_set::index<mining_score>::type::iterator mi = mempool.mapTx.get<mining_score>().begin();
- CTxMemPool::txiter iter;
-
- while (mi != mempool.mapTx.get<mining_score>().end() || !clearedTxs.empty())
- {
- bool priorityTx = false;
- if (fPriorityBlock && !vecPriority.empty()) { // add a tx from priority queue to fill the blockprioritysize
- priorityTx = true;
- iter = vecPriority.front().second;
- actualPriority = vecPriority.front().first;
- std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
- vecPriority.pop_back();
- }
- else if (clearedTxs.empty()) { // add tx with next highest score
- iter = mempool.mapTx.project<0>(mi);
- mi++;
- }
- else { // try to add a previously postponed child tx
- iter = clearedTxs.top();
- clearedTxs.pop();
- }
+ // If tx already in block, skip (added by addPriorityTxs)
+ if (inBlock.count(iter)) {
+ continue;
+ }
+
+ // If tx is dependent on other mempool txs which haven't yet been included
+ // then put it in the waitSet
+ if (isStillDependent(iter)) {
+ waitSet.insert(iter);
+ continue;
+ }
- if (inBlock.count(iter))
- continue; // could have been added to the priorityBlock
+ // If the fee rate is below the min fee rate for mining, then we're done
+ // adding txs based on score (fee rate)
+ if (iter->GetModifiedFee() < ::minRelayTxFee.GetFee(iter->GetTxSize()) && nBlockSize >= nBlockMinSize) {
+ return;
+ }
- const CTransaction& tx = iter->GetTx();
+ // If this tx fits in the block add it, otherwise keep looping
+ if (TestForBlock(iter)) {
+ AddToBlock(iter);
- bool fOrphan = false;
- BOOST_FOREACH(CTxMemPool::txiter parent, mempool.GetMemPoolParents(iter))
+ // This tx was successfully added, so
+ // add transactions that depend on this one to the priority queue to try again
+ BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
{
- if (!inBlock.count(parent)) {
- fOrphan = true;
- break;
+ if (waitSet.count(child)) {
+ clearedTxs.push(child);
+ waitSet.erase(child);
}
}
- if (fOrphan) {
- if (priorityTx)
- waitPriMap.insert(std::make_pair(iter,actualPriority));
- else
- waitSet.insert(iter);
+ }
+ }
+}
+
+void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
+ indexed_modified_transaction_set &mapModifiedTx)
+{
+ BOOST_FOREACH(const CTxMemPool::txiter it, alreadyAdded) {
+ CTxMemPool::setEntries descendants;
+ mempool.CalculateDescendants(it, descendants);
+ // Insert all descendants (not yet in block) into the modified set
+ BOOST_FOREACH(CTxMemPool::txiter desc, descendants) {
+ if (alreadyAdded.count(desc))
continue;
+ modtxiter mit = mapModifiedTx.find(desc);
+ if (mit == mapModifiedTx.end()) {
+ CTxMemPoolModifiedEntry modEntry(desc);
+ modEntry.nSizeWithAncestors -= it->GetTxSize();
+ modEntry.nModFeesWithAncestors -= it->GetModifiedFee();
+ modEntry.nSigOpCountWithAncestors -= it->GetSigOpCount();
+ mapModifiedTx.insert(modEntry);
+ } else {
+ mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
}
+ }
+ }
+}
- unsigned int nTxSize = iter->GetTxSize();
- if (fPriorityBlock &&
- (nBlockSize + nTxSize >= nBlockPrioritySize || !AllowFree(actualPriority))) {
- fPriorityBlock = false;
- waitPriMap.clear();
- }
- if (!priorityTx &&
- (iter->GetModifiedFee() < ::minRelayTxFee.GetFee(nTxSize) && nBlockSize >= nBlockMinSize)) {
- break;
- }
- if (nBlockSize + nTxSize >= nBlockMaxSize) {
- if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
- break;
- }
- // Once we're within 1000 bytes of a full block, only look at 50 more txs
- // to try to fill the remaining space.
- if (nBlockSize > nBlockMaxSize - 1000) {
- lastFewTxs++;
- }
- continue;
+// Skip entries in mapTx that are already in a block or are present
+// in mapModifiedTx (which implies that the mapTx ancestor state is
+// stale due to ancestor inclusion in the block)
+// Also skip transactions that we've already failed to add. This can happen if
+// we consider a transaction in mapModifiedTx and it fails: we can then
+// potentially consider it again while walking mapTx. It's currently
+// guaranteed to fail again, but as a belt-and-suspenders check we put it in
+// failedTx and avoid re-evaluation, since the re-evaluation would be using
+// cached size/sigops/fee values that are not actually correct.
+bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx)
+{
+ assert (it != mempool.mapTx.end());
+ if (mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it))
+ return true;
+ return false;
+}
+
+void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries)
+{
+ // Sort package by ancestor count
+ // If a transaction A depends on transaction B, then A's ancestor count
+ // must be greater than B's. So this is sufficient to validly order the
+ // transactions for block inclusion.
+ sortedEntries.clear();
+ sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
+ std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
+}
+
+// This transaction selection algorithm orders the mempool based
+// on feerate of a transaction including all unconfirmed ancestors.
+// Since we don't remove transactions from the mempool as we select them
+// for block inclusion, we need an alternate method of updating the feerate
+// of a transaction with its not-yet-selected ancestors as we go.
+// This is accomplished by walking the in-mempool descendants of selected
+// transactions and storing a temporary modified state in mapModifiedTxs.
+// Each time through the loop, we compare the best transaction in
+// mapModifiedTxs with the next transaction in the mempool to decide what
+// transaction package to work on next.
+void BlockAssembler::addPackageTxs()
+{
+ // mapModifiedTx will store sorted packages after they are modified
+ // because some of their txs are already in the block
+ indexed_modified_transaction_set mapModifiedTx;
+ // Keep track of entries that failed inclusion, to avoid duplicate work
+ CTxMemPool::setEntries failedTx;
+
+ // Start by adding all descendants of previously added txs to mapModifiedTx
+ // and modifying them for their already included ancestors
+ UpdatePackagesForAdded(inBlock, mapModifiedTx);
+
+ CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
+ CTxMemPool::txiter iter;
+ while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty())
+ {
+ // First try to find a new transaction in mapTx to evaluate.
+ if (mi != mempool.mapTx.get<ancestor_score>().end() &&
+ SkipMapTxEntry(mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) {
+ ++mi;
+ continue;
+ }
+
+ // Now that mi is not stale, determine which transaction to evaluate:
+ // the next entry from mapTx, or the best from mapModifiedTx?
+ bool fUsingModified = false;
+
+ modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
+ if (mi == mempool.mapTx.get<ancestor_score>().end()) {
+ // We're out of entries in mapTx; use the entry from mapModifiedTx
+ iter = modit->iter;
+ fUsingModified = true;
+ } else {
+ // Try to compare the mapTx entry to the mapModifiedTx entry
+ iter = mempool.mapTx.project<0>(mi);
+ if (modit != mapModifiedTx.get<ancestor_score>().end() &&
+ CompareModifiedEntry()(*modit, CTxMemPoolModifiedEntry(iter))) {
+ // The best entry in mapModifiedTx has higher score
+ // than the one from mapTx.
+ // Switch which transaction (package) to consider
+ iter = modit->iter;
+ fUsingModified = true;
+ } else {
+ // Either no entry in mapModifiedTx, or it's worse than mapTx.
+ // Increment mi for the next loop iteration.
+ ++mi;
}
+ }
- if (!IsFinalTx(tx, nHeight, nLockTimeCutoff))
- continue;
+ // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
+ // contain anything that is inBlock.
+ assert(!inBlock.count(iter));
+
+ uint64_t packageSize = iter->GetSizeWithAncestors();
+ CAmount packageFees = iter->GetModFeesWithAncestors();
+ unsigned int packageSigOps = iter->GetSigOpCountWithAncestors();
+ if (fUsingModified) {
+ packageSize = modit->nSizeWithAncestors;
+ packageFees = modit->nModFeesWithAncestors;
+ packageSigOps = modit->nSigOpCountWithAncestors;
+ }
- unsigned int nTxSigOps = iter->GetSigOpCount();
- if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS) {
- if (nBlockSigOps > MAX_BLOCK_SIGOPS - 2) {
- break;
- }
- continue;
+ if (packageFees < ::minRelayTxFee.GetFee(packageSize) && nBlockSize >= nBlockMinSize) {
+ // Everything else we might consider has a lower fee rate
+ return;
+ }
+
+ if (!TestPackage(packageSize, packageSigOps)) {
+ if (fUsingModified) {
+ // Since we always look at the best entry in mapModifiedTx,
+ // we must erase failed entries so that we can consider the
+ // next best entry on the next loop iteration
+ mapModifiedTx.get<ancestor_score>().erase(modit);
+ failedTx.insert(iter);
}
+ continue;
+ }
- CAmount nTxFees = iter->GetFee();
- // Added
- pblock->vtx.push_back(tx);
- pblocktemplate->vTxFees.push_back(nTxFees);
- pblocktemplate->vTxSigOps.push_back(nTxSigOps);
- nBlockSize += nTxSize;
- ++nBlockTx;
- nBlockSigOps += nTxSigOps;
- nFees += nTxFees;
-
- if (fPrintPriority)
- {
- double dPriority = iter->GetPriority(nHeight);
- CAmount dummy;
- mempool.ApplyDeltas(tx.GetHash(), dPriority, dummy);
- LogPrintf("priority %.1f fee %s txid %s\n",
- dPriority , CFeeRate(iter->GetModifiedFee(), nTxSize).ToString(), tx.GetHash().ToString());
+ CTxMemPool::setEntries ancestors;
+ uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
+ std::string dummy;
+ mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
+
+ onlyUnconfirmed(ancestors);
+ ancestors.insert(iter);
+
+ // Test that all txs in the package are final
+ if (!TestPackageFinality(ancestors)) {
+ if (fUsingModified) {
+ mapModifiedTx.get<ancestor_score>().erase(modit);
+ failedTx.insert(iter);
}
+ continue;
+ }
+
+ // Package can be added. Sort the entries in a valid order.
+ vector<CTxMemPool::txiter> sortedEntries;
+ SortForBlock(ancestors, iter, sortedEntries);
+
+ for (size_t i=0; i<sortedEntries.size(); ++i) {
+ AddToBlock(sortedEntries[i]);
+ // Erase from the modified set, if present
+ mapModifiedTx.erase(sortedEntries[i]);
+ }
+
+ // Update transactions that depend on each of these
+ UpdatePackagesForAdded(ancestors, mapModifiedTx);
+ }
+}
+
+void BlockAssembler::addPriorityTxs()
+{
+ // How much of the block should be dedicated to high-priority transactions,
+ // included regardless of the fees they pay
+ unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", DEFAULT_BLOCK_PRIORITY_SIZE);
+ nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);
+
+ if (nBlockPrioritySize == 0) {
+ return;
+ }
- inBlock.insert(iter);
+ // This vector will be sorted into a priority queue:
+ vector<TxCoinAgePriority> vecPriority;
+ TxCoinAgePriorityCompare pricomparer;
+ std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash> waitPriMap;
+ typedef std::map<CTxMemPool::txiter, double, CTxMemPool::CompareIteratorByHash>::iterator waitPriIter;
+ double actualPriority = -1;
+
+ vecPriority.reserve(mempool.mapTx.size());
+ for (CTxMemPool::indexed_transaction_set::iterator mi = mempool.mapTx.begin();
+ mi != mempool.mapTx.end(); ++mi)
+ {
+ double dPriority = mi->GetPriority(nHeight);
+ CAmount dummy;
+ mempool.ApplyDeltas(mi->GetTx().GetHash(), dPriority, dummy);
+ vecPriority.push_back(TxCoinAgePriority(dPriority, mi));
+ }
+ std::make_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+
+ CTxMemPool::txiter iter;
+ while (!vecPriority.empty() && !blockFinished) { // add a tx from priority queue to fill the blockprioritysize
+ iter = vecPriority.front().second;
+ actualPriority = vecPriority.front().first;
+ std::pop_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+ vecPriority.pop_back();
+
+ // If tx already in block, skip
+ if (inBlock.count(iter)) {
+ assert(false); // shouldn't happen for priority txs
+ continue;
+ }
- // Add transactions that depend on this one to the priority queue
+ // If tx is dependent on other mempool txs which haven't yet been included
+ // then put it in the waitSet
+ if (isStillDependent(iter)) {
+ waitPriMap.insert(std::make_pair(iter, actualPriority));
+ continue;
+ }
+
+ // If this tx fits in the block add it, otherwise keep looping
+ if (TestForBlock(iter)) {
+ AddToBlock(iter);
+
+ // If now that this tx is added we've surpassed our desired priority size
+ // or have dropped below the AllowFreeThreshold, then we're done adding priority txs
+ if (nBlockSize >= nBlockPrioritySize || !AllowFree(actualPriority)) {
+ return;
+ }
+
+ // This tx was successfully added, so
+ // add transactions that depend on this one to the priority queue to try again
BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
{
- if (fPriorityBlock) {
- waitPriIter wpiter = waitPriMap.find(child);
- if (wpiter != waitPriMap.end()) {
- vecPriority.push_back(TxCoinAgePriority(wpiter->second,child));
- std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
- waitPriMap.erase(wpiter);
- }
- }
- else {
- if (waitSet.count(child)) {
- clearedTxs.push(child);
- waitSet.erase(child);
- }
+ waitPriIter wpiter = waitPriMap.find(child);
+ if (wpiter != waitPriMap.end()) {
+ vecPriority.push_back(TxCoinAgePriority(wpiter->second,child));
+ std::push_heap(vecPriority.begin(), vecPriority.end(), pricomparer);
+ waitPriMap.erase(wpiter);
}
}
}
- nLastBlockTx = nBlockTx;
- nLastBlockSize = nBlockSize;
- LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOps);
-
- // Compute final coinbase transaction.
- txNew.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
- txNew.vin[0].scriptSig = CScript() << nHeight << OP_0;
- pblock->vtx[0] = txNew;
- pblocktemplate->vTxFees[0] = -nFees;
-
- // Fill in header
- pblock->hashPrevBlock = pindexPrev->GetBlockHash();
- UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
- pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
- pblock->nNonce = 0;
- pblocktemplate->vTxSigOps[0] = GetLegacySigOpCount(pblock->vtx[0]);
-
- CValidationState state;
- if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
- throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
- }
}
-
- return pblocktemplate.release();
}
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
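
To make the addPackageTxs() flow above more concrete, here is a deliberately naive Python sketch of ancestor-package selection: pick the not-yet-included transaction whose package (itself plus its unselected ancestors) has the best feerate, then admit the whole package parents-first. It ignores the mapModifiedTx bookkeeping, sigop limits and fee floors; all names and the toy mempool are invented.

    from collections import namedtuple

    Tx = namedtuple('Tx', 'fee size parents')   # toy stand-in for a mempool entry

    def ancestors(txid, entries, selected):
        # Unconfirmed, not-yet-selected ancestors of txid, txid included.
        todo, out = [txid], set()
        while todo:
            t = todo.pop()
            if t in out or t in selected:
                continue
            out.add(t)
            todo.extend(entries[t].parents)
        return out

    def assemble(entries, max_size):
        selected, order, used = set(), [], 0
        while True:
            best_pkg, best_rate = None, -1.0
            for txid in entries:
                if txid in selected:
                    continue
                pkg = ancestors(txid, entries, selected)
                fee = sum(entries[t].fee for t in pkg)
                size = sum(entries[t].size for t in pkg)
                if fee / size > best_rate and used + size <= max_size:
                    best_pkg, best_rate = pkg, fee / size
            if best_pkg is None:
                return order
            # Parents before children: sort the package by ancestor count.
            for t in sorted(best_pkg, key=lambda t: len(ancestors(t, entries, set()))):
                selected.add(t)
                order.append(t)
                used += entries[t].size

    # Child 'b' pays for its low-fee parent 'a', so the pair beats 'c' on feerate.
    mempool = {'a': Tx(100, 250, []), 'b': Tx(4000, 250, ['a']), 'c': Tx(1500, 250, [])}
    print(assemble(mempool, 1000))   # ['a', 'b', 'c']
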
diff --git a/src/miner.h b/src/miner.h
index cd0f136625..a9fea85304 100644
--- a/src/miner.h
+++ b/src/miner.h
@@ -7,14 +7,19 @@
#define BITCOIN_MINER_H
#include "primitives/block.h"
+#include "txmempool.h"
#include <stdint.h>
+#include <memory>
+#include "boost/multi_index_container.hpp"
+#include "boost/multi_index/ordered_index.hpp"
class CBlockIndex;
class CChainParams;
class CReserveKey;
class CScript;
class CWallet;
+
namespace Consensus { struct Params; };
static const bool DEFAULT_PRINTPRIORITY = false;
@@ -26,8 +31,175 @@ struct CBlockTemplate
std::vector<int64_t> vTxSigOps;
};
+// Container for tracking updates to ancestor feerate as we include (parent)
+// transactions in a block
+struct CTxMemPoolModifiedEntry {
+ CTxMemPoolModifiedEntry(CTxMemPool::txiter entry)
+ {
+ iter = entry;
+ nSizeWithAncestors = entry->GetSizeWithAncestors();
+ nModFeesWithAncestors = entry->GetModFeesWithAncestors();
+ nSigOpCountWithAncestors = entry->GetSigOpCountWithAncestors();
+ }
+
+ CTxMemPool::txiter iter;
+ uint64_t nSizeWithAncestors;
+ CAmount nModFeesWithAncestors;
+ unsigned int nSigOpCountWithAncestors;
+};
+
+/** Comparator for CTxMemPool::txiter objects.
+ * It simply compares the internal memory address of the CTxMemPoolEntry object
+ * pointed to. The resulting order therefore carries no meaning of its own; it is
+ * only useful for using the iterators as keys in other indexes.
+ */
+struct CompareCTxMemPoolIter {
+ bool operator()(const CTxMemPool::txiter& a, const CTxMemPool::txiter& b) const
+ {
+ return &(*a) < &(*b);
+ }
+};
+
+struct modifiedentry_iter {
+ typedef CTxMemPool::txiter result_type;
+ result_type operator() (const CTxMemPoolModifiedEntry &entry) const
+ {
+ return entry.iter;
+ }
+};
+
+// This matches the calculation in CompareTxMemPoolEntryByAncestorFee,
+// except operating on CTxMemPoolModifiedEntry.
+// TODO: refactor to avoid duplication of this logic.
+struct CompareModifiedEntry {
+ bool operator()(const CTxMemPoolModifiedEntry &a, const CTxMemPoolModifiedEntry &b)
+ {
+ double f1 = (double)a.nModFeesWithAncestors * b.nSizeWithAncestors;
+ double f2 = (double)b.nModFeesWithAncestors * a.nSizeWithAncestors;
+ if (f1 == f2) {
+ return CTxMemPool::CompareIteratorByHash()(a.iter, b.iter);
+ }
+ return f1 > f2;
+ }
+};
+
+// A comparator that sorts transactions based on number of ancestors.
+// This is sufficient to sort an ancestor package in an order that is valid
+// to appear in a block.
+struct CompareTxIterByAncestorCount {
+ bool operator()(const CTxMemPool::txiter &a, const CTxMemPool::txiter &b)
+ {
+ if (a->GetCountWithAncestors() != b->GetCountWithAncestors())
+ return a->GetCountWithAncestors() < b->GetCountWithAncestors();
+ return CTxMemPool::CompareIteratorByHash()(a, b);
+ }
+};
+
+typedef boost::multi_index_container<
+ CTxMemPoolModifiedEntry,
+ boost::multi_index::indexed_by<
+ boost::multi_index::ordered_unique<
+ modifiedentry_iter,
+ CompareCTxMemPoolIter
+ >,
+ // sorted by modified ancestor fee rate
+ boost::multi_index::ordered_non_unique<
+ // Reuse same tag from CTxMemPool's similar index
+ boost::multi_index::tag<ancestor_score>,
+ boost::multi_index::identity<CTxMemPoolModifiedEntry>,
+ CompareModifiedEntry
+ >
+ >
+> indexed_modified_transaction_set;
+
+typedef indexed_modified_transaction_set::nth_index<0>::type::iterator modtxiter;
+typedef indexed_modified_transaction_set::index<ancestor_score>::type::iterator modtxscoreiter;
+
+struct update_for_parent_inclusion
+{
+ update_for_parent_inclusion(CTxMemPool::txiter it) : iter(it) {}
+
+ void operator() (CTxMemPoolModifiedEntry &e)
+ {
+ e.nModFeesWithAncestors -= iter->GetFee();
+ e.nSizeWithAncestors -= iter->GetTxSize();
+ e.nSigOpCountWithAncestors -= iter->GetSigOpCount();
+ }
+
+ CTxMemPool::txiter iter;
+};
+
/** Generate a new block, without valid proof-of-work */
-CBlockTemplate* CreateNewBlock(const CChainParams& chainparams, const CScript& scriptPubKeyIn);
+class BlockAssembler
+{
+private:
+ // The constructed block template
+ std::unique_ptr<CBlockTemplate> pblocktemplate;
+ // A convenience pointer that always refers to the CBlock in pblocktemplate
+ CBlock* pblock;
+
+ // Configuration parameters for the block size
+ unsigned int nBlockMaxSize, nBlockMinSize;
+
+ // Information on the current status of the block
+ uint64_t nBlockSize;
+ uint64_t nBlockTx;
+ unsigned int nBlockSigOps;
+ CAmount nFees;
+ CTxMemPool::setEntries inBlock;
+
+ // Chain context for the block
+ int nHeight;
+ int64_t nLockTimeCutoff;
+ const CChainParams& chainparams;
+
+ // Variables used for addScoreTxs and addPriorityTxs
+ int lastFewTxs;
+ bool blockFinished;
+
+public:
+ BlockAssembler(const CChainParams& chainparams);
+ /** Construct a new block template with coinbase to scriptPubKeyIn */
+ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn);
+
+private:
+ // utility functions
+ /** Clear the block's state and prepare for assembling a new block */
+ void resetBlock();
+ /** Add a tx to the block */
+ void AddToBlock(CTxMemPool::txiter iter);
+
+ // Methods for how to add transactions to a block.
+ /** Add transactions based on modified feerate */
+ void addScoreTxs();
+ /** Add transactions based on tx "priority" */
+ void addPriorityTxs();
+ /** Add transactions based on feerate including unconfirmed ancestors */
+ void addPackageTxs();
+
+ // helper function for addScoreTxs and addPriorityTxs
+ /** Test if tx will still "fit" in the block */
+ bool TestForBlock(CTxMemPool::txiter iter);
+ /** Test if tx still has unconfirmed parents not yet in block */
+ bool isStillDependent(CTxMemPool::txiter iter);
+
+ // helper functions for addPackageTxs()
+ /** Remove confirmed (inBlock) entries from given set */
+ void onlyUnconfirmed(CTxMemPool::setEntries& testSet);
+ /** Test if a new package would "fit" in the block */
+ bool TestPackage(uint64_t packageSize, unsigned int packageSigOps);
+ /** Test if a set of transactions are all final */
+ bool TestPackageFinality(const CTxMemPool::setEntries& package);
+ /** Return true if given transaction from mapTx has already been evaluated,
+ * or if the transaction's cached data in mapTx is incorrect. */
+ bool SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx);
+ /** Sort the package in an order that is valid to appear in a block */
+ void SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries);
+ /** Add descendants of given transactions to mapModifiedTx with ancestor
+ * state updated assuming given transactions are inBlock. */
+ void UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, indexed_modified_transaction_set &mapModifiedTx);
+};
+
/** Modify the extranonce in a block */
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce);
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);
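Note on the ancestor-score ordering used by the new BlockAssembler: CompareModifiedEntry avoids dividing fee by size and instead cross-multiplies, so packages are ordered by ancestor feerate without any division. A minimal, self-contained sketch of the same idea, using an illustrative Entry struct rather than the real CTxMemPoolModifiedEntry:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for a mempool entry's ancestor state.
    struct Entry {
        int64_t modFeesWithAncestors;   // satoshis, including ancestors
        uint64_t sizeWithAncestors;     // bytes, including ancestors
        unsigned id;                    // tie-breaker (the real code uses txid order)
    };

    // Order by ancestor feerate (fees/size), highest first, comparing
    // cross-products so no division is needed.
    static bool ByAncestorScore(const Entry& a, const Entry& b)
    {
        double f1 = (double)a.modFeesWithAncestors * b.sizeWithAncestors;
        double f2 = (double)b.modFeesWithAncestors * a.sizeWithAncestors;
        if (f1 == f2) return a.id < b.id;
        return f1 > f2;
    }

    int main()
    {
        std::vector<Entry> v{{1000, 250, 0}, {50000, 400, 1}, {10000, 250, 2}};
        std::sort(v.begin(), v.end(), ByAncestorScore);
        for (const Entry& e : v)
            std::printf("id=%u feerate=%.2f sat/B\n", e.id,
                        (double)e.modFeesWithAncestors / e.sizeWithAncestors);
        return 0;
    }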
diff --git a/src/net.cpp b/src/net.cpp
index 173eba57c8..4eca3d75cc 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -71,12 +71,15 @@ namespace {
const static std::string NET_MESSAGE_COMMAND_OTHER = "*other*";
+/** Services this node implementation cares about */
+static const ServiceFlags nRelevantServices = NODE_NETWORK;
+
//
// Global state variables
//
bool fDiscover = true;
bool fListen = true;
-uint64_t nLocalServices = NODE_NETWORK;
+ServiceFlags nLocalServices = NODE_NETWORK;
bool fRelayTxes = true;
CCriticalSection cs_mapLocalHost;
std::map<CNetAddr, LocalServiceInfo> mapLocalHost;
@@ -159,7 +162,7 @@ static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn
{
struct in6_addr ip;
memcpy(&ip, i->addr, sizeof(ip));
- CAddress addr(CService(ip, i->port));
+ CAddress addr(CService(ip, i->port), NODE_NETWORK);
addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
vSeedsOut.push_back(addr);
}
@@ -172,13 +175,12 @@ static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn
// one by discovery.
CAddress GetLocalAddress(const CNetAddr *paddrPeer)
{
- CAddress ret(CService("0.0.0.0",GetListenPort()),0);
+ CAddress ret(CService("0.0.0.0",GetListenPort()), NODE_NONE);
CService addr;
if (GetLocal(addr, paddrPeer))
{
- ret = CAddress(addr);
+ ret = CAddress(addr, nLocalServices);
}
- ret.nServices = nLocalServices;
ret.nTime = GetAdjustedTime();
return ret;
}
@@ -398,6 +400,26 @@ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure
return NULL;
}
+ if (pszDest && addrConnect.IsValid()) {
+ // It is possible that we already have a connection to the IP/port pszDest resolved to.
+ // In that case, drop the connection that was just created, and return the existing CNode instead.
+ // Also store the name we used to connect in that CNode, so that future FindNode() calls to that
+ // name catch this early.
+ CNode* pnode = FindNode((CService)addrConnect);
+ if (pnode)
+ {
+ pnode->AddRef();
+ {
+ LOCK(cs_vNodes);
+ if (pnode->addrName.empty()) {
+ pnode->addrName = std::string(pszDest);
+ }
+ }
+ CloseSocket(hSocket);
+ return pnode;
+ }
+ }
+
addrman.Attempt(addrConnect, fCountFailure);
// Add node
@@ -409,6 +431,7 @@ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure
vNodes.push_back(pnode);
}
+ pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices);
pnode->nTimeConnected = GetTime();
return pnode;
@@ -461,14 +484,14 @@ void CNode::PushVersion()
int nBestHeight = GetNodeSignals().GetHeight().get_value_or(0);
int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime());
- CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0",0)));
+ CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0", 0), addr.nServices));
CAddress addrMe = GetLocalAddress(&addr);
GetRandBytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
if (fLogIPs)
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), addrYou.ToString(), id);
else
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), id);
- PushMessage(NetMsgType::VERSION, PROTOCOL_VERSION, nLocalServices, nTime, addrYou, addrMe,
+ PushMessage(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalServices, nTime, addrYou, addrMe,
nLocalHostNonce, strSubVersion, nBestHeight, ::fRelayTxes);
}
@@ -838,6 +861,11 @@ struct NodeEvictionCandidate
NodeId id;
int64_t nTimeConnected;
int64_t nMinPingUsecTime;
+ int64_t nLastBlockTime;
+ int64_t nLastTXTime;
+ bool fNetworkNode;
+ bool fRelayTxes;
+ bool fBloomFilter;
CAddress addr;
uint64_t nKeyedNetGroup;
};
@@ -854,7 +882,24 @@ static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, cons
static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) {
return a.nKeyedNetGroup < b.nKeyedNetGroup;
-};
+}
+
+static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block.
+ if (a.nLastBlockTime != b.nLastBlockTime) return a.nLastBlockTime < b.nLastBlockTime;
+ if (a.fNetworkNode != b.fNetworkNode) return b.fNetworkNode;
+ return a.nTimeConnected > b.nTimeConnected;
+}
+
+static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
+ if (a.nLastTXTime != b.nLastTXTime) return a.nLastTXTime < b.nLastTXTime;
+ if (a.fRelayTxes != b.fRelayTxes) return b.fRelayTxes;
+ if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
+ return a.nTimeConnected > b.nTimeConnected;
+}
/** Try to find a connection to evict when the node is full.
* Extreme care must be taken to avoid opening the node to attacker
@@ -864,7 +909,7 @@ static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvict
* to forge. In order to partition a node the attacker must be
* simultaneously better at all of them than honest peers.
*/
-static bool AttemptToEvictConnection(bool fPreferNewConnection) {
+static bool AttemptToEvictConnection() {
std::vector<NodeEvictionCandidate> vEvictionCandidates;
{
LOCK(cs_vNodes);
@@ -876,7 +921,9 @@ static bool AttemptToEvictConnection(bool fPreferNewConnection) {
continue;
if (node->fDisconnect)
continue;
- NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime, node->addr, node->nKeyedNetGroup};
+ NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime,
+ node->nLastBlockTime, node->nLastTXTime, node->fNetworkNode,
+ node->fRelayTxes, node->pfilter != NULL, node->addr, node->nKeyedNetGroup};
vEvictionCandidates.push_back(candidate);
}
}
@@ -899,6 +946,20 @@ static bool AttemptToEvictConnection(bool fPreferNewConnection) {
if (vEvictionCandidates.empty()) return false;
+ // Protect 4 nodes that most recently sent us transactions.
+ // An attacker cannot manipulate this metric without performing useful work.
+ std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeTXTime);
+ vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end());
+
+ if (vEvictionCandidates.empty()) return false;
+
+ // Protect 4 nodes that most recently sent us blocks.
+ // An attacker cannot manipulate this metric without performing useful work.
+ std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeBlockTime);
+ vEvictionCandidates.erase(vEvictionCandidates.end() - std::min(4, static_cast<int>(vEvictionCandidates.size())), vEvictionCandidates.end());
+
+ if (vEvictionCandidates.empty()) return false;
+
// Protect the half of the remaining nodes which have been connected the longest.
// This replicates the non-eviction implicit behavior, and precludes attacks that start later.
std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeTimeConnected);
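A simplified, self-contained sketch of the protection steps added above: sort the candidates by a metric, remove (protect) the four best entries, and repeat with the next metric before halving by connection time. The Candidate struct and sample data are illustrative only, not the real NodeEvictionCandidate:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Candidate {
        int64_t lastTxTime;
        int64_t lastBlockTime;
        int64_t timeConnected;
    };

    // Remove the 4 candidates that score best under cmp (they are protected
    // from eviction); whatever remains is still eligible.
    template <typename Cmp>
    void ProtectFour(std::vector<Candidate>& v, Cmp cmp)
    {
        std::sort(v.begin(), v.end(), cmp);
        size_t n = std::min<size_t>(4, v.size());
        v.erase(v.end() - n, v.end());
    }

    int main()
    {
        std::vector<Candidate> cands(12, Candidate{0, 0, 0});
        // Protect recent transaction relayers, then recent block relayers,
        // then the longest-connected half (mirroring the order in net.cpp).
        ProtectFour(cands, [](const Candidate& a, const Candidate& b) {
            return a.lastTxTime < b.lastTxTime;
        });
        ProtectFour(cands, [](const Candidate& a, const Candidate& b) {
            return a.lastBlockTime < b.lastBlockTime;
        });
        std::sort(cands.begin(), cands.end(), [](const Candidate& a, const Candidate& b) {
            return a.timeConnected > b.timeConnected;
        });
        cands.erase(cands.end() - cands.size() / 2, cands.end());
        std::printf("%zu candidates remain eligible for eviction\n", cands.size());
        return 0;
    }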
@@ -927,13 +988,6 @@ static bool AttemptToEvictConnection(bool fPreferNewConnection) {
// Reduce to the network group with the most connections
vEvictionCandidates = std::move(mapAddrCounts[naMostConnections]);
- // Do not disconnect peers if there is only one unprotected connection from their network group.
- // This step excessively favors netgroup diversity, and should be removed once more protective criteria are established.
- if (vEvictionCandidates.size() <= 1)
- // unless we prefer the new connection (for whitelisted peers)
- if (!fPreferNewConnection)
- return false;
-
// Disconnect from the network group with the most connections
NodeId evicted = vEvictionCandidates.front().id;
LOCK(cs_vNodes);
@@ -999,7 +1053,7 @@ static void AcceptConnection(const ListenSocket& hListenSocket) {
if (nInbound >= nMaxInbound)
{
- if (!AttemptToEvictConnection(whitelisted)) {
+ if (!AttemptToEvictConnection()) {
// No connection to evict, disconnect the new connection
LogPrint("net", "failed to find an eviction candidate - connection dropped (full)\n");
CloseSocket(hSocket);
@@ -1412,6 +1466,18 @@ void MapPort(bool)
+static std::string GetDNSHost(const CDNSSeedData& data, ServiceFlags* requiredServiceBits)
+{
+ // Use the default host for non-filter-capable seeds, or when only the default service bits (NODE_NETWORK) are required
+ if (!data.supportsServiceBitsFiltering || *requiredServiceBits == NODE_NETWORK) {
+ *requiredServiceBits = NODE_NETWORK;
+ return data.host;
+ }
+
+ return strprintf("x%x.%s", *requiredServiceBits, data.host);
+}
+
+
void ThreadDNSAddressSeed()
{
// goal: only query DNS seeds if address need is acute
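The x-prefixed hostname built by GetDNSHost above lets a filter-capable seeder return only peers advertising the requested service bits. A tiny standalone sketch of the naming scheme; the host name and the 0x5 flag value are illustrative, not real seeders or real flags beyond NODE_NETWORK (bit 0):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    enum ServiceFlags : uint64_t {
        NODE_NONE = 0,
        NODE_NETWORK = (1 << 0),
    };

    // Mirror of the naming logic: default host for the default bits,
    // otherwise "x<hex bits>.<host>".
    std::string SeedHost(const std::string& host, uint64_t requiredBits)
    {
        if (requiredBits == NODE_NETWORK) return host;
        char buf[256];
        std::snprintf(buf, sizeof(buf), "x%" PRIx64 ".%s", requiredBits, host.c_str());
        return buf;
    }

    int main()
    {
        std::printf("%s\n", SeedHost("seed.example.com", NODE_NETWORK).c_str()); // seed.example.com
        std::printf("%s\n", SeedHost("seed.example.com", 0x5).c_str());          // x5.seed.example.com
        return 0;
    }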
@@ -1437,8 +1503,8 @@ void ThreadDNSAddressSeed()
} else {
std::vector<CNetAddr> vIPs;
std::vector<CAddress> vAdd;
- uint64_t requiredServiceBits = NODE_NETWORK;
- if (LookupHost(seed.getHost(requiredServiceBits).c_str(), vIPs, 0, true))
+ ServiceFlags requiredServiceBits = nRelevantServices;
+ if (LookupHost(GetDNSHost(seed, &requiredServiceBits).c_str(), vIPs, 0, true))
{
BOOST_FOREACH(const CNetAddr& ip, vIPs)
{
@@ -1520,7 +1586,7 @@ void ThreadOpenConnections()
ProcessOneShot();
BOOST_FOREACH(const std::string& strAddr, mapMultiArgs["-connect"])
{
- CAddress addr;
+ CAddress addr(CService(), NODE_NONE);
OpenNetworkConnection(addr, false, NULL, strAddr.c_str());
for (int i = 0; i < 10 && i < nLoop; i++)
{
@@ -1592,6 +1658,10 @@ void ThreadOpenConnections()
if (IsLimited(addr))
continue;
+ // only connect to full nodes
+ if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+ continue;
+
// only consider very recently tried nodes after 30 failed attempts
if (nANow - addr.nLastTry < 600 && nTries < 30)
continue;
@@ -1609,66 +1679,79 @@ void ThreadOpenConnections()
}
}
-void ThreadOpenAddedConnections()
+std::vector<AddedNodeInfo> GetAddedNodeInfo()
{
+ std::vector<AddedNodeInfo> ret;
+
+ std::list<std::string> lAddresses(0);
{
LOCK(cs_vAddedNodes);
- vAddedNodes = mapMultiArgs["-addnode"];
+ ret.reserve(vAddedNodes.size());
+ BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
+ lAddresses.push_back(strAddNode);
}
- if (HaveNameProxy()) {
- while(true) {
- std::list<std::string> lAddresses(0);
- {
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
- lAddresses.push_back(strAddNode);
+
+ // Build a map of all already connected addresses (by IP:port and by name) to inbound/outbound and resolved CService
+ std::map<CService, bool> mapConnected;
+ std::map<std::string, std::pair<bool, CService>> mapConnectedByName;
+ {
+ LOCK(cs_vNodes);
+ for (const CNode* pnode : vNodes) {
+ if (pnode->addr.IsValid()) {
+ mapConnected[pnode->addr] = pnode->fInbound;
}
- BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
- CAddress addr;
- CSemaphoreGrant grant(*semOutbound);
- OpenNetworkConnection(addr, false, &grant, strAddNode.c_str());
- MilliSleep(500);
+ if (!pnode->addrName.empty()) {
+ mapConnectedByName[pnode->addrName] = std::make_pair(pnode->fInbound, static_cast<const CService&>(pnode->addr));
+ }
+ }
+ }
+
+ BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
+ CService service(strAddNode, Params().GetDefaultPort());
+ if (service.IsValid()) {
+ // strAddNode is an IP:port
+ auto it = mapConnected.find(service);
+ if (it != mapConnected.end()) {
+ ret.push_back(AddedNodeInfo{strAddNode, service, true, it->second});
+ } else {
+ ret.push_back(AddedNodeInfo{strAddNode, CService(), false, false});
+ }
+ } else {
+ // strAddNode is a name
+ auto it = mapConnectedByName.find(strAddNode);
+ if (it != mapConnectedByName.end()) {
+ ret.push_back(AddedNodeInfo{strAddNode, it->second.second, true, it->second.first});
+ } else {
+ ret.push_back(AddedNodeInfo{strAddNode, CService(), false, false});
}
- MilliSleep(120000); // Retry every 2 minutes
}
}
+ return ret;
+}
+
+void ThreadOpenAddedConnections()
+{
+ {
+ LOCK(cs_vAddedNodes);
+ vAddedNodes = mapMultiArgs["-addnode"];
+ }
+
for (unsigned int i = 0; true; i++)
{
- std::list<std::string> lAddresses(0);
- {
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
- lAddresses.push_back(strAddNode);
+ std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo();
+ for (const AddedNodeInfo& info : vInfo) {
+ if (!info.fConnected) {
+ CSemaphoreGrant grant(*semOutbound);
+ // If strAddedNode is an IP/port, decode it immediately, so
+ // OpenNetworkConnection can detect existing connections to that IP/port.
+ CService service(info.strAddedNode, Params().GetDefaultPort());
+ OpenNetworkConnection(CAddress(service, NODE_NONE), false, &grant, info.strAddedNode.c_str(), false);
+ MilliSleep(500);
+ }
}
- std::list<std::vector<CService> > lservAddressesToAdd(0);
- BOOST_FOREACH(const std::string& strAddNode, lAddresses) {
- std::vector<CService> vservNode(0);
- if(Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0))
- lservAddressesToAdd.push_back(vservNode);
- }
- // Attempt to connect to each IP for each addnode entry until at least one is successful per addnode entry
- // (keeping in mind that addnode entries can have many IPs if fNameLookup)
- {
- LOCK(cs_vNodes);
- BOOST_FOREACH(CNode* pnode, vNodes)
- for (std::list<std::vector<CService> >::iterator it = lservAddressesToAdd.begin(); it != lservAddressesToAdd.end(); it++)
- BOOST_FOREACH(const CService& addrNode, *(it))
- if (pnode->addr == addrNode)
- {
- it = lservAddressesToAdd.erase(it);
- it--;
- break;
- }
- }
- BOOST_FOREACH(std::vector<CService>& vserv, lservAddressesToAdd)
- {
- CSemaphoreGrant grant(*semOutbound);
- OpenNetworkConnection(CAddress(vserv[i % vserv.size()]), false, &grant);
- MilliSleep(500);
- }
MilliSleep(120000); // Retry every 2 minutes
}
}
@@ -2324,7 +2407,8 @@ CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNa
addrKnown(5000, 0.001),
filterInventoryKnown(50000, 0.000001)
{
- nServices = 0;
+ nServices = NODE_NONE;
+ nServicesExpected = NODE_NONE;
hSocket = hSocketIn;
nRecvVersion = INIT_PROTO_VERSION;
nLastSend = 0;
@@ -2358,6 +2442,8 @@ CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNa
fSentAddr = false;
pfilter = new CBloomFilter();
timeLastMempoolReq = 0;
+ nLastBlockTime = 0;
+ nLastTXTime = 0;
nPingNonceSent = 0;
nPingUsecStart = 0;
nPingUsecTime = 0;
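GetAddedNodeInfo above classifies each -addnode entry either as an IP:port, looked up in a map of connected addresses, or as a name, looked up in a map keyed by the string used to connect. A reduced, self-contained sketch of that two-map lookup; plain strings stand in for CService, and unlike the real code the entry is not first validated as an address:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct AddedNodeState {
        std::string added;     // what the user passed to -addnode
        std::string resolved;  // address we are connected to, if any
        bool connected;
        bool inbound;
    };

    int main()
    {
        // Stand-ins for the two maps built under cs_vNodes.
        std::map<std::string, bool> connectedByAddr = {{"203.0.113.5:8333", false}};
        std::map<std::string, std::pair<bool, std::string>> connectedByName =
            {{"node.example.com", {false, "198.51.100.7:8333"}}};

        std::vector<std::string> added = {"203.0.113.5:8333", "node.example.com", "192.0.2.9:8333"};
        std::vector<AddedNodeState> out;
        for (const std::string& entry : added) {
            auto it = connectedByAddr.find(entry);
            if (it != connectedByAddr.end()) {
                out.push_back({entry, entry, true, it->second});
                continue;
            }
            auto itn = connectedByName.find(entry);
            if (itn != connectedByName.end()) {
                out.push_back({entry, itn->second.second, true, itn->second.first});
            } else {
                out.push_back({entry, "", false, false});
            }
        }
        for (const AddedNodeState& s : out)
            std::printf("%s connected=%d\n", s.added.c_str(), (int)s.connected);
        return 0;
    }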
diff --git a/src/net.h b/src/net.h
index 5c1f7e3e89..67b95fe0e4 100644
--- a/src/net.h
+++ b/src/net.h
@@ -72,6 +72,8 @@ static const bool DEFAULT_FORCEDNSSEED = false;
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
+static const ServiceFlags REQUIRED_SERVICES = NODE_NETWORK;
+
// NOTE: When adjusting this, update rpcnet:setban's help ("24h")
static const unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban
@@ -152,7 +154,7 @@ CAddress GetLocalAddress(const CNetAddr *paddrPeer = NULL);
extern bool fDiscover;
extern bool fListen;
-extern uint64_t nLocalServices;
+extern ServiceFlags nLocalServices;
extern bool fRelayTxes;
extern uint64_t nLocalHostNonce;
extern CAddrMan addrman;
@@ -186,7 +188,7 @@ class CNodeStats
{
public:
NodeId nodeid;
- uint64_t nServices;
+ ServiceFlags nServices;
bool fRelayTxes;
int64_t nLastSend;
int64_t nLastRecv;
@@ -316,7 +318,8 @@ class CNode
{
public:
// socket
- uint64_t nServices;
+ ServiceFlags nServices;
+ ServiceFlags nServicesExpected;
SOCKET hSocket;
CDataStream ssSend;
size_t nSendSize; // total size of all vSendMsg entries
@@ -416,6 +419,11 @@ public:
// Last time a "MEMPOOL" request was serviced.
std::atomic<int64_t> timeLastMempoolReq;
+
+ // Block and TXN accept times
+ std::atomic<int64_t> nLastBlockTime;
+ std::atomic<int64_t> nLastTXTime;
+
// Ping time measurement:
// The pong reply we're expecting, or 0 if no pong expected.
uint64_t nPingNonceSent;
@@ -815,4 +823,14 @@ public:
/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */
int64_t PoissonNextSend(int64_t nNow, int average_interval_seconds);
+struct AddedNodeInfo
+{
+ std::string strAddedNode;
+ CService resolvedAddress;
+ bool fConnected;
+ bool fInbound;
+};
+
+std::vector<AddedNodeInfo> GetAddedNodeInfo();
+
#endif // BITCOIN_NET_H
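The move from a plain uint64_t to the typed ServiceFlags enum keeps the values bit-compatible while making intent explicit, and the new peer-selection check in ThreadOpenConnections is a plain mask test against REQUIRED_SERVICES. A minimal sketch; only NODE_NETWORK (bit 0) reflects the real definition, the 0x8 bit is illustrative:

    #include <cstdint>
    #include <cstdio>

    enum ServiceFlags : uint64_t {
        NODE_NONE = 0,
        NODE_NETWORK = (1 << 0),
    };

    static const ServiceFlags REQUIRED_SERVICES = NODE_NETWORK;

    // True only if the advertised services include every required bit.
    bool HasRequiredServices(uint64_t advertised)
    {
        return (advertised & REQUIRED_SERVICES) == REQUIRED_SERVICES;
    }

    int main()
    {
        std::printf("%d\n", HasRequiredServices(NODE_NONE));          // 0: skip this peer
        std::printf("%d\n", HasRequiredServices(NODE_NETWORK | 0x8)); // 1: full node, extra bits are fine
        return 0;
    }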
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 572ae70871..e2a516986c 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -621,10 +621,10 @@ bool ConnectSocketByName(CService &addr, SOCKET& hSocketRet, const char *pszDest
proxyType nameProxy;
GetNameProxy(nameProxy);
- CService addrResolved;
- if (Lookup(strDest.c_str(), addrResolved, port, fNameLookup && !HaveNameProxy())) {
- if (addrResolved.IsValid()) {
- addr = addrResolved;
+ std::vector<CService> addrResolved;
+ if (Lookup(strDest.c_str(), addrResolved, port, fNameLookup && !HaveNameProxy(), 256)) {
+ if (addrResolved.size() > 0) {
+ addr = addrResolved[GetRand(addrResolved.size())];
return ConnectSocket(addr, hSocketRet, nTimeout);
}
}
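Resolving a destination to many addresses and then picking one uniformly at random, rather than always taking the first result, spreads connections across a host's A/AAAA records. A standalone sketch of just the selection step; the resolver is simulated and GetRand is replaced with the standard <random> facilities:

    #include <cstdio>
    #include <random>
    #include <string>
    #include <vector>

    int main()
    {
        // Pretend Lookup() returned several resolved addresses (capped at 256 in the patch).
        std::vector<std::string> resolved = {
            "192.0.2.10:8333", "192.0.2.11:8333", "198.51.100.3:8333"};
        if (resolved.empty()) return 1;

        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_int_distribution<size_t> pick(0, resolved.size() - 1);
        std::printf("connecting to %s\n", resolved[pick(gen)].c_str());
        return 0;
    }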
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 8c4bd05725..422ef6f636 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -133,7 +133,7 @@ CAddress::CAddress() : CService()
Init();
}
-CAddress::CAddress(CService ipIn, uint64_t nServicesIn) : CService(ipIn)
+CAddress::CAddress(CService ipIn, ServiceFlags nServicesIn) : CService(ipIn)
{
Init();
nServices = nServicesIn;
@@ -141,7 +141,7 @@ CAddress::CAddress(CService ipIn, uint64_t nServicesIn) : CService(ipIn)
void CAddress::Init()
{
- nServices = NODE_NETWORK;
+ nServices = NODE_NONE;
nTime = 100000000;
}
diff --git a/src/protocol.h b/src/protocol.h
index 1b049e52af..ab0a581783 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -223,7 +223,9 @@ extern const char *FEEFILTER;
const std::vector<std::string> &getAllNetMessageTypes();
/** nServices flags */
-enum {
+enum ServiceFlags : uint64_t {
+ // Nothing
+ NODE_NONE = 0,
// NODE_NETWORK means that the node is capable of serving the block chain. It is currently
// set by all Bitcoin Core nodes, and is unset by SPV clients or other peers that just want
// network services but don't provide them.
@@ -251,7 +253,7 @@ class CAddress : public CService
{
public:
CAddress();
- explicit CAddress(CService ipIn, uint64_t nServicesIn = NODE_NETWORK);
+ explicit CAddress(CService ipIn, ServiceFlags nServicesIn);
void Init();
@@ -267,13 +269,15 @@ public:
if ((nType & SER_DISK) ||
(nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH)))
READWRITE(nTime);
- READWRITE(nServices);
+ uint64_t nServicesInt = nServices;
+ READWRITE(nServicesInt);
+ nServices = (ServiceFlags)nServicesInt;
READWRITE(*(CService*)this);
}
// TODO: make private (improves encapsulation)
public:
- uint64_t nServices;
+ ServiceFlags nServices;
// disk and network only
unsigned int nTime;
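Because the typed enum is not directly fed to the templated READWRITE serializer, the patch round-trips nServices through a uint64_t, keeping the wire format unchanged. The same pattern in isolation, with the stream reduced to a byte copy:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    enum ServiceFlags : uint64_t { NODE_NONE = 0, NODE_NETWORK = (1 << 0) };

    int main()
    {
        ServiceFlags services = NODE_NETWORK;

        // Write: cast the enum to its wire type before serializing.
        uint64_t onWire = services;
        unsigned char buf[sizeof(uint64_t)];
        std::memcpy(buf, &onWire, sizeof(onWire));   // stand-in for READWRITE(nServicesInt)

        // Read: deserialize the integer, then cast back to the typed flags.
        uint64_t fromWire = 0;
        std::memcpy(&fromWire, buf, sizeof(fromWire));
        ServiceFlags decoded = (ServiceFlags)fromWire;

        std::printf("decoded NODE_NETWORK bit: %d\n", (int)(decoded & NODE_NETWORK));
        return 0;
    }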
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 6218ab6ab0..64b5c83d72 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -533,6 +533,9 @@ int main(int argc, char *argv[])
// Generate high-dpi pixmaps
QApplication::setAttribute(Qt::AA_UseHighDpiPixmaps);
#endif
+#if QT_VERSION >= 0x050600
+ QGuiApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
+#endif
#ifdef Q_OS_MAC
QApplication::setAttribute(Qt::AA_DontShowIconsInMenus);
#endif
diff --git a/src/qt/forms/receiverequestdialog.ui b/src/qt/forms/receiverequestdialog.ui
index 1e484dd9a0..4163f4189c 100644
--- a/src/qt/forms/receiverequestdialog.ui
+++ b/src/qt/forms/receiverequestdialog.ui
@@ -22,7 +22,7 @@
<property name="minimumSize">
<size>
<width>300</width>
- <height>300</height>
+ <height>320</height>
</size>
</property>
<property name="toolTip">
diff --git a/src/qt/guiconstants.h b/src/qt/guiconstants.h
index 4b2c10dd48..bab9923d20 100644
--- a/src/qt/guiconstants.h
+++ b/src/qt/guiconstants.h
@@ -43,7 +43,7 @@ static const int TOOLTIP_WRAP_THRESHOLD = 80;
static const int MAX_URI_LENGTH = 255;
/* QRCodeDialog -- size of exported QR Code image */
-#define EXPORT_IMAGE_SIZE 256
+#define QR_IMAGE_SIZE 300
/* Number of frames in spinner animation */
#define SPINNER_FRAMES 36
diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp
index a1e9156eea..b13ea3df70 100644
--- a/src/qt/receiverequestdialog.cpp
+++ b/src/qt/receiverequestdialog.cpp
@@ -45,7 +45,7 @@ QImage QRImageWidget::exportImage()
{
if(!pixmap())
return QImage();
- return pixmap()->toImage().scaled(EXPORT_IMAGE_SIZE, EXPORT_IMAGE_SIZE);
+ return pixmap()->toImage();
}
void QRImageWidget::mousePressEvent(QMouseEvent *event)
@@ -166,20 +166,32 @@ void ReceiveRequestDialog::update()
ui->lblQRCode->setText(tr("Error encoding URI into QR Code."));
return;
}
- QImage myImage = QImage(code->width + 8, code->width + 8, QImage::Format_RGB32);
- myImage.fill(0xffffff);
+ QImage qrImage = QImage(code->width + 8, code->width + 8, QImage::Format_RGB32);
+ qrImage.fill(0xffffff);
unsigned char *p = code->data;
for (int y = 0; y < code->width; y++)
{
for (int x = 0; x < code->width; x++)
{
- myImage.setPixel(x + 4, y + 4, ((*p & 1) ? 0x0 : 0xffffff));
+ qrImage.setPixel(x + 4, y + 4, ((*p & 1) ? 0x0 : 0xffffff));
p++;
}
}
QRcode_free(code);
- ui->lblQRCode->setPixmap(QPixmap::fromImage(myImage).scaled(300, 300));
+ QImage qrAddrImage = QImage(QR_IMAGE_SIZE, QR_IMAGE_SIZE+20, QImage::Format_RGB32);
+ qrAddrImage.fill(0xffffff);
+ QPainter painter(&qrAddrImage);
+ painter.drawImage(0, 0, qrImage.scaled(QR_IMAGE_SIZE, QR_IMAGE_SIZE));
+ QFont font = GUIUtil::fixedPitchFont();
+ font.setPixelSize(12);
+ painter.setFont(font);
+ QRect paddedRect = qrAddrImage.rect();
+ paddedRect.setHeight(QR_IMAGE_SIZE+12);
+ painter.drawText(paddedRect, Qt::AlignBottom|Qt::AlignCenter, info.address);
+ painter.end();
+
+ ui->lblQRCode->setPixmap(QPixmap::fromImage(qrAddrImage));
ui->btnSaveAs->setEnabled(true);
}
}
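The dialog change above composes the QR code and the address into a single image, so saving or dragging the image carries the address text as well. A condensed Qt 5 sketch of the same painting steps; the sizes, the Monospace font (standing in for GUIUtil::fixedPitchFont()), the placeholder address string, and the blank source image are all illustrative:

    #include <QFont>
    #include <QGuiApplication>
    #include <QImage>
    #include <QPainter>
    #include <QRect>

    int main(int argc, char* argv[])
    {
        QGuiApplication app(argc, argv);   // needed for font/text rendering

        const int kSize = 300;             // QR_IMAGE_SIZE in the patch
        QImage qrImage(256, 256, QImage::Format_RGB32);
        qrImage.fill(0xffffff);            // stand-in for the encoded QR bitmap

        // Paint the QR code on top and the address text in the padding below it.
        QImage composed(kSize, kSize + 20, QImage::Format_RGB32);
        composed.fill(0xffffff);
        QPainter painter(&composed);
        painter.drawImage(0, 0, qrImage.scaled(kSize, kSize));
        QFont font("Monospace");
        font.setPixelSize(12);
        painter.setFont(font);
        QRect textRect = composed.rect();
        textRect.setHeight(kSize + 12);
        painter.drawText(textRect, Qt::AlignBottom | Qt::AlignHCenter,
                         "EXAMPLE-ADDRESS-PLACEHOLDER");   // not a real address
        painter.end();

        composed.save("qr_with_address.png");
        return 0;
    }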
diff --git a/src/qt/receiverequestdialog.h b/src/qt/receiverequestdialog.h
index 4cab4caff1..676745a858 100644
--- a/src/qt/receiverequestdialog.h
+++ b/src/qt/receiverequestdialog.h
@@ -10,6 +10,7 @@
#include <QDialog>
#include <QImage>
#include <QLabel>
+#include <QPainter>
class OptionsModel;
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 2bd52eadbc..94eeea91f3 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -112,7 +112,7 @@ UniValue generateBlocks(boost::shared_ptr<CReserveScript> coinbaseScript, int nG
UniValue blockHashes(UniValue::VARR);
while (nHeight < nHeightEnd)
{
- std::unique_ptr<CBlockTemplate> pblocktemplate(CreateNewBlock(Params(), coinbaseScript->reserveScript));
+ std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler(Params()).CreateNewBlock(coinbaseScript->reserveScript));
if (!pblocktemplate.get())
throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
CBlock *pblock = &pblocktemplate->block;
@@ -527,7 +527,7 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
pblocktemplate = NULL;
}
CScript scriptDummy = CScript() << OP_TRUE;
- pblocktemplate = CreateNewBlock(Params(), scriptDummy);
+ pblocktemplate = BlockAssembler(Params()).CreateNewBlock(scriptDummy);
if (!pblocktemplate)
throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
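generateBlocks wraps the raw CBlockTemplate* returned by CreateNewBlock in a std::unique_ptr so every exit path frees it, while getblocktemplate still manages the pointer manually. The ownership pattern in isolation; Template and MakeTemplate are placeholders, not the real types:

    #include <cstdio>
    #include <memory>

    struct Template { int height = 0; };

    // Stand-in for BlockAssembler(...).CreateNewBlock(...), which returns a raw pointer.
    Template* MakeTemplate() { return new Template{1}; }

    int main()
    {
        // Take ownership immediately so early returns cannot leak the template.
        std::unique_ptr<Template> tpl(MakeTemplate());
        if (!tpl) {
            std::printf("could not create template\n");
            return 1;
        }
        std::printf("template at height %d\n", tpl->height);
        return 0;  // tpl deleted automatically
    }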
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index cae964e46d..b85c7b2e1a 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -271,25 +271,22 @@ UniValue getaddednodeinfo(const UniValue& params, bool fHelp)
{
if (fHelp || params.size() < 1 || params.size() > 2)
throw runtime_error(
- "getaddednodeinfo dns ( \"node\" )\n"
+ "getaddednodeinfo dummy ( \"node\" )\n"
"\nReturns information about the given added node, or all added nodes\n"
"(note that onetry addnodes are not listed here)\n"
- "If dns is false, only a list of added nodes will be provided,\n"
- "otherwise connected information will also be available.\n"
"\nArguments:\n"
- "1. dns (boolean, required) If false, only a list of added nodes will be provided, otherwise connected information will also be available.\n"
+ "1. dummy (boolean, required) Kept for historical purposes but ignored\n"
"2. \"node\" (string, optional) If provided, return information about this specific node, otherwise all nodes are returned.\n"
"\nResult:\n"
"[\n"
" {\n"
- " \"addednode\" : \"192.168.0.201\", (string) The node ip address\n"
+ " \"addednode\" : \"192.168.0.201\", (string) The node ip address or name (as provided to addnode)\n"
" \"connected\" : true|false, (boolean) If connected\n"
- " \"addresses\" : [\n"
+ " \"addresses\" : [ (list of objects) Only when connected = true\n"
" {\n"
- " \"address\" : \"192.168.0.201:8333\", (string) The bitcoin server host and port\n"
+ " \"address\" : \"192.168.0.201:8333\", (string) The bitcoin server IP and port we're connected to\n"
" \"connected\" : \"outbound\" (string) connection, inbound or outbound\n"
" }\n"
- " ,...\n"
" ]\n"
" }\n"
" ,...\n"
@@ -300,83 +297,35 @@ UniValue getaddednodeinfo(const UniValue& params, bool fHelp)
+ HelpExampleRpc("getaddednodeinfo", "true, \"192.168.0.201\"")
);
- bool fDns = params[0].get_bool();
+ std::vector<AddedNodeInfo> vInfo = GetAddedNodeInfo();
- list<string> laddedNodes(0);
- if (params.size() == 1)
- {
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes)
- laddedNodes.push_back(strAddNode);
- }
- else
- {
- string strNode = params[1].get_str();
- LOCK(cs_vAddedNodes);
- BOOST_FOREACH(const std::string& strAddNode, vAddedNodes) {
- if (strAddNode == strNode)
- {
- laddedNodes.push_back(strAddNode);
+ if (params.size() == 2) {
+ bool found = false;
+ for (const AddedNodeInfo& info : vInfo) {
+ if (info.strAddedNode == params[1].get_str()) {
+ vInfo.assign(1, info);
+ found = true;
break;
}
}
- if (laddedNodes.size() == 0)
+ if (!found) {
throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added.");
- }
-
- UniValue ret(UniValue::VARR);
- if (!fDns)
- {
- BOOST_FOREACH (const std::string& strAddNode, laddedNodes) {
- UniValue obj(UniValue::VOBJ);
- obj.push_back(Pair("addednode", strAddNode));
- ret.push_back(obj);
}
- return ret;
}
- list<pair<string, vector<CService> > > laddedAddreses(0);
- BOOST_FOREACH(const std::string& strAddNode, laddedNodes) {
- vector<CService> vservNode(0);
- if(Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0))
- laddedAddreses.push_back(make_pair(strAddNode, vservNode));
- else
- {
- UniValue obj(UniValue::VOBJ);
- obj.push_back(Pair("addednode", strAddNode));
- obj.push_back(Pair("connected", false));
- UniValue addresses(UniValue::VARR);
- obj.push_back(Pair("addresses", addresses));
- ret.push_back(obj);
- }
- }
+ UniValue ret(UniValue::VARR);
- LOCK(cs_vNodes);
- for (list<pair<string, vector<CService> > >::iterator it = laddedAddreses.begin(); it != laddedAddreses.end(); it++)
- {
+ for (const AddedNodeInfo& info : vInfo) {
UniValue obj(UniValue::VOBJ);
- obj.push_back(Pair("addednode", it->first));
-
+ obj.push_back(Pair("addednode", info.strAddedNode));
+ obj.push_back(Pair("connected", info.fConnected));
UniValue addresses(UniValue::VARR);
- bool fConnected = false;
- BOOST_FOREACH(const CService& addrNode, it->second) {
- bool fFound = false;
- UniValue node(UniValue::VOBJ);
- node.push_back(Pair("address", addrNode.ToString()));
- BOOST_FOREACH(CNode* pnode, vNodes) {
- if (pnode->addr == addrNode)
- {
- fFound = true;
- fConnected = true;
- node.push_back(Pair("connected", pnode->fInbound ? "inbound" : "outbound"));
- break;
- }
- }
- if (!fFound)
- node.push_back(Pair("connected", "false"));
- addresses.push_back(node);
+ if (info.fConnected) {
+ UniValue address(UniValue::VOBJ);
+ address.push_back(Pair("address", info.resolvedAddress.ToString()));
+ address.push_back(Pair("connected", info.fInbound ? "inbound" : "outbound"));
+ addresses.push_back(address);
}
- obj.push_back(Pair("connected", fConnected));
obj.push_back(Pair("addresses", addresses));
ret.push_back(obj);
}
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 992914f88c..9723e394d6 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -388,8 +388,13 @@ UniValue createrawtransaction(const UniValue& params, bool fHelp)
// set the sequence number if passed in the parameters object
const UniValue& sequenceObj = find_value(o, "sequence");
- if (sequenceObj.isNum())
- nSequence = sequenceObj.get_int();
+ if (sequenceObj.isNum()) {
+ int64_t seqNr64 = sequenceObj.get_int64();
+ if (seqNr64 < 0 || seqNr64 > std::numeric_limits<uint32_t>::max())
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, sequence number is out of range");
+ else
+ nSequence = (uint32_t)seqNr64;
+ }
CTxIn in(COutPoint(txid, nOutput), CScript(), nSequence);
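The sequence-number fix above is a standard pattern for accepting a JSON number into a uint32_t field: read it as a 64-bit value, range-check it, then narrow. The same check in isolation, with the RPC error replaced by a plain exception:

    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <stdexcept>

    // Narrow a user-supplied 64-bit value into a uint32_t sequence number,
    // rejecting anything outside the representable range.
    uint32_t ParseSequence(int64_t seq)
    {
        if (seq < 0 || seq > (int64_t)std::numeric_limits<uint32_t>::max())
            throw std::runtime_error("sequence number is out of range");
        return (uint32_t)seq;
    }

    int main()
    {
        std::printf("%u\n", ParseSequence(4294967294LL));  // accepted
        try {
            ParseSequence(4294967296LL);                   // one past the maximum: rejected
        } catch (const std::runtime_error& e) {
            std::printf("rejected: %s\n", e.what());
        }
        return 0;
    }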
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 818128d186..93f7ae09da 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -45,7 +45,7 @@ BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(DoS_banning)
{
CNode::ClearBanned();
- CAddress addr1(ip(0xa0b0c001));
+ CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(INVALID_SOCKET, addr1, "", true);
dummyNode1.nVersion = 1;
Misbehaving(dummyNode1.GetId(), 100); // Should get banned
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
BOOST_CHECK(CNode::IsBanned(addr1));
BOOST_CHECK(!CNode::IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned
- CAddress addr2(ip(0xa0b0c002));
+ CAddress addr2(ip(0xa0b0c002), NODE_NONE);
CNode dummyNode2(INVALID_SOCKET, addr2, "", true);
dummyNode2.nVersion = 1;
Misbehaving(dummyNode2.GetId(), 50);
@@ -69,7 +69,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
{
CNode::ClearBanned();
mapArgs["-banscore"] = "111"; // because 11 is my favorite number
- CAddress addr1(ip(0xa0b0c001));
+ CAddress addr1(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode1(INVALID_SOCKET, addr1, "", true);
dummyNode1.nVersion = 1;
Misbehaving(dummyNode1.GetId(), 100);
@@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
int64_t nStartTime = GetTime();
SetMockTime(nStartTime); // Overrides future calls to GetTime()
- CAddress addr(ip(0xa0b0c001));
+ CAddress addr(ip(0xa0b0c001), NODE_NONE);
CNode dummyNode(INVALID_SOCKET, addr, "", true);
dummyNode.nVersion = 1;
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index 767b653e47..b6cec24b57 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -68,7 +68,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
// Test 2: Does Addrman::Add work as expected.
CService addr1 = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
CAddrInfo addr_ret1 = addrman.Select();
BOOST_CHECK(addr_ret1.ToString() == "250.1.1.1:8333");
@@ -76,14 +76,14 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
// Test 3: Does IP address deduplication work correctly.
// Expected dup IP should not be added.
CService addr1_dup = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1_dup), source);
+ addrman.Add(CAddress(addr1_dup, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
// Test 5: New table has one addr and we add a diff addr we should
// have two addrs.
CService addr2 = CService("250.1.1.2", 8333);
- addrman.Add(CAddress(addr2), source);
+ addrman.Add(CAddress(addr2, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 2);
// Test 6: AddrMan::Clear() should empty the new table.
@@ -106,18 +106,18 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
// Test 7; Addr with same IP but diff port does not replace existing addr.
CService addr1 = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
CService addr1_port = CService("250.1.1.1", 8334);
- addrman.Add(CAddress(addr1_port), source);
+ addrman.Add(CAddress(addr1_port, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
CAddrInfo addr_ret2 = addrman.Select();
BOOST_CHECK(addr_ret2.ToString() == "250.1.1.1:8333");
// Test 8: Add same IP but diff port to tried table, it doesn't get added.
// Perhaps this is not ideal behavior but it is the current behavior.
- addrman.Good(CAddress(addr1_port));
+ addrman.Good(CAddress(addr1_port, NODE_NONE));
BOOST_CHECK(addrman.size() == 1);
bool newOnly = true;
CAddrInfo addr_ret3 = addrman.Select(newOnly);
@@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
// Test 9: Select from new with 1 addr in new.
CService addr1 = CService("250.1.1.1", 8333);
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 1);
bool newOnly = true;
@@ -144,7 +144,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_CHECK(addr_ret1.ToString() == "250.1.1.1:8333");
// Test 10: move addr to tried, select from new expected nothing returned.
- addrman.Good(CAddress(addr1));
+ addrman.Good(CAddress(addr1, NODE_NONE));
BOOST_CHECK(addrman.size() == 1);
CAddrInfo addr_ret2 = addrman.Select(newOnly);
BOOST_CHECK(addr_ret2.ToString() == "[::]:0");
@@ -160,21 +160,21 @@ BOOST_AUTO_TEST_CASE(addrman_select)
CService addr3 = CService("250.3.2.2", 9999);
CService addr4 = CService("250.3.3.3", 9999);
- addrman.Add(CAddress(addr2), CService("250.3.1.1", 8333));
- addrman.Add(CAddress(addr3), CService("250.3.1.1", 8333));
- addrman.Add(CAddress(addr4), CService("250.4.1.1", 8333));
+ addrman.Add(CAddress(addr2, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Add(CAddress(addr3, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Add(CAddress(addr4, NODE_NONE), CService("250.4.1.1", 8333));
// Add three addresses to tried table.
CService addr5 = CService("250.4.4.4", 8333);
CService addr6 = CService("250.4.5.5", 7777);
CService addr7 = CService("250.4.6.6", 8333);
- addrman.Add(CAddress(addr5), CService("250.3.1.1", 8333));
- addrman.Good(CAddress(addr5));
- addrman.Add(CAddress(addr6), CService("250.3.1.1", 8333));
- addrman.Good(CAddress(addr6));
- addrman.Add(CAddress(addr7), CService("250.1.1.3", 8333));
- addrman.Good(CAddress(addr7));
+ addrman.Add(CAddress(addr5, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Good(CAddress(addr5, NODE_NONE));
+ addrman.Add(CAddress(addr6, NODE_NONE), CService("250.3.1.1", 8333));
+ addrman.Good(CAddress(addr6, NODE_NONE));
+ addrman.Add(CAddress(addr7, NODE_NONE), CService("250.1.1.3", 8333));
+ addrman.Good(CAddress(addr7, NODE_NONE));
// Test 11: 6 addrs + 1 addr from last test = 7.
BOOST_CHECK(addrman.size() == 7);
@@ -199,7 +199,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
for (unsigned int i = 1; i < 18; i++) {
CService addr = CService("250.1.1." + boost::to_string(i));
- addrman.Add(CAddress(addr), source);
+ addrman.Add(CAddress(addr, NODE_NONE), source);
//Test 13: No collision in new table yet.
BOOST_CHECK(addrman.size() == i);
@@ -207,11 +207,11 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
//Test 14: new table collision!
CService addr1 = CService("250.1.1.18");
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 17);
CService addr2 = CService("250.1.1.19");
- addrman.Add(CAddress(addr2), source);
+ addrman.Add(CAddress(addr2, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 18);
}
@@ -228,8 +228,8 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
for (unsigned int i = 1; i < 80; i++) {
CService addr = CService("250.1.1." + boost::to_string(i));
- addrman.Add(CAddress(addr), source);
- addrman.Good(CAddress(addr));
+ addrman.Add(CAddress(addr, NODE_NONE), source);
+ addrman.Good(CAddress(addr, NODE_NONE));
//Test 15: No collision in tried table yet.
BOOST_TEST_MESSAGE(addrman.size());
@@ -238,11 +238,11 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
//Test 16: tried table collision!
CService addr1 = CService("250.1.1.80");
- addrman.Add(CAddress(addr1), source);
+ addrman.Add(CAddress(addr1, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 79);
CService addr2 = CService("250.1.1.81");
- addrman.Add(CAddress(addr2), source);
+ addrman.Add(CAddress(addr2, NODE_NONE), source);
BOOST_CHECK(addrman.size() == 80);
}
@@ -255,9 +255,9 @@ BOOST_AUTO_TEST_CASE(addrman_find)
BOOST_CHECK(addrman.size() == 0);
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
- CAddress addr2 = CAddress(CService("250.1.2.1", 9999));
- CAddress addr3 = CAddress(CService("251.255.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(CService("250.1.2.1", 9999), NODE_NONE);
+ CAddress addr3 = CAddress(CService("251.255.2.1", 8333), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
CNetAddr source2 = CNetAddr("250.1.2.2");
@@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(addrman_create)
BOOST_CHECK(addrman.size() == 0);
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
int nId;
@@ -317,7 +317,7 @@ BOOST_AUTO_TEST_CASE(addrman_delete)
BOOST_CHECK(addrman.size() == 0);
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
int nId;
@@ -344,15 +344,15 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
vector<CAddress> vAddr1 = addrman.GetAddr();
BOOST_CHECK(vAddr1.size() == 0);
- CAddress addr1 = CAddress(CService("250.250.2.1", 8333));
+ CAddress addr1 = CAddress(CService("250.250.2.1", 8333), NODE_NONE);
addr1.nTime = GetAdjustedTime(); // Set time so isTerrible = false
- CAddress addr2 = CAddress(CService("250.251.2.2", 9999));
+ CAddress addr2 = CAddress(CService("250.251.2.2", 9999), NODE_NONE);
addr2.nTime = GetAdjustedTime();
- CAddress addr3 = CAddress(CService("251.252.2.3", 8333));
+ CAddress addr3 = CAddress(CService("251.252.2.3", 8333), NODE_NONE);
addr3.nTime = GetAdjustedTime();
- CAddress addr4 = CAddress(CService("252.253.3.4", 8333));
+ CAddress addr4 = CAddress(CService("252.253.3.4", 8333), NODE_NONE);
addr4.nTime = GetAdjustedTime();
- CAddress addr5 = CAddress(CService("252.254.4.5", 8333));
+ CAddress addr5 = CAddress(CService("252.254.4.5", 8333), NODE_NONE);
addr5.nTime = GetAdjustedTime();
CNetAddr source1 = CNetAddr("250.1.2.1");
CNetAddr source2 = CNetAddr("250.2.3.3");
@@ -368,8 +368,8 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
BOOST_CHECK(addrman.GetAddr().size() == 1);
// Test 24: Ensure GetAddr works with new and tried addresses.
- addrman.Good(CAddress(addr1));
- addrman.Good(CAddress(addr2));
+ addrman.Good(CAddress(addr1, NODE_NONE));
+ addrman.Good(CAddress(addr2, NODE_NONE));
BOOST_CHECK(addrman.GetAddr().size() == 1);
// Test 25: Ensure GetAddr still returns 23% when addrman has many addrs.
@@ -378,7 +378,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
int octet2 = (i / 256) % 256;
int octet3 = (i / (256 * 2)) % 256;
string strAddr = boost::to_string(octet1) + "." + boost::to_string(octet2) + "." + boost::to_string(octet3) + ".23";
- CAddress addr = CAddress(CService(strAddr));
+ CAddress addr = CAddress(CService(strAddr), NODE_NONE);
// Ensure that for all addrs in addrman, isTerrible == false.
addr.nTime = GetAdjustedTime();
@@ -403,8 +403,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
// Set addrman addr placement to be deterministic.
addrman.MakeDeterministic();
- CAddress addr1 = CAddress(CService("250.1.1.1", 8333));
- CAddress addr2 = CAddress(CService("250.1.1.1", 9999));
+ CAddress addr1 = CAddress(CService("250.1.1.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(CService("250.1.1.1", 9999), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.1.1");
@@ -431,7 +431,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(CService("250.1.1." + boost::to_string(i))),
+ CAddress(CService("250.1.1." + boost::to_string(i)), NODE_NONE),
CNetAddr("250.1.1." + boost::to_string(i)));
int bucket = infoi.GetTriedBucket(nKey1);
buckets.insert(bucket);
@@ -443,7 +443,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
buckets.clear();
for (int j = 0; j < 255; j++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(CService("250." + boost::to_string(j) + ".1.1")),
+ CAddress(CService("250." + boost::to_string(j) + ".1.1"), NODE_NONE),
CNetAddr("250." + boost::to_string(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1);
buckets.insert(bucket);
@@ -460,8 +460,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
// Set addrman addr placement to be deterministic.
addrman.MakeDeterministic();
- CAddress addr1 = CAddress(CService("250.1.2.1", 8333));
- CAddress addr2 = CAddress(CService("250.1.2.1", 9999));
+ CAddress addr1 = CAddress(CService("250.1.2.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(CService("250.1.2.1", 9999), NODE_NONE);
CNetAddr source1 = CNetAddr("250.1.2.1");
@@ -484,7 +484,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(CService("250.1.1." + boost::to_string(i))),
+ CAddress(CService("250.1.1." + boost::to_string(i)), NODE_NONE),
CNetAddr("250.1.1." + boost::to_string(i)));
int bucket = infoi.GetNewBucket(nKey1);
buckets.insert(bucket);
@@ -497,7 +497,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
for (int j = 0; j < 4 * 255; j++) {
CAddrInfo infoj = CAddrInfo(CAddress(
CService(
- boost::to_string(250 + (j / 255)) + "." + boost::to_string(j % 256) + ".1.1")),
+ boost::to_string(250 + (j / 255)) + "." + boost::to_string(j % 256) + ".1.1"), NODE_NONE),
CNetAddr("251.4.1.1"));
int bucket = infoj.GetNewBucket(nKey1);
buckets.insert(bucket);
@@ -509,7 +509,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
buckets.clear();
for (int p = 0; p < 255; p++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(CService("250.1.1.1")),
+ CAddress(CService("250.1.1.1"), NODE_NONE),
CNetAddr("250." + boost::to_string(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1);
buckets.insert(bucket);
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 3f5f0ee98b..ca8d6d2e05 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -71,6 +71,113 @@ bool TestSequenceLocks(const CTransaction &tx, int flags)
return CheckSequenceLocks(tx, flags);
}
+// Test suite for ancestor feerate transaction selection.
+// Implemented as an additional function, rather than a separate test case,
+// to allow reusing the blockchain created in CreateNewBlock_validity.
+// Note that this test assumes blockprioritysize is 0.
+void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, std::vector<CTransaction *>& txFirst)
+{
+ // Test the ancestor feerate transaction selection.
+ TestMemPoolEntryHelper entry;
+
+ // Test that a medium fee transaction will be selected after a higher fee
+ // rate package with a low fee rate parent.
+ CMutableTransaction tx;
+ tx.vin.resize(1);
+ tx.vin[0].scriptSig = CScript() << OP_1;
+ tx.vin[0].prevout.hash = txFirst[0]->GetHash();
+ tx.vin[0].prevout.n = 0;
+ tx.vout.resize(1);
+ tx.vout[0].nValue = 5000000000LL - 1000;
+ // This tx has a low fee: 1000 satoshis
+ uint256 hashParentTx = tx.GetHash(); // save this txid for later use
+ mempool.addUnchecked(hashParentTx, entry.Fee(1000).Time(GetTime()).SpendsCoinbase(true).FromTx(tx));
+
+ // This tx has a medium fee: 10000 satoshis
+ tx.vin[0].prevout.hash = txFirst[1]->GetHash();
+ tx.vout[0].nValue = 5000000000LL - 10000;
+ uint256 hashMediumFeeTx = tx.GetHash();
+ mempool.addUnchecked(hashMediumFeeTx, entry.Fee(10000).Time(GetTime()).SpendsCoinbase(true).FromTx(tx));
+
+ // This tx has a high fee, but depends on the first transaction
+ tx.vin[0].prevout.hash = hashParentTx;
+ tx.vout[0].nValue = 5000000000LL - 1000 - 50000; // 50k satoshi fee
+ uint256 hashHighFeeTx = tx.GetHash();
+ mempool.addUnchecked(hashHighFeeTx, entry.Fee(50000).Time(GetTime()).SpendsCoinbase(false).FromTx(tx));
+
+ CBlockTemplate *pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
+ BOOST_CHECK(pblocktemplate->block.vtx[1].GetHash() == hashParentTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[2].GetHash() == hashHighFeeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[3].GetHash() == hashMediumFeeTx);
+
+ // Test that a package below the min relay fee doesn't get included
+ tx.vin[0].prevout.hash = hashHighFeeTx;
+ tx.vout[0].nValue = 5000000000LL - 1000 - 50000; // 0 fee
+ uint256 hashFreeTx = tx.GetHash();
+ mempool.addUnchecked(hashFreeTx, entry.Fee(0).FromTx(tx));
+ size_t freeTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
+
+ // Calculate a fee on child transaction that will put the package just
+ // below the min relay fee (assuming 1 child tx of the same size).
+ CAmount feeToUse = minRelayTxFee.GetFee(2*freeTxSize) - 1;
+
+ tx.vin[0].prevout.hash = hashFreeTx;
+ tx.vout[0].nValue = 5000000000LL - 1000 - 50000 - feeToUse;
+ uint256 hashLowFeeTx = tx.GetHash();
+ mempool.addUnchecked(hashLowFeeTx, entry.Fee(feeToUse).FromTx(tx));
+ pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
+ // Verify that the free tx and the low fee tx didn't get selected
+ for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
+ BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashFreeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashLowFeeTx);
+ }
+
+ // Test that packages above the min relay fee do get included, even if one
+ // of the transactions is below the min relay fee
+ // Remove the low fee transaction and replace with a higher fee transaction
+ std::list<CTransaction> dummy;
+ mempool.removeRecursive(tx, dummy);
+ tx.vout[0].nValue -= 2; // Now we should be just over the min relay fee
+ hashLowFeeTx = tx.GetHash();
+ mempool.addUnchecked(hashLowFeeTx, entry.Fee(feeToUse+2).FromTx(tx));
+ pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
+ BOOST_CHECK(pblocktemplate->block.vtx[4].GetHash() == hashFreeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[5].GetHash() == hashLowFeeTx);
+
+ // Test that transaction selection properly updates ancestor fee
+ // calculations as ancestor transactions get included in a block.
+ // Add a 0-fee transaction that has 2 outputs.
+ tx.vin[0].prevout.hash = txFirst[2]->GetHash();
+ tx.vout.resize(2);
+ tx.vout[0].nValue = 5000000000LL - 100000000;
+ tx.vout[1].nValue = 100000000; // 1BTC output
+ uint256 hashFreeTx2 = tx.GetHash();
+ mempool.addUnchecked(hashFreeTx2, entry.Fee(0).SpendsCoinbase(true).FromTx(tx));
+
+ // This tx can't be mined by itself
+ tx.vin[0].prevout.hash = hashFreeTx2;
+ tx.vout.resize(1);
+ feeToUse = minRelayTxFee.GetFee(freeTxSize);
+ tx.vout[0].nValue = 5000000000LL - 100000000 - feeToUse;
+ uint256 hashLowFeeTx2 = tx.GetHash();
+ mempool.addUnchecked(hashLowFeeTx2, entry.Fee(feeToUse).SpendsCoinbase(false).FromTx(tx));
+ pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
+
+ // Verify that this tx isn't selected.
+ for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
+ BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashFreeTx2);
+ BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashLowFeeTx2);
+ }
+
+ // This tx will be mineable, and should cause hashLowFeeTx2 to be selected
+ // as well.
+ tx.vin[0].prevout.n = 1;
+ tx.vout[0].nValue = 100000000 - 10000; // 10k satoshi fee
+ mempool.addUnchecked(tx.GetHash(), entry.Fee(10000).FromTx(tx));
+ pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
+ BOOST_CHECK(pblocktemplate->block.vtx[8].GetHash() == hashLowFeeTx2);
+}
+
// NOTE: These tests rely on CreateNewBlock doing its own self-validation!
BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
{
@@ -89,7 +196,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
fCheckpointsEnabled = false;
// Simple block creation, nothing special yet:
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
// We can't make transactions until we have inputs
// Therefore, load 100 blocks :)
@@ -121,7 +228,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
delete pblocktemplate;
// Just to make sure we can still make simple blocks
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
const CAmount BLOCKSUBSIDY = 50*COIN;
@@ -146,7 +253,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(spendsCoinbase).FromTx(tx));
tx.vin[0].prevout.hash = hash;
}
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
tx.vin[0].prevout.hash = txFirst[0]->GetHash();
@@ -160,7 +267,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(spendsCoinbase).SigOps(20).FromTx(tx));
tx.vin[0].prevout.hash = hash;
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
mempool.clear();
@@ -181,14 +288,14 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(spendsCoinbase).FromTx(tx));
tx.vin[0].prevout.hash = hash;
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
mempool.clear();
// orphan in mempool, template creation fails
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// child with higher priority than parent
@@ -205,7 +312,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vout[0].nValue = tx.vout[0].nValue+BLOCKSUBSIDY-HIGHERFEE; //First txn output + fresh coinbase - new txn fee
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(HIGHERFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx));
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
mempool.clear();
@@ -217,7 +324,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
hash = tx.GetHash();
// give it a fee so it'll get mined
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// invalid (pre-p2sh) txn in mempool, template creation fails
@@ -234,7 +341,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vout[0].nValue -= LOWFEE;
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(LOWFEE).Time(GetTime()).SpendsCoinbase(false).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// double spend txn pair in mempool, template creation fails
@@ -247,7 +354,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vout[0].scriptPubKey = CScript() << OP_2;
hash = tx.GetHash();
mempool.addUnchecked(hash, entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx));
- BOOST_CHECK_THROW(CreateNewBlock(chainparams, scriptPubKey), std::runtime_error);
+ BOOST_CHECK_THROW(BlockAssembler(chainparams).CreateNewBlock(scriptPubKey), std::runtime_error);
mempool.clear();
// subsidy changing
@@ -263,7 +370,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
next->BuildSkip();
chainActive.SetTip(next);
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
// Extend to a 210000-long block chain.
while (chainActive.Tip()->nHeight < 210000) {
@@ -276,7 +383,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
next->BuildSkip();
chainActive.SetTip(next);
}
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
delete pblocktemplate;
// Delete the dummy blocks again.
while (chainActive.Tip()->nHeight > nHeight) {
@@ -363,7 +470,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 1;
BOOST_CHECK(!TestSequenceLocks(tx, flags)); // Sequence locks fail
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
// None of the absolute height/time locked tx should have made
// it into the template because we still check IsFinalTx in CreateNewBlock,
@@ -377,7 +484,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
chainActive.Tip()->nHeight++;
SetMockTime(chainActive.Tip()->GetMedianTimePast() + 1);
- BOOST_CHECK(pblocktemplate = CreateNewBlock(chainparams, scriptPubKey));
+ BOOST_CHECK(pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey));
BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5);
delete pblocktemplate;
@@ -385,6 +492,8 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
SetMockTime(0);
mempool.clear();
+ TestPackageSelection(chainparams, scriptPubKey, txFirst);
+
BOOST_FOREACH(CTransaction *_tx, txFirst)
delete _tx;
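The feeToUse computed in TestPackageSelection above is chosen so the two-transaction package (a zero-fee parent plus its child) lands just below the minimum relay feerate; bumping the child fee by 2 satoshis then pushes the package just above it. A small arithmetic sketch with an assumed feerate of 1000 satoshis per 1000 bytes and an assumed transaction size:

    #include <cstdint>
    #include <cstdio>

    // Fee required by a feerate of `ratePerK` satoshis per 1000 bytes.
    int64_t FeeFor(int64_t ratePerK, int64_t bytes) { return ratePerK * bytes / 1000; }

    int main()
    {
        const int64_t ratePerK = 1000;  // assumed min relay feerate, sat/kB
        const int64_t txSize = 200;     // assumed size of each transaction, bytes

        // Child fee chosen so (0 + feeToUse) / (2 * txSize) is just below the minimum.
        int64_t feeToUse = FeeFor(ratePerK, 2 * txSize) - 1;
        std::printf("package fee %lld vs required %lld -> excluded\n",
                    (long long)feeToUse, (long long)FeeFor(ratePerK, 2 * txSize));

        // Adding 2 satoshis puts the package just over the threshold -> both txs selected.
        std::printf("package fee %lld -> included\n", (long long)(feeToUse + 2));
        return 0;
    }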
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index b38d61f330..d005d6a163 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -51,7 +51,7 @@ public:
int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
s << nUBuckets;
- CAddress addr = CAddress(CService("252.1.1.1", 7777));
+ CAddress addr = CAddress(CService("252.1.1.1", 7777), NODE_NONE);
CAddrInfo info = CAddrInfo(addr, CNetAddr("252.2.2.2"));
s << info;
}
@@ -79,9 +79,9 @@ BOOST_AUTO_TEST_CASE(caddrdb_read)
CService addr3 = CService("250.7.3.3", 9999);
// Add three addresses to new table.
- addrmanUncorrupted.Add(CAddress(addr1), CService("252.5.1.1", 8333));
- addrmanUncorrupted.Add(CAddress(addr2), CService("252.5.1.1", 8333));
- addrmanUncorrupted.Add(CAddress(addr3), CService("252.5.1.1", 8333));
+ addrmanUncorrupted.Add(CAddress(addr1, NODE_NONE), CService("252.5.1.1", 8333));
+ addrmanUncorrupted.Add(CAddress(addr2, NODE_NONE), CService("252.5.1.1", 8333));
+ addrmanUncorrupted.Add(CAddress(addr3, NODE_NONE), CService("252.5.1.1", 8333));
// Test that the de-serialization does not throw an exception.
CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted);
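
The addrman test changes reflect that CAddress no longer defaults its service flags: every construction now passes them explicitly (NODE_NONE here). A small sketch of the adjusted constructor call, assuming the CService/CAddress/ServiceFlags types used in the hunks above; MakeTestAddress is an illustrative helper, not part of the patch:

    #include "netbase.h"   // CService
    #include "protocol.h"  // CAddress, NODE_NONE

    // Build an address record that advertises no services, as the tests now do.
    CAddress MakeTestAddress()
    {
        CService service("252.1.1.1", 7777);   // dummy endpoint, as in the test
        return CAddress(service, NODE_NONE);   // service flags are now explicit
    }
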
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 9bcb07626a..c68320ba8b 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -98,7 +98,7 @@ CBlock
TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& txns, const CScript& scriptPubKey)
{
const CChainParams& chainparams = Params();
- CBlockTemplate *pblocktemplate = CreateNewBlock(chainparams, scriptPubKey);
+ CBlockTemplate *pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
CBlock& block = pblocktemplate->block;
// Replace mempool-selected txns with just coinbase plus passed-in txns:
diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am
index 34fe9e3f13..6c1ec81e63 100644
--- a/src/univalue/Makefile.am
+++ b/src/univalue/Makefile.am
@@ -3,7 +3,7 @@ ACLOCAL_AMFLAGS = -I build-aux/m4
.INTERMEDIATE: $(GENBIN)
include_HEADERS = include/univalue.h
-noinst_HEADERS = lib/univalue_escapes.h
+noinst_HEADERS = lib/univalue_escapes.h lib/univalue_utffilter.h
lib_LTLIBRARIES = libunivalue.la
@@ -73,6 +73,10 @@ TEST_FILES = \
$(TEST_DATA_DIR)/fail35.json \
$(TEST_DATA_DIR)/fail36.json \
$(TEST_DATA_DIR)/fail37.json \
+ $(TEST_DATA_DIR)/fail38.json \
+ $(TEST_DATA_DIR)/fail39.json \
+ $(TEST_DATA_DIR)/fail40.json \
+ $(TEST_DATA_DIR)/fail41.json \
$(TEST_DATA_DIR)/fail3.json \
$(TEST_DATA_DIR)/fail4.json \
$(TEST_DATA_DIR)/fail5.json \
@@ -83,6 +87,7 @@ TEST_FILES = \
$(TEST_DATA_DIR)/pass1.json \
$(TEST_DATA_DIR)/pass2.json \
$(TEST_DATA_DIR)/pass3.json \
- $(TEST_DATA_DIR)/round1.json
+ $(TEST_DATA_DIR)/round1.json \
+ $(TEST_DATA_DIR)/round2.json
EXTRA_DIST=$(TEST_FILES) $(GEN_SRCS)
diff --git a/src/univalue/configure.ac b/src/univalue/configure.ac
index 0515b632bd..93d3ba945d 100644
--- a/src/univalue/configure.ac
+++ b/src/univalue/configure.ac
@@ -1,7 +1,7 @@
m4_define([libunivalue_major_version], [1])
m4_define([libunivalue_minor_version], [1])
-m4_define([libunivalue_micro_version], [1])
-m4_define([libunivalue_interface_age], [1])
+m4_define([libunivalue_micro_version], [2])
+m4_define([libunivalue_interface_age], [2])
# If you need a modifier for the version number.
# Normally empty, but can be used to make "fixup" releases.
m4_define([libunivalue_extraversion], [])
@@ -14,7 +14,7 @@ m4_define([libunivalue_age], [m4_eval(libunivalue_binary_age - libunivalue_inter
m4_define([libunivalue_version], [libunivalue_major_version().libunivalue_minor_version().libunivalue_micro_version()libunivalue_extraversion()])
-AC_INIT([univalue], [1.0.1],
+AC_INIT([univalue], [1.0.2],
[http://github.com/jgarzik/univalue/])
dnl make the compilation flags quiet unless V=1 is used
diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp
index c7516b9628..95bac6958d 100644
--- a/src/univalue/lib/univalue_read.cpp
+++ b/src/univalue/lib/univalue_read.cpp
@@ -6,6 +6,7 @@
#include <vector>
#include <stdio.h>
#include "univalue.h"
+#include "univalue_utffilter.h"
using namespace std;
@@ -174,41 +175,31 @@ enum jtokentype getJsonToken(string& tokenVal, unsigned int& consumed,
raw++; // skip "
string valStr;
+ JSONUTF8StringFilter writer(valStr);
while (*raw) {
- if (*raw < 0x20)
+ if ((unsigned char)*raw < 0x20)
return JTOK_ERR;
else if (*raw == '\\') {
raw++; // skip backslash
switch (*raw) {
- case '"': valStr += "\""; break;
- case '\\': valStr += "\\"; break;
- case '/': valStr += "/"; break;
- case 'b': valStr += "\b"; break;
- case 'f': valStr += "\f"; break;
- case 'n': valStr += "\n"; break;
- case 'r': valStr += "\r"; break;
- case 't': valStr += "\t"; break;
+ case '"': writer.push_back('\"'); break;
+ case '\\': writer.push_back('\\'); break;
+ case '/': writer.push_back('/'); break;
+ case 'b': writer.push_back('\b'); break;
+ case 'f': writer.push_back('\f'); break;
+ case 'n': writer.push_back('\n'); break;
+ case 'r': writer.push_back('\r'); break;
+ case 't': writer.push_back('\t'); break;
case 'u': {
unsigned int codepoint;
if (hatoui(raw + 1, raw + 1 + 4, codepoint) !=
raw + 1 + 4)
return JTOK_ERR;
-
- if (codepoint <= 0x7f)
- valStr.push_back((char)codepoint);
- else if (codepoint <= 0x7FF) {
- valStr.push_back((char)(0xC0 | (codepoint >> 6)));
- valStr.push_back((char)(0x80 | (codepoint & 0x3F)));
- } else if (codepoint <= 0xFFFF) {
- valStr.push_back((char)(0xE0 | (codepoint >> 12)));
- valStr.push_back((char)(0x80 | ((codepoint >> 6) & 0x3F)));
- valStr.push_back((char)(0x80 | (codepoint & 0x3F)));
- }
-
+ writer.push_back_u(codepoint);
raw += 4;
break;
}
@@ -226,11 +217,13 @@ enum jtokentype getJsonToken(string& tokenVal, unsigned int& consumed,
}
else {
- valStr += *raw;
+ writer.push_back(*raw);
raw++;
}
}
+ if (!writer.finalize())
+ return JTOK_ERR;
tokenVal = valStr;
consumed = (raw - rawStart);
return JTOK_STRING;
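
To make the tokenizer change concrete: every literal byte and every decoded \u escape is now routed through a JSONUTF8StringFilter, and the token is rejected unless finalize() confirms the result is complete, valid UTF-8 with no dangling surrogate half. A standalone sketch of that pattern, using only the filter class added below; FilterJsonBytes is an illustrative helper:

    #include <string>
    #include "univalue_utffilter.h"

    // Accepts/rejects a raw byte string the same way the patched tokenizer does.
    static bool FilterJsonBytes(const std::string& in, std::string& out)
    {
        std::string valStr;
        JSONUTF8StringFilter writer(valStr);
        for (size_t i = 0; i < in.size(); ++i)
            writer.push_back((unsigned char)in[i]);  // \u escapes would use push_back_u
        if (!writer.finalize())
            return false;                            // truncated UTF-8 or open surrogate
        out = valStr;
        return true;
    }
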
diff --git a/src/univalue/lib/univalue_utffilter.h b/src/univalue/lib/univalue_utffilter.h
new file mode 100644
index 0000000000..0e330dce9c
--- /dev/null
+++ b/src/univalue/lib/univalue_utffilter.h
@@ -0,0 +1,119 @@
+// Copyright 2016 Wladimir J. van der Laan
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#ifndef UNIVALUE_UTFFILTER_H
+#define UNIVALUE_UTFFILTER_H
+
+#include <string>
+
+/**
+ * Filter that generates and validates UTF-8, as well as collates UTF-16
+ * surrogate pairs as specified in RFC4627.
+ */
+class JSONUTF8StringFilter
+{
+public:
+ JSONUTF8StringFilter(std::string &s):
+ str(s), is_valid(true), codepoint(0), state(0), surpair(0)
+ {
+ }
+ // Write single 8-bit char (may be part of UTF-8 sequence)
+ void push_back(unsigned char ch)
+ {
+ if (state == 0) {
+ if (ch < 0x80) // 7-bit ASCII, fast direct pass-through
+ str.push_back(ch);
+ else if (ch < 0xc0) // Mid-sequence character, invalid in this state
+ is_valid = false;
+ else if (ch < 0xe0) { // Start of 2-byte sequence
+ codepoint = (ch & 0x1f) << 6;
+ state = 6;
+ } else if (ch < 0xf0) { // Start of 3-byte sequence
+ codepoint = (ch & 0x0f) << 12;
+ state = 12;
+ } else if (ch < 0xf8) { // Start of 4-byte sequence
+ codepoint = (ch & 0x07) << 18;
+ state = 18;
+ } else // Reserved, invalid
+ is_valid = false;
+ } else {
+ if ((ch & 0xc0) != 0x80) // Not a continuation, invalid
+ is_valid = false;
+ state -= 6;
+ codepoint |= (ch & 0x3f) << state;
+ if (state == 0)
+ push_back_u(codepoint);
+ }
+ }
+ // Write codepoint directly, possibly collating surrogate pairs
+ void push_back_u(unsigned int codepoint)
+ {
+ if (state) // Only accept full codepoints in open state
+ is_valid = false;
+ if (codepoint >= 0xD800 && codepoint < 0xDC00) { // First half of surrogate pair
+ if (surpair) // Two subsequent surrogate pair openers - fail
+ is_valid = false;
+ else
+ surpair = codepoint;
+ } else if (codepoint >= 0xDC00 && codepoint < 0xE000) { // Second half of surrogate pair
+ if (surpair) { // Open surrogate pair, expect second half
+ // Compute code point from UTF-16 surrogate pair
+ append_codepoint(0x10000 | ((surpair - 0xD800)<<10) | (codepoint - 0xDC00));
+ surpair = 0;
+ } else // Second half doesn't follow a first half - fail
+ is_valid = false;
+ } else {
+ if (surpair) // First half of surrogate pair not followed by second - fail
+ is_valid = false;
+ else
+ append_codepoint(codepoint);
+ }
+ }
+ // Check that we're in a state where the string can be ended
+ // No open sequences, no open surrogate pairs, etc
+ bool finalize()
+ {
+ if (state || surpair)
+ is_valid = false;
+ return is_valid;
+ }
+private:
+ std::string &str;
+ bool is_valid;
+ // Current UTF-8 decoding state
+ unsigned int codepoint;
+ int state; // Top bit to be filled in for next UTF-8 byte, or 0
+
+ // Keep track of the following state to handle the following section of
+ // RFC4627:
+ //
+ // To escape an extended character that is not in the Basic Multilingual
+ // Plane, the character is represented as a twelve-character sequence,
+ // encoding the UTF-16 surrogate pair. So, for example, a string
+ // containing only the G clef character (U+1D11E) may be represented as
+ // "\uD834\uDD1E".
+ //
+ // Two subsequent \u.... may have to be replaced with one actual codepoint.
+ unsigned int surpair; // First half of open UTF-16 surrogate pair, or 0
+
+ void append_codepoint(unsigned int codepoint)
+ {
+ if (codepoint <= 0x7f)
+ str.push_back((char)codepoint);
+ else if (codepoint <= 0x7FF) {
+ str.push_back((char)(0xC0 | (codepoint >> 6)));
+ str.push_back((char)(0x80 | (codepoint & 0x3F)));
+ } else if (codepoint <= 0xFFFF) {
+ str.push_back((char)(0xE0 | (codepoint >> 12)));
+ str.push_back((char)(0x80 | ((codepoint >> 6) & 0x3F)));
+ str.push_back((char)(0x80 | (codepoint & 0x3F)));
+ } else if (codepoint <= 0x1FFFFF) {
+ str.push_back((char)(0xF0 | (codepoint >> 18)));
+ str.push_back((char)(0x80 | ((codepoint >> 12) & 0x3F)));
+ str.push_back((char)(0x80 | ((codepoint >> 6) & 0x3F)));
+ str.push_back((char)(0x80 | (codepoint & 0x3F)));
+ }
+ }
+};
+
+#endif
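
The surrogate handling can be exercised directly: pushing the two halves for U+1D11E (the RFC 4627 G-clef example quoted in the comment above) through push_back_u() must yield the four-byte UTF-8 encoding, and finalize() must fail if the second half never arrives. A small illustrative check against the header as added here:

    #include <cassert>
    #include <string>
    #include "univalue_utffilter.h"

    void SurrogatePairExample()
    {
        std::string out;
        JSONUTF8StringFilter f(out);
        f.push_back_u(0xD834);              // first half of the surrogate pair
        f.push_back_u(0xDD1E);              // second half -> collated to U+1D11E
        assert(f.finalize());
        assert(out == "\xF0\x9D\x84\x9E");  // UTF-8 encoding of U+1D11E

        std::string bad;
        JSONUTF8StringFilter g(bad);
        g.push_back_u(0xD834);              // opener with no second half
        assert(!g.finalize());              // the case fail38.json covers
    }
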
diff --git a/src/univalue/lib/univalue_write.cpp b/src/univalue/lib/univalue_write.cpp
index ceb4cc9166..cfbdad3284 100644
--- a/src/univalue/lib/univalue_write.cpp
+++ b/src/univalue/lib/univalue_write.cpp
@@ -8,8 +8,6 @@
#include "univalue.h"
#include "univalue_escapes.h"
-// TODO: Using UTF8
-
using namespace std;
static string json_escape(const string& inS)
@@ -23,15 +21,8 @@ static string json_escape(const string& inS)
if (escStr)
outS += escStr;
-
- else if (ch < 0x80)
+ else
outS += ch;
-
- else { // TODO handle UTF-8 properly
- char tmpesc[16];
- sprintf(tmpesc, "\\u%04x", ch);
- outS += tmpesc;
- }
}
return outS;
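
With the reader now guaranteeing well-formed UTF-8 in every stored string, the writer can copy bytes >= 0x80 through untouched instead of emitting a bogus per-byte \u00xx escape. A hedged standalone sketch of the resulting escape rule; a couple of inline cases stand in for the generated univalue_escapes.h table, so this is illustrative, not the patch's exact function:

    #include <stdio.h>
    #include <string>

    static std::string json_escape_sketch(const std::string& in)
    {
        std::string out;
        for (size_t i = 0; i < in.size(); ++i) {
            unsigned char ch = (unsigned char)in[i];
            if (ch == '"')        out += "\\\"";
            else if (ch == '\\')  out += "\\\\";
            else if (ch < 0x20) {                       // bare control character
                char esc[8];
                snprintf(esc, sizeof(esc), "\\u%04x", ch);
                out += esc;
            } else {
                out += (char)ch;                        // ASCII and UTF-8 bytes pass through
            }
        }
        return out;
    }
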
diff --git a/src/univalue/test/fail38.json b/src/univalue/test/fail38.json
new file mode 100644
index 0000000000..b245e2e46c
--- /dev/null
+++ b/src/univalue/test/fail38.json
@@ -0,0 +1 @@
+["\ud834"]
diff --git a/src/univalue/test/fail39.json b/src/univalue/test/fail39.json
new file mode 100644
index 0000000000..7c9e263f27
--- /dev/null
+++ b/src/univalue/test/fail39.json
@@ -0,0 +1 @@
+["\udd61"]
diff --git a/src/univalue/test/fail40.json b/src/univalue/test/fail40.json
new file mode 100644
index 0000000000..664dc9e245
--- /dev/null
+++ b/src/univalue/test/fail40.json
@@ -0,0 +1 @@
+["…ก"] \ No newline at end of file
diff --git a/src/univalue/test/fail41.json b/src/univalue/test/fail41.json
new file mode 100644
index 0000000000..0de342a2b5
--- /dev/null
+++ b/src/univalue/test/fail41.json
@@ -0,0 +1 @@
+["๐…"] \ No newline at end of file
diff --git a/src/univalue/test/round2.json b/src/univalue/test/round2.json
new file mode 100644
index 0000000000..b766cccc68
--- /dev/null
+++ b/src/univalue/test/round2.json
@@ -0,0 +1 @@
+["aยงโ– ๐Ž’๐…ก"]
diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp
index 5a052fe92c..05f3842cd1 100644
--- a/src/univalue/test/unitester.cpp
+++ b/src/univalue/test/unitester.cpp
@@ -22,6 +22,7 @@ string srcdir(JSON_TEST_SRC);
static bool test_failed = false;
#define d_assert(expr) { if (!(expr)) { test_failed = true; fprintf(stderr, "%s failed\n", filename.c_str()); } }
+#define f_assert(expr) { if (!(expr)) { test_failed = true; fprintf(stderr, "%s failed\n", __func__); } }
static std::string rtrim(std::string s)
{
@@ -108,6 +109,10 @@ static const char *filenames[] = {
"fail35.json",
"fail36.json",
"fail37.json",
+ "fail38.json", // invalid unicode: only first half of surrogate pair
+ "fail39.json", // invalid unicode: only second half of surrogate pair
+ "fail40.json", // invalid unicode: broken UTF-8
+ "fail41.json", // invalid unicode: unfinished UTF-8
"fail3.json",
"fail4.json", // extra comma
"fail5.json",
@@ -119,14 +124,40 @@ static const char *filenames[] = {
"pass2.json",
"pass3.json",
"round1.json", // round-trip test
+ "round2.json", // unicode
};
+// Test \u handling
+void unescape_unicode_test()
+{
+ UniValue val;
+ bool testResult;
+ // Escaped ASCII (quote)
+ testResult = val.read("[\"\\u0022\"]");
+ f_assert(testResult);
+ f_assert(val[0].get_str() == "\"");
+ // Escaped Basic Plane character, two-byte UTF-8
+ testResult = val.read("[\"\\u0191\"]");
+ f_assert(testResult);
+ f_assert(val[0].get_str() == "\xc6\x91");
+ // Escaped Basic Plane character, three-byte UTF-8
+ testResult = val.read("[\"\\u2191\"]");
+ f_assert(testResult);
+ f_assert(val[0].get_str() == "\xe2\x86\x91");
+ // Escaped Supplementary Plane character U+1d161
+ testResult = val.read("[\"\\ud834\\udd61\"]");
+ f_assert(testResult);
+ f_assert(val[0].get_str() == "\xf0\x9d\x85\xa1");
+}
+
int main (int argc, char *argv[])
{
for (unsigned int fidx = 0; fidx < ARRAY_SIZE(filenames); fidx++) {
runtest_file(filenames[fidx]);
}
+ unescape_unicode_test();
+
return test_failed ? 1 : 0;
}
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 14c2e31d95..d55cc68dc0 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -167,6 +167,11 @@ void ImportScript(const CScript& script, const string& strLabel, bool isRedeemSc
if (!pwalletMain->HaveCScript(script) && !pwalletMain->AddCScript(script))
throw JSONRPCError(RPC_WALLET_ERROR, "Error adding p2sh redeemScript to wallet");
ImportAddress(CBitcoinAddress(CScriptID(script)), strLabel);
+ } else {
+ CTxDestination destination;
+ if (ExtractDestination(script, destination)) {
+ pwalletMain->SetAddressBook(destination, strLabel, "receive");
+ }
}
}
@@ -195,6 +200,8 @@ UniValue importaddress(const UniValue& params, bool fHelp)
"4. p2sh (boolean, optional, default=false) Add the P2SH version of the script as well\n"
"\nNote: This call can take minutes to complete if rescan is true.\n"
"If you have the full public key, you should call importpubkey instead of this.\n"
+ "\nNote: If you import a non-standard raw script in hex form, outputs sending to it will be treated\n"
+ "as change, and not show up in many RPCs.\n"
"\nExamples:\n"
"\nImport a script with rescan\n"
+ HelpExampleCli("importaddress", "\"myscript\"") +
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 387b223589..0a4f06ba88 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -27,7 +27,7 @@ typedef set<pair<const CWalletTx*,unsigned int> > CoinSet;
BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup)
-static CWallet wallet;
+static const CWallet wallet;
static vector<COutput> vCoins;
static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = false, int nInput=0)
@@ -188,11 +188,11 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// empty the wallet and start again, now with fractions of a cent, to test small change avoidance
empty_wallet();
- add_coin(0.1*MIN_CHANGE);
- add_coin(0.2*MIN_CHANGE);
- add_coin(0.3*MIN_CHANGE);
- add_coin(0.4*MIN_CHANGE);
- add_coin(0.5*MIN_CHANGE);
+ add_coin(MIN_CHANGE * 1 / 10);
+ add_coin(MIN_CHANGE * 2 / 10);
+ add_coin(MIN_CHANGE * 3 / 10);
+ add_coin(MIN_CHANGE * 4 / 10);
+ add_coin(MIN_CHANGE * 5 / 10);
// try making 1 * MIN_CHANGE from the 1.5 * MIN_CHANGE
// we'll get change smaller than MIN_CHANGE whatever happens, so can expect MIN_CHANGE exactly
@@ -207,8 +207,8 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount
// if we add more small coins:
- add_coin(0.6*MIN_CHANGE);
- add_coin(0.7*MIN_CHANGE);
+ add_coin(MIN_CHANGE * 6 / 10);
+ add_coin(MIN_CHANGE * 7 / 10);
// and try again to make 1.0 * MIN_CHANGE
BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
@@ -229,9 +229,9 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// sometimes it will fail, and so we use the next biggest coin:
empty_wallet();
- add_coin(0.5 * MIN_CHANGE);
- add_coin(0.6 * MIN_CHANGE);
- add_coin(0.7 * MIN_CHANGE);
+ add_coin(MIN_CHANGE * 5 / 10);
+ add_coin(MIN_CHANGE * 6 / 10);
+ add_coin(MIN_CHANGE * 7 / 10);
add_coin(1111 * MIN_CHANGE);
BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1111 * MIN_CHANGE); // we get the bigger coin
@@ -239,9 +239,9 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// but sometimes it's possible, and we use an exact subset (0.4 + 0.6 = 1.0)
empty_wallet();
- add_coin(0.4 * MIN_CHANGE);
- add_coin(0.6 * MIN_CHANGE);
- add_coin(0.8 * MIN_CHANGE);
+ add_coin(MIN_CHANGE * 4 / 10);
+ add_coin(MIN_CHANGE * 6 / 10);
+ add_coin(MIN_CHANGE * 8 / 10);
add_coin(1111 * MIN_CHANGE);
BOOST_CHECK( wallet.SelectCoinsMinConf(MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE); // we should get the exact amount
@@ -249,17 +249,17 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// test avoiding small change
empty_wallet();
- add_coin(0.05 * MIN_CHANGE);
- add_coin(1 * MIN_CHANGE);
- add_coin(100 * MIN_CHANGE);
+ add_coin(MIN_CHANGE * 5 / 100);
+ add_coin(MIN_CHANGE * 1);
+ add_coin(MIN_CHANGE * 100);
// trying to make 100.01 from these three coins
- BOOST_CHECK( wallet.SelectCoinsMinConf(100.01 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
- BOOST_CHECK_EQUAL(nValueRet, 101.05 * MIN_CHANGE); // we should get all coins
+ BOOST_CHECK(wallet.SelectCoinsMinConf(MIN_CHANGE * 10001 / 100, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE * 10105 / 100); // we should get all coins
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
// but if we try to make 99.9, we should take the bigger of the two small coins to avoid small change
- BOOST_CHECK( wallet.SelectCoinsMinConf(99.9 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(MIN_CHANGE * 9990 / 100, 1, 1, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 101 * MIN_CHANGE);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
@@ -310,7 +310,11 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// add 75 cents in small change. not enough to make 90 cents,
// then try making 90 cents. there are multiple competing "smallest bigger" coins,
// one of which should be picked at random
- add_coin( 5*CENT); add_coin(10*CENT); add_coin(15*CENT); add_coin(20*CENT); add_coin(25*CENT);
+ add_coin(5 * CENT);
+ add_coin(10 * CENT);
+ add_coin(15 * CENT);
+ add_coin(20 * CENT);
+ add_coin(25 * CENT);
fails = 0;
for (int i = 0; i < RANDOM_REPEATS; i++)
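
The coin-selection rewrite above replaces expressions like 0.1*MIN_CHANGE with pure integer arithmetic (MIN_CHANGE * 1 / 10). CAmount is a 64-bit integer, and multiplying by a double detours the amount through floating point, where the conversion back can silently drop a unit; whether a given MIN_CHANGE fraction actually misrounds depends on the constant's value, but the hazard is easy to show with COIN. A small self-contained illustration, with values chosen for the example rather than taken from the tests:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        typedef int64_t CAmount;            // satoshi amounts, as in amount.h
        const CAmount COIN = 100000000;

        // 0.29 is not exactly representable; truncation loses a satoshi here.
        CAmount viaDouble  = 0.29 * COIN;   // typically 28999999 with IEEE-754 doubles
        CAmount viaInteger = COIN * 29 / 100;

        printf("%lld vs %lld\n", (long long)viaDouble, (long long)viaInteger);
        assert(viaInteger == 29000000);     // integer arithmetic is exact
        return 0;
    }
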
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 9faf21591f..723b2eceff 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -42,6 +42,7 @@ bool bSpendZeroConfChange = DEFAULT_SPEND_ZEROCONF_CHANGE;
bool fSendFreeTransactions = DEFAULT_SEND_FREE_TRANSACTIONS;
const char * DEFAULT_WALLET_DAT = "wallet.dat";
+const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
/**
* Fees smaller than this (in satoshi) are considered zero fee (for transaction creation)
@@ -91,7 +92,51 @@ CPubKey CWallet::GenerateNewKey()
bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets
CKey secret;
- secret.MakeNewKey(fCompressed);
+
+ // Create new metadata
+ int64_t nCreationTime = GetTime();
+ CKeyMetadata metadata(nCreationTime);
+
+ // use HD key derivation if HD was enabled during wallet creation
+ if (!hdChain.masterKeyID.IsNull()) {
+ // for now we use a fixed keypath scheme of m/0'/0'/k
+ CKey key; //master key seed (256bit)
+ CExtKey masterKey; //hd master key
+ CExtKey accountKey; //key at m/0'
+ CExtKey externalChainChildKey; //key at m/0'/0'
+ CExtKey childKey; //key at m/0'/0'/<n>'
+
+ // try to get the master key
+ if (!GetKey(hdChain.masterKeyID, key))
+ throw std::runtime_error("CWallet::GenerateNewKey(): Master key not found");
+
+ masterKey.SetMaster(key.begin(), key.size());
+
+ // derive m/0'
+ // use hardened derivation (child keys >= 0x80000000 are hardened after bip32)
+ masterKey.Derive(accountKey, BIP32_HARDENED_KEY_LIMIT);
+
+ // derive m/0'/0'
+ accountKey.Derive(externalChainChildKey, BIP32_HARDENED_KEY_LIMIT);
+
+ // derive child key at next index, skip keys already known to the wallet
+ do
+ {
+ // always derive hardened keys
+ // childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened child-index-range
+ // example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
+ externalChainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ // increment childkey index
+ hdChain.nExternalChainCounter++;
+ } while(HaveKey(childKey.key.GetPubKey().GetID()));
+ secret = childKey.key;
+
+ // update the chain model in the database
+ if (!CWalletDB(strWalletFile).WriteHDChain(hdChain))
+ throw std::runtime_error("CWallet::GenerateNewKey(): Writing HD chain model failed");
+ } else {
+ secret.MakeNewKey(fCompressed);
+ }
// Compressed public keys were introduced in version 0.6.0
if (fCompressed)
@@ -100,9 +145,7 @@ CPubKey CWallet::GenerateNewKey()
CPubKey pubkey = secret.GetPubKey();
assert(secret.VerifyPubKey(pubkey));
- // Create new metadata
- int64_t nCreationTime = GetTime();
- mapKeyMetadata[pubkey.GetID()] = CKeyMetadata(nCreationTime);
+ mapKeyMetadata[pubkey.GetID()] = metadata;
if (!nTimeFirstKey || nCreationTime < nTimeFirstKey)
nTimeFirstKey = nCreationTime;
@@ -1121,6 +1164,37 @@ CAmount CWallet::GetChange(const CTransaction& tx) const
return nChange;
}
+bool CWallet::SetHDMasterKey(const CKey& key)
+{
+ LOCK(cs_wallet);
+
+ // store the key as normal "key"/"ckey" object
+ // in the database
+ // key metadata is not required
+ CPubKey pubkey = key.GetPubKey();
+ if (!AddKeyPubKey(key, pubkey))
+ throw std::runtime_error("CWallet::GenerateNewKey(): AddKey failed");
+
+ // store the keyid (hash160) together with
+ // the child index counter in the database
+ // as a hdchain object
+ CHDChain newHdChain;
+ newHdChain.masterKeyID = pubkey.GetID();
+ SetHDChain(newHdChain, false);
+
+ return true;
+}
+
+bool CWallet::SetHDChain(const CHDChain& chain, bool memonly)
+{
+ LOCK(cs_wallet);
+ if (!memonly && !CWalletDB(strWalletFile).WriteHDChain(chain))
+ throw runtime_error("AddHDChain(): writing chain failed");
+
+ hdChain = chain;
+ return true;
+}
+
int64_t CWalletTx::GetTxTime() const
{
int64_t n = nTimeSmart;
@@ -3135,6 +3209,7 @@ std::string CWallet::GetWalletHelpString(bool showDebug)
strUsage += HelpMessageOpt("-sendfreetransactions", strprintf(_("Send transactions as zero-fee transactions if possible (default: %u)"), DEFAULT_SEND_FREE_TRANSACTIONS));
strUsage += HelpMessageOpt("-spendzeroconfchange", strprintf(_("Spend unconfirmed change when sending transactions (default: %u)"), DEFAULT_SPEND_ZEROCONF_CHANGE));
strUsage += HelpMessageOpt("-txconfirmtarget=<n>", strprintf(_("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)"), DEFAULT_TX_CONFIRM_TARGET));
+ strUsage += HelpMessageOpt("-usehd", _("Use hierarchical deterministic key generation (HD) after bip32. Only has effect during wallet creation/first start") + " " + strprintf(_("(default: %u)"), DEFAULT_USE_HD_WALLET));
strUsage += HelpMessageOpt("-upgradewallet", _("Upgrade wallet to latest format on startup"));
strUsage += HelpMessageOpt("-wallet=<file>", _("Specify wallet file (within data directory)") + " " + strprintf(_("(default: %s)"), DEFAULT_WALLET_DAT));
strUsage += HelpMessageOpt("-walletbroadcast", _("Make the wallet broadcast transactions") + " " + strprintf(_("(default: %u)"), DEFAULT_WALLETBROADCAST));
@@ -3222,6 +3297,13 @@ bool CWallet::InitLoadWallet()
if (fFirstRun)
{
// Create new keyUser and set as default key
+ if (GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET)) {
+ // generate a new master key
+ CKey key;
+ key.MakeNewKey(true);
+ if (!walletInstance->SetHDMasterKey(key))
+ throw std::runtime_error("CWallet::GenerateNewKey(): Storing master key failed");
+ }
CPubKey newDefaultKey;
if (walletInstance->GetKeyFromPool(newDefaultKey)) {
walletInstance->SetDefaultKey(newDefaultKey);
@@ -3231,6 +3313,13 @@ bool CWallet::InitLoadWallet()
walletInstance->SetBestChain(chainActive.GetLocator());
}
+ else if (mapArgs.count("-usehd")) {
+ bool useHD = GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET);
+ if (!walletInstance->hdChain.masterKeyID.IsNull() && !useHD)
+ return InitError(strprintf(_("Error loading %s: You can't disable HD on a already existing HD wallet"), walletFile));
+ if (walletInstance->hdChain.masterKeyID.IsNull() && useHD)
+ return InitError(strprintf(_("Error loading %s: You can't enable HD on a already existing non-HD wallet"), walletFile));
+ }
LogPrintf(" wallet %15dms\n", GetTimeMillis() - nStart);
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 683c901444..7fc6ce5de5 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -57,6 +57,9 @@ static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 2;
static const unsigned int MAX_FREE_TRANSACTION_CREATE_SIZE = 1000;
static const bool DEFAULT_WALLETBROADCAST = true;
+//! if set, all keys will be derived by using BIP32
+static const bool DEFAULT_USE_HD_WALLET = true;
+
extern const char * DEFAULT_WALLET_DAT;
class CBlockIndex;
@@ -574,6 +577,9 @@ private:
void SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator>);
+ /* the hd chain data model (external chain counters) */
+ CHDChain hdChain;
+
public:
/*
* Main wallet lock.
@@ -889,6 +895,12 @@ public:
static bool ParameterInteraction();
bool BackupWallet(const std::string& strDest);
+
+ /* Set the hd chain model (chain child index counters) */
+ bool SetHDChain(const CHDChain& chain, bool memonly);
+
+ /* Set the current hd master key (will reset the chain child index counters) */
+ bool SetHDMasterKey(const CKey& key);
};
/** A key allocated from the key pool. */
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index b5037c9a65..7bfd490950 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -599,6 +599,16 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
return false;
}
}
+ else if (strType == "hdchain")
+ {
+ CHDChain chain;
+ ssValue >> chain;
+ if (!pwallet->SetHDChain(chain, true))
+ {
+ strErr = "Error reading wallet database: SetHDChain failed";
+ return false;
+ }
+ }
} catch (...)
{
return false;
@@ -1003,3 +1013,10 @@ bool CWalletDB::EraseDestData(const std::string &address, const std::string &key
nWalletDBUpdated++;
return Erase(std::make_pair(std::string("destdata"), std::make_pair(address, key)));
}
+
+
+bool CWalletDB::WriteHDChain(const CHDChain& chain)
+{
+ nWalletDBUpdated++;
+ return Write(std::string("hdchain"), chain);
+}
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 00c10ea70f..71b0ff26db 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -40,6 +40,35 @@ enum DBErrors
DB_NEED_REWRITE
};
+/* simple hd chain data model */
+class CHDChain
+{
+public:
+ uint32_t nExternalChainCounter;
+ CKeyID masterKeyID; //!< master key hash160
+
+ static const int CURRENT_VERSION = 1;
+ int nVersion;
+
+ CHDChain() { SetNull(); }
+ ADD_SERIALIZE_METHODS;
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ {
+ READWRITE(this->nVersion);
+ nVersion = this->nVersion;
+ READWRITE(nExternalChainCounter);
+ READWRITE(masterKeyID);
+ }
+
+ void SetNull()
+ {
+ nVersion = CHDChain::CURRENT_VERSION;
+ nExternalChainCounter = 0;
+ masterKeyID.SetNull();
+ }
+};
+
class CKeyMetadata
{
public:
@@ -134,6 +163,9 @@ public:
static bool Recover(CDBEnv& dbenv, const std::string& filename, bool fOnlyKeys);
static bool Recover(CDBEnv& dbenv, const std::string& filename);
+ //! write the hdchain model (external chain child index counter)
+ bool WriteHDChain(const CHDChain& chain);
+
private:
CWalletDB(const CWalletDB&);
void operator=(const CWalletDB&);
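
The CHDChain record introduced here is a plain serializable struct (version, external child counter, master key hash160) written under the "hdchain" key. A hedged round-trip sketch with CDataStream, mirroring what WriteHDChain() persists and ReadKeyValue() loads; the header names and stream constants are assumptions based on the usual wallet code paths, and HDChainRoundTrip is an illustrative helper:

    #include <cassert>
    #include "clientversion.h"   // CLIENT_VERSION
    #include "streams.h"         // CDataStream, SER_DISK
    #include "wallet/walletdb.h" // CHDChain (as added in this diff)

    void HDChainRoundTrip(const CKeyID& masterKeyID)
    {
        CHDChain chain;
        chain.masterKeyID = masterKeyID;
        chain.nExternalChainCounter = 5;     // five external keys handed out so far

        CDataStream ss(SER_DISK, CLIENT_VERSION);
        ss << chain;                         // what CWalletDB::WriteHDChain() persists

        CHDChain restored;
        ss >> restored;                      // what ReadKeyValue() does for "hdchain"
        assert(restored.nExternalChainCounter == chain.nExternalChainCounter);
        assert(restored.masterKeyID == chain.masterKeyID);
    }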