-rw-r--r--  configure.ac | 39
-rw-r--r--  doc/Doxyfile | 2
-rw-r--r--  doc/build-unix.md | 8
-rw-r--r--  doc/build-windows.md | 66
-rw-r--r--  doc/developer-notes.md | 31
-rw-r--r--  doc/release-notes.md | 6
-rwxr-xr-x  qa/rpc-tests/maxuploadtarget.py | 38
-rwxr-xr-x  qa/rpc-tests/multi_rpc.py | 2
-rwxr-xr-x  qa/rpc-tests/p2p-compactblocks.py | 16
-rwxr-xr-x  qa/rpc-tests/p2p-fullblocktest.py | 26
-rwxr-xr-x  qa/rpc-tests/p2p-segwit.py | 98
-rwxr-xr-x  qa/rpc-tests/prioritise_transaction.py | 6
-rwxr-xr-x  qa/rpc-tests/proxy_test.py | 16
-rwxr-xr-x  qa/rpc-tests/pruning.py | 45
-rwxr-xr-x  qa/rpc-tests/rawtransactions.py | 39
-rwxr-xr-x  qa/rpc-tests/sendheaders.py | 11
-rwxr-xr-x  qa/rpc-tests/test_framework/comptool.py | 19
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py | 2
-rwxr-xr-x  qa/rpc-tests/test_framework/test_framework.py | 11
-rw-r--r--  qa/rpc-tests/test_framework/util.py | 46
-rwxr-xr-x  qa/rpc-tests/wallet.py | 2
-rw-r--r--  src/Makefile.am | 7
-rw-r--r--  src/Makefile.bench.include | 4
-rw-r--r--  src/addrdb.cpp | 2
-rw-r--r--  src/addrman.h | 53
-rw-r--r--  src/bench/Examples.cpp | 2
-rw-r--r--  src/bench/base58.cpp | 2
-rw-r--r--  src/bench/bench.cpp | 29
-rw-r--r--  src/bench/bench.h | 10
-rw-r--r--  src/bench/bench_bitcoin.cpp | 2
-rw-r--r--  src/bench/checkblock.cpp | 3
-rw-r--r--  src/bench/coin_selection.cpp | 2
-rw-r--r--  src/bench/perf.cpp | 53
-rw-r--r--  src/bench/perf.h | 37
-rw-r--r--  src/bitcoin-cli.cpp | 32
-rw-r--r--  src/bitcoin-tx.cpp | 196
-rw-r--r--  src/bitcoind.cpp | 26
-rw-r--r--  src/blockencodings.cpp | 18
-rw-r--r--  src/blockencodings.h | 12
-rw-r--r--  src/chainparams.cpp | 2
-rw-r--r--  src/checkpoints.cpp | 2
-rw-r--r--  src/compat.h | 1
-rw-r--r--  src/consensus/merkle.cpp | 6
-rw-r--r--  src/core_io.h | 23
-rw-r--r--  src/core_memusage.h | 4
-rw-r--r--  src/core_read.cpp | 2
-rw-r--r--  src/crypto/ctaes/test.c | 2
-rw-r--r--  src/init.cpp | 114
-rw-r--r--  src/init.h | 25
-rw-r--r--  src/leveldb/.travis.yml | 13
-rw-r--r--  src/leveldb/Makefile | 467
-rw-r--r--  src/leveldb/README | 51
-rw-r--r--  src/leveldb/README.md | 39
-rwxr-xr-x  src/leveldb/build_detect_platform | 2
-rw-r--r--  src/leveldb/db/corruption_test.cc | 2
-rw-r--r--  src/leveldb/db/db_bench.cc | 24
-rw-r--r--  src/leveldb/db/db_impl.cc | 194
-rw-r--r--  src/leveldb/db/db_impl.h | 8
-rw-r--r--  src/leveldb/db/db_test.cc | 32
-rw-r--r--  src/leveldb/db/fault_injection_test.cc | 554
-rw-r--r--  src/leveldb/db/leveldbutil.cc (renamed from src/leveldb/db/leveldb_main.cc) | 0
-rw-r--r--  src/leveldb/db/log_reader.cc | 22
-rw-r--r--  src/leveldb/db/log_reader.h | 5
-rw-r--r--  src/leveldb/db/log_test.cc | 101
-rw-r--r--  src/leveldb/db/log_writer.cc | 17
-rw-r--r--  src/leveldb/db/log_writer.h | 6
-rw-r--r--  src/leveldb/db/memtable.h | 5
-rw-r--r--  src/leveldb/db/recovery_test.cc | 324
-rw-r--r--  src/leveldb/db/skiplist.h | 8
-rw-r--r--  src/leveldb/db/skiplist_test.cc | 2
-rw-r--r--  src/leveldb/db/snapshot.h | 1
-rw-r--r--  src/leveldb/db/version_set.cc | 40
-rw-r--r--  src/leveldb/db/version_set.h | 4
-rw-r--r--  src/leveldb/db/write_batch_internal.h | 1
-rw-r--r--  src/leveldb/doc/index.html | 2
-rw-r--r--  src/leveldb/helpers/memenv/memenv.cc | 13
-rw-r--r--  src/leveldb/helpers/memenv/memenv_test.cc | 13
-rw-r--r--  src/leveldb/include/leveldb/cache.h | 11
-rw-r--r--  src/leveldb/include/leveldb/db.h | 4
-rw-r--r--  src/leveldb/include/leveldb/env.h | 18
-rw-r--r--  src/leveldb/include/leveldb/iterator.h | 2
-rw-r--r--  src/leveldb/include/leveldb/options.h | 6
-rw-r--r--  src/leveldb/include/leveldb/status.h | 6
-rw-r--r--  src/leveldb/port/atomic_pointer.h | 19
-rw-r--r--  src/leveldb/port/port_posix.cc | 1
-rw-r--r--  src/leveldb/table/filter_block.cc | 4
-rw-r--r--  src/leveldb/table/format.cc | 3
-rw-r--r--  src/leveldb/table/iterator_wrapper.h | 3
-rw-r--r--  src/leveldb/table/table.cc | 2
-rw-r--r--  src/leveldb/table/table_test.cc | 20
-rw-r--r--  src/leveldb/util/arena.cc | 6
-rw-r--r--  src/leveldb/util/arena.h | 10
-rw-r--r--  src/leveldb/util/bloom.cc | 2
-rw-r--r--  src/leveldb/util/bloom_test.cc | 3
-rw-r--r--  src/leveldb/util/cache.cc | 136
-rw-r--r--  src/leveldb/util/cache_test.cc | 42
-rw-r--r--  src/leveldb/util/env.cc | 4
-rw-r--r--  src/leveldb/util/env_posix.cc | 13
-rw-r--r--  src/leveldb/util/env_win.cc | 37
-rw-r--r--  src/leveldb/util/options.cc | 2
-rw-r--r--  src/leveldb/util/testutil.h | 10
-rw-r--r--  src/merkleblock.cpp | 6
-rw-r--r--  src/miner.cpp | 18
-rw-r--r--  src/net.cpp | 75
-rw-r--r--  src/net.h | 54
-rw-r--r--  src/net_processing.cpp | 3026
-rw-r--r--  src/net_processing.h | 51
-rw-r--r--  src/netbase.cpp | 48
-rw-r--r--  src/netmessagemaker.h | 36
-rw-r--r--  src/policy/fees.cpp | 7
-rw-r--r--  src/policy/policy.cpp | 4
-rw-r--r--  src/primitives/block.cpp | 2
-rw-r--r--  src/primitives/block.h | 2
-rw-r--r--  src/primitives/transaction.cpp | 23
-rw-r--r--  src/primitives/transaction.h | 165
-rw-r--r--  src/protocol.h | 2
-rw-r--r--  src/qt/addressbookpage.cpp | 2
-rw-r--r--  src/qt/bantablemodel.cpp | 7
-rw-r--r--  src/qt/bantablemodel.h | 3
-rw-r--r--  src/qt/bitcoin.cpp | 31
-rw-r--r--  src/qt/bitcoingui.cpp | 51
-rw-r--r--  src/qt/bitcoingui.h | 19
-rw-r--r--  src/qt/clientmodel.cpp | 1
-rw-r--r--  src/qt/coincontroldialog.cpp | 61
-rw-r--r--  src/qt/coincontroldialog.h | 38
-rw-r--r--  src/qt/forms/sendcoinsdialog.ui | 2
-rw-r--r--  src/qt/guiutil.cpp | 12
-rw-r--r--  src/qt/guiutil.h | 16
-rw-r--r--  src/qt/optionsdialog.cpp | 2
-rw-r--r--  src/qt/optionsmodel.cpp | 2
-rw-r--r--  src/qt/optionsmodel.h | 2
-rw-r--r--  src/qt/overviewpage.cpp | 11
-rw-r--r--  src/qt/overviewpage.h | 3
-rw-r--r--  src/qt/paymentserver.cpp | 45
-rw-r--r--  src/qt/paymentserver.h | 5
-rw-r--r--  src/qt/peertablemodel.cpp | 10
-rw-r--r--  src/qt/peertablemodel.h | 5
-rw-r--r--  src/qt/receivecoinsdialog.cpp | 5
-rw-r--r--  src/qt/receiverequestdialog.cpp | 2
-rw-r--r--  src/qt/recentrequeststablemodel.cpp | 2
-rw-r--r--  src/qt/rpcconsole.cpp | 28
-rw-r--r--  src/qt/rpcconsole.h | 2
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 10
-rw-r--r--  src/qt/signverifymessagedialog.cpp | 2
-rw-r--r--  src/qt/splashscreen.cpp | 1
-rw-r--r--  src/qt/test/rpcnestedtests.cpp | 2
-rw-r--r--  src/qt/transactiondesc.cpp | 32
-rw-r--r--  src/qt/transactionrecord.cpp | 20
-rw-r--r--  src/qt/transactiontablemodel.cpp | 2
-rw-r--r--  src/qt/transactionview.cpp | 6
-rw-r--r--  src/qt/utilitydialog.cpp | 8
-rw-r--r--  src/qt/utilitydialog.h | 2
-rw-r--r--  src/qt/walletmodel.cpp | 16
-rw-r--r--  src/qt/walletmodeltransaction.cpp | 4
-rw-r--r--  src/rest.cpp | 6
-rw-r--r--  src/rpc/blockchain.cpp | 32
-rw-r--r--  src/rpc/mining.cpp | 35
-rw-r--r--  src/rpc/misc.cpp | 2
-rw-r--r--  src/rpc/net.cpp | 3
-rw-r--r--  src/rpc/rawtransaction.cpp | 57
-rw-r--r--  src/scheduler.cpp | 7
-rw-r--r--  src/script/bitcoinconsensus.cpp | 3
-rw-r--r--  src/serialize.h | 59
-rw-r--r--  src/streams.h | 69
-rw-r--r--  src/support/lockedpool.h | 2
-rw-r--r--  src/test/DoS_tests.cpp | 3
-rwxr-xr-x  src/test/bitcoin-util-test.py | 2
-rw-r--r--  src/test/blockencodings_tests.cpp | 63
-rw-r--r--  src/test/bloom_tests.cpp | 26
-rw-r--r--  src/test/coins_tests.cpp | 2
-rw-r--r--  src/test/data/bitcoin-util-test.json | 10
-rw-r--r--  src/test/data/tx_valid.json | 18
-rw-r--r--  src/test/limitedmap_tests.cpp | 2
-rw-r--r--  src/test/main_tests.cpp | 3
-rw-r--r--  src/test/mempool_tests.cpp | 8
-rw-r--r--  src/test/merkle_tests.cpp | 8
-rw-r--r--  src/test/miner_tests.cpp | 41
-rw-r--r--  src/test/pmt_tests.cpp | 4
-rw-r--r--  src/test/policyestimator_tests.cpp | 36
-rw-r--r--  src/test/script_P2SH_tests.cpp | 2
-rw-r--r--  src/test/script_tests.cpp | 12
-rw-r--r--  src/test/serialize_tests.cpp | 6
-rw-r--r--  src/test/sighash_tests.cpp | 8
-rw-r--r--  src/test/sigopcount_tests.cpp | 2
-rw-r--r--  src/test/streams_tests.cpp | 58
-rw-r--r--  src/test/test_bitcoin.cpp | 15
-rw-r--r--  src/test/test_bitcoin.h | 4
-rw-r--r--  src/test/transaction_tests.cpp | 52
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 2
-rw-r--r--  src/test/versionbits_tests.cpp | 2
-rw-r--r--  src/torcontrol.cpp | 2
-rw-r--r--  src/txmempool.cpp | 28
-rw-r--r--  src/txmempool.h | 18
-rw-r--r--  src/util.cpp | 5
-rw-r--r--  src/util.h | 1
-rw-r--r--  src/utiltime.cpp | 3
-rw-r--r--  src/validation.cpp (renamed from src/main.cpp) | 3150
-rw-r--r--  src/validation.h (renamed from src/main.h) | 88
-rw-r--r--  src/wallet/rpcdump.cpp | 8
-rw-r--r--  src/wallet/rpcwallet.cpp | 29
-rw-r--r--  src/wallet/test/accounting_tests.cpp | 4
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 4
-rw-r--r--  src/wallet/walletdb.cpp | 2
-rw-r--r--  src/zmq/zmqnotificationinterface.cpp | 2
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp | 3
207 files changed, 6977 insertions, 4706 deletions
diff --git a/configure.ac b/configure.ac
index 705327e816..7357e4ec9f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -9,7 +9,7 @@ define(_COPYRIGHT_YEAR, 2016)
define(_COPYRIGHT_HOLDERS,[The %s developers])
define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core]])
AC_INIT([Bitcoin Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[https://github.com/bitcoin/bitcoin/issues],[bitcoin],[https://bitcoincore.org/])
-AC_CONFIG_SRCDIR([src/main.cpp])
+AC_CONFIG_SRCDIR([src/validation.cpp])
AC_CONFIG_HEADERS([src/config/bitcoin-config.h])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_MACRO_DIR([build-aux/m4])
@@ -45,7 +45,6 @@ else
CXXFLAGS_overridden=no
fi
AC_PROG_CXX
-m4_ifdef([AC_PROG_OBJCXX],[AC_PROG_OBJCXX])
dnl By default, libtool for mingw refuses to link static libs into a dll for
dnl fear of mixing pic/non-pic objects, and import/export complications. Since
@@ -60,6 +59,15 @@ AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory])
dnl Check if -latomic is required for <std::atomic>
CHECK_ATOMIC
+dnl Unless the user specified OBJCXX, force it to be the same as CXX. This ensures
+dnl that we get the same -std flags for both.
+m4_ifdef([AC_PROG_OBJCXX],[
+if test "x${OBJCXX+set}" = "x"; then
+ OBJCXX="${CXX}"
+fi
+AC_PROG_OBJCXX
+])
+
dnl Libtool init checks.
LT_INIT([pic-only])
@@ -504,8 +512,6 @@ if test x$TARGET_OS = xdarwin; then
fi
AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h])
-AC_SEARCH_LIBS([getaddrinfo_a], [anl], [AC_DEFINE(HAVE_GETADDRINFO_A, 1, [Define this symbol if you have getaddrinfo_a])])
-AC_SEARCH_LIBS([inet_pton], [nsl resolv], [AC_DEFINE(HAVE_INET_PTON, 1, [Define this symbol if you have inet_pton])])
AC_CHECK_DECLS([strnlen])
@@ -1126,3 +1132,28 @@ case ${OS} in
mv qa/pull-tester/tests_config-2.py qa/pull-tester/tests_config.py
;;
esac
+
+echo
+echo "Options used to compile and link:"
+echo " with wallet = $enable_wallet"
+echo " with gui / qt = $bitcoin_enable_qt"
+if test x$bitcoin_enable_qt != xno; then
+ echo " qt version = $bitcoin_qt_got_major_vers"
+ echo " with qr = $use_qr"
+fi
+echo " with zmq = $use_zmq"
+echo " with test = $use_tests"
+echo " with bench = $use_bench"
+echo " with upnp = $use_upnp"
+echo " debug enabled = $enable_debug"
+echo
+echo " target os = $TARGET_OS"
+echo " build os = $BUILD_OS"
+echo
+echo " CC = $CC"
+echo " CFLAGS = $CFLAGS"
+echo " CPPFLAGS = $CPPFLAGS"
+echo " CXX = $CXX"
+echo " CXXFLAGS = $CXXFLAGS"
+echo " LDFLAGS = $LDFLAGS"
+echo
diff --git a/doc/Doxyfile b/doc/Doxyfile
index a0cbf7139a..ef55acdbc3 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
-PROJECT_NAME = Bitcoin
+PROJECT_NAME = "Bitcoin Core"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
diff --git a/doc/build-unix.md b/doc/build-unix.md
index ba7b9cd18d..5b0a38457e 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -96,13 +96,13 @@ pass `--with-incompatible-bdb` to configure.
See the section "Disable-wallet mode" to build Bitcoin Core without wallet.
-Optional:
+Optional (see --with-miniupnpc and --enable-upnp-default):
- sudo apt-get install libminiupnpc-dev (see --with-miniupnpc and --enable-upnp-default)
+ sudo apt-get install libminiupnpc-dev
-ZMQ dependencies:
+ZMQ dependencies (provides ZMQ API 4.x):
- sudo apt-get install libzmq3-dev (provides ZMQ API 4.x)
+ sudo apt-get install libzmq3-dev
Dependencies for the GUI: Ubuntu & Debian
-----------------------------------------
diff --git a/doc/build-windows.md b/doc/build-windows.md
index 044356830a..b6a047b9c3 100644
--- a/doc/build-windows.md
+++ b/doc/build-windows.md
@@ -7,16 +7,18 @@ Most developers use cross-compilation from Ubuntu to build executables for
Windows. This is also used to build the release binaries.
While there are potentially a number of ways to build on Windows (for example using msys / mingw-w64),
-using the Windows Subsystem For Linux is the most straight forward. If you are building with
-an alternative method, please contribute the instructions here for others who are running versions
+using the Windows Subsystem For Linux is the most straightforward. If you are building with
+another method, please contribute the instructions here for others who are running versions
of Windows that are not compatible with the Windows Subsystem for Linux.
-Compiling with the Windows Subsystem For Linux
--------------------
+Compiling with Windows Subsystem For Linux
+-------------------------------------------
-With Windows 10, Microsoft has released a new feature named the
-[Windows Subsystem for Linux](https://msdn.microsoft.com/commandline/wsl/about). This feature allows you to run a bash shell directly on Windows in an Ubuntu based
-environment. Within this environment you can cross compile for Windows without the need for a separate Linux VM or Server.
+With Windows 10, Microsoft has released a new feature named the [Windows
+Subsystem for Linux](https://msdn.microsoft.com/commandline/wsl/about). This
+feature allows you to run a bash shell directly on Windows in an Ubuntu based
+environment. Within this environment you can cross compile for Windows without
+the need for a separate Linux VM or Server.
This feature is not supported in versions of Windows prior to Windows 10 or on Windows Server SKUs.
@@ -37,9 +39,6 @@ To get the bash shell, you must first activate the feature in Windows.
* Create a new UNIX user account (this is a separate account from your Windows account)
After the bash shell is active, you can follow the instructions below for Windows 64-bit Cross-compilation.
-When building dependencies within the 'depends' folder, you may encounter an error building
-the protobuf dependency. If this occurs, re-run the command with sudo. This is likely
-a bug with the Windows Subsystem for Linux feature and may be fixed with a future update.
Cross-compilation
-------------------
@@ -48,28 +47,55 @@ These steps can be performed on, for example, an Ubuntu VM. The depends system
will also work on other Linux distributions, however the commands for
installing the toolchain will be different.
-Make sure you install the build requirements mentioned in
-[build-unix.md](/doc/build-unix.md).
-Then, install the toolchains and curl:
+First, install the general dependencies:
+
+ sudo apt-get install build-essential libtool autotools-dev automake pkg-config bsdmainutils curl
+
+A host toolchain (`build-essential`) is necessary because some dependency
+packages (such as `protobuf`) need to build host utilities that are used in the
+build process.
+
+## Building for 64-bit Windows
+
+To build executables for Windows 64-bit, install the following dependencies:
- sudo apt-get install g++-mingw-w64-i686 mingw-w64-i686-dev g++-mingw-w64-x86-64 mingw-w64-x86-64-dev curl
+ sudo apt-get install g++-mingw-w64-x86-64 mingw-w64-x86-64-dev
-To build executables for Windows 32-bit:
+Then build using:
cd depends
- make HOST=i686-w64-mingw32 -j4
+ make HOST=x86_64-w64-mingw32 -j4
cd ..
./autogen.sh # not required when building from tarball
- ./configure --prefix=`pwd`/depends/i686-w64-mingw32
+ CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure --prefix=/
make
-To build executables for Windows 64-bit:
+## Building for 32-bit Windows
+
+To build executables for Windows 32-bit, install the following dependencies:
+
+ sudo apt-get install g++-mingw-w64-i686 mingw-w64-i686-dev
+
+Then build using:
cd depends
- make HOST=x86_64-w64-mingw32 -j4
+ make HOST=i686-w64-mingw32 -j4
cd ..
./autogen.sh # not required when building from tarball
- ./configure --prefix=`pwd`/depends/x86_64-w64-mingw32
+ CONFIG_SITE=$PWD/depends/i686-w64-mingw32/share/config.site ./configure --prefix=/
make
+## Depends system
+
For further documentation on the depends system see [README.md](../depends/README.md) in the depends directory.
+
+Installation
+-------------
+
+After building using the Windows Subsystem for Linux it can be useful to copy the
+compiled executables to a directory on the Windows drive, in the same directory
+structure as they appear in the release `.zip` archive. The following command
+installs to `c:\workspace\bitcoin`, for example:
+
+ make install DESTDIR=/mnt/c/workspace/bitcoin
+
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index b0794e6d30..ba03579e86 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -408,6 +408,37 @@ GUI
should not interact with the user. That's where View classes come in. The converse also
holds: try to not directly access core data structures from Views.
+Subtrees
+----------
+
+Several parts of the repository are subtrees of software maintained elsewhere.
+
+Some of these are maintained by active developers of Bitcoin Core, in which case changes should probably go
+directly upstream without being PRed directly against the project. They will be merged back in the next
+subtree merge.
+
+Others are external projects without a tight relationship with our project. Changes to these should also
+be sent upstream but bugfixes may also be prudent to PR against Bitcoin Core so that they can be integrated
+quickly. Cosmetic changes should be purely taken upstream.
+
+There is a tool in contrib/devtools/git-subtree-check.sh to check a subtree directory for consistency with
+its upstream repository.
+
+Current subtrees include:
+
+- src/leveldb
+ - Upstream at https://github.com/google/leveldb ; Maintained by Google, but open important PRs to Core to avoid delay
+
+- src/libsecp256k1
+ - Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintained by Core contributors.
+
+- src/crypto/ctaes
+ - Upstream at https://github.com/bitcoin-core/ctaes ; actively maintained by Core contributors.
+
+- src/univalue
+ - Upstream at https://github.com/jgarzik/univalue ; report important PRs to Core to avoid delay.
+
+
Git and github tips
---------------------
diff --git a/doc/release-notes.md b/doc/release-notes.md
index f511fee22e..fe7f69d1f8 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -53,11 +53,15 @@ Removal of Priority Estimation
- Estimation of "priority" needed for a transaction to be included within a target
number of blocks has been removed. The rpc calls are deprecated and will either
- return -1 or 1e24 appropriately. The format for fee_estimates.dat has also
+ return -1 or 1e24 appropriately. The format for `fee_estimates.dat` has also
changed to no longer save these priority estimates. It will automatically be
converted to the new format which is not readable by prior versions of the
software.
+- The concept of "priority" transactions is planned to be removed in the next
+ major version. To prepare for this, the default for the rate limit of priority
+ transactions (`-limitfreerelay`) has been set to `0` kB/minute.
+
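As a rough illustration of the deprecation described above (not part of this patch), a regtest
check against the deprecated estimators could look like the sketch below; the RPC names
`estimatepriority`/`estimatesmartpriority` and the exact sentinel values are assumptions based
on the 0.13 API, not something this diff confirms:

    # hypothetical sketch, written against the qa/rpc-tests framework
    node = self.nodes[0]
    assert_equal(node.estimatepriority(6), -1)      # assumed: estimator now always returns -1
    smart = node.estimatesmartpriority(6)
    assert_equal(smart["priority"], 1e24)           # assumed sentinel meaning "infinite" priority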
0.14.0 Change log
=================
diff --git a/qa/rpc-tests/maxuploadtarget.py b/qa/rpc-tests/maxuploadtarget.py
index d0e9fe9a3f..83168a7ce7 100755
--- a/qa/rpc-tests/maxuploadtarget.py
+++ b/qa/rpc-tests/maxuploadtarget.py
@@ -81,49 +81,16 @@ class TestNode(NodeConnCB):
class MaxUploadTest(BitcoinTestFramework):
- def add_options(self, parser):
- parser.add_option("--testbinary", dest="testbinary",
- default=os.getenv("BITCOIND", "bitcoind"),
- help="bitcoind binary to test")
-
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
- self.utxo = []
- self.txouts = gen_return_txouts()
-
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=800", "-blockmaxsize=999000"]))
- def mine_full_block(self, node, address):
- # Want to create a full block
- # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
- for j in range(14):
- if len(self.utxo) < 14:
- self.utxo = node.listunspent()
- inputs=[]
- outputs = {}
- t = self.utxo.pop()
- inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
- remchange = t["amount"] - Decimal("0.001000")
- outputs[address]=remchange
- # Create a basic transaction that will send change back to ourself after account for a fee
- # And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
- # of txouts is stored and is the only thing we overwrite from the original transaction
- rawtx = node.createrawtransaction(inputs, outputs)
- newtx = rawtx[0:92]
- newtx = newtx + self.txouts
- newtx = newtx + rawtx[94:]
- # Appears to be ever so slightly faster to sign with SIGHASH_NONE
- signresult = node.signrawtransaction(newtx,None,None,"NONE")
- txid = node.sendrawtransaction(signresult["hex"], True)
- # Mine a full sized block which will be these transactions we just created
- node.generate(1)
-
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
@@ -151,7 +118,7 @@ class MaxUploadTest(BitcoinTestFramework):
# Test logic begins here
# Now mine a big block
- self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
+ mine_large_block(self.nodes[0])
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
@@ -162,11 +129,10 @@ class MaxUploadTest(BitcoinTestFramework):
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
- self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
+ mine_large_block(self.nodes[0])
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
- new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
diff --git a/qa/rpc-tests/multi_rpc.py b/qa/rpc-tests/multi_rpc.py
index cdeb94caa0..95d9090ce2 100755
--- a/qa/rpc-tests/multi_rpc.py
+++ b/qa/rpc-tests/multi_rpc.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
-# Test mulitple rpc user config option rpcauth
+# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py
index 1b4c8d90e7..ab4b809ded 100755
--- a/qa/rpc-tests/p2p-compactblocks.py
+++ b/qa/rpc-tests/p2p-compactblocks.py
@@ -27,6 +27,7 @@ class TestNode(SingleNodeConnCB):
self.last_cmpctblock = None
self.block_announced = False
self.last_getdata = None
+ self.last_getheaders = None
self.last_getblocktxn = None
self.last_block = None
self.last_blocktxn = None
@@ -64,6 +65,9 @@ class TestNode(SingleNodeConnCB):
def on_getdata(self, conn, message):
self.last_getdata = message
+ def on_getheaders(self, conn, message):
+ self.last_getheaders = message
+
def on_getblocktxn(self, conn, message):
self.last_getblocktxn = message
@@ -186,12 +190,15 @@ class CompactBlocksTest(BitcoinTestFramework):
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
- node.generate(1)
- got_message = wait_until(lambda: peer.block_announced, timeout=30)
+ block_hash = int(node.generate(1)[0], 16)
+ peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
- assert(got_message)
+
with mininode_lock:
- assert(predicate(peer))
+ assert predicate(peer), (
+ "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
+ block_hash, peer.last_cmpctblock, peer.last_inv))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
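The `peer.wait_for_block_announcement()` helper used above is not shown in this hunk; a minimal
sketch of such a helper, assuming the TestNode tracks the hashes it has seen announced in a set
(the name `announced_blockhashes` is hypothetical here), could be:

    def wait_for_block_announcement(self, block_hash, timeout=30):
        # Poll until this peer has seen block_hash announced, regardless of whether
        # the announcement arrived via headers, inv or cmpctblock.
        def received_hash():
            return (block_hash in self.announced_blockhashes)
        return wait_until(received_hash, timeout=timeout)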
@@ -393,6 +400,9 @@ class CompactBlocksTest(BitcoinTestFramework):
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
+ success = wait_until(lambda: test_node.last_getheaders is not None, timeout=30)
+ assert(success)
+ test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
diff --git a/qa/rpc-tests/p2p-fullblocktest.py b/qa/rpc-tests/p2p-fullblocktest.py
index 9aee81164f..e4b889d761 100755
--- a/qa/rpc-tests/p2p-fullblocktest.py
+++ b/qa/rpc-tests/p2p-fullblocktest.py
@@ -351,7 +351,7 @@ class FullBlockTest(ComparisonTestFramework):
block(22, spend=out[5])
yield rejected()
- # Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
+ # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
@@ -359,24 +359,24 @@ class FullBlockTest(ComparisonTestFramework):
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
- script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
+ script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
- assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
+ assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
- script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
+ script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
- assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
+ assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
@@ -523,12 +523,12 @@ class FullBlockTest(ComparisonTestFramework):
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
- while(total_size < MAX_BLOCK_SIZE):
+ while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
- if total_size >= MAX_BLOCK_SIZE:
+ if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
@@ -877,7 +877,7 @@ class FullBlockTest(ComparisonTestFramework):
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
- # the block is > MAX_BLOCK_SIZE with the bloated varint, but <= MAX_BLOCK_SIZE without the bloated varint,
+ # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
@@ -901,12 +901,12 @@ class FullBlockTest(ComparisonTestFramework):
tx = CTransaction()
# use canonical serialization to calculate size
- script_length = MAX_BLOCK_SIZE - len(b64a.normal_serialize()) - 69
+ script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
- assert_equal(len(b64a.serialize()), MAX_BLOCK_SIZE + 8)
+ assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
@@ -916,7 +916,7 @@ class FullBlockTest(ComparisonTestFramework):
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
- assert_equal(len(b64.serialize()), MAX_BLOCK_SIZE)
+ assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
@@ -1250,12 +1250,12 @@ class FullBlockTest(ComparisonTestFramework):
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
- script_length = MAX_BLOCK_SIZE - len(b.serialize()) - 69
+ script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
- assert_equal(len(b.serialize()), MAX_BLOCK_SIZE)
+ assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
diff --git a/qa/rpc-tests/p2p-segwit.py b/qa/rpc-tests/p2p-segwit.py
index 09ab1b80fc..51adec5cae 100755
--- a/qa/rpc-tests/p2p-segwit.py
+++ b/qa/rpc-tests/p2p-segwit.py
@@ -64,6 +64,9 @@ class TestNode(NodeConnCB):
self.getdataset.add(inv.hash)
self.last_getdata = message
+ def on_getheaders(self, conn, message):
+ self.last_getheaders = message
+
def on_pong(self, conn, message):
self.last_pong = message
@@ -97,6 +100,10 @@ class TestNode(NodeConnCB):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
+ def wait_for_getheaders(self, timeout=60):
+ test_function = lambda: self.last_getheaders != None
+ self.sync(test_function, timeout)
+
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
@@ -111,12 +118,15 @@ class TestNode(NodeConnCB):
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
+ self.last_getheaders = None
+ msg = msg_headers()
+ msg.headers = [ CBlockHeader(block) ]
if use_header:
- msg = msg_headers()
- msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
+ self.wait_for_getheaders()
+ self.send_message(msg)
self.wait_for_getdata()
return
@@ -185,11 +195,6 @@ class SegWitTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
- def add_options(self, parser):
- parser.add_option("--oldbinary", dest="oldbinary",
- default=None,
- help="pre-segwit bitcoind binary for upgrade testing")
-
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
@@ -197,12 +202,9 @@ class SegWitTest(BitcoinTestFramework):
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
- # If an old bitcoind is given, do the upgrade-after-activation test.
- self.test_upgrade = False
- if (self.options.oldbinary != None):
- self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1"], binary=self.options.oldbinary))
- connect_nodes(self.nodes[0], 2)
- self.test_upgrade = True
+ # Disable segwit's bip9 parameter to simulate upgrading after activation.
+ self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
+ connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
@@ -488,7 +490,7 @@ class SegWitTest(BitcoinTestFramework):
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
- assert(get_virtual_size(block) > MAX_BLOCK_SIZE)
+ assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
@@ -497,7 +499,7 @@ class SegWitTest(BitcoinTestFramework):
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
- assert(get_virtual_size(block) < MAX_BLOCK_SIZE)
+ assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
@@ -562,10 +564,10 @@ class SegWitTest(BitcoinTestFramework):
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
- additional_bytes = (MAX_BLOCK_SIZE - vsize)*4
+ additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
- # Add some more bytes to each input until we hit MAX_BLOCK_SIZE+1
+ # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
@@ -575,7 +577,7 @@ class SegWitTest(BitcoinTestFramework):
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
- assert_equal(vsize, MAX_BLOCK_SIZE + 1)
+ assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
@@ -588,7 +590,7 @@ class SegWitTest(BitcoinTestFramework):
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
- assert(get_virtual_size(block) == MAX_BLOCK_SIZE)
+ assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
@@ -1167,7 +1169,7 @@ class SegWitTest(BitcoinTestFramework):
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
- sync_mempools(self.nodes)
+ sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
@@ -1423,7 +1425,7 @@ class SegWitTest(BitcoinTestFramework):
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
- if (get_virtual_size(block) > MAX_BLOCK_SIZE - 1000):
+ if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
@@ -1696,19 +1698,53 @@ class SegWitTest(BitcoinTestFramework):
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
- block_version = (self.nodes[0].getblocktemplate())['version']
- assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
+ # Node0 is segwit aware, node2 is not.
+ for node in [self.nodes[0], self.nodes[2]]:
+ gbt_results = node.getblocktemplate()
+ block_version = gbt_results['version']
+ # If we're not indicating segwit support, we should not be signalling
+ # for segwit activation, nor should we get a witness commitment.
+ assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
+ assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
- self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
+ sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
-
- block_version = self.nodes[0].getblocktemplate({"rules" : ["segwit"]})['version']
- assert(block_version & (1 << VB_WITNESS_BIT) != 0)
- self.nodes[0].setmocktime(0) # undo mocktime
+ self.nodes[2].setmocktime(int(time.time())+10)
+
+ for node in [self.nodes[0], self.nodes[2]]:
+ gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
+ block_version = gbt_results['version']
+ if node == self.nodes[2]:
+ # If this is a non-segwit node, we should still not get a witness
+ # commitment, nor a version bit signalling segwit.
+ assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
+ assert('default_witness_commitment' not in gbt_results)
+ else:
+ # For segwit-aware nodes, check the version bit and the witness
+ # commitment are correct.
+ assert(block_version & (1 << VB_WITNESS_BIT) != 0)
+ assert('default_witness_commitment' in gbt_results)
+ witness_commitment = gbt_results['default_witness_commitment']
+
+ # TODO: this duplicates some code from blocktools.py, would be nice
+ # to refactor.
+ # Check that default_witness_commitment is present.
+ block = CBlock()
+ witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
+ check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
+ from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
+ output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
+ script = CScript([OP_RETURN, output_data])
+ assert_equal(witness_commitment, bytes_to_hex_str(script))
+
+ # undo mocktime
+ self.nodes[0].setmocktime(0)
+ self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
@@ -1948,6 +1984,7 @@ class SegWitTest(BitcoinTestFramework):
# Advance to segwit being 'started'
self.advance_to_segwit_started()
+ sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
@@ -1990,10 +2027,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
- if self.test_upgrade:
- self.test_upgrade_after_activation(self.nodes[2], 2)
- else:
- print("\tSkipping upgrade-after-activation test (use --oldbinary to enable)")
+ self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
diff --git a/qa/rpc-tests/prioritise_transaction.py b/qa/rpc-tests/prioritise_transaction.py
index e1771231c0..85afeab2e3 100755
--- a/qa/rpc-tests/prioritise_transaction.py
+++ b/qa/rpc-tests/prioritise_transaction.py
@@ -9,7 +9,7 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
-from test_framework.mininode import COIN, MAX_BLOCK_SIZE
+from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
@@ -42,7 +42,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
- # MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create
+ # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
@@ -50,7 +50,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
- assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count
+ assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined (lower
diff --git a/qa/rpc-tests/proxy_test.py b/qa/rpc-tests/proxy_test.py
index 27160cae07..9ccc0ffbb0 100755
--- a/qa/rpc-tests/proxy_test.py
+++ b/qa/rpc-tests/proxy_test.py
@@ -4,10 +4,16 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
+import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
+from test_framework.util import (
+ PORT_MIN,
+ PORT_RANGE,
+ start_nodes,
+ assert_equal,
+)
from test_framework.netutil import test_ipv6_local
'''
Test plan:
@@ -33,6 +39,8 @@ addnode connect to onion
addnode connect to generic DNS name
'''
+RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
+
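As a worked example of the port arithmetic above (PORT_MIN and PORT_RANGE are framework
constants whose values are assumed here, not shown in this diff):

    PORT_MIN, PORT_RANGE = 11000, 5000        # assumed defaults from test_framework/util.py
    RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE   # = 21000, past the p2p ports (11000-15999)
                                              #   and the rpc ports (16000-20999)
    assert RANGE_BEGIN == 21000
    # conf1, conf2 and conf3 then bind near 21000, 22000 and 23000 (plus pid % 1000),
    # keeping the proxies clear of every node's p2p and rpc port.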
class ProxyTest(BitcoinTestFramework):
def __init__(self):
@@ -44,19 +52,19 @@ class ProxyTest(BitcoinTestFramework):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
- self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
+ self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
- self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
+ self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
- self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
+ self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py
index 287dbc776e..6635b0dff2 100755
--- a/qa/rpc-tests/pruning.py
+++ b/qa/rpc-tests/pruning.py
@@ -24,10 +24,6 @@ class PruneTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
- self.utxo = []
- self.address = ["",""]
- self.txouts = gen_return_txouts()
-
def setup_network(self):
self.nodes = []
self.is_network_split = False
@@ -40,12 +36,6 @@ class PruneTest(BitcoinTestFramework):
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
- self.address[0] = self.nodes[0].getnewaddress()
- self.address[1] = self.nodes[1].getnewaddress()
-
- # Determine default relay fee
- self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
@@ -58,7 +48,7 @@ class PruneTest(BitcoinTestFramework):
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
- self.mine_full_block(self.nodes[0], self.address[0])
+ mine_large_block(self.nodes[0])
sync_blocks(self.nodes[0:3])
@@ -70,7 +60,7 @@ class PruneTest(BitcoinTestFramework):
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
- self.mine_full_block(self.nodes[0],self.address[0])
+ mine_large_block(self.nodes[0])
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
@@ -95,17 +85,15 @@ class PruneTest(BitcoinTestFramework):
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
- self.utxo = self.nodes[1].listunspent()
for i in range(24):
if j == 0:
- self.mine_full_block(self.nodes[1],self.address[1])
+ mine_large_block(self.nodes[1])
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
- self.utxo = self.nodes[0].listunspent()
for i in range(25):
- self.mine_full_block(self.nodes[0],self.address[0])
+ mine_large_block(self.nodes[0])
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
@@ -217,31 +205,6 @@ class PruneTest(BitcoinTestFramework):
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
- def mine_full_block(self, node, address):
- # Want to create a full block
- # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
- for j in range(14):
- if len(self.utxo) < 14:
- self.utxo = node.listunspent()
- inputs=[]
- outputs = {}
- t = self.utxo.pop()
- inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
- remchange = t["amount"] - 100*self.relayfee # Fee must be above min relay rate for 66kb tx
- outputs[address]=remchange
- # Create a basic transaction that will send change back to ourself after account for a fee
- # And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
- # of txouts is stored and is the only thing we overwrite from the original transaction
- rawtx = node.createrawtransaction(inputs, outputs)
- newtx = rawtx[0:92]
- newtx = newtx + self.txouts
- newtx = newtx + rawtx[94:]
- # Appears to be ever so slightly faster to sign with SIGHASH_NONE
- signresult = node.signrawtransaction(newtx,None,None,"NONE")
- txid = node.sendrawtransaction(signresult["hex"], True)
- # Mine a full sized block which will be these transactions we just created
- node.generate(1)
-
def run_test(self):
print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
diff --git a/qa/rpc-tests/rawtransactions.py b/qa/rpc-tests/rawtransactions.py
index ab6d2e8def..33a6f2b0cc 100755
--- a/qa/rpc-tests/rawtransactions.py
+++ b/qa/rpc-tests/rawtransactions.py
@@ -2,11 +2,15 @@
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""rawtranscation RPCs QA test.
-#
-# Test re-org scenarios with a mempool that contains transactions
-# that spend (directly or indirectly) coinbase transactions.
-#
+# Tests the following RPCs:
+# - createrawtransaction
+# - signrawtransaction
+# - sendrawtransaction
+# - decoderawtransaction
+# - getrawtransaction
+"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
@@ -138,6 +142,33 @@ class RawTransactionsTest(BitcoinTestFramework):
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
+ # getrawtransaction tests
+ # 1. valid parameters - only supply txid
+ txHash = rawTx["hash"]
+ assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
+
+ # 2. valid parameters - supply txid and 0 for non-verbose
+ assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
+
+ # 3. valid parameters - supply txid and False for non-verbose
+ assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
+
+ # 4. valid parameters - supply txid and 1 for verbose.
+ # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
+ assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
+
+ # 5. valid parameters - supply txid and True for verbose
+ assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
+
+ # 6. invalid parameters - supply txid and string "Flase"
+ assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, "Flase")
+
+ # 7. invalid parameters - supply txid and empty array
+ assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, [])
+
+ # 8. invalid parameters - supply txid and empty dict
+ assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, {})
+
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
diff --git a/qa/rpc-tests/sendheaders.py b/qa/rpc-tests/sendheaders.py
index 81b2442e6a..37b98c576e 100755
--- a/qa/rpc-tests/sendheaders.py
+++ b/qa/rpc-tests/sendheaders.py
@@ -348,14 +348,13 @@ class SendHeadersTest(BitcoinTestFramework):
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
- test_node.wait_for_getdata([tip], timeout=5)
+ test_node.wait_for_getheaders(timeout=5)
+ # Should have received a getheaders now
+ test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
- inv_node.send_block_inv(tip)
- # Should have received a getheaders as well!
- test_node.send_header_for_blocks(blocks)
- test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
- [ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
+ [ inv_node.send_block_inv(x.sha256) for x in blocks ]
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
inv_node.sync_with_ping()
else:
# Announce via headers
diff --git a/qa/rpc-tests/test_framework/comptool.py b/qa/rpc-tests/test_framework/comptool.py
index 7c92d3f828..17679fc7e1 100755
--- a/qa/rpc-tests/test_framework/comptool.py
+++ b/qa/rpc-tests/test_framework/comptool.py
@@ -111,6 +111,11 @@ class TestNode(NodeConnCB):
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
+ def send_header(self, header):
+ m = msg_headers()
+ m.headers.append(header)
+ self.conn.send_message(m)
+
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
@@ -345,8 +350,16 @@ class TestManager(object):
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
- [ c.cb.send_inv(block) for c in self.connections ]
- self.sync_blocks(block.sha256, 1)
+ # if we expect success, send inv and sync every block
+ # if we expect failure, just push the block and see what happens.
+ if outcome == True:
+ [ c.cb.send_inv(block) for c in self.connections ]
+ self.sync_blocks(block.sha256, 1)
+ else:
+ [ c.send_message(msg_block(block)) for c in self.connections ]
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
@@ -354,6 +367,8 @@ class TestManager(object):
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
+ [ c.cb.send_header(block_header) for c in self.connections ]
+
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index 495c6bdf35..91daa4ab1f 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -44,7 +44,7 @@ MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
-MAX_BLOCK_SIZE = 1000000
+MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py
index e6d3e9ab9a..98c4f6070b 100755
--- a/qa/rpc-tests/test_framework/test_framework.py
+++ b/qa/rpc-tests/test_framework/test_framework.py
@@ -172,7 +172,16 @@ class BitcoinTestFramework(object):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
-
+ if os.getenv("PYTHON_DEBUG", ""):
+ # Dump the end of the debug logs, to aid in debugging rare
+ # travis failures.
+ import glob
+ filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
+ MAX_LINES_TO_PRINT = 1000
+ for f in filenames:
+ print("From" , f, ":")
+ from collections import deque
+ print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index bf13b7fd84..1d6c9aa230 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -129,17 +129,19 @@ def sync_blocks(rpc_connections, *, wait=1, timeout=60):
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
- maxheight = 0
+ # Use getblockcount() instead of waitforblockheight() to determine the
+ # initial max height because the two RPCs look at different internal global
+ # variables (chainActive vs latestBlock) and the former gets updated
+ # earlier.
+ maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
- heights = [t["height"] for t in tips]
- if all(t == tips[0] for t in tips):
- return
- if all(h == heights[0] for h in heights):
+ if all(t["height"] == maxheight for t in tips):
+ if all(t["hash"] == tips[0]["hash"] for t in tips):
+ return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
- maxheight = max(heights)
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
@@ -221,7 +223,7 @@ def wait_for_bitcoind_start(process, url, i):
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
- raise # unkown JSON RPC exception
+ raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes, cachedir):
@@ -255,7 +257,7 @@ def initialize_chain(test_dir, num_nodes, cachedir):
print("initialize_chain: bitcoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
- print("initialize_chain: RPC succesfully started")
+ print("initialize_chain: RPC successfully started")
rpcs = []
for i in range(MAX_NODES):
@@ -342,7 +344,7 @@ def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
- print("start_node: RPC succesfully started")
+ print("start_node: RPC successfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
@@ -531,10 +533,14 @@ def assert_greater_than(thing1, thing2):
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
+ assert_raises_message(exc, None, fun, *args, **kwds)
+
+def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
- except exc:
- pass
+ except exc as e:
+ if message is not None and message not in e.error['message']:
+ raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
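Since the new helper inspects `e.error['message']`, it is aimed at JSONRPCException-style
errors; the wallet.py change further down uses it like this:

    assert_raises_message(JSONRPCException, "Insufficient funds",
                          self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)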
@@ -651,13 +657,12 @@ def create_tx(node, coinbase, to_address, amount):
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
- for i in range(len(utxos)):
+ for _ in range(len(utxos)):
t = utxos.pop()
- inputs = []
- inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
+ inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
outputs = {}
- send_value = t['amount'] - fee
- outputs[addr] = satoshi_round(send_value)
+ change = t['amount'] - fee
+ outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
@@ -667,6 +672,15 @@ def create_lots_of_big_transactions(node, txouts, utxos, fee):
txids.append(txid)
return txids
+def mine_large_block(node):
+ # Generate up to 14 transactions of roughly 66kB each;
+ # together they come close to the 1MB block limit.
+ txouts = gen_return_txouts()
+ utxos = node.listunspent()[:14]
+ fee = 100 * node.getnetworkinfo()["relayfee"]
+ create_lots_of_big_transactions(node, txouts, utxos, fee=fee)
+ node.generate(1)
+
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index e43f6ea5d2..3c0dc0f4ea 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -71,7 +71,7 @@ class WalletTest (BitcoinTestFramework):
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
- assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
+ assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
diff --git a/src/Makefile.am b/src/Makefile.am
index 5a5e3abcfa..8c12aee217 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -104,13 +104,14 @@ BITCOIN_CORE_H = \
keystore.h \
dbwrapper.h \
limitedmap.h \
- main.h \
memusage.h \
merkleblock.h \
miner.h \
net.h \
+ net_processing.h \
netaddress.h \
netbase.h \
+ netmessagemaker.h \
noui.h \
policy/fees.h \
policy/policy.h \
@@ -144,6 +145,7 @@ BITCOIN_CORE_H = \
util.h \
utilmoneystr.h \
utiltime.h \
+ validation.h \
validationinterface.h \
versionbits.h \
wallet/coincontrol.h \
@@ -178,10 +180,10 @@ libbitcoin_server_a_SOURCES = \
httpserver.cpp \
init.cpp \
dbwrapper.cpp \
- main.cpp \
merkleblock.cpp \
miner.cpp \
net.cpp \
+ net_processing.cpp \
noui.cpp \
policy/fees.cpp \
policy/policy.cpp \
@@ -200,6 +202,7 @@ libbitcoin_server_a_SOURCES = \
txdb.cpp \
txmempool.cpp \
ui_interface.cpp \
+ validation.cpp \
validationinterface.cpp \
versionbits.cpp \
$(BITCOIN_CORE_H)
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 246797a1b2..e58bd9dfbf 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -22,7 +22,9 @@ bench_bench_bitcoin_SOURCES = \
bench/mempool_eviction.cpp \
bench/verify_script.cpp \
bench/base58.cpp \
- bench/lockedpool.cpp
+ bench/lockedpool.cpp \
+ bench/perf.cpp \
+ bench/perf.h
nodist_bench_bench_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index ddf41f92de..23715bbea4 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -103,7 +103,7 @@ bool CBanDB::Read(banmap_t& banSet)
if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
return error("%s: Invalid network magic number", __func__);
- // de-serialize address data into one CAddrMan object
+ // de-serialize ban data
ssBanlist >> banSet;
}
catch (const std::exception& e) {
diff --git a/src/addrman.h b/src/addrman.h
index cabacbbea9..76e7b210f7 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -482,6 +482,7 @@ public:
//! Return the number of (unique) addresses in all tables.
size_t size() const
{
+ LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead
return vRandom.size();
}
@@ -501,13 +502,11 @@ public:
//! Add a single address.
bool Add(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty = 0)
{
+ LOCK(cs);
bool fRet = false;
- {
- LOCK(cs);
- Check();
- fRet |= Add_(addr, source, nTimePenalty);
- Check();
- }
+ Check();
+ fRet |= Add_(addr, source, nTimePenalty);
+ Check();
if (fRet)
LogPrint("addrman", "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew);
return fRet;
@@ -516,14 +515,12 @@ public:
//! Add multiple addresses.
bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0)
{
+ LOCK(cs);
int nAdd = 0;
- {
- LOCK(cs);
- Check();
- for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++)
- nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0;
- Check();
- }
+ Check();
+ for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++)
+ nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0;
+ Check();
if (nAdd)
LogPrint("addrman", "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
return nAdd > 0;
@@ -532,23 +529,19 @@ public:
//! Mark an entry as accessible.
void Good(const CService &addr, int64_t nTime = GetAdjustedTime())
{
- {
- LOCK(cs);
- Check();
- Good_(addr, nTime);
- Check();
- }
+ LOCK(cs);
+ Check();
+ Good_(addr, nTime);
+ Check();
}
//! Mark an entry as connection attempted to.
void Attempt(const CService &addr, bool fCountFailure, int64_t nTime = GetAdjustedTime())
{
- {
- LOCK(cs);
- Check();
- Attempt_(addr, fCountFailure, nTime);
- Check();
- }
+ LOCK(cs);
+ Check();
+ Attempt_(addr, fCountFailure, nTime);
+ Check();
}
/**
@@ -582,12 +575,10 @@ public:
//! Mark an entry as currently-connected-to.
void Connected(const CService &addr, int64_t nTime = GetAdjustedTime())
{
- {
- LOCK(cs);
- Check();
- Connected_(addr, nTime);
- Check();
- }
+ LOCK(cs);
+ Check();
+ Connected_(addr, nTime);
+ Check();
}
void SetServices(const CService &addr, ServiceFlags nServices)
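[Editor's note] The addrman.h hunks above collapse the explicit "{ LOCK(cs); ... }" inner blocks into a single LOCK(cs) at the top of each wrapper. Because LOCK is an RAII guard and the removed block already spanned the rest of the function body, the critical section is unchanged; only the extra nesting goes away. A minimal standalone sketch of the same pattern using std::mutex in place of addrman's CCriticalSection (class and member names here are illustrative, not Bitcoin Core's):

    #include <mutex>
    #include <vector>

    class AddrManSketch {
        mutable std::mutex cs;            // stands in for addrman's cs
        std::vector<int> vRandom;

        void Good_(int id) { vRandom.push_back(id); }   // internal helper, assumes cs is held

    public:
        // Before: the body wrapped the lock in an extra { LOCK(cs); ... } block.
        // After: the RAII guard is taken at function scope; it still covers the
        // whole body and is released on return, so behavior is unchanged.
        void Good(int id)
        {
            std::lock_guard<std::mutex> lock(cs);
            Good_(id);
        }

        size_t size() const
        {
            std::lock_guard<std::mutex> lock(cs);       // mirrors the new LOCK(cs) in size()
            return vRandom.size();
        }
    };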
diff --git a/src/bench/Examples.cpp b/src/bench/Examples.cpp
index b6b020a971..9f35a1ea04 100644
--- a/src/bench/Examples.cpp
+++ b/src/bench/Examples.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "bench.h"
-#include "main.h"
+#include "validation.h"
#include "utiltime.h"
// Sanity test: this should loop ten times, and
diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp
index a791b5b7fa..3319c179bf 100644
--- a/src/bench/base58.cpp
+++ b/src/bench/base58.cpp
@@ -4,7 +4,7 @@
#include "bench.h"
-#include "main.h"
+#include "validation.h"
#include "base58.h"
#include <vector>
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index 227546a7a7..af3d152c9a 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "bench.h"
+#include "perf.h"
#include <iostream>
#include <iomanip>
@@ -26,7 +27,9 @@ BenchRunner::BenchRunner(std::string name, BenchFunction func)
void
BenchRunner::RunAll(double elapsedTimeForOne)
{
- std::cout << "#Benchmark" << "," << "count" << "," << "min" << "," << "max" << "," << "average" << "\n";
+ perf_init();
+ std::cout << "#Benchmark" << "," << "count" << "," << "min" << "," << "max" << "," << "average" << ","
+ << "min_cycles" << "," << "max_cycles" << "," << "average_cycles" << "\n";
for (std::map<std::string,BenchFunction>::iterator it = benchmarks.begin();
it != benchmarks.end(); ++it) {
@@ -35,6 +38,7 @@ BenchRunner::RunAll(double elapsedTimeForOne)
BenchFunction& func = it->second;
func(state);
}
+ perf_fini();
}
bool State::KeepRunning()
@@ -44,8 +48,10 @@ bool State::KeepRunning()
return true;
}
double now;
+ uint64_t nowCycles;
if (count == 0) {
lastTime = beginTime = now = gettimedouble();
+ lastCycles = beginCycles = nowCycles = perf_cpucycles();
}
else {
now = gettimedouble();
@@ -53,6 +59,13 @@ bool State::KeepRunning()
double elapsedOne = elapsed * countMaskInv;
if (elapsedOne < minTime) minTime = elapsedOne;
if (elapsedOne > maxTime) maxTime = elapsedOne;
+
+ // We only use relative values, so we don't have to handle 64-bit wrap-around specially
+ nowCycles = perf_cpucycles();
+ uint64_t elapsedOneCycles = (nowCycles - lastCycles) * countMaskInv;
+ if (elapsedOneCycles < minCycles) minCycles = elapsedOneCycles;
+ if (elapsedOneCycles > maxCycles) maxCycles = elapsedOneCycles;
+
if (elapsed*128 < maxElapsed) {
// If the execution was much too fast (1/128th of maxElapsed), increase the count mask by 8x and restart timing.
// The restart avoids including the overhead of this code in the measurement.
@@ -61,14 +74,20 @@ bool State::KeepRunning()
count = 0;
minTime = std::numeric_limits<double>::max();
maxTime = std::numeric_limits<double>::min();
+ minCycles = std::numeric_limits<uint64_t>::max();
+ maxCycles = std::numeric_limits<uint64_t>::min();
return true;
}
if (elapsed*16 < maxElapsed) {
- countMask = ((countMask<<1)|1) & ((1LL<<60)-1);
- countMaskInv = 1./(countMask+1);
+ uint64_t newCountMask = ((countMask<<1)|1) & ((1LL<<60)-1);
+ if ((count & newCountMask)==0) {
+ countMask = newCountMask;
+ countMaskInv = 1./(countMask+1);
+ }
}
}
lastTime = now;
+ lastCycles = nowCycles;
++count;
if (now - beginTime < maxElapsed) return true; // Keep going
@@ -77,7 +96,9 @@ bool State::KeepRunning()
// Output results
double average = (now-beginTime)/count;
- std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << minTime << "," << maxTime << "," << average << "\n";
+ int64_t averageCycles = (nowCycles-beginCycles)/count;
+ std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << minTime << "," << maxTime << "," << average << ","
+ << minCycles << "," << maxCycles << "," << averageCycles << "\n";
return false;
}
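[Editor's note] Two things change in bench.cpp above: each sample now also records CPU cycles via perf_cpucycles(), and the count mask is only widened when the running count is aligned to the new boundary ((count & newCountMask) == 0), so per-iteration figures scaled by countMaskInv never mix samples taken under two different mask widths. A rough standalone sketch of that doubling rule (the function name is hypothetical):

    #include <cstdint>

    // Widen the sampling mask so the timer is read half as often, but only when
    // 'count' sits exactly on the new boundary, so every sample still covers a
    // whole mask period.
    static void maybe_widen_mask(uint64_t count, uint64_t& countMask, double& countMaskInv)
    {
        uint64_t newCountMask = ((countMask << 1) | 1) & ((1ULL << 60) - 1);
        if ((count & newCountMask) == 0) {
            countMask = newCountMask;
            countMaskInv = 1.0 / (countMask + 1);
        }
    }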
diff --git a/src/bench/bench.h b/src/bench/bench.h
index f13b145aaf..caf73e949b 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -41,12 +41,18 @@ namespace benchmark {
double maxElapsed;
double beginTime;
double lastTime, minTime, maxTime, countMaskInv;
- int64_t count;
- int64_t countMask;
+ uint64_t count;
+ uint64_t countMask;
+ uint64_t beginCycles;
+ uint64_t lastCycles;
+ uint64_t minCycles;
+ uint64_t maxCycles;
public:
State(std::string _name, double _maxElapsed) : name(_name), maxElapsed(_maxElapsed), count(0) {
minTime = std::numeric_limits<double>::max();
maxTime = std::numeric_limits<double>::min();
+ minCycles = std::numeric_limits<uint64_t>::max();
+ maxCycles = std::numeric_limits<uint64_t>::min();
countMask = 1;
countMaskInv = 1./(countMask + 1);
}
diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp
index db1402216d..bd768180c6 100644
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -5,7 +5,7 @@
#include "bench.h"
#include "key.h"
-#include "main.h"
+#include "validation.h"
#include "util.h"
int
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 4a564d3fc8..230e4ca773 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -5,7 +5,8 @@
#include "bench.h"
#include "chainparams.h"
-#include "main.h"
+#include "validation.h"
+#include "streams.h"
#include "consensus/validation.h"
namespace block_bench {
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index 7091ee3e11..32690fe484 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -19,7 +19,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, vector<COutput
tx.nLockTime = nextLockTime++; // so all transactions get different hashes
tx.vout.resize(nInput + 1);
tx.vout[nInput].nValue = nValue;
- CWalletTx* wtx = new CWalletTx(&wallet, tx);
+ CWalletTx* wtx = new CWalletTx(&wallet, MakeTransactionRef(std::move(tx)));
int nAge = 6 * 24;
COutput output(wtx, nInput, nAge, true, true);
diff --git a/src/bench/perf.cpp b/src/bench/perf.cpp
new file mode 100644
index 0000000000..1f43e5d3ac
--- /dev/null
+++ b/src/bench/perf.cpp
@@ -0,0 +1,53 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "perf.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/* These architectures support querying the cycle counter
+ * from user space, so there is no syscall overhead.
+ */
+void perf_init(void) { }
+void perf_fini(void) { }
+
+#elif defined(__linux__)
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <linux/perf_event.h>
+
+static int fd = -1;
+static struct perf_event_attr attr;
+
+void perf_init(void)
+{
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
+}
+
+void perf_fini(void)
+{
+ if (fd != -1) {
+ close(fd);
+ }
+}
+
+uint64_t perf_cpucycles(void)
+{
+ uint64_t result = 0;
+ if (fd == -1 || read(fd, &result, sizeof(result)) < (ssize_t)sizeof(result)) {
+ return 0;
+ }
+ return result;
+}
+
+#else /* Unhandled platform */
+
+void perf_init(void) { }
+void perf_fini(void) { }
+uint64_t perf_cpucycles(void) { return 0; }
+
+#endif
diff --git a/src/bench/perf.h b/src/bench/perf.h
new file mode 100644
index 0000000000..681bd0c8a2
--- /dev/null
+++ b/src/bench/perf.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+/** Functions for measurement of CPU cycles */
+#ifndef H_PERF
+#define H_PERF
+
+#include <stdint.h>
+
+#if defined(__i386__)
+
+static inline uint64_t perf_cpucycles(void)
+{
+ uint64_t x;
+ __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
+ return x;
+}
+
+#elif defined(__x86_64__)
+
+static inline uint64_t perf_cpucycles(void)
+{
+ uint32_t hi, lo;
+ __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
+ return ((uint64_t)lo)|(((uint64_t)hi)<<32);
+}
+#else
+
+uint64_t perf_cpucycles(void);
+
+#endif
+
+void perf_init(void);
+void perf_fini(void);
+
+#endif // H_PERF
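[Editor's note] Together, the two new files give the benchmark three entry points: perf_init() and perf_fini() bracket the run (they only do real work on the Linux perf_event fallback), and perf_cpucycles() returns a raw counter whose absolute value is meaningless but whose differences give elapsed cycles. A hedged usage sketch outside the benchmark framework (the measured function is hypothetical):

    #include <cstdint>
    #include <cstdio>

    #include "bench/perf.h"   // the header added above

    static void work_under_test()
    {
        // hypothetical code whose cost we want in cycles
        volatile uint64_t x = 0;
        for (int i = 0; i < 1000; ++i) x += i;
    }

    int main()
    {
        perf_init();
        const uint64_t begin = perf_cpucycles();
        work_under_test();
        const uint64_t elapsed = perf_cpucycles() - begin;   // only differences are meaningful
        perf_fini();
        std::printf("work_under_test: ~%llu cycles\n", (unsigned long long)elapsed);
        return 0;
    }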
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 392d1b9329..596cc8eada 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -24,15 +24,13 @@
#include <univalue.h>
-using namespace std;
-
static const char DEFAULT_RPCCONNECT[] = "127.0.0.1";
static const int DEFAULT_HTTP_CLIENT_TIMEOUT=900;
static const int CONTINUE_EXECUTION=-1;
std::string HelpMessageCli()
{
- string strUsage;
+ std::string strUsage;
strUsage += HelpMessageGroup(_("Options:"));
strUsage += HelpMessageOpt("-?", _("This help message"));
strUsage += HelpMessageOpt("-conf=<file>", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME));
@@ -187,7 +185,7 @@ static void http_error_cb(enum evhttp_request_error err, void *ctx)
}
#endif
-UniValue CallRPC(const string& strMethod, const UniValue& params)
+UniValue CallRPC(const std::string& strMethod, const UniValue& params)
{
std::string host = GetArg("-rpcconnect", DEFAULT_RPCCONNECT);
int port = GetArg("-rpcport", BaseParams().RPCPort());
@@ -195,18 +193,18 @@ UniValue CallRPC(const string& strMethod, const UniValue& params)
// Create event base
struct event_base *base = event_base_new(); // TODO RAII
if (!base)
- throw runtime_error("cannot create event_base");
+ throw std::runtime_error("cannot create event_base");
// Synchronously look up hostname
struct evhttp_connection *evcon = evhttp_connection_base_new(base, NULL, host.c_str(), port); // TODO RAII
if (evcon == NULL)
- throw runtime_error("create connection failed");
+ throw std::runtime_error("create connection failed");
evhttp_connection_set_timeout(evcon, GetArg("-rpcclienttimeout", DEFAULT_HTTP_CLIENT_TIMEOUT));
HTTPReply response;
struct evhttp_request *req = evhttp_request_new(http_request_done, (void*)&response); // TODO RAII
if (req == NULL)
- throw runtime_error("create http request failed");
+ throw std::runtime_error("create http request failed");
#if LIBEVENT_VERSION_NUMBER >= 0x02010300
evhttp_request_set_error_cb(req, http_error_cb);
#endif
@@ -216,7 +214,7 @@ UniValue CallRPC(const string& strMethod, const UniValue& params)
if (mapArgs["-rpcpassword"] == "") {
// Try fall back to cookie-based authentication if no password is provided
if (!GetAuthCookie(&strRPCUserColonPass)) {
- throw runtime_error(strprintf(
+ throw std::runtime_error(strprintf(
_("Could not locate RPC credentials. No authentication cookie could be found, and no rpcpassword is set in the configuration file (%s)"),
GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string().c_str()));
@@ -249,28 +247,28 @@ UniValue CallRPC(const string& strMethod, const UniValue& params)
event_base_free(base);
if (response.status == 0)
- throw CConnectionFailed(strprintf("couldn't connect to server\n(make sure server is running and you are connecting to the correct RPC port: %d %s)", response.error, http_errorstring(response.error)));
+ throw CConnectionFailed(strprintf("couldn't connect to server: %s (code %d)\n(make sure server is running and you are connecting to the correct RPC port)", http_errorstring(response.error), response.error));
else if (response.status == HTTP_UNAUTHORIZED)
- throw runtime_error("incorrect rpcuser or rpcpassword (authorization failed)");
+ throw std::runtime_error("incorrect rpcuser or rpcpassword (authorization failed)");
else if (response.status >= 400 && response.status != HTTP_BAD_REQUEST && response.status != HTTP_NOT_FOUND && response.status != HTTP_INTERNAL_SERVER_ERROR)
- throw runtime_error(strprintf("server returned HTTP error %d", response.status));
+ throw std::runtime_error(strprintf("server returned HTTP error %d", response.status));
else if (response.body.empty())
- throw runtime_error("no response from server");
+ throw std::runtime_error("no response from server");
// Parse reply
UniValue valReply(UniValue::VSTR);
if (!valReply.read(response.body))
- throw runtime_error("couldn't parse reply from server");
+ throw std::runtime_error("couldn't parse reply from server");
const UniValue& reply = valReply.get_obj();
if (reply.empty())
- throw runtime_error("expected reply to have result, error and id properties");
+ throw std::runtime_error("expected reply to have result, error and id properties");
return reply;
}
int CommandLineRPC(int argc, char *argv[])
{
- string strPrint;
+ std::string strPrint;
int nRet = 0;
try {
// Skip switches
@@ -286,7 +284,7 @@ int CommandLineRPC(int argc, char *argv[])
args.push_back(line);
}
if (args.size() < 1)
- throw runtime_error("too few parameters (need at least command)");
+ throw std::runtime_error("too few parameters (need at least command)");
std::string strMethod = args[0];
UniValue params = RPCConvertValues(strMethod, std::vector<std::string>(args.begin()+1, args.end()));
@@ -340,7 +338,7 @@ int CommandLineRPC(int argc, char *argv[])
throw;
}
catch (const std::exception& e) {
- strPrint = string("error: ") + e.what();
+ strPrint = std::string("error: ") + e.what();
nRet = EXIT_FAILURE;
}
catch (...) {
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 6c66efcc9c..1ed1449f03 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -26,10 +26,8 @@
#include <boost/algorithm/string.hpp>
#include <boost/assign/list_of.hpp>
-using namespace std;
-
static bool fCreateBlank;
-static map<string,UniValue> registers;
+static std::map<std::string,UniValue> registers;
static const int CONTINUE_EXECUTION=-1;
//
@@ -103,52 +101,52 @@ static int AppInitRawTx(int argc, char* argv[])
return CONTINUE_EXECUTION;
}
-static void RegisterSetJson(const string& key, const string& rawJson)
+static void RegisterSetJson(const std::string& key, const std::string& rawJson)
{
UniValue val;
if (!val.read(rawJson)) {
- string strErr = "Cannot parse JSON for key " + key;
- throw runtime_error(strErr);
+ std::string strErr = "Cannot parse JSON for key " + key;
+ throw std::runtime_error(strErr);
}
registers[key] = val;
}
-static void RegisterSet(const string& strInput)
+static void RegisterSet(const std::string& strInput)
{
// separate NAME:VALUE in string
size_t pos = strInput.find(':');
- if ((pos == string::npos) ||
+ if ((pos == std::string::npos) ||
(pos == 0) ||
(pos == (strInput.size() - 1)))
- throw runtime_error("Register input requires NAME:VALUE");
+ throw std::runtime_error("Register input requires NAME:VALUE");
- string key = strInput.substr(0, pos);
- string valStr = strInput.substr(pos + 1, string::npos);
+ std::string key = strInput.substr(0, pos);
+ std::string valStr = strInput.substr(pos + 1, std::string::npos);
RegisterSetJson(key, valStr);
}
-static void RegisterLoad(const string& strInput)
+static void RegisterLoad(const std::string& strInput)
{
// separate NAME:FILENAME in string
size_t pos = strInput.find(':');
- if ((pos == string::npos) ||
+ if ((pos == std::string::npos) ||
(pos == 0) ||
(pos == (strInput.size() - 1)))
- throw runtime_error("Register load requires NAME:FILENAME");
+ throw std::runtime_error("Register load requires NAME:FILENAME");
- string key = strInput.substr(0, pos);
- string filename = strInput.substr(pos + 1, string::npos);
+ std::string key = strInput.substr(0, pos);
+ std::string filename = strInput.substr(pos + 1, std::string::npos);
FILE *f = fopen(filename.c_str(), "r");
if (!f) {
- string strErr = "Cannot open file " + filename;
- throw runtime_error(strErr);
+ std::string strErr = "Cannot open file " + filename;
+ throw std::runtime_error(strErr);
}
// load file chunks into one big buffer
- string valStr;
+ std::string valStr;
while ((!feof(f)) && (!ferror(f))) {
char buf[4096];
int bread = fread(buf, 1, sizeof(buf), f);
@@ -162,55 +160,55 @@ static void RegisterLoad(const string& strInput)
fclose(f);
if (error) {
- string strErr = "Error reading file " + filename;
- throw runtime_error(strErr);
+ std::string strErr = "Error reading file " + filename;
+ throw std::runtime_error(strErr);
}
// evaluate as JSON buffer register
RegisterSetJson(key, valStr);
}
-static void MutateTxVersion(CMutableTransaction& tx, const string& cmdVal)
+static void MutateTxVersion(CMutableTransaction& tx, const std::string& cmdVal)
{
int64_t newVersion = atoi64(cmdVal);
if (newVersion < 1 || newVersion > CTransaction::MAX_STANDARD_VERSION)
- throw runtime_error("Invalid TX version requested");
+ throw std::runtime_error("Invalid TX version requested");
tx.nVersion = (int) newVersion;
}
-static void MutateTxLocktime(CMutableTransaction& tx, const string& cmdVal)
+static void MutateTxLocktime(CMutableTransaction& tx, const std::string& cmdVal)
{
int64_t newLocktime = atoi64(cmdVal);
if (newLocktime < 0LL || newLocktime > 0xffffffffLL)
- throw runtime_error("Invalid TX locktime requested");
+ throw std::runtime_error("Invalid TX locktime requested");
tx.nLockTime = (unsigned int) newLocktime;
}
-static void MutateTxAddInput(CMutableTransaction& tx, const string& strInput)
+static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInput)
{
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
// separate TXID:VOUT in string
if (vStrInputParts.size()<2)
- throw runtime_error("TX input missing separator");
+ throw std::runtime_error("TX input missing separator");
// extract and validate TXID
- string strTxid = vStrInputParts[0];
+ std::string strTxid = vStrInputParts[0];
if ((strTxid.size() != 64) || !IsHex(strTxid))
- throw runtime_error("invalid TX input txid");
+ throw std::runtime_error("invalid TX input txid");
uint256 txid(uint256S(strTxid));
static const unsigned int minTxOutSz = 9;
static const unsigned int maxVout = MAX_BLOCK_BASE_SIZE / minTxOutSz;
// extract and validate vout
- string strVout = vStrInputParts[1];
+ std::string strVout = vStrInputParts[1];
int vout = atoi(strVout);
if ((vout < 0) || (vout > (int)maxVout))
- throw runtime_error("invalid TX input vout");
+ throw std::runtime_error("invalid TX input vout");
// extract the optional sequence number
uint32_t nSequenceIn=std::numeric_limits<unsigned int>::max();
@@ -222,26 +220,26 @@ static void MutateTxAddInput(CMutableTransaction& tx, const string& strInput)
tx.vin.push_back(txin);
}
-static void MutateTxAddOutAddr(CMutableTransaction& tx, const string& strInput)
+static void MutateTxAddOutAddr(CMutableTransaction& tx, const std::string& strInput)
{
// separate VALUE:ADDRESS in string
size_t pos = strInput.find(':');
- if ((pos == string::npos) ||
+ if ((pos == std::string::npos) ||
(pos == 0) ||
(pos == (strInput.size() - 1)))
- throw runtime_error("TX output missing separator");
+ throw std::runtime_error("TX output missing separator");
// extract and validate VALUE
- string strValue = strInput.substr(0, pos);
+ std::string strValue = strInput.substr(0, pos);
CAmount value;
if (!ParseMoney(strValue, value))
- throw runtime_error("invalid TX output value");
+ throw std::runtime_error("invalid TX output value");
// extract and validate ADDRESS
- string strAddr = strInput.substr(pos + 1, string::npos);
+ std::string strAddr = strInput.substr(pos + 1, std::string::npos);
CBitcoinAddress addr(strAddr);
if (!addr.IsValid())
- throw runtime_error("invalid TX output address");
+ throw std::runtime_error("invalid TX output address");
// build standard output script via GetScriptForDestination()
CScript scriptPubKey = GetScriptForDestination(addr.Get());
@@ -251,7 +249,7 @@ static void MutateTxAddOutAddr(CMutableTransaction& tx, const string& strInput)
tx.vout.push_back(txout);
}
-static void MutateTxAddOutData(CMutableTransaction& tx, const string& strInput)
+static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strInput)
{
CAmount value = 0;
@@ -259,20 +257,20 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const string& strInput)
size_t pos = strInput.find(':');
if (pos==0)
- throw runtime_error("TX output value not specified");
+ throw std::runtime_error("TX output value not specified");
- if (pos != string::npos) {
+ if (pos != std::string::npos) {
// extract and validate VALUE
- string strValue = strInput.substr(0, pos);
+ std::string strValue = strInput.substr(0, pos);
if (!ParseMoney(strValue, value))
- throw runtime_error("invalid TX output value");
+ throw std::runtime_error("invalid TX output value");
}
// extract and validate DATA
- string strData = strInput.substr(pos + 1, string::npos);
+ std::string strData = strInput.substr(pos + 1, std::string::npos);
if (!IsHex(strData))
- throw runtime_error("invalid TX output data");
+ throw std::runtime_error("invalid TX output data");
std::vector<unsigned char> data = ParseHex(strData);
@@ -280,22 +278,22 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const string& strInput)
tx.vout.push_back(txout);
}
-static void MutateTxAddOutScript(CMutableTransaction& tx, const string& strInput)
+static void MutateTxAddOutScript(CMutableTransaction& tx, const std::string& strInput)
{
// separate VALUE:SCRIPT in string
size_t pos = strInput.find(':');
- if ((pos == string::npos) ||
+ if ((pos == std::string::npos) ||
(pos == 0))
- throw runtime_error("TX output missing separator");
+ throw std::runtime_error("TX output missing separator");
// extract and validate VALUE
- string strValue = strInput.substr(0, pos);
+ std::string strValue = strInput.substr(0, pos);
CAmount value;
if (!ParseMoney(strValue, value))
- throw runtime_error("invalid TX output value");
+ throw std::runtime_error("invalid TX output value");
// extract and validate script
- string strScript = strInput.substr(pos + 1, string::npos);
+ std::string strScript = strInput.substr(pos + 1, std::string::npos);
CScript scriptPubKey = ParseScript(strScript); // throws on err
// construct TxOut, append to transaction output list
@@ -303,26 +301,26 @@ static void MutateTxAddOutScript(CMutableTransaction& tx, const string& strInput
tx.vout.push_back(txout);
}
-static void MutateTxDelInput(CMutableTransaction& tx, const string& strInIdx)
+static void MutateTxDelInput(CMutableTransaction& tx, const std::string& strInIdx)
{
// parse requested deletion index
int inIdx = atoi(strInIdx);
if (inIdx < 0 || inIdx >= (int)tx.vin.size()) {
- string strErr = "Invalid TX input index '" + strInIdx + "'";
- throw runtime_error(strErr.c_str());
+ std::string strErr = "Invalid TX input index '" + strInIdx + "'";
+ throw std::runtime_error(strErr.c_str());
}
// delete input from transaction
tx.vin.erase(tx.vin.begin() + inIdx);
}
-static void MutateTxDelOutput(CMutableTransaction& tx, const string& strOutIdx)
+static void MutateTxDelOutput(CMutableTransaction& tx, const std::string& strOutIdx)
{
// parse requested deletion index
int outIdx = atoi(strOutIdx);
if (outIdx < 0 || outIdx >= (int)tx.vout.size()) {
- string strErr = "Invalid TX output index '" + strOutIdx + "'";
- throw runtime_error(strErr.c_str());
+ std::string strErr = "Invalid TX output index '" + strOutIdx + "'";
+ throw std::runtime_error(strErr.c_str());
}
// delete output from transaction
@@ -342,7 +340,7 @@ static const struct {
{"SINGLE|ANYONECANPAY", SIGHASH_SINGLE|SIGHASH_ANYONECANPAY},
};
-static bool findSighashFlags(int& flags, const string& flagStr)
+static bool findSighashFlags(int& flags, const std::string& flagStr)
{
flags = 0;
@@ -356,17 +354,17 @@ static bool findSighashFlags(int& flags, const string& flagStr)
return false;
}
-uint256 ParseHashUO(map<string,UniValue>& o, string strKey)
+uint256 ParseHashUO(std::map<std::string,UniValue>& o, std::string strKey)
{
if (!o.count(strKey))
return uint256();
return ParseHashUV(o[strKey], strKey);
}
-vector<unsigned char> ParseHexUO(map<string,UniValue>& o, string strKey)
+std::vector<unsigned char> ParseHexUO(std::map<std::string,UniValue>& o, std::string strKey)
{
if (!o.count(strKey)) {
- vector<unsigned char> emptyVec;
+ std::vector<unsigned char> emptyVec;
return emptyVec;
}
return ParseHexUV(o[strKey], strKey);
@@ -375,24 +373,24 @@ vector<unsigned char> ParseHexUO(map<string,UniValue>& o, string strKey)
static CAmount AmountFromValue(const UniValue& value)
{
if (!value.isNum() && !value.isStr())
- throw runtime_error("Amount is not a number or string");
+ throw std::runtime_error("Amount is not a number or string");
CAmount amount;
if (!ParseFixedPoint(value.getValStr(), 8, &amount))
- throw runtime_error("Invalid amount");
+ throw std::runtime_error("Invalid amount");
if (!MoneyRange(amount))
- throw runtime_error("Amount out of range");
+ throw std::runtime_error("Amount out of range");
return amount;
}
-static void MutateTxSign(CMutableTransaction& tx, const string& flagStr)
+static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
{
int nHashType = SIGHASH_ALL;
if (flagStr.size() > 0)
if (!findSighashFlags(nHashType, flagStr))
- throw runtime_error("unknown sighash flag/sign option");
+ throw std::runtime_error("unknown sighash flag/sign option");
- vector<CTransaction> txVariants;
+ std::vector<CTransaction> txVariants;
txVariants.push_back(tx);
// mergedTx will end up with all the signatures; it
@@ -403,17 +401,17 @@ static void MutateTxSign(CMutableTransaction& tx, const string& flagStr)
CCoinsViewCache view(&viewDummy);
if (!registers.count("privatekeys"))
- throw runtime_error("privatekeys register variable must be set.");
+ throw std::runtime_error("privatekeys register variable must be set.");
CBasicKeyStore tempKeystore;
UniValue keysObj = registers["privatekeys"];
for (unsigned int kidx = 0; kidx < keysObj.size(); kidx++) {
if (!keysObj[kidx].isStr())
- throw runtime_error("privatekey not a string");
+ throw std::runtime_error("privatekey not a std::string");
CBitcoinSecret vchSecret;
bool fGood = vchSecret.SetString(keysObj[kidx].getValStr());
if (!fGood)
- throw runtime_error("privatekey not valid");
+ throw std::runtime_error("privatekey not valid");
CKey key = vchSecret.GetKey();
tempKeystore.AddKey(key);
@@ -421,34 +419,34 @@ static void MutateTxSign(CMutableTransaction& tx, const string& flagStr)
// Add previous txouts given in the RPC call:
if (!registers.count("prevtxs"))
- throw runtime_error("prevtxs register variable must be set.");
+ throw std::runtime_error("prevtxs register variable must be set.");
UniValue prevtxsObj = registers["prevtxs"];
{
for (unsigned int previdx = 0; previdx < prevtxsObj.size(); previdx++) {
UniValue prevOut = prevtxsObj[previdx];
if (!prevOut.isObject())
- throw runtime_error("expected prevtxs internal object");
+ throw std::runtime_error("expected prevtxs internal object");
- map<string,UniValue::VType> types = boost::assign::map_list_of("txid", UniValue::VSTR)("vout",UniValue::VNUM)("scriptPubKey",UniValue::VSTR);
+ std::map<std::string,UniValue::VType> types = boost::assign::map_list_of("txid", UniValue::VSTR)("vout",UniValue::VNUM)("scriptPubKey",UniValue::VSTR);
if (!prevOut.checkObject(types))
- throw runtime_error("prevtxs internal object typecheck fail");
+ throw std::runtime_error("prevtxs internal object typecheck fail");
uint256 txid = ParseHashUV(prevOut["txid"], "txid");
int nOut = atoi(prevOut["vout"].getValStr());
if (nOut < 0)
- throw runtime_error("vout must be positive");
+ throw std::runtime_error("vout must be positive");
- vector<unsigned char> pkData(ParseHexUV(prevOut["scriptPubKey"], "scriptPubKey"));
+ std::vector<unsigned char> pkData(ParseHexUV(prevOut["scriptPubKey"], "scriptPubKey"));
CScript scriptPubKey(pkData.begin(), pkData.end());
{
CCoinsModifier coins = view.ModifyCoins(txid);
if (coins->IsAvailable(nOut) && coins->vout[nOut].scriptPubKey != scriptPubKey) {
- string err("Previous output scriptPubKey mismatch:\n");
+ std::string err("Previous output scriptPubKey mismatch:\n");
err = err + ScriptToAsmStr(coins->vout[nOut].scriptPubKey) + "\nvs:\n"+
ScriptToAsmStr(scriptPubKey);
- throw runtime_error(err);
+ throw std::runtime_error(err);
}
if ((unsigned int)nOut >= coins->vout.size())
coins->vout.resize(nOut+1);
@@ -464,7 +462,7 @@ static void MutateTxSign(CMutableTransaction& tx, const string& flagStr)
if ((scriptPubKey.IsPayToScriptHash() || scriptPubKey.IsPayToWitnessScriptHash()) &&
prevOut.exists("redeemScript")) {
UniValue v = prevOut["redeemScript"];
- vector<unsigned char> rsData(ParseHexUV(v, "redeemScript"));
+ std::vector<unsigned char> rsData(ParseHexUV(v, "redeemScript"));
CScript redeemScript(rsData.begin(), rsData.end());
tempKeystore.AddCScript(redeemScript);
}
@@ -521,8 +519,8 @@ public:
}
};
-static void MutateTx(CMutableTransaction& tx, const string& command,
- const string& commandVal)
+static void MutateTx(CMutableTransaction& tx, const std::string& command,
+ const std::string& commandVal)
{
std::unique_ptr<Secp256k1Init> ecc;
@@ -557,7 +555,7 @@ static void MutateTx(CMutableTransaction& tx, const string& command,
RegisterSet(commandVal);
else
- throw runtime_error("unknown command");
+ throw std::runtime_error("unknown command");
}
static void OutputTxJSON(const CTransaction& tx)
@@ -565,20 +563,20 @@ static void OutputTxJSON(const CTransaction& tx)
UniValue entry(UniValue::VOBJ);
TxToUniv(tx, uint256(), entry);
- string jsonOutput = entry.write(4);
+ std::string jsonOutput = entry.write(4);
fprintf(stdout, "%s\n", jsonOutput.c_str());
}
static void OutputTxHash(const CTransaction& tx)
{
- string strHexHash = tx.GetHash().GetHex(); // the hex-encoded transaction hash (aka the transaction id)
+ std::string strHexHash = tx.GetHash().GetHex(); // the hex-encoded transaction hash (aka the transaction id)
fprintf(stdout, "%s\n", strHexHash.c_str());
}
static void OutputTxHex(const CTransaction& tx)
{
- string strHex = EncodeHexTx(tx);
+ std::string strHex = EncodeHexTx(tx);
fprintf(stdout, "%s\n", strHex.c_str());
}
@@ -593,10 +591,10 @@ static void OutputTx(const CTransaction& tx)
OutputTxHex(tx);
}
-static string readStdin()
+static std::string readStdin()
{
char buf[4096];
- string ret;
+ std::string ret;
while (!feof(stdin)) {
size_t bread = fread(buf, 1, sizeof(buf), stdin);
@@ -606,7 +604,7 @@ static string readStdin()
}
if (ferror(stdin))
- throw runtime_error("error reading stdin");
+ throw std::runtime_error("error reading stdin");
boost::algorithm::trim_right(ret);
@@ -615,7 +613,7 @@ static string readStdin()
static int CommandLineRawTx(int argc, char* argv[])
{
- string strPrint;
+ std::string strPrint;
int nRet = 0;
try {
// Skip switches; Permit common stdin convention "-"
@@ -625,33 +623,31 @@ static int CommandLineRawTx(int argc, char* argv[])
argv++;
}
- CTransaction txDecodeTmp;
+ CMutableTransaction tx;
int startArg;
if (!fCreateBlank) {
// require at least one param
if (argc < 2)
- throw runtime_error("too few parameters");
+ throw std::runtime_error("too few parameters");
// param: hex-encoded bitcoin transaction
- string strHexTx(argv[1]);
+ std::string strHexTx(argv[1]);
if (strHexTx == "-") // "-" implies standard input
strHexTx = readStdin();
- if (!DecodeHexTx(txDecodeTmp, strHexTx))
- throw runtime_error("invalid transaction encoding");
+ if (!DecodeHexTx(tx, strHexTx, true))
+ throw std::runtime_error("invalid transaction encoding");
startArg = 2;
} else
startArg = 1;
- CMutableTransaction tx(txDecodeTmp);
-
for (int i = startArg; i < argc; i++) {
- string arg = argv[i];
- string key, value;
+ std::string arg = argv[i];
+ std::string key, value;
size_t eqpos = arg.find('=');
- if (eqpos == string::npos)
+ if (eqpos == std::string::npos)
key = arg;
else {
key = arg.substr(0, eqpos);
@@ -668,7 +664,7 @@ static int CommandLineRawTx(int argc, char* argv[])
throw;
}
catch (const std::exception& e) {
- strPrint = string("error: ") + e.what();
+ strPrint = std::string("error: ") + e.what();
nRet = EXIT_FAILURE;
}
catch (...) {
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 3352a76de6..ba3ccac615 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -128,6 +128,26 @@ bool AppInit(int argc, char* argv[])
fprintf(stderr, "Error: There is no RPC client functionality in bitcoind anymore. Use the bitcoin-cli utility instead.\n");
exit(EXIT_FAILURE);
}
+ // -server defaults to true for bitcoind but not for the GUI so do this here
+ SoftSetBoolArg("-server", true);
+ // Set this early so that parameter interactions go to console
+ InitLogging();
+ InitParameterInteraction();
+ if (!AppInitBasicSetup())
+ {
+ // InitError will have been called with detailed error, which ends up on console
+ exit(1);
+ }
+ if (!AppInitParameterInteraction())
+ {
+ // InitError will have been called with detailed error, which ends up on console
+ exit(1);
+ }
+ if (!AppInitSanityChecks())
+ {
+ // InitError will have been called with detailed error, which ends up on console
+ exit(1);
+ }
if (GetBoolArg("-daemon", false))
{
#if HAVE_DECL_DAEMON
@@ -143,12 +163,8 @@ bool AppInit(int argc, char* argv[])
return false;
#endif // HAVE_DECL_DAEMON
}
- SoftSetBoolArg("-server", true);
- // Set this early so that parameter interactions go to console
- InitLogging();
- InitParameterInteraction();
- fRet = AppInit2(threadGroup, scheduler);
+ fRet = AppInitMain(threadGroup, scheduler);
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "AppInit()");
diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
index dbed90583d..914af0c670 100644
--- a/src/blockencodings.cpp
+++ b/src/blockencodings.cpp
@@ -10,7 +10,7 @@
#include "random.h"
#include "streams.h"
#include "txmempool.h"
-#include "main.h"
+#include "validation.h"
#include "util.h"
#include <unordered_map>
@@ -24,7 +24,7 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool f
//TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
prefilledtxn[0] = {0, block.vtx[0]};
for (size_t i = 1; i < block.vtx.size(); i++) {
- const CTransaction& tx = block.vtx[i];
+ const CTransaction& tx = *block.vtx[i];
shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash());
}
}
@@ -59,10 +59,10 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
int32_t lastprefilledindex = -1;
for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
- if (cmpctblock.prefilledtxn[i].tx.IsNull())
+ if (cmpctblock.prefilledtxn[i].tx->IsNull())
return READ_STATUS_INVALID;
- lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so cant overflow here
+ lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
return READ_STATUS_INVALID;
if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
@@ -71,7 +71,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
// have neither a prefilled txn or a shorttxid!
return READ_STATUS_INVALID;
}
- txn_available[lastprefilledindex] = std::make_shared<CTransaction>(cmpctblock.prefilledtxn[i].tx);
+ txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
}
prefilled_count = cmpctblock.prefilledtxn.size();
@@ -142,7 +142,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
return txn_available[index] ? true : false;
}
-ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransaction>& vtx_missing) const {
+ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) const {
assert(!header.IsNull());
block = header;
block.vtx.resize(txn_available.size());
@@ -154,7 +154,7 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
return READ_STATUS_INVALID;
block.vtx[i] = vtx_missing[tx_missing_offset++];
} else
- block.vtx[i] = *txn_available[i];
+ block.vtx[i] = txn_available[i];
}
if (vtx_missing.size() != tx_missing_offset)
return READ_STATUS_INVALID;
@@ -172,8 +172,8 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool and %lu txn requested\n", header.GetHash().ToString(), prefilled_count, mempool_count, vtx_missing.size());
if (vtx_missing.size() < 5) {
- for(const CTransaction& tx : vtx_missing)
- LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", header.GetHash().ToString(), tx.GetHash().ToString());
+ for (const auto& tx : vtx_missing)
+ LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", header.GetHash().ToString(), tx->GetHash().ToString());
}
return READ_STATUS_OK;
diff --git a/src/blockencodings.h b/src/blockencodings.h
index 1f9491867a..27baf1f8f8 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -14,9 +14,9 @@ class CTxMemPool;
// Dumb helper to handle CTransaction compression at serialize-time
struct TransactionCompressor {
private:
- CTransaction& tx;
+ CTransactionRef& tx;
public:
- TransactionCompressor(CTransaction& txIn) : tx(txIn) {}
+ TransactionCompressor(CTransactionRef& txIn) : tx(txIn) {}
ADD_SERIALIZE_METHODS;
@@ -72,7 +72,7 @@ class BlockTransactions {
public:
// A BlockTransactions message
uint256 blockhash;
- std::vector<CTransaction> txn;
+ std::vector<CTransactionRef> txn;
BlockTransactions() {}
BlockTransactions(const BlockTransactionsRequest& req) :
@@ -104,7 +104,7 @@ struct PrefilledTransaction {
// Used as an offset since last prefilled tx in CBlockHeaderAndShortTxIDs,
// as a proper transaction-in-block-index in PartiallyDownloadedBlock
uint16_t index;
- CTransaction tx;
+ CTransactionRef tx;
ADD_SERIALIZE_METHODS;
@@ -193,7 +193,7 @@ public:
class PartiallyDownloadedBlock {
protected:
- std::vector<std::shared_ptr<const CTransaction> > txn_available;
+ std::vector<CTransactionRef> txn_available;
size_t prefilled_count = 0, mempool_count = 0;
CTxMemPool* pool;
public:
@@ -202,7 +202,7 @@ public:
ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock);
bool IsTxAvailable(size_t index) const;
- ReadStatus FillBlock(CBlock& block, const std::vector<CTransaction>& vtx_missing) const;
+ ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) const;
};
#endif
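[Editor's note] The blockencodings changes above move PrefilledTransaction, BlockTransactions and FillBlock from holding CTransaction objects by value to holding CTransactionRef, a shared pointer to an immutable transaction, so reconstructing a compact block shares the prefilled and mempool transactions instead of copying them. A minimal standalone sketch of the same ownership pattern with a stand-in transaction type:

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Tx { std::vector<unsigned char> data; };   // stand-in for CTransaction
    using TxRef = std::shared_ptr<const Tx>;          // stand-in for CTransactionRef

    // Fill the block's transaction list from already-available refs, pulling the
    // gaps from 'missing'; everything is a pointer copy or move, never a deep copy.
    static bool fill_block(const std::vector<TxRef>& available,
                           std::vector<TxRef> missing,
                           std::vector<TxRef>& vtx_out)
    {
        vtx_out.clear();
        std::size_t m = 0;
        for (const auto& tx : available) {
            if (tx) {
                vtx_out.push_back(tx);                      // shares the existing object
            } else if (m < missing.size()) {
                vtx_out.push_back(std::move(missing[m++])); // take ownership of the supplied ref
            } else {
                return false;                               // analogous to READ_STATUS_INVALID
            }
        }
        return m == missing.size();                         // leftover entries are also invalid
    }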
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index a57ab632e4..3b3c0a5d3e 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -31,7 +31,7 @@ static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesi
genesis.nBits = nBits;
genesis.nNonce = nNonce;
genesis.nVersion = nVersion;
- genesis.vtx.push_back(txNew);
+ genesis.vtx.push_back(MakeTransactionRef(std::move(txNew)));
genesis.hashPrevBlock.SetNull();
genesis.hashMerkleRoot = BlockMerkleRoot(genesis);
return genesis;
diff --git a/src/checkpoints.cpp b/src/checkpoints.cpp
index d22c188c16..a487fb1dc4 100644
--- a/src/checkpoints.cpp
+++ b/src/checkpoints.cpp
@@ -6,7 +6,7 @@
#include "chain.h"
#include "chainparams.h"
-#include "main.h"
+#include "validation.h"
#include "uint256.h"
#include <stdint.h>
diff --git a/src/compat.h b/src/compat.h
index 79a297e5e4..2578d6d344 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -34,6 +34,7 @@
#else
#include <sys/fcntl.h>
#include <sys/mman.h>
+#include <sys/select.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
diff --git a/src/consensus/merkle.cpp b/src/consensus/merkle.cpp
index 35f7d2e05a..6fa96ddf45 100644
--- a/src/consensus/merkle.cpp
+++ b/src/consensus/merkle.cpp
@@ -160,7 +160,7 @@ uint256 BlockMerkleRoot(const CBlock& block, bool* mutated)
std::vector<uint256> leaves;
leaves.resize(block.vtx.size());
for (size_t s = 0; s < block.vtx.size(); s++) {
- leaves[s] = block.vtx[s].GetHash();
+ leaves[s] = block.vtx[s]->GetHash();
}
return ComputeMerkleRoot(leaves, mutated);
}
@@ -171,7 +171,7 @@ uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated)
leaves.resize(block.vtx.size());
leaves[0].SetNull(); // The witness hash of the coinbase is 0.
for (size_t s = 1; s < block.vtx.size(); s++) {
- leaves[s] = block.vtx[s].GetWitnessHash();
+ leaves[s] = block.vtx[s]->GetWitnessHash();
}
return ComputeMerkleRoot(leaves, mutated);
}
@@ -181,7 +181,7 @@ std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position)
std::vector<uint256> leaves;
leaves.resize(block.vtx.size());
for (size_t s = 0; s < block.vtx.size(); s++) {
- leaves[s] = block.vtx[s].GetHash();
+ leaves[s] = block.vtx[s]->GetHash();
}
return ComputeMerkleBranch(leaves, position);
}
diff --git a/src/core_io.h b/src/core_io.h
index b559d44bf5..4344290bb8 100644
--- a/src/core_io.h
+++ b/src/core_io.h
@@ -11,22 +11,23 @@
class CBlock;
class CScript;
class CTransaction;
+class CMutableTransaction;
class uint256;
class UniValue;
// core_read.cpp
-extern CScript ParseScript(const std::string& s);
-extern std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode = false);
-extern bool DecodeHexTx(CTransaction& tx, const std::string& strHexTx, bool fTryNoWitness = false);
-extern bool DecodeHexBlk(CBlock&, const std::string& strHexBlk);
-extern uint256 ParseHashUV(const UniValue& v, const std::string& strName);
-extern uint256 ParseHashStr(const std::string&, const std::string& strName);
-extern std::vector<unsigned char> ParseHexUV(const UniValue& v, const std::string& strName);
+CScript ParseScript(const std::string& s);
+std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode = false);
+bool DecodeHexTx(CMutableTransaction& tx, const std::string& strHexTx, bool fTryNoWitness = false);
+bool DecodeHexBlk(CBlock&, const std::string& strHexBlk);
+uint256 ParseHashUV(const UniValue& v, const std::string& strName);
+uint256 ParseHashStr(const std::string&, const std::string& strName);
+std::vector<unsigned char> ParseHexUV(const UniValue& v, const std::string& strName);
// core_write.cpp
-extern std::string FormatScript(const CScript& script);
-extern std::string EncodeHexTx(const CTransaction& tx);
-extern void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
-extern void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry);
+std::string FormatScript(const CScript& script);
+std::string EncodeHexTx(const CTransaction& tx);
+void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
+void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry);
#endif // BITCOIN_CORE_IO_H
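[Editor's note] With the signature change above, callers decode into a CMutableTransaction and only freeze it into an immutable CTransaction (or a CTransactionRef) once editing is finished, which is what the bitcoin-tx.cpp hunk earlier in this diff now does. A hedged sketch of the new call shape, assuming core_io.h and primitives/transaction.h from this tree:

    #include <stdexcept>
    #include <string>
    #include <utility>

    #include "core_io.h"                 // DecodeHexTx(CMutableTransaction&, ...)
    #include "primitives/transaction.h"  // CMutableTransaction, CTransactionRef, MakeTransactionRef

    // Decode a hex-encoded transaction, adjust it while still mutable, then
    // hand back a shared immutable ref.
    static CTransactionRef decode_and_freeze(const std::string& strHexTx)
    {
        CMutableTransaction tx;
        if (!DecodeHexTx(tx, strHexTx, true))        // true: also try the non-witness encoding
            throw std::runtime_error("invalid transaction encoding");
        tx.nLockTime = 0;                            // hypothetical edit while still mutable
        return MakeTransactionRef(std::move(tx));
    }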
diff --git a/src/core_memusage.h b/src/core_memusage.h
index b8e0f08bbf..0dcc24c40c 100644
--- a/src/core_memusage.h
+++ b/src/core_memusage.h
@@ -69,8 +69,8 @@ static inline size_t RecursiveDynamicUsage(const CMutableTransaction& tx) {
static inline size_t RecursiveDynamicUsage(const CBlock& block) {
size_t mem = memusage::DynamicUsage(block.vtx);
- for (std::vector<CTransaction>::const_iterator it = block.vtx.begin(); it != block.vtx.end(); it++) {
- mem += RecursiveDynamicUsage(*it);
+ for (const auto& tx : block.vtx) {
+ mem += memusage::DynamicUsage(tx) + RecursiveDynamicUsage(*tx);
}
return mem;
}
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 7cfda6dd6d..b6d0285459 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -90,7 +90,7 @@ CScript ParseScript(const std::string& s)
return result;
}
-bool DecodeHexTx(CTransaction& tx, const std::string& strHexTx, bool fTryNoWitness)
+bool DecodeHexTx(CMutableTransaction& tx, const std::string& strHexTx, bool fTryNoWitness)
{
if (!IsHex(strHexTx))
return false;
diff --git a/src/crypto/ctaes/test.c b/src/crypto/ctaes/test.c
index fce1696acd..21439a16f1 100644
--- a/src/crypto/ctaes/test.c
+++ b/src/crypto/ctaes/test.c
@@ -102,7 +102,7 @@ int main(void) {
}
}
if (fail == 0) {
- fprintf(stderr, "All tests succesful\n");
+ fprintf(stderr, "All tests successful\n");
} else {
fprintf(stderr, "%i tests failed\n", fail);
}
diff --git a/src/init.cpp b/src/init.cpp
index 31e3efb459..ba5fe4152a 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -19,10 +19,11 @@
#include "httpserver.h"
#include "httprpc.h"
#include "key.h"
-#include "main.h"
+#include "validation.h"
#include "miner.h"
#include "netbase.h"
#include "net.h"
+#include "net_processing.h"
#include "policy/policy.h"
#include "rpc/server.h"
#include "rpc/register.h"
@@ -391,7 +392,7 @@ std::string HelpMessage(HelpMessageMode mode)
#endif
#endif
strUsage += HelpMessageOpt("-whitebind=<addr>", _("Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6"));
- strUsage += HelpMessageOpt("-whitelist=<netmask>", _("Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.") +
+ strUsage += HelpMessageOpt("-whitelist=<IP address or network>", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times.") +
" " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway"));
strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY));
strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
@@ -601,6 +602,8 @@ void ThreadImport(std::vector<boost::filesystem::path> vImportFiles)
{
const CChainParams& chainparams = Params();
RenameThread("bitcoin-loadblk");
+
+ {
CImportingNow imp;
// -reindex
@@ -660,7 +663,7 @@ void ThreadImport(std::vector<boost::filesystem::path> vImportFiles)
LogPrintf("Stopping after block import\n");
StartShutdown();
}
-
+ } // End scope of CImportingNow
LoadMempool();
}
@@ -749,23 +752,10 @@ void InitParameterInteraction()
LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__);
}
- if (GetBoolArg("-salvagewallet", false)) {
- // Rewrite just private keys: rescan to find transactions
- if (SoftSetBoolArg("-rescan", true))
- LogPrintf("%s: parameter interaction: -salvagewallet=1 -> setting -rescan=1\n", __func__);
- }
-
- // -zapwallettx implies a rescan
- if (GetBoolArg("-zapwallettxes", false)) {
- if (SoftSetBoolArg("-rescan", true))
- LogPrintf("%s: parameter interaction: -zapwallettxes=<mode> -> setting -rescan=1\n", __func__);
- }
-
- // disable walletbroadcast and whitelistrelay in blocksonly mode
+ // disable whitelistrelay in blocksonly mode
if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
if (SoftSetBoolArg("-whitelistrelay", false))
LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__);
- // walletbroadcast is disabled in CWallet::ParameterInteraction()
}
// Forcing relay from whitelisted hosts implies we will accept relays from them in the first place.
@@ -791,10 +781,17 @@ void InitLogging()
LogPrintf("Bitcoin version %s\n", FormatFullVersion());
}
-/** Initialize bitcoin.
- * @pre Parameters should be parsed and config file should be read.
- */
-bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
+namespace { // Variables internal to initialization process only
+
+ServiceFlags nRelevantServices = NODE_NETWORK;
+int nMaxConnections;
+int nUserMaxConnections;
+int nFD;
+ServiceFlags nLocalServices = NODE_NETWORK;
+
+}
+
+bool AppInitBasicSetup()
{
// ********************************************************* Step 1: setup
#ifdef _MSC_VER
@@ -846,9 +843,13 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
// Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly
signal(SIGPIPE, SIG_IGN);
#endif
+ return true;
+}
- // ********************************************************* Step 2: parameter interactions
+bool AppInitParameterInteraction()
+{
const CChainParams& chainparams = Params();
+ // ********************************************************* Step 2: parameter interactions
// also see: InitParameterInteraction()
@@ -859,13 +860,15 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
}
// Make sure enough file descriptors are available
- int nBind = std::max((int)mapArgs.count("-bind") + (int)mapArgs.count("-whitebind"), 1);
- int nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
- int nMaxConnections = std::max(nUserMaxConnections, 0);
+ int nBind = std::max(
+ (mapMultiArgs.count("-bind") ? mapMultiArgs.at("-bind").size() : 0) +
+ (mapMultiArgs.count("-whitebind") ? mapMultiArgs.at("-whitebind").size() : 0), size_t(1));
+ nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
+ nMaxConnections = std::max(nUserMaxConnections, 0);
// Trim requested connection counts, to fit into system limitations
nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS)), 0);
- int nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS);
+ nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS);
if (nFD < MIN_CORE_FILEDESCRIPTORS)
return InitError(_("Not enough file descriptors available."));
nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS, nMaxConnections);
@@ -923,8 +926,6 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS)
nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS;
- fServer = GetBoolArg("-server", false);
-
// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
int64_t nSignedPruneTarget = GetArg("-prune", 0) * 1024 * 1024;
if (nSignedPruneTarget < 0) {
@@ -980,9 +981,6 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
// Option to startup with mocktime set (used for regression testing):
SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
- ServiceFlags nLocalServices = NODE_NETWORK;
- ServiceFlags nRelevantServices = NODE_NETWORK;
-
if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
@@ -1031,17 +1029,11 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
}
}
}
+ return true;
+}
- // ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log
-
- // Initialize elliptic curve code
- ECC_Start();
- globalVerifyHandle.reset(new ECCVerifyHandle());
-
- // Sanity check
- if (!InitSanityCheck())
- return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME)));
-
+static bool LockDataDirectory(bool probeOnly)
+{
std::string strDataDir = GetDataDir().string();
// Make sure only a single Bitcoin process is using the data directory.
@@ -1051,11 +1043,45 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
try {
static boost::interprocess::file_lock lock(pathLockFile.string().c_str());
- if (!lock.try_lock())
+ if (!lock.try_lock()) {
return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), strDataDir, _(PACKAGE_NAME)));
+ }
+ if (probeOnly) {
+ lock.unlock();
+ }
} catch(const boost::interprocess::interprocess_exception& e) {
return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what()));
}
+ return true;
+}
+
+bool AppInitSanityChecks()
+{
+ // ********************************************************* Step 4: sanity checks
+
+ // Initialize elliptic curve code
+ ECC_Start();
+ globalVerifyHandle.reset(new ECCVerifyHandle());
+
+ // Sanity check
+ if (!InitSanityCheck())
+ return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME)));
+
+ // Probe the data directory lock to give an early error message, if possible
+ return LockDataDirectory(true);
+}
+
+bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
+{
+ const CChainParams& chainparams = Params();
+ // ********************************************************* Step 4a: application initialization
+ // After daemonization get the data directory lock again and hold on to it until exit
+ // This creates a slight window for a race condition to happen; however, this condition is harmless: it
+ // will at most make us exit without printing a message to the console.
+ if (!LockDataDirectory(false)) {
+ // Detailed error printed inside LockDataDirectory
+ return false;
+ }
#ifndef WIN32
CreatePidFile(GetPidFile(), getpid());
@@ -1069,7 +1095,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
if (!fLogTimestamps)
LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
- LogPrintf("Using data directory %s\n", strDataDir);
+ LogPrintf("Using data directory %s\n", GetDataDir().string());
LogPrintf("Using config file %s\n", GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string());
LogPrintf("Using at most %i connections (%i file descriptors available)\n", nMaxConnections, nFD);
@@ -1088,7 +1114,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
* that the server is there and will be ready later). Warmup mode will
* be disabled when initialisation is finished.
*/
- if (fServer)
+ if (GetBoolArg("-server", false))
{
uiInterface.InitMessage.connect(SetRPCWarmupStatus);
if (!AppInitServers(threadGroup))
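The locking change above splits data-directory locking into an early probe (released immediately) and a later, held lock taken after daemonization. The following self-contained sketch illustrates that probe-then-hold pattern with boost::interprocess::file_lock, the same primitive used in the hunk above; LockDirectory() and the ".lock" filename are hypothetical, not Bitcoin Core's actual names.

```cpp
// Illustrative sketch only -- not Bitcoin Core source.
#include <cstdio>
#include <string>
#include <boost/interprocess/sync/file_lock.hpp>

bool LockDirectory(const std::string& dir, bool probe_only)
{
    const std::string lock_path = dir + "/.lock";
    // The lock file must exist before file_lock can attach to it.
    if (std::FILE* f = std::fopen(lock_path.c_str(), "a")) std::fclose(f);

    // 'static' keeps the lock object (and the held lock) alive until process exit;
    // it is constructed only on the first call.
    static boost::interprocess::file_lock lock(lock_path.c_str());
    if (!lock.try_lock()) {
        return false; // another process already holds this directory
    }
    if (probe_only) {
        lock.unlock(); // early check only; the lock is re-taken after daemonizing
    }
    return true;
}
```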
diff --git a/src/init.h b/src/init.h
index 63e07ccb3c..1b87e0ec52 100644
--- a/src/init.h
+++ b/src/init.h
@@ -25,7 +25,30 @@ void Shutdown();
void InitLogging();
//!Parameter interaction: change current parameters depending on various rules
void InitParameterInteraction();
-bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler);
+
+/** Initialize bitcoin core: Basic context setup.
+ * @note This can be done before daemonization.
+ * @pre Parameters should be parsed and the config file should be read.
+ */
+bool AppInitBasicSetup();
+/**
+ * Initialization: parameter interaction.
+ * @note This can be done before daemonization.
+ * @pre Parameters should be parsed, the config file should be read, and AppInitBasicSetup should have been called.
+ */
+bool AppInitParameterInteraction();
+/**
+ * Initialization sanity checks: ECC init, runtime sanity checks, data directory lock probe.
+ * @note This can be done before daemonization.
+ * @pre Parameters should be parsed, the config file should be read, and AppInitParameterInteraction should have been called.
+ */
+bool AppInitSanityChecks();
+/**
+ * Bitcoin core main initialization.
+ * @note This should only be done after daemonization.
+ * @pre Parameters should be parsed, the config file should be read, and AppInitSanityChecks should have been called.
+ */
+bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler);
/** The help message mode determines what help message to show */
enum HelpMessageMode {
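To make the staged interface above concrete, here is a minimal sketch of the intended call order; StartNode() is a hypothetical caller, not bitcoind's actual startup code, and error reporting is elided.

```cpp
#include <boost/thread.hpp>
#include "init.h"
#include "scheduler.h"

bool StartNode(boost::thread_group& threadGroup, CScheduler& scheduler)
{
    // These stages are documented as safe to run before daemonization.
    if (!AppInitBasicSetup()) return false;
    if (!AppInitParameterInteraction()) return false;
    if (!AppInitSanityChecks()) return false; // ECC init, sanity checks, datadir lock probe

    // ... fork/daemonize here if requested ...

    // Main initialization; re-takes the data directory lock and holds it until exit.
    return AppInitMain(threadGroup, scheduler);
}
```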
diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml
new file mode 100644
index 0000000000..f5bd74c454
--- /dev/null
+++ b/src/leveldb/.travis.yml
@@ -0,0 +1,13 @@
+language: cpp
+compiler:
+- clang
+- gcc
+os:
+- linux
+- osx
+sudo: false
+before_install:
+- echo $LANG
+- echo $LC_ALL
+script:
+- make -j 4 check
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
index 2bd2cadcdd..07a5a1ead6 100644
--- a/src/leveldb/Makefile
+++ b/src/leveldb/Makefile
@@ -20,208 +20,395 @@ $(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
# this file is generated by the previous line to set build flags and sources
include build_config.mk
+TESTS = \
+ db/autocompact_test \
+ db/c_test \
+ db/corruption_test \
+ db/db_test \
+ db/dbformat_test \
+ db/fault_injection_test \
+ db/filename_test \
+ db/log_test \
+ db/recovery_test \
+ db/skiplist_test \
+ db/version_edit_test \
+ db/version_set_test \
+ db/write_batch_test \
+ helpers/memenv/memenv_test \
+ issues/issue178_test \
+ issues/issue200_test \
+ table/filter_block_test \
+ table/table_test \
+ util/arena_test \
+ util/bloom_test \
+ util/cache_test \
+ util/coding_test \
+ util/crc32c_test \
+ util/env_test \
+ util/hash_test
+
+UTILS = \
+ db/db_bench \
+ db/leveldbutil
+
+# Put the object files in a subdirectory, but the application at the top of the object dir.
+PROGNAMES := $(notdir $(TESTS) $(UTILS))
+
+# On Linux, you may need libkyotocabinet-dev as a dependency.
+BENCHMARKS = \
+ doc/bench/db_bench_sqlite3 \
+ doc/bench/db_bench_tree_db
+
CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)
LDFLAGS += $(PLATFORM_LDFLAGS)
LIBS += $(PLATFORM_LIBS)
-LIBOBJECTS = $(SOURCES:.cc=.o)
-MEMENVOBJECTS = $(MEMENV_SOURCES:.cc=.o)
-
-TESTUTIL = ./util/testutil.o
-TESTHARNESS = ./util/testharness.o $(TESTUTIL)
+SIMULATOR_OUTDIR=out-ios-x86
+DEVICE_OUTDIR=out-ios-arm
-# Note: iOS should probably be using libtool, not ar.
ifeq ($(PLATFORM), IOS)
+# Note: iOS should probably be using libtool, not ar.
AR=xcrun ar
+SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path)
+DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path)
+DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64
+SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64
+STATIC_OUTDIR=out-ios-universal
+else
+STATIC_OUTDIR=out-static
+SHARED_OUTDIR=out-shared
+STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES))
+SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench)
endif
-TESTS = \
- arena_test \
- autocompact_test \
- bloom_test \
- c_test \
- cache_test \
- coding_test \
- corruption_test \
- crc32c_test \
- db_test \
- dbformat_test \
- env_test \
- filename_test \
- filter_block_test \
- hash_test \
- issue178_test \
- issue200_test \
- log_test \
- memenv_test \
- skiplist_test \
- table_test \
- version_edit_test \
- version_set_test \
- write_batch_test
-
-PROGRAMS = db_bench leveldbutil $(TESTS)
-BENCHMARKS = db_bench_sqlite3 db_bench_tree_db
-
-LIBRARY = libleveldb.a
-MEMENVLIBRARY = libmemenv.a
+STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o))
+STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o))
+DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o))
+SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o))
+SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o
+TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL)
+
+STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS)))
+STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS)))
+STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS)
+DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS)
+SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS)
default: all
# Should we build shared libraries?
ifneq ($(PLATFORM_SHARED_EXT),)
+# Many leveldb test apps use non-exported APIs. Only build a subset for testing.
+SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS)
+
ifneq ($(PLATFORM_SHARED_VERSIONED),true)
-SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED2 = $(SHARED1)
-SHARED3 = $(SHARED1)
-SHARED = $(SHARED1)
+SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
+SHARED_LIB2 = $(SHARED_LIB1)
+SHARED_LIB3 = $(SHARED_LIB1)
+SHARED_LIBS = $(SHARED_LIB1)
+SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
else
# Update db.h if you change these.
-SHARED_MAJOR = 1
-SHARED_MINOR = 18
-SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED2 = $(SHARED1).$(SHARED_MAJOR)
-SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
-SHARED = $(SHARED1) $(SHARED2) $(SHARED3)
-$(SHARED1): $(SHARED3)
- ln -fs $(SHARED3) $(SHARED1)
-$(SHARED2): $(SHARED3)
- ln -fs $(SHARED3) $(SHARED2)
+SHARED_VERSION_MAJOR = 1
+SHARED_VERSION_MINOR = 19
+SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
+SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
+SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
+SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3)
+$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3)
+ ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1)
+$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3)
+ ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2)
+SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
endif
-$(SHARED3):
- $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED2) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SOURCES) -o $(SHARED3) $(LIBS)
+$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS)
+ $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS)
endif # PLATFORM_SHARED_EXT
-all: $(SHARED) $(LIBRARY)
+all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS)
-check: all $(PROGRAMS) $(TESTS)
- for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done
+check: $(STATIC_PROGRAMS)
+ for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done
clean:
- -rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) */*.o */*/*.o ios-x86/*/*.o ios-arm/*/*.o build_config.mk
- -rm -rf ios-x86/* ios-arm/*
+ -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal
+ -rm -f build_config.mk
+ -rm -rf ios-x86 ios-arm
-$(LIBRARY): $(LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(LIBOBJECTS)
+$(STATIC_OUTDIR):
+ mkdir $@
-db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
+$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR)
+ mkdir $@
-db_bench_sqlite3: doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
+$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR)
+ mkdir -p $@
-db_bench_tree_db: doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
+$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR)
+ mkdir $@
-leveldbutil: db/leveldb_main.o $(LIBOBJECTS)
- $(CXX) $(LDFLAGS) db/leveldb_main.o $(LIBOBJECTS) -o $@ $(LIBS)
+$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR)
+ mkdir $@
-arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR)
+ mkdir $@
-autocompact_test: db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+.PHONY: STATIC_OBJDIRS
+STATIC_OBJDIRS: \
+ $(STATIC_OUTDIR)/db \
+ $(STATIC_OUTDIR)/port \
+ $(STATIC_OUTDIR)/table \
+ $(STATIC_OUTDIR)/util \
+ $(STATIC_OUTDIR)/helpers/memenv
-bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR):
+ mkdir $@
-c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR)
+ mkdir $@
-cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR)
+ mkdir -p $@
-coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR)
+ mkdir $@
-corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR)
+ mkdir $@
-crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR)
+ mkdir $@
-db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+.PHONY: SHARED_OBJDIRS
+SHARED_OBJDIRS: \
+ $(SHARED_OUTDIR)/db \
+ $(SHARED_OUTDIR)/port \
+ $(SHARED_OUTDIR)/table \
+ $(SHARED_OUTDIR)/util \
+ $(SHARED_OUTDIR)/helpers/memenv
-dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR):
+ mkdir $@
-env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR)
+ mkdir $@
-filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR)
+ mkdir -p $@
-filter_block_test: table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR)
+ mkdir $@
-hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR)
+ mkdir $@
-issue178_test: issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR)
+ mkdir $@
-issue200_test: issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+.PHONY: DEVICE_OBJDIRS
+DEVICE_OBJDIRS: \
+ $(DEVICE_OUTDIR)/db \
+ $(DEVICE_OUTDIR)/port \
+ $(DEVICE_OUTDIR)/table \
+ $(DEVICE_OUTDIR)/util \
+ $(DEVICE_OUTDIR)/helpers/memenv
-log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR):
+ mkdir $@
-table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR)
+ mkdir -p $@
-version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-$(MEMENVLIBRARY) : $(MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(MEMENVOBJECTS)
+.PHONY: SIMULATOR_OBJDIRS
+SIMULATOR_OBJDIRS: \
+ $(SIMULATOR_OUTDIR)/db \
+ $(SIMULATOR_OUTDIR)/port \
+ $(SIMULATOR_OUTDIR)/table \
+ $(SIMULATOR_OUTDIR)/util \
+ $(SIMULATOR_OUTDIR)/helpers/memenv
-memenv_test : helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) -o $@ $(LIBS)
+$(STATIC_ALLOBJS): | STATIC_OBJDIRS
+$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS
+$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS
+$(SHARED_ALLOBJS): | SHARED_OBJDIRS
ifeq ($(PLATFORM), IOS)
-# For iOS, create universal object files to be used on both the simulator and
+$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(DEVICE_LIBOBJECTS)
+
+$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS)
+
+$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS)
+
+$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS)
+
+# For iOS, create universal object libraries to be used on both the simulator and
# a device.
-PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms
-SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer
-DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer
-IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString)
-IOSARCH=-arch armv6 -arch armv7 -arch armv7s -arch arm64
-
-.cc.o:
- mkdir -p ios-x86/$(dir $@)
- xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
- mkdir -p ios-arm/$(dir $@)
- xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
-
-.c.o:
- mkdir -p ios-x86/$(dir $@)
- xcrun -sdk iphonesimulator $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
- mkdir -p ios-arm/$(dir $@)
- xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
+$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a
+ lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@
+$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a
+ lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@
else
-.cc.o:
+$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(STATIC_LIBOBJECTS)
+
+$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(STATIC_MEMENVOBJECTS)
+endif
+
+$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(SHARED_MEMENVOBJECTS)
+
+$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
+
+$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
+
+$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS)
+ $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS)
+
+$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL)
+ $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS)
+
+.PHONY: run-shared
+run-shared: $(SHARED_OUTDIR)/db_bench
+ LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench
+
+$(SIMULATOR_OUTDIR)/%.o: %.cc
+ xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
+
+$(DEVICE_OUTDIR)/%.o: %.cc
+ xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
+
+$(SIMULATOR_OUTDIR)/%.o: %.c
+ xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
+
+$(DEVICE_OUTDIR)/%.o: %.c
+ xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
+
+$(STATIC_OUTDIR)/%.o: %.cc
$(CXX) $(CXXFLAGS) -c $< -o $@
-.c.o:
+$(STATIC_OUTDIR)/%.o: %.c
$(CC) $(CFLAGS) -c $< -o $@
-endif
+
+$(SHARED_OUTDIR)/%.o: %.cc
+ $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
+
+$(SHARED_OUTDIR)/%.o: %.c
+ $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
diff --git a/src/leveldb/README b/src/leveldb/README
deleted file mode 100644
index 3618adeeed..0000000000
--- a/src/leveldb/README
+++ /dev/null
@@ -1,51 +0,0 @@
-leveldb: A key-value store
-Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
-
-The code under this directory implements a system for maintaining a
-persistent key/value store.
-
-See doc/index.html for more explanation.
-See doc/impl.html for a brief overview of the implementation.
-
-The public interface is in include/*.h. Callers should not include or
-rely on the details of any other header files in this package. Those
-internal APIs may be changed without warning.
-
-Guide to header files:
-
-include/db.h
- Main interface to the DB: Start here
-
-include/options.h
- Control over the behavior of an entire database, and also
- control over the behavior of individual reads and writes.
-
-include/comparator.h
- Abstraction for user-specified comparison function. If you want
- just bytewise comparison of keys, you can use the default comparator,
- but clients can write their own comparator implementations if they
- want custom ordering (e.g. to handle different character
- encodings, etc.)
-
-include/iterator.h
- Interface for iterating over data. You can get an iterator
- from a DB object.
-
-include/write_batch.h
- Interface for atomically applying multiple updates to a database.
-
-include/slice.h
- A simple module for maintaining a pointer and a length into some
- other byte array.
-
-include/status.h
- Status is returned from many of the public interfaces and is used
- to report success and various kinds of errors.
-
-include/env.h
- Abstraction of the OS environment. A posix implementation of
- this interface is in util/env_posix.cc
-
-include/table.h
-include/table_builder.h
- Lower-level modules that most clients probably won't use directly
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
index 480affb5ca..c75b185e0e 100644
--- a/src/leveldb/README.md
+++ b/src/leveldb/README.md
@@ -1,5 +1,7 @@
**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
+[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
+
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
# Features
@@ -10,9 +12,11 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Multiple changes can be made in one atomic batch.
* Users can create a transient snapshot to get a consistent view of data.
* Forward and backward iteration is supported over the data.
- * Data is automatically compressed using the [Snappy compression library](http://code.google.com/p/snappy).
+ * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/).
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
- * [Detailed documentation](http://htmlpreview.github.io/?https://github.com/google/leveldb/blob/master/doc/index.html) about how to use the library is included with the source code.
+
+# Documentation
+ [LevelDB library documentation](https://rawgit.com/google/leveldb/master/doc/index.html) is online and bundled with the source code.
# Limitations
@@ -20,6 +24,37 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Only a single process (possibly multi-threaded) can access a particular database at a time.
* There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
+# Contributing to the leveldb Project
+The leveldb project welcomes contributions. leveldb's primary goal is to be
+a reliable and fast key/value store. Changes that are in line with the
+features/limitations outlined above, and meet the requirements below,
+will be considered.
+
+Contribution requirements:
+
+1. **POSIX only**. We _generally_ only accept changes that are both
+ compiled and tested on a POSIX platform - usually Linux. Very small
+ changes will sometimes be accepted, but consider that more of an
+ exception than the rule.
+
+2. **Stable API**. We strive very hard to maintain a stable API. Changes that
+ require changes for projects using leveldb _might_ be rejected without
+ sufficient benefit to the project.
+
+3. **Tests**: All changes must be accompanied by a new (or changed) test, or
+ a sufficient explanation as to why a new (or changed) test is not required.
+
+## Submitting a Pull Request
+Before any pull request will be accepted the author must first sign a
+Contributor License Agreement (CLA) at https://cla.developers.google.com/.
+
+To keep the commit timeline linear,
+[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits)
+your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase)
+on google/leveldb/master. A linear timeline is also more easily synced
+with the internal repository at Google. More information is available on GitHub's
+[About Git rebase](https://help.github.com/articles/about-git-rebase/) page.
+
# Performance
Here is a performance report (with explanations) from the run of the
diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform
index a1101c1bda..d7edab1d87 100755
--- a/src/leveldb/build_detect_platform
+++ b/src/leveldb/build_detect_platform
@@ -175,7 +175,7 @@ DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"
set -f # temporarily disable globbing so that our patterns aren't expanded
PRUNE_TEST="-name *test*.cc -prune"
PRUNE_BENCH="-name *_bench.cc -prune"
-PRUNE_TOOL="-name leveldb_main.cc -prune"
+PRUNE_TOOL="-name leveldbutil.cc -prune"
PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`
set +f # re-enable globbing
diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc
index 96afc68913..37a484d25f 100644
--- a/src/leveldb/db/corruption_test.cc
+++ b/src/leveldb/db/corruption_test.cc
@@ -36,7 +36,7 @@ class CorruptionTest {
tiny_cache_ = NewLRUCache(100);
options_.env = &env_;
options_.block_cache = tiny_cache_;
- dbname_ = test::TmpDir() + "/db_test";
+ dbname_ = test::TmpDir() + "/corruption_test";
DestroyDB(dbname_, options_);
db_ = NULL;
diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/db/db_bench.cc
index 705a170aae..7a0f5e08cd 100644
--- a/src/leveldb/db/db_bench.cc
+++ b/src/leveldb/db/db_bench.cc
@@ -33,6 +33,7 @@
// readmissing -- read N missing keys in random order
// readhot -- read N times in random order from 1% section of DB
// seekrandom -- N random seeks
+// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
// acquireload -- load N*1000 times
// Meta operations:
@@ -99,6 +100,9 @@ static int FLAGS_bloom_bits = -1;
// benchmark will fail.
static bool FLAGS_use_existing_db = false;
+// If true, reuse existing log/MANIFEST files when re-opening a database.
+static bool FLAGS_reuse_logs = false;
+
// Use the db with the following name.
static const char* FLAGS_db = NULL;
@@ -138,6 +142,7 @@ class RandomGenerator {
}
};
+#if defined(__linux)
static Slice TrimSpace(Slice s) {
size_t start = 0;
while (start < s.size() && isspace(s[start])) {
@@ -149,6 +154,7 @@ static Slice TrimSpace(Slice s) {
}
return Slice(s.data() + start, limit - start);
}
+#endif
static void AppendWithSpace(std::string* str, Slice msg) {
if (msg.empty()) return;
@@ -442,7 +448,11 @@ class Benchmark {
bool fresh_db = false;
int num_threads = FLAGS_threads;
- if (name == Slice("fillseq")) {
+ if (name == Slice("open")) {
+ method = &Benchmark::OpenBench;
+ num_ /= 10000;
+ if (num_ < 1) num_ = 1;
+ } else if (name == Slice("fillseq")) {
fresh_db = true;
method = &Benchmark::WriteSeq;
} else if (name == Slice("fillbatch")) {
@@ -695,6 +705,7 @@ class Benchmark {
options.write_buffer_size = FLAGS_write_buffer_size;
options.max_open_files = FLAGS_open_files;
options.filter_policy = filter_policy_;
+ options.reuse_logs = FLAGS_reuse_logs;
Status s = DB::Open(options, FLAGS_db, &db_);
if (!s.ok()) {
fprintf(stderr, "open error: %s\n", s.ToString().c_str());
@@ -702,6 +713,14 @@ class Benchmark {
}
}
+ void OpenBench(ThreadState* thread) {
+ for (int i = 0; i < num_; i++) {
+ delete db_;
+ Open();
+ thread->stats.FinishedSingleOp();
+ }
+ }
+
void WriteSeq(ThreadState* thread) {
DoWrite(thread, true);
}
@@ -941,6 +960,9 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_use_existing_db = n;
+ } else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_reuse_logs = n;
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
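The new --reuse_logs benchmark flag maps onto the reuse_logs field of leveldb::Options introduced by this leveldb update. A minimal usage sketch follows; the database path is illustrative only.

```cpp
#include <cassert>
#include "leveldb/db.h"

int main() {
    leveldb::Options options;
    options.create_if_missing = true;
    options.reuse_logs = true; // keep appending to the existing log/MANIFEST on reopen

    leveldb::DB* db = NULL;
    leveldb::Status s = leveldb::DB::Open(options, "/tmp/reuse_logs_demo", &db);
    assert(s.ok());
    assert(db->Put(leveldb::WriteOptions(), "key", "value").ok());
    delete db; // a later Open() with reuse_logs set can append to the same log file
    return 0;
}
```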
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index 49b95953b4..60f4e66e55 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -125,7 +125,7 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
db_lock_(NULL),
shutting_down_(NULL),
bg_cv_(&mutex_),
- mem_(new MemTable(internal_comparator_)),
+ mem_(NULL),
imm_(NULL),
logfile_(NULL),
logfile_number_(0),
@@ -134,7 +134,6 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
tmp_batch_(new WriteBatch),
bg_compaction_scheduled_(false),
manual_compaction_(NULL) {
- mem_->Ref();
has_imm_.Release_Store(NULL);
// Reserve ten files or so for other uses and give the rest to TableCache.
@@ -271,7 +270,7 @@ void DBImpl::DeleteObsoleteFiles() {
}
}
-Status DBImpl::Recover(VersionEdit* edit) {
+Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
mutex_.AssertHeld();
// Ignore error from CreateDir since the creation of the DB is
@@ -301,66 +300,69 @@ Status DBImpl::Recover(VersionEdit* edit) {
}
}
- s = versions_->Recover();
- if (s.ok()) {
- SequenceNumber max_sequence(0);
-
- // Recover from all newer log files than the ones named in the
- // descriptor (new log files may have been added by the previous
- // incarnation without registering them in the descriptor).
- //
- // Note that PrevLogNumber() is no longer used, but we pay
- // attention to it in case we are recovering a database
- // produced by an older version of leveldb.
- const uint64_t min_log = versions_->LogNumber();
- const uint64_t prev_log = versions_->PrevLogNumber();
- std::vector<std::string> filenames;
- s = env_->GetChildren(dbname_, &filenames);
+ s = versions_->Recover(save_manifest);
+ if (!s.ok()) {
+ return s;
+ }
+ SequenceNumber max_sequence(0);
+
+ // Recover from all newer log files than the ones named in the
+ // descriptor (new log files may have been added by the previous
+ // incarnation without registering them in the descriptor).
+ //
+ // Note that PrevLogNumber() is no longer used, but we pay
+ // attention to it in case we are recovering a database
+ // produced by an older version of leveldb.
+ const uint64_t min_log = versions_->LogNumber();
+ const uint64_t prev_log = versions_->PrevLogNumber();
+ std::vector<std::string> filenames;
+ s = env_->GetChildren(dbname_, &filenames);
+ if (!s.ok()) {
+ return s;
+ }
+ std::set<uint64_t> expected;
+ versions_->AddLiveFiles(&expected);
+ uint64_t number;
+ FileType type;
+ std::vector<uint64_t> logs;
+ for (size_t i = 0; i < filenames.size(); i++) {
+ if (ParseFileName(filenames[i], &number, &type)) {
+ expected.erase(number);
+ if (type == kLogFile && ((number >= min_log) || (number == prev_log)))
+ logs.push_back(number);
+ }
+ }
+ if (!expected.empty()) {
+ char buf[50];
+ snprintf(buf, sizeof(buf), "%d missing files; e.g.",
+ static_cast<int>(expected.size()));
+ return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
+ }
+
+ // Recover in the order in which the logs were generated
+ std::sort(logs.begin(), logs.end());
+ for (size_t i = 0; i < logs.size(); i++) {
+ s = RecoverLogFile(logs[i], (i == logs.size() - 1), save_manifest, edit,
+ &max_sequence);
if (!s.ok()) {
return s;
}
- std::set<uint64_t> expected;
- versions_->AddLiveFiles(&expected);
- uint64_t number;
- FileType type;
- std::vector<uint64_t> logs;
- for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type)) {
- expected.erase(number);
- if (type == kLogFile && ((number >= min_log) || (number == prev_log)))
- logs.push_back(number);
- }
- }
- if (!expected.empty()) {
- char buf[50];
- snprintf(buf, sizeof(buf), "%d missing files; e.g.",
- static_cast<int>(expected.size()));
- return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
- }
-
- // Recover in the order in which the logs were generated
- std::sort(logs.begin(), logs.end());
- for (size_t i = 0; i < logs.size(); i++) {
- s = RecoverLogFile(logs[i], edit, &max_sequence);
- // The previous incarnation may not have written any MANIFEST
- // records after allocating this log number. So we manually
- // update the file number allocation counter in VersionSet.
- versions_->MarkFileNumberUsed(logs[i]);
- }
+ // The previous incarnation may not have written any MANIFEST
+ // records after allocating this log number. So we manually
+ // update the file number allocation counter in VersionSet.
+ versions_->MarkFileNumberUsed(logs[i]);
+ }
- if (s.ok()) {
- if (versions_->LastSequence() < max_sequence) {
- versions_->SetLastSequence(max_sequence);
- }
- }
+ if (versions_->LastSequence() < max_sequence) {
+ versions_->SetLastSequence(max_sequence);
}
- return s;
+ return Status::OK();
}
-Status DBImpl::RecoverLogFile(uint64_t log_number,
- VersionEdit* edit,
+Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
+ bool* save_manifest, VersionEdit* edit,
SequenceNumber* max_sequence) {
struct LogReporter : public log::Reader::Reporter {
Env* env;
@@ -405,6 +407,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
std::string scratch;
Slice record;
WriteBatch batch;
+ int compactions = 0;
MemTable* mem = NULL;
while (reader.ReadRecord(&record, &scratch) &&
status.ok()) {
@@ -432,25 +435,52 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
}
if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
+ compactions++;
+ *save_manifest = true;
status = WriteLevel0Table(mem, edit, NULL);
+ mem->Unref();
+ mem = NULL;
if (!status.ok()) {
// Reflect errors immediately so that conditions like full
// file-systems cause the DB::Open() to fail.
break;
}
- mem->Unref();
- mem = NULL;
}
}
- if (status.ok() && mem != NULL) {
- status = WriteLevel0Table(mem, edit, NULL);
- // Reflect errors immediately so that conditions like full
- // file-systems cause the DB::Open() to fail.
+ delete file;
+
+ // See if we should keep reusing the last log file.
+ if (status.ok() && options_.reuse_logs && last_log && compactions == 0) {
+ assert(logfile_ == NULL);
+ assert(log_ == NULL);
+ assert(mem_ == NULL);
+ uint64_t lfile_size;
+ if (env_->GetFileSize(fname, &lfile_size).ok() &&
+ env_->NewAppendableFile(fname, &logfile_).ok()) {
+ Log(options_.info_log, "Reusing old log %s \n", fname.c_str());
+ log_ = new log::Writer(logfile_, lfile_size);
+ logfile_number_ = log_number;
+ if (mem != NULL) {
+ mem_ = mem;
+ mem = NULL;
+ } else {
+ // mem can be NULL if lognum exists but was empty.
+ mem_ = new MemTable(internal_comparator_);
+ mem_->Ref();
+ }
+ }
+ }
+
+ if (mem != NULL) {
+ // mem did not get reused; compact it.
+ if (status.ok()) {
+ *save_manifest = true;
+ status = WriteLevel0Table(mem, edit, NULL);
+ }
+ mem->Unref();
}
- if (mem != NULL) mem->Unref();
- delete file;
return status;
}
@@ -821,8 +851,9 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
delete iter;
if (s.ok()) {
Log(options_.info_log,
- "Generated table #%llu: %lld keys, %lld bytes",
+ "Generated table #%llu@%d: %lld keys, %lld bytes",
(unsigned long long) output_number,
+ compact->compaction->level(),
(unsigned long long) current_entries,
(unsigned long long) current_bytes);
}
@@ -1395,6 +1426,19 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
} else if (in == "sstables") {
*value = versions_->current()->DebugString();
return true;
+ } else if (in == "approximate-memory-usage") {
+ size_t total_usage = options_.block_cache->TotalCharge();
+ if (mem_) {
+ total_usage += mem_->ApproximateMemoryUsage();
+ }
+ if (imm_) {
+ total_usage += imm_->ApproximateMemoryUsage();
+ }
+ char buf[50];
+ snprintf(buf, sizeof(buf), "%llu",
+ static_cast<unsigned long long>(total_usage));
+ value->append(buf);
+ return true;
}
return false;
@@ -1449,8 +1493,11 @@ Status DB::Open(const Options& options, const std::string& dbname,
DBImpl* impl = new DBImpl(options, dbname);
impl->mutex_.Lock();
VersionEdit edit;
- Status s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
- if (s.ok()) {
+ // Recover handles create_if_missing, error_if_exists
+ bool save_manifest = false;
+ Status s = impl->Recover(&edit, &save_manifest);
+ if (s.ok() && impl->mem_ == NULL) {
+ // Create new log and a corresponding memtable.
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
@@ -1460,15 +1507,22 @@ Status DB::Open(const Options& options, const std::string& dbname,
impl->logfile_ = lfile;
impl->logfile_number_ = new_log_number;
impl->log_ = new log::Writer(lfile);
- s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
- }
- if (s.ok()) {
- impl->DeleteObsoleteFiles();
- impl->MaybeScheduleCompaction();
+ impl->mem_ = new MemTable(impl->internal_comparator_);
+ impl->mem_->Ref();
}
}
+ if (s.ok() && save_manifest) {
+ edit.SetPrevLogNumber(0); // No older logs needed after recovery.
+ edit.SetLogNumber(impl->logfile_number_);
+ s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
+ }
+ if (s.ok()) {
+ impl->DeleteObsoleteFiles();
+ impl->MaybeScheduleCompaction();
+ }
impl->mutex_.Unlock();
if (s.ok()) {
+ assert(impl->mem_ != NULL);
*dbptr = impl;
} else {
delete impl;
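The db_impl.cc changes above also add a "leveldb.approximate-memory-usage" property. Here is a minimal sketch of querying it through the existing DB::GetProperty() interface; the function name is illustrative.

```cpp
#include <cstdio>
#include <string>
#include "leveldb/db.h"

void PrintApproximateMemoryUsage(leveldb::DB* db) {
    std::string value;
    // Reports block cache usage plus live memtable(s) as a decimal string of bytes.
    if (db->GetProperty("leveldb.approximate-memory-usage", &value)) {
        std::printf("approximate memory usage: %s bytes\n", value.c_str());
    }
}
```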
diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h
index cfc998164a..8ff323e728 100644
--- a/src/leveldb/db/db_impl.h
+++ b/src/leveldb/db/db_impl.h
@@ -78,7 +78,8 @@ class DBImpl : public DB {
// Recover the descriptor from persistent storage. May do a significant
// amount of work to recover recently logged updates. Any changes to
// be made to the descriptor are added to *edit.
- Status Recover(VersionEdit* edit) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ Status Recover(VersionEdit* edit, bool* save_manifest)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void MaybeIgnoreError(Status* s) const;
@@ -90,9 +91,8 @@ class DBImpl : public DB {
// Errors are recorded in bg_error_.
void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
- Status RecoverLogFile(uint64_t log_number,
- VersionEdit* edit,
- SequenceNumber* max_sequence)
+ Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest,
+ VersionEdit* edit, SequenceNumber* max_sequence)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index 0fed9137d5..a0b08bc19c 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -193,6 +193,7 @@ class DBTest {
// Sequence of option configurations to try
enum OptionConfig {
kDefault,
+ kReuse,
kFilter,
kUncompressed,
kEnd
@@ -237,7 +238,11 @@ class DBTest {
// Return the current option configuration.
Options CurrentOptions() {
Options options;
+ options.reuse_logs = false;
switch (option_config_) {
+ case kReuse:
+ options.reuse_logs = true;
+ break;
case kFilter:
options.filter_policy = filter_policy_;
break;
@@ -558,6 +563,17 @@ TEST(DBTest, GetFromVersions) {
} while (ChangeOptions());
}
+TEST(DBTest, GetMemUsage) {
+ do {
+ ASSERT_OK(Put("foo", "v1"));
+ std::string val;
+ ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
+ int mem_usage = atoi(val.c_str());
+ ASSERT_GT(mem_usage, 0);
+ ASSERT_LT(mem_usage, 5*1024*1024);
+ } while (ChangeOptions());
+}
+
TEST(DBTest, GetSnapshot) {
do {
// Try with both a short key and a long key
@@ -1080,6 +1096,14 @@ TEST(DBTest, ApproximateSizes) {
// 0 because GetApproximateSizes() does not account for memtable space
ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
+ if (options.reuse_logs) {
+ // Recovery will reuse the memtable, and GetApproximateSizes() does not
+ // account for memtable usage.
+ Reopen(&options);
+ ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
+ continue;
+ }
+
// Check sizes across recovery by reopening a few times
for (int run = 0; run < 3; run++) {
Reopen(&options);
@@ -1123,6 +1147,11 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
+ if (options.reuse_logs) {
+ // Need to force a memtable compaction since recovery does not do so.
+ ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ }
+
// Check sizes across recovery by reopening a few times
for (int run = 0; run < 3; run++) {
Reopen(&options);
@@ -2084,7 +2113,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
InternalKeyComparator cmp(BytewiseComparator());
Options options;
VersionSet vset(dbname, &options, NULL, &cmp);
- ASSERT_OK(vset.Recover());
+ bool save_manifest;
+ ASSERT_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc
new file mode 100644
index 0000000000..875dfe81ee
--- /dev/null
+++ b/src/leveldb/db/fault_injection_test.cc
@@ -0,0 +1,554 @@
+// Copyright 2014 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// This test uses a custom Env to keep track of the state of a filesystem as of
+// the last "sync". It then checks for data loss errors by purposely dropping
+// file data (or entire files) not protected by a "sync".
+
+#include "leveldb/db.h"
+
+#include <map>
+#include <set>
+#include "db/db_impl.h"
+#include "db/filename.h"
+#include "db/log_format.h"
+#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/env.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+static const int kValueSize = 1000;
+static const int kMaxNumValues = 2000;
+static const size_t kNumIterations = 3;
+
+class FaultInjectionTestEnv;
+
+namespace {
+
+// Assume a filename, and not a directory name like "/foo/bar/"
+static std::string GetDirName(const std::string filename) {
+ size_t found = filename.find_last_of("/\\");
+ if (found == std::string::npos) {
+ return "";
+ } else {
+ return filename.substr(0, found);
+ }
+}
+
+Status SyncDir(const std::string& dir) {
+ // As this is a test it isn't required to *actually* sync this directory.
+ return Status::OK();
+}
+
+// A basic file truncation function suitable for this test.
+Status Truncate(const std::string& filename, uint64_t length) {
+ leveldb::Env* env = leveldb::Env::Default();
+
+ SequentialFile* orig_file;
+ Status s = env->NewSequentialFile(filename, &orig_file);
+ if (!s.ok())
+ return s;
+
+ char* scratch = new char[length];
+ leveldb::Slice result;
+ s = orig_file->Read(length, &result, scratch);
+ delete orig_file;
+ if (s.ok()) {
+ std::string tmp_name = GetDirName(filename) + "/truncate.tmp";
+ WritableFile* tmp_file;
+ s = env->NewWritableFile(tmp_name, &tmp_file);
+ if (s.ok()) {
+ s = tmp_file->Append(result);
+ delete tmp_file;
+ if (s.ok()) {
+ s = env->RenameFile(tmp_name, filename);
+ } else {
+ env->DeleteFile(tmp_name);
+ }
+ }
+ }
+
+ delete[] scratch;
+
+ return s;
+}
+
+struct FileState {
+ std::string filename_;
+ ssize_t pos_;
+ ssize_t pos_at_last_sync_;
+ ssize_t pos_at_last_flush_;
+
+ FileState(const std::string& filename)
+ : filename_(filename),
+ pos_(-1),
+ pos_at_last_sync_(-1),
+ pos_at_last_flush_(-1) { }
+
+ FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
+
+ bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; }
+
+ Status DropUnsyncedData() const;
+};
+
+} // anonymous namespace
+
+// A wrapper around WritableFile which informs another Env whenever this file
+// is written to or sync'ed.
+class TestWritableFile : public WritableFile {
+ public:
+ TestWritableFile(const FileState& state,
+ WritableFile* f,
+ FaultInjectionTestEnv* env);
+ virtual ~TestWritableFile();
+ virtual Status Append(const Slice& data);
+ virtual Status Close();
+ virtual Status Flush();
+ virtual Status Sync();
+
+ private:
+ FileState state_;
+ WritableFile* target_;
+ bool writable_file_opened_;
+ FaultInjectionTestEnv* env_;
+
+ Status SyncParent();
+};
+
+class FaultInjectionTestEnv : public EnvWrapper {
+ public:
+ FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {}
+ virtual ~FaultInjectionTestEnv() { }
+ virtual Status NewWritableFile(const std::string& fname,
+ WritableFile** result);
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result);
+ virtual Status DeleteFile(const std::string& f);
+ virtual Status RenameFile(const std::string& s, const std::string& t);
+
+ void WritableFileClosed(const FileState& state);
+ Status DropUnsyncedFileData();
+ Status DeleteFilesCreatedAfterLastDirSync();
+ void DirWasSynced();
+ bool IsFileCreatedSinceLastDirSync(const std::string& filename);
+ void ResetState();
+ void UntrackFile(const std::string& f);
+ // Setting the filesystem to inactive is the test equivalent to simulating a
+ // system reset. Setting to inactive will freeze our saved filesystem state so
+ // that it will stop being recorded. It can then be reset back to the state at
+ // the time of the reset.
+ bool IsFilesystemActive() const { return filesystem_active_; }
+ void SetFilesystemActive(bool active) { filesystem_active_ = active; }
+
+ private:
+ port::Mutex mutex_;
+ std::map<std::string, FileState> db_file_state_;
+ std::set<std::string> new_files_since_last_dir_sync_;
+ bool filesystem_active_; // Record flushes, syncs, writes
+};
+
+TestWritableFile::TestWritableFile(const FileState& state,
+ WritableFile* f,
+ FaultInjectionTestEnv* env)
+ : state_(state),
+ target_(f),
+ writable_file_opened_(true),
+ env_(env) {
+ assert(f != NULL);
+}
+
+TestWritableFile::~TestWritableFile() {
+ if (writable_file_opened_) {
+ Close();
+ }
+ delete target_;
+}
+
+Status TestWritableFile::Append(const Slice& data) {
+ Status s = target_->Append(data);
+ if (s.ok() && env_->IsFilesystemActive()) {
+ state_.pos_ += data.size();
+ }
+ return s;
+}
+
+Status TestWritableFile::Close() {
+ writable_file_opened_ = false;
+ Status s = target_->Close();
+ if (s.ok()) {
+ env_->WritableFileClosed(state_);
+ }
+ return s;
+}
+
+Status TestWritableFile::Flush() {
+ Status s = target_->Flush();
+ if (s.ok() && env_->IsFilesystemActive()) {
+ state_.pos_at_last_flush_ = state_.pos_;
+ }
+ return s;
+}
+
+Status TestWritableFile::SyncParent() {
+ Status s = SyncDir(GetDirName(state_.filename_));
+ if (s.ok()) {
+ env_->DirWasSynced();
+ }
+ return s;
+}
+
+Status TestWritableFile::Sync() {
+ if (!env_->IsFilesystemActive()) {
+ return Status::OK();
+ }
+ // Ensure new files referred to by the manifest are in the filesystem.
+ Status s = target_->Sync();
+ if (s.ok()) {
+ state_.pos_at_last_sync_ = state_.pos_;
+ }
+ if (env_->IsFileCreatedSinceLastDirSync(state_.filename_)) {
+ Status ps = SyncParent();
+ if (s.ok() && !ps.ok()) {
+ s = ps;
+ }
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::NewWritableFile(const std::string& fname,
+ WritableFile** result) {
+ WritableFile* actual_writable_file;
+ Status s = target()->NewWritableFile(fname, &actual_writable_file);
+ if (s.ok()) {
+ FileState state(fname);
+ state.pos_ = 0;
+ *result = new TestWritableFile(state, actual_writable_file, this);
+ // NewWritableFile doesn't append to files, so if the same file is
+ // opened again then it will be truncated - so forget our saved
+ // state.
+ UntrackFile(fname);
+ MutexLock l(&mutex_);
+ new_files_since_last_dir_sync_.insert(fname);
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ WritableFile* actual_writable_file;
+ Status s = target()->NewAppendableFile(fname, &actual_writable_file);
+ if (s.ok()) {
+ FileState state(fname);
+ state.pos_ = 0;
+ {
+ MutexLock l(&mutex_);
+ if (db_file_state_.count(fname) == 0) {
+ new_files_since_last_dir_sync_.insert(fname);
+ } else {
+ state = db_file_state_[fname];
+ }
+ }
+ *result = new TestWritableFile(state, actual_writable_file, this);
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::DropUnsyncedFileData() {
+ Status s;
+ MutexLock l(&mutex_);
+ for (std::map<std::string, FileState>::const_iterator it =
+ db_file_state_.begin();
+ s.ok() && it != db_file_state_.end(); ++it) {
+ const FileState& state = it->second;
+ if (!state.IsFullySynced()) {
+ s = state.DropUnsyncedData();
+ }
+ }
+ return s;
+}
+
+void FaultInjectionTestEnv::DirWasSynced() {
+ MutexLock l(&mutex_);
+ new_files_since_last_dir_sync_.clear();
+}
+
+bool FaultInjectionTestEnv::IsFileCreatedSinceLastDirSync(
+ const std::string& filename) {
+ MutexLock l(&mutex_);
+ return new_files_since_last_dir_sync_.find(filename) !=
+ new_files_since_last_dir_sync_.end();
+}
+
+void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
+ MutexLock l(&mutex_);
+ db_file_state_.erase(f);
+ new_files_since_last_dir_sync_.erase(f);
+}
+
+Status FaultInjectionTestEnv::DeleteFile(const std::string& f) {
+ Status s = EnvWrapper::DeleteFile(f);
+ ASSERT_OK(s);
+ if (s.ok()) {
+ UntrackFile(f);
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::RenameFile(const std::string& s,
+ const std::string& t) {
+ Status ret = EnvWrapper::RenameFile(s, t);
+
+ if (ret.ok()) {
+ MutexLock l(&mutex_);
+ if (db_file_state_.find(s) != db_file_state_.end()) {
+ db_file_state_[t] = db_file_state_[s];
+ db_file_state_.erase(s);
+ }
+
+ if (new_files_since_last_dir_sync_.erase(s) != 0) {
+ assert(new_files_since_last_dir_sync_.find(t) ==
+ new_files_since_last_dir_sync_.end());
+ new_files_since_last_dir_sync_.insert(t);
+ }
+ }
+
+ return ret;
+}
+
+void FaultInjectionTestEnv::ResetState() {
+ // Since we are not destroying the database, the existing files
+ // should keep their recorded synced/flushed state. Therefore
+ // we do not reset db_file_state_ and new_files_since_last_dir_sync_.
+ MutexLock l(&mutex_);
+ SetFilesystemActive(true);
+}
+
+Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
+ // Because DeleteFile accesses this container, make a copy to avoid deadlock
+ mutex_.Lock();
+ std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
+ new_files_since_last_dir_sync_.end());
+ mutex_.Unlock();
+ Status s;
+ std::set<std::string>::const_iterator it;
+ for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) {
+ s = DeleteFile(*it);
+ }
+ return s;
+}
+
+void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
+ MutexLock l(&mutex_);
+ db_file_state_[state.filename_] = state;
+}
+
+Status FileState::DropUnsyncedData() const {
+ ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+ return Truncate(filename_, sync_pos);
+}
+
+class FaultInjectionTest {
+ public:
+ enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR };
+ enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES };
+
+ FaultInjectionTestEnv* env_;
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
+
+ FaultInjectionTest()
+ : env_(new FaultInjectionTestEnv),
+ tiny_cache_(NewLRUCache(100)),
+ db_(NULL) {
+ dbname_ = test::TmpDir() + "/fault_test";
+ DestroyDB(dbname_, Options()); // Destroy any db from earlier run
+ options_.reuse_logs = true;
+ options_.env = env_;
+ options_.paranoid_checks = true;
+ options_.block_cache = tiny_cache_;
+ options_.create_if_missing = true;
+ }
+
+ ~FaultInjectionTest() {
+ CloseDB();
+ DestroyDB(dbname_, Options());
+ delete tiny_cache_;
+ delete env_;
+ }
+
+ void ReuseLogs(bool reuse) {
+ options_.reuse_logs = reuse;
+ }
+
+ void Build(int start_idx, int num_vals) {
+ std::string key_space, value_space;
+ WriteBatch batch;
+ for (int i = start_idx; i < start_idx + num_vals; i++) {
+ Slice key = Key(i, &key_space);
+ batch.Clear();
+ batch.Put(key, Value(i, &value_space));
+ WriteOptions options;
+ ASSERT_OK(db_->Write(options, &batch));
+ }
+ }
+
+ Status ReadValue(int i, std::string* val) const {
+ std::string key_space, value_space;
+ Slice key = Key(i, &key_space);
+ Value(i, &value_space);
+ ReadOptions options;
+ return db_->Get(options, key, val);
+ }
+
+ Status Verify(int start_idx, int num_vals,
+ ExpectedVerifResult expected) const {
+ std::string val;
+ std::string value_space;
+ Status s;
+ for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) {
+ Value(i, &value_space);
+ s = ReadValue(i, &val);
+ if (expected == VAL_EXPECT_NO_ERROR) {
+ if (s.ok()) {
+ ASSERT_EQ(value_space, val);
+ }
+ } else if (s.ok()) {
+ fprintf(stderr, "Expected an error at %d, but was OK\n", i);
+ s = Status::IOError(dbname_, "Expected value error:");
+ } else {
+ s = Status::OK(); // An expected error
+ }
+ }
+ return s;
+ }
+
+ // Return the ith key
+ Slice Key(int i, std::string* storage) const {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%016d", i);
+ storage->assign(buf, strlen(buf));
+ return Slice(*storage);
+ }
+
+ // Return the value to associate with the specified key
+ Slice Value(int k, std::string* storage) const {
+ Random r(k);
+ return test::RandomString(&r, kValueSize, storage);
+ }
+
+ Status OpenDB() {
+ delete db_;
+ db_ = NULL;
+ env_->ResetState();
+ return DB::Open(options_, dbname_, &db_);
+ }
+
+ void CloseDB() {
+ delete db_;
+ db_ = NULL;
+ }
+
+ void DeleteAllData() {
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ WriteOptions options;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
+ }
+
+ delete iter;
+ }
+
+ void ResetDBState(ResetMethod reset_method) {
+ switch (reset_method) {
+ case RESET_DROP_UNSYNCED_DATA:
+ ASSERT_OK(env_->DropUnsyncedFileData());
+ break;
+ case RESET_DELETE_UNSYNCED_FILES:
+ ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
+ break;
+ default:
+ assert(false);
+ }
+ }
+
+ void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
+ DeleteAllData();
+ Build(0, num_pre_sync);
+ db_->CompactRange(NULL, NULL);
+ Build(num_pre_sync, num_post_sync);
+ }
+
+ void PartialCompactTestReopenWithFault(ResetMethod reset_method,
+ int num_pre_sync,
+ int num_post_sync) {
+ env_->SetFilesystemActive(false);
+ CloseDB();
+ ResetDBState(reset_method);
+ ASSERT_OK(OpenDB());
+ ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
+ ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+ }
+
+ void NoWriteTestPreFault() {
+ }
+
+ void NoWriteTestReopenWithFault(ResetMethod reset_method) {
+ CloseDB();
+ ResetDBState(reset_method);
+ ASSERT_OK(OpenDB());
+ }
+
+ void DoTest() {
+ Random rnd(0);
+ ASSERT_OK(OpenDB());
+ for (size_t idx = 0; idx < kNumIterations; idx++) {
+ int num_pre_sync = rnd.Uniform(kMaxNumValues);
+ int num_post_sync = rnd.Uniform(kMaxNumValues);
+
+ PartialCompactTestPreFault(num_pre_sync, num_post_sync);
+ PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
+ num_pre_sync,
+ num_post_sync);
+
+ NoWriteTestPreFault();
+ NoWriteTestReopenWithFault(RESET_DROP_UNSYNCED_DATA);
+
+ PartialCompactTestPreFault(num_pre_sync, num_post_sync);
+ // No new files are created, so we expect all values to be present since
+ // no files will be dropped.
+ PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
+ num_pre_sync + num_post_sync,
+ 0);
+
+ NoWriteTestPreFault();
+ NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
+ }
+ }
+};
+
+TEST(FaultInjectionTest, FaultTestNoLogReuse) {
+ ReuseLogs(false);
+ DoTest();
+}
+
+TEST(FaultInjectionTest, FaultTestWithLogReuse) {
+ ReuseLogs(true);
+ DoTest();
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
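The new fault_injection_test.cc above leans entirely on the EnvWrapper pattern: wrap a real Env, forward every call, and intercept only what the test needs. The following is a minimal sketch of that pattern, not part of this patch; the class and counter names are illustrative, not taken from the LevelDB sources.

#include <string>
#include "leveldb/env.h"

namespace example {

// Forwards everything to the wrapped Env and merely observes one call.
class CountingEnv : public leveldb::EnvWrapper {
 public:
  explicit CountingEnv(leveldb::Env* base)
      : leveldb::EnvWrapper(base), writable_files_created_(0) {}

  virtual leveldb::Status NewWritableFile(const std::string& fname,
                                          leveldb::WritableFile** result) {
    ++writable_files_created_;                         // observe the call
    return target()->NewWritableFile(fname, result);   // then forward it
  }

  int writable_files_created() const { return writable_files_created_; }

 private:
  int writable_files_created_;
};

}  // namespace example

FaultInjectionTestEnv does the same thing, except that it also records per-file sync state so the tests can simulate a crash by truncating or deleting whatever was never synced.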
diff --git a/src/leveldb/db/leveldb_main.cc b/src/leveldb/db/leveldbutil.cc
index 9f4b7dd70c..9f4b7dd70c 100644
--- a/src/leveldb/db/leveldb_main.cc
+++ b/src/leveldb/db/leveldbutil.cc
diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc
index e44b66c85b..a6d304545d 100644
--- a/src/leveldb/db/log_reader.cc
+++ b/src/leveldb/db/log_reader.cc
@@ -25,7 +25,8 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
eof_(false),
last_record_offset_(0),
end_of_buffer_offset_(0),
- initial_offset_(initial_offset) {
+ initial_offset_(initial_offset),
+ resyncing_(initial_offset > 0) {
}
Reader::~Reader() {
@@ -72,8 +73,25 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
Slice fragment;
while (true) {
- uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size();
const unsigned int record_type = ReadPhysicalRecord(&fragment);
+
+ // ReadPhysicalRecord may have only had an empty trailer remaining in its
+ // internal buffer. Calculate the offset of the next physical record now
+ // that it has returned, properly accounting for its header size.
+ uint64_t physical_record_offset =
+ end_of_buffer_offset_ - buffer_.size() - kHeaderSize - fragment.size();
+
+ if (resyncing_) {
+ if (record_type == kMiddleType) {
+ continue;
+ } else if (record_type == kLastType) {
+ resyncing_ = false;
+ continue;
+ } else {
+ resyncing_ = false;
+ }
+ }
+
switch (record_type) {
case kFullType:
if (in_fragmented_record) {
diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h
index 6aff791716..8389d61f8f 100644
--- a/src/leveldb/db/log_reader.h
+++ b/src/leveldb/db/log_reader.h
@@ -73,6 +73,11 @@ class Reader {
// Offset at which to start looking for the first record to return
uint64_t const initial_offset_;
+ // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+ // particular, a run of kMiddleType and kLastType records can be silently
+ // skipped in this mode
+ bool resyncing_;
+
// Extend record types with the following special values
enum {
kEof = kMaxRecordType + 1,
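The resyncing_ flag added above matters to any caller that constructs a log reader with a non-zero initial_offset: the reader may land inside a fragmented record, and middle/last fragments are now skipped silently until the next first or full record. The helper below is an illustrative sketch only; it assumes access to the internal db/log_reader.h header, which is not part of the public include/leveldb API.

#include <stdint.h>
#include <stdio.h>
#include <string>
#include "db/log_reader.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"

// Dump every complete logical record starting at a byte offset in the log.
void DumpRecordsFrom(leveldb::SequentialFile* file, uint64_t initial_offset) {
  // With initial_offset > 0 the reader starts in resyncing mode and drops
  // the kMiddleType/kLastType fragments of the record it landed inside.
  leveldb::log::Reader reader(file, NULL /*reporter*/, true /*checksum*/,
                              initial_offset);
  leveldb::Slice record;
  std::string scratch;
  while (reader.ReadRecord(&record, &scratch)) {
    fprintf(stderr, "record of %zu bytes\n", record.size());
  }
}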
diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc
index dcf0562652..48a5928657 100644
--- a/src/leveldb/db/log_test.cc
+++ b/src/leveldb/db/log_test.cc
@@ -79,7 +79,7 @@ class LogTest {
virtual Status Skip(uint64_t n) {
if (n > contents_.size()) {
contents_.clear();
- return Status::NotFound("in-memory file skipepd past end");
+ return Status::NotFound("in-memory file skipped past end");
}
contents_.remove_prefix(n);
@@ -104,23 +104,34 @@ class LogTest {
StringSource source_;
ReportCollector report_;
bool reading_;
- Writer writer_;
- Reader reader_;
+ Writer* writer_;
+ Reader* reader_;
// Record metadata for testing initial offset functionality
static size_t initial_offset_record_sizes_[];
static uint64_t initial_offset_last_record_offsets_[];
+ static int num_initial_offset_records_;
public:
LogTest() : reading_(false),
- writer_(&dest_),
- reader_(&source_, &report_, true/*checksum*/,
- 0/*initial_offset*/) {
+ writer_(new Writer(&dest_)),
+ reader_(new Reader(&source_, &report_, true/*checksum*/,
+ 0/*initial_offset*/)) {
+ }
+
+ ~LogTest() {
+ delete writer_;
+ delete reader_;
+ }
+
+ void ReopenForAppend() {
+ delete writer_;
+ writer_ = new Writer(&dest_, dest_.contents_.size());
}
void Write(const std::string& msg) {
ASSERT_TRUE(!reading_) << "Write() after starting to read";
- writer_.AddRecord(Slice(msg));
+ writer_->AddRecord(Slice(msg));
}
size_t WrittenBytes() const {
@@ -134,7 +145,7 @@ class LogTest {
}
std::string scratch;
Slice record;
- if (reader_.ReadRecord(&record, &scratch)) {
+ if (reader_->ReadRecord(&record, &scratch)) {
return record.ToString();
} else {
return "EOF";
@@ -182,13 +193,18 @@ class LogTest {
}
void WriteInitialOffsetLog() {
- for (int i = 0; i < 4; i++) {
+ for (int i = 0; i < num_initial_offset_records_; i++) {
std::string record(initial_offset_record_sizes_[i],
static_cast<char>('a' + i));
Write(record);
}
}
+ void StartReadingAt(uint64_t initial_offset) {
+ delete reader_;
+ reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+ }
+
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
WriteInitialOffsetLog();
reading_ = true;
@@ -208,32 +224,48 @@ class LogTest {
source_.contents_ = Slice(dest_.contents_);
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
initial_offset);
- Slice record;
- std::string scratch;
- ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
- ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
- record.size());
- ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
- offset_reader->LastRecordOffset());
- ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
+
+ // Read all records from expected_record_offset through the last one.
+ ASSERT_LT(expected_record_offset, num_initial_offset_records_);
+ for (; expected_record_offset < num_initial_offset_records_;
+ ++expected_record_offset) {
+ Slice record;
+ std::string scratch;
+ ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
+ ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
+ record.size());
+ ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
+ offset_reader->LastRecordOffset());
+ ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
+ }
delete offset_reader;
}
-
};
size_t LogTest::initial_offset_record_sizes_[] =
{10000, // Two sizable records in first block
10000,
2 * log::kBlockSize - 1000, // Span three blocks
- 1};
+ 1,
+ 13716, // Consume all but two bytes of block 3.
+ log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
+ };
uint64_t LogTest::initial_offset_last_record_offsets_[] =
{0,
kHeaderSize + 10000,
2 * (kHeaderSize + 10000),
2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize};
+ (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+ 2 * (kHeaderSize + 10000) +
+ (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
+ + kHeaderSize + 1,
+ 3 * log::kBlockSize,
+ };
+// LogTest::initial_offset_last_record_offsets_ must be defined before this.
+int LogTest::num_initial_offset_records_ =
+ sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
TEST(LogTest, Empty) {
ASSERT_EQ("EOF", Read());
@@ -318,6 +350,15 @@ TEST(LogTest, AlignedEof) {
ASSERT_EQ("EOF", Read());
}
+TEST(LogTest, OpenForAppend) {
+ Write("hello");
+ ReopenForAppend();
+ Write("world");
+ ASSERT_EQ("hello", Read());
+ ASSERT_EQ("world", Read());
+ ASSERT_EQ("EOF", Read());
+}
+
TEST(LogTest, RandomRead) {
const int N = 500;
Random write_rnd(301);
@@ -445,6 +486,22 @@ TEST(LogTest, PartialLastIsIgnored) {
ASSERT_EQ(0, DroppedBytes());
}
+TEST(LogTest, SkipIntoMultiRecord) {
+ // Consider a fragmented record:
+ // first(R1), middle(R1), last(R1), first(R2)
+ // If initial_offset points to a record after first(R1) but before first(R2),
+ // incomplete fragment errors are not actual errors and must be suppressed
+ // until a new first or full record is encountered.
+ Write(BigString("foo", 3*kBlockSize));
+ Write("correct");
+ StartReadingAt(kBlockSize);
+
+ ASSERT_EQ("correct", Read());
+ ASSERT_EQ("", ReportMessage());
+ ASSERT_EQ(0, DroppedBytes());
+ ASSERT_EQ("EOF", Read());
+}
+
TEST(LogTest, ErrorJoinsRecords) {
// Consider two fragmented records:
// first(R1) last(R1) first(R2) last(R2)
@@ -514,6 +571,10 @@ TEST(LogTest, ReadFourthStart) {
3);
}
+TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
+ CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
+}
+
TEST(LogTest, ReadEnd) {
CheckOffsetPastEndReturnsNoRecords(0);
}
diff --git a/src/leveldb/db/log_writer.cc b/src/leveldb/db/log_writer.cc
index 2da99ac088..74a03270da 100644
--- a/src/leveldb/db/log_writer.cc
+++ b/src/leveldb/db/log_writer.cc
@@ -12,15 +12,24 @@
namespace leveldb {
namespace log {
-Writer::Writer(WritableFile* dest)
- : dest_(dest),
- block_offset_(0) {
+static void InitTypeCrc(uint32_t* type_crc) {
for (int i = 0; i <= kMaxRecordType; i++) {
char t = static_cast<char>(i);
- type_crc_[i] = crc32c::Value(&t, 1);
+ type_crc[i] = crc32c::Value(&t, 1);
}
}
+Writer::Writer(WritableFile* dest)
+ : dest_(dest),
+ block_offset_(0) {
+ InitTypeCrc(type_crc_);
+}
+
+Writer::Writer(WritableFile* dest, uint64_t dest_length)
+ : dest_(dest), block_offset_(dest_length % kBlockSize) {
+ InitTypeCrc(type_crc_);
+}
+
Writer::~Writer() {
}
diff --git a/src/leveldb/db/log_writer.h b/src/leveldb/db/log_writer.h
index a3a954d967..9e7cc4705b 100644
--- a/src/leveldb/db/log_writer.h
+++ b/src/leveldb/db/log_writer.h
@@ -22,6 +22,12 @@ class Writer {
// "*dest" must be initially empty.
// "*dest" must remain live while this Writer is in use.
explicit Writer(WritableFile* dest);
+
+ // Create a writer that will append data to "*dest".
+ // "*dest" must have initial length "dest_length".
+ // "*dest" must remain live while this Writer is in use.
+ Writer(WritableFile* dest, uint64_t dest_length);
+
~Writer();
Status AddRecord(const Slice& slice);
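The new two-argument constructor is what lets a recovered log file be reopened and extended in place, mirroring ReopenForAppend() in the log test. Below is a hedged sketch of that flow, not part of the patch; it assumes access to the internal db/log_writer.h header and uses the Env append support introduced elsewhere in this commit.

#include <stdint.h>
#include <string>
#include "db/log_writer.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

// Append one record to an existing log file, preserving block alignment.
leveldb::Status AppendRecord(leveldb::Env* env, const std::string& fname,
                             const leveldb::Slice& record) {
  uint64_t size = 0;
  leveldb::Status s = env->GetFileSize(fname, &size);
  if (!s.ok()) return s;
  leveldb::WritableFile* file;
  s = env->NewAppendableFile(fname, &file);
  if (!s.ok()) return s;
  // Passing the current length tells the writer where it sits inside the
  // current 32KB block, so padding and fragmentation stay correct.
  leveldb::log::Writer writer(file, size);
  s = writer.AddRecord(record);
  // A real caller would Flush()/Sync() and Close() before deleting.
  delete file;
  return s;
}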
diff --git a/src/leveldb/db/memtable.h b/src/leveldb/db/memtable.h
index 92e90bb099..9f41567cde 100644
--- a/src/leveldb/db/memtable.h
+++ b/src/leveldb/db/memtable.h
@@ -36,10 +36,7 @@ class MemTable {
}
// Returns an estimate of the number of bytes of data in use by this
- // data structure.
- //
- // REQUIRES: external synchronization to prevent simultaneous
- // operations on the same MemTable.
+ // data structure. It is safe to call when MemTable is being modified.
size_t ApproximateMemoryUsage();
// Return an iterator that yields the contents of the memtable.
diff --git a/src/leveldb/db/recovery_test.cc b/src/leveldb/db/recovery_test.cc
new file mode 100644
index 0000000000..9596f4288a
--- /dev/null
+++ b/src/leveldb/db/recovery_test.cc
@@ -0,0 +1,324 @@
+// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_impl.h"
+#include "db/filename.h"
+#include "db/version_set.h"
+#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+class RecoveryTest {
+ public:
+ RecoveryTest() : env_(Env::Default()), db_(NULL) {
+ dbname_ = test::TmpDir() + "/recovery_test";
+ DestroyDB(dbname_, Options());
+ Open();
+ }
+
+ ~RecoveryTest() {
+ Close();
+ DestroyDB(dbname_, Options());
+ }
+
+ DBImpl* dbfull() const { return reinterpret_cast<DBImpl*>(db_); }
+ Env* env() const { return env_; }
+
+ bool CanAppend() {
+ WritableFile* tmp;
+ Status s = env_->NewAppendableFile(CurrentFileName(dbname_), &tmp);
+ delete tmp;
+ if (s.IsNotSupportedError()) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void Close() {
+ delete db_;
+ db_ = NULL;
+ }
+
+ void Open(Options* options = NULL) {
+ Close();
+ Options opts;
+ if (options != NULL) {
+ opts = *options;
+ } else {
+ opts.reuse_logs = true; // TODO(sanjay): test both ways
+ opts.create_if_missing = true;
+ }
+ if (opts.env == NULL) {
+ opts.env = env_;
+ }
+ ASSERT_OK(DB::Open(opts, dbname_, &db_));
+ ASSERT_EQ(1, NumLogs());
+ }
+
+ Status Put(const std::string& k, const std::string& v) {
+ return db_->Put(WriteOptions(), k, v);
+ }
+
+ std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string result;
+ Status s = db_->Get(ReadOptions(), k, &result);
+ if (s.IsNotFound()) {
+ result = "NOT_FOUND";
+ } else if (!s.ok()) {
+ result = s.ToString();
+ }
+ return result;
+ }
+
+ std::string ManifestFileName() {
+ std::string current;
+ ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
+ size_t len = current.size();
+ if (len > 0 && current[len-1] == '\n') {
+ current.resize(len - 1);
+ }
+ return dbname_ + "/" + current;
+ }
+
+ std::string LogName(uint64_t number) {
+ return LogFileName(dbname_, number);
+ }
+
+ size_t DeleteLogFiles() {
+ std::vector<uint64_t> logs = GetFiles(kLogFile);
+ for (size_t i = 0; i < logs.size(); i++) {
+ ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
+ }
+ return logs.size();
+ }
+
+ uint64_t FirstLogFile() {
+ return GetFiles(kLogFile)[0];
+ }
+
+ std::vector<uint64_t> GetFiles(FileType t) {
+ std::vector<std::string> filenames;
+ ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ std::vector<uint64_t> result;
+ for (size_t i = 0; i < filenames.size(); i++) {
+ uint64_t number;
+ FileType type;
+ if (ParseFileName(filenames[i], &number, &type) && type == t) {
+ result.push_back(number);
+ }
+ }
+ return result;
+ }
+
+ int NumLogs() {
+ return GetFiles(kLogFile).size();
+ }
+
+ int NumTables() {
+ return GetFiles(kTableFile).size();
+ }
+
+ uint64_t FileSize(const std::string& fname) {
+ uint64_t result;
+ ASSERT_OK(env_->GetFileSize(fname, &result)) << fname;
+ return result;
+ }
+
+ void CompactMemTable() {
+ dbfull()->TEST_CompactMemTable();
+ }
+
+ // Directly construct a log file that sets key to val.
+ void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
+ std::string fname = LogFileName(dbname_, lognum);
+ WritableFile* file;
+ ASSERT_OK(env_->NewWritableFile(fname, &file));
+ log::Writer writer(file);
+ WriteBatch batch;
+ batch.Put(key, val);
+ WriteBatchInternal::SetSequence(&batch, seq);
+ ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
+ ASSERT_OK(file->Flush());
+ delete file;
+ }
+
+ private:
+ std::string dbname_;
+ Env* env_;
+ DB* db_;
+};
+
+TEST(RecoveryTest, ManifestReused) {
+ if (!CanAppend()) {
+ fprintf(stderr, "skipping test because env does not support appending\n");
+ return;
+ }
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ std::string old_manifest = ManifestFileName();
+ Open();
+ ASSERT_EQ(old_manifest, ManifestFileName());
+ ASSERT_EQ("bar", Get("foo"));
+ Open();
+ ASSERT_EQ(old_manifest, ManifestFileName());
+ ASSERT_EQ("bar", Get("foo"));
+}
+
+TEST(RecoveryTest, LargeManifestCompacted) {
+ if (!CanAppend()) {
+ fprintf(stderr, "skipping test because env does not support appending\n");
+ return;
+ }
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ std::string old_manifest = ManifestFileName();
+
+ // Pad with zeroes to make manifest file very big.
+ {
+ uint64_t len = FileSize(old_manifest);
+ WritableFile* file;
+ ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
+ std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+ ASSERT_OK(file->Append(zeroes));
+ ASSERT_OK(file->Flush());
+ delete file;
+ }
+
+ Open();
+ std::string new_manifest = ManifestFileName();
+ ASSERT_NE(old_manifest, new_manifest);
+ ASSERT_GT(10000, FileSize(new_manifest));
+ ASSERT_EQ("bar", Get("foo"));
+
+ Open();
+ ASSERT_EQ(new_manifest, ManifestFileName());
+ ASSERT_EQ("bar", Get("foo"));
+}
+
+TEST(RecoveryTest, NoLogFiles) {
+ ASSERT_OK(Put("foo", "bar"));
+ ASSERT_EQ(1, DeleteLogFiles());
+ Open();
+ ASSERT_EQ("NOT_FOUND", Get("foo"));
+ Open();
+ ASSERT_EQ("NOT_FOUND", Get("foo"));
+}
+
+TEST(RecoveryTest, LogFileReuse) {
+ if (!CanAppend()) {
+ fprintf(stderr, "skipping test because env does not support appending\n");
+ return;
+ }
+ for (int i = 0; i < 2; i++) {
+ ASSERT_OK(Put("foo", "bar"));
+ if (i == 0) {
+ // Compact to ensure current log is empty
+ CompactMemTable();
+ }
+ Close();
+ ASSERT_EQ(1, NumLogs());
+ uint64_t number = FirstLogFile();
+ if (i == 0) {
+ ASSERT_EQ(0, FileSize(LogName(number)));
+ } else {
+ ASSERT_LT(0, FileSize(LogName(number)));
+ }
+ Open();
+ ASSERT_EQ(1, NumLogs());
+ ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
+ ASSERT_EQ("bar", Get("foo"));
+ Open();
+ ASSERT_EQ(1, NumLogs());
+ ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
+ ASSERT_EQ("bar", Get("foo"));
+ }
+}
+
+TEST(RecoveryTest, MultipleMemTables) {
+ // Make a large log.
+ const int kNum = 1000;
+ for (int i = 0; i < kNum; i++) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%050d", i);
+ ASSERT_OK(Put(buf, buf));
+ }
+ ASSERT_EQ(0, NumTables());
+ Close();
+ ASSERT_EQ(0, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ uint64_t old_log_file = FirstLogFile();
+
+ // Force creation of multiple memtables by reducing the write buffer size.
+ Options opt;
+ opt.reuse_logs = true;
+ opt.write_buffer_size = (kNum*100) / 2;
+ Open(&opt);
+ ASSERT_LE(2, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log";
+ for (int i = 0; i < kNum; i++) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%050d", i);
+ ASSERT_EQ(buf, Get(buf));
+ }
+}
+
+TEST(RecoveryTest, MultipleLogFiles) {
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ ASSERT_EQ(1, NumLogs());
+
+ // Make a bunch of uncompacted log files.
+ uint64_t old_log = FirstLogFile();
+ MakeLogFile(old_log+1, 1000, "hello", "world");
+ MakeLogFile(old_log+2, 1001, "hi", "there");
+ MakeLogFile(old_log+3, 1002, "foo", "bar2");
+
+ // Recover and check that all log files were processed.
+ Open();
+ ASSERT_LE(1, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ uint64_t new_log = FirstLogFile();
+ ASSERT_LE(old_log+3, new_log);
+ ASSERT_EQ("bar2", Get("foo"));
+ ASSERT_EQ("world", Get("hello"));
+ ASSERT_EQ("there", Get("hi"));
+
+ // Test that previous recovery produced recoverable state.
+ Open();
+ ASSERT_LE(1, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ if (CanAppend()) {
+ ASSERT_EQ(new_log, FirstLogFile());
+ }
+ ASSERT_EQ("bar2", Get("foo"));
+ ASSERT_EQ("world", Get("hello"));
+ ASSERT_EQ("there", Get("hi"));
+
+ // Check that introducing an older log file does not cause it to be re-read.
+ Close();
+ MakeLogFile(old_log+1, 2000, "hello", "stale write");
+ Open();
+ ASSERT_LE(1, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ if (CanAppend()) {
+ ASSERT_EQ(new_log, FirstLogFile());
+ }
+ ASSERT_EQ("bar2", Get("foo"));
+ ASSERT_EQ("world", Get("hello"));
+ ASSERT_EQ("there", Get("hi"));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h
index ed8b092203..8bd77764d8 100644
--- a/src/leveldb/db/skiplist.h
+++ b/src/leveldb/db/skiplist.h
@@ -1,10 +1,10 @@
-#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
-#define STORAGE_LEVELDB_DB_SKIPLIST_H_
-
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
+
+#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
+#define STORAGE_LEVELDB_DB_SKIPLIST_H_
+
// Thread safety
// -------------
//
diff --git a/src/leveldb/db/skiplist_test.cc b/src/leveldb/db/skiplist_test.cc
index c78f4b4fb1..aee1461e1b 100644
--- a/src/leveldb/db/skiplist_test.cc
+++ b/src/leveldb/db/skiplist_test.cc
@@ -250,7 +250,7 @@ class ConcurrentTest {
// Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0) ||
- (gen(pos) > initial_state.Get(key(pos)))
+ (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
) << "key: " << key(pos)
<< "; gen: " << gen(pos)
<< "; initgen: "
diff --git a/src/leveldb/db/snapshot.h b/src/leveldb/db/snapshot.h
index e7f8fd2c37..6ed413c42d 100644
--- a/src/leveldb/db/snapshot.h
+++ b/src/leveldb/db/snapshot.h
@@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_SNAPSHOT_H_
#define STORAGE_LEVELDB_DB_SNAPSHOT_H_
+#include "db/dbformat.h"
#include "leveldb/db.h"
namespace leveldb {
diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc
index aa83df55e4..a5e0f77a6a 100644
--- a/src/leveldb/db/version_set.cc
+++ b/src/leveldb/db/version_set.cc
@@ -893,7 +893,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
return s;
}
-Status VersionSet::Recover() {
+Status VersionSet::Recover(bool *save_manifest) {
struct LogReporter : public log::Reader::Reporter {
Status* status;
virtual void Corruption(size_t bytes, const Status& s) {
@@ -1003,11 +1003,49 @@ Status VersionSet::Recover() {
last_sequence_ = last_sequence;
log_number_ = log_number;
prev_log_number_ = prev_log_number;
+
+ // See if we can reuse the existing MANIFEST file.
+ if (ReuseManifest(dscname, current)) {
+ // No need to save new manifest
+ } else {
+ *save_manifest = true;
+ }
}
return s;
}
+bool VersionSet::ReuseManifest(const std::string& dscname,
+ const std::string& dscbase) {
+ if (!options_->reuse_logs) {
+ return false;
+ }
+ FileType manifest_type;
+ uint64_t manifest_number;
+ uint64_t manifest_size;
+ if (!ParseFileName(dscbase, &manifest_number, &manifest_type) ||
+ manifest_type != kDescriptorFile ||
+ !env_->GetFileSize(dscname, &manifest_size).ok() ||
+ // Make new compacted MANIFEST if old one is too big
+ manifest_size >= kTargetFileSize) {
+ return false;
+ }
+
+ assert(descriptor_file_ == NULL);
+ assert(descriptor_log_ == NULL);
+ Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
+ if (!r.ok()) {
+ Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
+ assert(descriptor_file_ == NULL);
+ return false;
+ }
+
+ Log(options_->info_log, "Reusing MANIFEST %s\n", dscname.c_str());
+ descriptor_log_ = new log::Writer(descriptor_file_, manifest_size);
+ manifest_file_number_ = manifest_number;
+ return true;
+}
+
void VersionSet::MarkFileNumberUsed(uint64_t number) {
if (next_file_number_ <= number) {
next_file_number_ = number + 1;
diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h
index 8dc14b8e01..1dec745673 100644
--- a/src/leveldb/db/version_set.h
+++ b/src/leveldb/db/version_set.h
@@ -179,7 +179,7 @@ class VersionSet {
EXCLUSIVE_LOCKS_REQUIRED(mu);
// Recover the last saved descriptor from persistent storage.
- Status Recover();
+ Status Recover(bool *save_manifest);
// Return the current version.
Version* current() const { return current_; }
@@ -274,6 +274,8 @@ class VersionSet {
friend class Compaction;
friend class Version;
+ bool ReuseManifest(const std::string& dscname, const std::string& dscbase);
+
void Finalize(Version* v);
void GetRange(const std::vector<FileMetaData*>& inputs,
diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h
index 310a3c8912..9448ef7b21 100644
--- a/src/leveldb/db/write_batch_internal.h
+++ b/src/leveldb/db/write_batch_internal.h
@@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
#define STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
+#include "db/dbformat.h"
#include "leveldb/write_batch.h"
namespace leveldb {
diff --git a/src/leveldb/doc/index.html b/src/leveldb/doc/index.html
index 3ed0ed9d9e..2155192581 100644
--- a/src/leveldb/doc/index.html
+++ b/src/leveldb/doc/index.html
@@ -22,7 +22,7 @@ directory. The following example shows how to open a database,
creating it if necessary:
<p>
<pre>
- #include &lt;assert&gt;
+ #include &lt;cassert&gt;
#include "leveldb/db.h"
leveldb::DB* db;
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 43ef2e0729..9a98884daf 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -277,6 +277,19 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ MutexLock lock(&mutex_);
+ FileState** sptr = &file_map_[fname];
+ FileState* file = *sptr;
+ if (file == NULL) {
+ file = new FileState();
+ file->Ref();
+ }
+ *result = new WritableFileImpl(file);
+ return Status::OK();
+ }
+
virtual bool FileExists(const std::string& fname) {
MutexLock lock(&mutex_);
return file_map_.find(fname) != file_map_.end();
diff --git a/src/leveldb/helpers/memenv/memenv_test.cc b/src/leveldb/helpers/memenv/memenv_test.cc
index a44310fed8..5cff77613f 100644
--- a/src/leveldb/helpers/memenv/memenv_test.cc
+++ b/src/leveldb/helpers/memenv/memenv_test.cc
@@ -40,6 +40,8 @@ TEST(MemEnvTest, Basics) {
// Create a file.
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
+ ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+ ASSERT_EQ(0, file_size);
delete writable_file;
// Check that the file exists.
@@ -55,9 +57,16 @@ TEST(MemEnvTest, Basics) {
ASSERT_OK(writable_file->Append("abc"));
delete writable_file;
- // Check for expected size.
+ // Check that append works.
+ ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file));
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(3, file_size);
+ ASSERT_OK(writable_file->Append("hello"));
+ delete writable_file;
+
+ // Check for expected size.
+ ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+ ASSERT_EQ(8, file_size);
// Check that renaming works.
ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
@@ -65,7 +74,7 @@ TEST(MemEnvTest, Basics) {
ASSERT_TRUE(!env_->FileExists("/dir/f"));
ASSERT_TRUE(env_->FileExists("/dir/g"));
ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
- ASSERT_EQ(3, file_size);
+ ASSERT_EQ(8, file_size);
// Check that opening non-existent file fails.
SequentialFile* seq_file;
diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h
index 1a201e5e0a..6819d5bc49 100644
--- a/src/leveldb/include/leveldb/cache.h
+++ b/src/leveldb/include/leveldb/cache.h
@@ -81,6 +81,17 @@ class Cache {
// its cache keys.
virtual uint64_t NewId() = 0;
+ // Remove all cache entries that are not actively in use. Memory-constrained
+ // applications may wish to call this method to reduce memory usage.
+ // Default implementation of Prune() does nothing. Subclasses are strongly
+ // encouraged to override the default implementation. A future release of
+ // leveldb may change Prune() to a pure abstract method.
+ virtual void Prune() {}
+
+ // Return an estimate of the combined charges of all elements stored in the
+ // cache.
+ virtual size_t TotalCharge() const = 0;
+
private:
void LRU_Remove(Handle* e);
void LRU_Append(Handle* e);
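The two new Cache hooks are straightforward to drive from application code. A minimal sketch, not part of the patch, showing both on any Cache implementation (the standard NewLRUCache() result implements them):

#include <stdio.h>
#include "leveldb/cache.h"

// Report the cache's estimated charge, drop idle entries, report again.
void ReportAndTrimCache(leveldb::Cache* cache) {
  // TotalCharge() estimates the combined charge of all cached entries.
  fprintf(stderr, "cache charge before prune: %zu\n", cache->TotalCharge());
  // Prune() drops every entry that no client currently holds a handle on.
  cache->Prune();
  fprintf(stderr, "cache charge after prune:  %zu\n", cache->TotalCharge());
}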
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index 4c169bf22e..9752cbad51 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -14,7 +14,7 @@ namespace leveldb {
// Update Makefile if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 18;
+static const int kMinorVersion = 19;
struct Options;
struct ReadOptions;
@@ -115,6 +115,8 @@ class DB {
// about the internal operation of the DB.
// "leveldb.sstables" - returns a multi-line string that describes all
// of the sstables that make up the db contents.
+ // "leveldb.approximate-memory-usage" - returns the approximate number of
+ // bytes of memory in use by the DB.
virtual bool GetProperty(const Slice& property, std::string* value) = 0;
// For each i in [0,n-1], store in "sizes[i]", the approximate
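The new "leveldb.approximate-memory-usage" property is read through the existing GetProperty() interface. A hedged sketch of a caller, not part of the patch; it assumes (as the current implementation does) that the property value is rendered as a decimal byte count.

#include <stdlib.h>
#include <string>
#include "leveldb/db.h"

// Returns the DB's self-reported memory usage in bytes, or 0 if unavailable.
size_t ApproximateMemoryUsage(leveldb::DB* db) {
  std::string value;
  if (db->GetProperty("leveldb.approximate-memory-usage", &value)) {
    return static_cast<size_t>(strtoull(value.c_str(), NULL, 10));
  }
  return 0;
}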
diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h
index f709514da6..99b6c21414 100644
--- a/src/leveldb/include/leveldb/env.h
+++ b/src/leveldb/include/leveldb/env.h
@@ -69,6 +69,21 @@ class Env {
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) = 0;
+ // Create an object that either appends to an existing file, or
+ // writes to a new file (if the file does not exist to begin with).
+ // On success, stores a pointer to the new file in *result and
+ // returns OK. On failure stores NULL in *result and returns
+ // non-OK.
+ //
+ // The returned file will only be accessed by one thread at a time.
+ //
+ // May return an IsNotSupportedError error if this Env does
+ // not allow appending to an existing file. Users of Env (including
+ // the leveldb implementation) must be prepared to deal with
+ // an Env that does not support appending.
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result);
+
// Returns true iff the named file exists.
virtual bool FileExists(const std::string& fname) = 0;
@@ -289,6 +304,9 @@ class EnvWrapper : public Env {
Status NewWritableFile(const std::string& f, WritableFile** r) {
return target_->NewWritableFile(f, r);
}
+ Status NewAppendableFile(const std::string& f, WritableFile** r) {
+ return target_->NewAppendableFile(f, r);
+ }
bool FileExists(const std::string& f) { return target_->FileExists(f); }
Status GetChildren(const std::string& dir, std::vector<std::string>* r) {
return target_->GetChildren(dir, r);
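Because NewAppendableFile() may legitimately be unsupported, callers have to probe for it, which is what the new recovery test does with its CanAppend() helper. A minimal sketch of that probe, not part of the patch; the function name and probe file are illustrative.

#include <string>
#include "leveldb/env.h"
#include "leveldb/status.h"

// Returns true if this Env can append to existing files.
// Note: a successful probe may create probe_file if it did not exist.
bool EnvSupportsAppend(leveldb::Env* env, const std::string& probe_file) {
  leveldb::WritableFile* file = NULL;
  leveldb::Status s = env->NewAppendableFile(probe_file, &file);
  delete file;  // NULL on failure; delete NULL is safe
  return !s.IsNotSupportedError();
}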
diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h
index 76aced04bd..da631ed9d8 100644
--- a/src/leveldb/include/leveldb/iterator.h
+++ b/src/leveldb/include/leveldb/iterator.h
@@ -37,7 +37,7 @@ class Iterator {
// Valid() after this call iff the source is not empty.
virtual void SeekToLast() = 0;
- // Position at the first key in the source that at or past target
+ // Position at the first key in the source that is at or past target.
// The iterator is Valid() after this call iff the source contains
// an entry that comes at or past target.
virtual void Seek(const Slice& target) = 0;
diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h
index 7c9b973454..83a1ef39a4 100644
--- a/src/leveldb/include/leveldb/options.h
+++ b/src/leveldb/include/leveldb/options.h
@@ -128,6 +128,12 @@ struct Options {
// efficiently detect that and will switch to uncompressed mode.
CompressionType compression;
+ // EXPERIMENTAL: If true, append to existing MANIFEST and log files
+ // when a database is opened. This can significantly speed up open.
+ //
+ // Default: currently false, but may become true later.
+ bool reuse_logs;
+
// If non-NULL, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
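Opting into the experimental reuse behavior is a one-line change for applications. A hedged sketch, not part of the patch, of opening a database with log/MANIFEST reuse enabled:

#include <string>
#include "leveldb/db.h"
#include "leveldb/options.h"

leveldb::Status OpenWithLogReuse(const std::string& name, leveldb::DB** dbptr) {
  leveldb::Options options;
  options.create_if_missing = true;
  options.reuse_logs = true;  // EXPERIMENTAL: append to existing MANIFEST/log
  return leveldb::DB::Open(options, name, dbptr);
}

If the Env does not support appending, LevelDB falls back to writing fresh files, so enabling the flag is safe even on such platforms.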
diff --git a/src/leveldb/include/leveldb/status.h b/src/leveldb/include/leveldb/status.h
index 11dbd4b47e..d9575f9753 100644
--- a/src/leveldb/include/leveldb/status.h
+++ b/src/leveldb/include/leveldb/status.h
@@ -60,6 +60,12 @@ class Status {
// Returns true iff the status indicates an IOError.
bool IsIOError() const { return code() == kIOError; }
+ // Returns true iff the status indicates a NotSupportedError.
+ bool IsNotSupportedError() const { return code() == kNotSupported; }
+
+ // Returns true iff the status indicates an InvalidArgument.
+ bool IsInvalidArgument() const { return code() == kInvalidArgument; }
+
// Return a string representation of this status suitable for printing.
// Returns the string "OK" for success.
std::string ToString() const;
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h
index 9bf091f757..1c4c7aafc6 100644
--- a/src/leveldb/port/atomic_pointer.h
+++ b/src/leveldb/port/atomic_pointer.h
@@ -35,8 +35,12 @@
#define ARCH_CPU_X86_FAMILY 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
+#elif defined(__aarch64__)
+#define ARCH_CPU_ARM64_FAMILY 1
#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define ARCH_CPU_PPC_FAMILY 1
+#elif defined(__mips__)
+#define ARCH_CPU_MIPS_FAMILY 1
#endif
namespace leveldb {
@@ -92,6 +96,13 @@ inline void MemoryBarrier() {
}
#define LEVELDB_HAVE_MEMORY_BARRIER
+// ARM64
+#elif defined(ARCH_CPU_ARM64_FAMILY)
+inline void MemoryBarrier() {
+ asm volatile("dmb sy" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
// PPC
#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier() {
@@ -101,6 +112,13 @@ inline void MemoryBarrier() {
}
#define LEVELDB_HAVE_MEMORY_BARRIER
+// MIPS
+#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
#endif
// AtomicPointer built using platform-specific MemoryBarrier()
@@ -215,6 +233,7 @@ class AtomicPointer {
#undef LEVELDB_HAVE_MEMORY_BARRIER
#undef ARCH_CPU_X86_FAMILY
#undef ARCH_CPU_ARM_FAMILY
+#undef ARCH_CPU_ARM64_FAMILY
#undef ARCH_CPU_PPC_FAMILY
} // namespace port
diff --git a/src/leveldb/port/port_posix.cc b/src/leveldb/port/port_posix.cc
index 5ba127a5b9..30e8007ae3 100644
--- a/src/leveldb/port/port_posix.cc
+++ b/src/leveldb/port/port_posix.cc
@@ -7,7 +7,6 @@
#include <cstdlib>
#include <stdio.h>
#include <string.h>
-#include "util/logging.h"
namespace leveldb {
namespace port {
diff --git a/src/leveldb/table/filter_block.cc b/src/leveldb/table/filter_block.cc
index 203e15c8bc..4e78b954f8 100644
--- a/src/leveldb/table/filter_block.cc
+++ b/src/leveldb/table/filter_block.cc
@@ -68,7 +68,7 @@ void FilterBlockBuilder::GenerateFilter() {
// Generate filter for current set of keys and append to result_.
filter_offsets_.push_back(result_.size());
- policy_->CreateFilter(&tmp_keys_[0], num_keys, &result_);
+ policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_);
tmp_keys_.clear();
keys_.clear();
@@ -97,7 +97,7 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
if (index < num_) {
uint32_t start = DecodeFixed32(offset_ + index*4);
uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
- if (start <= limit && limit <= (offset_ - data_)) {
+ if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
} else if (start == limit) {
diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc
index aa63144c9e..24e4e02445 100644
--- a/src/leveldb/table/format.cc
+++ b/src/leveldb/table/format.cc
@@ -30,15 +30,14 @@ Status BlockHandle::DecodeFrom(Slice* input) {
}
void Footer::EncodeTo(std::string* dst) const {
-#ifndef NDEBUG
const size_t original_size = dst->size();
-#endif
metaindex_handle_.EncodeTo(dst);
index_handle_.EncodeTo(dst);
dst->resize(2 * BlockHandle::kMaxEncodedLength); // Padding
PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu));
PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32));
assert(dst->size() == original_size + kEncodedLength);
+ (void)original_size; // Disable unused variable warning.
}
Status Footer::DecodeFrom(Slice* input) {
diff --git a/src/leveldb/table/iterator_wrapper.h b/src/leveldb/table/iterator_wrapper.h
index 9e16b3dbed..f410c3fabe 100644
--- a/src/leveldb/table/iterator_wrapper.h
+++ b/src/leveldb/table/iterator_wrapper.h
@@ -5,6 +5,9 @@
#ifndef STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_
#define STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_
+#include "leveldb/iterator.h"
+#include "leveldb/slice.h"
+
namespace leveldb {
// An internal wrapper class with an interface similar to Iterator that
diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc
index dff8a82590..decf8082cc 100644
--- a/src/leveldb/table/table.cc
+++ b/src/leveldb/table/table.cc
@@ -82,7 +82,7 @@ Status Table::Open(const Options& options,
*table = new Table(rep);
(*table)->ReadMeta(footer);
} else {
- if (index_block) delete index_block;
+ delete index_block;
}
return s;
diff --git a/src/leveldb/table/table_test.cc b/src/leveldb/table/table_test.cc
index c723bf84cf..abf6e246ff 100644
--- a/src/leveldb/table/table_test.cc
+++ b/src/leveldb/table/table_test.cc
@@ -853,12 +853,20 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
options.compression = kSnappyCompression;
c.Finish(options, &keys, &kvmap);
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6000));
+ // Expected upper and lower bounds of space used by compressible strings.
+ static const int kSlop = 1000; // Compressor effectiveness varies.
+ const int expected = 2500; // 10000 * compression ratio (0.25)
+ const int min_z = expected - kSlop;
+ const int max_z = expected + kSlop;
+
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, kSlop));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, kSlop));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, kSlop));
+ // Have now emitted a large compressible string, so adjust expected offset.
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), min_z, max_z));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), min_z, max_z));
+ // Have now emitted two large compressible strings, so adjust expected offset.
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 2 * min_z, 2 * max_z));
}
} // namespace leveldb
diff --git a/src/leveldb/util/arena.cc b/src/leveldb/util/arena.cc
index 9367f71492..74078213ee 100644
--- a/src/leveldb/util/arena.cc
+++ b/src/leveldb/util/arena.cc
@@ -9,8 +9,7 @@ namespace leveldb {
static const int kBlockSize = 4096;
-Arena::Arena() {
- blocks_memory_ = 0;
+Arena::Arena() : memory_usage_(0) {
alloc_ptr_ = NULL; // First allocation will allocate a block
alloc_bytes_remaining_ = 0;
}
@@ -60,8 +59,9 @@ char* Arena::AllocateAligned(size_t bytes) {
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
- blocks_memory_ += block_bytes;
blocks_.push_back(result);
+ memory_usage_.NoBarrier_Store(
+ reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
return result;
}
diff --git a/src/leveldb/util/arena.h b/src/leveldb/util/arena.h
index 73bbf1cb9b..48bab33741 100644
--- a/src/leveldb/util/arena.h
+++ b/src/leveldb/util/arena.h
@@ -9,6 +9,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
+#include "port/port.h"
namespace leveldb {
@@ -24,10 +25,9 @@ class Arena {
char* AllocateAligned(size_t bytes);
// Returns an estimate of the total memory usage of data allocated
- // by the arena (including space allocated but not yet used for user
- // allocations).
+ // by the arena.
size_t MemoryUsage() const {
- return blocks_memory_ + blocks_.capacity() * sizeof(char*);
+ return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
}
private:
@@ -41,8 +41,8 @@ class Arena {
// Array of new[] allocated memory blocks
std::vector<char*> blocks_;
- // Bytes of memory in blocks allocated so far
- size_t blocks_memory_;
+ // Total memory usage of the arena.
+ port::AtomicPointer memory_usage_;
// No copying allowed
Arena(const Arena&);
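The Arena change above publishes its running total through a port::AtomicPointer so that readers never need the caller's external lock. A small illustrative sketch of that idiom, not part of the patch; it assumes the internal port/port.h header and, like Arena, a single writer (or external synchronization) on the update path.

#include <stdint.h>
#include "port/port.h"

// Lock-free publication of a monotonically growing size estimate.
class SizeEstimate {
 public:
  SizeEstimate() : value_(reinterpret_cast<void*>(0)) {}

  // Readers may observe a slightly stale value, which is acceptable for an
  // estimate such as Arena::MemoryUsage().
  size_t Load() const {
    return reinterpret_cast<uintptr_t>(value_.NoBarrier_Load());
  }

  // Not atomic: assumes a single writer, as in AllocateNewBlock().
  void Add(size_t n) {
    value_.NoBarrier_Store(reinterpret_cast<void*>(Load() + n));
  }

 private:
  leveldb::port::AtomicPointer value_;
};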
diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc
index a27a2ace28..bf3e4ca6e9 100644
--- a/src/leveldb/util/bloom.cc
+++ b/src/leveldb/util/bloom.cc
@@ -47,7 +47,7 @@ class BloomFilterPolicy : public FilterPolicy {
dst->resize(init_size + bytes, 0);
dst->push_back(static_cast<char>(k_)); // Remember # of probes in filter
char* array = &(*dst)[init_size];
- for (size_t i = 0; i < n; i++) {
+ for (int i = 0; i < n; i++) {
// Use double-hashing to generate a sequence of hash values.
// See analysis in [Kirsch,Mitzenmacher 2006].
uint32_t h = BloomHash(keys[i]);
diff --git a/src/leveldb/util/bloom_test.cc b/src/leveldb/util/bloom_test.cc
index 77fb1b3159..1b87a2be3f 100644
--- a/src/leveldb/util/bloom_test.cc
+++ b/src/leveldb/util/bloom_test.cc
@@ -46,7 +46,8 @@ class BloomTest {
key_slices.push_back(Slice(keys_[i]));
}
filter_.clear();
- policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_);
+ policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
+ &filter_);
keys_.clear();
if (kVerbose >= 2) DumpFilter();
}
diff --git a/src/leveldb/util/cache.cc b/src/leveldb/util/cache.cc
index 8b197bc02a..ce46886171 100644
--- a/src/leveldb/util/cache.cc
+++ b/src/leveldb/util/cache.cc
@@ -19,6 +19,23 @@ Cache::~Cache() {
namespace {
// LRU cache implementation
+//
+// Cache entries have an "in_cache" boolean indicating whether the cache has a
+// reference on the entry. The only ways that this can become false without the
+// entry being passed to its "deleter" are via Erase(), via Insert() when
+// an element with a duplicate key is inserted, or on destruction of the cache.
+//
+// The cache keeps two linked lists of items in the cache. All items in the
+// cache are in one list or the other, and never both. Items still referenced
+// by clients but erased from the cache are in neither list. The lists are:
+// - in-use: contains the items currently referenced by clients, in no
+// particular order. (This list is used for invariant checking. If we
+// removed the check, elements that would otherwise be on this list could be
+// left as disconnected singleton lists.)
+// - LRU: contains the items not currently referenced by clients, in LRU order
+// Elements are moved between these lists by the Ref() and Unref() methods,
+// when they detect an element in the cache acquiring or losing its only
+// external reference.
// An entry is a variable length heap-allocated structure. Entries
// are kept in a circular doubly linked list ordered by access time.
@@ -30,7 +47,8 @@ struct LRUHandle {
LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
- uint32_t refs;
+ bool in_cache; // Whether entry is in the cache.
+ uint32_t refs; // References, including cache reference, if present.
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
char key_data[1]; // Beginning of key
@@ -147,49 +165,77 @@ class LRUCache {
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
void Erase(const Slice& key, uint32_t hash);
+ void Prune();
+ size_t TotalCharge() const {
+ MutexLock l(&mutex_);
+ return usage_;
+ }
private:
void LRU_Remove(LRUHandle* e);
- void LRU_Append(LRUHandle* e);
+ void LRU_Append(LRUHandle* list, LRUHandle* e);
+ void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
+ bool FinishErase(LRUHandle* e);
// Initialized before use.
size_t capacity_;
// mutex_ protects the following state.
- port::Mutex mutex_;
+ mutable port::Mutex mutex_;
size_t usage_;
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
+ // Entries have refs==1 and in_cache==true.
LRUHandle lru_;
+ // Dummy head of in-use list.
+ // Entries are in use by clients, and have refs >= 2 and in_cache==true.
+ LRUHandle in_use_;
+
HandleTable table_;
};
LRUCache::LRUCache()
: usage_(0) {
- // Make empty circular linked list
+ // Make empty circular linked lists.
lru_.next = &lru_;
lru_.prev = &lru_;
+ in_use_.next = &in_use_;
+ in_use_.prev = &in_use_;
}
LRUCache::~LRUCache() {
+ assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle
for (LRUHandle* e = lru_.next; e != &lru_; ) {
LRUHandle* next = e->next;
- assert(e->refs == 1); // Error if caller has an unreleased handle
+ assert(e->in_cache);
+ e->in_cache = false;
+ assert(e->refs == 1); // Invariant of lru_ list.
Unref(e);
e = next;
}
}
+void LRUCache::Ref(LRUHandle* e) {
+ if (e->refs == 1 && e->in_cache) { // If on lru_ list, move to in_use_ list.
+ LRU_Remove(e);
+ LRU_Append(&in_use_, e);
+ }
+ e->refs++;
+}
+
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
- if (e->refs <= 0) {
- usage_ -= e->charge;
+ if (e->refs == 0) { // Deallocate.
+ assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
+ } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list.
+ LRU_Remove(e);
+ LRU_Append(&lru_, e);
}
}
@@ -198,10 +244,10 @@ void LRUCache::LRU_Remove(LRUHandle* e) {
e->prev->next = e->next;
}
-void LRUCache::LRU_Append(LRUHandle* e) {
- // Make "e" newest entry by inserting just before lru_
- e->next = &lru_;
- e->prev = lru_.prev;
+void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
+ // Make "e" newest entry by inserting just before *list
+ e->next = list;
+ e->prev = list->prev;
e->prev->next = e;
e->next->prev = e;
}
@@ -210,9 +256,7 @@ Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
if (e != NULL) {
- e->refs++;
- LRU_Remove(e);
- LRU_Append(e);
+ Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
}
@@ -234,34 +278,58 @@ Cache::Handle* LRUCache::Insert(
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
- e->refs = 2; // One from LRUCache, one for the returned handle
+ e->in_cache = false;
+ e->refs = 1; // for the returned handle.
memcpy(e->key_data, key.data(), key.size());
- LRU_Append(e);
- usage_ += charge;
- LRUHandle* old = table_.Insert(e);
- if (old != NULL) {
- LRU_Remove(old);
- Unref(old);
- }
+ if (capacity_ > 0) {
+ e->refs++; // for the cache's reference.
+ e->in_cache = true;
+ LRU_Append(&in_use_, e);
+ usage_ += charge;
+ FinishErase(table_.Insert(e));
+ } // else don't cache. (Tests use capacity_==0 to turn off caching.)
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
- LRU_Remove(old);
- table_.Remove(old->key(), old->hash);
- Unref(old);
+ assert(old->refs == 1);
+ bool erased = FinishErase(table_.Remove(old->key(), old->hash));
+ if (!erased) { // to avoid an unused variable warning when compiled with NDEBUG
+ assert(erased);
+ }
}
return reinterpret_cast<Cache::Handle*>(e);
}
-void LRUCache::Erase(const Slice& key, uint32_t hash) {
- MutexLock l(&mutex_);
- LRUHandle* e = table_.Remove(key, hash);
+// If e != NULL, finish removing *e from the cache; it has already been removed
+// from the hash table. Return whether e != NULL. Requires mutex_ held.
+bool LRUCache::FinishErase(LRUHandle* e) {
if (e != NULL) {
+ assert(e->in_cache);
LRU_Remove(e);
+ e->in_cache = false;
+ usage_ -= e->charge;
Unref(e);
}
+ return e != NULL;
+}
+
+void LRUCache::Erase(const Slice& key, uint32_t hash) {
+ MutexLock l(&mutex_);
+ FinishErase(table_.Remove(key, hash));
+}
+
+void LRUCache::Prune() {
+ MutexLock l(&mutex_);
+ while (lru_.next != &lru_) {
+ LRUHandle* e = lru_.next;
+ assert(e->refs == 1);
+ bool erased = FinishErase(table_.Remove(e->key(), e->hash));
+ if (!erased) { // to avoid an unused variable warning when compiled with NDEBUG
+ assert(erased);
+ }
+ }
}
static const int kNumShardBits = 4;
@@ -314,6 +382,18 @@ class ShardedLRUCache : public Cache {
MutexLock l(&id_mutex_);
return ++(last_id_);
}
+ virtual void Prune() {
+ for (int s = 0; s < kNumShards; s++) {
+ shard_[s].Prune();
+ }
+ }
+ virtual size_t TotalCharge() const {
+ size_t total = 0;
+ for (int s = 0; s < kNumShards; s++) {
+ total += shard_[s].TotalCharge();
+ }
+ return total;
+ }
};
} // end anonymous namespace
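The comment block at the top of this file describes the new two-list scheme (lru_ for idle entries, in_use_ for entries with client handles). From the public API the lifecycle looks like the sketch below, which is illustrative only and uses hypothetical helper names:

#include "leveldb/cache.h"

namespace {
void NoopDeleter(const leveldb::Slice& /*key*/, void* /*value*/) {}
}  // namespace

void HandleLifecycle(leveldb::Cache* cache, void* value) {
  // Insert() returns a handle, so the entry starts on the in-use list
  // (refs >= 2: one reference for the cache, one for this handle).
  leveldb::Cache::Handle* h = cache->Insert("key", value, 1, &NoopDeleter);
  // Releasing the last client handle moves the entry to the LRU list
  // (refs == 1), where eviction and Prune() can reclaim it.
  cache->Release(h);
}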
diff --git a/src/leveldb/util/cache_test.cc b/src/leveldb/util/cache_test.cc
index 43716715a8..468f7a6425 100644
--- a/src/leveldb/util/cache_test.cc
+++ b/src/leveldb/util/cache_test.cc
@@ -59,6 +59,11 @@ class CacheTest {
&CacheTest::Deleter));
}
+ Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {
+ return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
+ &CacheTest::Deleter);
+ }
+
void Erase(int key) {
cache_->Erase(EncodeKey(key));
}
@@ -135,8 +140,11 @@ TEST(CacheTest, EntriesArePinned) {
TEST(CacheTest, EvictionPolicy) {
Insert(100, 101);
Insert(200, 201);
+ Insert(300, 301);
+ Cache::Handle* h = cache_->Lookup(EncodeKey(300));
- // Frequently used entry must be kept around
+ // Frequently used entry must be kept around,
+ // as must things that are still in use.
for (int i = 0; i < kCacheSize + 100; i++) {
Insert(1000+i, 2000+i);
ASSERT_EQ(2000+i, Lookup(1000+i));
@@ -144,6 +152,25 @@ TEST(CacheTest, EvictionPolicy) {
}
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
+ ASSERT_EQ(301, Lookup(300));
+ cache_->Release(h);
+}
+
+TEST(CacheTest, UseExceedsCacheSize) {
+ // Overfill the cache, keeping handles on all inserted entries.
+ std::vector<Cache::Handle*> h;
+ for (int i = 0; i < kCacheSize + 100; i++) {
+ h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+ }
+
+ // Check that all the entries can be found in the cache.
+ for (int i = 0; i < h.size(); i++) {
+ ASSERT_EQ(2000+i, Lookup(1000+i));
+ }
+
+ for (int i = 0; i < h.size(); i++) {
+ cache_->Release(h[i]);
+ }
}
TEST(CacheTest, HeavyEntries) {
@@ -179,6 +206,19 @@ TEST(CacheTest, NewId) {
ASSERT_NE(a, b);
}
+TEST(CacheTest, Prune) {
+ Insert(1, 100);
+ Insert(2, 200);
+
+ Cache::Handle* handle = cache_->Lookup(EncodeKey(1));
+ ASSERT_TRUE(handle);
+ cache_->Prune();
+ cache_->Release(handle);
+
+ ASSERT_EQ(100, Lookup(1));
+ ASSERT_EQ(-1, Lookup(2));
+}
+
} // namespace leveldb
int main(int argc, char** argv) {
diff --git a/src/leveldb/util/env.cc b/src/leveldb/util/env.cc
index c2600e964a..c58a0821ef 100644
--- a/src/leveldb/util/env.cc
+++ b/src/leveldb/util/env.cc
@@ -9,6 +9,10 @@ namespace leveldb {
Env::~Env() {
}
+Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
+ return Status::NotSupported("NewAppendableFile", fname);
+}
+
SequentialFile::~SequentialFile() {
}
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index ba2667864a..e0fca52f46 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -351,6 +351,19 @@ class PosixEnv : public Env {
return s;
}
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ Status s;
+ FILE* f = fopen(fname.c_str(), "a");
+ if (f == NULL) {
+ *result = NULL;
+ s = IOError(fname, errno);
+ } else {
+ *result = new PosixWritableFile(fname, f);
+ }
+ return s;
+ }
+
virtual bool FileExists(const std::string& fname) {
return access(fname.c_str(), F_OK) == 0;
}
diff --git a/src/leveldb/util/env_win.cc b/src/leveldb/util/env_win.cc
index e11a96b791..b074b7579e 100644
--- a/src/leveldb/util/env_win.cc
+++ b/src/leveldb/util/env_win.cc
@@ -106,7 +106,7 @@ private:
class Win32WritableFile : public WritableFile
{
public:
- Win32WritableFile(const std::string& fname);
+ Win32WritableFile(const std::string& fname, bool append);
~Win32WritableFile();
virtual Status Append(const Slice& data);
@@ -158,6 +158,8 @@ public:
RandomAccessFile** result);
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result);
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result);
virtual bool FileExists(const std::string& fname);
@@ -423,17 +425,23 @@ void Win32RandomAccessFile::_CleanUp()
}
}
-Win32WritableFile::Win32WritableFile(const std::string& fname)
+Win32WritableFile::Win32WritableFile(const std::string& fname, bool append)
: filename_(fname)
{
std::wstring path;
ToWidePath(fname, path);
- DWORD Flag = PathFileExistsW(path.c_str()) ? OPEN_EXISTING : CREATE_ALWAYS;
+ // NewAppendableFile: append to an existing file, or create a new one
+ // if none exists - this is OPEN_ALWAYS behavior, with
+ // FILE_APPEND_DATA to avoid having to manually position the file
+ // pointer at the end of the file.
+ // NewWritableFile: create a new file, delete if it exists - this is
+ // CREATE_ALWAYS behavior. This file is used for writing only so
+ // use GENERIC_WRITE.
_hFile = CreateFileW(path.c_str(),
- GENERIC_READ | GENERIC_WRITE,
+ append ? FILE_APPEND_DATA : GENERIC_WRITE,
FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE,
NULL,
- Flag,
+ append ? OPEN_ALWAYS : CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
NULL);
// CreateFileW returns INVALID_HANDLE_VALUE in case of error, always check isEnable() before use
@@ -823,7 +831,9 @@ Status Win32Env::NewLogger( const std::string& fname, Logger** result )
{
Status sRet;
std::string path = fname;
- Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path));
+ // Logs are opened with write semantics, not with append semantics
+ // (see PosixEnv::NewLogger)
+ Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path), false);
if(!pMapFile->isEnable()){
delete pMapFile;
*result = NULL;
@@ -837,7 +847,20 @@ Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** resul
{
Status sRet;
std::string path = fname;
- Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path));
+ Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), false);
+ if(!pFile->isEnable()){
+ *result = NULL;
+ sRet = Status::IOError(fname,Win32::GetLastErrSz());
+ }else
+ *result = pFile;
+ return sRet;
+}
+
+Status Win32Env::NewAppendableFile( const std::string& fname, WritableFile** result )
+{
+ Status sRet;
+ std::string path = fname;
+ Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), true);
if(!pFile->isEnable()){
*result = NULL;
sRet = Status::IOError(fname,Win32::GetLastErrSz());
diff --git a/src/leveldb/util/options.cc b/src/leveldb/util/options.cc
index 76af5b9302..8b618fb1ae 100644
--- a/src/leveldb/util/options.cc
+++ b/src/leveldb/util/options.cc
@@ -22,8 +22,8 @@ Options::Options()
block_size(4096),
block_restart_interval(16),
compression(kSnappyCompression),
+ reuse_logs(false),
filter_policy(NULL) {
}
-
} // namespace leveldb
diff --git a/src/leveldb/util/testutil.h b/src/leveldb/util/testutil.h
index adad3fc1ea..d7e4583702 100644
--- a/src/leveldb/util/testutil.h
+++ b/src/leveldb/util/testutil.h
@@ -45,6 +45,16 @@ class ErrorEnv : public EnvWrapper {
}
return target()->NewWritableFile(fname, result);
}
+
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ if (writable_file_error_) {
+ ++num_writable_file_errors_;
+ *result = NULL;
+ return Status::IOError(fname, "fake error");
+ }
+ return target()->NewAppendableFile(fname, result);
+ }
};
} // namespace test
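The ErrorEnv hook added above lets tests inject failures into appendable-file creation the same way they already could for writable files. A hypothetical test fragment (illustration only):

    #include <cassert>
    #include "util/testutil.h"

    void AppendableFileErrorInjection() {
        leveldb::test::ErrorEnv env;
        env.writable_file_error_ = true;        // shared flag covers both paths
        leveldb::WritableFile* file = nullptr;
        leveldb::Status s = env.NewAppendableFile("ignored", &file);
        assert(!s.ok());
        assert(env.num_writable_file_errors_ == 1);
    }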
diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp
index 31332526a9..882717ac56 100644
--- a/src/merkleblock.cpp
+++ b/src/merkleblock.cpp
@@ -23,8 +23,8 @@ CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter& filter)
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
- const uint256& hash = block.vtx[i].GetHash();
- if (filter.IsRelevantAndUpdate(block.vtx[i]))
+ const uint256& hash = block.vtx[i]->GetHash();
+ if (filter.IsRelevantAndUpdate(*block.vtx[i]))
{
vMatch.push_back(true);
vMatchedTxn.push_back(make_pair(i, hash));
@@ -49,7 +49,7 @@ CMerkleBlock::CMerkleBlock(const CBlock& block, const std::set<uint256>& txids)
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
- const uint256& hash = block.vtx[i].GetHash();
+ const uint256& hash = block.vtx[i]->GetHash();
if (txids.count(hash))
vMatch.push_back(true);
else
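This hunk, like the miner.cpp changes that follow, reflects block.vtx changing from a vector of CTransaction values to a vector of CTransactionRef shared pointers (see the MapRelay typedef later in this diff), so element access now goes through a dereference. A hypothetical fragment showing the pattern (not part of the patch):

    #include "primitives/block.h"
    #include "primitives/transaction.h"

    // Iterate a block whose vtx holds CTransactionRef entries.
    void ListTxids(const CBlock& block)
    {
        for (const CTransactionRef& ptx : block.vtx) {
            const uint256& txid = ptx->GetHash();   // was block.vtx[i].GetHash()
            (void)txid;                             // use txid here
        }
    }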
diff --git a/src/miner.cpp b/src/miner.cpp
index ebf2f21ffd..e80e8a2656 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -13,7 +13,7 @@
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "hash.h"
-#include "main.h"
+#include "validation.h"
#include "net.h"
#include "policy/policy.h"
#include "pow.h"
@@ -134,7 +134,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
- pblock->vtx.push_back(CTransaction());
+ pblock->vtx.emplace_back();
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end
@@ -169,7 +169,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
nLastBlockTx = nBlockTx;
nLastBlockSize = nBlockSize;
nLastBlockWeight = nBlockWeight;
- LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOpsCost);
// Create coinbase transaction.
CMutableTransaction coinbaseTx;
@@ -179,16 +178,19 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
- pblock->vtx[0] = coinbaseTx;
+ pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx));
pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus());
pblocktemplate->vTxFees[0] = -nFees;
+ uint64_t nSerializeSize = GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION);
+ LogPrintf("CreateNewBlock(): total size: %u block weight: %u txs: %u fees: %ld sigops %d\n", nSerializeSize, GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);
+
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
pblock->nNonce = 0;
- pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(pblock->vtx[0]);
+ pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]);
CValidationState state;
if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
@@ -310,7 +312,7 @@ bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter)
void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
{
- pblock->vtx.push_back(iter->GetTx());
+ pblock->vtx.emplace_back(iter->GetSharedTx());
pblocktemplate->vTxFees.push_back(iter->GetFee());
pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
if (fNeedSizeAccounting) {
@@ -599,10 +601,10 @@ void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned
}
++nExtraNonce;
unsigned int nHeight = pindexPrev->nHeight+1; // Height first in coinbase required for block.version=2
- CMutableTransaction txCoinbase(pblock->vtx[0]);
+ CMutableTransaction txCoinbase(*pblock->vtx[0]);
txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce)) + COINBASE_FLAGS;
assert(txCoinbase.vin[0].scriptSig.size() <= 100);
- pblock->vtx[0] = txCoinbase;
+ pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
}
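Because vtx entries are now immutable shared references, the miner edits the coinbase by copying it into a CMutableTransaction and wrapping the result back with MakeTransactionRef, as IncrementExtraNonce does above. The same idiom, condensed into a hypothetical helper (illustration only):

    #include <utility>
    #include "primitives/block.h"
    #include "primitives/transaction.h"
    #include "script/script.h"

    void RewriteCoinbaseScriptSig(CBlock& block, const CScript& newScriptSig)
    {
        CMutableTransaction txCoinbase(*block.vtx[0]);             // copy out
        txCoinbase.vin[0].scriptSig = newScriptSig;                // mutate the copy
        block.vtx[0] = MakeTransactionRef(std::move(txCoinbase));  // wrap back
    }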
diff --git a/src/net.cpp b/src/net.cpp
index 15c4514f15..27b200b3f0 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -74,7 +74,6 @@ bool fRelayTxes = true;
CCriticalSection cs_mapLocalHost;
std::map<CNetAddr, LocalServiceInfo> mapLocalHost;
static bool vfLimited[NET_MAX] = {};
-static CNode* pnodeLocalHost = NULL;
std::string strSubVersion;
limitedmap<uint256, int64_t> mapAlreadyAskedFor(MAX_INV_SZ);
@@ -768,13 +767,13 @@ const uint256& CNetMessage::GetMessageHash() const
// requires LOCK(cs_vSend)
size_t SocketSendData(CNode *pnode)
{
- std::deque<CSerializeData>::iterator it = pnode->vSendMsg.begin();
+ auto it = pnode->vSendMsg.begin();
size_t nSentSize = 0;
while (it != pnode->vSendMsg.end()) {
- const CSerializeData &data = *it;
+ const auto &data = *it;
assert(data.size() > pnode->nSendOffset);
- int nBytes = send(pnode->hSocket, &data[pnode->nSendOffset], data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
+ int nBytes = send(pnode->hSocket, reinterpret_cast<const char*>(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
if (nBytes > 0) {
pnode->nLastSend = GetTime();
pnode->nSendBytes += nBytes;
@@ -1067,8 +1066,7 @@ void CConnman::ThreadSocketHandler()
pnode->CloseSocketDisconnect();
// hold in disconnected pool until all refs are released
- if (pnode->fNetworkNode || pnode->fInbound)
- pnode->Release();
+ pnode->Release();
vNodesDisconnected.push_back(pnode);
}
}
@@ -1103,8 +1101,13 @@ void CConnman::ThreadSocketHandler()
}
}
}
- if(vNodes.size() != nPrevNodeCount) {
- nPrevNodeCount = vNodes.size();
+ size_t vNodesSize;
+ {
+ LOCK(cs_vNodes);
+ vNodesSize = vNodes.size();
+ }
+ if(vNodesSize != nPrevNodeCount) {
+ nPrevNodeCount = vNodesSize;
if(clientInterface)
clientInterface->NotifyNumConnectionsChanged(nPrevNodeCount);
}
@@ -1808,7 +1811,6 @@ bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
return false;
if (grantOutbound)
grantOutbound->MoveTo(pnode->grantOutbound);
- pnode->fNetworkNode = true;
if (fOneShot)
pnode->fOneShot = true;
if (fFeeler)
@@ -2139,17 +2141,6 @@ bool CConnman::Start(boost::thread_group& threadGroup, CScheduler& scheduler, st
semOutbound = new CSemaphore(std::min((nMaxOutbound + nMaxFeeler), nMaxConnections));
}
- if (pnodeLocalHost == NULL) {
- CNetAddr local;
- LookupHost("127.0.0.1", local, false);
-
- NodeId id = GetNewNodeId();
- uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
-
- pnodeLocalHost = new CNode(id, nLocalServices, GetBestHeight(), INVALID_SOCKET, CAddress(CService(local, 0), nLocalServices), 0, nonce);
- GetNodeSignals().InitializeNode(pnodeLocalHost, *this);
- }
-
//
// Start threads
//
@@ -2227,9 +2218,6 @@ void CConnman::Stop()
vhListenSocket.clear();
delete semOutbound;
semOutbound = NULL;
- if(pnodeLocalHost)
- DeleteNode(pnodeLocalHost);
- pnodeLocalHost = NULL;
}
void CConnman::DeleteNode(CNode* pnode)
@@ -2531,7 +2519,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
fOneShot = false;
fClient = false; // set by version message
fFeeler = false;
- fNetworkNode = false;
fSuccessfullyConnected = false;
fDisconnect = false;
nRefCount = 0;
@@ -2612,30 +2599,19 @@ void CNode::AskFor(const CInv& inv)
mapAskFor.insert(std::make_pair(nRequestTime, inv));
}
-CDataStream CConnman::BeginMessage(CNode* pnode, int nVersion, int flags, const std::string& sCommand)
+void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
- return {SER_NETWORK, (nVersion ? nVersion : pnode->GetSendVersion()) | flags, CMessageHeader(Params().MessageStart(), sCommand.c_str(), 0) };
-}
-
-void CConnman::EndMessage(CDataStream& strm)
-{
- // Set the size
- assert(strm.size () >= CMessageHeader::HEADER_SIZE);
- unsigned int nSize = strm.size() - CMessageHeader::HEADER_SIZE;
- WriteLE32((uint8_t*)&strm[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize);
- // Set the checksum
- uint256 hash = Hash(strm.begin() + CMessageHeader::HEADER_SIZE, strm.end());
- memcpy((char*)&strm[CMessageHeader::CHECKSUM_OFFSET], hash.begin(), CMessageHeader::CHECKSUM_SIZE);
-
-}
+ size_t nMessageSize = msg.data.size();
+ size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE;
+ LogPrint("net", "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id);
-void CConnman::PushMessage(CNode* pnode, CDataStream& strm, const std::string& sCommand)
-{
- if(strm.empty())
- return;
+ std::vector<unsigned char> serializedHeader;
+ serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
+ uint256 hash = Hash(msg.data.data(), msg.data.data() + nMessageSize);
+ CMessageHeader hdr(Params().MessageStart(), msg.command.c_str(), nMessageSize);
+ memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
- unsigned int nSize = strm.size() - CMessageHeader::HEADER_SIZE;
- LogPrint("net", "sending %s (%d bytes) peer=%d\n", SanitizeString(sCommand.c_str()), nSize, pnode->id);
+ CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr};
size_t nBytesSent = 0;
{
@@ -2644,11 +2620,14 @@ void CConnman::PushMessage(CNode* pnode, CDataStream& strm, const std::string& s
return;
}
bool optimisticSend(pnode->vSendMsg.empty());
- pnode->vSendMsg.emplace_back(strm.begin(), strm.end());
//log total amount of bytes per command
- pnode->mapSendBytesPerMsgCmd[sCommand] += strm.size();
- pnode->nSendSize += strm.size();
+ pnode->mapSendBytesPerMsgCmd[msg.command] += nTotalSize;
+ pnode->nSendSize += nTotalSize;
+
+ pnode->vSendMsg.push_back(std::move(serializedHeader));
+ if (nMessageSize)
+ pnode->vSendMsg.push_back(std::move(msg.data));
// If write queue empty, attempt "optimistic write"
if (optimisticSend == true)
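In the new PushMessage the payload is hashed first, then the 24-byte header (magic, command, length, checksum) is serialized into its own buffer; header and payload are queued as two separate vSendMsg entries so the payload can be moved without a copy. A hypothetical helper condensing the header-building steps above (illustration only):

    #include <cstring>
    #include <string>
    #include <vector>
    #include "hash.h"
    #include "protocol.h"
    #include "streams.h"
    #include "version.h"

    std::vector<unsigned char> BuildHeader(const CMessageHeader::MessageStartChars& start,
                                           const std::string& command,
                                           const std::vector<unsigned char>& payload)
    {
        CMessageHeader hdr(start, command.c_str(), payload.size());
        uint256 hash = Hash(payload.data(), payload.data() + payload.size());
        memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);

        std::vector<unsigned char> serializedHeader;
        serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
        CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr};
        return serializedHeader;
    }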
diff --git a/src/net.h b/src/net.h
index e2d88b478d..a7c0ecf324 100644
--- a/src/net.h
+++ b/src/net.h
@@ -101,6 +101,20 @@ class CTransaction;
class CNodeStats;
class CClientUIInterface;
+struct CSerializedNetMsg
+{
+ CSerializedNetMsg() = default;
+ CSerializedNetMsg(CSerializedNetMsg&&) = default;
+ CSerializedNetMsg& operator=(CSerializedNetMsg&&) = default;
+ // No copying, only moves.
+ CSerializedNetMsg(const CSerializedNetMsg& msg) = delete;
+ CSerializedNetMsg& operator=(const CSerializedNetMsg&) = delete;
+
+ std::vector<unsigned char> data;
+ std::string command;
+};
+
+
class CConnman
{
public:
@@ -138,32 +152,7 @@ public:
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
- template <typename... Args>
- void PushMessageWithVersionAndFlag(CNode* pnode, int nVersion, int flag, const std::string& sCommand, Args&&... args)
- {
- auto msg(BeginMessage(pnode, nVersion, flag, sCommand));
- ::SerializeMany(msg, std::forward<Args>(args)...);
- EndMessage(msg);
- PushMessage(pnode, msg, sCommand);
- }
-
- template <typename... Args>
- void PushMessageWithFlag(CNode* pnode, int flag, const std::string& sCommand, Args&&... args)
- {
- PushMessageWithVersionAndFlag(pnode, 0, flag, sCommand, std::forward<Args>(args)...);
- }
-
- template <typename... Args>
- void PushMessageWithVersion(CNode* pnode, int nVersion, const std::string& sCommand, Args&&... args)
- {
- PushMessageWithVersionAndFlag(pnode, nVersion, 0, sCommand, std::forward<Args>(args)...);
- }
-
- template <typename... Args>
- void PushMessage(CNode* pnode, const std::string& sCommand, Args&&... args)
- {
- PushMessageWithVersionAndFlag(pnode, 0, 0, sCommand, std::forward<Args>(args)...);
- }
+ void PushMessage(CNode* pnode, CSerializedNetMsg&& msg);
template<typename Callable>
bool ForEachNodeContinueIf(Callable&& func)
@@ -374,10 +363,6 @@ private:
unsigned int GetReceiveFloodSize() const;
- CDataStream BeginMessage(CNode* node, int nVersion, int flags, const std::string& sCommand);
- void PushMessage(CNode* pnode, CDataStream& strm, const std::string& sCommand);
- void EndMessage(CDataStream& strm);
-
// Network stats
void RecordBytesRecv(uint64_t bytes);
void RecordBytesSent(uint64_t bytes);
@@ -403,7 +388,7 @@ private:
unsigned int nReceiveFloodSize;
std::vector<ListenSocket> vhListenSocket;
- bool fNetworkActive;
+ std::atomic<bool> fNetworkActive;
banmap_t setBanned;
CCriticalSection cs_setBanned;
bool setBannedIsDirty;
@@ -601,7 +586,7 @@ public:
size_t nSendSize; // total size of all vSendMsg entries
size_t nSendOffset; // offset inside the first vSendMsg already sent
uint64_t nSendBytes;
- std::deque<CSerializeData> vSendMsg;
+ std::deque<std::vector<unsigned char>> vSendMsg;
CCriticalSection cs_vSend;
std::deque<CInv> vRecvGetData;
@@ -628,9 +613,8 @@ public:
bool fOneShot;
bool fClient;
const bool fInbound;
- bool fNetworkNode;
bool fSuccessfullyConnected;
- bool fDisconnect;
+ std::atomic_bool fDisconnect;
// We use fRelayTxes for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
// b) the peer may tell us in its version message that we should not relay tx invs
@@ -771,7 +755,7 @@ public:
{
// The send version should always be explicitly set to
// INIT_PROTO_VERSION rather than using this value until the handshake
- // is complete. See PushMessageWithVersion().
+ // is complete.
assert(nSendVersion != 0);
return nSendVersion;
}
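Call sites now build a CSerializedNetMsg with CNetMsgMaker (from netmessagemaker.h, included by the new net_processing.cpp below) and hand it to the single PushMessage overload. A minimal caller-side sketch (illustration only):

    #include "net.h"
    #include "netmessagemaker.h"
    #include "protocol.h"

    // Replaces the old PushMessageWithVersion-style template calls.
    void SendSendHeaders(CConnman& connman, CNode* pnode)
    {
        const CNetMsgMaker msgMaker(pnode->GetSendVersion());
        connman.PushMessage(pnode, msgMaker.Make(NetMsgType::SENDHEADERS));
    }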
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
new file mode 100644
index 0000000000..4bee13a7bf
--- /dev/null
+++ b/src/net_processing.cpp
@@ -0,0 +1,3026 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "net_processing.h"
+
+#include "addrman.h"
+#include "arith_uint256.h"
+#include "blockencodings.h"
+#include "chainparams.h"
+#include "consensus/validation.h"
+#include "hash.h"
+#include "init.h"
+#include "validation.h"
+#include "merkleblock.h"
+#include "net.h"
+#include "netmessagemaker.h"
+#include "netbase.h"
+#include "policy/fees.h"
+#include "policy/policy.h"
+#include "primitives/block.h"
+#include "primitives/transaction.h"
+#include "random.h"
+#include "tinyformat.h"
+#include "txmempool.h"
+#include "ui_interface.h"
+#include "util.h"
+#include "utilmoneystr.h"
+#include "utilstrencodings.h"
+#include "validationinterface.h"
+
+#include <boost/thread.hpp>
+
+using namespace std;
+
+#if defined(NDEBUG)
+# error "Bitcoin cannot be compiled without assertions."
+#endif
+
+int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block
+
+struct IteratorComparator
+{
+ template<typename I>
+ bool operator()(const I& a, const I& b)
+ {
+ return &(*a) < &(*b);
+ }
+};
+
+struct COrphanTx {
+ // When modifying, adapt the copy of this definition in tests/DoS_tests.
+ CTransaction tx;
+ NodeId fromPeer;
+ int64_t nTimeExpire;
+};
+map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
+map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
+void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
+
+// Internal stuff
+namespace {
+ /** Number of nodes with fSyncStarted. */
+ int nSyncStarted = 0;
+
+ /**
+ * Sources of received blocks, saved to be able to send them reject
+ * messages or ban them when processing happens afterwards. Protected by
+ * cs_main.
+ * Set mapBlockSource[hash].second to false if the node should not be
+ * punished if the block is invalid.
+ */
+ map<uint256, std::pair<NodeId, bool>> mapBlockSource;
+
+ /**
+ * Filter for transactions that were recently rejected by
+ * AcceptToMemoryPool. These are not rerequested until the chain tip
+ * changes, at which point the entire filter is reset. Protected by
+ * cs_main.
+ *
+ * Without this filter we'd be re-requesting txs from each of our peers,
+ * increasing bandwidth consumption considerably. For instance, with 100
+ * peers, half of which relay a tx we don't accept, that might be a 50x
+ * bandwidth increase. A flooding attacker attempting to roll-over the
+ * filter using minimum-sized, 60-byte transactions might manage to send
+ * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
+ * two minute window to send invs to us.
+ *
+ * Decreasing the false positive rate is fairly cheap, so we pick one in a
+ * million to make it highly unlikely for users to have issues with this
+ * filter.
+ *
+ * Memory used: 1.3 MB
+ */
+ std::unique_ptr<CRollingBloomFilter> recentRejects;
+ uint256 hashRecentRejectsChainTip;
+
+ /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
+ struct QueuedBlock {
+ uint256 hash;
+ CBlockIndex* pindex; //!< Optional.
+ bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
+ std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
+ };
+ map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
+
+ /** Stack of nodes which we have set to announce using compact blocks */
+ list<NodeId> lNodesAnnouncingHeaderAndIDs;
+
+ /** Number of preferable block download peers. */
+ int nPreferredDownload = 0;
+
+ /** Number of peers from which we're downloading blocks. */
+ int nPeersWithValidatedDownloads = 0;
+
+ /** Relay map, protected by cs_main. */
+ typedef std::map<uint256, CTransactionRef> MapRelay;
+ MapRelay mapRelay;
+ /** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main. */
+ std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
+} // anon namespace
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Registration of network node signals.
+//
+
+namespace {
+
+struct CBlockReject {
+ unsigned char chRejectCode;
+ string strRejectReason;
+ uint256 hashBlock;
+};
+
+/**
+ * Maintain validation-specific state about nodes, protected by cs_main, instead
+ * of by CNode's own locks. This simplifies asynchronous operation, where
+ * processing of incoming data is done after the ProcessMessage call returns,
+ * and we're no longer holding the node's locks.
+ */
+struct CNodeState {
+ //! The peer's address
+ const CService address;
+ //! Whether we have a fully established connection.
+ bool fCurrentlyConnected;
+ //! Accumulated misbehaviour score for this peer.
+ int nMisbehavior;
+ //! Whether this peer should be disconnected and banned (unless whitelisted).
+ bool fShouldBan;
+ //! String name of this peer (debugging/logging purposes).
+ const std::string name;
+ //! List of asynchronously-determined block rejections to notify this peer about.
+ std::vector<CBlockReject> rejects;
+ //! The best known block we know this peer has announced.
+ CBlockIndex *pindexBestKnownBlock;
+ //! The hash of the last unknown block this peer has announced.
+ uint256 hashLastUnknownBlock;
+ //! The last full block we both have.
+ CBlockIndex *pindexLastCommonBlock;
+ //! The best header we have sent our peer.
+ CBlockIndex *pindexBestHeaderSent;
+ //! Length of current-streak of unconnecting headers announcements
+ int nUnconnectingHeaders;
+ //! Whether we've started headers synchronization with this peer.
+ bool fSyncStarted;
+ //! Since when we're stalling block download progress (in microseconds), or 0.
+ int64_t nStallingSince;
+ list<QueuedBlock> vBlocksInFlight;
+ //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
+ int64_t nDownloadingSince;
+ int nBlocksInFlight;
+ int nBlocksInFlightValidHeaders;
+ //! Whether we consider this a preferred download peer.
+ bool fPreferredDownload;
+ //! Whether this peer wants invs or headers (when possible) for block announcements.
+ bool fPreferHeaders;
+ //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
+ bool fPreferHeaderAndIDs;
+ /**
+ * Whether this peer will send us cmpctblocks if we request them.
+ * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
+ * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
+ */
+ bool fProvidesHeaderAndIDs;
+ //! Whether this peer can give us witnesses
+ bool fHaveWitness;
+ //! Whether this peer wants witnesses in cmpctblocks/blocktxns
+ bool fWantsCmpctWitness;
+ /**
+ * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
+ * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
+ */
+ bool fSupportsDesiredCmpctVersion;
+
+ CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
+ fCurrentlyConnected = false;
+ nMisbehavior = 0;
+ fShouldBan = false;
+ pindexBestKnownBlock = NULL;
+ hashLastUnknownBlock.SetNull();
+ pindexLastCommonBlock = NULL;
+ pindexBestHeaderSent = NULL;
+ nUnconnectingHeaders = 0;
+ fSyncStarted = false;
+ nStallingSince = 0;
+ nDownloadingSince = 0;
+ nBlocksInFlight = 0;
+ nBlocksInFlightValidHeaders = 0;
+ fPreferredDownload = false;
+ fPreferHeaders = false;
+ fPreferHeaderAndIDs = false;
+ fProvidesHeaderAndIDs = false;
+ fHaveWitness = false;
+ fWantsCmpctWitness = false;
+ fSupportsDesiredCmpctVersion = false;
+ }
+};
+
+/** Map maintaining per-node state. Requires cs_main. */
+map<NodeId, CNodeState> mapNodeState;
+
+// Requires cs_main.
+CNodeState *State(NodeId pnode) {
+ map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
+ if (it == mapNodeState.end())
+ return NULL;
+ return &it->second;
+}
+
+void UpdatePreferredDownload(CNode* node, CNodeState* state)
+{
+ nPreferredDownload -= state->fPreferredDownload;
+
+ // Whether this node should be marked as a preferred download node.
+ state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
+
+ nPreferredDownload += state->fPreferredDownload;
+}
+
+void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
+{
+ ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
+ uint64_t nonce = pnode->GetLocalNonce();
+ int nNodeStartingHeight = pnode->GetMyStartingHeight();
+ NodeId nodeid = pnode->GetId();
+ CAddress addr = pnode->addr;
+
+ CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
+ CAddress addrMe = CAddress(CService(), nLocalNodeServices);
+
+ connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
+ nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));
+
+ if (fLogIPs)
+ LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
+ else
+ LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+}
+
+void InitializeNode(CNode *pnode, CConnman& connman) {
+ CAddress addr = pnode->addr;
+ std::string addrName = pnode->addrName;
+ NodeId nodeid = pnode->GetId();
+ {
+ LOCK(cs_main);
+ mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
+ }
+ if(!pnode->fInbound)
+ PushNodeVersion(pnode, connman, GetTime());
+}
+
+void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
+ fUpdateConnectionTime = false;
+ LOCK(cs_main);
+ CNodeState *state = State(nodeid);
+
+ if (state->fSyncStarted)
+ nSyncStarted--;
+
+ if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
+ fUpdateConnectionTime = true;
+ }
+
+ BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
+ mapBlocksInFlight.erase(entry.hash);
+ }
+ EraseOrphansFor(nodeid);
+ nPreferredDownload -= state->fPreferredDownload;
+ nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
+ assert(nPeersWithValidatedDownloads >= 0);
+
+ mapNodeState.erase(nodeid);
+
+ if (mapNodeState.empty()) {
+ // Do a consistency check after the last peer is removed.
+ assert(mapBlocksInFlight.empty());
+ assert(nPreferredDownload == 0);
+ assert(nPeersWithValidatedDownloads == 0);
+ }
+}
+
+// Requires cs_main.
+// Returns a bool indicating whether we requested this block.
+// Also used if a block was /not/ received and timed out or started with another peer
+bool MarkBlockAsReceived(const uint256& hash) {
+ map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
+ if (itInFlight != mapBlocksInFlight.end()) {
+ CNodeState *state = State(itInFlight->second.first);
+ state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
+ if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
+ // Last validated block on the queue was received.
+ nPeersWithValidatedDownloads--;
+ }
+ if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
+ // First block on the queue was received, update the start download time for the next one
+ state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
+ }
+ state->vBlocksInFlight.erase(itInFlight->second.second);
+ state->nBlocksInFlight--;
+ state->nStallingSince = 0;
+ mapBlocksInFlight.erase(itInFlight);
+ return true;
+ }
+ return false;
+}
+
+// Requires cs_main.
+// returns false, still setting pit, if the block was already in flight from the same peer
+// pit will only be valid as long as the same cs_main lock is being held
+bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
+ CNodeState *state = State(nodeid);
+ assert(state != NULL);
+
+ // Short-circuit most stuff in case it's from the same node
+ map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
+ if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
+ *pit = &itInFlight->second.second;
+ return false;
+ }
+
+ // Make sure it's not listed somewhere already.
+ MarkBlockAsReceived(hash);
+
+ list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
+ {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)});
+ state->nBlocksInFlight++;
+ state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
+ if (state->nBlocksInFlight == 1) {
+ // We're starting a block download (batch) from this peer.
+ state->nDownloadingSince = GetTimeMicros();
+ }
+ if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
+ nPeersWithValidatedDownloads++;
+ }
+ itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
+ if (pit)
+ *pit = &itInFlight->second.second;
+ return true;
+}
+
+/** Check whether the last unknown block a peer advertised is not yet known. */
+void ProcessBlockAvailability(NodeId nodeid) {
+ CNodeState *state = State(nodeid);
+ assert(state != NULL);
+
+ if (!state->hashLastUnknownBlock.IsNull()) {
+ BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
+ if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
+ if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
+ state->pindexBestKnownBlock = itOld->second;
+ state->hashLastUnknownBlock.SetNull();
+ }
+ }
+}
+
+/** Update tracking information about which blocks a peer is assumed to have. */
+void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
+ CNodeState *state = State(nodeid);
+ assert(state != NULL);
+
+ ProcessBlockAvailability(nodeid);
+
+ BlockMap::iterator it = mapBlockIndex.find(hash);
+ if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
+ // An actually better block was announced.
+ if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
+ state->pindexBestKnownBlock = it->second;
+ } else {
+ // An unknown block was announced; just assume that the latest one is the best one.
+ state->hashLastUnknownBlock = hash;
+ }
+}
+
+void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) {
+ if (!nodestate->fSupportsDesiredCmpctVersion) {
+ // Never ask from peers who can't provide witnesses.
+ return;
+ }
+ if (nodestate->fProvidesHeaderAndIDs) {
+ for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
+ if (*it == pfrom->GetId()) {
+ lNodesAnnouncingHeaderAndIDs.erase(it);
+ lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
+ return;
+ }
+ }
+ bool fAnnounceUsingCMPCTBLOCK = false;
+ uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
+ if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
+ // As per BIP152, we only get 3 of our peers to announce
+ // blocks using compact encodings.
+ connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
+ connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ return true;
+ });
+ lNodesAnnouncingHeaderAndIDs.pop_front();
+ }
+ fAnnounceUsingCMPCTBLOCK = true;
+ connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
+ }
+}
+
+// Requires cs_main
+bool CanDirectFetch(const Consensus::Params &consensusParams)
+{
+ return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
+}
+
+// Requires cs_main
+bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
+{
+ if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
+ return true;
+ if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
+ return true;
+ return false;
+}
+
+/** Find the last common ancestor two blocks have.
+ * Both pa and pb must be non-NULL. */
+CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
+ if (pa->nHeight > pb->nHeight) {
+ pa = pa->GetAncestor(pb->nHeight);
+ } else if (pb->nHeight > pa->nHeight) {
+ pb = pb->GetAncestor(pa->nHeight);
+ }
+
+ while (pa != pb && pa && pb) {
+ pa = pa->pprev;
+ pb = pb->pprev;
+ }
+
+ // Eventually all chain branches meet at the genesis block.
+ assert(pa == pb);
+ return pa;
+}
+
+/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
+ * at most count entries. */
+void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
+ if (count == 0)
+ return;
+
+ vBlocks.reserve(vBlocks.size() + count);
+ CNodeState *state = State(nodeid);
+ assert(state != NULL);
+
+ // Make sure pindexBestKnownBlock is up to date, we'll need it.
+ ProcessBlockAvailability(nodeid);
+
+ if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
+ // This peer has nothing interesting.
+ return;
+ }
+
+ if (state->pindexLastCommonBlock == NULL) {
+ // Bootstrap quickly by guessing a parent of our best tip is the forking point.
+ // Guessing wrong in either direction is not a problem.
+ state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
+ }
+
+ // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
+ // of its current tip anymore. Go back enough to fix that.
+ state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
+ if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
+ return;
+
+ std::vector<CBlockIndex*> vToFetch;
+ CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
+ // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
+ // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
+ // download that next block if the window were 1 larger.
+ int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
+ int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
+ NodeId waitingfor = -1;
+ while (pindexWalk->nHeight < nMaxHeight) {
+ // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
+ // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
+ // as iterating over ~100 CBlockIndex* entries anyway.
+ int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
+ vToFetch.resize(nToFetch);
+ pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
+ vToFetch[nToFetch - 1] = pindexWalk;
+ for (unsigned int i = nToFetch - 1; i > 0; i--) {
+ vToFetch[i - 1] = vToFetch[i]->pprev;
+ }
+
+ // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
+ // are not yet downloaded and not in flight to vBlocks. In the meantime, update
+ // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
+ // already part of our chain (and therefore don't need it even if pruned).
+ BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
+ if (!pindex->IsValid(BLOCK_VALID_TREE)) {
+ // We consider the chain that this peer is on invalid.
+ return;
+ }
+ if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
+ // We wouldn't download this block or its descendants from this peer.
+ return;
+ }
+ if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
+ if (pindex->nChainTx)
+ state->pindexLastCommonBlock = pindex;
+ } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
+ // The block is not already downloaded, and not yet in flight.
+ if (pindex->nHeight > nWindowEnd) {
+ // We reached the end of the window.
+ if (vBlocks.size() == 0 && waitingfor != nodeid) {
+ // We aren't able to fetch anything, but we would be if the download window was one larger.
+ nodeStaller = waitingfor;
+ }
+ return;
+ }
+ vBlocks.push_back(pindex);
+ if (vBlocks.size() == count) {
+ return;
+ }
+ } else if (waitingfor == -1) {
+ // This is the first already-in-flight block.
+ waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
+ }
+ }
+ }
+}
+
+} // anon namespace
+
+bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
+ LOCK(cs_main);
+ CNodeState *state = State(nodeid);
+ if (state == NULL)
+ return false;
+ stats.nMisbehavior = state->nMisbehavior;
+ stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
+ stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
+ BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
+ if (queue.pindex)
+ stats.vHeightInFlight.push_back(queue.pindex->nHeight);
+ }
+ return true;
+}
+
+void RegisterNodeSignals(CNodeSignals& nodeSignals)
+{
+ nodeSignals.ProcessMessages.connect(&ProcessMessages);
+ nodeSignals.SendMessages.connect(&SendMessages);
+ nodeSignals.InitializeNode.connect(&InitializeNode);
+ nodeSignals.FinalizeNode.connect(&FinalizeNode);
+}
+
+void UnregisterNodeSignals(CNodeSignals& nodeSignals)
+{
+ nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
+ nodeSignals.SendMessages.disconnect(&SendMessages);
+ nodeSignals.InitializeNode.disconnect(&InitializeNode);
+ nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// mapOrphanTransactions
+//
+
+bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+ uint256 hash = tx.GetHash();
+ if (mapOrphanTransactions.count(hash))
+ return false;
+
+ // Ignore big transactions, to avoid a
+ // send-big-orphans memory exhaustion attack. If a peer has a legitimate
+ // large transaction with a missing parent then we assume
+ // it will rebroadcast it later, after the parent transaction(s)
+ // have been mined or received.
+ // 100 orphans, each of which is at most 99,999 bytes big, is
+ // at most 10 megabytes of orphans and somewhat more for the by-prev index (in the worst case):
+ unsigned int sz = GetTransactionWeight(tx);
+ if (sz >= MAX_STANDARD_TX_WEIGHT)
+ {
+ LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
+ return false;
+ }
+
+ auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
+ assert(ret.second);
+ BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
+ }
+
+ LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
+ mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
+ return true;
+}
+
+int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+ map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
+ if (it == mapOrphanTransactions.end())
+ return 0;
+ BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
+ {
+ auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
+ if (itPrev == mapOrphanTransactionsByPrev.end())
+ continue;
+ itPrev->second.erase(it);
+ if (itPrev->second.empty())
+ mapOrphanTransactionsByPrev.erase(itPrev);
+ }
+ mapOrphanTransactions.erase(it);
+ return 1;
+}
+
+void EraseOrphansFor(NodeId peer)
+{
+ int nErased = 0;
+ map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
+ while (iter != mapOrphanTransactions.end())
+ {
+ map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
+ if (maybeErase->second.fromPeer == peer)
+ {
+ nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
+ }
+ }
+ if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
+}
+
+
+unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+ unsigned int nEvicted = 0;
+ static int64_t nNextSweep;
+ int64_t nNow = GetTime();
+ if (nNextSweep <= nNow) {
+ // Sweep out expired orphan pool entries:
+ int nErased = 0;
+ int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
+ map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
+ while (iter != mapOrphanTransactions.end())
+ {
+ map<uint256, COrphanTx>::iterator maybeErase = iter++;
+ if (maybeErase->second.nTimeExpire <= nNow) {
+ nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
+ } else {
+ nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
+ }
+ }
+ // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
+ nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
+ if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
+ }
+ while (mapOrphanTransactions.size() > nMaxOrphans)
+ {
+ // Evict a random orphan:
+ uint256 randomhash = GetRandHash();
+ map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
+ if (it == mapOrphanTransactions.end())
+ it = mapOrphanTransactions.begin();
+ EraseOrphanTx(it->first);
+ ++nEvicted;
+ }
+ return nEvicted;
+}
+
+// Requires cs_main.
+void Misbehaving(NodeId pnode, int howmuch)
+{
+ if (howmuch == 0)
+ return;
+
+ CNodeState *state = State(pnode);
+ if (state == NULL)
+ return;
+
+ state->nMisbehavior += howmuch;
+ int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
+ if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
+ {
+ LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
+ state->fShouldBan = true;
+ } else
+ LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
+}
+
+
+
+
+
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// blockchain -> download logic notification
+//
+
+PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
+ // Initialize global variables that cannot be constructed at startup.
+ recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+}
+
+void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int nPosInBlock) {
+ if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK)
+ return;
+
+ LOCK(cs_main);
+
+ std::vector<uint256> vOrphanErase;
+ // Which orphan pool entries must we evict?
+ for (size_t j = 0; j < tx.vin.size(); j++) {
+ auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
+ if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
+ for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
+ const CTransaction& orphanTx = (*mi)->second.tx;
+ const uint256& orphanHash = orphanTx.GetHash();
+ vOrphanErase.push_back(orphanHash);
+ }
+ }
+
+ // Erase orphan transactions included or precluded by this block
+ if (vOrphanErase.size()) {
+ int nErased = 0;
+ BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
+ nErased += EraseOrphanTx(orphanHash);
+ }
+ LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
+ }
+}
+
+void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
+ const int nNewHeight = pindexNew->nHeight;
+ connman->SetBestHeight(nNewHeight);
+
+ if (!fInitialDownload) {
+ // Find the hashes of all blocks that weren't previously in the best chain.
+ std::vector<uint256> vHashes;
+ const CBlockIndex *pindexToAnnounce = pindexNew;
+ while (pindexToAnnounce != pindexFork) {
+ vHashes.push_back(pindexToAnnounce->GetBlockHash());
+ pindexToAnnounce = pindexToAnnounce->pprev;
+ if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
+ // Limit announcements in case of a huge reorganization.
+ // Rely on the peer's synchronization mechanism in that case.
+ break;
+ }
+ }
+ // Relay inventory, but don't relay old inventory during initial block download.
+ connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
+ if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
+ BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
+ pnode->PushBlockHash(hash);
+ }
+ }
+ });
+ }
+
+ nTimeBestReceived = GetTime();
+}
+
+void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
+ LOCK(cs_main);
+
+ const uint256 hash(block.GetHash());
+ std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
+
+ int nDoS = 0;
+ if (state.IsInvalid(nDoS)) {
+ if (it != mapBlockSource.end() && State(it->second.first)) {
+ assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
+ CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
+ State(it->second.first)->rejects.push_back(reject);
+ if (nDoS > 0 && it->second.second)
+ Misbehaving(it->second.first, nDoS);
+ }
+ }
+ if (it != mapBlockSource.end())
+ mapBlockSource.erase(it);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Messages
+//
+
+
+bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+ switch (inv.type)
+ {
+ case MSG_TX:
+ case MSG_WITNESS_TX:
+ {
+ assert(recentRejects);
+ if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
+ {
+ // If the chain tip has changed, previously rejected transactions
+ // might now be valid, e.g. due to a nLockTime'd tx becoming valid,
+ // or a double-spend. Reset the rejects filter and give those
+ // txs a second chance.
+ hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
+ recentRejects->reset();
+ }
+
+ // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude
+ // requesting or processing some txs which have already been included in a block
+ return recentRejects->contains(inv.hash) ||
+ mempool.exists(inv.hash) ||
+ mapOrphanTransactions.count(inv.hash) ||
+ pcoinsTip->HaveCoinsInCache(inv.hash);
+ }
+ case MSG_BLOCK:
+ case MSG_WITNESS_BLOCK:
+ return mapBlockIndex.count(inv.hash);
+ }
+ // Don't know what it is, just say we already got one
+ return true;
+}
+
+static void RelayTransaction(const CTransaction& tx, CConnman& connman)
+{
+ CInv inv(MSG_TX, tx.GetHash());
+ connman.ForEachNode([&inv](CNode* pnode)
+ {
+ pnode->PushInventory(inv);
+ });
+}
+
+static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
+{
+ unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
+
+ // Relay to a limited number of other nodes
+ // Use deterministic randomness to send to the same nodes for 24 hours
+ // at a time so the addrKnowns of the chosen nodes prevent repeats
+ uint64_t hashAddr = addr.GetHash();
+ const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
+ FastRandomContext insecure_rand;
+
+ std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
+ assert(nRelayNodes <= best.size());
+
+ auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
+ if (pnode->nVersion >= CADDR_TIME_VERSION) {
+ uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
+ for (unsigned int i = 0; i < nRelayNodes; i++) {
+ if (hashKey > best[i].first) {
+ std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
+ best[i] = std::make_pair(hashKey, pnode);
+ break;
+ }
+ }
+ }
+ };
+
+ auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
+ for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
+ best[i].second->PushAddress(addr, insecure_rand);
+ }
+ };
+
+ connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
+}
+
+void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman)
+{
+ std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
+ unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
+ vector<CInv> vNotFound;
+ CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ LOCK(cs_main);
+
+ while (it != pfrom->vRecvGetData.end()) {
+ // Don't bother if send buffer is too full to respond anyway
+ if (pfrom->nSendSize >= nMaxSendBufferSize)
+ break;
+
+ const CInv &inv = *it;
+ {
+ boost::this_thread::interruption_point();
+ it++;
+
+ if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
+ {
+ bool send = false;
+ BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+ if (mi != mapBlockIndex.end())
+ {
+ if (chainActive.Contains(mi->second)) {
+ send = true;
+ } else {
+ static const int nOneMonth = 30 * 24 * 60 * 60;
+ // To prevent fingerprinting attacks, only send blocks outside of the active
+ // chain if they are valid, and no more than a month older (both in time, and in
+ // best equivalent proof of work) than the best header chain we know about.
+ send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
+ (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
+ (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
+ if (!send) {
+ LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
+ }
+ }
+ }
+ // disconnect node in case we have reached the outbound limit for serving historical blocks
+ // never disconnect whitelisted nodes
+ static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
+ if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
+ {
+ LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+
+ //disconnect node
+ pfrom->fDisconnect = true;
+ send = false;
+ }
+ // Pruned nodes may have deleted the block, so check whether
+ // it's available before trying to send.
+ if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
+ {
+ // Send block from disk
+ CBlock block;
+ if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
+ assert(!"cannot load block from disk");
+ if (inv.type == MSG_BLOCK)
+ connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block));
+ else if (inv.type == MSG_WITNESS_BLOCK)
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, block));
+ else if (inv.type == MSG_FILTERED_BLOCK)
+ {
+ bool sendMerkleBlock = false;
+ CMerkleBlock merkleBlock;
+ {
+ LOCK(pfrom->cs_filter);
+ if (pfrom->pfilter) {
+ sendMerkleBlock = true;
+ merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
+ }
+ }
+ if (sendMerkleBlock) {
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
+ // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
+ // This avoids hurting performance by pointlessly requiring a round-trip
+ // Note that there is currently no way for a node to request any single transaction we didn't send here -
+ // they must either disconnect and retry or request the full block.
+ // Thus, the protocol as specified allows us to provide duplicate txn here,
+ // however we MUST always provide at least what the remote peer needs
+ typedef std::pair<unsigned int, uint256> PairType;
+ BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
+ connman.PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *block.vtx[pair.first]));
+ }
+ // else
+ // no response
+ }
+ else if (inv.type == MSG_CMPCT_BLOCK)
+ {
+ // If a peer is asking for old blocks, we're almost guaranteed
+ // they won't have a useful mempool to match against a compact block,
+ // and we don't feel like constructing the object for them, so
+ // instead we respond with the full, non-compact block.
+ bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
+ int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
+ if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
+ CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
+ connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ } else
+ connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, block));
+ }
+
+ // Trigger the peer node to send a getblocks request for the next batch of inventory
+ if (inv.hash == pfrom->hashContinue)
+ {
+ // Bypass PushInventory, this must send even if redundant,
+ // and we want it right after the last block so they don't
+ // wait for other stuff first.
+ vector<CInv> vInv;
+ vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
+ pfrom->hashContinue.SetNull();
+ }
+ }
+ }
+ else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
+ {
+ // Send stream from relay memory
+ bool push = false;
+ auto mi = mapRelay.find(inv.hash);
+ int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
+ if (mi != mapRelay.end()) {
+ connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+ push = true;
+ } else if (pfrom->timeLastMempoolReq) {
+ auto txinfo = mempool.info(inv.hash);
+ // To protect privacy, do not answer getdata using the mempool when
+ // that TX couldn't have been INVed in reply to a MEMPOOL request.
+ if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
+ connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
+ push = true;
+ }
+ }
+ if (!push) {
+ vNotFound.push_back(inv);
+ }
+ }
+
+ // Track requests for our stuff.
+ GetMainSignals().Inventory(inv.hash);
+
+ if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
+ break;
+ }
+ }
+
+ pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
+
+ if (!vNotFound.empty()) {
+ // Let the peer know that we didn't find what it asked for, so it doesn't
+ // have to wait around forever. Currently only SPV clients actually care
+ // about this message: it's needed when they are recursively walking the
+ // dependencies of relevant unconfirmed transactions. SPV clients want to
+ // do that because they want to know about (and store and rebroadcast and
+ // risk analyze) the dependencies of transactions relevant to them, without
+ // having to download the entire memory pool.
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
+ }
+}
+
+uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
+ uint32_t nFetchFlags = 0;
+ if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
+ nFetchFlags |= MSG_WITNESS_FLAG;
+ }
+ return nFetchFlags;
+}
+
+bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman)
+{
+ unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
+
+ LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
+ if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
+ {
+ LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
+ return true;
+ }
+
+
+ if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
+ (strCommand == NetMsgType::FILTERLOAD ||
+ strCommand == NetMsgType::FILTERADD))
+ {
+ if (pfrom->nVersion >= NO_BLOOM_VERSION) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 100);
+ return false;
+ } else {
+ pfrom->fDisconnect = true;
+ return false;
+ }
+ }
+
+
+ if (strCommand == NetMsgType::VERSION)
+ {
+ // Each connection can only send one version message
+ if (pfrom->nVersion != 0)
+ {
+ connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message")));
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 1);
+ return false;
+ }
+
+ int64_t nTime;
+ CAddress addrMe;
+ CAddress addrFrom;
+ uint64_t nNonce = 1;
+ uint64_t nServiceInt;
+ vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
+ pfrom->nServices = ServiceFlags(nServiceInt);
+ if (!pfrom->fInbound)
+ {
+ connman.SetServices(pfrom->addr, pfrom->nServices);
+ }
+ if (pfrom->nServicesExpected & ~pfrom->nServices)
+ {
+ LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
+ connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
+ strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
+ pfrom->fDisconnect = true;
+ return false;
+ }
+
+ if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
+ {
+ // disconnect from peers older than this proto version
+ LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
+ connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
+ strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
+ pfrom->fDisconnect = true;
+ return false;
+ }
+
+ if (pfrom->nVersion == 10300)
+ pfrom->nVersion = 300;
+ if (!vRecv.empty())
+ vRecv >> addrFrom >> nNonce;
+ if (!vRecv.empty()) {
+ vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
+ pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
+ }
+ if (!vRecv.empty()) {
+ vRecv >> pfrom->nStartingHeight;
+ }
+ {
+ LOCK(pfrom->cs_filter);
+ if (!vRecv.empty())
+ vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
+ else
+ pfrom->fRelayTxes = true;
+ }
+
+ // Disconnect if we connected to ourself
+ if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
+ {
+ LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
+ pfrom->fDisconnect = true;
+ return true;
+ }
+
+ pfrom->addrLocal = addrMe;
+ if (pfrom->fInbound && addrMe.IsRoutable())
+ {
+ SeenLocal(addrMe);
+ }
+
+ // Be shy and don't send version until we hear
+ if (pfrom->fInbound)
+ PushNodeVersion(pfrom, connman, GetAdjustedTime());
+
+ pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
+
+ if((pfrom->nServices & NODE_WITNESS))
+ {
+ LOCK(cs_main);
+ State(pfrom->GetId())->fHaveWitness = true;
+ }
+
+ // Potentially mark this peer as a preferred download peer.
+ {
+ LOCK(cs_main);
+ UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
+ }
+
+ // Change version
+ connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
+ int nSendVersion = std::min(pfrom->nVersion, PROTOCOL_VERSION);
+ pfrom->SetSendVersion(nSendVersion);
+
+ if (!pfrom->fInbound)
+ {
+ // Advertise our address
+ if (fListen && !IsInitialBlockDownload())
+ {
+ CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
+ FastRandomContext insecure_rand;
+ if (addr.IsRoutable())
+ {
+ LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
+ pfrom->PushAddress(addr, insecure_rand);
+ } else if (IsPeerAddrLocalGood(pfrom)) {
+ addr.SetIP(pfrom->addrLocal);
+ LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
+ pfrom->PushAddress(addr, insecure_rand);
+ }
+ }
+
+ // Get recent addresses
+ if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
+ {
+ connman.PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
+ pfrom->fGetAddr = true;
+ }
+ connman.MarkAddressGood(pfrom->addr);
+ }
+
+ pfrom->fSuccessfullyConnected = true;
+
+ string remoteAddr;
+ if (fLogIPs)
+ remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
+
+ LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
+ pfrom->cleanSubVer, pfrom->nVersion,
+ pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
+ remoteAddr);
+
+ int64_t nTimeOffset = nTime - GetTime();
+ pfrom->nTimeOffset = nTimeOffset;
+ AddTimeData(pfrom->addr, nTimeOffset);
+
+    // Feeler connections exist only to verify whether an address is online.
+ if (pfrom->fFeeler) {
+ assert(pfrom->fInbound == false);
+ pfrom->fDisconnect = true;
+ }
+ return true;
+ }
+
+
+ else if (pfrom->nVersion == 0)
+ {
+ // Must have a version message before anything else
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 1);
+ return false;
+ }
+
+ // At this point, the outgoing message serialization version can't change.
+ CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+
+ if (strCommand == NetMsgType::VERACK)
+ {
+ pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
+
+ if (!pfrom->fInbound) {
+ // Mark this node as currently connected, so we update its timestamp later.
+ LOCK(cs_main);
+ State(pfrom->GetId())->fCurrentlyConnected = true;
+ }
+
+ if (pfrom->nVersion >= SENDHEADERS_VERSION) {
+        // Tell our peer we prefer to receive headers rather than invs
+ // We send this to non-NODE NETWORK peers as well, because even
+ // non-NODE NETWORK peers can announce blocks (such as pruning
+ // nodes)
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
+ }
+ if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
+ // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
+ // However, we do not request new block announcements using
+ // cmpctblock messages.
+ // We send this to non-NODE NETWORK peers as well, because
+ // they may wish to request compact blocks from us
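+            // When we are witness-capable we announce version 2 first and then version 1;
+            // a peer running this code locks in whichever acceptable version it processes
+            // first (see the SENDCMPCT handler below).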
+ bool fAnnounceUsingCMPCTBLOCK = false;
+ uint64_t nCMPCTBLOCKVersion = 2;
+ if (pfrom->GetLocalServices() & NODE_WITNESS)
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ nCMPCTBLOCKVersion = 1;
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::ADDR)
+ {
+ vector<CAddress> vAddr;
+ vRecv >> vAddr;
+
+ // Don't want addr from older versions unless seeding
+ if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
+ return true;
+ if (vAddr.size() > 1000)
+ {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 20);
+ return error("message addr size() = %u", vAddr.size());
+ }
+
+ // Store the new addresses
+ vector<CAddress> vAddrOk;
+ int64_t nNow = GetAdjustedTime();
+ int64_t nSince = nNow - 10 * 60;
+ BOOST_FOREACH(CAddress& addr, vAddr)
+ {
+ boost::this_thread::interruption_point();
+
+ if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+ continue;
+
+ if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
+ addr.nTime = nNow - 5 * 24 * 60 * 60;
+ pfrom->AddAddressKnown(addr);
+ bool fReachable = IsReachable(addr);
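+            // Only relay addresses that are fresh (timestamped within the last ten
+            // minutes), arrived unsolicited in a small addr message, and are routable.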
+ if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
+ {
+ // Relay to a limited number of other nodes
+ RelayAddress(addr, fReachable, connman);
+ }
+ // Do not store addresses outside our network
+ if (fReachable)
+ vAddrOk.push_back(addr);
+ }
+ connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
+ if (vAddr.size() < 1000)
+ pfrom->fGetAddr = false;
+ if (pfrom->fOneShot)
+ pfrom->fDisconnect = true;
+ }
+
+ else if (strCommand == NetMsgType::SENDHEADERS)
+ {
+ LOCK(cs_main);
+ State(pfrom->GetId())->fPreferHeaders = true;
+ }
+
+ else if (strCommand == NetMsgType::SENDCMPCT)
+ {
+ bool fAnnounceUsingCMPCTBLOCK = false;
+ uint64_t nCMPCTBLOCKVersion = 0;
+ vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
+ if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
+ LOCK(cs_main);
+ // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
+ if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
+ State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
+ State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
+ }
+ if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
+ State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
+ if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
+ if (pfrom->GetLocalServices() & NODE_WITNESS)
+ State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
+ else
+ State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
+ }
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::INV)
+ {
+ vector<CInv> vInv;
+ vRecv >> vInv;
+ if (vInv.size() > MAX_INV_SZ)
+ {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 20);
+ return error("message inv size() = %u", vInv.size());
+ }
+
+ bool fBlocksOnly = !fRelayTxes;
+
+ // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
+ if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
+ fBlocksOnly = false;
+
+ LOCK(cs_main);
+
+ uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());
+
+ std::vector<CInv> vToFetch;
+
+ for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
+ {
+ CInv &inv = vInv[nInv];
+
+ boost::this_thread::interruption_point();
+
+ bool fAlreadyHave = AlreadyHave(inv);
+ LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
+
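+            // When both we and the peer are witness-capable (see GetFetchFlags), add
+            // MSG_WITNESS_FLAG so any eventual getdata asks for the witness serialization.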
+ if (inv.type == MSG_TX) {
+ inv.type |= nFetchFlags;
+ }
+
+ if (inv.type == MSG_BLOCK) {
+ UpdateBlockAvailability(pfrom->GetId(), inv.hash);
+ if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
+                // We used to request the full block here, but since headers announcements are now the
+                // primary method of block announcement on the network, and since a peer falling back to
+                // an inv likely indicates a reorg whose headers we should fetch first, we now only send
+                // a getheaders response here. When we receive the headers, we will then ask for the
+                // blocks we need.
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
+ LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
+ }
+ }
+ else
+ {
+ pfrom->AddInventoryKnown(inv);
+ if (fBlocksOnly)
+ LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
+ else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())
+ pfrom->AskFor(inv);
+ }
+
+ // Track requests for our stuff
+ GetMainSignals().Inventory(inv.hash);
+
+ if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) {
+ Misbehaving(pfrom->GetId(), 50);
+ return error("send buffer size() = %u", pfrom->nSendSize);
+ }
+ }
+
+ if (!vToFetch.empty())
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vToFetch));
+ }
+
+
+ else if (strCommand == NetMsgType::GETDATA)
+ {
+ vector<CInv> vInv;
+ vRecv >> vInv;
+ if (vInv.size() > MAX_INV_SZ)
+ {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 20);
+ return error("message getdata size() = %u", vInv.size());
+ }
+
+ if (fDebug || (vInv.size() != 1))
+ LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
+
+ if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
+ LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
+
+ pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
+ ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
+ }
+
+
+ else if (strCommand == NetMsgType::GETBLOCKS)
+ {
+ CBlockLocator locator;
+ uint256 hashStop;
+ vRecv >> locator >> hashStop;
+
+ LOCK(cs_main);
+
+ // Find the last block the caller has in the main chain
+ CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
+
+ // Send the rest of the chain
+ if (pindex)
+ pindex = chainActive.Next(pindex);
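+        // Reply with at most 500 block invs; if the limit is hit, hashContinue is set
+        // below so that serving the final inv triggers the peer's next getblocks.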
+ int nLimit = 500;
+ LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
+ for (; pindex; pindex = chainActive.Next(pindex))
+ {
+ if (pindex->GetBlockHash() == hashStop)
+ {
+ LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ break;
+ }
+            // If pruning, don't inv blocks unless we have them on disk and are likely to still
+            // have them for the reasonable time window (1 hour) that block relay might require.
+ const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
+ if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
+ {
+ LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ break;
+ }
+ pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
+ if (--nLimit <= 0)
+ {
+ // When this block is requested, we'll send an inv that'll
+ // trigger the peer to getblocks the next batch of inventory.
+ LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ pfrom->hashContinue = pindex->GetBlockHash();
+ break;
+ }
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::GETBLOCKTXN)
+ {
+ BlockTransactionsRequest req;
+ vRecv >> req;
+
+ LOCK(cs_main);
+
+ BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
+ if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
+ LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);
+ return true;
+ }
+
+ if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
+ // If an older block is requested (should never happen in practice,
+ // but can happen in tests) send a block response instead of a
+ // blocktxn response. Sending a full block response instead of a
+ // small blocktxn response is preferable in the case where a peer
+ // might maliciously send lots of getblocktxn requests to trigger
+ // expensive disk reads, because it will require the peer to
+ // actually receive all the data read from disk over the network.
+ LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
+ CInv inv;
+ inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
+ inv.hash = req.blockhash;
+ pfrom->vRecvGetData.push_back(inv);
+ ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
+ return true;
+ }
+
+ CBlock block;
+ assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));
+
+ BlockTransactions resp(req);
+ for (size_t i = 0; i < req.indexes.size(); i++) {
+ if (req.indexes[i] >= block.vtx.size()) {
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
+ return true;
+ }
+ resp.txn[i] = block.vtx[req.indexes[i]];
+ }
+ int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
+ connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
+ }
+
+
+ else if (strCommand == NetMsgType::GETHEADERS)
+ {
+ CBlockLocator locator;
+ uint256 hashStop;
+ vRecv >> locator >> hashStop;
+
+ LOCK(cs_main);
+ if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
+ LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
+ return true;
+ }
+
+ CNodeState *nodestate = State(pfrom->GetId());
+ CBlockIndex* pindex = NULL;
+ if (locator.IsNull())
+ {
+ // If locator is null, return the hashStop block
+ BlockMap::iterator mi = mapBlockIndex.find(hashStop);
+ if (mi == mapBlockIndex.end())
+ return true;
+ pindex = (*mi).second;
+ }
+ else
+ {
+ // Find the last block the caller has in the main chain
+ pindex = FindForkInGlobalIndex(chainActive, locator);
+ if (pindex)
+ pindex = chainActive.Next(pindex);
+ }
+
+ // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
+ vector<CBlock> vHeaders;
+ int nLimit = MAX_HEADERS_RESULTS;
+ LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
+ for (; pindex; pindex = chainActive.Next(pindex))
+ {
+ vHeaders.push_back(pindex->GetBlockHeader());
+ if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
+ break;
+ }
+ // pindex can be NULL either if we sent chainActive.Tip() OR
+ // if our peer has chainActive.Tip() (and thus we are sending an empty
+ // headers message). In both cases it's safe to update
+ // pindexBestHeaderSent to be our tip.
+ nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ }
+
+
+ else if (strCommand == NetMsgType::TX)
+ {
+        // Stop processing the transaction early if we are in blocks-only mode and the
+        // peer is either not whitelisted or whitelistrelay is off
+ if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
+ {
+ LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
+ return true;
+ }
+
+ deque<COutPoint> vWorkQueue;
+ vector<uint256> vEraseQueue;
+ CTransaction tx(deserialize, vRecv);
+
+ CInv inv(MSG_TX, tx.GetHash());
+ pfrom->AddInventoryKnown(inv);
+
+ LOCK(cs_main);
+
+ bool fMissingInputs = false;
+ CValidationState state;
+
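+        // The transaction has now arrived, so stop tracking it as an outstanding
+        // getdata request for this peer.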
+ pfrom->setAskFor.erase(inv.hash);
+ mapAlreadyAskedFor.erase(inv.hash);
+
+ if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
+ mempool.check(pcoinsTip);
+ RelayTransaction(tx, connman);
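+            // Seed the work queue with this transaction's outpoints so that any orphans
+            // spending them are reconsidered below.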
+ for (unsigned int i = 0; i < tx.vout.size(); i++) {
+ vWorkQueue.emplace_back(inv.hash, i);
+ }
+
+ pfrom->nLastTXTime = GetTime();
+
+ LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
+ pfrom->id,
+ tx.GetHash().ToString(),
+ mempool.size(), mempool.DynamicMemoryUsage() / 1000);
+
+ // Recursively process any orphan transactions that depended on this one
+ set<NodeId> setMisbehaving;
+ while (!vWorkQueue.empty()) {
+ auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
+ vWorkQueue.pop_front();
+ if (itByPrev == mapOrphanTransactionsByPrev.end())
+ continue;
+ for (auto mi = itByPrev->second.begin();
+ mi != itByPrev->second.end();
+ ++mi)
+ {
+ const CTransaction& orphanTx = (*mi)->second.tx;
+ const uint256& orphanHash = orphanTx.GetHash();
+ NodeId fromPeer = (*mi)->second.fromPeer;
+ bool fMissingInputs2 = false;
+                // Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
+ // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
+ // anyone relaying LegitTxX banned)
+ CValidationState stateDummy;
+
+
+ if (setMisbehaving.count(fromPeer))
+ continue;
+ if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
+ LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
+ RelayTransaction(orphanTx, connman);
+ for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
+ vWorkQueue.emplace_back(orphanHash, i);
+ }
+ vEraseQueue.push_back(orphanHash);
+ }
+ else if (!fMissingInputs2)
+ {
+ int nDos = 0;
+ if (stateDummy.IsInvalid(nDos) && nDos > 0)
+ {
+ // Punish peer that gave us an invalid orphan tx
+ Misbehaving(fromPeer, nDos);
+ setMisbehaving.insert(fromPeer);
+ LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
+ }
+ // Has inputs but not accepted to mempool
+ // Probably non-standard or insufficient fee/priority
+ LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
+ vEraseQueue.push_back(orphanHash);
+ if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) {
+ // Do not use rejection cache for witness transactions or
+ // witness-stripped transactions, as they can have been malleated.
+ // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
+ assert(recentRejects);
+ recentRejects->insert(orphanHash);
+ }
+ }
+ mempool.check(pcoinsTip);
+ }
+ }
+
+ BOOST_FOREACH(uint256 hash, vEraseQueue)
+ EraseOrphanTx(hash);
+ }
+ else if (fMissingInputs)
+ {
+        bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
+ BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ if (recentRejects->contains(txin.prevout.hash)) {
+ fRejectedParents = true;
+ break;
+ }
+ }
+ if (!fRejectedParents) {
+ uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());
+ BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
+ pfrom->AddInventoryKnown(_inv);
+ if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
+ }
+ AddOrphanTx(tx, pfrom->GetId());
+
+ // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
+ unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
+ unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
+ if (nEvicted > 0)
+ LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
+ } else {
+ LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
+ }
+ } else {
+ if (tx.wit.IsNull() && !state.CorruptionPossible()) {
+ // Do not use rejection cache for witness transactions or
+ // witness-stripped transactions, as they can have been malleated.
+ // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
+ assert(recentRejects);
+ recentRejects->insert(tx.GetHash());
+ }
+
+ if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
+ // Always relay transactions received from whitelisted peers, even
+ // if they were already in the mempool or rejected from it due
+ // to policy, allowing the node to function as a gateway for
+ // nodes hidden behind it.
+ //
+ // Never relay transactions that we would assign a non-zero DoS
+ // score for, as we expect peers to do the same with us in that
+ // case.
+ int nDoS = 0;
+ if (!state.IsInvalid(nDoS) || nDoS == 0) {
+ LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
+ RelayTransaction(tx, connman);
+ } else {
+ LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
+ }
+ }
+ }
+ int nDoS = 0;
+ if (state.IsInvalid(nDoS))
+ {
+ LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
+ pfrom->id,
+ FormatStateMessage(state));
+ if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+ state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
+ if (nDoS > 0) {
+ Misbehaving(pfrom->GetId(), nDoS);
+ }
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
+ {
+ CBlockHeaderAndShortTxIDs cmpctblock;
+ vRecv >> cmpctblock;
+
+ {
+ LOCK(cs_main);
+
+ if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
+            // The header doesn't connect (or is the genesis block); request deeper headers instead of DoSing in AcceptBlockHeader
+ if (!IsInitialBlockDownload())
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+ return true;
+ }
+ }
+
+ CBlockIndex *pindex = NULL;
+ CValidationState state;
+ if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
+ int nDoS;
+ if (state.IsInvalid(nDoS)) {
+ if (nDoS > 0) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), nDoS);
+ }
+ LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id);
+ return true;
+ }
+ }
+
+ LOCK(cs_main);
+ // If AcceptBlockHeader returned true, it set pindex
+ assert(pindex);
+ UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
+
+ std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
+ bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
+
+ if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
+ return true;
+
+ if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
+ pindex->nTx != 0) { // We had this block at some point, but pruned it
+ if (fAlreadyInFlight) {
+            // We requested this block for some reason, but our mempool will probably be useless
+            // for reconstructing it, so just fetch the block via a normal getdata
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ }
+ return true;
+ }
+
+ // If we're not close to tip yet, give up and let parallel block fetch work its magic
+ if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
+ return true;
+
+ CNodeState *nodestate = State(pfrom->GetId());
+
+ if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
+ // Don't bother trying to process compact blocks from v1 peers
+ // after segwit activates.
+ return true;
+ }
+
+ // We want to be a bit conservative just to be extra careful about DoS
+ // possibilities in compact block processing...
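+    // Only attempt compact-block reconstruction for blocks within two of our tip;
+    // anything further ahead is handled like a normal headers/getdata announcement below.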
+ if (pindex->nHeight <= chainActive.Height() + 2) {
+ if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
+ (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
+ list<QueuedBlock>::iterator *queuedBlockIt = NULL;
+ if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) {
+ if (!(*queuedBlockIt)->partialBlock)
+ (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
+ else {
+ // The block was already in flight using compact blocks from the same peer
+ LogPrint("net", "Peer sent us compact block we were already syncing!\n");
+ return true;
+ }
+ }
+
+ PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
+ ReadStatus status = partialBlock.InitData(cmpctblock);
+ if (status == READ_STATUS_INVALID) {
+ MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
+ return true;
+ } else if (status == READ_STATUS_FAILED) {
+ // Duplicate txindexes, the block is now in-flight, so just request it
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ return true;
+ }
+
+ if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // We seem to be rather well-synced, so it appears pfrom was the first to provide us
+ // with this block! Let's get them to announce using compact blocks in the future.
+ MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
+ }
+
+ BlockTransactionsRequest req;
+ for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
+ if (!partialBlock.IsTxAvailable(i))
+ req.indexes.push_back(i);
+ }
+ if (req.indexes.empty()) {
+                    // Dirty hack to jump to the BLOCKTXN code (TODO: move message handlers into their own functions)
+ BlockTransactions txn;
+ txn.blockhash = cmpctblock.header.GetHash();
+ CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
+ blockTxnMsg << txn;
+ return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
+ } else {
+ req.blockhash = pindex->GetBlockHash();
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
+ }
+ }
+ } else {
+ if (fAlreadyInFlight) {
+            // We requested this block, but it's far ahead of our tip, so our
+            // mempool will probably be useless - request the block normally
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ return true;
+ } else {
+ // If this was an announce-cmpctblock, we want the same treatment as a header message
+            // Dirty hack to process as if it were just a headers message (TODO: move message handlers into their own functions)
+ std::vector<CBlock> headers;
+ headers.push_back(cmpctblock.header);
+ CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
+ vHeadersMsg << headers;
+ return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);
+ }
+ }
+ }
+
+ else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
+ {
+ BlockTransactions resp;
+ vRecv >> resp;
+
+ std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
+ bool fBlockRead = false;
+ {
+ LOCK(cs_main);
+
+ map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
+ if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
+ it->second.first != pfrom->GetId()) {
+ LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
+ return true;
+ }
+
+ PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
+ ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
+ if (status == READ_STATUS_INVALID) {
+ MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
+ return true;
+ } else if (status == READ_STATUS_FAILED) {
+ // Might have collided, fall back to getdata now :(
+ std::vector<CInv> invs;
+ invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
+ } else {
+ // Block is either okay, or possibly we received
+ // READ_STATUS_CHECKBLOCK_FAILED.
+ // Note that CheckBlock can only fail for one of a few reasons:
+ // 1. bad-proof-of-work (impossible here, because we've already
+ // accepted the header)
+ // 2. merkleroot doesn't match the transactions given (already
+ // caught in FillBlock with READ_STATUS_FAILED, so
+ // impossible here)
+ // 3. the block is otherwise invalid (eg invalid coinbase,
+ // block is too big, too many legacy sigops, etc).
+ // So if CheckBlock failed, #3 is the only possibility.
+ // Under BIP 152, we don't DoS-ban unless proof of work is
+ // invalid (we don't require all the stateless checks to have
+ // been run). This is handled below, so just treat this as
+ // though the block was successfully read, and rely on the
+ // handling in ProcessNewBlock to ensure the block index is
+ // updated, reject messages go out, etc.
+ MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
+ fBlockRead = true;
+ // mapBlockSource is only used for sending reject messages and DoS scores,
+ // so the race between here and cs_main in ProcessNewBlock is fine.
+ // BIP 152 permits peers to relay compact blocks after validating
+ // the header only; we should not punish peers if the block turns
+ // out to be invalid.
+ mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
+ }
+ } // Don't hold cs_main when we call into ProcessNewBlock
+ if (fBlockRead) {
+ bool fNewBlock = false;
+ // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
+ // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
+ ProcessNewBlock(chainparams, pblock, true, NULL, &fNewBlock);
+ if (fNewBlock)
+ pfrom->nLastBlockTime = GetTime();
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
+ {
+ std::vector<CBlockHeader> headers;
+
+ // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
+ unsigned int nCount = ReadCompactSize(vRecv);
+ if (nCount > MAX_HEADERS_RESULTS) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 20);
+ return error("headers message size = %u", nCount);
+ }
+ headers.resize(nCount);
+ for (unsigned int n = 0; n < nCount; n++) {
+ vRecv >> headers[n];
+ ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
+ }
+
+ if (nCount == 0) {
+        // Nothing interesting. Stop asking this peer for more headers.
+ return true;
+ }
+
+ CBlockIndex *pindexLast = NULL;
+ {
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom->GetId());
+
+ // If this looks like it could be a block announcement (nCount <
+ // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
+ // don't connect:
+ // - Send a getheaders message in response to try to connect the chain.
+ // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+            //   don't connect before we give it DoS points
+ // - Once a headers message is received that is valid and does connect,
+ // nUnconnectingHeaders gets reset back to 0.
+ if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
+ nodestate->nUnconnectingHeaders++;
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+ LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+ headers[0].GetHash().ToString(),
+ headers[0].hashPrevBlock.ToString(),
+ pindexBestHeader->nHeight,
+ pfrom->id, nodestate->nUnconnectingHeaders);
+ // Set hashLastUnknownBlock for this peer, so that if we
+ // eventually get the headers - even from a different peer -
+ // we can use this peer to download.
+ UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
+
+ if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
+ Misbehaving(pfrom->GetId(), 20);
+ }
+ return true;
+ }
+
+ uint256 hashLastBlock;
+ for (const CBlockHeader& header : headers) {
+ if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
+ Misbehaving(pfrom->GetId(), 20);
+ return error("non-continuous headers sequence");
+ }
+ hashLastBlock = header.GetHash();
+ }
+ }
+
+ CValidationState state;
+ if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
+ int nDoS;
+ if (state.IsInvalid(nDoS)) {
+ if (nDoS > 0) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), nDoS);
+ }
+ return error("invalid header received");
+ }
+ }
+
+ {
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom->GetId());
+ if (nodestate->nUnconnectingHeaders > 0) {
+ LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
+ }
+ nodestate->nUnconnectingHeaders = 0;
+
+ assert(pindexLast);
+ UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
+
+ if (nCount == MAX_HEADERS_RESULTS) {
+ // Headers message had its maximum size; the peer may have more headers.
+ // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
+ // from there instead.
+ LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
+ }
+
+ bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
+ // If this set of headers is valid and ends in a block with at least as
+ // much work as our tip, download as much as possible.
+ if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
+ vector<CBlockIndex *> vToFetch;
+ CBlockIndex *pindexWalk = pindexLast;
+ // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+ while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+ !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
+ (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
+ // We don't have this block, and it's not yet in flight.
+ vToFetch.push_back(pindexWalk);
+ }
+ pindexWalk = pindexWalk->pprev;
+ }
+ // If pindexWalk still isn't on our main chain, we're looking at a
+ // very large reorg at a time we think we're close to caught up to
+ // the main chain -- this shouldn't really happen. Bail out on the
+ // direct fetch and rely on parallel download instead.
+ if (!chainActive.Contains(pindexWalk)) {
+ LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
+ pindexLast->GetBlockHash().ToString(),
+ pindexLast->nHeight);
+ } else {
+ vector<CInv> vGetData;
+ // Download as much as possible, from earliest to latest.
+ BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
+ if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ // Can't download any more from this peer
+ break;
+ }
+ uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
+ LogPrint("net", "Requesting block %s from peer=%d\n",
+ pindex->GetBlockHash().ToString(), pfrom->id);
+ }
+ if (vGetData.size() > 1) {
+ LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
+ pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+ }
+ if (vGetData.size() > 0) {
+ if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // We seem to be rather well-synced, so it appears pfrom was the first to provide us
+ // with this block! Let's get them to announce using compact blocks in the future.
+ MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
+ // In any case, we want to download using a compact block, not a regular one
+ vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+ }
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ }
+ }
+ }
+ }
+ }
+
+ else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
+ {
+ std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
+ vRecv >> *pblock;
+
+ LogPrint("net", "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id);
+
+ // Process all blocks from whitelisted peers, even if not requested,
+ // unless we're still syncing with the network.
+ // Such an unrequested block may still be processed, subject to the
+ // conditions in AcceptBlock().
+ bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
+ const uint256 hash(pblock->GetHash());
+ {
+ LOCK(cs_main);
+ // Also always process if we requested the block explicitly, as we may
+ // need it even though it is not a candidate for a new best tip.
+ forceProcessing |= MarkBlockAsReceived(hash);
+ // mapBlockSource is only used for sending reject messages and DoS scores,
+ // so the race between here and cs_main in ProcessNewBlock is fine.
+ mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
+ }
+ bool fNewBlock = false;
+ ProcessNewBlock(chainparams, pblock, forceProcessing, NULL, &fNewBlock);
+ if (fNewBlock)
+ pfrom->nLastBlockTime = GetTime();
+ }
+
+
+ else if (strCommand == NetMsgType::GETADDR)
+ {
+ // This asymmetric behavior for inbound and outbound connections was introduced
+ // to prevent a fingerprinting attack: an attacker can send specific fake addresses
+ // to users' AddrMan and later request them by sending getaddr messages.
+ // Making nodes which are behind NAT and can only make outgoing connections ignore
+ // the getaddr message mitigates the attack.
+ if (!pfrom->fInbound) {
+ LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
+ return true;
+ }
+
+ // Only send one GetAddr response per connection to reduce resource waste
+ // and discourage addr stamping of INV announcements.
+ if (pfrom->fSentAddr) {
+ LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
+ return true;
+ }
+ pfrom->fSentAddr = true;
+
+ pfrom->vAddrToSend.clear();
+ vector<CAddress> vAddr = connman.GetAddresses();
+ FastRandomContext insecure_rand;
+ BOOST_FOREACH(const CAddress &addr, vAddr)
+ pfrom->PushAddress(addr, insecure_rand);
+ }
+
+
+ else if (strCommand == NetMsgType::MEMPOOL)
+ {
+ if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
+ {
+ LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
+ pfrom->fDisconnect = true;
+ return true;
+ }
+
+ if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
+ {
+ LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
+ pfrom->fDisconnect = true;
+ return true;
+ }
+
+ LOCK(pfrom->cs_inventory);
+ pfrom->fSendMempool = true;
+ }
+
+
+ else if (strCommand == NetMsgType::PING)
+ {
+ if (pfrom->nVersion > BIP0031_VERSION)
+ {
+ uint64_t nonce = 0;
+ vRecv >> nonce;
+ // Echo the message back with the nonce. This allows for two useful features:
+ //
+ // 1) A remote node can quickly check if the connection is operational
+ // 2) Remote nodes can measure the latency of the network thread. If this node
+ // is overloaded it won't respond to pings quickly and the remote node can
+ // avoid sending us more work, like chain download requests.
+ //
+ // The nonce stops the remote getting confused between different pings: without
+ // it, if the remote node sends a ping once per second and this node takes 5
+ // seconds to respond to each, the 5th ping the remote sends would appear to
+ // return very quickly.
+ connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::PONG)
+ {
+ int64_t pingUsecEnd = nTimeReceived;
+ uint64_t nonce = 0;
+ size_t nAvail = vRecv.in_avail();
+ bool bPingFinished = false;
+ std::string sProblem;
+
+ if (nAvail >= sizeof(nonce)) {
+ vRecv >> nonce;
+
+ // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
+ if (pfrom->nPingNonceSent != 0) {
+ if (nonce == pfrom->nPingNonceSent) {
+ // Matching pong received, this ping is no longer outstanding
+ bPingFinished = true;
+ int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
+ if (pingUsecTime > 0) {
+ // Successful ping time measurement, replace previous
+ pfrom->nPingUsecTime = pingUsecTime;
+ pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
+ } else {
+ // This should never happen
+ sProblem = "Timing mishap";
+ }
+ } else {
+ // Nonce mismatches are normal when pings are overlapping
+ sProblem = "Nonce mismatch";
+ if (nonce == 0) {
+ // This is most likely a bug in another implementation somewhere; cancel this ping
+ bPingFinished = true;
+ sProblem = "Nonce zero";
+ }
+ }
+ } else {
+ sProblem = "Unsolicited pong without ping";
+ }
+ } else {
+ // This is most likely a bug in another implementation somewhere; cancel this ping
+ bPingFinished = true;
+ sProblem = "Short payload";
+ }
+
+ if (!(sProblem.empty())) {
+ LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
+ pfrom->id,
+ sProblem,
+ pfrom->nPingNonceSent,
+ nonce,
+ nAvail);
+ }
+ if (bPingFinished) {
+ pfrom->nPingNonceSent = 0;
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::FILTERLOAD)
+ {
+ CBloomFilter filter;
+ vRecv >> filter;
+
+ if (!filter.IsWithinSizeConstraints())
+ {
+ // There is no excuse for sending a too-large filter
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 100);
+ }
+ else
+ {
+ LOCK(pfrom->cs_filter);
+ delete pfrom->pfilter;
+ pfrom->pfilter = new CBloomFilter(filter);
+ pfrom->pfilter->UpdateEmptyFull();
+ pfrom->fRelayTxes = true;
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::FILTERADD)
+ {
+ vector<unsigned char> vData;
+ vRecv >> vData;
+
+ // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
+ // and thus, the maximum size any matched object can have) in a filteradd message
+ bool bad = false;
+ if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
+ bad = true;
+ } else {
+ LOCK(pfrom->cs_filter);
+ if (pfrom->pfilter) {
+ pfrom->pfilter->insert(vData);
+ } else {
+ bad = true;
+ }
+ }
+ if (bad) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 100);
+ }
+ }
+
+
+ else if (strCommand == NetMsgType::FILTERCLEAR)
+ {
+ LOCK(pfrom->cs_filter);
+ if (pfrom->GetLocalServices() & NODE_BLOOM) {
+ delete pfrom->pfilter;
+ pfrom->pfilter = new CBloomFilter();
+ }
+ pfrom->fRelayTxes = true;
+ }
+
+
+ else if (strCommand == NetMsgType::REJECT)
+ {
+ if (fDebug) {
+ try {
+ string strMsg; unsigned char ccode; string strReason;
+ vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
+
+ ostringstream ss;
+ ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
+
+ if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
+ {
+ uint256 hash;
+ vRecv >> hash;
+ ss << ": hash " << hash.ToString();
+ }
+ LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
+ } catch (const std::ios_base::failure&) {
+ // Avoid feedback loops by preventing reject messages from triggering a new reject message.
+ LogPrint("net", "Unparseable reject message received\n");
+ }
+ }
+ }
+
+ else if (strCommand == NetMsgType::FEEFILTER) {
+ CAmount newFeeFilter = 0;
+ vRecv >> newFeeFilter;
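+        // Only accept values within MoneyRange(); the stored minFeeFilter is consulted
+        // when selecting which transactions to announce to this peer.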
+ if (MoneyRange(newFeeFilter)) {
+ {
+ LOCK(pfrom->cs_feeFilter);
+ pfrom->minFeeFilter = newFeeFilter;
+ }
+ LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
+ }
+ }
+
+ else if (strCommand == NetMsgType::NOTFOUND) {
+ // We do not care about the NOTFOUND message, but logging an Unknown Command
+ // message would be undesirable as we transmit it ourselves.
+ }
+
+ else {
+ // Ignore unknown commands for extensibility
+ LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
+ }
+
+
+
+ return true;
+}
+
+// requires LOCK(cs_vRecvMsg)
+bool ProcessMessages(CNode* pfrom, CConnman& connman)
+{
+ const CChainParams& chainparams = Params();
+ unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
+ //if (fDebug)
+ // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
+
+ //
+ // Message format
+ // (4) message start
+ // (12) command
+ // (4) size
+ // (4) checksum
+ // (x) data
+ //
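+    // i.e. a fixed 24-byte header (4 + 12 + 4 + 4) followed by an nMessageSize-byte payload.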
+ bool fOk = true;
+
+ if (!pfrom->vRecvGetData.empty())
+ ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
+
+ // this maintains the order of responses
+ if (!pfrom->vRecvGetData.empty()) return fOk;
+
+ std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
+ while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
+ // Don't bother if send buffer is too full to respond anyway
+ if (pfrom->nSendSize >= nMaxSendBufferSize)
+ break;
+
+ // get next message
+ CNetMessage& msg = *it;
+
+ //if (fDebug)
+ // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
+ // msg.hdr.nMessageSize, msg.vRecv.size(),
+ // msg.complete() ? "Y" : "N");
+
+ // end, if an incomplete message is found
+ if (!msg.complete())
+ break;
+
+ // at this point, any failure means we can delete the current message
+ it++;
+
+ // Scan for message start
+ if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
+ LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
+ fOk = false;
+ break;
+ }
+
+ // Read header
+ CMessageHeader& hdr = msg.hdr;
+ if (!hdr.IsValid(chainparams.MessageStart()))
+ {
+ LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
+ continue;
+ }
+ string strCommand = hdr.GetCommand();
+
+ // Message size
+ unsigned int nMessageSize = hdr.nMessageSize;
+
+ // Checksum
+ CDataStream& vRecv = msg.vRecv;
+ const uint256& hash = msg.GetMessageHash();
+ if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
+ {
+ LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
+ SanitizeString(strCommand), nMessageSize,
+ HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
+ HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
+ continue;
+ }
+
+ // Process message
+ bool fRet = false;
+ try
+ {
+ fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman);
+ boost::this_thread::interruption_point();
+ }
+ catch (const std::ios_base::failure& e)
+ {
+ connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message")));
+ if (strstr(e.what(), "end of data"))
+ {
+ // Allow exceptions from under-length message on vRecv
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
+ }
+ else if (strstr(e.what(), "size too large"))
+ {
+ // Allow exceptions from over-long size
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
+ }
+ else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
+ {
+ // Allow exceptions from non-canonical encoding
+ LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
+ }
+ else
+ {
+ PrintExceptionContinue(&e, "ProcessMessages()");
+ }
+ }
+ catch (const boost::thread_interrupted&) {
+ throw;
+ }
+ catch (const std::exception& e) {
+ PrintExceptionContinue(&e, "ProcessMessages()");
+ } catch (...) {
+ PrintExceptionContinue(NULL, "ProcessMessages()");
+ }
+
+ if (!fRet)
+ LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
+
+ break;
+ }
+
+ // In case the connection got shut down, its receive buffer was wiped
+ if (!pfrom->fDisconnect)
+ pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
+
+ return fOk;
+}
+
+class CompareInvMempoolOrder
+{
+ CTxMemPool *mp;
+public:
+ CompareInvMempoolOrder(CTxMemPool *_mempool)
+ {
+ mp = _mempool;
+ }
+
+ bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
+ {
+ /* As std::make_heap produces a max-heap, we want the entries with the
+ * fewest ancestors/highest fee to sort later. */
+ return mp->CompareDepthAndScore(*b, *a);
+ }
+};
+
+bool SendMessages(CNode* pto, CConnman& connman)
+{
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+ {
+ // Don't send anything until we get its version message
+ if (pto->nVersion == 0 || pto->fDisconnect)
+ return true;
+
+ // If we get here, the outgoing message serialization version is set and can't change.
+ CNetMsgMaker msgMaker(pto->GetSendVersion());
+
+ //
+ // Message: ping
+ //
+ bool pingSend = false;
+ if (pto->fPingQueued) {
+ // RPC ping request by user
+ pingSend = true;
+ }
+ if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
+ // Ping automatically sent as a latency probe & keepalive.
+ pingSend = true;
+ }
+ if (pingSend) {
+ uint64_t nonce = 0;
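+            // Draw a non-zero nonce; zero is reserved to mean "no ping outstanding"
+            // (see the PONG handler above).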
+ while (nonce == 0) {
+ GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
+ }
+ pto->fPingQueued = false;
+ pto->nPingUsecStart = GetTimeMicros();
+ if (pto->nVersion > BIP0031_VERSION) {
+ pto->nPingNonceSent = nonce;
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
+ } else {
+ // Peer is too old to support ping command with nonce, pong will never arrive.
+ pto->nPingNonceSent = 0;
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
+ }
+ }
+
+ TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
+ if (!lockMain)
+ return true;
+
+ CNodeState &state = *State(pto->GetId());
+
+ BOOST_FOREACH(const CBlockReject& reject, state.rejects)
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
+ state.rejects.clear();
+
+ if (state.fShouldBan) {
+ state.fShouldBan = false;
+ if (pto->fWhitelisted)
+ LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
+ else {
+ pto->fDisconnect = true;
+ if (pto->addr.IsLocal())
+ LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
+ else
+ {
+ connman.Ban(pto->addr, BanReasonNodeMisbehaving);
+ }
+ return true;
+ }
+ }
+
+ // Address refresh broadcast
+ int64_t nNow = GetTimeMicros();
+ if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
+ AdvertiseLocal(pto);
+ pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+ }
+
+ //
+ // Message: addr
+ //
+ if (pto->nNextAddrSend < nNow) {
+ pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
+ vector<CAddress> vAddr;
+ vAddr.reserve(pto->vAddrToSend.size());
+ BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
+ {
+ if (!pto->addrKnown.contains(addr.GetKey()))
+ {
+ pto->addrKnown.insert(addr.GetKey());
+ vAddr.push_back(addr);
+ // receiver rejects addr messages larger than 1000
+ if (vAddr.size() >= 1000)
+ {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
+ vAddr.clear();
+ }
+ }
+ }
+ pto->vAddrToSend.clear();
+ if (!vAddr.empty())
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
+ // we only send the big addr message once
+ if (pto->vAddrToSend.capacity() > 40)
+ pto->vAddrToSend.shrink_to_fit();
+ }
+
+ // Start block sync
+ if (pindexBestHeader == NULL)
+ pindexBestHeader = chainActive.Tip();
+ bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
+ if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
+ // Only actively request headers from a single peer, unless we're close to today.
+ if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
+ state.fSyncStarted = true;
+ nSyncStarted++;
+ const CBlockIndex *pindexStart = pindexBestHeader;
+ /* If possible, start at the block preceding the currently
+ best known header. This ensures that we always get a
+ non-empty list of headers back as long as the peer
+ is up-to-date. With a non-empty response, we can initialise
+ the peer's known best block. This wouldn't be possible
+ if we requested starting at pindexBestHeader and
+ got back an empty response. */
+ if (pindexStart->pprev)
+ pindexStart = pindexStart->pprev;
+ LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
+ }
+ }
+
+ // Resend wallet transactions that haven't gotten in a block yet
+ // Except during reindex, importing and IBD, when old wallet
+        // transactions become unconfirmed and spam other nodes.
+ if (!fReindex && !fImporting && !IsInitialBlockDownload())
+ {
+ GetMainSignals().Broadcast(nTimeBestReceived, &connman);
+ }
+
+ //
+ // Try sending block announcements via headers
+ //
+ {
+            // If we have fewer than MAX_BLOCKS_TO_ANNOUNCE in our
+            // list of block hashes we're relaying, and our peer wants
+            // headers announcements, then find the first header
+            // not yet known to our peer but that would connect, and send
+            // from there. If no header would connect, or if we have too
+            // many blocks, or if the peer doesn't want headers, just
+            // add them all to the inv queue.
+ LOCK(pto->cs_inventory);
+ vector<CBlock> vHeaders;
+ bool fRevertToInv = ((!state.fPreferHeaders &&
+ (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
+ pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
+ CBlockIndex *pBestIndex = NULL; // last header queued for delivery
+ ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
+
+ if (!fRevertToInv) {
+ bool fFoundStartingHeader = false;
+ // Try to find first header that our peer doesn't have, and
+ // then send all headers past that one. If we come across any
+ // headers that aren't on chainActive, give up.
+ BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
+ BlockMap::iterator mi = mapBlockIndex.find(hash);
+ assert(mi != mapBlockIndex.end());
+ CBlockIndex *pindex = mi->second;
+ if (chainActive[pindex->nHeight] != pindex) {
+ // Bail out if we reorged away from this block
+ fRevertToInv = true;
+ break;
+ }
+ if (pBestIndex != NULL && pindex->pprev != pBestIndex) {
+                        // This means the list of blocks to announce doesn't
+                        // form a connected sequence.
+ // This shouldn't really be possible to hit during
+ // regular operation (because reorgs should take us to
+ // a chain that has some block not on the prior chain,
+ // which should be caught by the prior check), but one
+ // way this could happen is by using invalidateblock /
+ // reconsiderblock repeatedly on the tip, causing it to
+ // be added multiple times to vBlockHashesToAnnounce.
+ // Robustly deal with this rare situation by reverting
+ // to an inv.
+ fRevertToInv = true;
+ break;
+ }
+ pBestIndex = pindex;
+ if (fFoundStartingHeader) {
+ // add this to the headers message
+ vHeaders.push_back(pindex->GetBlockHeader());
+ } else if (PeerHasHeader(&state, pindex)) {
+ continue; // keep looking for the first new block
+ } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
+ // Peer doesn't have this header but they do have the prior one.
+ // Start sending headers.
+ fFoundStartingHeader = true;
+ vHeaders.push_back(pindex->GetBlockHeader());
+ } else {
+ // Peer doesn't have this header or the prior one -- nothing will
+ // connect, so bail out.
+ fRevertToInv = true;
+ break;
+ }
+ }
+ }
+ if (!fRevertToInv && !vHeaders.empty()) {
+ if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
+ // We only send up to 1 block as header-and-ids, as more than that
+ // probably means we're doing an initial-ish sync or the peer is slow
+ LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
+ vHeaders.front().GetHash().ToString(), pto->id);
+ //TODO: Shouldn't need to reload block from disk, but requires refactor
+ CBlock block;
+ assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
+ CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
+ int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
+ connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ state.pindexBestHeaderSent = pBestIndex;
+ } else if (state.fPreferHeaders) {
+ if (vHeaders.size() > 1) {
+ LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
+ vHeaders.size(),
+ vHeaders.front().GetHash().ToString(),
+ vHeaders.back().GetHash().ToString(), pto->id);
+ } else {
+ LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
+ vHeaders.front().GetHash().ToString(), pto->id);
+ }
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ state.pindexBestHeaderSent = pBestIndex;
+ } else
+ fRevertToInv = true;
+ }
+ if (fRevertToInv) {
+ // If falling back to using an inv, just try to inv the tip.
+ // The last entry in vBlockHashesToAnnounce was our tip at some point
+ // in the past.
+ if (!pto->vBlockHashesToAnnounce.empty()) {
+ const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
+ BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
+ assert(mi != mapBlockIndex.end());
+ CBlockIndex *pindex = mi->second;
+
+ // Warn if we're announcing a block that is not on the main chain.
+ // This should be very rare and could be optimized out.
+ // Just log for now.
+ if (chainActive[pindex->nHeight] != pindex) {
+ LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
+ hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
+ }
+
+ // If the peer's chain has this block, don't inv it back.
+ if (!PeerHasHeader(&state, pindex)) {
+ pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
+ LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
+ pto->id, hashToAnnounce.ToString());
+ }
+ }
+ }
+ pto->vBlockHashesToAnnounce.clear();
+ }
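The header-announcement logic above reduces to a small decision. Below is a standalone sketch of that decision, with plain booleans standing in for the CNodeState preference flags and the announce queue; the parameter names are illustrative, not the real fields.

    #include <cstddef>
    #include <string>

    // Simplified restatement of the announcement choice above. The real code
    // consults CNodeState and pto->vBlockHashesToAnnounce; these parameters
    // are illustrative stand-ins.
    std::string ChooseAnnouncement(bool prefer_headers, bool prefer_header_and_ids,
                                   std::size_t blocks_to_announce, std::size_t max_blocks_to_announce)
    {
        const bool revert_to_inv =
            (!prefer_headers && (!prefer_header_and_ids || blocks_to_announce > 1)) ||
            blocks_to_announce > max_blocks_to_announce;
        if (revert_to_inv) return "inv";                         // just inv the tip
        if (prefer_header_and_ids && blocks_to_announce == 1) return "cmpctblock";
        return "headers";                                        // headers announcement
    }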
+
+ //
+ // Message: inventory
+ //
+ vector<CInv> vInv;
+ {
+ LOCK(pto->cs_inventory);
+ vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
+
+ // Add blocks
+ BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
+ vInv.push_back(CInv(MSG_BLOCK, hash));
+ if (vInv.size() == MAX_INV_SZ) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ vInv.clear();
+ }
+ }
+ pto->vInventoryBlockToSend.clear();
+
+ // Check whether periodic sends should happen
+ bool fSendTrickle = pto->fWhitelisted;
+ if (pto->nNextInvSend < nNow) {
+ fSendTrickle = true;
+ // Use half the delay for outbound peers, as there is less privacy concern for them.
+ pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
+ }
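A standalone sketch of the trickle timer above: the `>> !pto->fInbound` shift halves the average interval for outbound peers, and the next send time is drawn from an exponential distribution. PoissonNextSend itself lives in net.cpp and is only approximated here.

    #include <cstdint>
    #include <random>

    // Illustrative only: exponentially distributed delay whose mean is halved
    // for outbound peers, mirroring INVENTORY_BROADCAST_INTERVAL >> !fInbound.
    int64_t NextInvSendMicros(int64_t now_micros, int average_interval_seconds, bool is_inbound)
    {
        static std::mt19937_64 rng{std::random_device{}()};
        std::exponential_distribution<double> unit_exp(1.0);
        const int mean_seconds = average_interval_seconds >> (is_inbound ? 0 : 1);
        return now_micros + static_cast<int64_t>(unit_exp(rng) * mean_seconds * 1000000.0);
    }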
+
+ // Time to send but the peer has requested we not relay transactions.
+ if (fSendTrickle) {
+ LOCK(pto->cs_filter);
+ if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
+ }
+
+ // Respond to BIP35 mempool requests
+ if (fSendTrickle && pto->fSendMempool) {
+ auto vtxinfo = mempool.infoAll();
+ pto->fSendMempool = false;
+ CAmount filterrate = 0;
+ {
+ LOCK(pto->cs_feeFilter);
+ filterrate = pto->minFeeFilter;
+ }
+
+ LOCK(pto->cs_filter);
+
+ for (const auto& txinfo : vtxinfo) {
+ const uint256& hash = txinfo.tx->GetHash();
+ CInv inv(MSG_TX, hash);
+ pto->setInventoryTxToSend.erase(hash);
+ if (filterrate) {
+ if (txinfo.feeRate.GetFeePerK() < filterrate)
+ continue;
+ }
+ if (pto->pfilter) {
+ if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
+ }
+ pto->filterInventoryKnown.insert(hash);
+ vInv.push_back(inv);
+ if (vInv.size() == MAX_INV_SZ) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ vInv.clear();
+ }
+ }
+ pto->timeLastMempoolReq = GetTime();
+ }
+
+ // Determine transactions to relay
+ if (fSendTrickle) {
+ // Produce a vector with all candidates for sending
+ vector<std::set<uint256>::iterator> vInvTx;
+ vInvTx.reserve(pto->setInventoryTxToSend.size());
+ for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
+ vInvTx.push_back(it);
+ }
+ CAmount filterrate = 0;
+ {
+ LOCK(pto->cs_feeFilter);
+ filterrate = pto->minFeeFilter;
+ }
+ // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
+ // A heap is used so that not all items need sorting if only a few are being sent.
+ CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
+ std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
+ // No reason to drain out at many times the network's capacity,
+ // especially since we have many peers and some will draw much shorter delays.
+ unsigned int nRelayedTransactions = 0;
+ LOCK(pto->cs_filter);
+ while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
+ // Fetch the top element from the heap
+ std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
+ std::set<uint256>::iterator it = vInvTx.back();
+ vInvTx.pop_back();
+ uint256 hash = *it;
+ // Remove it from the to-be-sent set
+ pto->setInventoryTxToSend.erase(it);
+ // Check if not in the filter already
+ if (pto->filterInventoryKnown.contains(hash)) {
+ continue;
+ }
+ // Not in the mempool anymore? Don't bother sending it.
+ auto txinfo = mempool.info(hash);
+ if (!txinfo.tx) {
+ continue;
+ }
+ if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
+ continue;
+ }
+ if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
+ // Send
+ vInv.push_back(CInv(MSG_TX, hash));
+ nRelayedTransactions++;
+ {
+ // Expire old relay messages
+ while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
+ {
+ mapRelay.erase(vRelayExpiration.front().second);
+ vRelayExpiration.pop_front();
+ }
+
+ auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
+ if (ret.second) {
+ vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
+ }
+ }
+ if (vInv.size() == MAX_INV_SZ) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ vInv.clear();
+ }
+ pto->filterInventoryKnown.insert(hash);
+ }
+ }
+ }
+ if (!vInv.empty())
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
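The relay loop above uses a heap so that only the elements actually sent need to be ordered. A minimal standalone sketch of that pattern follows, with a trivial comparator standing in for CompareInvMempoolOrder.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Build a heap over all candidates, but pop only as many as we intend to
    // send (INVENTORY_BROADCAST_MAX in the code above).
    std::vector<int> SelectTopCandidates(std::vector<int> candidates, std::size_t max_to_send)
    {
        const auto better_first = [](int a, int b) { return a < b; }; // stand-in comparator
        std::make_heap(candidates.begin(), candidates.end(), better_first);
        std::vector<int> selected;
        while (!candidates.empty() && selected.size() < max_to_send) {
            std::pop_heap(candidates.begin(), candidates.end(), better_first);
            selected.push_back(candidates.back());
            candidates.pop_back();
        }
        return selected;
    }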
+
+ // Detect whether we're stalling
+ nNow = GetTimeMicros();
+ if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
+ // Stalling only triggers when the block download window cannot move. During normal steady state,
+ // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
+ // should only happen during initial block download.
+ LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
+ pto->fDisconnect = true;
+ return true;
+ }
+ // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
+ // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
+ // We compensate for other peers to prevent killing off peers due to our own downstream link
+ // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
+ // to unreasonably increase our timeout.
+ if (state.vBlocksInFlight.size() > 0) {
+ QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
+ int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
+ if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
+ LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
+ pto->fDisconnect = true;
+ return true;
+ }
+ }
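A worked restatement of the timeout rule described in the comment above, using the 2 and 0.5 factors it names and assuming the 600-second mainnet spacing: with N = 2 other peers delivering validated blocks, the allowance is (2 + 0.5 × 2) × 600 s = 1800 s.

    #include <cstdint>

    // Timeout allowance per the comment above: (2 + 0.5 * N) block intervals,
    // expressed in microseconds. N = 2 and 600 s spacing gives 1,800,000,000 us.
    int64_t BlockDownloadTimeoutMicros(int64_t downloading_since_micros,
                                       int64_t pow_target_spacing_seconds,
                                       int other_peers_with_validated_downloads)
    {
        const double intervals = 2.0 + 0.5 * other_peers_with_validated_downloads;
        return downloading_since_micros +
               static_cast<int64_t>(intervals * pow_target_spacing_seconds * 1000000.0);
    }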
+
+ //
+ // Message: getdata (blocks)
+ //
+ vector<CInv> vGetData;
+ if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ vector<CBlockIndex*> vToDownload;
+ NodeId staller = -1;
+ FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
+ BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
+ uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
+ LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
+ pindex->nHeight, pto->id);
+ }
+ if (state.nBlocksInFlight == 0 && staller != -1) {
+ if (State(staller)->nStallingSince == 0) {
+ State(staller)->nStallingSince = nNow;
+ LogPrint("net", "Stall started peer=%d\n", staller);
+ }
+ }
+ }
+
+ //
+ // Message: getdata (non-blocks)
+ //
+ while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
+ {
+ const CInv& inv = (*pto->mapAskFor.begin()).second;
+ if (!AlreadyHave(inv))
+ {
+ if (fDebug)
+ LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
+ vGetData.push_back(inv);
+ if (vGetData.size() >= 1000)
+ {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ vGetData.clear();
+ }
+ } else {
+ // If we're not going to ask, don't expect a response.
+ pto->setAskFor.erase(inv.hash);
+ }
+ pto->mapAskFor.erase(pto->mapAskFor.begin());
+ }
+ if (!vGetData.empty())
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+
+ //
+ // Message: feefilter
+ //
+ // We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
+ if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
+ !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
+ CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
+ int64_t timeNow = GetTimeMicros();
+ if (timeNow > pto->nextSendTimeFeeFilter) {
+ static FeeFilterRounder filterRounder(::minRelayTxFee);
+ CAmount filterToSend = filterRounder.round(currentFilter);
+ if (filterToSend != pto->lastSentFeeFilter) {
+ connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
+ pto->lastSentFeeFilter = filterToSend;
+ }
+ pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
+ }
+ // If the fee filter has changed substantially and there is still more than MAX_FEEFILTER_CHANGE_DELAY
+ // until the scheduled broadcast, then move the broadcast up to within MAX_FEEFILTER_CHANGE_DELAY.
+ else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
+ (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
+ pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
+ }
+ }
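Restating the early-rebroadcast condition above: the filter is resent ahead of schedule only when the new value falls outside the (3/4, 4/3) band around the last value sent. The constants below are taken directly from that condition.

    #include <cstdint>

    // The current filter must fall below 3/4 of, or rise above 4/3 of, the
    // last fee filter sent to this peer.
    bool FeeFilterChangedSubstantially(int64_t current_filter, int64_t last_sent_filter)
    {
        return current_filter < 3 * last_sent_filter / 4 ||
               current_filter > 4 * last_sent_filter / 3;
    }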
+ }
+ return true;
+}
+
+class CNetProcessingCleanup
+{
+public:
+ CNetProcessingCleanup() {}
+ ~CNetProcessingCleanup() {
+ // orphan transactions
+ mapOrphanTransactions.clear();
+ mapOrphanTransactionsByPrev.clear();
+ }
+} instance_of_cnetprocessingcleanup;
diff --git a/src/net_processing.h b/src/net_processing.h
new file mode 100644
index 0000000000..130433cc7c
--- /dev/null
+++ b/src/net_processing.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2015 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NET_PROCESSING_H
+#define BITCOIN_NET_PROCESSING_H
+
+#include "net.h"
+#include "validationinterface.h"
+
+/** Register with a network node to receive its signals */
+void RegisterNodeSignals(CNodeSignals& nodeSignals);
+/** Unregister a network node */
+void UnregisterNodeSignals(CNodeSignals& nodeSignals);
+
+class PeerLogicValidation : public CValidationInterface {
+private:
+ CConnman* connman;
+
+public:
+ PeerLogicValidation(CConnman* connmanIn);
+
+ virtual void SyncTransaction(const CTransaction& tx, const CBlockIndex* pindex, int nPosInBlock);
+ virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload);
+ virtual void BlockChecked(const CBlock& block, const CValidationState& state);
+};
+
+struct CNodeStateStats {
+ int nMisbehavior;
+ int nSyncHeight;
+ int nCommonHeight;
+ std::vector<int> vHeightInFlight;
+};
+
+/** Get statistics from node state */
+bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats);
+/** Increase a node's misbehavior score. */
+void Misbehaving(NodeId nodeid, int howmuch);
+
+/** Process protocol messages received from a given node */
+bool ProcessMessages(CNode* pfrom, CConnman& connman);
+/**
+ * Send queued protocol messages to a given node.
+ *
+ * @param[in] pto The node which we are sending messages to.
+ * @param[in] connman The connection manager for that node.
+ */
+bool SendMessages(CNode* pto, CConnman& connman);
+
+#endif // BITCOIN_NET_PROCESSING_H
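A hedged sketch of how a caller might wire this header up. The real registration happens in init.cpp, which is outside this hunk, so the calls below only mirror the declarations above rather than the actual call sites.

    // Sketch only: register the message-processing logic with the node signals
    // and the validation interface.
    #include <memory>
    #include "net_processing.h"
    #include "validationinterface.h"

    void WireUpPeerLogic(CConnman& connman, std::unique_ptr<PeerLogicValidation>& peerLogic)
    {
        peerLogic.reset(new PeerLogicValidation(&connman));
        RegisterValidationInterface(peerLogic.get()); // UpdatedBlockTip, BlockChecked, ...
        RegisterNodeSignals(GetNodeSignals());        // hook up ProcessMessages/SendMessages
    }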
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 9fe34108f5..9118584b80 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -94,30 +94,9 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
}
}
-#ifdef HAVE_GETADDRINFO_A
- struct in_addr ipv4_addr;
-#ifdef HAVE_INET_PTON
- if (inet_pton(AF_INET, pszName, &ipv4_addr) > 0) {
- vIP.push_back(CNetAddr(ipv4_addr));
- return true;
- }
-
- struct in6_addr ipv6_addr;
- if (inet_pton(AF_INET6, pszName, &ipv6_addr) > 0) {
- vIP.push_back(CNetAddr(ipv6_addr));
- return true;
- }
-#else
- ipv4_addr.s_addr = inet_addr(pszName);
- if (ipv4_addr.s_addr != INADDR_NONE) {
- vIP.push_back(CNetAddr(ipv4_addr));
- return true;
- }
-#endif
-#endif
-
struct addrinfo aiHint;
memset(&aiHint, 0, sizeof(struct addrinfo));
+
aiHint.ai_socktype = SOCK_STREAM;
aiHint.ai_protocol = IPPROTO_TCP;
aiHint.ai_family = AF_UNSPEC;
@@ -126,33 +105,8 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
#else
aiHint.ai_flags = fAllowLookup ? AI_ADDRCONFIG : AI_NUMERICHOST;
#endif
-
struct addrinfo *aiRes = NULL;
-#ifdef HAVE_GETADDRINFO_A
- struct gaicb gcb, *query = &gcb;
- memset(query, 0, sizeof(struct gaicb));
- gcb.ar_name = pszName;
- gcb.ar_request = &aiHint;
- int nErr = getaddrinfo_a(GAI_NOWAIT, &query, 1, NULL);
- if (nErr)
- return false;
-
- do {
- // Should set the timeout limit to a reasonable value to avoid
- // generating unnecessary checking call during the polling loop,
- // while it can still response to stop request quick enough.
- // 2 seconds looks fine in our situation.
- struct timespec ts = { 2, 0 };
- gai_suspend(&query, 1, &ts);
- boost::this_thread::interruption_point();
-
- nErr = gai_error(query);
- if (0 == nErr)
- aiRes = query->ar_result;
- } while (nErr == EAI_INPROGRESS);
-#else
int nErr = getaddrinfo(pszName, NULL, &aiHint, &aiRes);
-#endif
if (nErr)
return false;
diff --git a/src/netmessagemaker.h b/src/netmessagemaker.h
new file mode 100644
index 0000000000..7167434a19
--- /dev/null
+++ b/src/netmessagemaker.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NETMESSAGEMAKER_H
+#define BITCOIN_NETMESSAGEMAKER_H
+
+#include "net.h"
+#include "serialize.h"
+
+class CNetMsgMaker
+{
+public:
+ CNetMsgMaker(int nVersionIn) : nVersion(nVersionIn){}
+
+ template <typename... Args>
+ CSerializedNetMsg Make(int nFlags, std::string sCommand, Args&&... args)
+ {
+ CSerializedNetMsg msg;
+ msg.command = std::move(sCommand);
+ CVectorWriter{ SER_NETWORK, nFlags | nVersion, msg.data, 0, std::forward<Args>(args)... };
+ return msg;
+ }
+
+ template <typename... Args>
+ CSerializedNetMsg Make(std::string sCommand, Args&&... args)
+ {
+ return Make(0, std::move(sCommand), std::forward<Args>(args)...);
+ }
+
+private:
+ const int nVersion;
+};
+
+#endif // BITCOIN_NETMESSAGEMAKER_H
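Usage mirrors the call sites earlier in this patch, for example inside SendMessages. The GetSendVersion constructor argument is an assumption here; the version used at the real call sites is whatever has been negotiated for the peer.

    // Serialize a message at the peer's protocol version and hand it to CConnman,
    // as done throughout net_processing.cpp above.
    const CNetMsgMaker msgMaker(pto->GetSendVersion());
    connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS,
                                           chainActive.GetLocator(pindexStart), uint256()));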
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 7113390cdf..9eb831bc17 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -404,7 +404,8 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget)
{
// Return failure if trying to analyze a target we're not tracking
- if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
+ // It's not possible to get reasonable estimates for confTarget of 1
+ if (confTarget <= 1 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
return CFeeRate(0);
double median = feeStats.EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
@@ -423,6 +424,10 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
return CFeeRate(0);
+ // It's not possible to get reasonable estimates for confTarget of 1
+ if (confTarget == 1)
+ confTarget = 2;
+
double median = -1;
while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) {
median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
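The practical effect of the two changes above, sketched below. The estimator instance, the answer-target pointer and the trailing mempool argument are assumptions drawn from the surrounding call sites, which are not shown in this hunk.

    // estimateFee(1) now reports failure, while estimateSmartFee(1, ...) quietly
    // answers for a confirmation target of 2 instead.
    CFeeRate direct = estimator.estimateFee(1);                          // returns CFeeRate(0)
    int answerTarget = 0;
    CFeeRate smart = estimator.estimateSmartFee(1, &answerTarget, mempool); // behaves as target 2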
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index a3eee474ab..0c71a079fb 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -7,7 +7,7 @@
#include "policy/policy.h"
-#include "main.h"
+#include "validation.h"
#include "tinyformat.h"
#include "util.h"
#include "utilstrencodings.h"
@@ -66,7 +66,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnes
// Extremely large transactions with lots of inputs can cost the network
// almost as much to process as they cost the sender in fees, because
// computing signature hashes is O(ninputs*txsize). Limiting transactions
- // to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks.
+ // to MAX_STANDARD_TX_WEIGHT mitigates CPU exhaustion attacks.
unsigned int sz = GetTransactionWeight(tx);
if (sz >= MAX_STANDARD_TX_WEIGHT) {
reason = "tx-size";
diff --git a/src/primitives/block.cpp b/src/primitives/block.cpp
index 0e6ab4dd71..95bd2211f9 100644
--- a/src/primitives/block.cpp
+++ b/src/primitives/block.cpp
@@ -27,7 +27,7 @@ std::string CBlock::ToString() const
vtx.size());
for (unsigned int i = 0; i < vtx.size(); i++)
{
- s << " " << vtx[i].ToString() << "\n";
+ s << " " << vtx[i]->ToString() << "\n";
}
return s.str();
}
diff --git a/src/primitives/block.h b/src/primitives/block.h
index d148aec1e0..b037fc839c 100644
--- a/src/primitives/block.h
+++ b/src/primitives/block.h
@@ -73,7 +73,7 @@ class CBlock : public CBlockHeader
{
public:
// network and disk
- std::vector<CTransaction> vtx;
+ std::vector<CTransactionRef> vtx;
// memory only
mutable bool fChecked;
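With vtx now holding CTransactionRef (a std::shared_ptr<const CTransaction>, defined later in this patch), call sites dereference instead of copying, matching the vtx[i]->ToString() change above. A minimal iteration sketch, with `block` standing in for any CBlock:

    // Iterate a block's transactions without copying them.
    for (const auto& tx : block.vtx) {          // tx: std::shared_ptr<const CTransaction>
        const uint256& txid = tx->GetHash();
        (void)txid;                             // use the txid as needed
    }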
diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp
index 7acdac17f2..11d7eace55 100644
--- a/src/primitives/transaction.cpp
+++ b/src/primitives/transaction.cpp
@@ -62,9 +62,9 @@ uint256 CMutableTransaction::GetHash() const
return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS);
}
-void CTransaction::UpdateHash() const
+uint256 CTransaction::ComputeHash() const
{
- *const_cast<uint256*>(&hash) = SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS);
+ return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS);
}
uint256 CTransaction::GetWitnessHash() const
@@ -72,21 +72,10 @@ uint256 CTransaction::GetWitnessHash() const
return SerializeHash(*this, SER_GETHASH, 0);
}
-CTransaction::CTransaction() : nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0) { }
-
-CTransaction::CTransaction(const CMutableTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), wit(tx.wit), nLockTime(tx.nLockTime) {
- UpdateHash();
-}
-
-CTransaction& CTransaction::operator=(const CTransaction &tx) {
- *const_cast<int*>(&nVersion) = tx.nVersion;
- *const_cast<std::vector<CTxIn>*>(&vin) = tx.vin;
- *const_cast<std::vector<CTxOut>*>(&vout) = tx.vout;
- *const_cast<CTxWitness*>(&wit) = tx.wit;
- *const_cast<unsigned int*>(&nLockTime) = tx.nLockTime;
- *const_cast<uint256*>(&hash) = tx.hash;
- return *this;
-}
+/* For backward compatibility, the hash is initialized to 0. TODO: remove the need for this default constructor entirely. */
+CTransaction::CTransaction() : nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0), hash() {}
+CTransaction::CTransaction(const CMutableTransaction &tx) : nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), wit(tx.wit), nLockTime(tx.nLockTime), hash(ComputeHash()) {}
+CTransaction::CTransaction(CMutableTransaction &&tx) : nVersion(tx.nVersion), vin(std::move(tx.vin)), vout(std::move(tx.vout)), wit(std::move(tx.wit)), nLockTime(tx.nLockTime), hash(ComputeHash()) {}
CAmount CTransaction::GetValueOut() const
{
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 1d176e5d8c..66fefafef5 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -286,73 +286,81 @@ struct CMutableTransaction;
* - CTxWitness wit;
* - uint32_t nLockTime
*/
-template<typename Stream, typename Operation, typename TxType>
-inline void SerializeTransaction(TxType& tx, Stream& s, Operation ser_action) {
+template<typename Stream, typename TxType>
+inline void UnserializeTransaction(TxType& tx, Stream& s) {
const bool fAllowWitness = !(s.GetVersion() & SERIALIZE_TRANSACTION_NO_WITNESS);
- READWRITE(*const_cast<int32_t*>(&tx.nVersion));
+ s >> tx.nVersion;
unsigned char flags = 0;
- if (ser_action.ForRead()) {
- const_cast<std::vector<CTxIn>*>(&tx.vin)->clear();
- const_cast<std::vector<CTxOut>*>(&tx.vout)->clear();
- const_cast<CTxWitness*>(&tx.wit)->SetNull();
- /* Try to read the vin. In case the dummy is there, this will be read as an empty vector. */
- READWRITE(*const_cast<std::vector<CTxIn>*>(&tx.vin));
- if (tx.vin.size() == 0 && fAllowWitness) {
- /* We read a dummy or an empty vin. */
- READWRITE(flags);
- if (flags != 0) {
- READWRITE(*const_cast<std::vector<CTxIn>*>(&tx.vin));
- READWRITE(*const_cast<std::vector<CTxOut>*>(&tx.vout));
- }
- } else {
- /* We read a non-empty vin. Assume a normal vout follows. */
- READWRITE(*const_cast<std::vector<CTxOut>*>(&tx.vout));
- }
- if ((flags & 1) && fAllowWitness) {
- /* The witness flag is present, and we support witnesses. */
- flags ^= 1;
- const_cast<CTxWitness*>(&tx.wit)->vtxinwit.resize(tx.vin.size());
- READWRITE(tx.wit);
- }
- if (flags) {
- /* Unknown flag in the serialization */
- throw std::ios_base::failure("Unknown transaction optional data");
+ tx.vin.clear();
+ tx.vout.clear();
+ tx.wit.SetNull();
+ /* Try to read the vin. In case the dummy is there, this will be read as an empty vector. */
+ s >> tx.vin;
+ if (tx.vin.size() == 0 && fAllowWitness) {
+ /* We read a dummy or an empty vin. */
+ s >> flags;
+ if (flags != 0) {
+ s >> tx.vin;
+ s >> tx.vout;
}
} else {
- // Consistency check
- assert(tx.wit.vtxinwit.size() <= tx.vin.size());
- if (fAllowWitness) {
- /* Check whether witnesses need to be serialized. */
- if (!tx.wit.IsNull()) {
- flags |= 1;
- }
- }
- if (flags) {
- /* Use extended format in case witnesses are to be serialized. */
- std::vector<CTxIn> vinDummy;
- READWRITE(vinDummy);
- READWRITE(flags);
+ /* We read a non-empty vin. Assume a normal vout follows. */
+ s >> tx.vout;
+ }
+ if ((flags & 1) && fAllowWitness) {
+ /* The witness flag is present, and we support witnesses. */
+ flags ^= 1;
+ tx.wit.vtxinwit.resize(tx.vin.size());
+ s >> tx.wit;
+ }
+ if (flags) {
+ /* Unknown flag in the serialization */
+ throw std::ios_base::failure("Unknown transaction optional data");
+ }
+ s >> tx.nLockTime;
+}
+
+template<typename Stream, typename TxType>
+inline void SerializeTransaction(const TxType& tx, Stream& s) {
+ const bool fAllowWitness = !(s.GetVersion() & SERIALIZE_TRANSACTION_NO_WITNESS);
+
+ s << tx.nVersion;
+ unsigned char flags = 0;
+ // Consistency check
+ assert(tx.wit.vtxinwit.size() <= tx.vin.size());
+ if (fAllowWitness) {
+ /* Check whether witnesses need to be serialized. */
+ if (!tx.wit.IsNull()) {
+ flags |= 1;
}
- READWRITE(*const_cast<std::vector<CTxIn>*>(&tx.vin));
- READWRITE(*const_cast<std::vector<CTxOut>*>(&tx.vout));
- if (flags & 1) {
- const_cast<CTxWitness*>(&tx.wit)->vtxinwit.resize(tx.vin.size());
- READWRITE(tx.wit);
+ }
+ if (flags) {
+ /* Use extended format in case witnesses are to be serialized. */
+ std::vector<CTxIn> vinDummy;
+ s << vinDummy;
+ s << flags;
+ }
+ s << tx.vin;
+ s << tx.vout;
+ if (flags & 1) {
+ for (size_t i = 0; i < tx.vin.size(); i++) {
+ if (i < tx.wit.vtxinwit.size()) {
+ s << tx.wit.vtxinwit[i];
+ } else {
+ s << CTxInWitness();
+ }
}
}
- READWRITE(*const_cast<uint32_t*>(&tx.nLockTime));
+ s << tx.nLockTime;
}
+
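A round-trip sketch using the new entry points above. CDataStream and PROTOCOL_VERSION are assumed to come from streams.h/version.h, and the `deserialize` tag is assumed to be the deserialize_type value provided alongside the new constructors.

    // Serialize with CTransaction::Serialize, read back via the deserializing
    // constructor, and confirm the txid survives the round trip.
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    ss << tx;                              // uses SerializeTransaction above
    CTransaction tx2(deserialize, ss);     // uses UnserializeTransaction via CMutableTransaction
    assert(tx.GetHash() == tx2.GetHash());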
/** The basic transaction that is broadcasted on the network and contained in
* blocks. A transaction can contain multiple inputs and outputs.
*/
class CTransaction
{
-private:
- /** Memory only. */
- const uint256 hash;
-
public:
// Default transaction version.
static const int32_t CURRENT_VERSION=1;
@@ -374,24 +382,30 @@ public:
CTxWitness wit; // Not const: can change without invalidating the txid cache
const uint32_t nLockTime;
+private:
+ /** Memory only. */
+ const uint256 hash;
+
+ uint256 ComputeHash() const;
+
+public:
/** Construct a CTransaction that qualifies as IsNull() */
CTransaction();
/** Convert a CMutableTransaction into a CTransaction. */
CTransaction(const CMutableTransaction &tx);
+ CTransaction(CMutableTransaction &&tx);
- CTransaction& operator=(const CTransaction& tx);
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- SerializeTransaction(*this, s, ser_action);
- if (ser_action.ForRead()) {
- UpdateHash();
- }
+ template <typename Stream>
+ inline void Serialize(Stream& s) const {
+ SerializeTransaction(*this, s);
}
+ /** This deserializing constructor is provided instead of an Unserialize method.
+ * Unserialize is not possible, since it would require overwriting const fields. */
+ template <typename Stream>
+ CTransaction(deserialize_type, Stream& s) : CTransaction(CMutableTransaction(deserialize, s)) {}
+
bool IsNull() const {
return vin.empty() && vout.empty();
}
@@ -413,7 +427,7 @@ public:
// Compute modified tx size for priority calculation (optionally given tx size)
unsigned int CalculateModifiedSize(unsigned int nTxSize=0) const;
-
+
/**
* Get the total transaction size in bytes, including witness data.
* "Total Size" defined in BIP141 and BIP144.
@@ -437,8 +451,6 @@ public:
}
std::string ToString() const;
-
- void UpdateHash() const;
};
/** A mutable version of CTransaction. */
@@ -453,11 +465,20 @@ struct CMutableTransaction
CMutableTransaction();
CMutableTransaction(const CTransaction& tx);
- ADD_SERIALIZE_METHODS;
+ template <typename Stream>
+ inline void Serialize(Stream& s) const {
+ SerializeTransaction(*this, s);
+ }
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- SerializeTransaction(*this, s, ser_action);
+
+ template <typename Stream>
+ inline void Unserialize(Stream& s) {
+ UnserializeTransaction(*this, s);
+ }
+
+ template <typename Stream>
+ CMutableTransaction(deserialize_type, Stream& s) {
+ Unserialize(s);
}
/** Compute the hash of this CMutableTransaction. This is computed on the
@@ -466,6 +487,12 @@ struct CMutableTransaction
uint256 GetHash() const;
};
+typedef std::shared_ptr<const CTransaction> CTransactionRef;
+static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); }
+template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); }
+static inline CTransactionRef MakeTransactionRef(const CTransactionRef& txIn) { return txIn; }
+static inline CTransactionRef MakeTransactionRef(CTransactionRef&& txIn) { return std::move(txIn); }
+
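Typical constructions with the helpers above; the variable names are illustrative.

    CMutableTransaction mtx;
    CTransactionRef ref_copy = MakeTransactionRef(mtx);             // copy-converts to CTransaction
    CTransactionRef ref_move = MakeTransactionRef(std::move(mtx));  // move-converts
    CTransactionRef ref_null = MakeTransactionRef();                // default (IsNull) transaction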
/** Compute the weight of a transaction, as defined by BIP 141 */
int64_t GetTransactionWeight(const CTransaction &tx);
diff --git a/src/protocol.h b/src/protocol.h
index a52d9a67b0..dea409c5b7 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -261,7 +261,7 @@ enum ServiceFlags : uint64_t {
// Bitcoin Core nodes used to support this by default, without advertising this bit,
// but no longer do as of protocol version 70011 (= NO_BLOOM_VERSION)
NODE_BLOOM = (1 << 2),
- // Indicates that a node can be asked for blocks and transactions including
+ // NODE_WITNESS indicates that a node can be asked for blocks and transactions including
// witness data.
NODE_WITNESS = (1 << 3),
// NODE_XTHIN means the node supports Xtreme Thinblocks
diff --git a/src/qt/addressbookpage.cpp b/src/qt/addressbookpage.cpp
index 58cf4dede0..6076837fde 100644
--- a/src/qt/addressbookpage.cpp
+++ b/src/qt/addressbookpage.cpp
@@ -83,7 +83,7 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode,
deleteAction = new QAction(ui->deleteAddress->text(), this);
// Build context menu
- contextMenu = new QMenu();
+ contextMenu = new QMenu(this);
contextMenu->addAction(copyAddressAction);
contextMenu->addAction(copyLabelAction);
contextMenu->addAction(editAction);
diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp
index 6e11e23904..fe53d89ba9 100644
--- a/src/qt/bantablemodel.cpp
+++ b/src/qt/bantablemodel.cpp
@@ -87,7 +87,7 @@ BanTableModel::BanTableModel(ClientModel *parent) :
clientModel(parent)
{
columns << tr("IP/Netmask") << tr("Banned Until");
- priv = new BanTablePriv();
+ priv.reset(new BanTablePriv());
// default to unsorted
priv->sortColumn = -1;
@@ -95,6 +95,11 @@ BanTableModel::BanTableModel(ClientModel *parent) :
refresh();
}
+BanTableModel::~BanTableModel()
+{
+ // Intentionally left empty
+}
+
int BanTableModel::rowCount(const QModelIndex &parent) const
{
Q_UNUSED(parent);
diff --git a/src/qt/bantablemodel.h b/src/qt/bantablemodel.h
index fe9600ac0b..3c03d05c05 100644
--- a/src/qt/bantablemodel.h
+++ b/src/qt/bantablemodel.h
@@ -40,6 +40,7 @@ class BanTableModel : public QAbstractTableModel
public:
explicit BanTableModel(ClientModel *parent = 0);
+ ~BanTableModel();
void startAutoRefresh();
void stopAutoRefresh();
@@ -66,7 +67,7 @@ public Q_SLOTS:
private:
ClientModel *clientModel;
QStringList columns;
- BanTablePriv *priv;
+ std::unique_ptr<BanTablePriv> priv;
};
#endif // BITCOIN_QT_BANTABLEMODEL_H
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index c828234f44..4f48e21a22 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -245,6 +245,7 @@ private:
#endif
int returnValue;
const PlatformStyle *platformStyle;
+ std::unique_ptr<QWidget> shutdownWindow;
void startThread();
};
@@ -267,7 +268,22 @@ void BitcoinCore::initialize()
try
{
qDebug() << __func__ << ": Running AppInit2 in thread";
- int rv = AppInit2(threadGroup, scheduler);
+ if (!AppInitBasicSetup())
+ {
+ Q_EMIT initializeResult(false);
+ return;
+ }
+ if (!AppInitParameterInteraction())
+ {
+ Q_EMIT initializeResult(false);
+ return;
+ }
+ if (!AppInitSanityChecks())
+ {
+ Q_EMIT initializeResult(false);
+ return;
+ }
+ int rv = AppInitMain(threadGroup, scheduler);
Q_EMIT initializeResult(rv);
} catch (const std::exception& e) {
handleRunawayException(&e);
@@ -365,9 +381,8 @@ void BitcoinApplication::createWindow(const NetworkStyle *networkStyle)
void BitcoinApplication::createSplashScreen(const NetworkStyle *networkStyle)
{
SplashScreen *splash = new SplashScreen(0, networkStyle);
- // We don't hold a direct pointer to the splash screen after creation, so use
- // Qt::WA_DeleteOnClose to make sure that the window will be deleted eventually.
- splash->setAttribute(Qt::WA_DeleteOnClose);
+ // We don't hold a direct pointer to the splash screen after creation, but the splash
+ // screen will take care of deleting itself when slotFinish happens.
splash->show();
connect(this, SIGNAL(splashFinished(QWidget*)), splash, SLOT(slotFinish(QWidget*)));
connect(this, SIGNAL(requestedShutdown()), splash, SLOT(close()));
@@ -409,6 +424,11 @@ void BitcoinApplication::requestInitialize()
void BitcoinApplication::requestShutdown()
{
+ // Show a simple window indicating shutdown status
+ // Do this first as some of the steps may take some time below,
+ // for example the RPC console may still be executing a command.
+ shutdownWindow.reset(ShutdownWindow::showShutdownWindow(window));
+
qDebug() << __func__ << ": Requesting shutdown";
startThread();
window->hide();
@@ -423,9 +443,6 @@ void BitcoinApplication::requestShutdown()
delete clientModel;
clientModel = 0;
- // Show a simple window indicating shutdown status
- ShutdownWindow::showShutdownWindow(window);
-
// Request shutdown from core thread
Q_EMIT requestedShutdown();
}
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index b2c9a704ed..54ed867de0 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -199,8 +199,8 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle *
unitDisplayControl = new UnitDisplayStatusBarControl(platformStyle);
labelWalletEncryptionIcon = new QLabel();
labelWalletHDStatusIcon = new QLabel();
- connectionsControl = new NetworkToggleStatusBarControl();
- labelBlocksIcon = new QLabel();
+ connectionsControl = new GUIUtil::ClickableLabel();
+ labelBlocksIcon = new GUIUtil::ClickableLabel();
if(enableWallet)
{
frameBlocksLayout->addStretch();
@@ -244,10 +244,14 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle *
// Subscribe to notifications from core
subscribeToCoreSignals();
+ connect(connectionsControl, SIGNAL(clicked(QPoint)), this, SLOT(toggleNetworkActive()));
+
modalOverlay = new ModalOverlay(this->centralWidget());
#ifdef ENABLE_WALLET
- if(enableWallet)
+ if(enableWallet) {
connect(walletFrame, SIGNAL(requestedSyncWarningInfo()), this, SLOT(showModalOverlay()));
+ connect(labelBlocksIcon, SIGNAL(clicked(QPoint)), this, SLOT(showModalOverlay()));
+ }
#endif
}
@@ -490,7 +494,6 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel)
}
#endif // ENABLE_WALLET
unitDisplayControl->setOptionsModel(_clientModel->getOptionsModel());
- connectionsControl->setClientModel(_clientModel);
OptionsModel* optionsModel = _clientModel->getOptionsModel();
if(optionsModel)
@@ -511,6 +514,12 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel)
// Disable context menu on tray icon
trayIconMenu->clear();
}
+ // Propagate cleared model to child objects
+ rpcConsole->setClientModel(nullptr);
+#ifdef ENABLE_WALLET
+ walletFrame->setClientModel(nullptr);
+#endif // ENABLE_WALLET
+ unitDisplayControl->setOptionsModel(nullptr);
}
}
@@ -713,13 +722,19 @@ void BitcoinGUI::updateNetworkState()
default: icon = ":/icons/connect_4"; break;
}
+ QString tooltip;
+
if (clientModel->getNetworkActive()) {
- connectionsControl->setToolTip(tr("%n active connection(s) to Bitcoin network", "", count));
+ tooltip = tr("%n active connection(s) to Bitcoin network", "", count) + QString(".<br>") + tr("Click to disable network activity.");
} else {
- connectionsControl->setToolTip(tr("Network activity disabled"));
+ tooltip = tr("Network activity disabled.") + QString("<br>") + tr("Click to enable network activity again.");
icon = ":/icons/network_disabled";
}
+ // Don't word-wrap this (fixed-width) tooltip
+ tooltip = QString("<nobr>") + tooltip + QString("</nobr>");
+ connectionsControl->setToolTip(tooltip);
+
connectionsControl->setPixmap(platformStyle->SingleColorIcon(icon).pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE));
}
@@ -1158,6 +1173,13 @@ void BitcoinGUI::unsubscribeFromCoreSignals()
uiInterface.ThreadSafeQuestion.disconnect(boost::bind(ThreadSafeMessageBox, this, _1, _3, _4));
}
+void BitcoinGUI::toggleNetworkActive()
+{
+ if (clientModel) {
+ clientModel->setNetworkActive(!clientModel->getNetworkActive());
+ }
+}
+
UnitDisplayStatusBarControl::UnitDisplayStatusBarControl(const PlatformStyle *platformStyle) :
optionsModel(0),
menu(0)
@@ -1185,7 +1207,7 @@ void UnitDisplayStatusBarControl::mousePressEvent(QMouseEvent *event)
/** Creates context menu, its actions, and wires up all the relevant signals for mouse events. */
void UnitDisplayStatusBarControl::createContextMenu()
{
- menu = new QMenu();
+ menu = new QMenu(this);
Q_FOREACH(BitcoinUnits::Unit u, BitcoinUnits::availableUnits())
{
QAction *menuAction = new QAction(QString(BitcoinUnits::name(u)), this);
@@ -1231,18 +1253,3 @@ void UnitDisplayStatusBarControl::onMenuSelection(QAction* action)
optionsModel->setDisplayUnit(action->data());
}
}
-
-void NetworkToggleStatusBarControl::mousePressEvent(QMouseEvent *event)
-{
- if (clientModel) {
- clientModel->setNetworkActive(!clientModel->getNetworkActive());
- }
-}
-
-/** Lets the control know about the Client Model */
-void NetworkToggleStatusBarControl::setClientModel(ClientModel *_clientModel)
-{
- if (_clientModel) {
- this->clientModel = _clientModel;
- }
-}
diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h
index 1b02e77fc4..59540bfe6b 100644
--- a/src/qt/bitcoingui.h
+++ b/src/qt/bitcoingui.h
@@ -26,7 +26,6 @@ class PlatformStyle;
class RPCConsole;
class SendCoinsRecipient;
class UnitDisplayStatusBarControl;
-class NetworkToggleStatusBarControl;
class WalletFrame;
class WalletModel;
class HelpMessageDialog;
@@ -86,7 +85,7 @@ private:
UnitDisplayStatusBarControl *unitDisplayControl;
QLabel *labelWalletEncryptionIcon;
QLabel *labelWalletHDStatusIcon;
- NetworkToggleStatusBarControl *connectionsControl;
+ QLabel *connectionsControl;
QLabel *labelBlocksIcon;
QLabel *progressBarLabel;
QProgressBar *progressBar;
@@ -238,6 +237,9 @@ private Q_SLOTS:
/** When hideTrayIcon setting is changed in OptionsModel hide or show the icon accordingly. */
void setTrayIconVisible(bool);
+ /** Toggle networking */
+ void toggleNetworkActive();
+
void showModalOverlay();
};
@@ -270,17 +272,4 @@ private Q_SLOTS:
void onMenuSelection(QAction* action);
};
-class NetworkToggleStatusBarControl : public QLabel
-{
- Q_OBJECT
-
-public:
- void setClientModel(ClientModel *clientModel);
-protected:
- void mousePressEvent(QMouseEvent *event);
-
-private:
- ClientModel *clientModel;
-};
-
#endif // BITCOIN_QT_BITCOINGUI_H
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index a4bb2f77fe..e63a9cc7b8 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -12,6 +12,7 @@
#include "chainparams.h"
#include "checkpoints.h"
#include "clientversion.h"
+#include "validation.h"
#include "net.h"
#include "txmempool.h"
#include "ui_interface.h"
diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp
index 1a1671f0ee..4d34bf95e9 100644
--- a/src/qt/coincontroldialog.cpp
+++ b/src/qt/coincontroldialog.cpp
@@ -15,7 +15,7 @@
#include "wallet/coincontrol.h"
#include "init.h"
-#include "main.h" // For minRelayTxFee
+#include "validation.h" // For minRelayTxFee
#include "wallet/wallet.h"
#include <boost/assign/list_of.hpp> // for 'map_list_of()'
@@ -35,6 +35,13 @@ QList<CAmount> CoinControlDialog::payAmounts;
CCoinControl* CoinControlDialog::coinControl = new CCoinControl();
bool CoinControlDialog::fSubtractFeeFromAmount = false;
+bool CCoinControlWidgetItem::operator<(const QTreeWidgetItem &other) const {
+ int column = treeWidget()->sortColumn();
+ if (column == CoinControlDialog::COLUMN_AMOUNT || column == CoinControlDialog::COLUMN_DATE || column == CoinControlDialog::COLUMN_CONFIRMATIONS)
+ return data(column, Qt::UserRole).toLongLong() < other.data(column, Qt::UserRole).toLongLong();
+ return QTreeWidgetItem::operator<(other);
+}
+
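The operator< above compares Qt::UserRole data for the amount, date and confirmations columns, which is why updateView() below now sets both a display string and a numeric UserRole value. A condensed sketch of that pairing, as it would appear inside CoinControlDialog::updateView(); nDisplayUnit and nValue are stand-ins for the values used there.

    // Display text for the user, raw number under Qt::UserRole for sorting.
    CCoinControlWidgetItem *item = new CCoinControlWidgetItem(ui->treeWidget);
    item->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, nValue));
    item->setData(COLUMN_AMOUNT, Qt::UserRole, QVariant((qlonglong)nValue));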
CoinControlDialog::CoinControlDialog(const PlatformStyle *_platformStyle, QWidget *parent) :
QDialog(parent),
ui(new Ui::CoinControlDialog),
@@ -52,7 +59,7 @@ CoinControlDialog::CoinControlDialog(const PlatformStyle *_platformStyle, QWidge
unlockAction = new QAction(tr("Unlock unspent"), this); // we need to enable/disable this
// context menu
- contextMenu = new QMenu();
+ contextMenu = new QMenu(this);
contextMenu->addAction(copyAddressAction);
contextMenu->addAction(copyLabelAction);
contextMenu->addAction(copyAmountAction);
@@ -128,11 +135,9 @@ CoinControlDialog::CoinControlDialog(const PlatformStyle *_platformStyle, QWidge
ui->treeWidget->setColumnWidth(COLUMN_CONFIRMATIONS, 110);
ui->treeWidget->setColumnHidden(COLUMN_TXHASH, true); // store transaction hash in this column, but don't show it
ui->treeWidget->setColumnHidden(COLUMN_VOUT_INDEX, true); // store vout index in this column, but don't show it
- ui->treeWidget->setColumnHidden(COLUMN_AMOUNT_INT64, true); // store amount int64 in this column, but don't show it
- ui->treeWidget->setColumnHidden(COLUMN_DATE_INT64, true); // store date int64 in this column, but don't show it
// default view is sorted by amount desc
- sortView(COLUMN_AMOUNT_INT64, Qt::DescendingOrder);
+ sortView(COLUMN_AMOUNT, Qt::DescendingOrder);
// restore list mode and sortorder as a convenience feature
QSettings settings;
@@ -164,15 +169,6 @@ void CoinControlDialog::setModel(WalletModel *_model)
}
}
-// helper function str_pad
-QString CoinControlDialog::strPad(QString s, int nPadLength, QString sPadding)
-{
- while (s.length() < nPadLength)
- s = sPadding + s;
-
- return s;
-}
-
// ok button
void CoinControlDialog::buttonBoxClicked(QAbstractButton* button)
{
@@ -338,7 +334,7 @@ void CoinControlDialog::sortView(int column, Qt::SortOrder order)
sortColumn = column;
sortOrder = order;
ui->treeWidget->sortItems(column, order);
- ui->treeWidget->header()->setSortIndicator(getMappedColumn(sortColumn), sortOrder);
+ ui->treeWidget->header()->setSortIndicator(sortColumn, sortOrder);
}
// treeview: clicked on header
@@ -346,12 +342,10 @@ void CoinControlDialog::headerSectionClicked(int logicalIndex)
{
if (logicalIndex == COLUMN_CHECKBOX) // click on most left column -> do nothing
{
- ui->treeWidget->header()->setSortIndicator(getMappedColumn(sortColumn), sortOrder);
+ ui->treeWidget->header()->setSortIndicator(sortColumn, sortOrder);
}
else
{
- logicalIndex = getMappedColumn(logicalIndex, false);
-
if (sortColumn == logicalIndex)
sortOrder = ((sortOrder == Qt::AscendingOrder) ? Qt::DescendingOrder : Qt::AscendingOrder);
else
@@ -476,21 +470,21 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
nQuantity++;
// Amount
- nAmount += out.tx->vout[out.i].nValue;
+ nAmount += out.tx->tx->vout[out.i].nValue;
// Priority
- dPriorityInputs += (double)out.tx->vout[out.i].nValue * (out.nDepth+1);
+ dPriorityInputs += (double)out.tx->tx->vout[out.i].nValue * (out.nDepth+1);
// Bytes
CTxDestination address;
int witnessversion = 0;
std::vector<unsigned char> witnessprogram;
- if (out.tx->vout[out.i].scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram))
+ if (out.tx->tx->vout[out.i].scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram))
{
nBytesInputs += (32 + 4 + 1 + (107 / WITNESS_SCALE_FACTOR) + 4);
fWitness = true;
}
- else if(ExtractDestination(out.tx->vout[out.i].scriptPubKey, address))
+ else if(ExtractDestination(out.tx->tx->vout[out.i].scriptPubKey, address))
{
CPubKey pubkey;
CKeyID *keyid = boost::get<CKeyID>(&address);
@@ -658,7 +652,7 @@ void CoinControlDialog::updateView()
model->listCoins(mapCoins);
BOOST_FOREACH(const PAIRTYPE(QString, std::vector<COutput>)& coins, mapCoins) {
- QTreeWidgetItem *itemWalletAddress = new QTreeWidgetItem();
+ CCoinControlWidgetItem *itemWalletAddress = new CCoinControlWidgetItem();
itemWalletAddress->setCheckState(COLUMN_CHECKBOX, Qt::Unchecked);
QString sWalletAddress = coins.first;
QString sWalletLabel = model->getAddressTableModel()->labelForAddress(sWalletAddress);
@@ -683,19 +677,19 @@ void CoinControlDialog::updateView()
CAmount nSum = 0;
int nChildren = 0;
BOOST_FOREACH(const COutput& out, coins.second) {
- nSum += out.tx->vout[out.i].nValue;
+ nSum += out.tx->tx->vout[out.i].nValue;
nChildren++;
- QTreeWidgetItem *itemOutput;
- if (treeMode) itemOutput = new QTreeWidgetItem(itemWalletAddress);
- else itemOutput = new QTreeWidgetItem(ui->treeWidget);
+ CCoinControlWidgetItem *itemOutput;
+ if (treeMode) itemOutput = new CCoinControlWidgetItem(itemWalletAddress);
+ else itemOutput = new CCoinControlWidgetItem(ui->treeWidget);
itemOutput->setFlags(flgCheckbox);
itemOutput->setCheckState(COLUMN_CHECKBOX,Qt::Unchecked);
// address
CTxDestination outputAddress;
QString sAddress = "";
- if(ExtractDestination(out.tx->vout[out.i].scriptPubKey, outputAddress))
+ if(ExtractDestination(out.tx->tx->vout[out.i].scriptPubKey, outputAddress))
{
sAddress = QString::fromStdString(CBitcoinAddress(outputAddress).ToString());
@@ -720,15 +714,16 @@ void CoinControlDialog::updateView()
}
// amount
- itemOutput->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, out.tx->vout[out.i].nValue));
- itemOutput->setText(COLUMN_AMOUNT_INT64, strPad(QString::number(out.tx->vout[out.i].nValue), 15, " ")); // padding so that sorting works correctly
+ itemOutput->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, out.tx->tx->vout[out.i].nValue));
+ itemOutput->setData(COLUMN_AMOUNT, Qt::UserRole, QVariant((qlonglong)out.tx->tx->vout[out.i].nValue)); // store as Qt::UserRole data so that sorting works correctly
// date
itemOutput->setText(COLUMN_DATE, GUIUtil::dateTimeStr(out.tx->GetTxTime()));
- itemOutput->setText(COLUMN_DATE_INT64, strPad(QString::number(out.tx->GetTxTime()), 20, " "));
+ itemOutput->setData(COLUMN_DATE, Qt::UserRole, QVariant((qlonglong)out.tx->GetTxTime()));
// confirmations
- itemOutput->setText(COLUMN_CONFIRMATIONS, strPad(QString::number(out.nDepth), 8, " "));
+ itemOutput->setText(COLUMN_CONFIRMATIONS, QString::number(out.nDepth));
+ itemOutput->setData(COLUMN_CONFIRMATIONS, Qt::UserRole, QVariant((qlonglong)out.nDepth));
// transaction hash
uint256 txhash = out.tx->GetHash();
@@ -756,7 +751,7 @@ void CoinControlDialog::updateView()
{
itemWalletAddress->setText(COLUMN_CHECKBOX, "(" + QString::number(nChildren) + ")");
itemWalletAddress->setText(COLUMN_AMOUNT, BitcoinUnits::format(nDisplayUnit, nSum));
- itemWalletAddress->setText(COLUMN_AMOUNT_INT64, strPad(QString::number(nSum), 15, " "));
+ itemWalletAddress->setData(COLUMN_AMOUNT, Qt::UserRole, QVariant((qlonglong)nSum));
}
}
diff --git a/src/qt/coincontroldialog.h b/src/qt/coincontroldialog.h
index 7d73421e3a..7c2269b1eb 100644
--- a/src/qt/coincontroldialog.h
+++ b/src/qt/coincontroldialog.h
@@ -28,6 +28,17 @@ namespace Ui {
#define ASYMP_UTF8 "\xE2\x89\x88"
+class CCoinControlWidgetItem : public QTreeWidgetItem
+{
+public:
+ CCoinControlWidgetItem(QTreeWidget *parent, int type = Type) : QTreeWidgetItem(parent, type) {}
+ CCoinControlWidgetItem(int type = Type) : QTreeWidgetItem(type) {}
+ CCoinControlWidgetItem(QTreeWidgetItem *parent, int type = Type) : QTreeWidgetItem(parent, type) {}
+
+ bool operator<(const QTreeWidgetItem &other) const;
+};
+
+
class CoinControlDialog : public QDialog
{
Q_OBJECT
@@ -59,13 +70,12 @@ private:
const PlatformStyle *platformStyle;
- QString strPad(QString, int, QString);
void sortView(int, Qt::SortOrder);
void updateView();
enum
{
- COLUMN_CHECKBOX,
+ COLUMN_CHECKBOX = 0,
COLUMN_AMOUNT,
COLUMN_LABEL,
COLUMN_ADDRESS,
@@ -73,30 +83,8 @@ private:
COLUMN_CONFIRMATIONS,
COLUMN_TXHASH,
COLUMN_VOUT_INDEX,
- COLUMN_AMOUNT_INT64,
- COLUMN_DATE_INT64
};
-
- // some columns have a hidden column containing the value used for sorting
- int getMappedColumn(int column, bool fVisibleColumn = true)
- {
- if (fVisibleColumn)
- {
- if (column == COLUMN_AMOUNT_INT64)
- return COLUMN_AMOUNT;
- else if (column == COLUMN_DATE_INT64)
- return COLUMN_DATE;
- }
- else
- {
- if (column == COLUMN_AMOUNT)
- return COLUMN_AMOUNT_INT64;
- else if (column == COLUMN_DATE)
- return COLUMN_DATE_INT64;
- }
-
- return column;
- }
+ friend class CCoinControlWidgetItem;
private Q_SLOTS:
void showMenu(const QPoint &);
diff --git a/src/qt/forms/sendcoinsdialog.ui b/src/qt/forms/sendcoinsdialog.ui
index 33db9f8938..0a8afa2e76 100644
--- a/src/qt/forms/sendcoinsdialog.ui
+++ b/src/qt/forms/sendcoinsdialog.ui
@@ -1064,7 +1064,7 @@
<number>0</number>
</property>
<property name="maximum">
- <number>24</number>
+ <number>23</number>
</property>
<property name="pageStep">
<number>1</number>
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 9dc75c2e1a..4806e41439 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -11,7 +11,7 @@
#include "primitives/transaction.h"
#include "init.h"
-#include "main.h" // For minRelayTxFee
+#include "validation.h" // For minRelayTxFee
#include "protocol.h"
#include "script/script.h"
#include "script/standard.h"
@@ -55,6 +55,7 @@
#include <QSettings>
#include <QTextDocument> // for Qt::mightBeRichText
#include <QThread>
+#include <QMouseEvent>
#if QT_VERSION < 0x050000
#include <QUrl>
@@ -585,7 +586,8 @@ void TableViewLastColumnResizingFixer::on_geometriesChanged()
* Initializes all internal variables and prepares
* the resize modes of the last 2 columns of the table.
*/
-TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth) :
+TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent) :
+ QObject(parent),
tableView(table),
lastColumnMinimumWidth(lastColMinimumWidth),
allColumnsMinimumWidth(allColsMinimumWidth)
@@ -985,4 +987,10 @@ QString formateNiceTimeOffset(qint64 secs)
}
return timeBehindText;
}
+
+void ClickableLabel::mousePressEvent(QMouseEvent *event)
+{
+ Q_EMIT clicked(event->pos());
+}
+
} // namespace GUIUtil
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index 64cbd51eb6..9a17d24f07 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -14,6 +14,7 @@
#include <QProgressBar>
#include <QString>
#include <QTableView>
+#include <QLabel>
#include <boost/filesystem.hpp>
@@ -149,7 +150,7 @@ namespace GUIUtil
Q_OBJECT
public:
- TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth);
+ TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent);
void stretchColumnWidth(int column);
private:
@@ -215,6 +216,19 @@ namespace GUIUtil
typedef QProgressBar ProgressBar;
#endif
+ class ClickableLabel : public QLabel
+ {
+ Q_OBJECT
+
+ Q_SIGNALS:
+ /** Emitted when the label is clicked. The relative mouse coordinates of the click are
+ * passed to the signal.
+ */
+ void clicked(const QPoint& point);
+ protected:
+ void mousePressEvent(QMouseEvent *event);
+ };
+
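Used above in bitcoingui.cpp: the label emits clicked(QPoint) on mouse press, so callers simply connect it to a slot, for example:

    // As wired up in BitcoinGUI above.
    connectionsControl = new GUIUtil::ClickableLabel();
    connect(connectionsControl, SIGNAL(clicked(QPoint)), this, SLOT(toggleNetworkActive()));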
} // namespace GUIUtil
#endif // BITCOIN_QT_GUIUTIL_H
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 588059d0c5..2d5e9ff686 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -13,7 +13,7 @@
#include "guiutil.h"
#include "optionsmodel.h"
-#include "main.h" // for DEFAULT_SCRIPTCHECK_THREADS and MAX_SCRIPTCHECK_THREADS
+#include "validation.h" // for DEFAULT_SCRIPTCHECK_THREADS and MAX_SCRIPTCHECK_THREADS
#include "netbase.h"
#include "txdb.h" // for -dbcache defaults
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index f82e153b67..d48c4d91e7 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -13,7 +13,7 @@
#include "amount.h"
#include "init.h"
-#include "main.h" // For DEFAULT_SCRIPTCHECK_THREADS
+#include "validation.h" // For DEFAULT_SCRIPTCHECK_THREADS
#include "net.h"
#include "netbase.h"
#include "txdb.h" // for -dbcache defaults
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index b23b5f2607..d2e1fb23a3 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -81,7 +81,7 @@ private:
int nDisplayUnit;
QString strThirdPartyTxUrls;
bool fCoinControlFeatures;
- /* settings that were overriden by command-line */
+ /* settings that were overridden by command-line */
QString strOverriddenByCommandLine;
// Add option to list of GUI options overridden through command line/config file
diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp
index 7ccdb89c0c..ea1bb2fc94 100644
--- a/src/qt/overviewpage.cpp
+++ b/src/qt/overviewpage.cpp
@@ -25,8 +25,8 @@ class TxViewDelegate : public QAbstractItemDelegate
{
Q_OBJECT
public:
- TxViewDelegate(const PlatformStyle *_platformStyle):
- QAbstractItemDelegate(), unit(BitcoinUnits::BTC),
+ TxViewDelegate(const PlatformStyle *_platformStyle, QObject *parent=nullptr):
+ QAbstractItemDelegate(parent), unit(BitcoinUnits::BTC),
platformStyle(_platformStyle)
{
@@ -119,8 +119,7 @@ OverviewPage::OverviewPage(const PlatformStyle *platformStyle, QWidget *parent)
currentWatchOnlyBalance(-1),
currentWatchUnconfBalance(-1),
currentWatchImmatureBalance(-1),
- txdelegate(new TxViewDelegate(platformStyle)),
- filter(0)
+ txdelegate(new TxViewDelegate(platformStyle, this))
{
ui->setupUi(this);
@@ -220,7 +219,7 @@ void OverviewPage::setWalletModel(WalletModel *model)
if(model && model->getOptionsModel())
{
// Set up transaction list
- filter = new TransactionFilterProxy();
+ filter.reset(new TransactionFilterProxy());
filter->setSourceModel(model->getTransactionTableModel());
filter->setLimit(NUM_ITEMS);
filter->setDynamicSortFilter(true);
@@ -228,7 +227,7 @@ void OverviewPage::setWalletModel(WalletModel *model)
filter->setShowInactive(false);
filter->sort(TransactionTableModel::Date, Qt::DescendingOrder);
- ui->listTransactions->setModel(filter);
+ ui->listTransactions->setModel(filter.get());
ui->listTransactions->setModelColumn(TransactionTableModel::ToAddress);
// Keep up to date with wallet
diff --git a/src/qt/overviewpage.h b/src/qt/overviewpage.h
index 65cd3341b6..ffe0de328c 100644
--- a/src/qt/overviewpage.h
+++ b/src/qt/overviewpage.h
@@ -8,6 +8,7 @@
#include "amount.h"
#include <QWidget>
+#include <memory>
class ClientModel;
class TransactionFilterProxy;
@@ -56,7 +57,7 @@ private:
CAmount currentWatchImmatureBalance;
TxViewDelegate *txdelegate;
- TransactionFilterProxy *filter;
+ std::unique_ptr<TransactionFilterProxy> filter;
private Q_SLOTS:
void updateDisplayUnit();
diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp
index 478f5ccf12..d5f0156d6c 100644
--- a/src/qt/paymentserver.cpp
+++ b/src/qt/paymentserver.cpp
@@ -10,7 +10,7 @@
#include "base58.h"
#include "chainparams.h"
-#include "main.h" // For minRelayTxFee
+#include "validation.h" // For minRelayTxFee
#include "ui_interface.h"
#include "util.h"
#include "wallet/wallet.h"
@@ -58,14 +58,19 @@ const char* BIP71_MIMETYPE_PAYMENTREQUEST = "application/bitcoin-paymentrequest"
// BIP70 max payment request size in bytes (DoS protection)
const qint64 BIP70_MAX_PAYMENTREQUEST_SIZE = 50000;
-X509_STORE* PaymentServer::certStore = NULL;
-void PaymentServer::freeCertStore()
+struct X509StoreDeleter {
+ void operator()(X509_STORE* b) {
+ X509_STORE_free(b);
+ }
+};
+
+struct X509Deleter {
+ void operator()(X509* b) { X509_free(b); }
+};
+
+namespace // Anon namespace
{
- if (PaymentServer::certStore != NULL)
- {
- X509_STORE_free(PaymentServer::certStore);
- PaymentServer::certStore = NULL;
- }
+ std::unique_ptr<X509_STORE, X509StoreDeleter> certStore;
}
//
@@ -107,20 +112,15 @@ static void ReportInvalidCertificate(const QSslCertificate& cert)
//
void PaymentServer::LoadRootCAs(X509_STORE* _store)
{
- if (PaymentServer::certStore == NULL)
- atexit(PaymentServer::freeCertStore);
- else
- freeCertStore();
-
// Unit tests mostly use this, to pass in fake root CAs:
if (_store)
{
- PaymentServer::certStore = _store;
+ certStore.reset(_store);
return;
}
// Normal execution, use either -rootcertificates or system certs:
- PaymentServer::certStore = X509_STORE_new();
+ certStore.reset(X509_STORE_new());
// Note: use "-system-" default here so that users can pass -rootcertificates=""
// and get 'I don't like X.509 certificates, don't trust anybody' behavior:
@@ -167,11 +167,11 @@ void PaymentServer::LoadRootCAs(X509_STORE* _store)
QByteArray certData = cert.toDer();
const unsigned char *data = (const unsigned char *)certData.data();
- X509* x509 = d2i_X509(0, &data, certData.size());
- if (x509 && X509_STORE_add_cert(PaymentServer::certStore, x509))
+ std::unique_ptr<X509, X509Deleter> x509(d2i_X509(0, &data, certData.size()));
+ if (x509 && X509_STORE_add_cert(certStore.get(), x509.get()))
{
- // Note: X509_STORE_free will free the X509* objects when
- // the PaymentServer is destroyed
+ // Note: X509_STORE increases the reference count to the X509 object,
+ // we still have to release our reference to it.
++nRootCerts;
}
else
@@ -550,7 +550,7 @@ bool PaymentServer::processPaymentRequest(const PaymentRequestPlus& request, Sen
recipient.paymentRequest = request;
recipient.message = GUIUtil::HtmlEscape(request.getDetails().memo());
- request.getMerchant(PaymentServer::certStore, recipient.authenticatedMerchant);
+ request.getMerchant(certStore.get(), recipient.authenticatedMerchant);
QList<std::pair<CScript, CAmount> > sendingTos = request.getPayTo();
QStringList addresses;
@@ -807,3 +807,8 @@ bool PaymentServer::verifyAmount(const CAmount& requestAmount)
}
return fVerified;
}
+
+X509_STORE* PaymentServer::getCertStore()
+{
+ return certStore.get();
+}
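The paymentserver hunks above replace the manually managed static X509_STORE, previously freed through an atexit() hook, with a std::unique_ptr carrying a custom deleter, so the OpenSSL object is released whenever the pointer is reset or destroyed. A self-contained sketch of the same pattern, using a hypothetical C-style Widget API instead of OpenSSL:

    #include <cstdio>
    #include <memory>

    // Hypothetical C API that hands out raw pointers (for illustration only).
    struct Widget { int id; };
    Widget* widget_new(int id) { return new Widget{id}; }
    void widget_free(Widget* w) { std::puts("widget freed"); delete w; }

    // Custom deleter, the same shape as X509StoreDeleter above.
    struct WidgetDeleter {
        void operator()(Widget* w) const { widget_free(w); }
    };

    int main()
    {
        std::unique_ptr<Widget, WidgetDeleter> store(widget_new(1));
        store.reset(widget_new(2)); // frees widget 1, takes ownership of widget 2
        return 0;                   // widget 2 freed automatically at scope exit
    }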
diff --git a/src/qt/paymentserver.h b/src/qt/paymentserver.h
index 2d27ed078b..7202e7dada 100644
--- a/src/qt/paymentserver.h
+++ b/src/qt/paymentserver.h
@@ -83,7 +83,7 @@ public:
static void LoadRootCAs(X509_STORE* store = NULL);
// Return certificate store
- static X509_STORE* getCertStore() { return certStore; }
+ static X509_STORE* getCertStore();
// OptionsModel is used for getting proxy settings and display unit
void setOptionsModel(OptionsModel *optionsModel);
@@ -140,9 +140,6 @@ private:
bool saveURIs; // true during startup
QLocalServer* uriServer;
- static X509_STORE* certStore; // Trusted root certificates
- static void freeCertStore();
-
QNetworkAccessManager* netManager; // Used to fetch payment requests
OptionsModel *optionsModel;
diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp
index a2f9471fcc..dd4bd55396 100644
--- a/src/qt/peertablemodel.cpp
+++ b/src/qt/peertablemodel.cpp
@@ -8,6 +8,7 @@
#include "guiconstants.h"
#include "guiutil.h"
+#include "validation.h" // for cs_main
#include "sync.h"
#include <QDebug>
@@ -114,12 +115,12 @@ PeerTableModel::PeerTableModel(ClientModel *parent) :
timer(0)
{
columns << tr("NodeId") << tr("Node/Service") << tr("User Agent") << tr("Ping");
- priv = new PeerTablePriv();
+ priv.reset(new PeerTablePriv());
// default to unsorted
priv->sortColumn = -1;
// set up timer for auto refresh
- timer = new QTimer();
+ timer = new QTimer(this);
connect(timer, SIGNAL(timeout()), SLOT(refresh()));
timer->setInterval(MODEL_UPDATE_DELAY);
@@ -127,6 +128,11 @@ PeerTableModel::PeerTableModel(ClientModel *parent) :
refresh();
}
+PeerTableModel::~PeerTableModel()
+{
+ // Intentionally left empty
+}
+
void PeerTableModel::startAutoRefresh()
{
timer->start();
diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h
index a4f7bbdb3d..3e4f0a68c4 100644
--- a/src/qt/peertablemodel.h
+++ b/src/qt/peertablemodel.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_QT_PEERTABLEMODEL_H
#define BITCOIN_QT_PEERTABLEMODEL_H
-#include "main.h" // For CNodeStateStats
+#include "net_processing.h" // For CNodeStateStats
#include "net.h"
#include <QAbstractTableModel>
@@ -46,6 +46,7 @@ class PeerTableModel : public QAbstractTableModel
public:
explicit PeerTableModel(ClientModel *parent = 0);
+ ~PeerTableModel();
const CNodeCombinedStats *getNodeStats(int idx);
int getRowByNodeId(NodeId nodeid);
void startAutoRefresh();
@@ -75,7 +76,7 @@ public Q_SLOTS:
private:
ClientModel *clientModel;
QStringList columns;
- PeerTablePriv *priv;
+ std::unique_ptr<PeerTablePriv> priv;
QTimer *timer;
};
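Turning `priv` into a std::unique_ptr<PeerTablePriv> is also why the patch adds an out-of-line ~PeerTableModel() whose body is intentionally empty: unique_ptr's default deleter must see the complete type when the destructor is instantiated, and PeerTablePriv is only forward-declared in the header. A compact sketch of that constraint, with stand-in names:

    #include <memory>

    // --- header-like part: Impl is only forward-declared here ---
    class Impl;
    class Holder {
    public:
        Holder();
        ~Holder();                    // declared here, defined where Impl is complete
    private:
        std::unique_ptr<Impl> impl;
    };

    // --- source-like part: Impl is complete, so deleting it is well-formed ---
    class Impl { public: int x = 0; };
    Holder::Holder() : impl(new Impl()) {}
    Holder::~Holder() {}              // intentionally empty, like ~PeerTableModel()

    int main() { Holder h; return 0; }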
diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp
index b50cad4975..dd83547a91 100644
--- a/src/qt/receivecoinsdialog.cpp
+++ b/src/qt/receivecoinsdialog.cpp
@@ -25,6 +25,7 @@
ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWidget *parent) :
QDialog(parent),
ui(new Ui::ReceiveCoinsDialog),
+ columnResizingFixer(0),
model(0),
platformStyle(_platformStyle)
{
@@ -49,7 +50,7 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid
QAction *copyAmountAction = new QAction(tr("Copy amount"), this);
// context menu
- contextMenu = new QMenu();
+ contextMenu = new QMenu(this);
contextMenu->addAction(copyURIAction);
contextMenu->addAction(copyLabelAction);
contextMenu->addAction(copyMessageAction);
@@ -91,7 +92,7 @@ void ReceiveCoinsDialog::setModel(WalletModel *_model)
SIGNAL(selectionChanged(QItemSelection, QItemSelection)), this,
SLOT(recentRequestsView_selectionChanged(QItemSelection, QItemSelection)));
// Last 2 columns are set by the columnResizingFixer, when the table geometry is ready.
- columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(tableView, AMOUNT_MINIMUM_COLUMN_WIDTH, DATE_COLUMN_WIDTH);
+ columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(tableView, AMOUNT_MINIMUM_COLUMN_WIDTH, DATE_COLUMN_WIDTH, this);
}
}
diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp
index 998c9176d7..f896461735 100644
--- a/src/qt/receiverequestdialog.cpp
+++ b/src/qt/receiverequestdialog.cpp
@@ -32,7 +32,7 @@
QRImageWidget::QRImageWidget(QWidget *parent):
QLabel(parent), contextMenu(0)
{
- contextMenu = new QMenu();
+ contextMenu = new QMenu(this);
QAction *saveImageAction = new QAction(tr("&Save Image..."), this);
connect(saveImageAction, SIGNAL(triggered()), this, SLOT(saveImage()));
contextMenu->addAction(saveImageAction);
diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp
index 2335d6b282..35d37bb22b 100644
--- a/src/qt/recentrequeststablemodel.cpp
+++ b/src/qt/recentrequeststablemodel.cpp
@@ -14,7 +14,7 @@
#include <boost/foreach.hpp>
RecentRequestsTableModel::RecentRequestsTableModel(CWallet *wallet, WalletModel *parent) :
- walletModel(parent)
+ QAbstractTableModel(parent), walletModel(parent)
{
Q_UNUSED(wallet);
nReceiveRequestsMaxId = 0;
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 47af6a5724..520d229901 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -382,7 +382,6 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) :
// based timer interface
RPCSetTimerInterfaceIfUnset(rpcTimerInterface);
- startExecutor();
setTrafficGraphRange(INITIAL_TRAFFIC_GRAPH_MINS);
ui->detailWidget->hide();
@@ -396,7 +395,6 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) :
RPCConsole::~RPCConsole()
{
GUIUtil::saveWindowGeometry("nRPCConsoleWindow", this);
- Q_EMIT stopExecutor();
RPCUnsetTimerInterface(rpcTimerInterface);
delete rpcTimerInterface;
delete ui;
@@ -486,7 +484,7 @@ void RPCConsole::setClientModel(ClientModel *model)
QAction* banAction365d = new QAction(tr("Ban for") + " " + tr("1 &year"), this);
// create peer table context menu
- peersTableContextMenu = new QMenu();
+ peersTableContextMenu = new QMenu(this);
peersTableContextMenu->addAction(disconnectAction);
peersTableContextMenu->addAction(banAction1h);
peersTableContextMenu->addAction(banAction24h);
@@ -534,7 +532,7 @@ void RPCConsole::setClientModel(ClientModel *model)
QAction* unbanAction = new QAction(tr("&Unban"), this);
// create ban table context menu
- banTableContextMenu = new QMenu();
+ banTableContextMenu = new QMenu(this);
banTableContextMenu->addAction(unbanAction);
// ban table context menu signals
@@ -565,6 +563,14 @@ void RPCConsole::setClientModel(ClientModel *model)
autoCompleter = new QCompleter(wordList, this);
ui->lineEdit->setCompleter(autoCompleter);
autoCompleter->popup()->installEventFilter(this);
+ // Start thread to execute RPC commands.
+ startExecutor();
+ }
+ if (!model) {
+ // Client model is being set to 0, this means shutdown() is about to be called.
+ // Make sure we clean up the executor thread
+ Q_EMIT stopExecutor();
+ thread.wait();
}
}
@@ -759,9 +765,8 @@ void RPCConsole::browseHistory(int offset)
void RPCConsole::startExecutor()
{
- QThread *thread = new QThread;
RPCExecutor *executor = new RPCExecutor();
- executor->moveToThread(thread);
+ executor->moveToThread(&thread);
// Replies from executor object must go to this object
connect(executor, SIGNAL(reply(int,QString)), this, SLOT(message(int,QString)));
@@ -769,16 +774,15 @@ void RPCConsole::startExecutor()
connect(this, SIGNAL(cmdRequest(QString)), executor, SLOT(request(QString)));
// On stopExecutor signal
- // - queue executor for deletion (in execution thread)
// - quit the Qt event loop in the execution thread
- connect(this, SIGNAL(stopExecutor()), executor, SLOT(deleteLater()));
- connect(this, SIGNAL(stopExecutor()), thread, SLOT(quit()));
- // Queue the thread for deletion (in this thread) when it is finished
- connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));
+ connect(this, SIGNAL(stopExecutor()), &thread, SLOT(quit()));
+ // - queue executor for deletion (in execution thread)
+ connect(&thread, SIGNAL(finished()), executor, SLOT(deleteLater()), Qt::DirectConnection);
// Default implementation of QThread::run() simply spins up an event loop in the thread,
// which is what we want.
- thread->start();
+ thread.start();
}
void RPCConsole::on_tabWidget_currentChanged(int index)
diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h
index 8c20379a8c..344d5ecb98 100644
--- a/src/qt/rpcconsole.h
+++ b/src/qt/rpcconsole.h
@@ -12,6 +12,7 @@
#include <QWidget>
#include <QCompleter>
+#include <QThread>
class ClientModel;
class PlatformStyle;
@@ -148,6 +149,7 @@ private:
QMenu *banTableContextMenu;
int consoleFontSize;
QCompleter *autoCompleter;
+ QThread thread;
/** Update UI with latest network info from model. */
void updateNetworkState();
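With the executor thread now a by-value QThread member, the console starts it only once a client model is attached and, when the model is cleared at shutdown, emits stopExecutor() and waits for the thread before the object goes away. The same start/stop discipline, shown as a self-contained sketch with std::thread standing in for the QThread plumbing (this is not the Qt code itself):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <string>
    #include <thread>

    class Executor {
    public:
        void start() { worker = std::thread([this] { run(); }); }
        void stop() {                          // analogous to Q_EMIT stopExecutor(); thread.wait();
            { std::lock_guard<std::mutex> l(m); quit = true; }
            cv.notify_one();
            worker.join();                     // wait for the thread before destruction
        }
        void request(std::string cmd) {
            { std::lock_guard<std::mutex> l(m); jobs.push(std::move(cmd)); }
            cv.notify_one();
        }
    private:
        void run() {
            std::unique_lock<std::mutex> l(m);
            while (true) {
                cv.wait(l, [this] { return quit || !jobs.empty(); });
                if (quit && jobs.empty()) return;   // stop requested and queue drained
                std::string cmd = std::move(jobs.front()); jobs.pop();
                l.unlock();
                std::cout << "executing: " << cmd << "\n";
                l.lock();
            }
        }
        std::thread worker;
        std::mutex m;
        std::condition_variable cv;
        std::queue<std::string> jobs;
        bool quit = false;
    };

    int main()
    {
        Executor e;
        e.start();
        e.request("getblockcount");
        e.stop();
        return 0;
    }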
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 57b2179435..13210a1adc 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -17,7 +17,7 @@
#include "base58.h"
#include "wallet/coincontrol.h"
-#include "main.h" // mempool and minRelayTxFee
+#include "validation.h" // mempool and minRelayTxFee
#include "ui_interface.h"
#include "txmempool.h"
#include "wallet/wallet.h"
@@ -175,7 +175,7 @@ void SendCoinsDialog::setModel(WalletModel *_model)
// set the smartfee-sliders default value (wallets default conf.target or last stored value)
QSettings settings;
if (settings.value("nSmartFeeSliderPosition").toInt() == 0)
- ui->sliderSmartFee->setValue(ui->sliderSmartFee->maximum() - model->getDefaultConfirmTarget() + 1);
+ ui->sliderSmartFee->setValue(ui->sliderSmartFee->maximum() - model->getDefaultConfirmTarget() + 2);
else
ui->sliderSmartFee->setValue(settings.value("nSmartFeeSliderPosition").toInt());
}
@@ -241,7 +241,7 @@ void SendCoinsDialog::on_sendButton_clicked()
if (model->getOptionsModel()->getCoinControlFeatures())
ctrl = *CoinControlDialog::coinControl;
if (ui->radioSmartFee->isChecked())
- ctrl.nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 1;
+ ctrl.nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
else
ctrl.nConfirmTarget = 0;
@@ -601,7 +601,7 @@ void SendCoinsDialog::updateGlobalFeeVariables()
{
if (ui->radioSmartFee->isChecked())
{
- int nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 1;
+ int nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
payTxFee = CFeeRate(0);
// set nMinimumTotalFee to 0 to not accidentally pay a custom fee
@@ -646,7 +646,7 @@ void SendCoinsDialog::updateSmartFeeLabel()
if(!model || !model->getOptionsModel())
return;
- int nBlocksToConfirm = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 1;
+ int nBlocksToConfirm = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
int estimateFoundAtBlocks = nBlocksToConfirm;
CFeeRate feeRate = mempool.estimateSmartFee(nBlocksToConfirm, &estimateFoundAtBlocks);
if (feeRate <= CFeeRate(0)) // not enough data => minfee
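The recurring `maximum() - value() + 2` expression maps the inverted smart-fee slider onto a confirmation target whose fastest setting is now 2 blocks rather than 1, matching the estimatefee help added later in this patch, which notes that a reliable 1-block estimate cannot be produced. A tiny worked example of the mapping, assuming a slider range of 1 to 24 (the real bounds live in the .ui file):

    #include <cassert>

    // Hypothetical bounds; only the formula is taken from the patch.
    int ConfirmTargetFromSlider(int value, int minimum, int maximum)
    {
        assert(minimum <= value && value <= maximum);
        return maximum - value + 2;       // rightmost slider position now means "2 blocks"
    }

    int main()
    {
        assert(ConfirmTargetFromSlider(24, 1, 24) == 2);    // fastest selectable estimate
        assert(ConfirmTargetFromSlider(1, 1, 24) == 25);    // slowest
        return 0;
    }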
diff --git a/src/qt/signverifymessagedialog.cpp b/src/qt/signverifymessagedialog.cpp
index 3e42f3a7b0..e28a6b47af 100644
--- a/src/qt/signverifymessagedialog.cpp
+++ b/src/qt/signverifymessagedialog.cpp
@@ -12,7 +12,7 @@
#include "base58.h"
#include "init.h"
-#include "main.h" // For strMessageMagic
+#include "validation.h" // For strMessageMagic
#include "wallet/wallet.h"
#include <string>
diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp
index cd27385653..1b6e28e93e 100644
--- a/src/qt/splashscreen.cpp
+++ b/src/qt/splashscreen.cpp
@@ -147,6 +147,7 @@ void SplashScreen::slotFinish(QWidget *mainWin)
if (isMinimized())
showNormal();
hide();
+ deleteLater(); // No more need for this
}
static void InitMessage(SplashScreen *splash, const std::string &message)
diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp
index 3dae33bafb..69757f9a93 100644
--- a/src/qt/test/rpcnestedtests.cpp
+++ b/src/qt/test/rpcnestedtests.cpp
@@ -6,7 +6,7 @@
#include "chainparams.h"
#include "consensus/validation.h"
-#include "main.h"
+#include "validation.h"
#include "rpc/register.h"
#include "rpc/server.h"
#include "rpcconsole.h"
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index 65144e7865..e7d03a6119 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -11,7 +11,7 @@
#include "base58.h"
#include "consensus/consensus.h"
-#include "main.h"
+#include "validation.h"
#include "script/script.h"
#include "timedata.h"
#include "util.h"
@@ -26,10 +26,10 @@ QString TransactionDesc::FormatTxStatus(const CWalletTx& wtx)
AssertLockHeld(cs_main);
if (!CheckFinalTx(wtx))
{
- if (wtx.nLockTime < LOCKTIME_THRESHOLD)
- return tr("Open for %n more block(s)", "", wtx.nLockTime - chainActive.Height());
+ if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD)
+ return tr("Open for %n more block(s)", "", wtx.tx->nLockTime - chainActive.Height());
else
- return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.nLockTime));
+ return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.tx->nLockTime));
}
else
{
@@ -133,7 +133,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
// Coinbase
//
CAmount nUnmatured = 0;
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
nUnmatured += wallet->GetCredit(txout, ISMINE_ALL);
strHTML += "<b>" + tr("Credit") + ":</b> ";
if (wtx.IsInMainChain())
@@ -152,14 +152,14 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
else
{
isminetype fAllFromMe = ISMINE_SPENDABLE;
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
isminetype mine = wallet->IsMine(txin);
if(fAllFromMe > mine) fAllFromMe = mine;
}
isminetype fAllToMe = ISMINE_SPENDABLE;
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
{
isminetype mine = wallet->IsMine(txout);
if(fAllToMe > mine) fAllToMe = mine;
@@ -173,7 +173,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
//
// Debit
//
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
{
// Ignore change
isminetype toSelf = wallet->IsMine(txout);
@@ -212,7 +212,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
strHTML += "<b>" + tr("Total credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, nValue) + "<br>";
}
- CAmount nTxFee = nDebit - wtx.GetValueOut();
+ CAmount nTxFee = nDebit - wtx.tx->GetValueOut();
if (nTxFee > 0)
strHTML += "<b>" + tr("Transaction fee") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -nTxFee) + "<br>";
}
@@ -221,10 +221,10 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
//
// Mixed debit transaction
//
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
if (wallet->IsMine(txin))
strHTML += "<b>" + tr("Debit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -wallet->GetDebit(txin, ISMINE_ALL)) + "<br>";
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
if (wallet->IsMine(txout))
strHTML += "<b>" + tr("Credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, wallet->GetCredit(txout, ISMINE_ALL)) + "<br>";
}
@@ -241,7 +241,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
strHTML += "<br><b>" + tr("Comment") + ":</b><br>" + GUIUtil::HtmlEscape(wtx.mapValue["comment"], true) + "<br>";
strHTML += "<b>" + tr("Transaction ID") + ":</b> " + rec->getTxID() + "<br>";
- strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.GetTotalSize()) + " bytes<br>";
+ strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.tx->GetTotalSize()) + " bytes<br>";
strHTML += "<b>" + tr("Output index") + ":</b> " + QString::number(rec->getOutputIndex()) + "<br>";
// Message from normal bitcoin:URI (bitcoin:123...?message=example)
@@ -276,20 +276,20 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
if (fDebug)
{
strHTML += "<hr><br>" + tr("Debug information") + "<br><br>";
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
if(wallet->IsMine(txin))
strHTML += "<b>" + tr("Debit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, -wallet->GetDebit(txin, ISMINE_ALL)) + "<br>";
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
if(wallet->IsMine(txout))
strHTML += "<b>" + tr("Credit") + ":</b> " + BitcoinUnits::formatHtmlWithUnit(unit, wallet->GetCredit(txout, ISMINE_ALL)) + "<br>";
strHTML += "<br><b>" + tr("Transaction") + ":</b><br>";
- strHTML += GUIUtil::HtmlEscape(wtx.ToString(), true);
+ strHTML += GUIUtil::HtmlEscape(wtx.tx->ToString(), true);
strHTML += "<br><b>" + tr("Inputs") + ":</b>";
strHTML += "<ul>";
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
COutPoint prevout = txin.prevout;
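These GUI hunks track CWalletTx now holding its transaction behind a shared, immutable CTransactionRef, so fields such as vin, vout and nLockTime are reached through `wtx.tx->` rather than directly on the wallet transaction. A minimal sketch of that indirection with stand-in types (not the real wallet classes):

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <vector>

    struct TxOut { int64_t nValue; };
    struct Tx { std::vector<TxOut> vout; uint32_t nLockTime; };
    using TxRef = std::shared_ptr<const Tx>;

    struct WalletTx {
        TxRef tx;           // the underlying transaction, shared and read-only
        // wallet-only metadata (confirmations, labels, ...) would live alongside it
    };

    int main()
    {
        WalletTx wtx{std::make_shared<const Tx>(Tx{{{50}, {25}}, 0})};
        int64_t total = 0;
        for (const TxOut& out : wtx.tx->vout)   // wtx.vout no longer exists; go through tx->
            total += out.nValue;
        std::cout << total << "\n";             // prints 75
        return 0;
    }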
diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp
index 4fe47181f6..a1d1422317 100644
--- a/src/qt/transactionrecord.cpp
+++ b/src/qt/transactionrecord.cpp
@@ -6,7 +6,7 @@
#include "base58.h"
#include "consensus/consensus.h"
-#include "main.h"
+#include "validation.h"
#include "timedata.h"
#include "wallet/wallet.h"
@@ -47,7 +47,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet *
//
// Credit
//
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
{
isminetype mine = wallet->IsMine(txout);
if(mine)
@@ -83,7 +83,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet *
{
bool involvesWatchAddress = false;
isminetype fAllFromMe = ISMINE_SPENDABLE;
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
isminetype mine = wallet->IsMine(txin);
if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true;
@@ -91,7 +91,7 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet *
}
isminetype fAllToMe = ISMINE_SPENDABLE;
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
{
isminetype mine = wallet->IsMine(txout);
if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true;
@@ -112,11 +112,11 @@ QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet *
//
// Debit
//
- CAmount nTxFee = nDebit - wtx.GetValueOut();
+ CAmount nTxFee = nDebit - wtx.tx->GetValueOut();
- for (unsigned int nOut = 0; nOut < wtx.vout.size(); nOut++)
+ for (unsigned int nOut = 0; nOut < wtx.tx->vout.size(); nOut++)
{
- const CTxOut& txout = wtx.vout[nOut];
+ const CTxOut& txout = wtx.tx->vout[nOut];
TransactionRecord sub(hash, nTime);
sub.idx = parts.size();
sub.involvesWatchAddress = involvesWatchAddress;
@@ -190,15 +190,15 @@ void TransactionRecord::updateStatus(const CWalletTx &wtx)
if (!CheckFinalTx(wtx))
{
- if (wtx.nLockTime < LOCKTIME_THRESHOLD)
+ if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD)
{
status.status = TransactionStatus::OpenUntilBlock;
- status.open_for = wtx.nLockTime - chainActive.Height();
+ status.open_for = wtx.tx->nLockTime - chainActive.Height();
}
else
{
status.status = TransactionStatus::OpenUntilDate;
- status.open_for = wtx.nLockTime;
+ status.open_for = wtx.tx->nLockTime;
}
}
// For generated transactions, determine maturity
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index 52261ff04b..da0742aa6a 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -14,7 +14,7 @@
#include "walletmodel.h"
#include "core_io.h"
-#include "main.h"
+#include "validation.h"
#include "sync.h"
#include "uint256.h"
#include "util.h"
diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp
index 856b16d2c4..f5cbde6238 100644
--- a/src/qt/transactionview.cpp
+++ b/src/qt/transactionview.cpp
@@ -37,7 +37,7 @@
TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *parent) :
QWidget(parent), model(0), transactionProxyModel(0),
- transactionView(0), abandonAction(0)
+ transactionView(0), abandonAction(0), columnResizingFixer(0)
{
// Build filter row
setContentsMargins(0,0,0,0);
@@ -147,7 +147,7 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa
QAction *editLabelAction = new QAction(tr("Edit label"), this);
QAction *showDetailsAction = new QAction(tr("Show transaction details"), this);
- contextMenu = new QMenu();
+ contextMenu = new QMenu(this);
contextMenu->addAction(copyAddressAction);
contextMenu->addAction(copyLabelAction);
contextMenu->addAction(copyAmountAction);
@@ -212,7 +212,7 @@ void TransactionView::setModel(WalletModel *_model)
transactionView->setColumnWidth(TransactionTableModel::Type, TYPE_COLUMN_WIDTH);
transactionView->setColumnWidth(TransactionTableModel::Amount, AMOUNT_MINIMUM_COLUMN_WIDTH);
- columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(transactionView, AMOUNT_MINIMUM_COLUMN_WIDTH, MINIMUM_COLUMN_WIDTH);
+ columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(transactionView, AMOUNT_MINIMUM_COLUMN_WIDTH, MINIMUM_COLUMN_WIDTH, this);
if (_model->getOptionsModel())
{
diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp
index 947bcdb15a..4ec022881c 100644
--- a/src/qt/utilitydialog.cpp
+++ b/src/qt/utilitydialog.cpp
@@ -171,22 +171,20 @@ ShutdownWindow::ShutdownWindow(QWidget *parent, Qt::WindowFlags f):
setLayout(layout);
}
-void ShutdownWindow::showShutdownWindow(BitcoinGUI *window)
+QWidget *ShutdownWindow::showShutdownWindow(BitcoinGUI *window)
{
if (!window)
- return;
+ return nullptr;
// Show a simple window indicating shutdown status
QWidget *shutdownWindow = new ShutdownWindow();
- // We don't hold a direct pointer to the shutdown window after creation, so use
- // Qt::WA_DeleteOnClose to make sure that the window will be deleted eventually.
- shutdownWindow->setAttribute(Qt::WA_DeleteOnClose);
shutdownWindow->setWindowTitle(window->windowTitle());
// Center shutdown window at where main window was
const QPoint global = window->mapToGlobal(window->rect().center());
shutdownWindow->move(global.x() - shutdownWindow->width() / 2, global.y() - shutdownWindow->height() / 2);
shutdownWindow->show();
+ return shutdownWindow;
}
void ShutdownWindow::closeEvent(QCloseEvent *event)
diff --git a/src/qt/utilitydialog.h b/src/qt/utilitydialog.h
index 843bd7f67b..b930429578 100644
--- a/src/qt/utilitydialog.h
+++ b/src/qt/utilitydialog.h
@@ -43,7 +43,7 @@ class ShutdownWindow : public QWidget
public:
ShutdownWindow(QWidget *parent=0, Qt::WindowFlags f=0);
- static void showShutdownWindow(BitcoinGUI *window);
+ static QWidget *showShutdownWindow(BitcoinGUI *window);
protected:
void closeEvent(QCloseEvent *event);
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 3490d1c1cc..a78fc90d2c 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -14,9 +14,11 @@
#include "base58.h"
#include "keystore.h"
-#include "main.h"
+#include "validation.h"
+#include "net.h" // for g_connman
#include "sync.h"
#include "ui_interface.h"
+#include "util.h" // for GetBoolArg
#include "wallet/wallet.h"
#include "wallet/walletdb.h" // for BackupWallet
@@ -65,7 +67,7 @@ CAmount WalletModel::getBalance(const CCoinControl *coinControl) const
wallet->AvailableCoins(vCoins, true, coinControl);
BOOST_FOREACH(const COutput& out, vCoins)
if(out.fSpendable)
- nBalance += out.tx->vout[out.i].nValue;
+ nBalance += out.tx->tx->vout[out.i].nValue;
return nBalance;
}
@@ -607,7 +609,7 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins)
int nDepth = wallet->mapWallet[outpoint.hash].GetDepthInMainChain();
if (nDepth < 0) continue;
COutput out(&wallet->mapWallet[outpoint.hash], outpoint.n, nDepth, true, true);
- if (outpoint.n < out.tx->vout.size() && wallet->IsMine(out.tx->vout[outpoint.n]) == ISMINE_SPENDABLE)
+ if (outpoint.n < out.tx->tx->vout.size() && wallet->IsMine(out.tx->tx->vout[outpoint.n]) == ISMINE_SPENDABLE)
vCoins.push_back(out);
}
@@ -615,14 +617,14 @@ void WalletModel::listCoins(std::map<QString, std::vector<COutput> >& mapCoins)
{
COutput cout = out;
- while (wallet->IsChange(cout.tx->vout[cout.i]) && cout.tx->vin.size() > 0 && wallet->IsMine(cout.tx->vin[0]))
+ while (wallet->IsChange(cout.tx->tx->vout[cout.i]) && cout.tx->tx->vin.size() > 0 && wallet->IsMine(cout.tx->tx->vin[0]))
{
- if (!wallet->mapWallet.count(cout.tx->vin[0].prevout.hash)) break;
- cout = COutput(&wallet->mapWallet[cout.tx->vin[0].prevout.hash], cout.tx->vin[0].prevout.n, 0, true, true);
+ if (!wallet->mapWallet.count(cout.tx->tx->vin[0].prevout.hash)) break;
+ cout = COutput(&wallet->mapWallet[cout.tx->tx->vin[0].prevout.hash], cout.tx->tx->vin[0].prevout.n, 0, true, true);
}
CTxDestination address;
- if(!out.fSpendable || !ExtractDestination(cout.tx->vout[cout.i].scriptPubKey, address))
+ if(!out.fSpendable || !ExtractDestination(cout.tx->tx->vout[cout.i].scriptPubKey, address))
continue;
mapCoins[QString::fromStdString(CBitcoinAddress(address).ToString())].push_back(out);
}
diff --git a/src/qt/walletmodeltransaction.cpp b/src/qt/walletmodeltransaction.cpp
index fdec6a1c86..f9a95bed42 100644
--- a/src/qt/walletmodeltransaction.cpp
+++ b/src/qt/walletmodeltransaction.cpp
@@ -64,7 +64,7 @@ void WalletModelTransaction::reassignAmounts(int nChangePosRet)
if (out.amount() <= 0) continue;
if (i == nChangePosRet)
i++;
- subtotal += walletTransaction->vout[i].nValue;
+ subtotal += walletTransaction->tx->vout[i].nValue;
i++;
}
rcp.amount = subtotal;
@@ -73,7 +73,7 @@ void WalletModelTransaction::reassignAmounts(int nChangePosRet)
{
if (i == nChangePosRet)
i++;
- rcp.amount = walletTransaction->vout[i].nValue;
+ rcp.amount = walletTransaction->tx->vout[i].nValue;
i++;
}
}
diff --git a/src/rest.cpp b/src/rest.cpp
index 90cca6f480..6379061f8f 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -7,7 +7,7 @@
#include "chainparams.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
-#include "main.h"
+#include "validation.h"
#include "httpserver.h"
#include "rpc/server.h"
#include "streams.h"
@@ -363,7 +363,7 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart)
if (!ParseHashStr(hashStr, hash))
return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
- CTransaction tx;
+ CTransactionRef tx;
uint256 hashBlock = uint256();
if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true))
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
@@ -388,7 +388,7 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart)
case RF_JSON: {
UniValue objTx(UniValue::VOBJ);
- TxToJSON(tx, hashBlock, objTx);
+ TxToJSON(*tx, hashBlock, objTx);
string strJSON = objTx.write() + "\n";
req->WriteHeader("Content-Type", "application/json");
req->WriteReply(HTTP_OK, strJSON);
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 8caea14adb..e6d80f06a3 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -9,7 +9,7 @@
#include "checkpoints.h"
#include "coins.h"
#include "consensus/validation.h"
-#include "main.h"
+#include "validation.h"
#include "policy/policy.h"
#include "primitives/transaction.h"
#include "rpc/server.h"
@@ -119,16 +119,16 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool tx
result.push_back(Pair("versionHex", strprintf("%08x", block.nVersion)));
result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex()));
UniValue txs(UniValue::VARR);
- BOOST_FOREACH(const CTransaction&tx, block.vtx)
+ for(const auto& tx : block.vtx)
{
if(txDetails)
{
UniValue objTx(UniValue::VOBJ);
- TxToJSON(tx, uint256(), objTx);
+ TxToJSON(*tx, uint256(), objTx);
txs.push_back(objTx);
}
else
- txs.push_back(tx.GetHash().GetHex());
+ txs.push_back(tx->GetHash().GetHex());
}
result.push_back(Pair("tx", txs));
result.push_back(Pair("time", block.GetBlockTime()));
@@ -194,12 +194,12 @@ UniValue waitfornewblock(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() > 1)
throw runtime_error(
- "waitfornewblock\n"
+ "waitfornewblock (timeout)\n"
"\nWaits for a specific new block and returns useful info about it.\n"
"\nReturns the current block on timeout or exit.\n"
"\nArguments:\n"
- "1. timeout (milliseconds) (int, optional, default=false)\n"
- "\nResult::\n"
+ "1. timeout (int, optional, default=0) Time in milliseconds to wait for a response. 0 indicates no timeout.\n"
+ "\nResult:\n"
"{ (json object)\n"
" \"hash\" : { (string) The blockhash\n"
" \"height\" : { (int) Block height\n"
@@ -232,13 +232,13 @@ UniValue waitforblock(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
throw runtime_error(
- "waitforblock\n"
+ "waitforblock <blockhash> (timeout)\n"
"\nWaits for a specific new block and returns useful info about it.\n"
"\nReturns the current block on timeout or exit.\n"
"\nArguments:\n"
"1. blockhash to wait for (string)\n"
- "2. timeout (milliseconds) (int, optional, default=false)\n"
- "\nResult::\n"
+ "2. timeout (int, optional, default=0) Time in milliseconds to wait for a response. 0 indicates no timeout.\n"
+ "\nResult:\n"
"{ (json object)\n"
" \"hash\" : { (string) The blockhash\n"
" \"height\" : { (int) Block height\n"
@@ -274,14 +274,14 @@ UniValue waitforblockheight(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
throw runtime_error(
- "waitforblock\n"
+ "waitforblockheight <blockheight> (timeout)\n"
"\nWaits for (at least) block height and returns the height and hash\n"
- "\nof the current tip.\n"
+ "of the current tip.\n"
"\nReturns the current block on timeout or exit.\n"
"\nArguments:\n"
"1. block height to wait for (int)\n"
- "2. timeout (milliseconds) (int, optional, default=false)\n"
- "\nResult::\n"
+ "2. timeout (int, optional, default=0) Time in milliseconds to wait for a response. 0 indicates no timeout.\n"
+ "\nResult:\n"
"{ (json object)\n"
" \"hash\" : { (string) The blockhash\n"
" \"height\" : { (int) Block height\n"
@@ -1319,7 +1319,7 @@ UniValue invalidateblock(const JSONRPCRequest& request)
}
if (state.IsValid()) {
- ActivateBestChain(state, Params(), NULL);
+ ActivateBestChain(state, Params());
}
if (!state.IsValid()) {
@@ -1357,7 +1357,7 @@ UniValue reconsiderblock(const JSONRPCRequest& request)
}
CValidationState state;
- ActivateBestChain(state, Params(), NULL);
+ ActivateBestChain(state, Params());
if (!state.IsValid()) {
throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason());
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 8824898feb..cb22dec342 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -12,7 +12,7 @@
#include "consensus/validation.h"
#include "core_io.h"
#include "init.h"
-#include "main.h"
+#include "validation.h"
#include "miner.h"
#include "net.h"
#include "pow.h"
@@ -131,9 +131,9 @@ UniValue generateBlocks(boost::shared_ptr<CReserveScript> coinbaseScript, int nG
if (pblock->nNonce == nInnerLoopCount) {
continue;
}
- CValidationState state;
- if (!ProcessNewBlock(state, Params(), NULL, pblock, true, NULL, false))
- throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("ProcessNewBlock: block not accepted: %s", FormatStateMessage(state)));
+ std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
+ if (!ProcessNewBlock(Params(), shared_pblock, true, NULL, NULL))
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted");
++nHeight;
blockHashes.push_back(pblock->GetHash().GetHex());
@@ -558,7 +558,8 @@ UniValue getblocktemplate(const JSONRPCRequest& request)
UniValue transactions(UniValue::VARR);
map<uint256, int64_t> setTxIndex;
int i = 0;
- BOOST_FOREACH (CTransaction& tx, pblock->vtx) {
+ for (const auto& it : pblock->vtx) {
+ const CTransaction& tx = *it;
uint256 txHash = tx.GetHash();
setTxIndex[txHash] = i++;
@@ -663,7 +664,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request)
result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex()));
result.push_back(Pair("transactions", transactions));
result.push_back(Pair("coinbaseaux", aux));
- result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0].vout[0].nValue));
+ result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue));
result.push_back(Pair("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast)));
result.push_back(Pair("target", hashTarget.GetHex()));
result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1));
@@ -680,7 +681,9 @@ UniValue getblocktemplate(const JSONRPCRequest& request)
result.push_back(Pair("curtime", pblock->GetBlockTime()));
result.push_back(Pair("bits", strprintf("%08x", pblock->nBits)));
result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1)));
- if (!pblocktemplate->vchCoinbaseCommitment.empty()) {
+
+ const struct BIP9DeploymentInfo& segwit_info = VersionBitsDeploymentInfo[Consensus::DEPLOYMENT_SEGWIT];
+ if (!pblocktemplate->vchCoinbaseCommitment.empty() && setClientRules.find(segwit_info.name) != setClientRules.end()) {
result.push_back(Pair("default_witness_commitment", HexStr(pblocktemplate->vchCoinbaseCommitment.begin(), pblocktemplate->vchCoinbaseCommitment.end())));
}
@@ -726,7 +729,8 @@ UniValue submitblock(const JSONRPCRequest& request)
+ HelpExampleRpc("submitblock", "\"mydata\"")
);
- CBlock block;
+ std::shared_ptr<CBlock> blockptr = std::make_shared<CBlock>();
+ CBlock& block = *blockptr;
if (!DecodeHexBlk(block, request.params[0].get_str()))
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed");
@@ -754,10 +758,9 @@ UniValue submitblock(const JSONRPCRequest& request)
}
}
- CValidationState state;
submitblock_StateCatcher sc(block.GetHash());
RegisterValidationInterface(&sc);
- bool fAccepted = ProcessNewBlock(state, Params(), NULL, &block, true, NULL, false);
+ bool fAccepted = ProcessNewBlock(Params(), blockptr, true, NULL, NULL);
UnregisterValidationInterface(&sc);
if (fBlockPresent)
{
@@ -765,13 +768,9 @@ UniValue submitblock(const JSONRPCRequest& request)
return "duplicate-inconclusive";
return "duplicate";
}
- if (fAccepted)
- {
- if (!sc.found)
- return "inconclusive";
- state = sc.state;
- }
- return BIP22ValidationResult(state);
+ if (!sc.found)
+ return "inconclusive";
+ return BIP22ValidationResult(sc.state);
}
UniValue estimatefee(const JSONRPCRequest& request)
@@ -788,6 +787,8 @@ UniValue estimatefee(const JSONRPCRequest& request)
"\n"
"A negative value is returned if not enough transactions and blocks\n"
"have been observed to make an estimate.\n"
+ "-1 is always returned for nblocks == 1 as it is impossible to calculate\n"
+ "a fee that is high enough to get reliably included in the next block.\n"
"\nExample:\n"
+ HelpExampleCli("estimatefee", "6")
);
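The mining RPCs now hand blocks to validation as std::shared_ptr<const CBlock>: generateBlocks copies its working block into a shared pointer, and submitblock builds the block inside one from the start, which lets ProcessNewBlock keep the block alive without another deep copy. A rough sketch of that calling convention with a stand-in block type:

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Block { std::vector<int> vtx; };     // stand-in; the real type is CBlock

    // Stand-in for the new ProcessNewBlock(const CChainParams&, std::shared_ptr<const CBlock>, ...).
    bool ProcessSketch(const std::shared_ptr<const Block>& block)
    {
        // validation can stash the shared_ptr (e.g. for relay) without copying the block
        return !block->vtx.empty();
    }

    int main()
    {
        Block working;                           // assembled by the miner or decoded from hex
        working.vtx.push_back(1);
        auto shared = std::make_shared<const Block>(working);   // freeze into shared ownership
        std::cout << (ProcessSketch(shared) ? "accepted" : "rejected") << "\n";
        return 0;
    }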
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index 3193985803..2aaee7f3f5 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -6,7 +6,7 @@
#include "base58.h"
#include "clientversion.h"
#include "init.h"
-#include "main.h"
+#include "validation.h"
#include "net.h"
#include "netbase.h"
#include "rpc/server.h"
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index f57ba76d3a..53c0f993dc 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -6,8 +6,9 @@
#include "chainparams.h"
#include "clientversion.h"
-#include "main.h"
+#include "validation.h"
#include "net.h"
+#include "net_processing.h"
#include "netbase.h"
#include "protocol.h"
#include "sync.h"
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 0656a61755..48769a5335 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -10,7 +10,7 @@
#include "core_io.h"
#include "init.h"
#include "keystore.h"
-#include "main.h"
+#include "validation.h"
#include "merkleblock.h"
#include "net.h"
#include "policy/policy.h"
@@ -135,17 +135,17 @@ UniValue getrawtransaction(const JSONRPCRequest& request)
"or there is an unspent output in the utxo for this transaction. To make it always work,\n"
"you need to maintain a transaction index, using the -txindex command line option.\n"
"\nReturn the raw transaction data.\n"
- "\nIf verbose=0, returns a string that is serialized, hex-encoded data for 'txid'.\n"
- "If verbose is non-zero, returns an Object with information about 'txid'.\n"
+ "\nIf verbose is 'true', returns an Object with information about 'txid'.\n"
+ "If verbose is 'false' or omitted, returns a string that is serialized, hex-encoded data for 'txid'.\n"
"\nArguments:\n"
"1. \"txid\" (string, required) The transaction id\n"
- "2. verbose (numeric, optional, default=0) If 0, return a string, other return a json object\n"
+ "2. verbose (bool, optional, default=false) If true, return a string, other return a json object\n"
- "\nResult (if verbose is not set or set to 0):\n"
+ "\nResult (if verbose is not set or set to false):\n"
"\"data\" (string) The serialized, hex-encoded data for 'txid'\n"
- "\nResult (if verbose > 0):\n"
+ "\nResult (if verbose is set to true):\n"
"{\n"
" \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n"
" \"txid\" : \"id\", (string) The transaction id (same as provided)\n"
@@ -192,31 +192,45 @@ UniValue getrawtransaction(const JSONRPCRequest& request)
"\nExamples:\n"
+ HelpExampleCli("getrawtransaction", "\"mytxid\"")
- + HelpExampleCli("getrawtransaction", "\"mytxid\" 1")
- + HelpExampleRpc("getrawtransaction", "\"mytxid\", 1")
+ + HelpExampleCli("getrawtransaction", "\"mytxid\" true")
+ + HelpExampleRpc("getrawtransaction", "\"mytxid\", true")
);
LOCK(cs_main);
uint256 hash = ParseHashV(request.params[0], "parameter 1");
+ // Accept either a bool (true) or a num (>=1) to indicate verbose output.
bool fVerbose = false;
- if (request.params.size() > 1)
- fVerbose = (request.params[1].get_int() != 0);
+ if (request.params.size() > 1) {
+ if (request.params[1].isNum()) {
+ if (request.params[1].get_int() != 0) {
+ fVerbose = true;
+ }
+ }
+ else if(request.params[1].isBool()) {
+ if(request.params[1].isTrue()) {
+ fVerbose = true;
+ }
+ }
+ else {
+ throw JSONRPCError(RPC_TYPE_ERROR, "Invalid type provided. Verbose parameter must be a boolean.");
+ }
+ }
- CTransaction tx;
+ CTransactionRef tx;
uint256 hashBlock;
if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true))
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available about transaction");
- string strHex = EncodeHexTx(tx);
+ string strHex = EncodeHexTx(*tx);
if (!fVerbose)
return strHex;
UniValue result(UniValue::VOBJ);
result.push_back(Pair("hex", strHex));
- TxToJSON(tx, hashBlock, result);
+ TxToJSON(*tx, hashBlock, result);
return result;
}
@@ -275,7 +289,7 @@ UniValue gettxoutproof(const JSONRPCRequest& request)
if (pblockindex == NULL)
{
- CTransaction tx;
+ CTransactionRef tx;
if (!GetTransaction(oneTxid, tx, Params().GetConsensus(), hashBlock, false) || hashBlock.IsNull())
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not yet in block");
if (!mapBlockIndex.count(hashBlock))
@@ -288,8 +302,8 @@ UniValue gettxoutproof(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk");
unsigned int ntxFound = 0;
- BOOST_FOREACH(const CTransaction&tx, block.vtx)
- if (setTxids.count(tx.GetHash()))
+ for (const auto& tx : block.vtx)
+ if (setTxids.count(tx->GetHash()))
ntxFound++;
if (ntxFound != setTxids.size())
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "(Not all) transactions not found in specified block");
@@ -506,13 +520,13 @@ UniValue decoderawtransaction(const JSONRPCRequest& request)
LOCK(cs_main);
RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VSTR));
- CTransaction tx;
+ CMutableTransaction mtx;
- if (!DecodeHexTx(tx, request.params[0].get_str(), true))
+ if (!DecodeHexTx(mtx, request.params[0].get_str(), true))
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
UniValue result(UniValue::VOBJ);
- TxToJSON(tx, uint256(), result);
+ TxToJSON(CTransaction(std::move(mtx)), uint256(), result);
return result;
}
@@ -869,9 +883,10 @@ UniValue sendrawtransaction(const JSONRPCRequest& request)
RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VSTR)(UniValue::VBOOL));
// parse hex string from parameter
- CTransaction tx;
- if (!DecodeHexTx(tx, request.params[0].get_str()))
+ CMutableTransaction mtx;
+ if (!DecodeHexTx(mtx, request.params[0].get_str()))
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
+ CTransaction tx(std::move(mtx));
uint256 hashTx = tx.GetHash();
bool fLimitFree = false;
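The verbose argument of getrawtransaction now accepts either a boolean or, for backwards compatibility, a non-zero number; anything else is rejected with a type error. A small sketch of that normalization using a toy value type in place of UniValue:

    #include <stdexcept>

    // Toy stand-in for UniValue, modelling only what this sketch needs.
    struct Value {
        enum class Type { Num, Bool, Str } type;
        int num;
        bool b;
        bool isNum() const { return type == Type::Num; }
        bool isBool() const { return type == Type::Bool; }
        bool isTrue() const { return type == Type::Bool && b; }
        int get_int() const { return num; }
    };

    bool ParseVerbose(const Value& v)
    {
        if (v.isNum()) return v.get_int() != 0;   // legacy numeric form: 0 / 1
        if (v.isBool()) return v.isTrue();        // preferred boolean form
        throw std::runtime_error("Invalid type provided. Verbose parameter must be a boolean.");
    }

    int main()
    {
        Value legacy{Value::Type::Num, 1, false};
        Value modern{Value::Type::Bool, 0, true};
        return (ParseVerbose(legacy) && ParseVerbose(modern)) ? 0 : 1;
    }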
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 52777b61f9..27c03f154c 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -54,9 +54,10 @@ void CScheduler::serviceQueue()
#else
// Some boost versions have a conflicting overload of wait_until that returns void.
// Explicitly use a template here to avoid hitting that overload.
- while (!shouldStop() && !taskQueue.empty() &&
- newTaskScheduled.wait_until<>(lock, taskQueue.begin()->first) != boost::cv_status::timeout) {
- // Keep waiting until timeout
+ while (!shouldStop() && !taskQueue.empty()) {
+ boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
+ if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
+ break; // Exit loop after timeout; it means we reached the time of the event
}
#endif
// If there are multiple threads, the queue can empty while we're waiting (another
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index 069ac55bfb..036d6ca7bf 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -85,8 +85,7 @@ static int verify_script(const unsigned char *scriptPubKey, unsigned int scriptP
}
try {
TxInputStream stream(SER_NETWORK, PROTOCOL_VERSION, txTo, txToLen);
- CTransaction tx;
- stream >> tx;
+ CTransaction tx(deserialize, stream);
if (nIn >= tx.vin.size())
return set_error(err, bitcoinconsensus_ERR_TX_INDEX);
if (GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) != txToLen)
diff --git a/src/serialize.h b/src/serialize.h
index 91864e1b64..e28ca548c0 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -13,6 +13,7 @@
#include <ios>
#include <limits>
#include <map>
+#include <memory>
#include <set>
#include <stdint.h>
#include <string>
@@ -25,6 +26,20 @@
static const unsigned int MAX_SIZE = 0x02000000;
/**
+ * Dummy data type to identify deserializing constructors.
+ *
+ * By convention, a constructor of a type T with signature
+ *
+ * template <typename Stream> T::T(deserialize_type, Stream& s)
+ *
+ * is a deserializing constructor, which builds the type by
+ * deserializing it from s. If T contains const fields, this
+ * is likely the only way to do so.
+ */
+struct deserialize_type {};
+constexpr deserialize_type deserialize {};
+
+/**
* Used to bypass the rule against non-const reference to temporary
* where it makes sense with wrappers such as CFlatData or CTxDB
*/
@@ -521,7 +536,17 @@ template<typename Stream, typename K, typename T, typename Pred, typename A> voi
template<typename Stream, typename K, typename Pred, typename A> void Serialize(Stream& os, const std::set<K, Pred, A>& m);
template<typename Stream, typename K, typename Pred, typename A> void Unserialize(Stream& is, std::set<K, Pred, A>& m);
+/**
+ * shared_ptr
+ */
+template<typename Stream, typename T> void Serialize(Stream& os, const std::shared_ptr<const T>& p);
+template<typename Stream, typename T> void Unserialize(Stream& os, std::shared_ptr<const T>& p);
+/**
+ * unique_ptr
+ */
+template<typename Stream, typename T> void Serialize(Stream& os, const std::unique_ptr<const T>& p);
+template<typename Stream, typename T> void Unserialize(Stream& os, std::unique_ptr<const T>& p);
@@ -776,6 +801,40 @@ void Unserialize(Stream& is, std::set<K, Pred, A>& m)
/**
+ * unique_ptr
+ */
+template<typename Stream, typename T> void
+Serialize(Stream& os, const std::unique_ptr<const T>& p)
+{
+ Serialize(os, *p);
+}
+
+template<typename Stream, typename T>
+void Unserialize(Stream& is, std::unique_ptr<const T>& p)
+{
+ p.reset(new T(deserialize, is));
+}
+
+
+
+/**
+ * shared_ptr
+ */
+template<typename Stream, typename T> void
+Serialize(Stream& os, const std::shared_ptr<const T>& p)
+{
+ Serialize(os, *p);
+}
+
+template<typename Stream, typename T>
+void Unserialize(Stream& is, std::shared_ptr<const T>& p)
+{
+ p = std::make_shared<const T>(deserialize, is);
+}
+
+
+
+/**
* Support for ADD_SERIALIZE_METHODS and READWRITE macro
*/
struct CSerActionSerialize
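The new deserialize_type tag and the shared_ptr/unique_ptr overloads added to serialize.h let a type with const members be constructed straight from a stream, which is how the bitcoinconsensus.cpp hunk above builds its CTransaction. A compact sketch of the convention with a toy stream and a toy immutable record:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <memory>
    #include <vector>

    struct deserialize_type {};
    constexpr deserialize_type deserialize {};

    // Toy byte stream, just enough for this example.
    struct ByteStream {
        std::vector<uint8_t> buf;
        size_t pos = 0;
        void write(const void* p, size_t n) {
            const uint8_t* b = static_cast<const uint8_t*>(p);
            buf.insert(buf.end(), b, b + n);
        }
        void read(void* p, size_t n) { std::memcpy(p, buf.data() + pos, n); pos += n; }
    };

    // An immutable record: the const field makes the deserializing constructor the
    // only practical way to build it from a stream.
    struct Record {
        const uint32_t value;
        explicit Record(uint32_t v) : value(v) {}
        template <typename Stream>
        Record(deserialize_type, Stream& s) : value(ReadU32(s)) {}
        template <typename Stream>
        void Serialize(Stream& s) const { s.write(&value, sizeof(value)); }
        template <typename Stream>
        static uint32_t ReadU32(Stream& s) { uint32_t v; s.read(&v, sizeof(v)); return v; }
    };

    // shared_ptr helper in the same spirit as the overload added to serialize.h.
    template <typename Stream, typename T>
    void UnserializePtr(Stream& s, std::shared_ptr<const T>& p)
    {
        p = std::make_shared<const T>(deserialize, s);
    }

    int main()
    {
        ByteStream s;
        Record(42).Serialize(s);
        std::shared_ptr<const Record> p;
        UnserializePtr(s, p);
        assert(p->value == 42);
        return 0;
    }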
diff --git a/src/streams.h b/src/streams.h
index c3e7c9e9e4..b508784238 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -69,6 +69,75 @@ OverrideStream<S> WithOrVersion(S* s, int nVersionFlag)
return OverrideStream<S>(s, s->GetType(), s->GetVersion() | nVersionFlag);
}
+/* Minimal stream for overwriting and/or appending to an existing byte vector
+ *
+ * The referenced vector will grow as necessary
+ */
+class CVectorWriter
+{
+ public:
+
+/*
+ * @param[in] nTypeIn Serialization Type
+ * @param[in] nVersionIn Serialization Version (including any flags)
+ * @param[in] vchDataIn Referenced byte vector to overwrite/append
+ * @param[in] nPosIn Starting position. Vector index where writes should start. The vector will initially
+ * grow as necessary to max(index, vec.size()). So to append, use vec.size().
+*/
+ CVectorWriter(int nTypeIn, int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn) : nType(nTypeIn), nVersion(nVersionIn), vchData(vchDataIn), nPos(nPosIn)
+ {
+ if(nPos > vchData.size())
+ vchData.resize(nPos);
+ }
+/*
+ * (other params same as above)
+ * @param[in] args A list of items to serialize starting at nPos.
+*/
+ template <typename... Args>
+ CVectorWriter(int nTypeIn, int nVersionIn, std::vector<unsigned char>& vchDataIn, size_t nPosIn, Args&&... args) : CVectorWriter(nTypeIn, nVersionIn, vchDataIn, nPosIn)
+ {
+ ::SerializeMany(*this, std::forward<Args>(args)...);
+ }
+ void write(const char* pch, size_t nSize)
+ {
+ assert(nPos <= vchData.size());
+ size_t nOverwrite = std::min(nSize, vchData.size() - nPos);
+ if (nOverwrite) {
+ memcpy(vchData.data() + nPos, reinterpret_cast<const unsigned char*>(pch), nOverwrite);
+ }
+ if (nOverwrite < nSize) {
+ vchData.insert(vchData.end(), reinterpret_cast<const unsigned char*>(pch) + nOverwrite, reinterpret_cast<const unsigned char*>(pch) + nSize);
+ }
+ nPos += nSize;
+ }
+ template<typename T>
+ CVectorWriter& operator<<(const T& obj)
+ {
+ // Serialize to this stream
+ ::Serialize(*this, obj);
+ return (*this);
+ }
+ int GetVersion() const
+ {
+ return nVersion;
+ }
+ int GetType() const
+ {
+ return nType;
+ }
+ void seek(size_t nSize)
+ {
+ nPos += nSize;
+ if(nPos > vchData.size())
+ vchData.resize(nPos);
+ }
+private:
+ const int nType;
+ const int nVersion;
+ std::vector<unsigned char>& vchData;
+ size_t nPos;
+};
+
/** Double ended buffer combining vector and stream-like interfaces.
*
* >> and << read and write unformatted data using the above serialization templates.
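CVectorWriter's write() overwrites the referenced vector starting at the current position and only appends once it runs past the end; seek() extends the vector as needed. A distilled, standalone version of just that overwrite-then-append logic (the real class additionally plugs into the Serialize() machinery):

    #include <algorithm>
    #include <cassert>
    #include <cstring>
    #include <vector>

    void WriteAt(std::vector<unsigned char>& vch, size_t& nPos, const unsigned char* pch, size_t nSize)
    {
        assert(nPos <= vch.size());
        size_t nOverwrite = std::min(nSize, vch.size() - nPos);
        if (nOverwrite)
            std::memcpy(vch.data() + nPos, pch, nOverwrite);             // overwrite in place
        if (nOverwrite < nSize)
            vch.insert(vch.end(), pch + nOverwrite, pch + nSize);        // append the remainder
        nPos += nSize;
    }

    int main()
    {
        std::vector<unsigned char> v{'a', 'b', 'c'};
        size_t pos = 1;
        const unsigned char data[] = {'X', 'Y', 'Z'};
        WriteAt(v, pos, data, sizeof(data));   // overwrites 'b','c' and appends 'Z'
        assert((v == std::vector<unsigned char>{'a', 'X', 'Y', 'Z'}));
        assert(pos == 4);
        return 0;
    }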
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index 3403415436..f5212bc266 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -109,7 +109,7 @@ private:
* An arena manages a contiguous region of memory. The pool starts out with one arena
* but can grow to multiple arenas if the need arises.
*
- * Unlike a normal C heap, the administrative structures are seperate from the managed
+ * Unlike a normal C heap, the administrative structures are separate from the managed
* memory. This has been done as the sizes and bases of objects are not in themselves sensitive
* information, as to conserve precious locked memory. In some operating systems
* the amount of memory that can be locked is small.
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 6eed636080..a8c3c4ebb9 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -6,8 +6,8 @@
#include "chainparams.h"
#include "keystore.h"
-#include "main.h"
#include "net.h"
+#include "net_processing.h"
#include "pow.h"
#include "script/sign.h"
#include "serialize.h"
@@ -29,6 +29,7 @@ extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans);
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
+ int64_t nTimeExpire;
};
extern std::map<uint256, COrphanTx> mapOrphanTransactions;
extern std::map<uint256, std::set<uint256> > mapOrphanTransactionsByPrev;
diff --git a/src/test/bitcoin-util-test.py b/src/test/bitcoin-util-test.py
index 4301b93b7c..1c090b3f3f 100755
--- a/src/test/bitcoin-util-test.py
+++ b/src/test/bitcoin-util-test.py
@@ -15,7 +15,7 @@ help_text="""Test framework for bitcoin utils.
Runs automatically during `make check`.
-Can also be run manually from the src directory by specifiying the source directory:
+Can also be run manually from the src directory by specifying the source directory:
test/bitcoin-util-test.py --srcdir='srcdir' [--verbose]
"""
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index 0ed5d62ef6..b013cda6d7 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -26,21 +26,21 @@ static CBlock BuildBlockTestCase() {
tx.vout[0].nValue = 42;
block.vtx.resize(3);
- block.vtx[0] = tx;
+ block.vtx[0] = MakeTransactionRef(tx);
block.nVersion = 42;
block.hashPrevBlock = GetRandHash();
block.nBits = 0x207fffff;
tx.vin[0].prevout.hash = GetRandHash();
tx.vin[0].prevout.n = 0;
- block.vtx[1] = tx;
+ block.vtx[1] = MakeTransactionRef(tx);
tx.vin.resize(10);
for (size_t i = 0; i < tx.vin.size(); i++) {
tx.vin[i].prevout.hash = GetRandHash();
tx.vin[i].prevout.n = 0;
}
- block.vtx[2] = tx;
+ block.vtx[2] = MakeTransactionRef(tx);
bool mutated;
block.hashMerkleRoot = BlockMerkleRoot(block, &mutated);
@@ -59,8 +59,8 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest)
TestMemPoolEntryHelper entry;
CBlock block(BuildBlockTestCase());
- pool.addUnchecked(block.vtx[2].GetHash(), entry.FromTx(block.vtx[2]));
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+ pool.addUnchecked(block.vtx[2]->GetHash(), entry.FromTx(*block.vtx[2]));
+ BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
// Do a simple ShortTxIDs RT
{
@@ -78,14 +78,14 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest)
BOOST_CHECK(!partialBlock.IsTxAvailable(1));
BOOST_CHECK( partialBlock.IsTxAvailable(2));
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
+ BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
- std::vector<std::shared_ptr<const CTransaction>> removed;
- pool.removeRecursive(block.vtx[2], &removed);
+ std::vector<CTransactionRef> removed;
+ pool.removeRecursive(*block.vtx[2], &removed);
BOOST_CHECK_EQUAL(removed.size(), 1);
CBlock block2;
- std::vector<CTransaction> vtx_missing;
+ std::vector<CTransactionRef> vtx_missing;
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_INVALID); // No transactions
vtx_missing.push_back(block.vtx[2]); // Wrong transaction
@@ -152,8 +152,10 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
TestMemPoolEntryHelper entry;
CBlock block(BuildBlockTestCase());
- pool.addUnchecked(block.vtx[2].GetHash(), entry.FromTx(block.vtx[2]));
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+ pool.addUnchecked(block.vtx[2]->GetHash(), entry.FromTx(*block.vtx[2]));
+ BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+
+ uint256 txhash;
// Test with pre-forwarding tx 1, but not coinbase
{
@@ -161,8 +163,8 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
shortIDs.prefilledtxn.resize(1);
shortIDs.prefilledtxn[0] = {1, block.vtx[1]};
shortIDs.shorttxids.resize(2);
- shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[0].GetHash());
- shortIDs.shorttxids[1] = shortIDs.GetShortID(block.vtx[2].GetHash());
+ shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[0]->GetHash());
+ shortIDs.shorttxids[1] = shortIDs.GetShortID(block.vtx[2]->GetHash());
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << shortIDs;
@@ -176,10 +178,10 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
BOOST_CHECK( partialBlock.IsTxAvailable(1));
BOOST_CHECK( partialBlock.IsTxAvailable(2));
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
+ BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
CBlock block2;
- std::vector<CTransaction> vtx_missing;
+ std::vector<CTransactionRef> vtx_missing;
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_INVALID); // No transactions
vtx_missing.push_back(block.vtx[1]); // Wrong transaction
@@ -194,9 +196,13 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString());
BOOST_CHECK(!mutated);
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
+ txhash = block.vtx[2]->GetHash();
+ block.vtx.clear();
+ block2.vtx.clear();
+ block3.vtx.clear();
+ BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
}
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+ BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
}
BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
@@ -205,8 +211,10 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
TestMemPoolEntryHelper entry;
CBlock block(BuildBlockTestCase());
- pool.addUnchecked(block.vtx[1].GetHash(), entry.FromTx(block.vtx[1]));
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+ pool.addUnchecked(block.vtx[1]->GetHash(), entry.FromTx(*block.vtx[1]));
+ BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+
+ uint256 txhash;
// Test with pre-forwarding coinbase + tx 2 with tx 1 in mempool
{
@@ -215,7 +223,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
shortIDs.prefilledtxn[0] = {0, block.vtx[0]};
shortIDs.prefilledtxn[1] = {1, block.vtx[2]}; // id == 1 as it is 1 after index 1
shortIDs.shorttxids.resize(1);
- shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[1].GetHash());
+ shortIDs.shorttxids[0] = shortIDs.GetShortID(block.vtx[1]->GetHash());
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << shortIDs;
@@ -229,19 +237,22 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
BOOST_CHECK( partialBlock.IsTxAvailable(1));
BOOST_CHECK( partialBlock.IsTxAvailable(2));
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
+ BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
CBlock block2;
- std::vector<CTransaction> vtx_missing;
+ std::vector<CTransactionRef> vtx_missing;
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK);
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString());
bool mutated;
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString());
BOOST_CHECK(!mutated);
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
+ txhash = block.vtx[1]->GetHash();
+ block.vtx.clear();
+ block2.vtx.clear();
+ BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
}
- BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[1].GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
+ BOOST_CHECK_EQUAL(pool.mapTx.find(txhash)->GetSharedTx().use_count(), SHARED_TX_OFFSET + 0);
}
BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)
@@ -255,7 +266,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)
CBlock block;
block.vtx.resize(1);
- block.vtx[0] = coinbase;
+ block.vtx[0] = MakeTransactionRef(std::move(coinbase));
block.nVersion = 42;
block.hashPrevBlock = GetRandHash();
block.nBits = 0x207fffff;
@@ -280,7 +291,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)
BOOST_CHECK(partialBlock.IsTxAvailable(0));
CBlock block2;
- std::vector<CTransaction> vtx_missing;
+ std::vector<CTransactionRef> vtx_missing;
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK);
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString());
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString());
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 25fb9ea2b7..b15ff9e44b 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -30,15 +30,15 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
CBloomFilter filter(3, 0.01, 0, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8"));
- BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter doesn't contain just-inserted object!");
+ BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
// One bit different in first byte
- BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter contains something it shouldn't!");
+ BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter contains something it shouldn't!");
filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee"));
- BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "BloomFilter doesn't contain just-inserted object (2)!");
+ BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "Bloom filter doesn't contain just-inserted object (2)!");
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
- BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "BloomFilter doesn't contain just-inserted object (3)!");
+ BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << filter;
@@ -51,9 +51,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
- BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter doesn't contain just-inserted object!");
+ BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
filter.clear();
- BOOST_CHECK_MESSAGE( !filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter should be empty!");
+ BOOST_CHECK_MESSAGE( !filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter should be empty!");
}
BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
@@ -62,15 +62,15 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
CBloomFilter filter(3, 0.01, 2147483649UL, BLOOM_UPDATE_ALL);
filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8"));
- BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter doesn't contain just-inserted object!");
+ BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
// One bit different in first byte
- BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "BloomFilter contains something it shouldn't!");
+ BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter contains something it shouldn't!");
filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee"));
- BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "BloomFilter doesn't contain just-inserted object (2)!");
+ BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "Bloom filter doesn't contain just-inserted object (2)!");
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
- BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "BloomFilter doesn't contain just-inserted object (3)!");
+ BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << filter;
@@ -114,16 +114,14 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
BOOST_AUTO_TEST_CASE(bloom_match)
{
// Random real transaction (b4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b)
- CTransaction tx;
CDataStream stream(ParseHex("01000000010b26e9b7735eb6aabdf358bab62f9816a21ba9ebdb719d5299e88607d722c190000000008b4830450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d27d8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a0141046d11fee51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5eeef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c339ffffffff021bff3d11000000001976a91404943fdd508053c75000106d3bc6e2754dbcff1988ac2f15de00000000001976a914a266436d2965547608b9e15d9032a7b9d64fa43188ac00000000"), SER_DISK, CLIENT_VERSION);
- stream >> tx;
+ CTransaction tx(deserialize, stream);
// and one which spends it (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436)
unsigned char ch[] = {0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f, 0xcd, 0x4f, 0x85, 0x65, 0xef, 0x40, 0x6d, 0xd5, 0xd6, 0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8, 0x20, 0x27, 0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01, 0x9f, 0x74, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xda, 0x0d, 0xc6, 0xae, 0xce, 0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77, 0x37, 0x57, 0xde, 0xb1, 0x68, 0x82, 0x09, 0x30, 0xe3, 0xb0, 0xd0, 0x3f, 0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c, 0x02, 0x21, 0x00, 0xd2, 0x5b, 0x5c, 0x87, 0x04, 0x00, 0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e, 0x76, 0x3e, 0x2d, 0xd5, 0x1e, 0x7f, 0xf0, 0xbe, 0x15, 0x77, 0x27, 0xc4, 0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01, 0x41, 0x04, 0xe6, 0xc2, 0x6e, 0xf6, 0x7d, 0xc6, 0x10, 0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a, 0x6c, 0xf9, 0xae, 0xa9, 0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5, 0x34, 0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff, 0x9a, 0x2e, 0xe1, 0x97, 0x8d, 0xd7, 0xfd, 0x01, 0xdf, 0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b, 0x06, 0xa9, 0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb, 0x0f, 0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02, 0x8b, 0xa7, 0x94, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef, 0x05, 0x07, 0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0, 0x39, 0x97, 0x2d, 0x7b, 0x88, 0xac, 0x40, 0x94, 0xa8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xc1, 0x09, 0x32, 0x48, 0x3f, 0xec, 0x93, 0xed, 0x51, 0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70, 0x43, 0xf9, 0x88, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00};
vector<unsigned char> vch(ch, ch + sizeof(ch) -1);
CDataStream spendStream(vch, SER_DISK, CLIENT_VERSION);
- CTransaction spendingTx;
- spendStream >> spendingTx;
+ CTransaction spendingTx(deserialize, spendStream);
CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
filter.insert(uint256S("0xb4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b"));
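The bloom tests above switch from deserializing into a default-constructed CTransaction to the one-step deserialize constructor. A minimal sketch of the pattern (rawTxHex is a placeholder for a hex-encoded transaction):

    // rawTxHex is a hypothetical hex string standing in for real transaction data
    CDataStream stream(ParseHex(rawTxHex), SER_NETWORK, PROTOCOL_VERSION);
    CTransaction tx(deserialize, stream); // tx is fully built and immutable; no mutable placeholder needed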
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 82de302053..79dea00e46 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -8,7 +8,7 @@
#include "utilstrencodings.h"
#include "test/test_bitcoin.h"
#include "test/test_random.h"
-#include "main.h"
+#include "validation.h"
#include "consensus/validation.h"
#include <vector>
diff --git a/src/test/data/bitcoin-util-test.json b/src/test/data/bitcoin-util-test.json
index de95044597..98be75919b 100644
--- a/src/test/data/bitcoin-util-test.json
+++ b/src/test/data/bitcoin-util-test.json
@@ -103,6 +103,16 @@
"description": "Creates a new transaction with a single empty output script (output in json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["01000000000100000000000000000000000000"],
+ "output_cmp": "txcreate2.hex",
+ "description": "Parses a transation with no inputs and a single output script"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "01000000000100000000000000000000000000"],
+ "output_cmp": "txcreate2.json",
+ "description": "Parses a transation with no inputs and a single output script (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
"args":
["-create",
"in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json
index 2f299aa5fe..27acb2f16a 100644
--- a/src/test/data/tx_valid.json
+++ b/src/test/data/tx_valid.json
@@ -53,7 +53,7 @@
["The following tests for the presence of a bug in the handling of SIGHASH_SINGLE"],
["It results in signing the constant 1, instead of something generated based on the transaction,"],
["when the input doing the signing has an index greater than the maximum output index"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0xe52b482f2faa8ecbf0db344f93c84ac908557f33 EQUALVERIFY CHECKSIG"], ["0000000000000000000000000000000000000000000000000000000000000200", 0, "1"]],
+[[["0000000000000000000000000000000000000000000000000000000000000200", 0, "1"], ["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0xe52b482f2faa8ecbf0db344f93c84ac908557f33 EQUALVERIFY CHECKSIG"]],
"01000000020002000000000000000000000000000000000000000000000000000000000000000000000151ffffffff0001000000000000000000000000000000000000000000000000000000000000000000006b483045022100c9cdd08798a28af9d1baf44a6c77bcc7e279f47dc487c8c899911bc48feaffcc0220503c5c50ae3998a733263c5c0f7061b483e2b56c4c41b456e7d2f5a78a74c077032102d5c25adb51b61339d2b05315791e21bbe80ea470a49db0135720983c905aace0ffffffff010000000000000000015100000000", "P2SH"],
["An invalid P2SH Transaction"],
@@ -334,9 +334,9 @@
"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000023220020ff25429251b5a84f452230a3c75fd886b7fc5a7865ce4a7bb7a9d7c5be6da3dbffffffff01e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac02483045022100aa5d8aa40a90f23ce2c3d11bc845ca4a12acd99cbea37de6b9f6d86edebba8cb022022dedc2aa0a255f74d04c0b76ece2d7c691f9dd11a64a8ac49f62a99c3a05f9d01232103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac00000000", "P2SH,WITNESS"],
["Witness with SigHash Single|AnyoneCanPay"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100],
["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 2000],
-["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100],
+["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100],
["0000000000000000000000000000000000000000000000000000000000000100", 3, "0x51", 4100]],
"0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff05540b0000000000000151d0070000000000000151840300000000000001513c0f00000000000001512c010000000000000151000248304502210092f4777a0f17bf5aeb8ae768dec5f2c14feabf9d1fe2c89c78dfed0f13fdb86902206da90a86042e252bcd1e80a168c719e4a1ddcc3cebea24b9812c5453c79107e9832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71000000000000", "P2SH,WITNESS"],
@@ -359,9 +359,9 @@
"0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b000000000000015100024730440220699e6b0cfe015b64ca3283e6551440a34f901ba62dd4c72fe1cb815afb2e6761022021cc5e84db498b1479de14efda49093219441adc6c543e5534979605e273d80b032103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"],
["Witness with SigHash None|AnyoneCanPay"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100],
+["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100],
["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 2000],
-["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100],
["0000000000000000000000000000000000000000000000000000000000000100", 3, "0x51", 4100]],
"0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff04b60300000000000001519e070000000000000151860b00000000000001009600000000000000015100000248304502210091b32274295c2a3fa02f5bce92fb2789e3fc6ea947fbe1a76e52ea3f4ef2381a022079ad72aefa3837a2e0c033a8652a59731da05fa4a813f4fc48e87c075037256b822103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"],
@@ -390,9 +390,9 @@
"01000000000103000100000000000000000000000000000000000000000000000000000000000000000000000200000000010000000000000000000000000000000000000000000000000000000000000100000000ffffffff000100000000000000000000000000000000000000000000000000000000000002000000000200000003e8030000000000000151d0070000000000000151b80b00000000000001510002473044022022fceb54f62f8feea77faac7083c3b56c4676a78f93745adc8a35800bc36adfa022026927df9abcf0a8777829bcfcce3ff0a385fa54c3f9df577405e3ef24ee56479022103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"],
["Witness with SigHash All|AnyoneCanPay"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100],
+["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x51", 1100],
["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 2000],
-["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3100],
["0000000000000000000000000000000000000000000000000000000000000100", 3, "0x51", 4100]],
"0100000000010400010000000000000000000000000000000000000000000000000000000000000200000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000300000000ffffffff03e8030000000000000151d0070000000000000151b80b0000000000000151000002483045022100a3cec69b52cba2d2de623eeef89e0ba1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "P2SH,WITNESS"],
@@ -458,8 +458,8 @@
"0100000000010200010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff02e8030000000000000151e90300000000000001510247304402206d59682663faab5e4cb733c562e22cdae59294895929ec38d7c016621ff90da0022063ef0af5f970afe8a45ea836e3509b8847ed39463253106ac17d19c437d3d56b832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710248304502210085001a820bfcbc9f9de0298af714493f8a37b3b354bfd21a7097c3e009f2018c022050a8b4dbc8155d4d04da2f5cdd575dcf8dd0108de8bec759bd897ea01ecb3af7832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000", "P2SH,WITNESS"],
["Witness Single|AnyoneCanPay does not hash input's position (permutation)"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1000],
-["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1001]],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 1, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1001],
+["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1000]],
"0100000000010200010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff02e9030000000000000151e80300000000000001510248304502210085001a820bfcbc9f9de0298af714493f8a37b3b354bfd21a7097c3e009f2018c022050a8b4dbc8155d4d04da2f5cdd575dcf8dd0108de8bec759bd897ea01ecb3af7832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710247304402206d59682663faab5e4cb733c562e22cdae59294895929ec38d7c016621ff90da0022063ef0af5f970afe8a45ea836e3509b8847ed39463253106ac17d19c437d3d56b832103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc7100000000", "P2SH,WITNESS"],
["Non witness Single|AnyoneCanPay hash input's position"],
diff --git a/src/test/limitedmap_tests.cpp b/src/test/limitedmap_tests.cpp
index faaddffad8..e3531486aa 100644
--- a/src/test/limitedmap_tests.cpp
+++ b/src/test/limitedmap_tests.cpp
@@ -47,7 +47,7 @@ BOOST_AUTO_TEST_CASE(limitedmap_test)
// make sure the item is present
BOOST_CHECK(map.count(i) == 1);
- // use the iterator to check for the expected key adn value
+ // use the iterator to check for the expected key and value
BOOST_CHECK(it->first == i);
BOOST_CHECK(it->second == i + 1);
diff --git a/src/test/main_tests.cpp b/src/test/main_tests.cpp
index dbfbdd934f..697fbf792c 100644
--- a/src/test/main_tests.cpp
+++ b/src/test/main_tests.cpp
@@ -3,7 +3,8 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chainparams.h"
-#include "main.h"
+#include "validation.h"
+#include "net.h"
#include "test/test_bitcoin.h"
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index a73dbe725c..1faf8b6aeb 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
CTxMemPool testPool(CFeeRate(0));
- std::vector<std::shared_ptr<const CTransaction>> removed;
+ std::vector<CTransactionRef> removed;
// Nothing in pool, remove should do nothing:
testPool.removeRecursive(txParent, &removed);
@@ -410,8 +410,8 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
CheckSort<ancestor_score>(pool, sortedOrder);
/* after tx6 is mined, tx7 should move up in the sort */
- std::vector<CTransaction> vtx;
- vtx.push_back(tx6);
+ std::vector<CTransactionRef> vtx;
+ vtx.push_back(MakeTransactionRef(tx6));
pool.removeForBlock(vtx, 1, NULL, false);
sortedOrder.erase(sortedOrder.begin()+1);
@@ -546,7 +546,7 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
pool.addUnchecked(tx5.GetHash(), entry.Fee(1000LL).FromTx(tx5, &pool));
pool.addUnchecked(tx7.GetHash(), entry.Fee(9000LL).FromTx(tx7, &pool));
- std::vector<CTransaction> vtx;
+ std::vector<CTransactionRef> vtx;
SetMockTime(42);
SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + 1000);
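removeForBlock now consumes a vector of CTransactionRef rather than plain CTransaction objects; the call pattern used by the updated mempool test is, in sketch form:

    std::vector<CTransactionRef> vtx;
    vtx.push_back(MakeTransactionRef(tx6));        // wrap the mined transaction in a shared ref
    pool.removeForBlock(vtx, 1, NULL, false);      // same height and flags as the test above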
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index 706d30f489..55e6852a15 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -15,8 +15,8 @@ static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::ve
{
vMerkleTree.clear();
vMerkleTree.reserve(block.vtx.size() * 2 + 16); // Safe upper bound for the number of total nodes.
- for (std::vector<CTransaction>::const_iterator it(block.vtx.begin()); it != block.vtx.end(); ++it)
- vMerkleTree.push_back(it->GetHash());
+ for (std::vector<CTransactionRef>::const_iterator it(block.vtx.begin()); it != block.vtx.end(); ++it)
+ vMerkleTree.push_back((*it)->GetHash());
int j = 0;
bool mutated = false;
for (int nSize = block.vtx.size(); nSize > 1; nSize = (nSize + 1) / 2)
@@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(merkle_test)
for (int j = 0; j < ntx; j++) {
CMutableTransaction mtx;
mtx.nLockTime = j;
- block.vtx[j] = mtx;
+ block.vtx[j] = MakeTransactionRef(std::move(mtx));
}
// Compute the root of the block before mutating it.
bool unmutatedMutated = false;
@@ -126,7 +126,7 @@ BOOST_AUTO_TEST_CASE(merkle_test)
std::vector<uint256> newBranch = BlockMerkleBranch(block, mtx);
std::vector<uint256> oldBranch = BlockGetMerkleBranch(block, merkleTree, mtx);
BOOST_CHECK(oldBranch == newBranch);
- BOOST_CHECK(ComputeMerkleRootFromBranch(block.vtx[mtx].GetHash(), newBranch, mtx) == oldRoot);
+ BOOST_CHECK(ComputeMerkleRootFromBranch(block.vtx[mtx]->GetHash(), newBranch, mtx) == oldRoot);
}
}
}
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 2762cafa38..bc1bdd8874 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -7,7 +7,7 @@
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
-#include "main.h"
+#include "validation.h"
#include "miner.h"
#include "pubkey.h"
#include "script/standard.h"
@@ -77,7 +77,7 @@ bool TestSequenceLocks(const CTransaction &tx, int flags)
// Implemented as an additional function, rather than a separate test case,
// to allow reusing the blockchain created in CreateNewBlock_validity.
// Note that this test assumes blockprioritysize is 0.
-void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, std::vector<CTransaction *>& txFirst)
+void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey, std::vector<CTransactionRef>& txFirst)
{
// Test the ancestor feerate transaction selection.
TestMemPoolEntryHelper entry;
@@ -108,9 +108,9 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey,
mempool.addUnchecked(hashHighFeeTx, entry.Fee(50000).Time(GetTime()).SpendsCoinbase(false).FromTx(tx));
std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
- BOOST_CHECK(pblocktemplate->block.vtx[1].GetHash() == hashParentTx);
- BOOST_CHECK(pblocktemplate->block.vtx[2].GetHash() == hashHighFeeTx);
- BOOST_CHECK(pblocktemplate->block.vtx[3].GetHash() == hashMediumFeeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[1]->GetHash() == hashParentTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[2]->GetHash() == hashHighFeeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[3]->GetHash() == hashMediumFeeTx);
// Test that a package below the min relay fee doesn't get included
tx.vin[0].prevout.hash = hashHighFeeTx;
@@ -130,8 +130,8 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey,
pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
// Verify that the free tx and the low fee tx didn't get selected
for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
- BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashFreeTx);
- BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashLowFeeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashLowFeeTx);
}
// Test that packages above the min relay fee do get included, even if one
@@ -142,8 +142,8 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey,
hashLowFeeTx = tx.GetHash();
mempool.addUnchecked(hashLowFeeTx, entry.Fee(feeToUse+2).FromTx(tx));
pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
- BOOST_CHECK(pblocktemplate->block.vtx[4].GetHash() == hashFreeTx);
- BOOST_CHECK(pblocktemplate->block.vtx[5].GetHash() == hashLowFeeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[4]->GetHash() == hashFreeTx);
+ BOOST_CHECK(pblocktemplate->block.vtx[5]->GetHash() == hashLowFeeTx);
// Test that transaction selection properly updates ancestor fee
// calculations as ancestor transactions get included in a block.
@@ -166,8 +166,8 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey,
// Verify that this tx isn't selected.
for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
- BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashFreeTx2);
- BOOST_CHECK(pblocktemplate->block.vtx[i].GetHash() != hashLowFeeTx2);
+ BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeTx2);
+ BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashLowFeeTx2);
}
// This tx will be mineable, and should cause hashLowFeeTx2 to be selected
@@ -176,7 +176,7 @@ void TestPackageSelection(const CChainParams& chainparams, CScript scriptPubKey,
tx.vout[0].nValue = 100000000 - 10000; // 10k satoshi fee
mempool.addUnchecked(tx.GetHash(), entry.Fee(10000).FromTx(tx));
pblocktemplate = BlockAssembler(chainparams).CreateNewBlock(scriptPubKey);
- BOOST_CHECK(pblocktemplate->block.vtx[8].GetHash() == hashLowFeeTx2);
+ BOOST_CHECK(pblocktemplate->block.vtx[8]->GetHash() == hashLowFeeTx2);
}
// NOTE: These tests rely on CreateNewBlock doing its own self-validation!
@@ -203,28 +203,28 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
// We can't make transactions until we have inputs
// Therefore, load 100 blocks :)
int baseheight = 0;
- std::vector<CTransaction*>txFirst;
+ std::vector<CTransactionRef> txFirst;
for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i)
{
CBlock *pblock = &pblocktemplate->block; // pointer for convenience
pblock->nVersion = 1;
pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
- CMutableTransaction txCoinbase(pblock->vtx[0]);
+ CMutableTransaction txCoinbase(*pblock->vtx[0]);
txCoinbase.nVersion = 1;
txCoinbase.vin[0].scriptSig = CScript();
txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
+ txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
txCoinbase.vout[0].scriptPubKey = CScript();
- pblock->vtx[0] = CTransaction(txCoinbase);
+ pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
if (txFirst.size() == 0)
baseheight = chainActive.Height();
if (txFirst.size() < 4)
- txFirst.push_back(new CTransaction(pblock->vtx[0]));
+ txFirst.push_back(pblock->vtx[0]);
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
pblock->nNonce = blockinfo[i].nonce;
- CValidationState state;
- BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL, false));
- BOOST_CHECK(state.IsValid());
+ std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
+ BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, NULL, NULL));
pblock->hashPrevBlock = pblock->GetHash();
}
@@ -487,9 +487,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
TestPackageSelection(chainparams, scriptPubKey, txFirst);
- BOOST_FOREACH(CTransaction *_tx, txFirst)
- delete _tx;
-
fCheckpointsEnabled = true;
}
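ProcessNewBlock now takes a shared pointer to an immutable block instead of a CValidationState out-parameter and raw block pointer; a minimal sketch of the new call, mirroring the miner test above:

    std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
    BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, NULL, NULL)); // remaining arguments follow the test's call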
diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp
index c773129640..e6b689bc6c 100644
--- a/src/test/pmt_tests.cpp
+++ b/src/test/pmt_tests.cpp
@@ -45,14 +45,14 @@ BOOST_AUTO_TEST_CASE(pmt_test1)
for (unsigned int j=0; j<nTx; j++) {
CMutableTransaction tx;
tx.nLockTime = j; // actual transaction data doesn't matter; just make the nLockTime's unique
- block.vtx.push_back(CTransaction(tx));
+ block.vtx.push_back(MakeTransactionRef(std::move(tx)));
}
// calculate actual merkle root and height
uint256 merkleRoot1 = BlockMerkleRoot(block);
std::vector<uint256> vTxid(nTx, uint256());
for (unsigned int j=0; j<nTx; j++)
- vTxid[j] = block.vtx[j].GetHash();
+ vTxid[j] = block.vtx[j]->GetHash();
int nHeight = 1, nTx_ = nTx;
while (nTx_ > 1) {
nTx_ = (nTx_+1)/2;
diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp
index 38aaaba267..c57feaec90 100644
--- a/src/test/policyestimator_tests.cpp
+++ b/src/test/policyestimator_tests.cpp
@@ -45,7 +45,7 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
CFeeRate baseRate(basefee, GetVirtualTransactionSize(tx));
// Create a fake block
- std::vector<CTransaction> block;
+ std::vector<CTransactionRef> block;
int blocknum = 0;
// Loop through 200 blocks
@@ -66,9 +66,9 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// 9/10 blocks add 2nd highest and so on until ...
// 1/10 blocks add lowest fee transactions
while (txHashes[9-h].size()) {
- std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[9-h].back());
+ CTransactionRef ptx = mpool.get(txHashes[9-h].back());
if (ptx)
- block.push_back(*ptx);
+ block.push_back(ptx);
txHashes[9-h].pop_back();
}
}
@@ -95,17 +95,22 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// Highest feerate is 10*baseRate and gets in all blocks,
// second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%,
// third highest feerate is 8*base rate, and gets in 8/10 blocks = 80%,
- // so estimateFee(1) should return 10*baseRate.
+ // so estimateFee(1) would return 10*baseRate, but it is hardcoded to return failure
// Second highest feerate has 100% chance of being included by 2 blocks,
// so estimateFee(2) should return 9*baseRate etc...
for (int i = 1; i < 10;i++) {
origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK());
- if (i > 1) { // Fee estimates should be monotonically decreasing
+ if (i > 2) { // Fee estimates should be monotonically decreasing
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
}
int mult = 11-i;
- BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
- BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
+ if (i > 1) {
+ BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
+ BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
+ }
+ else {
+ BOOST_CHECK(origFeeEst[i-1] == CFeeRate(0).GetFeePerK());
+ }
}
// Mine 50 more blocks with no transactions happening, estimates shouldn't change
@@ -113,7 +118,8 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
while (blocknum < 250)
mpool.removeForBlock(block, ++blocknum);
- for (int i = 1; i < 10;i++) {
+ BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0));
+ for (int i = 2; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee);
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
}
@@ -143,15 +149,16 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// Estimates should still not be below original
for (int j = 0; j < 10; j++) {
while(txHashes[j].size()) {
- std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[j].back());
+ CTransactionRef ptx = mpool.get(txHashes[j].back());
if (ptx)
- block.push_back(*ptx);
+ block.push_back(ptx);
txHashes[j].pop_back();
}
}
mpool.removeForBlock(block, 265);
block.clear();
- for (int i = 1; i < 10;i++) {
+ BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0));
+ for (int i = 2; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
}
@@ -163,16 +170,17 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
uint256 hash = tx.GetHash();
mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
- std::shared_ptr<const CTransaction> ptx = mpool.get(hash);
+ CTransactionRef ptx = mpool.get(hash);
if (ptx)
- block.push_back(*ptx);
+ block.push_back(ptx);
}
}
mpool.removeForBlock(block, ++blocknum);
block.clear();
}
- for (int i = 1; i < 10; i++) {
+ BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0));
+ for (int i = 2; i < 10; i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
}
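As the comments above note, estimateFee(1) is now hardcoded to fail; a condensed restatement of the assertion pattern the updated estimator test applies in each loop:

    BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0));   // a zero feerate signals "no estimate available"
    for (int i = 2; i < 10; i++) {
        BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee);
        BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
    }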
diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_P2SH_tests.cpp
index 1a01593a8e..789cc70d7f 100644
--- a/src/test/script_P2SH_tests.cpp
+++ b/src/test/script_P2SH_tests.cpp
@@ -5,7 +5,7 @@
#include "core_io.h"
#include "key.h"
#include "keystore.h"
-#include "main.h"
+#include "validation.h"
#include "policy/policy.h"
#include "script/script.h"
#include "script/script_error.h"
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 3169afb13a..eb324d5a6b 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -276,7 +276,7 @@ private:
//! The Witness embedded script
CScript witscript;
CScriptWitness scriptWitness;
- CTransaction creditTx;
+ CTransactionRef creditTx;
CMutableTransaction spendTx;
bool havePush;
std::vector<unsigned char> push;
@@ -319,8 +319,8 @@ public:
redeemscript = scriptPubKey;
scriptPubKey = CScript() << OP_HASH160 << ToByteVector(CScriptID(redeemscript)) << OP_EQUAL;
}
- creditTx = BuildCreditingTransaction(scriptPubKey, nValue);
- spendTx = BuildSpendingTransaction(CScript(), CScriptWitness(), creditTx);
+ creditTx = MakeTransactionRef(BuildCreditingTransaction(scriptPubKey, nValue));
+ spendTx = BuildSpendingTransaction(CScript(), CScriptWitness(), *creditTx);
}
TestBuilder& ScriptError(ScriptError_t err)
@@ -421,7 +421,7 @@ public:
{
TestBuilder copy = *this; // Make a copy so we can rollback the push.
DoPush();
- DoTest(creditTx.vout[0].scriptPubKey, spendTx.vin[0].scriptSig, scriptWitness, flags, comment, scriptError, nValue);
+ DoTest(creditTx->vout[0].scriptPubKey, spendTx.vin[0].scriptSig, scriptWitness, flags, comment, scriptError, nValue);
*this = copy;
return *this;
}
@@ -447,7 +447,7 @@ public:
array.push_back(wit);
}
array.push_back(FormatScript(spendTx.vin[0].scriptSig));
- array.push_back(FormatScript(creditTx.vout[0].scriptPubKey));
+ array.push_back(FormatScript(creditTx->vout[0].scriptPubKey));
array.push_back(FormatScriptFlags(flags));
array.push_back(FormatScriptError((ScriptError_t)scriptError));
array.push_back(comment);
@@ -461,7 +461,7 @@ public:
const CScript& GetScriptPubKey()
{
- return creditTx.vout[0].scriptPubKey;
+ return creditTx->vout[0].scriptPubKey;
}
};
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index bbadf57957..1dc86eb116 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -21,10 +21,10 @@ protected:
bool boolval;
std::string stringval;
const char* charstrval;
- CTransaction txval;
+ CTransactionRef txval;
public:
CSerializeMethodsTestSingle() = default;
- CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(txvalin){}
+ CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(MakeTransactionRef(txvalin)){}
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
@@ -42,7 +42,7 @@ public:
boolval == rhs.boolval && \
stringval == rhs.stringval && \
strcmp(charstrval, rhs.charstrval) == 0 && \
- txval == rhs.txval;
+ *txval == *rhs.txval;
}
};
diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp
index 3bc8341b02..a524f5b946 100644
--- a/src/test/sighash_tests.cpp
+++ b/src/test/sighash_tests.cpp
@@ -5,7 +5,7 @@
#include "consensus/validation.h"
#include "data/sighash.json.h"
#include "hash.h"
-#include "main.h" // For CheckTransaction
+#include "validation.h" // For CheckTransaction
#include "script/interpreter.h"
#include "script/script.h"
#include "serialize.h"
@@ -184,7 +184,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data)
std::string raw_tx, raw_script, sigHashHex;
int nIn, nHashType;
uint256 sh;
- CTransaction tx;
+ CTransactionRef tx;
CScript scriptCode = CScript();
try {
@@ -199,7 +199,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data)
stream >> tx;
CValidationState state;
- BOOST_CHECK_MESSAGE(CheckTransaction(tx, state), strTest);
+ BOOST_CHECK_MESSAGE(CheckTransaction(*tx, state), strTest);
BOOST_CHECK(state.IsValid());
std::vector<unsigned char> raw = ParseHex(raw_script);
@@ -209,7 +209,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data)
continue;
}
- sh = SignatureHash(scriptCode, tx, nIn, nHashType, 0, SIGVERSION_BASE);
+ sh = SignatureHash(scriptCode, *tx, nIn, nHashType, 0, SIGVERSION_BASE);
BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest);
}
}
diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp
index e8a63ae60c..2e974cc612 100644
--- a/src/test/sigopcount_tests.cpp
+++ b/src/test/sigopcount_tests.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "main.h"
+#include "validation.h"
#include "pubkey.h"
#include "key.h"
#include "script/script.h"
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 34f501e867..8b715ce93e 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -15,6 +15,64 @@ using namespace boost::assign; // bring 'operator+=()' into scope
BOOST_FIXTURE_TEST_SUITE(streams_tests, BasicTestingSetup)
+BOOST_AUTO_TEST_CASE(streams_vector_writer)
+{
+ unsigned char a(1);
+ unsigned char b(2);
+ unsigned char bytes[] = { 3, 4, 5, 6 };
+ std::vector<unsigned char> vch;
+
+ // Each test runs twice. Serializing a second time at the same starting
+ // point should yield the same results, even if the first test grew the
+ // vector.
+
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{1, 2}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{1, 2}}));
+ vch.clear();
+
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2}}));
+ vch.clear();
+
+ vch.resize(5, 0);
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2, 0}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 1, 2, 0}}));
+ vch.clear();
+
+ vch.resize(4, 0);
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 1, 2}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 1, 2}}));
+ vch.clear();
+
+ vch.resize(4, 0);
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}}));
+ vch.clear();
+
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes));
+ BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes));
+ BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}}));
+ vch.clear();
+
+ vch.resize(4, 8);
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}}));
+ CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b);
+ BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}}));
+ vch.clear();
+}
+
BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
{
std::vector<char> in;
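The new streams_vector_writer test exercises CVectorWriter's positional writes; in sketch form, writing two bytes at offset 2 into a five-byte zeroed vector leaves the surrounding bytes untouched:

    std::vector<unsigned char> vch(5, 0);
    unsigned char a = 1, b = 2;
    CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b);
    // vch is now {0, 0, 1, 2, 0}: bytes before the start offset are preserved,
    // and the vector grows only when the written data runs past its end.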
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 3da0be8ca4..139389117a 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -10,8 +10,9 @@
#include "consensus/consensus.h"
#include "consensus/validation.h"
#include "key.h"
-#include "main.h"
+#include "validation.h"
#include "miner.h"
+#include "net_processing.h"
#include "pubkey.h"
#include "random.h"
#include "txdb.h"
@@ -101,7 +102,7 @@ TestChain100Setup::TestChain100Setup() : TestingSetup(CBaseChainParams::REGTEST)
{
std::vector<CMutableTransaction> noTxns;
CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey);
- coinbaseTxns.push_back(b.vtx[0]);
+ coinbaseTxns.push_back(*b.vtx[0]);
}
}
@@ -119,15 +120,15 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>&
// Replace mempool-selected txns with just coinbase plus passed-in txns:
block.vtx.resize(1);
BOOST_FOREACH(const CMutableTransaction& tx, txns)
- block.vtx.push_back(tx);
+ block.vtx.push_back(MakeTransactionRef(tx));
// IncrementExtraNonce creates a valid coinbase and merkleRoot
unsigned int extraNonce = 0;
IncrementExtraNonce(&block, chainActive.Tip(), extraNonce);
while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce;
- CValidationState state;
- ProcessNewBlock(state, chainparams, NULL, &block, true, NULL, false);
+ std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
+ ProcessNewBlock(chainparams, shared_pblock, true, NULL, NULL);
CBlock result = block;
return result;
@@ -138,12 +139,12 @@ TestChain100Setup::~TestChain100Setup()
}
-CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(CMutableTransaction &tx, CTxMemPool *pool) {
+CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx, CTxMemPool *pool) {
CTransaction txn(tx);
return FromTx(txn, pool);
}
-CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(CTransaction &txn, CTxMemPool *pool) {
+CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CTransaction &txn, CTxMemPool *pool) {
bool hasNoDependencies = pool ? pool->HasNoInputsOf(txn) : hadNoDependencies;
// Hack to assume either its completely dependent on other mempool txs or not at all
CAmount inChainValue = hasNoDependencies ? txn.GetValueOut() : 0;
diff --git a/src/test/test_bitcoin.h b/src/test/test_bitcoin.h
index 9819a7097d..3dea20445d 100644
--- a/src/test/test_bitcoin.h
+++ b/src/test/test_bitcoin.h
@@ -79,8 +79,8 @@ struct TestMemPoolEntryHelper
nFee(0), nTime(0), dPriority(0.0), nHeight(1),
hadNoDependencies(false), spendsCoinbase(false), sigOpCost(4) { }
- CTxMemPoolEntry FromTx(CMutableTransaction &tx, CTxMemPool *pool = NULL);
- CTxMemPoolEntry FromTx(CTransaction &tx, CTxMemPool *pool = NULL);
+ CTxMemPoolEntry FromTx(const CMutableTransaction &tx, CTxMemPool *pool = NULL);
+ CTxMemPoolEntry FromTx(const CTransaction &tx, CTxMemPool *pool = NULL);
// Change the default value
TestMemPoolEntryHelper &Fee(CAmount _fee) { nFee = _fee; return *this; }
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 34d9547f3d..ae5406ef7e 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -12,7 +12,7 @@
#include "core_io.h"
#include "key.h"
#include "keystore.h"
-#include "main.h" // For CheckTransaction
+#include "validation.h" // For CheckTransaction
#include "policy/policy.h"
#include "script/script.h"
#include "script/sign.h"
@@ -150,8 +150,7 @@ BOOST_AUTO_TEST_CASE(tx_valid)
string transaction = test[1].get_str();
CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION);
- CTransaction tx;
- stream >> tx;
+ CTransaction tx(deserialize, stream);
CValidationState state;
BOOST_CHECK_MESSAGE(CheckTransaction(tx, state), strTest);
@@ -236,8 +235,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid)
string transaction = test[1].get_str();
CDataStream stream(ParseHex(transaction), SER_NETWORK, PROTOCOL_VERSION );
- CTransaction tx;
- stream >> tx;
+ CTransaction tx(deserialize, stream);
CValidationState state;
fValid = CheckTransaction(tx, state) && state.IsValid();
@@ -346,7 +344,7 @@ BOOST_AUTO_TEST_CASE(test_Get)
BOOST_CHECK_EQUAL(coins.GetValueIn(t1), (50+21+22)*CENT);
}
-void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, CTransaction& output, CMutableTransaction& input, bool success = true)
+void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, CTransactionRef& output, CMutableTransaction& input, bool success = true)
{
CMutableTransaction outputm;
outputm.nVersion = 1;
@@ -360,22 +358,22 @@ void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, C
CDataStream ssout(SER_NETWORK, PROTOCOL_VERSION);
ssout << outputm;
ssout >> output;
- assert(output.vin.size() == 1);
- assert(output.vin[0] == outputm.vin[0]);
- assert(output.vout.size() == 1);
- assert(output.vout[0] == outputm.vout[0]);
- assert(output.wit.vtxinwit.size() == 0);
+ assert(output->vin.size() == 1);
+ assert(output->vin[0] == outputm.vin[0]);
+ assert(output->vout.size() == 1);
+ assert(output->vout[0] == outputm.vout[0]);
+ assert(output->wit.vtxinwit.size() == 0);
CMutableTransaction inputm;
inputm.nVersion = 1;
inputm.vin.resize(1);
- inputm.vin[0].prevout.hash = output.GetHash();
+ inputm.vin[0].prevout.hash = output->GetHash();
inputm.vin[0].prevout.n = 0;
inputm.wit.vtxinwit.resize(1);
inputm.vout.resize(1);
inputm.vout[0].nValue = 1;
inputm.vout[0].scriptPubKey = CScript();
- bool ret = SignSignature(keystore, output, inputm, 0, SIGHASH_ALL);
+ bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL);
assert(ret == success);
CDataStream ssin(SER_NETWORK, PROTOCOL_VERSION);
ssin << inputm;
@@ -393,11 +391,11 @@ void CreateCreditAndSpend(const CKeyStore& keystore, const CScript& outscript, C
}
}
-void CheckWithFlag(const CTransaction& output, const CMutableTransaction& input, int flags, bool success)
+void CheckWithFlag(const CTransactionRef& output, const CMutableTransaction& input, int flags, bool success)
{
ScriptError error;
CTransaction inputi(input);
- bool ret = VerifyScript(inputi.vin[0].scriptSig, output.vout[0].scriptPubKey, inputi.wit.vtxinwit.size() > 0 ? &inputi.wit.vtxinwit[0].scriptWitness : NULL, flags, TransactionSignatureChecker(&inputi, 0, output.vout[0].nValue), &error);
+ bool ret = VerifyScript(inputi.vin[0].scriptSig, output->vout[0].scriptPubKey, inputi.wit.vtxinwit.size() > 0 ? &inputi.wit.vtxinwit[0].scriptWitness : NULL, flags, TransactionSignatureChecker(&inputi, 0, output->vout[0].nValue), &error);
assert(ret == success);
}
@@ -466,10 +464,10 @@ BOOST_AUTO_TEST_CASE(test_big_witness_transaction) {
assert(hashSigned);
}
- CTransaction tx;
CDataStream ssout(SER_NETWORK, PROTOCOL_VERSION);
- WithOrVersion(&ssout, 0) << mtx;
- WithOrVersion(&ssout, 0) >> tx;
+ auto vstream = WithOrVersion(&ssout, 0);
+ vstream << mtx;
+ CTransaction tx(deserialize, vstream);
// check all inputs concurrently, with the cache
PrecomputedTransactionData txdata(tx);
@@ -547,7 +545,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
keystore2.AddCScript(GetScriptForWitness(scriptMulti));
keystore2.AddKeyPubKey(key3, pubkey3);
- CTransaction output1, output2;
+ CTransactionRef output1, output2;
CMutableTransaction input1, input2;
SignatureData sigdata;
@@ -639,8 +637,8 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output1, input1, 0, false);
CreateCreditAndSpend(keystore2, scriptMulti, output2, input2, false);
CheckWithFlag(output2, input2, 0, false);
- BOOST_CHECK(output1 == output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ BOOST_CHECK(*output1 == *output2);
+ UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
// P2SH 2-of-2 multisig
@@ -650,8 +648,8 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore2, GetScriptForDestination(CScriptID(scriptMulti)), output2, input2, false);
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, false);
- BOOST_CHECK(output1 == output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ BOOST_CHECK(*output1 == *output2);
+ UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
@@ -662,8 +660,8 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore2, GetScriptForWitness(scriptMulti), output2, input2, false);
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
- BOOST_CHECK(output1 == output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ BOOST_CHECK(*output1 == *output2);
+ UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
@@ -674,8 +672,8 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore2, GetScriptForDestination(CScriptID(GetScriptForWitness(scriptMulti))), output2, input2, false);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
- BOOST_CHECK(output1 == output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1.vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1.vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ BOOST_CHECK(*output1 == *output2);
+ UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
}
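CreateCreditAndSpend now hands back a CTransactionRef, which can be filled directly from a stream and dereferenced where a CTransaction is required; a minimal sketch of the flow used above (outputm, inputm and keystore as in the test):

    CTransactionRef output;
    CDataStream ssout(SER_NETWORK, PROTOCOL_VERSION);
    ssout << outputm;                       // serialize the mutable credit transaction
    ssout >> output;                        // deserialize straight into the shared ref
    assert(output->vout.size() == 1);
    bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL);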
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 76e4e7a4be..55b4d28fb2 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -4,7 +4,7 @@
#include "consensus/validation.h"
#include "key.h"
-#include "main.h"
+#include "validation.h"
#include "miner.h"
#include "pubkey.h"
#include "txmempool.h"
diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp
index c05d593ed6..bae0eff7e5 100644
--- a/src/test/versionbits_tests.cpp
+++ b/src/test/versionbits_tests.cpp
@@ -7,7 +7,7 @@
#include "test/test_bitcoin.h"
#include "test/test_random.h"
#include "chainparams.h"
-#include "main.h"
+#include "validation.h"
#include "consensus/params.h"
#include <boost/test/unit_test.hpp>
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index 1ca6b46566..ffb9993f90 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -470,7 +470,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply&
// Finally - now create the service
if (private_key.empty()) // No private key, generate one
- private_key = "NEW:BEST";
+ private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214
// Request hidden service, redirect port.
// Note that the 'virtual' port doesn't have to be the same as our internal port, but this is just a convenient
// choice. TODO; refactor the shutdown sequence some day.
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 45135a5f73..c035a84db5 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -8,7 +8,7 @@
#include "clientversion.h"
#include "consensus/consensus.h"
#include "consensus/validation.h"
-#include "main.h"
+#include "validation.h"
#include "policy/policy.h"
#include "policy/fees.h"
#include "streams.h"
@@ -24,7 +24,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee,
int64_t _nTime, double _entryPriority, unsigned int _entryHeight,
bool poolHasNoInputsOf, CAmount _inChainInputValue,
bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp):
- tx(std::make_shared<CTransaction>(_tx)), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight),
+ tx(MakeTransactionRef(_tx)), nFee(_nFee), nTime(_nTime), entryPriority(_entryPriority), entryHeight(_entryHeight),
hadNoDependencies(poolHasNoInputsOf), inChainInputValue(_inChainInputValue),
spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp)
{
@@ -503,7 +503,7 @@ void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants
}
}
-void CTxMemPool::removeRecursive(const CTransaction &origTx, std::vector<std::shared_ptr<const CTransaction>>* removed)
+void CTxMemPool::removeRecursive(const CTransaction &origTx, std::vector<CTransactionRef>* removed)
{
// Remove transaction from memory pool
{
@@ -576,7 +576,7 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem
RemoveStaged(setAllRemoves, false);
}
-void CTxMemPool::removeConflicts(const CTransaction &tx, std::vector<std::shared_ptr<const CTransaction>>* removed)
+void CTxMemPool::removeConflicts(const CTransaction &tx, std::vector<CTransactionRef>* removed)
{
// Remove transactions which depend on inputs of tx, recursively
LOCK(cs);
@@ -596,29 +596,29 @@ void CTxMemPool::removeConflicts(const CTransaction &tx, std::vector<std::shared
/**
* Called when a block is connected. Removes from mempool and updates the miner fee estimator.
*/
-void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight,
- std::vector<std::shared_ptr<const CTransaction>>* conflicts, bool fCurrentEstimate)
+void CTxMemPool::removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight,
+ std::vector<CTransactionRef>* conflicts, bool fCurrentEstimate)
{
LOCK(cs);
std::vector<CTxMemPoolEntry> entries;
- BOOST_FOREACH(const CTransaction& tx, vtx)
+ for (const auto& tx : vtx)
{
- uint256 hash = tx.GetHash();
+ uint256 hash = tx->GetHash();
indexed_transaction_set::iterator i = mapTx.find(hash);
if (i != mapTx.end())
entries.push_back(*i);
}
- BOOST_FOREACH(const CTransaction& tx, vtx)
+ for (const auto& tx : vtx)
{
- txiter it = mapTx.find(tx.GetHash());
+ txiter it = mapTx.find(tx->GetHash());
if (it != mapTx.end()) {
setEntries stage;
stage.insert(it);
RemoveStaged(stage, true);
}
- removeConflicts(tx, conflicts);
- ClearPrioritisation(tx.GetHash());
+ removeConflicts(*tx, conflicts);
+ ClearPrioritisation(tx->GetHash());
}
// After the txs in the new block have been removed from the mempool, update policy estimates
minerPolicyEstimator->processBlock(nBlockHeight, entries, fCurrentEstimate);
@@ -851,7 +851,7 @@ std::vector<TxMempoolInfo> CTxMemPool::infoAll() const
return ret;
}
-std::shared_ptr<const CTransaction> CTxMemPool::get(const uint256& hash) const
+CTransactionRef CTxMemPool::get(const uint256& hash) const
{
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hash);
@@ -978,7 +978,7 @@ bool CCoinsViewMemPool::GetCoins(const uint256 &txid, CCoins &coins) const {
// If an entry in the mempool exists, always return that one, as it's guaranteed to never
// conflict with the underlying cache, and it cannot have pruned entries (as it contains full)
// transactions. First checking the underlying cache risks returning a pruned entry instead.
- shared_ptr<const CTransaction> ptx = mempool.get(txid);
+ CTransactionRef ptx = mempool.get(txid);
if (ptx) {
coins = CCoins(*ptx, MEMPOOL_HEIGHT);
return true;
diff --git a/src/txmempool.h b/src/txmempool.h
index 9b0ca4655e..23fe5a7abe 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -62,7 +62,7 @@ class CTxMemPool;
/** \class CTxMemPoolEntry
*
- * CTxMemPoolEntry stores data about the correponding transaction, as well
+ * CTxMemPoolEntry stores data about the corresponding transaction, as well
* as data about all in-mempool transactions that depend on the transaction
* ("descendant" transactions).
*
@@ -80,7 +80,7 @@ class CTxMemPool;
class CTxMemPoolEntry
{
private:
- std::shared_ptr<const CTransaction> tx;
+ CTransactionRef tx;
CAmount nFee; //!< Cached to avoid expensive parent-transaction lookups
size_t nTxWeight; //!< ... and avoid recomputing tx weight (also used for GetTxSize())
size_t nModSize; //!< ... and modified size for priority
@@ -118,7 +118,7 @@ public:
CTxMemPoolEntry(const CTxMemPoolEntry& other);
const CTransaction& GetTx() const { return *this->tx; }
- std::shared_ptr<const CTransaction> GetSharedTx() const { return this->tx; }
+ CTransactionRef GetSharedTx() const { return this->tx; }
/**
* Fast calculation of lower bound of current priority as update
* from entry priority. Only inputs that were originally in-chain will age.
@@ -322,7 +322,7 @@ class CBlockPolicyEstimator;
struct TxMempoolInfo
{
/** The transaction itself */
- std::shared_ptr<const CTransaction> tx;
+ CTransactionRef tx;
/** Time the transaction entered the mempool. */
int64_t nTime;
@@ -527,11 +527,11 @@ public:
bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate = true);
bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool fCurrentEstimate = true);
- void removeRecursive(const CTransaction &tx, std::vector<std::shared_ptr<const CTransaction>>* removed = NULL);
+ void removeRecursive(const CTransaction &tx, std::vector<CTransactionRef>* removed = NULL);
void removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags);
- void removeConflicts(const CTransaction &tx, std::vector<std::shared_ptr<const CTransaction>>* removed = NULL);
- void removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight,
- std::vector<std::shared_ptr<const CTransaction>>* conflicts = NULL, bool fCurrentEstimate = true);
+ void removeConflicts(const CTransaction &tx, std::vector<CTransactionRef>* removed = NULL);
+ void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight,
+ std::vector<CTransactionRef>* conflicts = NULL, bool fCurrentEstimate = true);
void clear();
void _clear(); //lock free
bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
@@ -623,7 +623,7 @@ public:
return (mapTx.count(hash) != 0);
}
- std::shared_ptr<const CTransaction> get(const uint256& hash) const;
+ CTransactionRef get(const uint256& hash) const;
TxMempoolInfo info(const uint256& hash) const;
std::vector<TxMempoolInfo> infoAll() const;
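
A hypothetical caller of the updated lookup API (the variable names are illustrative): the returned CTransactionRef shares ownership with the mempool entry, so no deep copy is made and the object stays valid even if the entry is evicted afterwards.

    // 'hash' is a txid obtained elsewhere
    CTransactionRef ptx = mempool.get(hash);
    if (ptx) {
        // ptx keeps the transaction alive independently of the mempool
        assert(ptx->GetHash() == hash);
    }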
diff --git a/src/util.cpp b/src/util.cpp
index c20ede6221..014013d214 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -107,7 +107,6 @@ map<string, vector<string> > mapMultiArgs;
bool fDebug = false;
bool fPrintToConsole = false;
bool fPrintToDebugLog = true;
-bool fServer = false;
string strMiscWarning;
bool fLogTimestamps = DEFAULT_LOGTIMESTAMPS;
bool fLogTimeMicros = DEFAULT_LOGTIMEMICROS;
@@ -259,7 +258,7 @@ bool LogAcceptCategory(const char* category)
* suppress printing of the timestamp when multiple calls are made that don't
* end in a newline. Initialize it to true, and hold it, in the calling context.
*/
-static std::string LogTimestampStr(const std::string &str, bool *fStartedNewLine)
+static std::string LogTimestampStr(const std::string &str, std::atomic_bool *fStartedNewLine)
{
string strStamped;
@@ -286,7 +285,7 @@ static std::string LogTimestampStr(const std::string &str, bool *fStartedNewLine
int LogPrintStr(const std::string &str)
{
int ret = 0; // Returns total number of characters written
- static bool fStartedNewLine = true;
+ static std::atomic_bool fStartedNewLine(true);
string strTimestamped = LogTimestampStr(str, &fStartedNewLine);
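
A stand-alone illustration (not the project's code) of why the flag becomes std::atomic_bool: LogPrintStr may be called from several threads, and concurrent reads and writes of a plain static bool are a data race, while the atomic is well-defined.

    #include <atomic>
    #include <string>

    static std::atomic_bool fStartedNewLine(true);

    void LogFragment(const std::string& str)
    {
        if (fStartedNewLine.load()) {
            // prepend a timestamp here
        }
        fStartedNewLine.store(!str.empty() && str.back() == '\n');
    }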
diff --git a/src/util.h b/src/util.h
index bbb9b5db82..e8aa266f28 100644
--- a/src/util.h
+++ b/src/util.h
@@ -46,7 +46,6 @@ extern std::map<std::string, std::vector<std::string> > mapMultiArgs;
extern bool fDebug;
extern bool fPrintToConsole;
extern bool fPrintToDebugLog;
-extern bool fServer;
extern std::string strMiscWarning;
extern bool fLogTimestamps;
extern bool fLogTimeMicros;
diff --git a/src/utiltime.cpp b/src/utiltime.cpp
index da590f8889..51d545ef8a 100644
--- a/src/utiltime.cpp
+++ b/src/utiltime.cpp
@@ -74,8 +74,9 @@ void MilliSleep(int64_t n)
std::string DateTimeStrFormat(const char* pszFormat, int64_t nTime)
{
+ static std::locale classic(std::locale::classic());
// std::locale takes ownership of the pointer
- std::locale loc(std::locale::classic(), new boost::posix_time::time_facet(pszFormat));
+ std::locale loc(classic, new boost::posix_time::time_facet(pszFormat));
std::stringstream ss;
ss.imbue(loc);
ss << boost::posix_time::from_time_t(nTime);
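
The change caches the immutable classic locale in a function-local static so that only the per-call time_facet is constructed; callers are unaffected. A usage sketch, assuming the usual strftime-style format string and the GetTime() helper:

    // e.g. DateTimeStrFormat("%Y-%m-%d %H:%M:%S", 0) yields "1970-01-01 00:00:00"
    std::string stamp = DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime());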
diff --git a/src/main.cpp b/src/validation.cpp
index 10f9b15ec6..a541978c85 100644
--- a/src/main.cpp
+++ b/src/validation.cpp
@@ -3,11 +3,9 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "main.h"
+#include "validation.h"
-#include "addrman.h"
#include "arith_uint256.h"
-#include "blockencodings.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
@@ -16,9 +14,6 @@
#include "consensus/validation.h"
#include "hash.h"
#include "init.h"
-#include "merkleblock.h"
-#include "net.h"
-#include "netbase.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "pow.h"
@@ -28,6 +23,7 @@
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
+#include "timedata.h"
#include "tinyformat.h"
#include "txdb.h"
#include "txmempool.h"
@@ -64,11 +60,10 @@ CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = NULL;
-int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
-bool fImporting = false;
+std::atomic_bool fImporting(false);
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
@@ -87,25 +82,6 @@ CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CTxMemPool mempool(::minRelayTxFee);
-FeeFilterRounder filterRounder(::minRelayTxFee);
-
-struct IteratorComparator
-{
- template<typename I>
- bool operator()(const I& a, const I& b)
- {
- return &(*a) < &(*b);
- }
-};
-
-struct COrphanTx {
- CTransaction tx;
- NodeId fromPeer;
- int64_t nTimeExpire;
-};
-map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
-map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
-void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
static void CheckBlockIndex(const Consensus::Params& consensusParams);
@@ -114,8 +90,6 @@ CScript COINBASE_FLAGS;
const string strMessageMagic = "Bitcoin Signed Message:\n";
-static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
-
// Internal stuff
namespace {
@@ -148,8 +122,6 @@ namespace {
* missing the data for the block.
*/
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
- /** Number of nodes with fSyncStarted. */
- int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
*/
@@ -176,528 +148,13 @@ namespace {
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
- /**
- * Sources of received blocks, saved to be able to send them reject
- * messages or ban them when processing happens afterwards. Protected by
- * cs_main.
- * Set mapBlockSource[hash].second to false if the node should not be
- * punished if the block is invalid.
- */
- map<uint256, std::pair<NodeId, bool>> mapBlockSource;
-
- /**
- * Filter for transactions that were recently rejected by
- * AcceptToMemoryPool. These are not rerequested until the chain tip
- * changes, at which point the entire filter is reset. Protected by
- * cs_main.
- *
- * Without this filter we'd be re-requesting txs from each of our peers,
- * increasing bandwidth consumption considerably. For instance, with 100
- * peers, half of which relay a tx we don't accept, that might be a 50x
- * bandwidth increase. A flooding attacker attempting to roll-over the
- * filter using minimum-sized, 60byte, transactions might manage to send
- * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
- * two minute window to send invs to us.
- *
- * Decreasing the false positive rate is fairly cheap, so we pick one in a
- * million to make it highly unlikely for users to have issues with this
- * filter.
- *
- * Memory used: 1.3 MB
- */
- std::unique_ptr<CRollingBloomFilter> recentRejects;
- uint256 hashRecentRejectsChainTip;
-
- /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
- struct QueuedBlock {
- uint256 hash;
- CBlockIndex* pindex; //!< Optional.
- bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
- std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
- };
- map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
-
- /** Stack of nodes which we have set to announce using compact blocks */
- list<NodeId> lNodesAnnouncingHeaderAndIDs;
-
- /** Number of preferable block download peers. */
- int nPreferredDownload = 0;
-
/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;
/** Dirty block file entries. */
set<int> setDirtyFileInfo;
-
- /** Number of peers from which we're downloading blocks. */
- int nPeersWithValidatedDownloads = 0;
-
- /** Relay map, protected by cs_main. */
- typedef std::map<uint256, std::shared_ptr<const CTransaction>> MapRelay;
- MapRelay mapRelay;
- /** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main). */
- std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
} // anon namespace
-//////////////////////////////////////////////////////////////////////////////
-//
-// Registration of network node signals.
-//
-
-namespace {
-
-struct CBlockReject {
- unsigned char chRejectCode;
- string strRejectReason;
- uint256 hashBlock;
-};
-
-/**
- * Maintain validation-specific state about nodes, protected by cs_main, instead
- * by CNode's own locks. This simplifies asynchronous operation, where
- * processing of incoming data is done after the ProcessMessage call returns,
- * and we're no longer holding the node's locks.
- */
-struct CNodeState {
- //! The peer's address
- const CService address;
- //! Whether we have a fully established connection.
- bool fCurrentlyConnected;
- //! Accumulated misbehaviour score for this peer.
- int nMisbehavior;
- //! Whether this peer should be disconnected and banned (unless whitelisted).
- bool fShouldBan;
- //! String name of this peer (debugging/logging purposes).
- const std::string name;
- //! List of asynchronously-determined block rejections to notify this peer about.
- std::vector<CBlockReject> rejects;
- //! The best known block we know this peer has announced.
- CBlockIndex *pindexBestKnownBlock;
- //! The hash of the last unknown block this peer has announced.
- uint256 hashLastUnknownBlock;
- //! The last full block we both have.
- CBlockIndex *pindexLastCommonBlock;
- //! The best header we have sent our peer.
- CBlockIndex *pindexBestHeaderSent;
- //! Length of current-streak of unconnecting headers announcements
- int nUnconnectingHeaders;
- //! Whether we've started headers synchronization with this peer.
- bool fSyncStarted;
- //! Since when we're stalling block download progress (in microseconds), or 0.
- int64_t nStallingSince;
- list<QueuedBlock> vBlocksInFlight;
- //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
- int64_t nDownloadingSince;
- int nBlocksInFlight;
- int nBlocksInFlightValidHeaders;
- //! Whether we consider this a preferred download peer.
- bool fPreferredDownload;
- //! Whether this peer wants invs or headers (when possible) for block announcements.
- bool fPreferHeaders;
- //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
- bool fPreferHeaderAndIDs;
- /**
- * Whether this peer will send us cmpctblocks if we request them.
- * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
- * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
- */
- bool fProvidesHeaderAndIDs;
- //! Whether this peer can give us witnesses
- bool fHaveWitness;
- //! Whether this peer wants witnesses in cmpctblocks/blocktxns
- bool fWantsCmpctWitness;
- /**
- * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
- * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
- */
- bool fSupportsDesiredCmpctVersion;
-
- CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
- fCurrentlyConnected = false;
- nMisbehavior = 0;
- fShouldBan = false;
- pindexBestKnownBlock = NULL;
- hashLastUnknownBlock.SetNull();
- pindexLastCommonBlock = NULL;
- pindexBestHeaderSent = NULL;
- nUnconnectingHeaders = 0;
- fSyncStarted = false;
- nStallingSince = 0;
- nDownloadingSince = 0;
- nBlocksInFlight = 0;
- nBlocksInFlightValidHeaders = 0;
- fPreferredDownload = false;
- fPreferHeaders = false;
- fPreferHeaderAndIDs = false;
- fProvidesHeaderAndIDs = false;
- fHaveWitness = false;
- fWantsCmpctWitness = false;
- fSupportsDesiredCmpctVersion = false;
- }
-};
-
-/** Map maintaining per-node state. Requires cs_main. */
-map<NodeId, CNodeState> mapNodeState;
-
-// Requires cs_main.
-CNodeState *State(NodeId pnode) {
- map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
- if (it == mapNodeState.end())
- return NULL;
- return &it->second;
-}
-
-void UpdatePreferredDownload(CNode* node, CNodeState* state)
-{
- nPreferredDownload -= state->fPreferredDownload;
-
- // Whether this node should be marked as a preferred download node.
- state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
-
- nPreferredDownload += state->fPreferredDownload;
-}
-
-void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
-{
- ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
- uint64_t nonce = pnode->GetLocalNonce();
- int nNodeStartingHeight = pnode->GetMyStartingHeight();
- NodeId nodeid = pnode->GetId();
- CAddress addr = pnode->addr;
-
- CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
- CAddress addrMe = CAddress(CService(), nLocalNodeServices);
-
- connman.PushMessageWithVersion(pnode, INIT_PROTO_VERSION, NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
- nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes);
-
- if (fLogIPs)
- LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
- else
- LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
-}
-
-void InitializeNode(CNode *pnode, CConnman& connman) {
- CAddress addr = pnode->addr;
- std::string addrName = pnode->addrName;
- NodeId nodeid = pnode->GetId();
- {
- LOCK(cs_main);
- mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
- }
- if(!pnode->fInbound)
- PushNodeVersion(pnode, connman, GetTime());
-}
-
-void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
- fUpdateConnectionTime = false;
- LOCK(cs_main);
- CNodeState *state = State(nodeid);
-
- if (state->fSyncStarted)
- nSyncStarted--;
-
- if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
- fUpdateConnectionTime = true;
- }
-
- BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
- mapBlocksInFlight.erase(entry.hash);
- }
- EraseOrphansFor(nodeid);
- nPreferredDownload -= state->fPreferredDownload;
- nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
- assert(nPeersWithValidatedDownloads >= 0);
-
- mapNodeState.erase(nodeid);
-
- if (mapNodeState.empty()) {
- // Do a consistency check after the last peer is removed.
- assert(mapBlocksInFlight.empty());
- assert(nPreferredDownload == 0);
- assert(nPeersWithValidatedDownloads == 0);
- }
-}
-
-// Requires cs_main.
-// Returns a bool indicating whether we requested this block.
-// Also used if a block was /not/ received and timed out or started with another peer
-bool MarkBlockAsReceived(const uint256& hash) {
- map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
- if (itInFlight != mapBlocksInFlight.end()) {
- CNodeState *state = State(itInFlight->second.first);
- state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
- if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
- // Last validated block on the queue was received.
- nPeersWithValidatedDownloads--;
- }
- if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
- // First block on the queue was received, update the start download time for the next one
- state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
- }
- state->vBlocksInFlight.erase(itInFlight->second.second);
- state->nBlocksInFlight--;
- state->nStallingSince = 0;
- mapBlocksInFlight.erase(itInFlight);
- return true;
- }
- return false;
-}
-
-// Requires cs_main.
-// returns false, still setting pit, if the block was already in flight from the same peer
-// pit will only be valid as long as the same cs_main lock is being held
-bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
- CNodeState *state = State(nodeid);
- assert(state != NULL);
-
- // Short-circuit most stuff in case its from the same node
- map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
- if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
- *pit = &itInFlight->second.second;
- return false;
- }
-
- // Make sure it's not listed somewhere already.
- MarkBlockAsReceived(hash);
-
- list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
- {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)});
- state->nBlocksInFlight++;
- state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
- if (state->nBlocksInFlight == 1) {
- // We're starting a block download (batch) from this peer.
- state->nDownloadingSince = GetTimeMicros();
- }
- if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
- nPeersWithValidatedDownloads++;
- }
- itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
- if (pit)
- *pit = &itInFlight->second.second;
- return true;
-}
-
-/** Check whether the last unknown block a peer advertised is not yet known. */
-void ProcessBlockAvailability(NodeId nodeid) {
- CNodeState *state = State(nodeid);
- assert(state != NULL);
-
- if (!state->hashLastUnknownBlock.IsNull()) {
- BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
- if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
- if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
- state->pindexBestKnownBlock = itOld->second;
- state->hashLastUnknownBlock.SetNull();
- }
- }
-}
-
-/** Update tracking information about which blocks a peer is assumed to have. */
-void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
- CNodeState *state = State(nodeid);
- assert(state != NULL);
-
- ProcessBlockAvailability(nodeid);
-
- BlockMap::iterator it = mapBlockIndex.find(hash);
- if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
- // An actually better block was announced.
- if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
- state->pindexBestKnownBlock = it->second;
- } else {
- // An unknown block was announced; just assume that the latest one is the best one.
- state->hashLastUnknownBlock = hash;
- }
-}
-
-void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) {
- if (!nodestate->fSupportsDesiredCmpctVersion) {
- // Never ask from peers who can't provide witnesses.
- return;
- }
- if (nodestate->fProvidesHeaderAndIDs) {
- for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
- if (*it == pfrom->GetId()) {
- lNodesAnnouncingHeaderAndIDs.erase(it);
- lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
- return;
- }
- }
- bool fAnnounceUsingCMPCTBLOCK = false;
- uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
- if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
- // As per BIP152, we only get 3 of our peers to announce
- // blocks using compact encodings.
- bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
- connman.PushMessage(pnodeStop, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
- return true;
- });
- if(found)
- lNodesAnnouncingHeaderAndIDs.pop_front();
- }
- fAnnounceUsingCMPCTBLOCK = true;
- connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
- lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
- }
-}
-
-// Requires cs_main
-bool CanDirectFetch(const Consensus::Params &consensusParams)
-{
- return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
-}
-
-// Requires cs_main
-bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
-{
- if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
- return true;
- if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
- return true;
- return false;
-}
-
-/** Find the last common ancestor two blocks have.
- * Both pa and pb must be non-NULL. */
-CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
- if (pa->nHeight > pb->nHeight) {
- pa = pa->GetAncestor(pb->nHeight);
- } else if (pb->nHeight > pa->nHeight) {
- pb = pb->GetAncestor(pa->nHeight);
- }
-
- while (pa != pb && pa && pb) {
- pa = pa->pprev;
- pb = pb->pprev;
- }
-
- // Eventually all chain branches meet at the genesis block.
- assert(pa == pb);
- return pa;
-}
-
-/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
- * at most count entries. */
-void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
- if (count == 0)
- return;
-
- vBlocks.reserve(vBlocks.size() + count);
- CNodeState *state = State(nodeid);
- assert(state != NULL);
-
- // Make sure pindexBestKnownBlock is up to date, we'll need it.
- ProcessBlockAvailability(nodeid);
-
- if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
- // This peer has nothing interesting.
- return;
- }
-
- if (state->pindexLastCommonBlock == NULL) {
- // Bootstrap quickly by guessing a parent of our best tip is the forking point.
- // Guessing wrong in either direction is not a problem.
- state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
- }
-
- // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
- // of its current tip anymore. Go back enough to fix that.
- state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
- if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
- return;
-
- std::vector<CBlockIndex*> vToFetch;
- CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
- // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
- // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
- // download that next block if the window were 1 larger.
- int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
- int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
- NodeId waitingfor = -1;
- while (pindexWalk->nHeight < nMaxHeight) {
- // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
- // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
- // as iterating over ~100 CBlockIndex* entries anyway.
- int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
- vToFetch.resize(nToFetch);
- pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
- vToFetch[nToFetch - 1] = pindexWalk;
- for (unsigned int i = nToFetch - 1; i > 0; i--) {
- vToFetch[i - 1] = vToFetch[i]->pprev;
- }
-
- // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
- // are not yet downloaded and not in flight to vBlocks. In the mean time, update
- // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
- // already part of our chain (and therefore don't need it even if pruned).
- BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
- if (!pindex->IsValid(BLOCK_VALID_TREE)) {
- // We consider the chain that this peer is on invalid.
- return;
- }
- if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
- // We wouldn't download this block or its descendants from this peer.
- return;
- }
- if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
- if (pindex->nChainTx)
- state->pindexLastCommonBlock = pindex;
- } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
- // The block is not already downloaded, and not yet in flight.
- if (pindex->nHeight > nWindowEnd) {
- // We reached the end of the window.
- if (vBlocks.size() == 0 && waitingfor != nodeid) {
- // We aren't able to fetch anything, but we would be if the download window was one larger.
- nodeStaller = waitingfor;
- }
- return;
- }
- vBlocks.push_back(pindex);
- if (vBlocks.size() == count) {
- return;
- }
- } else if (waitingfor == -1) {
- // This is the first already-in-flight block.
- waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
- }
- }
- }
-}
-
-} // anon namespace
-
-bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
- LOCK(cs_main);
- CNodeState *state = State(nodeid);
- if (state == NULL)
- return false;
- stats.nMisbehavior = state->nMisbehavior;
- stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
- stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
- BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
- if (queue.pindex)
- stats.vHeightInFlight.push_back(queue.pindex->nHeight);
- }
- return true;
-}
-
-void RegisterNodeSignals(CNodeSignals& nodeSignals)
-{
- nodeSignals.ProcessMessages.connect(&ProcessMessages);
- nodeSignals.SendMessages.connect(&SendMessages);
- nodeSignals.InitializeNode.connect(&InitializeNode);
- nodeSignals.FinalizeNode.connect(&FinalizeNode);
-}
-
-void UnregisterNodeSignals(CNodeSignals& nodeSignals)
-{
- nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
- nodeSignals.SendMessages.disconnect(&SendMessages);
- nodeSignals.InitializeNode.disconnect(&InitializeNode);
- nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
-}
-
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
// Find the first block the caller has in the main chain
@@ -729,112 +186,6 @@ enum FlushStateMode {
// See definition for documentation
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode);
-//////////////////////////////////////////////////////////////////////////////
-//
-// mapOrphanTransactions
-//
-
-bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- uint256 hash = tx.GetHash();
- if (mapOrphanTransactions.count(hash))
- return false;
-
- // Ignore big transactions, to avoid a
- // send-big-orphans memory exhaustion attack. If a peer has a legitimate
- // large transaction with a missing parent then we assume
- // it will rebroadcast it later, after the parent transaction(s)
- // have been mined or received.
- // 100 orphans, each of which is at most 99,999 bytes big is
- // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
- unsigned int sz = GetTransactionWeight(tx);
- if (sz >= MAX_STANDARD_TX_WEIGHT)
- {
- LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
- return false;
- }
-
- auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
- assert(ret.second);
- BOOST_FOREACH(const CTxIn& txin, tx.vin) {
- mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
- }
-
- LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
- mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
- return true;
-}
-
-int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
- if (it == mapOrphanTransactions.end())
- return 0;
- BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
- {
- auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
- if (itPrev == mapOrphanTransactionsByPrev.end())
- continue;
- itPrev->second.erase(it);
- if (itPrev->second.empty())
- mapOrphanTransactionsByPrev.erase(itPrev);
- }
- mapOrphanTransactions.erase(it);
- return 1;
-}
-
-void EraseOrphansFor(NodeId peer)
-{
- int nErased = 0;
- map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
- while (iter != mapOrphanTransactions.end())
- {
- map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
- if (maybeErase->second.fromPeer == peer)
- {
- nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
- }
- }
- if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
-}
-
-
-unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- unsigned int nEvicted = 0;
- static int64_t nNextSweep;
- int64_t nNow = GetTime();
- if (nNextSweep <= nNow) {
- // Sweep out expired orphan pool entries:
- int nErased = 0;
- int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
- map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
- while (iter != mapOrphanTransactions.end())
- {
- map<uint256, COrphanTx>::iterator maybeErase = iter++;
- if (maybeErase->second.nTimeExpire <= nNow) {
- nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
- } else {
- nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
- }
- }
- // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
- nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
- if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
- }
- while (mapOrphanTransactions.size() > nMaxOrphans)
- {
- // Evict a random orphan:
- uint256 randomhash = GetRandHash();
- map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
- if (it == mapOrphanTransactions.end())
- it = mapOrphanTransactions.begin();
- EraseOrphanTx(it->first);
- ++nEvicted;
- }
- return nEvicted;
-}
-
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
if (tx.nLockTime == 0)
@@ -1633,16 +984,16 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
}
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
-bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
+bool GetTransaction(const uint256 &hash, CTransactionRef &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
CBlockIndex *pindexSlow = NULL;
LOCK(cs_main);
- std::shared_ptr<const CTransaction> ptx = mempool.get(hash);
+ CTransactionRef ptx = mempool.get(hash);
if (ptx)
{
- txOut = *ptx;
+ txOut = ptx;
return true;
}
@@ -1661,7 +1012,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::P
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
hashBlock = header.GetHash();
- if (txOut.GetHash() != hash)
+ if (txOut->GetHash() != hash)
return error("%s: txid mismatch", __func__);
return true;
}
@@ -1682,8 +1033,8 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::P
if (pindexSlow) {
CBlock block;
if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
- BOOST_FOREACH(const CTransaction &tx, block.vtx) {
- if (tx.GetHash() == hash) {
+ for (const auto& tx : block.vtx) {
+ if (tx->GetHash() == hash) {
txOut = tx;
hashBlock = pindexSlow->GetBlockHash();
return true;
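
A hypothetical caller adapted to the new out-parameter type (names below are illustrative):

    CTransactionRef tx;
    uint256 hashBlock;
    if (GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, false /* fAllowSlow */)) {
        // tx shares ownership with the mempool entry, or holds the transaction
        // deserialized from disk; hashBlock is set only in the on-disk case.
    }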
@@ -1893,26 +1244,6 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
CheckForkWarningConditions();
}
-// Requires cs_main.
-void Misbehaving(NodeId pnode, int howmuch)
-{
- if (howmuch == 0)
- return;
-
- CNodeState *state = State(pnode);
- if (state == NULL)
- return;
-
- state->nMisbehavior += howmuch;
- int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
- if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
- {
- LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
- state->fShouldBan = true;
- } else
- LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
-}
-
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
@@ -2223,7 +1554,7 @@ bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockI
// undo transactions in reverse order
for (int i = block.vtx.size() - 1; i >= 0; i--) {
- const CTransaction &tx = block.vtx[i];
+ const CTransaction &tx = *(block.vtx[i]);
uint256 hash = tx.GetHash();
// Check that all outputs are available and match the outputs in the block itself
@@ -2417,8 +1748,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
if (fEnforceBIP30) {
- BOOST_FOREACH(const CTransaction& tx, block.vtx) {
- const CCoins* coins = view.AccessCoins(tx.GetHash());
+ for (const auto& tx : block.vtx) {
+ const CCoins* coins = view.AccessCoins(tx->GetHash());
if (coins && !coins->IsPruned())
return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
@@ -2461,7 +1792,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
- std::vector<uint256> vOrphanErase;
std::vector<int> prevheights;
CAmount nFees = 0;
int nInputs = 0;
@@ -2474,7 +1804,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
- const CTransaction &tx = block.vtx[i];
+ const CTransaction &tx = *(block.vtx[i]);
nInputs += tx.vin.size();
@@ -2492,17 +1822,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;
}
- // Which orphan pool entries must we evict?
- for (size_t j = 0; j < tx.vin.size(); j++) {
- auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
- if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
- for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
- const CTransaction& orphanTx = (*mi)->second.tx;
- const uint256& orphanHash = orphanTx.GetHash();
- vOrphanErase.push_back(orphanHash);
- }
- }
-
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
@@ -2544,10 +1863,10 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
- if (block.vtx[0].GetValueOut() > blockReward)
+ if (block.vtx[0]->GetValueOut() > blockReward)
return state.DoS(100,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
- block.vtx[0].GetValueOut(), blockReward),
+ block.vtx[0]->GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
if (!control.Wait())
@@ -2590,16 +1909,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
// Watch for changes to the previous coinbase transaction.
static uint256 hashPrevBestCoinBase;
GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
- hashPrevBestCoinBase = block.vtx[0].GetHash();
+ hashPrevBestCoinBase = block.vtx[0]->GetHash();
- // Erase orphan transactions include or precluded by this block
- if (vOrphanErase.size()) {
- int nErased = 0;
- BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
- nErased += EraseOrphanTx(orphanHash);
- }
- LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
- }
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
@@ -2807,7 +2118,8 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
if (!fBare) {
// Resurrect mempool transactions from the disconnected block.
std::vector<uint256> vHashUpdate;
- BOOST_FOREACH(const CTransaction &tx, block.vtx) {
+ for (const auto& it : block.vtx) {
+ const CTransaction& tx = *it;
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
@@ -2828,8 +2140,8 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
UpdateTip(pindexDelete->pprev, chainparams);
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
- BOOST_FOREACH(const CTransaction &tx, block.vtx) {
- GetMainSignals().SyncTransaction(tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
+ for (const auto& tx : block.vtx) {
+ GetMainSignals().SyncTransaction(*tx, pindexDelete->pprev, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
}
return true;
}
@@ -2841,28 +2153,44 @@ static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
+ * Used to track conflicted transactions removed from mempool and transactions
+ * applied to the UTXO state as a part of a single ActivateBestChainStep call.
+ */
+struct ConnectTrace {
+ std::vector<CTransactionRef> txConflicted;
+ std::vector<std::pair<CBlockIndex*, std::shared_ptr<const CBlock> > > blocksConnected;
+};
+
+/**
* Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
+ *
+ * The block is always added to connectTrace (either after loading from disk or by copying
+ * pblock) - if that is not intended, care must be taken to remove the last entry in
+ * blocksConnected in case of failure.
*/
-bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock, std::vector<std::shared_ptr<const CTransaction>> &txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>> &txChanged)
+bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace)
{
assert(pindexNew->pprev == chainActive.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
- CBlock block;
if (!pblock) {
- if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
+ std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
+ connectTrace.blocksConnected.emplace_back(pindexNew, pblockNew);
+ if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
return AbortNode(state, "Failed to read block");
- pblock = &block;
+ } else {
+ connectTrace.blocksConnected.emplace_back(pindexNew, pblock);
}
+ const CBlock& blockConnecting = *connectTrace.blocksConnected.back().second;
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
{
CCoinsViewCache view(pcoinsTip);
- bool rv = ConnectBlock(*pblock, state, pindexNew, view, chainparams);
- GetMainSignals().BlockChecked(*pblock, state);
+ bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
+ GetMainSignals().BlockChecked(blockConnecting, state);
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
@@ -2880,13 +2208,10 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
- mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, &txConflicted, !IsInitialBlockDownload());
+ mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight, &connectTrace.txConflicted, !IsInitialBlockDownload());
// Update chainActive & related variables.
UpdateTip(pindexNew, chainparams);
- for(unsigned int i=0; i < pblock->vtx.size(); i++)
- txChanged.emplace_back(pblock->vtx[i], pindexNew, i);
-
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
@@ -2967,7 +2292,7 @@ static void PruneBlockIndexCandidates() {
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
*/
-static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock, bool& fInvalidFound, std::vector<std::shared_ptr<const CTransaction>>& txConflicted, std::vector<std::tuple<CTransaction,CBlockIndex*,int>>& txChanged)
+static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
AssertLockHeld(cs_main);
const CBlockIndex *pindexOldTip = chainActive.Tip();
@@ -3000,7 +2325,7 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c
// Connect new blocks.
BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
- if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL, txConflicted, txChanged)) {
+ if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
if (!state.CorruptionPossible())
@@ -3008,6 +2333,8 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c
state = CValidationState();
fInvalidFound = true;
fContinue = false;
+ // If we didn't actually connect the block, don't notify listeners about it
+ connectTrace.blocksConnected.pop_back();
break;
} else {
// A system error occurred (disk space, database error, ...).
@@ -3065,20 +2392,16 @@ static void NotifyHeaderTip() {
* or an activated best chain. pblock is either NULL or a pointer to a block
* that is already loaded (to avoid loading it again from disk).
*/
-bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
+bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
CBlockIndex *pindexMostWork = NULL;
CBlockIndex *pindexNewTip = NULL;
- std::vector<std::tuple<CTransaction,CBlockIndex*,int>> txChanged;
- if (pblock)
- txChanged.reserve(pblock->vtx.size());
do {
- txChanged.clear();
boost::this_thread::interruption_point();
if (ShutdownRequested())
break;
const CBlockIndex *pindexFork;
- std::vector<std::shared_ptr<const CTransaction>> txConflicted;
+ ConnectTrace connectTrace;
bool fInitialDownload;
{
LOCK(cs_main);
@@ -3092,7 +2415,8 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
return true;
bool fInvalidFound = false;
- if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL, fInvalidFound, txConflicted, txChanged))
+ std::shared_ptr<const CBlock> nullBlockPtr;
+ if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace))
return false;
if (fInvalidFound) {
@@ -3109,13 +2433,17 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
 // throw all transactions through the signal-interface
// while _not_ holding the cs_main lock
- for(std::shared_ptr<const CTransaction> tx : txConflicted)
+ for (const auto& tx : connectTrace.txConflicted)
{
GetMainSignals().SyncTransaction(*tx, pindexNewTip, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
}
// ... and about transactions that got confirmed:
- for(unsigned int i = 0; i < txChanged.size(); i++)
- GetMainSignals().SyncTransaction(std::get<0>(txChanged[i]), std::get<1>(txChanged[i]), std::get<2>(txChanged[i]));
+ for (const auto& pair : connectTrace.blocksConnected) {
+ assert(pair.second);
+ const CBlock& block = *(pair.second);
+ for (unsigned int i = 0; i < block.vtx.size(); i++)
+ GetMainSignals().SyncTransaction(*block.vtx[i], pair.first, i);
+ }
// Notify external listeners about the new tip.
GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
@@ -3201,6 +2529,7 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C
InvalidChainFound(pindex);
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
return true;
}
@@ -3454,22 +2783,22 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
// First transaction must be coinbase, the rest must not be
- if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
+ if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
for (unsigned int i = 1; i < block.vtx.size(); i++)
- if (block.vtx[i].IsCoinBase())
+ if (block.vtx[i]->IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");
// Check transactions
for (const auto& tx : block.vtx)
- if (!CheckTransaction(tx, state, false))
+ if (!CheckTransaction(*tx, state, false))
return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
- strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage()));
+ strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));
unsigned int nSigOps = 0;
for (const auto& tx : block.vtx)
{
- nSigOps += GetLegacySigOpCount(tx);
+ nSigOps += GetLegacySigOpCount(*tx);
}
if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
@@ -3505,8 +2834,8 @@ bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& pa
static int GetWitnessCommitmentIndex(const CBlock& block)
{
int commitpos = -1;
- for (size_t o = 0; o < block.vtx[0].vout.size(); o++) {
- if (block.vtx[0].vout[o].scriptPubKey.size() >= 38 && block.vtx[0].vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0].vout[o].scriptPubKey[1] == 0x24 && block.vtx[0].vout[o].scriptPubKey[2] == 0xaa && block.vtx[0].vout[o].scriptPubKey[3] == 0x21 && block.vtx[0].vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0].vout[o].scriptPubKey[5] == 0xed) {
+ for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
+ if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
commitpos = o;
}
}
@@ -3517,10 +2846,12 @@ void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPr
{
int commitpos = GetWitnessCommitmentIndex(block);
static const std::vector<unsigned char> nonce(32, 0x00);
- if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0].wit.IsEmpty()) {
- block.vtx[0].wit.vtxinwit.resize(1);
- block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.resize(1);
- block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0] = nonce;
+ if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0]->wit.IsEmpty()) {
+ CMutableTransaction tx(*block.vtx[0]);
+ tx.wit.vtxinwit.resize(1);
+ tx.wit.vtxinwit[0].scriptWitness.stack.resize(1);
+ tx.wit.vtxinwit[0].scriptWitness.stack[0] = nonce;
+ block.vtx[0] = MakeTransactionRef(std::move(tx));
}
}
@@ -3528,15 +2859,8 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
{
std::vector<unsigned char> commitment;
int commitpos = GetWitnessCommitmentIndex(block);
- bool fHaveWitness = false;
- for (size_t t = 1; t < block.vtx.size(); t++) {
- if (!block.vtx[t].wit.IsNull()) {
- fHaveWitness = true;
- break;
- }
- }
std::vector<unsigned char> ret(32, 0x00);
- if (fHaveWitness && IsWitnessEnabled(pindexPrev, consensusParams)) {
+ if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) {
if (commitpos == -1) {
uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL);
CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin());
@@ -3551,8 +2875,9 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
out.scriptPubKey[5] = 0xed;
memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
- const_cast<std::vector<CTxOut>*>(&block.vtx[0].vout)->push_back(out);
- block.vtx[0].UpdateHash();
+ CMutableTransaction tx(*block.vtx[0]);
+ tx.vout.push_back(out);
+ block.vtx[0] = MakeTransactionRef(std::move(tx));
}
}
UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
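
Because block.vtx now stores immutable CTransactionRefs, both hunks above replace in-place coinbase edits with a copy/modify/republish pattern. The essence, restated outside the diff:

    CMutableTransaction coinbase(*block.vtx[0]);              // copy the shared, immutable tx
    coinbase.vout.push_back(out);                             // mutate the private copy
    block.vtx[0] = MakeTransactionRef(std::move(coinbase));   // swap in a fresh CTransactionRef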
@@ -3601,7 +2926,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co
// Check that all transactions are finalized
for (const auto& tx : block.vtx) {
- if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
+ if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
}
}
@@ -3610,8 +2935,8 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co
if (nHeight >= consensusParams.BIP34Height)
{
CScript expect = CScript() << nHeight;
- if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
- !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
+ if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
+ !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
}
}
@@ -3633,11 +2958,11 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co
// The malleation check is ignored; as the transaction tree itself
// already does not permit it, it is impossible to trigger in the
// witness tree.
- if (block.vtx[0].wit.vtxinwit.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) {
+ if (block.vtx[0]->wit.vtxinwit.size() != 1 || block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) {
return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__));
}
- CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
- if (memcmp(hashWitness.begin(), &block.vtx[0].vout[commitpos].scriptPubKey[6], 32)) {
+ CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
+ if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
}
fHaveWitness = true;
@@ -3647,7 +2972,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co
// No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
if (!fHaveWitness) {
for (size_t i = 0; i < block.vtx.size(); i++) {
- if (!block.vtx[i].wit.IsNull()) {
+ if (!block.vtx[i]->wit.IsNull()) {
return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
}
}
@@ -3666,7 +2991,7 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co
return true;
}
-static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
+static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
AssertLockHeld(cs_main);
// Check for duplicate
@@ -3715,6 +3040,21 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return true;
}
+// Exposed wrapper for AcceptBlockHeader
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
+{
+ {
+ LOCK(cs_main);
+ for (const CBlockHeader& header : headers) {
+ if (!AcceptBlockHeader(header, state, chainparams, ppindex)) {
+ return false;
+ }
+ }
+ }
+ NotifyHeaderTip();
+ return true;
+}
+
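
The ProcessNewBlockHeaders wrapper added above batches header acceptance under a single cs_main lock and fires NotifyHeaderTip once afterwards. A hypothetical caller-side sketch, assuming the declarations in this patch (plus FormatStateMessage from the existing code) are in scope; the helper name is illustrative only:

    // Hypothetical caller sketch: submit a batch of deserialized headers and
    // report the outcome. Not part of the patch itself.
    static bool SubmitHeaders(const std::vector<CBlockHeader>& headers,
                              const CChainParams& chainparams)
    {
        CValidationState state;
        CBlockIndex* pindexLast = NULL;
        if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
            LogPrintf("header batch rejected: %s\n", FormatStateMessage(state));
            return false;
        }
        // On success, pindexLast points at the index entry of the last header.
        return true;
    }
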
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
@@ -3788,26 +3128,26 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
return true;
}
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool fMayBanPeerIfInvalid)
+bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool *fNewBlock)
{
{
LOCK(cs_main);
// Store to disk
CBlockIndex *pindex = NULL;
- bool fNewBlock = false;
- bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, dbp, &fNewBlock);
- if (pindex && pfrom) {
- mapBlockSource[pindex->GetBlockHash()] = std::make_pair(pfrom->GetId(), fMayBanPeerIfInvalid);
- if (fNewBlock) pfrom->nLastBlockTime = GetTime();
- }
+ if (fNewBlock) *fNewBlock = false;
+ CValidationState state;
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, dbp, fNewBlock);
CheckBlockIndex(chainparams.GetConsensus());
- if (!ret)
+ if (!ret) {
+ GetMainSignals().BlockChecked(*pblock, state);
return error("%s: AcceptBlock FAILED", __func__);
+ }
}
NotifyHeaderTip();
+ CValidationState state; // Only used to report errors, not invalidity - ignore it
if (!ActivateBestChain(state, chainparams, pblock))
return error("%s: ActivateBestChain failed", __func__);
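
With the signature change above, the caller hands ProcessNewBlock a std::shared_ptr<const CBlock> and learns through fNewBlock whether the block was newly stored; rejection details now surface through the BlockChecked validation signal rather than a CValidationState out-parameter. A hypothetical caller sketch under those assumptions, given a fully deserialized CBlock named block:

    // Hypothetical caller sketch for the new ProcessNewBlock signature.
    // The shared_ptr lets the block be handed to ActivateBestChain without copying.
    std::shared_ptr<const CBlock> pblock = std::make_shared<const CBlock>(block);
    bool fNewBlock = false;
    if (!ProcessNewBlock(chainparams, pblock, /* fForceProcessing */ true,
                         /* dbp */ NULL, &fNewBlock)) {
        LogPrintf("ProcessNewBlock failed for %s\n", pblock->GetHash().ToString());
    }
    // Invalidity is reported asynchronously via GetMainSignals().BlockChecked().
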
@@ -4319,8 +3659,6 @@ void UnloadBlockIndex()
pindexBestInvalid = NULL;
pindexBestHeader = NULL;
mempool.clear();
- mapOrphanTransactions.clear();
- mapOrphanTransactionsByPrev.clear();
mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
@@ -4726,2273 +4064,6 @@ std::string GetWarnings(const std::string& strFor)
assert(!"GetWarnings(): invalid parameter");
return "error";
}
-
-
-
-
-
-
-
-
-//////////////////////////////////////////////////////////////////////////////
-//
-// blockchain -> download logic notification
-//
-
-PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
- // Initialize global variables that cannot be constructed at startup.
- recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
-}
-
-void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
- const int nNewHeight = pindexNew->nHeight;
- connman->SetBestHeight(nNewHeight);
-
- if (!fInitialDownload) {
- // Find the hashes of all blocks that weren't previously in the best chain.
- std::vector<uint256> vHashes;
- const CBlockIndex *pindexToAnnounce = pindexNew;
- while (pindexToAnnounce != pindexFork) {
- vHashes.push_back(pindexToAnnounce->GetBlockHash());
- pindexToAnnounce = pindexToAnnounce->pprev;
- if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
- // Limit announcements in case of a huge reorganization.
- // Rely on the peer's synchronization mechanism in that case.
- break;
- }
- }
- // Relay inventory, but don't relay old inventory during initial block download.
- connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
- if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
- BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
- pnode->PushBlockHash(hash);
- }
- }
- });
- }
-
- nTimeBestReceived = GetTime();
-}
-
-void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
- LOCK(cs_main);
-
- const uint256 hash(block.GetHash());
- std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
-
- int nDoS = 0;
- if (state.IsInvalid(nDoS)) {
- if (it != mapBlockSource.end() && State(it->second.first)) {
- assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
- CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
- State(it->second.first)->rejects.push_back(reject);
- if (nDoS > 0 && it->second.second)
- Misbehaving(it->second.first, nDoS);
- }
- }
- if (it != mapBlockSource.end())
- mapBlockSource.erase(it);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-//
-// Messages
-//
-
-
-bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- switch (inv.type)
- {
- case MSG_TX:
- case MSG_WITNESS_TX:
- {
- assert(recentRejects);
- if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
- {
-                // If the chain tip has changed, previously rejected transactions
-                // might now be valid, e.g. due to a nLockTime'd tx becoming valid,
- // or a double-spend. Reset the rejects filter and give those
- // txs a second chance.
- hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
- recentRejects->reset();
- }
-
- // Use pcoinsTip->HaveCoinsInCache as a quick approximation to exclude
- // requesting or processing some txs which have already been included in a block
- return recentRejects->contains(inv.hash) ||
- mempool.exists(inv.hash) ||
- mapOrphanTransactions.count(inv.hash) ||
- pcoinsTip->HaveCoinsInCache(inv.hash);
- }
- case MSG_BLOCK:
- case MSG_WITNESS_BLOCK:
- return mapBlockIndex.count(inv.hash);
- }
- // Don't know what it is, just say we already got one
- return true;
-}
-
-static void RelayTransaction(const CTransaction& tx, CConnman& connman)
-{
- CInv inv(MSG_TX, tx.GetHash());
- connman.ForEachNode([&inv](CNode* pnode)
- {
- pnode->PushInventory(inv);
- });
-}
-
-static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connman)
-{
- int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
-
- // Relay to a limited number of other nodes
- // Use deterministic randomness to send to the same nodes for 24 hours
- // at a time so the addrKnowns of the chosen nodes prevent repeats
- uint64_t hashAddr = addr.GetHash();
- std::multimap<uint64_t, CNode*> mapMix;
- const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
- FastRandomContext insecure_rand;
-
- auto sortfunc = [&mapMix, &hasher](CNode* pnode) {
- if (pnode->nVersion >= CADDR_TIME_VERSION) {
- uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
- mapMix.emplace(hashKey, pnode);
- }
- };
-
- auto pushfunc = [&addr, &mapMix, &nRelayNodes, &insecure_rand] {
- for (auto mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
- mi->second->PushAddress(addr, insecure_rand);
- };
-
- connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
-}
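-
RelayAddress above selects its one or two relay targets deterministically: every candidate node gets a key from a keyed SipHash over the address hash and a 24-hour time bucket, and the nodes with the smallest keys win, so the same address maps to the same relayers for roughly a day. A standalone illustration of that selection, with std::hash standing in for the keyed SipHash (purely to show the shape of the idea, not the actual randomizer):

    // Standalone illustration of deterministic address-relay selection.
    // std::hash replaces the keyed SipHash used above; the real code also
    // mixes in a per-process secret via GetDeterministicRandomizer().
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    std::vector<int> PickRelayNodes(const std::vector<int>& nodeIds,
                                    uint64_t hashAddr, int64_t now, int nRelayNodes)
    {
        const uint64_t bucket = (uint64_t(now) + hashAddr) / (24 * 60 * 60); // ~24h window
        std::multimap<uint64_t, int> mapMix;                                 // sorted by key
        for (int id : nodeIds) {
            uint64_t key = std::hash<std::string>{}(std::to_string(hashAddr) + "/" +
                               std::to_string(bucket) + "/" + std::to_string(id));
            mapMix.emplace(key, id);
        }
        std::vector<int> picked;
        for (auto it = mapMix.begin(); it != mapMix.end() && nRelayNodes-- > 0; ++it)
            picked.push_back(it->second);                                    // smallest keys first
        return picked;
    }

    int main()
    {
        for (int id : PickRelayNodes({1, 2, 3, 4, 5}, 0xabcdefULL, 1700000000, 2))
            std::cout << "relay via node " << id << "\n";
    }
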
-
-void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman)
-{
- std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
- unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
-
- vector<CInv> vNotFound;
-
- LOCK(cs_main);
-
- while (it != pfrom->vRecvGetData.end()) {
- // Don't bother if send buffer is too full to respond anyway
- if (pfrom->nSendSize >= nMaxSendBufferSize)
- break;
-
- const CInv &inv = *it;
- {
- boost::this_thread::interruption_point();
- it++;
-
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
- {
- bool send = false;
- BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
- if (mi != mapBlockIndex.end())
- {
- if (chainActive.Contains(mi->second)) {
- send = true;
- } else {
- static const int nOneMonth = 30 * 24 * 60 * 60;
- // To prevent fingerprinting attacks, only send blocks outside of the active
- // chain if they are valid, and no more than a month older (both in time, and in
- // best equivalent proof of work) than the best header chain we know about.
- send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
- (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
- (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
- if (!send) {
- LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
- }
- }
- }
- // disconnect node in case we have reached the outbound limit for serving historical blocks
- // never disconnect whitelisted nodes
- static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
- if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
- {
- LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
-
- //disconnect node
- pfrom->fDisconnect = true;
- send = false;
- }
- // Pruned nodes may have deleted the block, so check whether
- // it's available before trying to send.
- if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
- {
- // Send block from disk
- CBlock block;
- if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
- assert(!"cannot load block from disk");
- if (inv.type == MSG_BLOCK)
- connman.PushMessageWithFlag(pfrom, SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
- else if (inv.type == MSG_WITNESS_BLOCK)
- connman.PushMessage(pfrom, NetMsgType::BLOCK, block);
- else if (inv.type == MSG_FILTERED_BLOCK)
- {
- bool sendMerkleBlock = false;
- CMerkleBlock merkleBlock;
- {
- LOCK(pfrom->cs_filter);
- if (pfrom->pfilter) {
- sendMerkleBlock = true;
- merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
- }
- }
- if (sendMerkleBlock) {
- connman.PushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
- // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
- // This avoids hurting performance by pointlessly requiring a round-trip
-                        // Note that there is currently no way for a node to request any single transaction we didn't send here -
-                        // they must either disconnect and retry or request the full block.
-                        // Thus, the protocol spec allows us to provide duplicate txn here;
-                        // however, we MUST always provide at least what the remote peer needs.
- typedef std::pair<unsigned int, uint256> PairType;
- BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
- connman.PushMessageWithFlag(pfrom, SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
- }
- // else
- // no response
- }
- else if (inv.type == MSG_CMPCT_BLOCK)
- {
- // If a peer is asking for old blocks, we're almost guaranteed
-                    // they won't have a useful mempool to match against a compact block,
- // and we don't feel like constructing the object for them, so
- // instead we respond with the full, non-compact block.
- bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
- if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
- CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
- connman.PushMessageWithFlag(pfrom, fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
- } else
- connman.PushMessageWithFlag(pfrom, fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
- }
-
- // Trigger the peer node to send a getblocks request for the next batch of inventory
- if (inv.hash == pfrom->hashContinue)
- {
-                    // Bypass PushInventory; this must be sent even if redundant,
- // and we want it right after the last block so they don't
- // wait for other stuff first.
- vector<CInv> vInv;
- vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
- connman.PushMessage(pfrom, NetMsgType::INV, vInv);
- pfrom->hashContinue.SetNull();
- }
- }
- }
- else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
- {
- // Send stream from relay memory
- bool push = false;
- auto mi = mapRelay.find(inv.hash);
- if (mi != mapRelay.end()) {
- connman.PushMessageWithFlag(pfrom, inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
- push = true;
- } else if (pfrom->timeLastMempoolReq) {
- auto txinfo = mempool.info(inv.hash);
- // To protect privacy, do not answer getdata using the mempool when
- // that TX couldn't have been INVed in reply to a MEMPOOL request.
- if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
- connman.PushMessageWithFlag(pfrom, inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
- push = true;
- }
- }
- if (!push) {
- vNotFound.push_back(inv);
- }
- }
-
- // Track requests for our stuff.
- GetMainSignals().Inventory(inv.hash);
-
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
- break;
- }
- }
-
- pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
-
- if (!vNotFound.empty()) {
- // Let the peer know that we didn't find what it asked for, so it doesn't
- // have to wait around forever. Currently only SPV clients actually care
- // about this message: it's needed when they are recursively walking the
- // dependencies of relevant unconfirmed transactions. SPV clients want to
- // do that because they want to know about (and store and rebroadcast and
- // risk analyze) the dependencies of transactions relevant to them, without
- // having to download the entire memory pool.
- connman.PushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
- }
-}
-
-uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
- uint32_t nFetchFlags = 0;
- if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
- nFetchFlags |= MSG_WITNESS_FLAG;
- }
- return nFetchFlags;
-}
-
-bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman)
-{
- unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
-
- LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
- if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
- {
- LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
- return true;
- }
-
-
- if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
- (strCommand == NetMsgType::FILTERLOAD ||
- strCommand == NetMsgType::FILTERADD))
- {
- if (pfrom->nVersion >= NO_BLOOM_VERSION) {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 100);
- return false;
- } else {
- pfrom->fDisconnect = true;
- return false;
- }
- }
-
-
- if (strCommand == NetMsgType::VERSION)
- {
- // Feeler connections exist only to verify if address is online.
- if (pfrom->fFeeler) {
- assert(pfrom->fInbound == false);
- pfrom->fDisconnect = true;
- }
-
- // Each connection can only send one version message
- if (pfrom->nVersion != 0)
- {
- connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 1);
- return false;
- }
-
- int64_t nTime;
- CAddress addrMe;
- CAddress addrFrom;
- uint64_t nNonce = 1;
- uint64_t nServiceInt;
- vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
- pfrom->nServices = ServiceFlags(nServiceInt);
- if (!pfrom->fInbound)
- {
- connman.SetServices(pfrom->addr, pfrom->nServices);
- }
- if (pfrom->nServicesExpected & ~pfrom->nServices)
- {
- LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
- connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
- strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
- pfrom->fDisconnect = true;
- return false;
- }
-
- if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
- {
- // disconnect from peers older than this proto version
- LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
- connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
- strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
- pfrom->fDisconnect = true;
- return false;
- }
-
- if (pfrom->nVersion == 10300)
- pfrom->nVersion = 300;
- if (!vRecv.empty())
- vRecv >> addrFrom >> nNonce;
- if (!vRecv.empty()) {
- vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
- pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
- }
- if (!vRecv.empty()) {
- vRecv >> pfrom->nStartingHeight;
- }
- {
- LOCK(pfrom->cs_filter);
- if (!vRecv.empty())
- vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
- else
- pfrom->fRelayTxes = true;
- }
-
- // Disconnect if we connected to ourself
- if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
- {
- LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
- pfrom->fDisconnect = true;
- return true;
- }
-
- pfrom->addrLocal = addrMe;
- if (pfrom->fInbound && addrMe.IsRoutable())
- {
- SeenLocal(addrMe);
- }
-
- // Be shy and don't send version until we hear
- if (pfrom->fInbound)
- PushNodeVersion(pfrom, connman, GetAdjustedTime());
-
- pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
-
- if((pfrom->nServices & NODE_WITNESS))
- {
- LOCK(cs_main);
- State(pfrom->GetId())->fHaveWitness = true;
- }
-
- // Potentially mark this peer as a preferred download peer.
- {
- LOCK(cs_main);
- UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
- }
-
- // Change version
- connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::VERACK);
- pfrom->SetSendVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
-
- if (!pfrom->fInbound)
- {
- // Advertise our address
- if (fListen && !IsInitialBlockDownload())
- {
- CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
- FastRandomContext insecure_rand;
- if (addr.IsRoutable())
- {
- LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
- pfrom->PushAddress(addr, insecure_rand);
- } else if (IsPeerAddrLocalGood(pfrom)) {
- addr.SetIP(pfrom->addrLocal);
- LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
- pfrom->PushAddress(addr, insecure_rand);
- }
- }
-
- // Get recent addresses
- if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
- {
- connman.PushMessage(pfrom, NetMsgType::GETADDR);
- pfrom->fGetAddr = true;
- }
- connman.MarkAddressGood(pfrom->addr);
- }
-
- pfrom->fSuccessfullyConnected = true;
-
- string remoteAddr;
- if (fLogIPs)
- remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
-
- LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
- pfrom->cleanSubVer, pfrom->nVersion,
- pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
- remoteAddr);
-
- int64_t nTimeOffset = nTime - GetTime();
- pfrom->nTimeOffset = nTimeOffset;
- AddTimeData(pfrom->addr, nTimeOffset);
- }
-
-
- else if (pfrom->nVersion == 0)
- {
- // Must have a version message before anything else
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 1);
- return false;
- }
-
-
- else if (strCommand == NetMsgType::VERACK)
- {
- pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
-
- // Mark this node as currently connected, so we update its timestamp later.
- if (pfrom->fNetworkNode) {
- LOCK(cs_main);
- State(pfrom->GetId())->fCurrentlyConnected = true;
- }
-
- if (pfrom->nVersion >= SENDHEADERS_VERSION) {
- // Tell our peer we prefer to receive headers rather than inv's
- // We send this to non-NODE NETWORK peers as well, because even
- // non-NODE NETWORK peers can announce blocks (such as pruning
- // nodes)
- connman.PushMessage(pfrom, NetMsgType::SENDHEADERS);
- }
- if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
- // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
- // However, we do not request new block announcements using
- // cmpctblock messages.
- // We send this to non-NODE NETWORK peers as well, because
- // they may wish to request compact blocks from us
- bool fAnnounceUsingCMPCTBLOCK = false;
- uint64_t nCMPCTBLOCKVersion = 2;
- if (pfrom->GetLocalServices() & NODE_WITNESS)
- connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
- nCMPCTBLOCKVersion = 1;
- connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
- }
- }
-
-
- else if (strCommand == NetMsgType::ADDR)
- {
- vector<CAddress> vAddr;
- vRecv >> vAddr;
-
- // Don't want addr from older versions unless seeding
- if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000)
- return true;
- if (vAddr.size() > 1000)
- {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 20);
- return error("message addr size() = %u", vAddr.size());
- }
-
- // Store the new addresses
- vector<CAddress> vAddrOk;
- int64_t nNow = GetAdjustedTime();
- int64_t nSince = nNow - 10 * 60;
- BOOST_FOREACH(CAddress& addr, vAddr)
- {
- boost::this_thread::interruption_point();
-
- if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
- continue;
-
- if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
- addr.nTime = nNow - 5 * 24 * 60 * 60;
- pfrom->AddAddressKnown(addr);
- bool fReachable = IsReachable(addr);
- if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
- {
- // Relay to a limited number of other nodes
- RelayAddress(addr, fReachable, connman);
- }
- // Do not store addresses outside our network
- if (fReachable)
- vAddrOk.push_back(addr);
- }
- connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
- if (vAddr.size() < 1000)
- pfrom->fGetAddr = false;
- if (pfrom->fOneShot)
- pfrom->fDisconnect = true;
- }
-
- else if (strCommand == NetMsgType::SENDHEADERS)
- {
- LOCK(cs_main);
- State(pfrom->GetId())->fPreferHeaders = true;
- }
-
- else if (strCommand == NetMsgType::SENDCMPCT)
- {
- bool fAnnounceUsingCMPCTBLOCK = false;
- uint64_t nCMPCTBLOCKVersion = 0;
- vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
- if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
- LOCK(cs_main);
- // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
- if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
- State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
- State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
- }
- if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
- State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
- if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
- if (pfrom->GetLocalServices() & NODE_WITNESS)
- State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
- else
- State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
- }
- }
- }
-
-
- else if (strCommand == NetMsgType::INV)
- {
- vector<CInv> vInv;
- vRecv >> vInv;
- if (vInv.size() > MAX_INV_SZ)
- {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 20);
- return error("message inv size() = %u", vInv.size());
- }
-
- bool fBlocksOnly = !fRelayTxes;
-
-        // Allow whitelisted peers to send data other than blocks in blocks-only mode if whitelistrelay is true
- if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
- fBlocksOnly = false;
-
- LOCK(cs_main);
-
- uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());
-
- std::vector<CInv> vToFetch;
-
- for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
- {
- CInv &inv = vInv[nInv];
-
- boost::this_thread::interruption_point();
-
- bool fAlreadyHave = AlreadyHave(inv);
- LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
-
- if (inv.type == MSG_TX) {
- inv.type |= nFetchFlags;
- }
-
- if (inv.type == MSG_BLOCK) {
- UpdateBlockAvailability(pfrom->GetId(), inv.hash);
- if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
- // First request the headers preceding the announced block. In the normal fully-synced
- // case where a new block is announced that succeeds the current tip (no reorganization),
- // there are no such headers.
- // Secondly, and only when we are close to being synced, we request the announced block directly,
- // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
- // time the block arrives, the header chain leading up to it is already validated. Not
- // doing this will result in the received block being rejected as an orphan in case it is
- // not a direct successor.
- connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
- CNodeState *nodestate = State(pfrom->GetId());
- if (CanDirectFetch(chainparams.GetConsensus()) &&
- nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
- (!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
- inv.type |= nFetchFlags;
- if (nodestate->fSupportsDesiredCmpctVersion)
- vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash));
- else
- vToFetch.push_back(inv);
- // Mark block as in flight already, even though the actual "getdata" message only goes out
- // later (within the same cs_main lock, though).
- MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
- }
- LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
- }
- }
- else
- {
- pfrom->AddInventoryKnown(inv);
- if (fBlocksOnly)
- LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
- else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())
- pfrom->AskFor(inv);
- }
-
- // Track requests for our stuff
- GetMainSignals().Inventory(inv.hash);
-
- if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) {
- Misbehaving(pfrom->GetId(), 50);
- return error("send buffer size() = %u", pfrom->nSendSize);
- }
- }
-
- if (!vToFetch.empty())
- connman.PushMessage(pfrom, NetMsgType::GETDATA, vToFetch);
- }
-
-
- else if (strCommand == NetMsgType::GETDATA)
- {
- vector<CInv> vInv;
- vRecv >> vInv;
- if (vInv.size() > MAX_INV_SZ)
- {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 20);
- return error("message getdata size() = %u", vInv.size());
- }
-
- if (fDebug || (vInv.size() != 1))
- LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
-
- if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
- LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
-
- pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
- }
-
-
- else if (strCommand == NetMsgType::GETBLOCKS)
- {
- CBlockLocator locator;
- uint256 hashStop;
- vRecv >> locator >> hashStop;
-
- LOCK(cs_main);
-
- // Find the last block the caller has in the main chain
- CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
-
- // Send the rest of the chain
- if (pindex)
- pindex = chainActive.Next(pindex);
- int nLimit = 500;
- LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
- for (; pindex; pindex = chainActive.Next(pindex))
- {
- if (pindex->GetBlockHash() == hashStop)
- {
- LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
- break;
- }
- // If pruning, don't inv blocks unless we have on disk and are likely to still have
- // for some reasonable time window (1 hour) that block relay might require.
- const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
- if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
- {
- LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
- break;
- }
- pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
- if (--nLimit <= 0)
- {
- // When this block is requested, we'll send an inv that'll
- // trigger the peer to getblocks the next batch of inventory.
- LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
- pfrom->hashContinue = pindex->GetBlockHash();
- break;
- }
- }
- }
-
-
- else if (strCommand == NetMsgType::GETBLOCKTXN)
- {
- BlockTransactionsRequest req;
- vRecv >> req;
-
- LOCK(cs_main);
-
- BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
- if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
- LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);
- return true;
- }
-
- if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
- // If an older block is requested (should never happen in practice,
- // but can happen in tests) send a block response instead of a
- // blocktxn response. Sending a full block response instead of a
- // small blocktxn response is preferable in the case where a peer
- // might maliciously send lots of getblocktxn requests to trigger
- // expensive disk reads, because it will require the peer to
- // actually receive all the data read from disk over the network.
- LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
- CInv inv;
- inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
- inv.hash = req.blockhash;
- pfrom->vRecvGetData.push_back(inv);
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
- return true;
- }
-
- CBlock block;
- assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));
-
- BlockTransactions resp(req);
- for (size_t i = 0; i < req.indexes.size(); i++) {
- if (req.indexes[i] >= block.vtx.size()) {
- Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
- return true;
- }
- resp.txn[i] = block.vtx[req.indexes[i]];
- }
- connman.PushMessageWithFlag(pfrom, State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
- }
-
-
- else if (strCommand == NetMsgType::GETHEADERS)
- {
- CBlockLocator locator;
- uint256 hashStop;
- vRecv >> locator >> hashStop;
-
- LOCK(cs_main);
- if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
- LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
- return true;
- }
-
- CNodeState *nodestate = State(pfrom->GetId());
- CBlockIndex* pindex = NULL;
- if (locator.IsNull())
- {
- // If locator is null, return the hashStop block
- BlockMap::iterator mi = mapBlockIndex.find(hashStop);
- if (mi == mapBlockIndex.end())
- return true;
- pindex = (*mi).second;
- }
- else
- {
- // Find the last block the caller has in the main chain
- pindex = FindForkInGlobalIndex(chainActive, locator);
- if (pindex)
- pindex = chainActive.Next(pindex);
- }
-
- // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
- vector<CBlock> vHeaders;
- int nLimit = MAX_HEADERS_RESULTS;
- LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
- for (; pindex; pindex = chainActive.Next(pindex))
- {
- vHeaders.push_back(pindex->GetBlockHeader());
- if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
- break;
- }
- // pindex can be NULL either if we sent chainActive.Tip() OR
- // if our peer has chainActive.Tip() (and thus we are sending an empty
- // headers message). In both cases it's safe to update
- // pindexBestHeaderSent to be our tip.
- nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
- connman.PushMessage(pfrom, NetMsgType::HEADERS, vHeaders);
- }
-
-
- else if (strCommand == NetMsgType::TX)
- {
-        // Stop processing the transaction early if we are in blocks-only mode
-        // and the peer is either not whitelisted or whitelistrelay is off
- if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
- {
- LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
- return true;
- }
-
- deque<COutPoint> vWorkQueue;
- vector<uint256> vEraseQueue;
- CTransaction tx;
- vRecv >> tx;
-
- CInv inv(MSG_TX, tx.GetHash());
- pfrom->AddInventoryKnown(inv);
-
- LOCK(cs_main);
-
- bool fMissingInputs = false;
- CValidationState state;
-
- pfrom->setAskFor.erase(inv.hash);
- mapAlreadyAskedFor.erase(inv.hash);
-
- if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
- mempool.check(pcoinsTip);
- RelayTransaction(tx, connman);
- for (unsigned int i = 0; i < tx.vout.size(); i++) {
- vWorkQueue.emplace_back(inv.hash, i);
- }
-
- pfrom->nLastTXTime = GetTime();
-
- LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
- pfrom->id,
- tx.GetHash().ToString(),
- mempool.size(), mempool.DynamicMemoryUsage() / 1000);
-
- // Recursively process any orphan transactions that depended on this one
- set<NodeId> setMisbehaving;
- while (!vWorkQueue.empty()) {
- auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
- vWorkQueue.pop_front();
- if (itByPrev == mapOrphanTransactionsByPrev.end())
- continue;
- for (auto mi = itByPrev->second.begin();
- mi != itByPrev->second.end();
- ++mi)
- {
- const CTransaction& orphanTx = (*mi)->second.tx;
- const uint256& orphanHash = orphanTx.GetHash();
- NodeId fromPeer = (*mi)->second.fromPeer;
- bool fMissingInputs2 = false;
-                // Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
- // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
- // anyone relaying LegitTxX banned)
- CValidationState stateDummy;
-
-
- if (setMisbehaving.count(fromPeer))
- continue;
- if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
- LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
- RelayTransaction(orphanTx, connman);
- for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
- vWorkQueue.emplace_back(orphanHash, i);
- }
- vEraseQueue.push_back(orphanHash);
- }
- else if (!fMissingInputs2)
- {
- int nDos = 0;
- if (stateDummy.IsInvalid(nDos) && nDos > 0)
- {
- // Punish peer that gave us an invalid orphan tx
- Misbehaving(fromPeer, nDos);
- setMisbehaving.insert(fromPeer);
- LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
- }
- // Has inputs but not accepted to mempool
- // Probably non-standard or insufficient fee/priority
- LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
- vEraseQueue.push_back(orphanHash);
- if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) {
- // Do not use rejection cache for witness transactions or
-                        // witness-stripped transactions, as they may have been malleated.
- // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
- assert(recentRejects);
- recentRejects->insert(orphanHash);
- }
- }
- mempool.check(pcoinsTip);
- }
- }
-
- BOOST_FOREACH(uint256 hash, vEraseQueue)
- EraseOrphanTx(hash);
- }
- else if (fMissingInputs)
- {
-        bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
- BOOST_FOREACH(const CTxIn& txin, tx.vin) {
- if (recentRejects->contains(txin.prevout.hash)) {
- fRejectedParents = true;
- break;
- }
- }
- if (!fRejectedParents) {
- BOOST_FOREACH(const CTxIn& txin, tx.vin) {
- CInv _inv(MSG_TX, txin.prevout.hash);
- pfrom->AddInventoryKnown(_inv);
- if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
- }
- AddOrphanTx(tx, pfrom->GetId());
-
- // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
- unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
- unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
- if (nEvicted > 0)
- LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
- } else {
- LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
- }
- } else {
- if (tx.wit.IsNull() && !state.CorruptionPossible()) {
- // Do not use rejection cache for witness transactions or
-            // witness-stripped transactions, as they may have been malleated.
- // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
- assert(recentRejects);
- recentRejects->insert(tx.GetHash());
- }
-
- if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
- // Always relay transactions received from whitelisted peers, even
- // if they were already in the mempool or rejected from it due
- // to policy, allowing the node to function as a gateway for
- // nodes hidden behind it.
- //
- // Never relay transactions that we would assign a non-zero DoS
- // score for, as we expect peers to do the same with us in that
- // case.
- int nDoS = 0;
- if (!state.IsInvalid(nDoS) || nDoS == 0) {
- LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
- RelayTransaction(tx, connman);
- } else {
- LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
- }
- }
- }
- int nDoS = 0;
- if (state.IsInvalid(nDoS))
- {
- LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
- pfrom->id,
- FormatStateMessage(state));
- if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
- connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
- state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
- if (nDoS > 0) {
- Misbehaving(pfrom->GetId(), nDoS);
- }
- }
- }
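-
The TX handler above resolves orphans with a work queue: every output of a newly accepted transaction is queued, and any orphan spending one of those outpoints gets another AcceptToMemoryPool attempt, which can in turn unlock further orphans. A standalone, simplified sketch of that cascade (the types and AcceptToPool below are stand-ins, not Bitcoin Core's):

    // Standalone sketch of the orphan-resolution cascade used above.
    // TxId/Outpoint/Tx and AcceptToPool() are simplified stand-ins.
    #include <deque>
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    using TxId = std::string;
    using Outpoint = std::pair<TxId, unsigned int>;

    struct Tx { TxId id; std::vector<Outpoint> vin; unsigned int nOut; };

    std::map<Outpoint, std::set<TxId>> g_orphansByPrev; // prevout -> orphans spending it
    std::map<TxId, Tx> g_orphans;

    bool AcceptToPool(const Tx&) { return true; }       // stand-in for AcceptToMemoryPool

    void ProcessAccepted(const Tx& parent)
    {
        std::deque<Outpoint> work;
        std::set<TxId> tried;                           // each orphan gets one attempt
        for (unsigned int i = 0; i < parent.nOut; ++i)
            work.push_back({parent.id, i});
        while (!work.empty()) {
            auto it = g_orphansByPrev.find(work.front());
            work.pop_front();
            if (it == g_orphansByPrev.end())
                continue;
            for (const TxId& oid : it->second) {
                if (!tried.insert(oid).second)
                    continue;
                auto oit = g_orphans.find(oid);
                if (oit == g_orphans.end() || !AcceptToPool(oit->second))
                    continue;
                std::cout << "accepted orphan " << oid << "\n";
                for (unsigned int i = 0; i < oit->second.nOut; ++i)
                    work.push_back({oid, i});           // cascade to its spenders
            }
        }
    }

    int main()
    {
        g_orphans["child"] = Tx{"child", {{"parent", 0}}, 1};
        g_orphansByPrev[Outpoint{"parent", 0}].insert("child");
        ProcessAccepted(Tx{"parent", {}, 1});
    }
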
-
-
- else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
- {
- CBlockHeaderAndShortTxIDs cmpctblock;
- vRecv >> cmpctblock;
-
- LOCK(cs_main);
-
- if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
- // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
- if (!IsInitialBlockDownload())
- connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
- return true;
- }
-
- CBlockIndex *pindex = NULL;
- CValidationState state;
- if (!AcceptBlockHeader(cmpctblock.header, state, chainparams, &pindex)) {
- int nDoS;
- if (state.IsInvalid(nDoS)) {
- if (nDoS > 0)
- Misbehaving(pfrom->GetId(), nDoS);
- LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id);
- return true;
- }
- }
-
- // If AcceptBlockHeader returned true, it set pindex
- assert(pindex);
- UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
-
- std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
- bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
-
- if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
- return true;
-
- if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
- pindex->nTx != 0) { // We had this block at some point, but pruned it
- if (fAlreadyInFlight) {
-            // We requested this block for some reason, but our mempool will probably be useless,
- // so we just grab the block via normal getdata
- std::vector<CInv> vInv(1);
- vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
- connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
- }
- return true;
- }
-
- // If we're not close to tip yet, give up and let parallel block fetch work its magic
- if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
- return true;
-
- CNodeState *nodestate = State(pfrom->GetId());
-
- if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
- // Don't bother trying to process compact blocks from v1 peers
- // after segwit activates.
- return true;
- }
-
- // We want to be a bit conservative just to be extra careful about DoS
- // possibilities in compact block processing...
- if (pindex->nHeight <= chainActive.Height() + 2) {
- if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
- (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
- list<QueuedBlock>::iterator *queuedBlockIt = NULL;
- if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) {
- if (!(*queuedBlockIt)->partialBlock)
- (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
- else {
- // The block was already in flight using compact blocks from the same peer
- LogPrint("net", "Peer sent us compact block we were already syncing!\n");
- return true;
- }
- }
-
- PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
- ReadStatus status = partialBlock.InitData(cmpctblock);
- if (status == READ_STATUS_INVALID) {
- MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
- Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
- return true;
- } else if (status == READ_STATUS_FAILED) {
- // Duplicate txindexes, the block is now in-flight, so just request it
- std::vector<CInv> vInv(1);
- vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
- connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
- return true;
- }
-
- if (!fAlreadyInFlight && mapBlocksInFlight.size() == 1 && pindex->pprev->IsValid(BLOCK_VALID_CHAIN)) {
- // We seem to be rather well-synced, so it appears pfrom was the first to provide us
- // with this block! Let's get them to announce using compact blocks in the future.
- MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
- }
-
- BlockTransactionsRequest req;
- for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
- if (!partialBlock.IsTxAvailable(i))
- req.indexes.push_back(i);
- }
- if (req.indexes.empty()) {
- // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
- BlockTransactions txn;
- txn.blockhash = cmpctblock.header.GetHash();
- CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
- blockTxnMsg << txn;
- return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
- } else {
- req.blockhash = pindex->GetBlockHash();
- connman.PushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
- }
- }
- } else {
- if (fAlreadyInFlight) {
-                // We requested this block, but it's far into the future, so our
- // mempool will probably be useless - request the block normally
- std::vector<CInv> vInv(1);
- vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
- connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
- return true;
- } else {
- // If this was an announce-cmpctblock, we want the same treatment as a header message
- // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
- std::vector<CBlock> headers;
- headers.push_back(cmpctblock.header);
- CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
- vHeadersMsg << headers;
- return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);
- }
- }
- }
-
- else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
- {
- BlockTransactions resp;
- vRecv >> resp;
-
- CBlock block;
- bool fBlockRead = false;
- {
- LOCK(cs_main);
-
- map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
- if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
- it->second.first != pfrom->GetId()) {
- LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
- return true;
- }
-
- PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
- ReadStatus status = partialBlock.FillBlock(block, resp.txn);
- if (status == READ_STATUS_INVALID) {
- MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
- Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
- return true;
- } else if (status == READ_STATUS_FAILED) {
- // Might have collided, fall back to getdata now :(
- std::vector<CInv> invs;
- invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
- connman.PushMessage(pfrom, NetMsgType::GETDATA, invs);
- } else {
- // Block is either okay, or possibly we received
- // READ_STATUS_CHECKBLOCK_FAILED.
- // Note that CheckBlock can only fail for one of a few reasons:
- // 1. bad-proof-of-work (impossible here, because we've already
- // accepted the header)
- // 2. merkleroot doesn't match the transactions given (already
- // caught in FillBlock with READ_STATUS_FAILED, so
- // impossible here)
- // 3. the block is otherwise invalid (eg invalid coinbase,
- // block is too big, too many legacy sigops, etc).
- // So if CheckBlock failed, #3 is the only possibility.
- // Under BIP 152, we don't DoS-ban unless proof of work is
- // invalid (we don't require all the stateless checks to have
- // been run). This is handled below, so just treat this as
- // though the block was successfully read, and rely on the
- // handling in ProcessNewBlock to ensure the block index is
- // updated, reject messages go out, etc.
- MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
- fBlockRead = true;
- }
- } // Don't hold cs_main when we call into ProcessNewBlock
- if (fBlockRead) {
- CValidationState state;
- // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
- // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
- // BIP 152 permits peers to relay compact blocks after validating
- // the header only; we should not punish peers if the block turns
- // out to be invalid.
- ProcessNewBlock(state, chainparams, pfrom, &block, true, NULL, false);
- int nDoS;
- if (state.IsInvalid(nDoS)) {
- assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
- connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
- state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
- }
- }
- }
-
-
- else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
- {
- std::vector<CBlockHeader> headers;
-
- // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
- unsigned int nCount = ReadCompactSize(vRecv);
- if (nCount > MAX_HEADERS_RESULTS) {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 20);
- return error("headers message size = %u", nCount);
- }
- headers.resize(nCount);
- for (unsigned int n = 0; n < nCount; n++) {
- vRecv >> headers[n];
- ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
- }
-
- {
- LOCK(cs_main);
-
- if (nCount == 0) {
-            // Nothing interesting. Stop asking this peer for more headers.
- return true;
- }
-
- CNodeState *nodestate = State(pfrom->GetId());
-
- // If this looks like it could be a block announcement (nCount <
- // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
- // don't connect:
- // - Send a getheaders message in response to try to connect the chain.
- // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
- // don't connect before giving DoS points
- // - Once a headers message is received that is valid and does connect,
- // nUnconnectingHeaders gets reset back to 0.
- if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
- nodestate->nUnconnectingHeaders++;
- connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
- LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
- headers[0].GetHash().ToString(),
- headers[0].hashPrevBlock.ToString(),
- pindexBestHeader->nHeight,
- pfrom->id, nodestate->nUnconnectingHeaders);
- // Set hashLastUnknownBlock for this peer, so that if we
- // eventually get the headers - even from a different peer -
- // we can use this peer to download.
- UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
-
- if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
- Misbehaving(pfrom->GetId(), 20);
- }
- return true;
- }
-
- CBlockIndex *pindexLast = NULL;
- BOOST_FOREACH(const CBlockHeader& header, headers) {
- CValidationState state;
- if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
- Misbehaving(pfrom->GetId(), 20);
- return error("non-continuous headers sequence");
- }
- if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {
- int nDoS;
- if (state.IsInvalid(nDoS)) {
- if (nDoS > 0)
- Misbehaving(pfrom->GetId(), nDoS);
- return error("invalid header received");
- }
- }
- }
-
- if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
- }
- nodestate->nUnconnectingHeaders = 0;
-
- assert(pindexLast);
- UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
-
- if (nCount == MAX_HEADERS_RESULTS) {
- // Headers message had its maximum size; the peer may have more headers.
- // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
- // from there instead.
- LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
- connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
- }
-
- bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
- // If this set of headers is valid and ends in a block with at least as
- // much work as our tip, download as much as possible.
- if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
- vector<CBlockIndex *> vToFetch;
- CBlockIndex *pindexWalk = pindexLast;
- // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
- while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
- !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
- (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
- // We don't have this block, and it's not yet in flight.
- vToFetch.push_back(pindexWalk);
- }
- pindexWalk = pindexWalk->pprev;
- }
- // If pindexWalk still isn't on our main chain, we're looking at a
- // very large reorg at a time we think we're close to caught up to
- // the main chain -- this shouldn't really happen. Bail out on the
- // direct fetch and rely on parallel download instead.
- if (!chainActive.Contains(pindexWalk)) {
- LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
- pindexLast->GetBlockHash().ToString(),
- pindexLast->nHeight);
- } else {
- vector<CInv> vGetData;
- // Download as much as possible, from earliest to latest.
- BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
- if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- // Can't download any more from this peer
- break;
- }
- uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
- vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
- LogPrint("net", "Requesting block %s from peer=%d\n",
- pindex->GetBlockHash().ToString(), pfrom->id);
- }
- if (vGetData.size() > 1) {
- LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
- pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
- }
- if (vGetData.size() > 0) {
- if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
- // We seem to be rather well-synced, so it appears pfrom was the first to provide us
- // with this block! Let's get them to announce using compact blocks in the future.
- MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
- // In any case, we want to download using a compact block, not a regular one
- vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
- }
- connman.PushMessage(pfrom, NetMsgType::GETDATA, vGetData);
- }
- }
- }
- }
-
- NotifyHeaderTip();
- }
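-
The direct-fetch block above collects candidates by walking backwards from pindexLast toward the active chain (newest first, capped by MAX_BLOCKS_IN_TRANSIT_PER_PEER) and then requests them in reverse so downloads run earliest-to-latest. A standalone sketch of that collect-backwards / fetch-forwards pattern, with a minimal Node stand-in for CBlockIndex:

    // Standalone sketch of the collect-backwards / fetch-forwards pattern above.
    // Node is a simplified stand-in for CBlockIndex; onActiveChain stands in for
    // chainActive.Contains(), and the in-flight / witness checks are omitted.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Node { int height; bool haveData; bool onActiveChain; const Node* pprev; };

    std::vector<const Node*> CollectToFetch(const Node* pindexLast, size_t maxInFlight)
    {
        std::vector<const Node*> vToFetch;
        const Node* walk = pindexLast;
        // Walk back toward the active chain, newest block first.
        while (walk && !walk->onActiveChain && vToFetch.size() <= maxInFlight) {
            if (!walk->haveData)
                vToFetch.push_back(walk);
            walk = walk->pprev;
        }
        // Issue requests earliest-to-latest (the original iterates in reverse).
        std::reverse(vToFetch.begin(), vToFetch.end());
        return vToFetch;
    }

    int main()
    {
        Node a{100, true, true, nullptr};
        Node b{101, false, false, &a};
        Node c{102, false, false, &b};
        for (const Node* n : CollectToFetch(&c, 16))
            std::cout << "getdata block at height " << n->height << "\n";
    }
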
-
- else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
- {
- CBlock block;
- vRecv >> block;
-
- LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id);
-
- CValidationState state;
- // Process all blocks from whitelisted peers, even if not requested,
- // unless we're still syncing with the network.
- // Such an unrequested block may still be processed, subject to the
- // conditions in AcceptBlock().
- bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
- {
- LOCK(cs_main);
- // Also always process if we requested the block explicitly, as we may
- // need it even though it is not a candidate for a new best tip.
- forceProcessing |= MarkBlockAsReceived(block.GetHash());
- }
- ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL, true);
- int nDoS;
- if (state.IsInvalid(nDoS)) {
- assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
- connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
- state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
- if (nDoS > 0) {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), nDoS);
- }
- }
-
- }
-
-
- else if (strCommand == NetMsgType::GETADDR)
- {
- // This asymmetric behavior for inbound and outbound connections was introduced
- // to prevent a fingerprinting attack: an attacker can send specific fake addresses
- // to users' AddrMan and later request them by sending getaddr messages.
- // Making nodes which are behind NAT and can only make outgoing connections ignore
- // the getaddr message mitigates the attack.
- if (!pfrom->fInbound) {
- LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
- return true;
- }
-
- // Only send one GetAddr response per connection to reduce resource waste
- // and discourage addr stamping of INV announcements.
- if (pfrom->fSentAddr) {
- LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
- return true;
- }
- pfrom->fSentAddr = true;
-
- pfrom->vAddrToSend.clear();
- vector<CAddress> vAddr = connman.GetAddresses();
- FastRandomContext insecure_rand;
- BOOST_FOREACH(const CAddress &addr, vAddr)
- pfrom->PushAddress(addr, insecure_rand);
- }
-
-
- else if (strCommand == NetMsgType::MEMPOOL)
- {
- if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
- {
- LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
- pfrom->fDisconnect = true;
- return true;
- }
-
- if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
- {
- LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
- pfrom->fDisconnect = true;
- return true;
- }
-
- LOCK(pfrom->cs_inventory);
- pfrom->fSendMempool = true;
- }
-
-
- else if (strCommand == NetMsgType::PING)
- {
- if (pfrom->nVersion > BIP0031_VERSION)
- {
- uint64_t nonce = 0;
- vRecv >> nonce;
- // Echo the message back with the nonce. This allows for two useful features:
- //
- // 1) A remote node can quickly check if the connection is operational
- // 2) Remote nodes can measure the latency of the network thread. If this node
- // is overloaded it won't respond to pings quickly and the remote node can
- // avoid sending us more work, like chain download requests.
- //
- // The nonce stops the remote getting confused between different pings: without
- // it, if the remote node sends a ping once per second and this node takes 5
- // seconds to respond to each, the 5th ping the remote sends would appear to
- // return very quickly.
- connman.PushMessage(pfrom, NetMsgType::PONG, nonce);
- }
- }
-
-
- else if (strCommand == NetMsgType::PONG)
- {
- int64_t pingUsecEnd = nTimeReceived;
- uint64_t nonce = 0;
- size_t nAvail = vRecv.in_avail();
- bool bPingFinished = false;
- std::string sProblem;
-
- if (nAvail >= sizeof(nonce)) {
- vRecv >> nonce;
-
- // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
- if (pfrom->nPingNonceSent != 0) {
- if (nonce == pfrom->nPingNonceSent) {
- // Matching pong received, this ping is no longer outstanding
- bPingFinished = true;
- int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
- if (pingUsecTime > 0) {
- // Successful ping time measurement, replace previous
- pfrom->nPingUsecTime = pingUsecTime;
- pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
- } else {
- // This should never happen
- sProblem = "Timing mishap";
- }
- } else {
- // Nonce mismatches are normal when pings are overlapping
- sProblem = "Nonce mismatch";
- if (nonce == 0) {
- // This is most likely a bug in another implementation somewhere; cancel this ping
- bPingFinished = true;
- sProblem = "Nonce zero";
- }
- }
- } else {
- sProblem = "Unsolicited pong without ping";
- }
- } else {
- // This is most likely a bug in another implementation somewhere; cancel this ping
- bPingFinished = true;
- sProblem = "Short payload";
- }
-
- if (!(sProblem.empty())) {
- LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
- pfrom->id,
- sProblem,
- pfrom->nPingNonceSent,
- nonce,
- nAvail);
- }
- if (bPingFinished) {
- pfrom->nPingNonceSent = 0;
- }
- }
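The PING/PONG comments above describe a nonce-matched round-trip measurement. Below is a minimal, self-contained sketch of that bookkeeping; PingTracker, SendPing and OnPongReceived are hypothetical names, and only the match-the-outstanding-nonce / keep-the-minimum-RTT behaviour mirrors the handler.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <limits>
#include <random>

struct PingTracker {
    uint64_t nonce_sent = 0;  // 0 means no ping is outstanding
    std::chrono::steady_clock::time_point sent_at{};
    int64_t last_rtt_usec = 0;
    int64_t min_rtt_usec = std::numeric_limits<int64_t>::max();

    uint64_t SendPing() {
        static std::mt19937_64 rng{std::random_device{}()};
        do { nonce_sent = rng(); } while (nonce_sent == 0);  // never send a zero nonce
        sent_at = std::chrono::steady_clock::now();
        return nonce_sent;
    }

    // Returns true only if the pong matched the outstanding ping and finished it.
    bool OnPongReceived(uint64_t nonce) {
        if (nonce_sent == 0 || nonce != nonce_sent) return false;  // unsolicited or overlapping ping
        const int64_t usec = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - sent_at).count();
        if (usec > 0) {
            last_rtt_usec = usec;                                   // replace previous measurement
            min_rtt_usec = std::min<int64_t>(min_rtt_usec, usec);   // keep the best ever seen
        }
        nonce_sent = 0;  // the ping is no longer outstanding
        return true;
    }
};

int main() {
    PingTracker t;
    const uint64_t n = t.SendPing();
    t.OnPongReceived(n);  // in practice the pong arrives from the peer
    std::cout << "last rtt: " << t.last_rtt_usec << " usec\n";
}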
-
-
- else if (strCommand == NetMsgType::FILTERLOAD)
- {
- CBloomFilter filter;
- vRecv >> filter;
-
- if (!filter.IsWithinSizeConstraints())
- {
- // There is no excuse for sending a too-large filter
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 100);
- }
- else
- {
- LOCK(pfrom->cs_filter);
- delete pfrom->pfilter;
- pfrom->pfilter = new CBloomFilter(filter);
- pfrom->pfilter->UpdateEmptyFull();
- pfrom->fRelayTxes = true;
- }
- }
-
-
- else if (strCommand == NetMsgType::FILTERADD)
- {
- vector<unsigned char> vData;
- vRecv >> vData;
-
- // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
- // and thus, the maximum size any matched object can have) in a filteradd message
- bool bad = false;
- if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
- bad = true;
- } else {
- LOCK(pfrom->cs_filter);
- if (pfrom->pfilter) {
- pfrom->pfilter->insert(vData);
- } else {
- bad = true;
- }
- }
- if (bad) {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), 100);
- }
- }
-
-
- else if (strCommand == NetMsgType::FILTERCLEAR)
- {
- LOCK(pfrom->cs_filter);
- if (pfrom->GetLocalServices() & NODE_BLOOM) {
- delete pfrom->pfilter;
- pfrom->pfilter = new CBloomFilter();
- }
- pfrom->fRelayTxes = true;
- }
-
-
- else if (strCommand == NetMsgType::REJECT)
- {
- if (fDebug) {
- try {
- string strMsg; unsigned char ccode; string strReason;
- vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
-
- ostringstream ss;
- ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
-
- if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
- {
- uint256 hash;
- vRecv >> hash;
- ss << ": hash " << hash.ToString();
- }
- LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
- } catch (const std::ios_base::failure&) {
- // Avoid feedback loops by preventing reject messages from triggering a new reject message.
- LogPrint("net", "Unparseable reject message received\n");
- }
- }
- }
-
- else if (strCommand == NetMsgType::FEEFILTER) {
- CAmount newFeeFilter = 0;
- vRecv >> newFeeFilter;
- if (MoneyRange(newFeeFilter)) {
- {
- LOCK(pfrom->cs_feeFilter);
- pfrom->minFeeFilter = newFeeFilter;
- }
- LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
- }
- }
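A feefilter value accepted above simply sets the floor that SendMessages later applies when deciding which transactions to inv to this peer. A self-contained sketch of that comparison follows; the helper names are made up, and the feerate is a plain integer in satoshis per 1000 bytes rather than the real CFeeRate class.

#include <cstdint>
#include <iostream>

using CAmount = int64_t;  // satoshis, as in the surrounding code

// Illustrative fee-per-1000-bytes; CFeeRate is not reproduced here.
CAmount FeePerK(CAmount fee, size_t tx_bytes) {
    return fee * 1000 / static_cast<CAmount>(tx_bytes);
}

// A transaction is relayed to the peer only if its feerate is not below the
// minimum the peer announced via "feefilter" (a zero filter disables the check).
bool PassesPeerFeeFilter(CAmount fee, size_t tx_bytes, CAmount min_fee_filter) {
    return min_fee_filter == 0 || FeePerK(fee, tx_bytes) >= min_fee_filter;
}

int main() {
    const CAmount peer_filter = 1000;  // peer announced 1000 sat/kB
    std::cout << PassesPeerFeeFilter(500, 250, peer_filter) << "\n";  // 2000 sat/kB -> 1 (relayed)
    std::cout << PassesPeerFeeFilter(100, 250, peer_filter) << "\n";  //  400 sat/kB -> 0 (skipped)
}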
-
- else if (strCommand == NetMsgType::NOTFOUND) {
- // We do not care about the NOTFOUND message, but logging an Unknown Command
- // message would be undesirable as we transmit it ourselves.
- }
-
- else {
- // Ignore unknown commands for extensibility
- LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
- }
-
-
-
- return true;
-}
-
-// requires LOCK(cs_vRecvMsg)
-bool ProcessMessages(CNode* pfrom, CConnman& connman)
-{
- const CChainParams& chainparams = Params();
- unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
- //if (fDebug)
- // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
-
- //
- // Message format
- // (4) message start
- // (12) command
- // (4) size
- // (4) checksum
- // (x) data
- //
- bool fOk = true;
-
- if (!pfrom->vRecvGetData.empty())
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
-
- // this maintains the order of responses
- if (!pfrom->vRecvGetData.empty()) return fOk;
-
- std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
- while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
- // Don't bother if send buffer is too full to respond anyway
- if (pfrom->nSendSize >= nMaxSendBufferSize)
- break;
-
- // get next message
- CNetMessage& msg = *it;
-
- //if (fDebug)
- // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
- // msg.hdr.nMessageSize, msg.vRecv.size(),
- // msg.complete() ? "Y" : "N");
-
- // end, if an incomplete message is found
- if (!msg.complete())
- break;
-
- // at this point, any failure means we can delete the current message
- it++;
-
- // Scan for message start
- if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
- LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
- fOk = false;
- break;
- }
-
- // Read header
- CMessageHeader& hdr = msg.hdr;
- if (!hdr.IsValid(chainparams.MessageStart()))
- {
- LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
- continue;
- }
- string strCommand = hdr.GetCommand();
-
- // Message size
- unsigned int nMessageSize = hdr.nMessageSize;
-
- // Checksum
- CDataStream& vRecv = msg.vRecv;
- const uint256& hash = msg.GetMessageHash();
- if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
- {
- LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
- SanitizeString(strCommand), nMessageSize,
- HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
- HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
- continue;
- }
-
- // Process message
- bool fRet = false;
- try
- {
- fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman);
- boost::this_thread::interruption_point();
- }
- catch (const std::ios_base::failure& e)
- {
- connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
- if (strstr(e.what(), "end of data"))
- {
- // Allow exceptions from under-length message on vRecv
- LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
- }
- else if (strstr(e.what(), "size too large"))
- {
- // Allow exceptions from over-long size
- LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
- }
- else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
- {
- // Allow exceptions from non-canonical encoding
- LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
- }
- else
- {
- PrintExceptionContinue(&e, "ProcessMessages()");
- }
- }
- catch (const boost::thread_interrupted&) {
- throw;
- }
- catch (const std::exception& e) {
- PrintExceptionContinue(&e, "ProcessMessages()");
- } catch (...) {
- PrintExceptionContinue(NULL, "ProcessMessages()");
- }
-
- if (!fRet)
- LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
-
- break;
- }
-
- // In case the connection got shut down, its receive buffer was wiped
- if (!pfrom->fDisconnect)
- pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
-
- return fOk;
-}
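The 24-byte framing listed in the "Message format" comment inside ProcessMessages above (4-byte message start, 12-byte command, 4-byte size, 4-byte checksum, then the payload) can be pictured as a packed struct. This is an illustrative layout only: the field names are invented here, and the real code parses the header through CMessageHeader rather than a raw struct.

#include <array>
#include <cstdint>
#include <cstring>

#pragma pack(push, 1)
struct WireMessageHeader {
    std::array<uint8_t, 4> magic;     // message start / network magic
    std::array<char, 12>   command;   // NUL-padded command name
    uint32_t               size;      // payload length in bytes (little-endian on the wire)
    std::array<uint8_t, 4> checksum;  // first 4 bytes of double-SHA256 of the payload
};
#pragma pack(pop)

static_assert(sizeof(WireMessageHeader) == 24, "header is 4 + 12 + 4 + 4 bytes");

int main() {
    WireMessageHeader hdr{};
    std::memcpy(hdr.command.data(), "ping", 4);
    hdr.size = 8;  // a ping payload carries one 8-byte nonce
    return 0;
}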
-
-class CompareInvMempoolOrder
-{
- CTxMemPool *mp;
-public:
- CompareInvMempoolOrder(CTxMemPool *_mempool)
- {
- mp = _mempool;
- }
-
- bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
- {
- /* As std::make_heap produces a max-heap, we want the entries with the
- * fewest ancestors/highest fee to sort later. */
- return mp->CompareDepthAndScore(*b, *a);
- }
-};
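CompareInvMempoolOrder swaps its arguments because std::make_heap builds a max-heap: whatever should be popped first must compare as the greatest element. A standalone illustration of the same trick with plain integers ("fewest ancestors first") is below; it is not the real mempool comparison, just the heap mechanics.

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    std::vector<int> ancestor_counts = {3, 9, 1, 7};

    // Inverted comparison: fewer ancestors counts as "greater" for heap purposes,
    // so pop_heap hands back the smallest value first.
    auto cmp = [](int a, int b) { return a > b; };

    std::make_heap(ancestor_counts.begin(), ancestor_counts.end(), cmp);
    while (!ancestor_counts.empty()) {
        std::pop_heap(ancestor_counts.begin(), ancestor_counts.end(), cmp);
        std::cout << ancestor_counts.back() << ' ';  // prints 1 3 7 9
        ancestor_counts.pop_back();
    }
    std::cout << '\n';
}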
-
-bool SendMessages(CNode* pto, CConnman& connman)
-{
- const Consensus::Params& consensusParams = Params().GetConsensus();
- {
- // Don't send anything until we get its version message
- if (pto->nVersion == 0)
- return true;
-
- //
- // Message: ping
- //
- bool pingSend = false;
- if (pto->fPingQueued) {
- // RPC ping request by user
- pingSend = true;
- }
- if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
- // Ping automatically sent as a latency probe & keepalive.
- pingSend = true;
- }
- if (pingSend && !pto->fDisconnect) {
- uint64_t nonce = 0;
- while (nonce == 0) {
- GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
- }
- pto->fPingQueued = false;
- pto->nPingUsecStart = GetTimeMicros();
- if (pto->nVersion > BIP0031_VERSION) {
- pto->nPingNonceSent = nonce;
- connman.PushMessage(pto, NetMsgType::PING, nonce);
- } else {
- // Peer is too old to support ping command with nonce, pong will never arrive.
- pto->nPingNonceSent = 0;
- connman.PushMessage(pto, NetMsgType::PING);
- }
- }
-
- TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
- if (!lockMain)
- return true;
-
- // Address refresh broadcast
- int64_t nNow = GetTimeMicros();
- if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
- AdvertiseLocal(pto);
- pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
- }
-
- //
- // Message: addr
- //
- if (pto->nNextAddrSend < nNow) {
- pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
- vector<CAddress> vAddr;
- vAddr.reserve(pto->vAddrToSend.size());
- BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
- {
- if (!pto->addrKnown.contains(addr.GetKey()))
- {
- pto->addrKnown.insert(addr.GetKey());
- vAddr.push_back(addr);
- // receiver rejects addr messages larger than 1000
- if (vAddr.size() >= 1000)
- {
- connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
- vAddr.clear();
- }
- }
- }
- pto->vAddrToSend.clear();
- if (!vAddr.empty())
- connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
- // we only send the big addr message once
- if (pto->vAddrToSend.capacity() > 40)
- pto->vAddrToSend.shrink_to_fit();
- }
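nNextAddrSend and nNextLocalAddrSend above are drawn with PoissonNextSend, i.e. the gap between broadcasts is exponentially distributed around an average interval so sends form a Poisson process. The sketch below only illustrates that idea; it does not reproduce the real PoissonNextSend, and the 30-second average is just an example value.

#include <cstdint>
#include <iostream>
#include <random>

// Next send time = now + Exp(mean = average interval), in microseconds.
int64_t NextSendMicros(int64_t now_usec, int64_t avg_interval_seconds, std::mt19937_64& rng) {
    std::exponential_distribution<double> dist(1.0 / (avg_interval_seconds * 1e6));
    return now_usec + static_cast<int64_t>(dist(rng) + 0.5);  // round to the nearest microsecond
}

int main() {
    std::mt19937_64 rng{42};
    int64_t now = 0;
    for (int i = 0; i < 5; ++i) {
        now = NextSendMicros(now, 30, rng);  // e.g. ~30 s average between addr broadcasts
        std::cout << "send at " << now / 1e6 << " s\n";
    }
}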
-
- CNodeState &state = *State(pto->GetId());
- if (state.fShouldBan) {
- if (pto->fWhitelisted)
- LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
- else {
- pto->fDisconnect = true;
- if (pto->addr.IsLocal())
- LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
- else
- {
- connman.Ban(pto->addr, BanReasonNodeMisbehaving);
- }
- }
- state.fShouldBan = false;
- }
-
- BOOST_FOREACH(const CBlockReject& reject, state.rejects)
- connman.PushMessage(pto, NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
- state.rejects.clear();
-
- // Start block sync
- if (pindexBestHeader == NULL)
- pindexBestHeader = chainActive.Tip();
- bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
- if (!state.fSyncStarted && !pto->fClient && !pto->fDisconnect && !fImporting && !fReindex) {
- // Only actively request headers from a single peer, unless we're close to today.
- if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
- state.fSyncStarted = true;
- nSyncStarted++;
- const CBlockIndex *pindexStart = pindexBestHeader;
- /* If possible, start at the block preceding the currently
- best known header. This ensures that we always get a
- non-empty list of headers back as long as the peer
- is up-to-date. With a non-empty response, we can initialise
- the peer's known best block. This wouldn't be possible
- if we requested starting at pindexBestHeader and
- got back an empty response. */
- if (pindexStart->pprev)
- pindexStart = pindexStart->pprev;
- LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
- connman.PushMessage(pto, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
- }
- }
-
- // Resend wallet transactions that haven't gotten in a block yet
- // Except during reindex, importing and IBD, when old wallet
-        // transactions become unconfirmed and spam other nodes.
- if (!fReindex && !fImporting && !IsInitialBlockDownload())
- {
- GetMainSignals().Broadcast(nTimeBestReceived, &connman);
- }
-
- //
- // Try sending block announcements via headers
- //
- {
-            // If we have fewer than MAX_BLOCKS_TO_ANNOUNCE in our
-            // list of block hashes we're relaying, and our peer wants
-            // headers announcements, then find the first header that is
-            // not yet known to our peer but would connect, and send.
- // If no header would connect, or if we have too many
- // blocks, or if the peer doesn't want headers, just
- // add all to the inv queue.
- LOCK(pto->cs_inventory);
- vector<CBlock> vHeaders;
- bool fRevertToInv = ((!state.fPreferHeaders &&
- (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
- pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
- CBlockIndex *pBestIndex = NULL; // last header queued for delivery
- ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
-
- if (!fRevertToInv) {
- bool fFoundStartingHeader = false;
- // Try to find first header that our peer doesn't have, and
- // then send all headers past that one. If we come across any
- // headers that aren't on chainActive, give up.
- BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
- BlockMap::iterator mi = mapBlockIndex.find(hash);
- assert(mi != mapBlockIndex.end());
- CBlockIndex *pindex = mi->second;
- if (chainActive[pindex->nHeight] != pindex) {
- // Bail out if we reorged away from this block
- fRevertToInv = true;
- break;
- }
- if (pBestIndex != NULL && pindex->pprev != pBestIndex) {
-                    // This means that the list of blocks to announce doesn't
-                    // connect to each other.
- // This shouldn't really be possible to hit during
- // regular operation (because reorgs should take us to
- // a chain that has some block not on the prior chain,
- // which should be caught by the prior check), but one
- // way this could happen is by using invalidateblock /
- // reconsiderblock repeatedly on the tip, causing it to
- // be added multiple times to vBlockHashesToAnnounce.
- // Robustly deal with this rare situation by reverting
- // to an inv.
- fRevertToInv = true;
- break;
- }
- pBestIndex = pindex;
- if (fFoundStartingHeader) {
- // add this to the headers message
- vHeaders.push_back(pindex->GetBlockHeader());
- } else if (PeerHasHeader(&state, pindex)) {
- continue; // keep looking for the first new block
- } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
- // Peer doesn't have this header but they do have the prior one.
- // Start sending headers.
- fFoundStartingHeader = true;
- vHeaders.push_back(pindex->GetBlockHeader());
- } else {
- // Peer doesn't have this header or the prior one -- nothing will
- // connect, so bail out.
- fRevertToInv = true;
- break;
- }
- }
- }
- if (!fRevertToInv && !vHeaders.empty()) {
- if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
-                // We only send up to 1 block as header-and-ids, as sending more
-                // probably means we're doing an initial-ish sync or the peer is slow
- LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->id);
- //TODO: Shouldn't need to reload block from disk, but requires refactor
- CBlock block;
- assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
- CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
- connman.PushMessageWithFlag(pto, state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
- state.pindexBestHeaderSent = pBestIndex;
- } else if (state.fPreferHeaders) {
- if (vHeaders.size() > 1) {
- LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
- vHeaders.size(),
- vHeaders.front().GetHash().ToString(),
- vHeaders.back().GetHash().ToString(), pto->id);
- } else {
- LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->id);
- }
- connman.PushMessage(pto, NetMsgType::HEADERS, vHeaders);
- state.pindexBestHeaderSent = pBestIndex;
- } else
- fRevertToInv = true;
- }
- if (fRevertToInv) {
- // If falling back to using an inv, just try to inv the tip.
- // The last entry in vBlockHashesToAnnounce was our tip at some point
- // in the past.
- if (!pto->vBlockHashesToAnnounce.empty()) {
- const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
- BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
- assert(mi != mapBlockIndex.end());
- CBlockIndex *pindex = mi->second;
-
- // Warn if we're announcing a block that is not on the main chain.
- // This should be very rare and could be optimized out.
- // Just log for now.
- if (chainActive[pindex->nHeight] != pindex) {
- LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
- hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
- }
-
- // If the peer's chain has this block, don't inv it back.
- if (!PeerHasHeader(&state, pindex)) {
- pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
- LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
- pto->id, hashToAnnounce.ToString());
- }
- }
- }
- pto->vBlockHashesToAnnounce.clear();
- }
-
- //
- // Message: inventory
- //
- vector<CInv> vInv;
- {
- LOCK(pto->cs_inventory);
- vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
-
- // Add blocks
- BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
- vInv.push_back(CInv(MSG_BLOCK, hash));
- if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto, NetMsgType::INV, vInv);
- vInv.clear();
- }
- }
- pto->vInventoryBlockToSend.clear();
-
- // Check whether periodic sends should happen
- bool fSendTrickle = pto->fWhitelisted;
- if (pto->nNextInvSend < nNow) {
- fSendTrickle = true;
- // Use half the delay for outbound peers, as there is less privacy concern for them.
- pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
- }
-
- // Time to send but the peer has requested we not relay transactions.
- if (fSendTrickle) {
- LOCK(pto->cs_filter);
- if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
- }
-
- // Respond to BIP35 mempool requests
- if (fSendTrickle && pto->fSendMempool) {
- auto vtxinfo = mempool.infoAll();
- pto->fSendMempool = false;
- CAmount filterrate = 0;
- {
- LOCK(pto->cs_feeFilter);
- filterrate = pto->minFeeFilter;
- }
-
- LOCK(pto->cs_filter);
-
- for (const auto& txinfo : vtxinfo) {
- const uint256& hash = txinfo.tx->GetHash();
- CInv inv(MSG_TX, hash);
- pto->setInventoryTxToSend.erase(hash);
- if (filterrate) {
- if (txinfo.feeRate.GetFeePerK() < filterrate)
- continue;
- }
- if (pto->pfilter) {
- if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
- }
- pto->filterInventoryKnown.insert(hash);
- vInv.push_back(inv);
- if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto, NetMsgType::INV, vInv);
- vInv.clear();
- }
- }
- pto->timeLastMempoolReq = GetTime();
- }
-
- // Determine transactions to relay
- if (fSendTrickle) {
- // Produce a vector with all candidates for sending
- vector<std::set<uint256>::iterator> vInvTx;
- vInvTx.reserve(pto->setInventoryTxToSend.size());
- for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
- vInvTx.push_back(it);
- }
- CAmount filterrate = 0;
- {
- LOCK(pto->cs_feeFilter);
- filterrate = pto->minFeeFilter;
- }
- // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
- // A heap is used so that not all items need sorting if only a few are being sent.
- CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
- std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
- // No reason to drain out at many times the network's capacity,
- // especially since we have many peers and some will draw much shorter delays.
- unsigned int nRelayedTransactions = 0;
- LOCK(pto->cs_filter);
- while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
- // Fetch the top element from the heap
- std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
- std::set<uint256>::iterator it = vInvTx.back();
- vInvTx.pop_back();
- uint256 hash = *it;
- // Remove it from the to-be-sent set
- pto->setInventoryTxToSend.erase(it);
- // Check if not in the filter already
- if (pto->filterInventoryKnown.contains(hash)) {
- continue;
- }
- // Not in the mempool anymore? don't bother sending it.
- auto txinfo = mempool.info(hash);
- if (!txinfo.tx) {
- continue;
- }
- if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
- continue;
- }
- if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
- // Send
- vInv.push_back(CInv(MSG_TX, hash));
- nRelayedTransactions++;
- {
- // Expire old relay messages
- while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
- {
- mapRelay.erase(vRelayExpiration.front().second);
- vRelayExpiration.pop_front();
- }
-
- auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
- if (ret.second) {
- vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
- }
- }
- if (vInv.size() == MAX_INV_SZ) {
- connman.PushMessage(pto, NetMsgType::INV, vInv);
- vInv.clear();
- }
- pto->filterInventoryKnown.insert(hash);
- }
- }
- }
- if (!vInv.empty())
- connman.PushMessage(pto, NetMsgType::INV, vInv);
-
- // Detect whether we're stalling
- nNow = GetTimeMicros();
- if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
- // Stalling only triggers when the block download window cannot move. During normal steady state,
- // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
- // should only happen during initial block download.
- LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
- pto->fDisconnect = true;
- }
-        // In case there is a block that has been in flight from this peer for 1 + 0.5 * N times the block interval
- // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
- // We compensate for other peers to prevent killing off peers due to our own downstream link
- // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
- // to unreasonably increase our timeout.
- if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
- QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
- int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
- if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
- LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
- pto->fDisconnect = true;
- }
- }
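A standalone sketch of the deadline computed above follows, using the BLOCK_DOWNLOAD_TIMEOUT_BASE / BLOCK_DOWNLOAD_TIMEOUT_PER_PEER values shown in the validation.h hunk of this diff (expressed in millionths of the block interval). The function and variable names here are illustrative, not the real API.

#include <cstdint>
#include <iostream>

static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;     // one full block interval
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;  // +0.5 interval per other downloading peer

// Deadline (microseconds) after which the peer's oldest in-flight block times out.
// block_interval_sec corresponds to nPowTargetSpacing (600 on mainnet).
int64_t BlockDownloadDeadline(int64_t downloading_since_usec,
                              int64_t block_interval_sec,
                              int other_peers_with_validated_downloads)
{
    return downloading_since_usec +
           block_interval_sec * (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                                 BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * other_peers_with_validated_downloads);
}

int main() {
    // With one other peer downloading validated blocks, the timeout is 1.5 intervals.
    const int64_t deadline = BlockDownloadDeadline(0, 600, 1);
    std::cout << deadline / 1000000 << " seconds\n";  // prints 900 (15 minutes)
}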
-
- //
- // Message: getdata (blocks)
- //
- vector<CInv> vGetData;
- if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- vector<CBlockIndex*> vToDownload;
- NodeId staller = -1;
- FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
- BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
- uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
- vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
- LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
- pindex->nHeight, pto->id);
- }
- if (state.nBlocksInFlight == 0 && staller != -1) {
- if (State(staller)->nStallingSince == 0) {
- State(staller)->nStallingSince = nNow;
- LogPrint("net", "Stall started peer=%d\n", staller);
- }
- }
- }
-
- //
- // Message: getdata (non-blocks)
- //
- while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
- {
- const CInv& inv = (*pto->mapAskFor.begin()).second;
- if (!AlreadyHave(inv))
- {
- if (fDebug)
- LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
- vGetData.push_back(inv);
- if (vGetData.size() >= 1000)
- {
- connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
- vGetData.clear();
- }
- } else {
-                // If we're not going to ask, don't expect a response.
- pto->setAskFor.erase(inv.hash);
- }
- pto->mapAskFor.erase(pto->mapAskFor.begin());
- }
- if (!vGetData.empty())
- connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
-
- //
- // Message: feefilter
- //
-        // We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
- if (pto->nVersion >= FEEFILTER_VERSION && GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
- !(pto->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
- CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
- int64_t timeNow = GetTimeMicros();
- if (timeNow > pto->nextSendTimeFeeFilter) {
- CAmount filterToSend = filterRounder.round(currentFilter);
- if (filterToSend != pto->lastSentFeeFilter) {
- connman.PushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
- pto->lastSentFeeFilter = filterToSend;
- }
- pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
- }
- // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
- // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
- else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
- (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
- pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
- }
- }
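The "changed substantially" test above pulls the next feefilter broadcast forward only when the current filter has moved outside roughly plus or minus a third of the last value sent to the peer. A standalone sketch of just that predicate (names are illustrative):

#include <cstdint>
#include <iostream>

using CAmount = int64_t;

// True when the current filter has dropped below 3/4 of, or risen above 4/3 of,
// the value last broadcast to the peer.
bool FeeFilterChangedSubstantially(CAmount current, CAmount last_sent) {
    return current < 3 * last_sent / 4 || current > 4 * last_sent / 3;
}

int main() {
    std::cout << FeeFilterChangedSubstantially(1000, 1000) << "\n";  // 0: unchanged
    std::cout << FeeFilterChangedSubstantially(700, 1000) << "\n";   // 1: fell below 3/4
    std::cout << FeeFilterChangedSubstantially(1400, 1000) << "\n";  // 1: rose above 4/3
}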
- }
- return true;
-}
-
std::string CBlockFileInfo::ToString() const {
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
@@ -7036,10 +4107,9 @@ bool LoadMempool(void)
file >> num;
double prioritydummy = 0;
while (num--) {
- CTransaction tx;
int64_t nTime;
int64_t nFeeDelta;
- file >> tx;
+ CTransaction tx(deserialize, file);
file >> nTime;
file >> nFeeDelta;
@@ -7132,9 +4202,5 @@ public:
for (; it1 != mapBlockIndex.end(); it1++)
delete (*it1).second;
mapBlockIndex.clear();
-
- // orphan transactions
- mapOrphanTransactions.clear();
- mapOrphanTransactionsByPrev.clear();
}
} instance_of_cmaincleanup;
diff --git a/src/main.h b/src/validation.h
index 21829b6c25..28e81d21c6 100644
--- a/src/main.h
+++ b/src/validation.h
@@ -3,8 +3,8 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#ifndef BITCOIN_MAIN_H
-#define BITCOIN_MAIN_H
+#ifndef BITCOIN_VALIDATION_H
+#define BITCOIN_VALIDATION_H
#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
@@ -13,10 +13,9 @@
#include "amount.h"
#include "chain.h"
#include "coins.h"
-#include "net.h"
+#include "protocol.h" // For CMessageHeader::MessageStartChars
#include "script/script_error.h"
#include "sync.h"
-#include "validationinterface.h"
#include "versionbits.h"
#include <algorithm>
@@ -28,7 +27,10 @@
#include <utility>
#include <vector>
+#include <atomic>
+
#include <boost/unordered_map.hpp>
+#include <boost/filesystem/path.hpp>
class CBlockIndex;
class CBlockTreeDB;
@@ -125,7 +127,7 @@ static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
/** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
-static const unsigned int DEFAULT_LIMITFREERELAY = 15;
+static const unsigned int DEFAULT_LIMITFREERELAY = 0;
static const bool DEFAULT_RELAYPRIORITY = true;
static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60;
@@ -165,7 +167,7 @@ extern uint64_t nLastBlockWeight;
extern const std::string strMessageMagic;
extern CWaitableCriticalSection csBestBlock;
extern CConditionVariable cvBlockChange;
-extern bool fImporting;
+extern std::atomic_bool fImporting;
extern bool fReindex;
extern int nScriptCheckThreads;
extern bool fTxIndex;
@@ -215,15 +217,36 @@ static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
* Process an incoming block. This only returns after the best known valid
* block is made active. Note that it does not, however, guarantee that the
* specific block passed to it has been checked for validity!
+ *
+ * If you want to *possibly* get feedback on whether pblock is valid, you must
+ * install a CValidationInterface (see validationinterface.h) - this will have
+ * its BlockChecked method called whenever *any* block completes validation.
+ *
+ * Note that we guarantee that either the proof-of-work on pblock is valid, or
+ * (possibly in addition) BlockChecked will have been called.
*
- * @param[out] state This may be set to an Error state if any error occurred processing it, including during validation/connection/etc of otherwise unrelated blocks during reorganization; or it may be set to an Invalid state if pblock is itself invalid (but this is not guaranteed even when the block is checked). If you want to *possibly* get feedback on whether pblock is valid, you must also install a CValidationInterface (see validationinterface.h) - this will have its BlockChecked method called whenever *any* block completes validation.
- * @param[in] pfrom The node which we are receiving the block from; it is added to mapBlockSource and may be penalised if the block is invalid.
+ * Call without cs_main held.
+ *
* @param[in] pblock The block we want to process.
* @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
* @param[out] dbp The already known disk position of pblock, or NULL if not yet stored.
+ * @param[out] fNewBlock A boolean which is set to indicate if the block was first received via this call
* @return True if state.IsValid()
*/
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool fMayBanPeerIfInvalid);
+bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool* fNewBlock);
+
+/**
+ * Process incoming block headers.
+ *
+ * Call without cs_main held.
+ *
+ * @param[in] block The block headers themselves
+ * @param[out] state This may be set to an Error state if any error occurred processing them
+ * @param[in] chainparams The params for the chain we want to connect to
+ * @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
+ */
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL);
+
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
/** Open a block file (blk?????.dat) */
@@ -253,9 +276,9 @@ bool IsInitialBlockDownload();
*/
std::string GetWarnings(const std::string& strFor);
/** Retrieve a transaction (from memory pool, or from disk, if possible) */
-bool GetTransaction(const uint256 &hash, CTransaction &tx, const Consensus::Params& params, uint256 &hashBlock, bool fAllowSlow = false);
+bool GetTransaction(const uint256 &hash, CTransactionRef &tx, const Consensus::Params& params, uint256 &hashBlock, bool fAllowSlow = false);
/** Find the best known block, and make it the tip of the block chain */
-bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, const CBlock* pblock = NULL);
+bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>());
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
/**
@@ -539,45 +562,4 @@ void DumpMempool();
/** Load the mempool from disk. */
bool LoadMempool();
-// The following things handle network-processing logic
-// (and should be moved to a separate file)
-
-/** Register with a network node to receive its signals */
-void RegisterNodeSignals(CNodeSignals& nodeSignals);
-/** Unregister a network node */
-void UnregisterNodeSignals(CNodeSignals& nodeSignals);
-
-class PeerLogicValidation : public CValidationInterface {
-private:
- CConnman* connman;
-
-public:
- PeerLogicValidation(CConnman* connmanIn);
-
- virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload);
- virtual void BlockChecked(const CBlock& block, const CValidationState& state);
-};
-
-struct CNodeStateStats {
- int nMisbehavior;
- int nSyncHeight;
- int nCommonHeight;
- std::vector<int> vHeightInFlight;
-};
-
-/** Get statistics from node state */
-bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats);
-/** Increase a node's misbehavior score. */
-void Misbehaving(NodeId nodeid, int howmuch);
-
-/** Process protocol messages received from a given node */
-bool ProcessMessages(CNode* pfrom, CConnman& connman);
-/**
- * Send queued protocol messages to be sent to a given node.
- *
- * @param[in] pto The node which we are sending messages to.
- * @param[in] connman The connection manager for that node.
- */
-bool SendMessages(CNode* pto, CConnman& connman);
-
-#endif // BITCOIN_MAIN_H
+#endif // BITCOIN_VALIDATION_H
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index bb5337c4ad..008a4ece19 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -6,7 +6,7 @@
#include "chain.h"
#include "rpc/server.h"
#include "init.h"
-#include "main.h"
+#include "validation.h"
#include "script/script.h"
#include "script/standard.h"
#include "sync.h"
@@ -267,11 +267,11 @@ UniValue importprunedfunds(const JSONRPCRequest& request)
"2. \"txoutproof\" (string, required) The hex output from gettxoutproof that contains the transaction\n"
);
- CTransaction tx;
+ CMutableTransaction tx;
if (!DecodeHexTx(tx, request.params[0].get_str()))
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
uint256 hashTx = tx.GetHash();
- CWalletTx wtx(pwalletMain,tx);
+ CWalletTx wtx(pwalletMain, MakeTransactionRef(std::move(tx)));
CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION);
CMerkleBlock merkleBlock;
@@ -304,7 +304,7 @@ UniValue importprunedfunds(const JSONRPCRequest& request)
LOCK2(cs_main, pwalletMain->cs_wallet);
- if (pwalletMain->IsMine(tx)) {
+ if (pwalletMain->IsMine(wtx)) {
pwalletMain->AddToWallet(wtx, false);
return NullUniValue;
}
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 0e2105fc59..d4e21b542d 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -9,7 +9,7 @@
#include "consensus/validation.h"
#include "core_io.h"
#include "init.h"
-#include "main.h"
+#include "validation.h"
#include "net.h"
#include "policy/rbf.h"
#include "rpc/server.h"
@@ -583,10 +583,10 @@ UniValue getreceivedbyaddress(const JSONRPCRequest& request)
for (map<uint256, CWalletTx>::iterator it = pwalletMain->mapWallet.begin(); it != pwalletMain->mapWallet.end(); ++it)
{
const CWalletTx& wtx = (*it).second;
- if (wtx.IsCoinBase() || !CheckFinalTx(wtx))
+ if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx))
continue;
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
if (txout.scriptPubKey == scriptPubKey)
if (wtx.GetDepthInMainChain() >= nMinDepth)
nAmount += txout.nValue;
@@ -637,10 +637,10 @@ UniValue getreceivedbyaccount(const JSONRPCRequest& request)
for (map<uint256, CWalletTx>::iterator it = pwalletMain->mapWallet.begin(); it != pwalletMain->mapWallet.end(); ++it)
{
const CWalletTx& wtx = (*it).second;
- if (wtx.IsCoinBase() || !CheckFinalTx(wtx))
+ if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx))
continue;
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
{
CTxDestination address;
if (ExtractDestination(txout.scriptPubKey, address) && IsMine(*pwalletMain, address) && setAddress.count(address))
@@ -1149,14 +1149,14 @@ UniValue ListReceived(const UniValue& params, bool fByAccounts)
{
const CWalletTx& wtx = (*it).second;
- if (wtx.IsCoinBase() || !CheckFinalTx(wtx))
+ if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx))
continue;
int nDepth = wtx.GetDepthInMainChain();
if (nDepth < nMinDepth)
continue;
- BOOST_FOREACH(const CTxOut& txout, wtx.vout)
+ BOOST_FOREACH(const CTxOut& txout, wtx.tx->vout)
{
CTxDestination address;
if (!ExtractDestination(txout.scriptPubKey, address))
@@ -1780,7 +1780,7 @@ UniValue gettransaction(const JSONRPCRequest& request)
CAmount nCredit = wtx.GetCredit(filter);
CAmount nDebit = wtx.GetDebit(filter);
CAmount nNet = nCredit - nDebit;
- CAmount nFee = (wtx.IsFromMe(filter) ? wtx.GetValueOut() - nDebit : 0);
+ CAmount nFee = (wtx.IsFromMe(filter) ? wtx.tx->GetValueOut() - nDebit : 0);
entry.push_back(Pair("amount", ValueFromAmount(nNet - nFee)));
if (wtx.IsFromMe(filter))
@@ -2420,7 +2420,7 @@ UniValue listunspent(const JSONRPCRequest& request)
continue;
CTxDestination address;
- const CScript& scriptPubKey = out.tx->vout[out.i].scriptPubKey;
+ const CScript& scriptPubKey = out.tx->tx->vout[out.i].scriptPubKey;
bool fValidAddress = ExtractDestination(scriptPubKey, address);
if (setAddress.size() && (!fValidAddress || !setAddress.count(address)))
@@ -2445,7 +2445,7 @@ UniValue listunspent(const JSONRPCRequest& request)
}
entry.push_back(Pair("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end())));
- entry.push_back(Pair("amount", ValueFromAmount(out.tx->vout[out.i].nValue)));
+ entry.push_back(Pair("amount", ValueFromAmount(out.tx->tx->vout[out.i].nValue)));
entry.push_back(Pair("confirmations", out.nDepth));
entry.push_back(Pair("spendable", out.fSpendable));
entry.push_back(Pair("solvable", out.fSolvable));
@@ -2557,17 +2557,16 @@ UniValue fundrawtransaction(const JSONRPCRequest& request)
}
// parse hex string from parameter
- CTransaction origTx;
- if (!DecodeHexTx(origTx, request.params[0].get_str(), true))
+ CMutableTransaction tx;
+ if (!DecodeHexTx(tx, request.params[0].get_str(), true))
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
- if (origTx.vout.size() == 0)
+ if (tx.vout.size() == 0)
throw JSONRPCError(RPC_INVALID_PARAMETER, "TX must have at least one output");
- if (changePosition != -1 && (changePosition < 0 || (unsigned int)changePosition > origTx.vout.size()))
+ if (changePosition != -1 && (changePosition < 0 || (unsigned int)changePosition > tx.vout.size()))
throw JSONRPCError(RPC_INVALID_PARAMETER, "changePosition out of bounds");
- CMutableTransaction tx(origTx);
CAmount nFeeOut;
string strFailReason;
diff --git a/src/wallet/test/accounting_tests.cpp b/src/wallet/test/accounting_tests.cpp
index a833be13d0..eb14d176bd 100644
--- a/src/wallet/test/accounting_tests.cpp
+++ b/src/wallet/test/accounting_tests.cpp
@@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(acc_orderupgrade)
{
CMutableTransaction tx(wtx);
--tx.nLockTime; // Just to change the hash :)
- *static_cast<CTransaction*>(&wtx) = CTransaction(tx);
+ wtx.SetTx(MakeTransactionRef(std::move(tx)));
}
pwalletMain->AddToWallet(wtx);
vpwtx.push_back(&pwalletMain->mapWallet[wtx.GetHash()]);
@@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(acc_orderupgrade)
{
CMutableTransaction tx(wtx);
--tx.nLockTime; // Just to change the hash :)
- *static_cast<CTransaction*>(&wtx) = CTransaction(tx);
+ wtx.SetTx(MakeTransactionRef(std::move(tx)));
}
pwalletMain->AddToWallet(wtx);
vpwtx.push_back(&pwalletMain->mapWallet[wtx.GetHash()]);
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index acf980c784..8085706a99 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -42,7 +42,7 @@ static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = fa
// so stop vin being empty, and cache a non-zero Debit to fake out IsFromMe()
tx.vin.resize(1);
}
- CWalletTx* wtx = new CWalletTx(&wallet, tx);
+ CWalletTx* wtx = new CWalletTx(&wallet, MakeTransactionRef(std::move(tx)));
if (fIsFromMe)
{
wtx->fDebitCached = true;
@@ -266,7 +266,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// test with many inputs
for (CAmount amt=1500; amt < COIN; amt*=10) {
empty_wallet();
- // Create 676 inputs (= MAX_STANDARD_TX_SIZE / 148 bytes per input)
+ // Create 676 inputs (= (old MAX_STANDARD_TX_SIZE == 100000) / 148 bytes per input)
for (uint16_t j = 0; j < 676; j++)
add_coin(amt);
BOOST_CHECK(wallet.SelectCoinsMinConf(2000, 1, 1, vCoins, setCoinsRet, nValueRet));
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index c2bac6e330..638fca9917 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -13,7 +13,7 @@
#include "consensus/validation.h"
#include "key.h"
#include "keystore.h"
-#include "main.h"
+#include "validation.h"
#include "net.h"
#include "policy/policy.h"
#include "primitives/block.h"
@@ -75,7 +75,7 @@ struct CompareValueOnly
std::string COutput::ToString() const
{
- return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->vout[i].nValue));
+ return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->tx->vout[i].nValue));
}
const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const
@@ -400,7 +400,7 @@ set<uint256> CWallet::GetConflicts(const uint256& txid) const
std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range;
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
if (mapTxSpends.count(txin.prevout) <= 1)
continue; // No conflict if zero or one spends
@@ -552,7 +552,7 @@ void CWallet::AddToSpends(const uint256& wtxid)
if (thisTx.IsCoinBase()) // Coinbases don't spend anything!
return;
- BOOST_FOREACH(const CTxIn& txin, thisTx.vin)
+ BOOST_FOREACH(const CTxIn& txin, thisTx.tx->vin)
AddToSpends(txin.prevout, wtxid);
}
@@ -795,7 +795,7 @@ bool CWallet::GetAccountPubkey(CPubKey &pubKey, std::string strAccount, bool bFo
for (map<uint256, CWalletTx>::iterator it = mapWallet.begin();
it != mapWallet.end() && account.vchPubKey.IsValid();
++it)
- BOOST_FOREACH(const CTxOut& txout, (*it).second.vout)
+ BOOST_FOREACH(const CTxOut& txout, (*it).second.tx->vout)
if (txout.scriptPubKey == scriptPubKey) {
bForceNew = true;
break;
@@ -954,7 +954,7 @@ bool CWallet::LoadToWallet(const CWalletTx& wtxIn)
wtx.BindWallet(this);
wtxOrdered.insert(make_pair(wtx.nOrderPos, TxPair(&wtx, (CAccountingEntry*)0)));
AddToSpends(hash);
- BOOST_FOREACH(const CTxIn& txin, wtx.vin) {
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin) {
if (mapWallet.count(txin.prevout.hash)) {
CWalletTx& prevtx = mapWallet[txin.prevout.hash];
if (prevtx.nIndex == -1 && !prevtx.hashUnset()) {
@@ -993,7 +993,7 @@ bool CWallet::AddToWalletIfInvolvingMe(const CTransaction& tx, const CBlockIndex
if (fExisted && !fUpdate) return false;
if (fExisted || IsMine(tx) || IsFromMe(tx))
{
- CWalletTx wtx(this,tx);
+ CWalletTx wtx(this, MakeTransactionRef(tx));
// Get merkle branch if transaction was found in a block
if (posInBlock != -1)
@@ -1052,7 +1052,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
}
// If a transaction changes 'conflicted' state, that changes the balance
// available of the outputs it spends. So force those to be recomputed
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
if (mapWallet.count(txin.prevout.hash))
mapWallet[txin.prevout.hash].MarkDirty();
@@ -1113,7 +1113,7 @@ void CWallet::MarkConflicted(const uint256& hashBlock, const uint256& hashTx)
}
// If a transaction changes 'conflicted' state, that changes the balance
// available of the outputs it spends. So force those to be recomputed
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
if (mapWallet.count(txin.prevout.hash))
mapWallet[txin.prevout.hash].MarkDirty();
@@ -1148,8 +1148,8 @@ isminetype CWallet::IsMine(const CTxIn &txin) const
if (mi != mapWallet.end())
{
const CWalletTx& prev = (*mi).second;
- if (txin.prevout.n < prev.vout.size())
- return IsMine(prev.vout[txin.prevout.n]);
+ if (txin.prevout.n < prev.tx->vout.size())
+ return IsMine(prev.tx->vout[txin.prevout.n]);
}
}
return ISMINE_NO;
@@ -1163,9 +1163,9 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const isminefilter& filter) const
if (mi != mapWallet.end())
{
const CWalletTx& prev = (*mi).second;
- if (txin.prevout.n < prev.vout.size())
- if (IsMine(prev.vout[txin.prevout.n]) & filter)
- return prev.vout[txin.prevout.n].nValue;
+ if (txin.prevout.n < prev.tx->vout.size())
+ if (IsMine(prev.tx->vout[txin.prevout.n]) & filter)
+ return prev.tx->vout[txin.prevout.n].nValue;
}
}
return 0;
@@ -1380,14 +1380,14 @@ void CWalletTx::GetAmounts(list<COutputEntry>& listReceived,
CAmount nDebit = GetDebit(filter);
if (nDebit > 0) // debit>0 means we signed/sent this transaction
{
- CAmount nValueOut = GetValueOut();
+ CAmount nValueOut = tx->GetValueOut();
nFee = nDebit - nValueOut;
}
// Sent/received.
- for (unsigned int i = 0; i < vout.size(); ++i)
+ for (unsigned int i = 0; i < tx->vout.size(); ++i)
{
- const CTxOut& txout = vout[i];
+ const CTxOut& txout = tx->vout[i];
isminetype fIsMine = pwallet->IsMine(txout);
// Only need to handle txouts if AT LEAST one of these is true:
// 1) they debit from us (sent)
@@ -1492,7 +1492,7 @@ int CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate)
int posInBlock;
for (posInBlock = 0; posInBlock < (int)block.vtx.size(); posInBlock++)
{
- if (AddToWalletIfInvolvingMe(block.vtx[posInBlock], pindex, posInBlock, fUpdate))
+ if (AddToWalletIfInvolvingMe(*block.vtx[posInBlock], pindex, posInBlock, fUpdate))
ret++;
}
pindex = chainActive.Next(pindex);
@@ -1573,7 +1573,7 @@ set<uint256> CWalletTx::GetConflicts() const
CAmount CWalletTx::GetDebit(const isminefilter& filter) const
{
- if (vin.empty())
+ if (tx->vin.empty())
return 0;
CAmount debit = 0;
@@ -1608,7 +1608,7 @@ CAmount CWalletTx::GetCredit(const isminefilter& filter) const
if (IsCoinBase() && GetBlocksToMaturity() > 0)
return 0;
- int64_t credit = 0;
+ CAmount credit = 0;
if (filter & ISMINE_SPENDABLE)
{
// GetBalance can assume transactions in mapWallet won't change
@@ -1663,11 +1663,11 @@ CAmount CWalletTx::GetAvailableCredit(bool fUseCache) const
CAmount nCredit = 0;
uint256 hashTx = GetHash();
- for (unsigned int i = 0; i < vout.size(); i++)
+ for (unsigned int i = 0; i < tx->vout.size(); i++)
{
if (!pwallet->IsSpent(hashTx, i))
{
- const CTxOut &txout = vout[i];
+ const CTxOut &txout = tx->vout[i];
nCredit += pwallet->GetCredit(txout, ISMINE_SPENDABLE);
if (!MoneyRange(nCredit))
throw std::runtime_error("CWalletTx::GetAvailableCredit() : value out of range");
@@ -1706,11 +1706,11 @@ CAmount CWalletTx::GetAvailableWatchOnlyCredit(const bool& fUseCache) const
return nAvailableWatchCreditCached;
CAmount nCredit = 0;
- for (unsigned int i = 0; i < vout.size(); i++)
+ for (unsigned int i = 0; i < tx->vout.size(); i++)
{
if (!pwallet->IsSpent(GetHash(), i))
{
- const CTxOut &txout = vout[i];
+ const CTxOut &txout = tx->vout[i];
nCredit += pwallet->GetCredit(txout, ISMINE_WATCH_ONLY);
if (!MoneyRange(nCredit))
throw std::runtime_error("CWalletTx::GetAvailableCredit() : value out of range");
@@ -1758,23 +1758,23 @@ bool CWalletTx::IsTrusted() const
return false;
// Trusted if all inputs are from us and are in the mempool:
- BOOST_FOREACH(const CTxIn& txin, vin)
+ BOOST_FOREACH(const CTxIn& txin, tx->vin)
{
// Transactions not sent by us: not trusted
const CWalletTx* parent = pwallet->GetWalletTx(txin.prevout.hash);
if (parent == NULL)
return false;
- const CTxOut& parentOut = parent->vout[txin.prevout.n];
+ const CTxOut& parentOut = parent->tx->vout[txin.prevout.n];
if (pwallet->IsMine(parentOut) != ISMINE_SPENDABLE)
return false;
}
return true;
}
-bool CWalletTx::IsEquivalentTo(const CWalletTx& tx) const
+bool CWalletTx::IsEquivalentTo(const CWalletTx& _tx) const
{
- CMutableTransaction tx1 = *this;
- CMutableTransaction tx2 = tx;
+ CMutableTransaction tx1 = *this->tx;
+ CMutableTransaction tx2 = *_tx.tx;
for (unsigned int i = 0; i < tx1.vin.size(); i++) tx1.vin[i].scriptSig = CScript();
for (unsigned int i = 0; i < tx2.vin.size(); i++) tx2.vin[i].scriptSig = CScript();
return CTransaction(tx1) == CTransaction(tx2);
@@ -1957,10 +1957,10 @@ void CWallet::AvailableCoins(vector<COutput>& vCoins, bool fOnlyConfirmed, const
if (nDepth == 0 && !pcoin->InMempool())
continue;
- for (unsigned int i = 0; i < pcoin->vout.size(); i++) {
- isminetype mine = IsMine(pcoin->vout[i]);
+ for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) {
+ isminetype mine = IsMine(pcoin->tx->vout[i]);
if (!(IsSpent(wtxid, i)) && mine != ISMINE_NO &&
- !IsLockedCoin((*it).first, i) && (pcoin->vout[i].nValue > 0 || fIncludeZeroValue) &&
+ !IsLockedCoin((*it).first, i) && (pcoin->tx->vout[i].nValue > 0 || fIncludeZeroValue) &&
(!coinControl || !coinControl->HasSelected() || coinControl->fAllowOtherInputs || coinControl->IsSelected(COutPoint((*it).first, i))))
vCoins.push_back(COutput(pcoin, i, nDepth,
((mine & ISMINE_SPENDABLE) != ISMINE_NO) ||
@@ -2043,7 +2043,7 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, int nConfMine, int
continue;
int i = output.i;
- CAmount n = pcoin->vout[i].nValue;
+ CAmount n = pcoin->tx->vout[i].nValue;
pair<CAmount,pair<const CWalletTx*,unsigned int> > coin = make_pair(n,make_pair(pcoin, i));
@@ -2130,7 +2130,7 @@ bool CWallet::SelectCoins(const vector<COutput>& vAvailableCoins, const CAmount&
{
if (!out.fSpendable)
continue;
- nValueRet += out.tx->vout[out.i].nValue;
+ nValueRet += out.tx->tx->vout[out.i].nValue;
setCoinsRet.insert(make_pair(out.tx, out.i));
}
return (nValueRet >= nTargetValue);
@@ -2150,9 +2150,9 @@ bool CWallet::SelectCoins(const vector<COutput>& vAvailableCoins, const CAmount&
{
const CWalletTx* pcoin = &it->second;
// Clearly invalid input, fail
- if (pcoin->vout.size() <= outpoint.n)
+ if (pcoin->tx->vout.size() <= outpoint.n)
return false;
- nValueFromPresetInputs += pcoin->vout[outpoint.n].nValue;
+ nValueFromPresetInputs += pcoin->tx->vout[outpoint.n].nValue;
setPresetCoins.insert(make_pair(pcoin, outpoint.n));
} else
return false; // TODO: Allow non-wallet inputs
@@ -2208,10 +2208,10 @@ bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, bool ov
return false;
if (nChangePosInOut != -1)
- tx.vout.insert(tx.vout.begin() + nChangePosInOut, wtx.vout[nChangePosInOut]);
+ tx.vout.insert(tx.vout.begin() + nChangePosInOut, wtx.tx->vout[nChangePosInOut]);
// Add new txins (keeping original txin scriptSig/order)
- BOOST_FOREACH(const CTxIn& txin, wtx.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
{
if (!coinControl.IsSelected(txin.prevout))
{
@@ -2238,7 +2238,7 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
{
if (nValue < 0 || recipient.nAmount < 0)
{
- strFailReason = _("Transaction amounts must be positive");
+ strFailReason = _("Transaction amounts must not be negative");
return false;
}
nValue += recipient.nAmount;
@@ -2246,9 +2246,9 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
if (recipient.fSubtractFeeFromAmount)
nSubtractFeeFromAmount++;
}
- if (vecSend.empty() || nValue < 0)
+ if (vecSend.empty())
{
- strFailReason = _("Transaction amounts must be positive");
+ strFailReason = _("Transaction must have at least one recipient");
return false;
}
@@ -2351,7 +2351,7 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
}
BOOST_FOREACH(PAIRTYPE(const CWalletTx*, unsigned int) pcoin, setCoins)
{
- CAmount nCredit = pcoin.first->vout[pcoin.second].nValue;
+ CAmount nCredit = pcoin.first->tx->vout[pcoin.second].nValue;
//The coin age after the next block (depth+1) is used instead of the current,
//reflecting an assumption the user would accept a bit more delay for
//a chance at a free transaction.
@@ -2466,10 +2466,10 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
BOOST_FOREACH(const PAIRTYPE(const CWalletTx*,unsigned int)& coin, setCoins)
{
bool signSuccess;
- const CScript& scriptPubKey = coin.first->vout[coin.second].scriptPubKey;
+ const CScript& scriptPubKey = coin.first->tx->vout[coin.second].scriptPubKey;
SignatureData sigdata;
if (sign)
- signSuccess = ProduceSignature(TransactionSignatureCreator(this, &txNewConst, nIn, coin.first->vout[coin.second].nValue, SIGHASH_ALL), scriptPubKey, sigdata);
+ signSuccess = ProduceSignature(TransactionSignatureCreator(this, &txNewConst, nIn, coin.first->tx->vout[coin.second].nValue, SIGHASH_ALL), scriptPubKey, sigdata);
else
signSuccess = ProduceSignature(DummySignatureCreator(this), scriptPubKey, sigdata);
@@ -2494,16 +2494,16 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
}
// Embed the constructed transaction data in wtxNew.
- *static_cast<CTransaction*>(&wtxNew) = CTransaction(txNew);
+ wtxNew.SetTx(MakeTransactionRef(std::move(txNew)));
// Limit size
- if (GetTransactionWeight(txNew) >= MAX_STANDARD_TX_WEIGHT)
+ if (GetTransactionWeight(wtxNew) >= MAX_STANDARD_TX_WEIGHT)
{
strFailReason = _("Transaction too large");
return false;
}
- dPriority = wtxNew.ComputePriority(dPriority, nBytes);
+ dPriority = wtxNew.tx->ComputePriority(dPriority, nBytes);
// Allow to override the default confirmation target over the CoinControl instance
int currentConfirmationTarget = nTxConfirmTarget;
@@ -2555,7 +2555,7 @@ bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CCon
{
{
LOCK2(cs_main, cs_wallet);
- LogPrintf("CommitTransaction:\n%s", wtxNew.ToString());
+ LogPrintf("CommitTransaction:\n%s", wtxNew.tx->ToString());
{
// Take key pair from key pool so it won't be used again
reservekey.KeepKey();
@@ -2565,7 +2565,7 @@ bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CCon
AddToWallet(wtxNew);
// Notify that old coins are spent
- BOOST_FOREACH(const CTxIn& txin, wtxNew.vin)
+ BOOST_FOREACH(const CTxIn& txin, wtxNew.tx->vin)
{
CWalletTx &coin = mapWallet[txin.prevout.hash];
coin.BindWallet(this);
@@ -2929,7 +2929,7 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances()
{
CWalletTx *pcoin = &walletEntry.second;
- if (!CheckFinalTx(*pcoin) || !pcoin->IsTrusted())
+ if (!pcoin->IsTrusted())
continue;
if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0)
@@ -2939,15 +2939,15 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances()
if (nDepth < (pcoin->IsFromMe(ISMINE_ALL) ? 0 : 1))
continue;
- for (unsigned int i = 0; i < pcoin->vout.size(); i++)
+ for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++)
{
CTxDestination addr;
- if (!IsMine(pcoin->vout[i]))
+ if (!IsMine(pcoin->tx->vout[i]))
continue;
- if(!ExtractDestination(pcoin->vout[i].scriptPubKey, addr))
+ if(!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, addr))
continue;
- CAmount n = IsSpent(walletEntry.first, i) ? 0 : pcoin->vout[i].nValue;
+ CAmount n = IsSpent(walletEntry.first, i) ? 0 : pcoin->tx->vout[i].nValue;
if (!balances.count(addr))
balances[addr] = 0;
@@ -2969,16 +2969,16 @@ set< set<CTxDestination> > CWallet::GetAddressGroupings()
{
CWalletTx *pcoin = &walletEntry.second;
- if (pcoin->vin.size() > 0)
+ if (pcoin->tx->vin.size() > 0)
{
bool any_mine = false;
// group all input addresses with each other
- BOOST_FOREACH(CTxIn txin, pcoin->vin)
+ BOOST_FOREACH(CTxIn txin, pcoin->tx->vin)
{
CTxDestination address;
if(!IsMine(txin)) /* If this input isn't mine, ignore it */
continue;
- if(!ExtractDestination(mapWallet[txin.prevout.hash].vout[txin.prevout.n].scriptPubKey, address))
+ if(!ExtractDestination(mapWallet[txin.prevout.hash].tx->vout[txin.prevout.n].scriptPubKey, address))
continue;
grouping.insert(address);
any_mine = true;
@@ -2987,7 +2987,7 @@ set< set<CTxDestination> > CWallet::GetAddressGroupings()
// group change with input addresses
if (any_mine)
{
- BOOST_FOREACH(CTxOut txout, pcoin->vout)
+ BOOST_FOREACH(CTxOut txout, pcoin->tx->vout)
if (IsChange(txout))
{
CTxDestination txoutAddr;
@@ -3004,11 +3004,11 @@ set< set<CTxDestination> > CWallet::GetAddressGroupings()
}
// group lone addrs by themselves
- for (unsigned int i = 0; i < pcoin->vout.size(); i++)
- if (IsMine(pcoin->vout[i]))
+ for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++)
+ if (IsMine(pcoin->tx->vout[i]))
{
CTxDestination address;
- if(!ExtractDestination(pcoin->vout[i].scriptPubKey, address))
+ if(!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, address))
continue;
grouping.insert(address);
groupings.insert(grouping);
@@ -3275,7 +3275,7 @@ void CWallet::GetKeyBirthTimes(std::map<CKeyID, int64_t> &mapKeyBirth) const {
if (blit != mapBlockIndex.end() && chainActive.Contains(blit->second)) {
// ... which are already in a block
int nHeight = blit->second->nHeight;
- BOOST_FOREACH(const CTxOut &txout, wtx.vout) {
+ BOOST_FOREACH(const CTxOut &txout, wtx.tx->vout) {
// iterate over all their outputs
CAffectedKeysVisitor(*this, vAffected).Process(txout.scriptPubKey);
BOOST_FOREACH(const CKeyID &keyid, vAffected) {
@@ -3568,6 +3568,16 @@ bool CWallet::ParameterInteraction()
LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -walletbroadcast=0\n", __func__);
}
+ if (GetBoolArg("-salvagewallet", false) && SoftSetBoolArg("-rescan", true)) {
+ // Rewrite just private keys: rescan to find transactions
+ LogPrintf("%s: parameter interaction: -salvagewallet=1 -> setting -rescan=1\n", __func__);
+ }
+
+ // -zapwallettx implies a rescan
+ if (GetBoolArg("-zapwallettxes", false) && SoftSetBoolArg("-rescan", true)) {
+ LogPrintf("%s: parameter interaction: -zapwallettxes=<mode> -> setting -rescan=1\n", __func__);
+ }
+
if (GetBoolArg("-sysperms", false))
return InitError("-sysperms is not allowed in combination with enabled wallet functionality");
if (GetArg("-prune", 0) && GetBoolArg("-rescan", false))
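
The wallet.cpp hunks above all follow one pattern: CWalletTx no longer exposes vin/vout directly (it previously inherited them from CTransaction), so call sites now go through the new tx member, a CTransactionRef (std::shared_ptr<const CTransaction>), as in pcoin->tx->vout[i]. The -salvagewallet / -zapwallettxes hunks separately couple those options to -rescan through SoftSetBoolArg, which only applies when the user has not set -rescan explicitly. As a minimal, self-contained sketch of the accessor pattern, assuming simplified stand-in types (TxOutSketch, TransactionSketch, WalletTxSketch are illustrative names, not the real Bitcoin Core classes):

    // Sketch only: illustrates the "wtx.vout" -> "wtx.tx->vout" access pattern
    // introduced above, with stand-in types rather than the real wallet classes.
    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <utility>
    #include <vector>

    using Amount = int64_t;

    struct TxOutSketch {
        Amount nValue;
    };

    struct TransactionSketch {
        std::vector<TxOutSketch> vout;
    };

    // Mirrors the shape of CTransactionRef = std::shared_ptr<const CTransaction>.
    using TransactionRefSketch = std::shared_ptr<const TransactionSketch>;

    struct WalletTxSketch {
        TransactionRefSketch tx;  // shared, immutable transaction payload

        explicit WalletTxSketch(TransactionRefSketch arg) : tx(std::move(arg)) {}

        // Outputs are reached through the ref, as in "pcoin->tx->vout[i].nValue".
        Amount TotalOut() const {
            Amount total = 0;
            for (const TxOutSketch& out : tx->vout)
                total += out.nValue;
            return total;
        }
    };

    int main() {
        auto txref = std::make_shared<const TransactionSketch>(
            TransactionSketch{{{5000}, {2500}}});
        WalletTxSketch wtx(txref);
        std::cout << "total out: " << wtx.TotalOut() << "\n";  // prints 7500
    }

Holding the transaction behind a shared_ptr<const ...> keeps the payload immutable and cheap to share between components, which appears to be the motivation behind the wider CTransactionRef refactor this diff is part of.
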
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 409d817046..f7103b6a80 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -161,13 +161,14 @@ struct COutputEntry
};
/** A transaction with a merkle branch linking it to the block chain. */
-class CMerkleTx : public CTransaction
+class CMerkleTx
{
private:
/** Constant used in hashBlock to indicate tx has been abandoned */
static const uint256 ABANDON_HASH;
public:
+ CTransactionRef tx;
uint256 hashBlock;
/* An nIndex == -1 means that hashBlock (if nonzero) refers to the earliest
@@ -179,26 +180,37 @@ public:
CMerkleTx()
{
+ SetTx(MakeTransactionRef());
Init();
}
- CMerkleTx(const CTransaction& txIn) : CTransaction(txIn)
+ CMerkleTx(CTransactionRef arg)
{
+ SetTx(std::move(arg));
Init();
}
+ /** Helper conversion operator to allow passing CMerkleTx where CTransaction is expected.
+ * TODO: adapt callers and remove this operator. */
+ operator const CTransaction&() const { return *tx; }
+
void Init()
{
hashBlock = uint256();
nIndex = -1;
}
+ void SetTx(CTransactionRef arg)
+ {
+ tx = std::move(arg);
+ }
+
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
std::vector<uint256> vMerkleBranch; // For compatibility with older versions.
- READWRITE(*(CTransaction*)this);
+ READWRITE(tx);
READWRITE(hashBlock);
READWRITE(vMerkleBranch);
READWRITE(nIndex);
@@ -221,6 +233,9 @@ public:
bool hashUnset() const { return (hashBlock.IsNull() || hashBlock == ABANDON_HASH); }
bool isAbandoned() const { return (hashBlock == ABANDON_HASH); }
void setAbandoned() { hashBlock = ABANDON_HASH; }
+
+ const uint256& GetHash() const { return tx->GetHash(); }
+ bool IsCoinBase() const { return tx->IsCoinBase(); }
};
/**
@@ -267,17 +282,7 @@ public:
Init(NULL);
}
- CWalletTx(const CWallet* pwalletIn)
- {
- Init(pwalletIn);
- }
-
- CWalletTx(const CWallet* pwalletIn, const CMerkleTx& txIn) : CMerkleTx(txIn)
- {
- Init(pwalletIn);
- }
-
- CWalletTx(const CWallet* pwalletIn, const CTransaction& txIn) : CMerkleTx(txIn)
+ CWalletTx(const CWallet* pwalletIn, CTransactionRef arg) : CMerkleTx(std::move(arg))
{
Init(pwalletIn);
}
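
The wallet.h hunks change CMerkleTx from deriving from CTransaction to owning a CTransactionRef, adding SetTx(), forwarding accessors (GetHash, IsCoinBase), and a temporary conversion operator so call sites that still expect a const CTransaction& keep compiling until they are migrated (per the TODO). A rough sketch of that wrapper shape, assuming placeholder types (TxSketch, MerkleTxSketch) and omitting the real serialization machinery:

    // Sketch only: a shared_ptr-owning wrapper with a transitional conversion
    // operator, mirroring the CMerkleTx refactor above. Not the real classes.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>

    struct TxSketch {
        std::string id;
        const std::string& GetHash() const { return id; }
    };

    using TxRefSketch = std::shared_ptr<const TxSketch>;

    class MerkleTxSketch {
    public:
        TxRefSketch tx;

        MerkleTxSketch() { SetTx(std::make_shared<const TxSketch>()); }
        explicit MerkleTxSketch(TxRefSketch arg) { SetTx(std::move(arg)); }

        void SetTx(TxRefSketch arg) { tx = std::move(arg); }

        // Transitional bridge: lets the wrapper be passed where a
        // const TxSketch& is expected, as flagged by the TODO above.
        operator const TxSketch&() const { return *tx; }

        // Forwarder replacing a member that used to be inherited.
        const std::string& GetHash() const { return tx->GetHash(); }
    };

    // A legacy-style function that still takes the plain transaction type.
    static void PrintHash(const TxSketch& tx) { std::cout << tx.GetHash() << "\n"; }

    int main() {
        MerkleTxSketch mtx(std::make_shared<const TxSketch>(TxSketch{"abc123"}));
        PrintHash(mtx);                      // works via the conversion operator
        std::cout << mtx.GetHash() << "\n";  // works via the forwarder
    }

The conversion operator keeps the diff small: existing callers that pass a CMerkleTx or CWalletTx where a CTransaction is expected continue to compile, and the TODO marks the operator for removal once those callers are adapted.
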
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 43fd6a20ad..0395e4a072 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -7,7 +7,7 @@
#include "base58.h"
#include "consensus/validation.h"
-#include "main.h" // For CheckTransaction
+#include "validation.h" // For CheckTransaction
#include "protocol.h"
#include "serialize.h"
#include "sync.h"
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index a0196fe184..a7e20a8356 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -6,7 +6,7 @@
#include "zmqpublishnotifier.h"
#include "version.h"
-#include "main.h"
+#include "validation.h"
#include "streams.h"
#include "util.h"
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index b6c907980f..99d42ab526 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -3,8 +3,9 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chainparams.h"
+#include "streams.h"
#include "zmqpublishnotifier.h"
-#include "main.h"
+#include "validation.h"
#include "util.h"
static std::multimap<std::string, CZMQAbstractPublishNotifier*> mapPublishNotifiers;
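
The zmq hunks apply the same main.h -> validation.h include rename, and zmqpublishnotifier.cpp additionally includes streams.h directly instead of relying on it transitively, presumably because it serializes raw block/transaction payloads into a byte stream before publishing them. As a rough, self-contained illustration of that serialize-then-publish shape, using stand-in types (ByteStreamSketch, PublishSketch) rather than the real CDataStream or ZMQ notifier API:

    // Sketch only: "serialize a payload into a byte buffer, then hand the bytes
    // to a publisher". Stand-in types; not the real CDataStream/CZMQ classes.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct PayloadSketch {
        uint32_t version;
        std::string hex_id;
    };

    // Minimal byte-stream stand-in for a serialization buffer.
    class ByteStreamSketch {
    public:
        void Write(const void* data, size_t len) {
            const auto* p = static_cast<const uint8_t*>(data);
            buf_.insert(buf_.end(), p, p + len);
        }
        const std::vector<uint8_t>& Data() const { return buf_; }
    private:
        std::vector<uint8_t> buf_;
    };

    // Serialize the payload into the stream (native byte order, purely illustrative).
    static void Serialize(ByteStreamSketch& s, const PayloadSketch& p) {
        s.Write(&p.version, sizeof(p.version));
        s.Write(p.hex_id.data(), p.hex_id.size());
    }

    // Stand-in for sending one message on a PUB socket.
    static void PublishSketch(const std::string& topic, const std::vector<uint8_t>& body) {
        std::cout << "topic=" << topic << " bytes=" << body.size() << "\n";
    }

    int main() {
        PayloadSketch tx{1, "deadbeef"};
        ByteStreamSketch stream;
        Serialize(stream, tx);                  // stream <- raw payload bytes
        PublishSketch("rawtx", stream.Data());  // publisher only sees bytes
    }
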