-rw-r--r--  Makefile.am | 7
-rw-r--r--  README.md | 5
-rw-r--r--  contrib/README.md | 2
-rw-r--r--  contrib/devtools/README.md | 61
-rwxr-xr-x  contrib/devtools/copyright_header.py | 610
-rwxr-xr-x  contrib/devtools/fix-copyright-headers.py | 67
-rwxr-xr-x  contrib/devtools/github-merge.py | 2
-rw-r--r--  depends/hosts/darwin.mk | 2
-rw-r--r--  doc/README.md | 1
-rw-r--r--  doc/developer-notes.md | 26
-rw-r--r--  doc/files.md | 1
-rw-r--r--  doc/release-notes.md | 9
-rw-r--r--  doc/release-process.md | 5
-rw-r--r--  doc/unit-tests.md | 18
-rwxr-xr-x  qa/pull-tester/rpc-tests.py | 6
-rwxr-xr-x  qa/rpc-tests/p2p-compactblocks.py | 37
-rwxr-xr-x  qa/rpc-tests/preciousblock.py | 65
-rwxr-xr-x  qa/rpc-tests/smartfees.py | 6
-rw-r--r--  qa/rpc-tests/test_framework/authproxy.py | 13
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py | 26
-rwxr-xr-x  qa/rpc-tests/test_framework/test_framework.py | 7
-rw-r--r--  qa/rpc-tests/test_framework/util.py | 36
-rwxr-xr-x  qa/rpc-tests/wallet-dump.py | 6
-rwxr-xr-x  share/rpcuser/rpcuser.py | 2
-rw-r--r--  src/Makefile.am | 4
-rw-r--r--  src/Makefile.bench.include | 20
-rw-r--r--  src/Makefile.test.include | 2
-rw-r--r--  src/addrdb.h | 2
-rw-r--r--  src/addrman.h | 11
-rw-r--r--  src/amount.h | 2
-rw-r--r--  src/arith_uint256.cpp | 2
-rw-r--r--  src/arith_uint256.h | 2
-rw-r--r--  src/base58.cpp | 10
-rw-r--r--  src/bench/base58.cpp | 2
-rw-r--r--  src/bench/checkblock.cpp | 55
-rw-r--r--  src/bench/data/block413567.raw | bin 0 -> 999887 bytes
-rw-r--r--  src/bench/lockedpool.cpp | 47
-rw-r--r--  src/bitcoin-cli.cpp | 30
-rw-r--r--  src/bitcoin-tx.cpp | 22
-rw-r--r--  src/bitcoind.cpp | 6
-rw-r--r--  src/blockencodings.cpp | 4
-rw-r--r--  src/blockencodings.h | 12
-rw-r--r--  src/bloom.cpp | 2
-rw-r--r--  src/bloom.h | 2
-rw-r--r--  src/chain.h | 9
-rw-r--r--  src/chainparams.cpp | 10
-rw-r--r--  src/checkpoints.cpp | 10
-rw-r--r--  src/checkpoints.h | 3
-rw-r--r--  src/coins.h | 47
-rw-r--r--  src/compat/byteswap.h | 2
-rw-r--r--  src/compat/endian.h | 2
-rw-r--r--  src/compressor.h | 14
-rw-r--r--  src/consensus/params.h | 1
-rw-r--r--  src/dbwrapper.h | 15
-rw-r--r--  src/hash.h | 12
-rw-r--r--  src/init.cpp | 22
-rw-r--r--  src/key.cpp | 34
-rw-r--r--  src/key.h | 31
-rw-r--r--  src/main.cpp | 455
-rw-r--r--  src/main.h | 18
-rw-r--r--  src/memusage.h | 2
-rw-r--r--  src/merkleblock.h | 4
-rw-r--r--  src/net.cpp | 201
-rw-r--r--  src/net.h | 267
-rw-r--r--  src/netaddress.h | 6
-rw-r--r--  src/policy/fees.cpp | 170
-rw-r--r--  src/policy/fees.h | 118
-rw-r--r--  src/policy/policy.cpp | 2
-rw-r--r--  src/policy/policy.h | 2
-rw-r--r--  src/policy/rbf.cpp | 2
-rw-r--r--  src/policy/rbf.h | 2
-rw-r--r--  src/primitives/block.h | 9
-rw-r--r--  src/primitives/transaction.cpp | 5
-rw-r--r--  src/primitives/transaction.h | 26
-rw-r--r--  src/protocol.cpp | 6
-rw-r--r--  src/protocol.h | 13
-rw-r--r--  src/pubkey.h | 17
-rw-r--r--  src/qt/bitcoin.cpp | 14
-rw-r--r--  src/qt/clientmodel.cpp | 3
-rw-r--r--  src/qt/forms/modaloverlay.ui | 4
-rw-r--r--  src/qt/guiutil.cpp | 12
-rw-r--r--  src/qt/guiutil.h | 3
-rw-r--r--  src/qt/paymentrequestplus.h | 2
-rw-r--r--  src/qt/paymentserver.cpp | 2
-rw-r--r--  src/qt/recentrequeststablemodel.h | 2
-rw-r--r--  src/qt/rpcconsole.cpp | 116
-rw-r--r--  src/qt/rpcconsole.h | 4
-rw-r--r--  src/qt/walletmodel.h | 2
-rw-r--r--  src/rest.cpp | 2
-rw-r--r--  src/rpc/mining.cpp | 10
-rw-r--r--  src/rpc/misc.cpp | 43
-rw-r--r--  src/rpc/net.cpp | 2
-rw-r--r--  src/rpc/server.cpp | 2
-rw-r--r--  src/script/bitcoinconsensus.cpp | 18
-rw-r--r--  src/script/bitcoinconsensus.h | 4
-rw-r--r--  src/script/interpreter.cpp | 30
-rw-r--r--  src/serialize.h | 463
-rw-r--r--  src/streams.h | 116
-rw-r--r--  src/support/allocators/secure.h | 12
-rw-r--r--  src/support/lockedpool.cpp | 385
-rw-r--r--  src/support/lockedpool.h | 231
-rw-r--r--  src/support/pagelocker.cpp | 70
-rw-r--r--  src/support/pagelocker.h | 177
-rw-r--r--  src/test/Checkpoints_tests.cpp | 27
-rw-r--r--  src/test/DoS_tests.cpp | 20
-rw-r--r--  src/test/README.md | 47
-rw-r--r--  src/test/allocator_tests.cpp | 290
-rw-r--r--  src/test/bctest.py | 160
-rwxr-xr-x  src/test/bitcoin-util-test.py | 24
-rw-r--r--  src/test/blockencodings_tests.cpp | 2
-rw-r--r--  src/test/bloom_tests.cpp | 6
-rw-r--r--  src/test/coins_tests.cpp | 2
-rw-r--r--  src/test/crypto_tests.cpp | 2
-rw-r--r--  src/test/dbwrapper_tests.cpp | 2
-rw-r--r--  src/test/mempool_tests.cpp | 1
-rw-r--r--  src/test/merkle_tests.cpp | 2
-rw-r--r--  src/test/miner_tests.cpp | 2
-rw-r--r--  src/test/net_tests.cpp | 12
-rw-r--r--  src/test/pmt_tests.cpp | 2
-rw-r--r--  src/test/policyestimator_tests.cpp | 60
-rw-r--r--  src/test/prevector_tests.cpp | 2
-rw-r--r--  src/test/script_tests.cpp | 13
-rw-r--r--  src/test/serialize_tests.cpp | 71
-rw-r--r--  src/test/sighash_tests.cpp | 2
-rw-r--r--  src/test/skiplist_tests.cpp | 2
-rw-r--r--  src/test/test_bitcoin.cpp | 2
-rw-r--r--  src/test/uint256_tests.cpp | 46
-rw-r--r--  src/test/univalue_tests.cpp | 2
-rw-r--r--  src/test/util_tests.cpp | 2
-rw-r--r--  src/test/versionbits_tests.cpp | 2
-rw-r--r--  src/txdb.h | 2
-rw-r--r--  src/txmempool.cpp | 13
-rw-r--r--  src/txmempool.h | 3
-rw-r--r--  src/uint256.h | 9
-rw-r--r--  src/undo.h | 26
-rw-r--r--  src/version.h | 5
-rw-r--r--  src/wallet/crypter.cpp | 14
-rw-r--r--  src/wallet/crypter.h | 21
-rw-r--r--  src/wallet/rpcdump.cpp | 6
-rw-r--r--  src/wallet/test/crypto_tests.cpp | 12
-rw-r--r--  src/wallet/wallet.cpp | 83
-rw-r--r--  src/wallet/wallet.h | 41
-rw-r--r--  src/wallet/walletdb.cpp | 78
-rw-r--r--  src/wallet/walletdb.h | 7
144 files changed, 3470 insertions, 2178 deletions
diff --git a/Makefile.am b/Makefile.am
index 44fdf9c9fb..76eba51906 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -39,6 +39,11 @@ OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
OSX_QT_TRANSLATIONS = da,de,es,hu,ru,uk,zh_CN,zh_TW
DIST_DOCS = $(wildcard doc/*.md) $(wildcard doc/release-notes/*.md)
+DIST_CONTRIB = $(top_srcdir)/contrib/bitcoin-cli.bash-completion \
+ $(top_srcdir)/contrib/bitcoin-tx.bash-completion \
+ $(top_srcdir)/contrib/bitcoind.bash-completion \
+ $(top_srcdir)/contrib/init \
+ $(top_srcdir)/contrib/rpm
BIN_CHECKS=$(top_srcdir)/contrib/devtools/symbol-check.py \
$(top_srcdir)/contrib/devtools/security-check.py
@@ -211,7 +216,7 @@ endif
dist_noinst_SCRIPTS = autogen.sh
-EXTRA_DIST = $(top_srcdir)/share/genbuild.sh qa/pull-tester/rpc-tests.py qa/rpc-tests $(DIST_DOCS) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)
+EXTRA_DIST = $(top_srcdir)/share/genbuild.sh qa/pull-tester/rpc-tests.py qa/rpc-tests $(DIST_CONTRIB) $(DIST_DOCS) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)
CLEANFILES = $(OSX_DMG) $(BITCOIN_WIN_INSTALLER)
diff --git a/README.md b/README.md
index 3c41649c1b..38a90dde49 100644
--- a/README.md
+++ b/README.md
@@ -49,9 +49,10 @@ lots of money.
### Automated Testing
-Developers are strongly encouraged to write [unit tests](/doc/unit-tests.md) for new code, and to
+Developers are strongly encouraged to write [unit tests](src/test/README.md) for new code, and to
submit new unit tests for old code. Unit tests can be compiled and run
-(assuming they weren't disabled in configure) with: `make check`
+(assuming they weren't disabled in configure) with: `make check`. Further details on running
+and extending unit tests can be found in [/src/test/README.md](/src/test/README.md).
There are also [regression and integration tests](/qa) of the RPC interface, written
in Python, that are run automatically on the build server.
diff --git a/contrib/README.md b/contrib/README.md
index ab5f57587e..4ea9700f59 100644
--- a/contrib/README.md
+++ b/contrib/README.md
@@ -38,7 +38,7 @@ Scripts and notes for Mac builds.
RPM spec file for building bitcoin-core on RPM based distributions
### [Gitian-build](/contrib/gitian-build.sh) ###
-Script for running full gitian builds.
+Script for running full Gitian builds.
Test and Verify Tools
---------------------
diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md
index 60fe69e7e3..6c0047833f 100644
--- a/contrib/devtools/README.md
+++ b/contrib/devtools/README.md
@@ -24,21 +24,64 @@ the script should be called from the git root folder as follows.
```
git diff -U0 HEAD~1.. | ./contrib/devtools/clang-format-diff.py -p1 -i -v
```
+copyright\_header.py
+====================
-fix-copyright-headers.py
-========================
+Provides utilities for managing copyright headers of `The Bitcoin Core
+developers` in repository source files. It has three subcommands:
-Every year newly updated files need to have its copyright headers updated to reflect the current year.
-If you run this script from the root folder it will automatically update the year on the copyright header for all
-source files if these have a git commit from the current year.
+```
+$ ./copyright_header.py report <base_directory> [verbose]
+$ ./copyright_header.py update <base_directory>
+$ ./copyright_header.py insert <file>
+```
+Running these subcommands without arguments displays a usage string.
-For example a file changed in 2015 (with 2015 being the current year):
+copyright\_header.py report \<base\_directory\> [verbose]
+---------------------------------------------------------
-```// Copyright (c) 2009-2013 The Bitcoin Core developers```
+Produces a report of all copyright header notices found inside the source files
+of a repository. Useful to quickly visualize the state of the headers.
+Specifying `verbose` will list the full filenames of files of each category.
-would be changed to:
+copyright\_header.py update \<base\_directory\> [verbose]
+---------------------------------------------------------
+Updates all the copyright headers of `The Bitcoin Core developers` which were
+changed in a year more recent than is listed. For example:
+```
+// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
+```
+will be updated to:
+```
+// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
+```
+where `<lastModifiedYear>` is obtained from the `git log` history.
-```// Copyright (c) 2009-2015 The Bitcoin Core developers```
+This subcommand also handles copyright headers that have only a single year. In
+those cases:
+```
+// Copyright (c) <year> The Bitcoin Core developers
+```
+will be updated to:
+```
+// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
+```
+where the update is appropriate.
+
+copyright\_header.py insert \<file\>
+------------------------------------
+Inserts a copyright header for `The Bitcoin Core developers` at the top of the
+file in either Python or C++ style as determined by the file extension. If the
+file is a Python file and it has `#!` starting the first line, the header is
+inserted in the line below it.
+
+The copyright dates will be set to be `<year_introduced>-<current_year>` where
+`<year_introduced>` is according to the `git log` history. If
+`<year_introduced>` is equal to `<current_year>`, it will be set as a single
+year rather than two hyphenated years.
+
+If the file already has a copyright for `The Bitcoin Core developers`, the
+script will exit.
gen-manpages.sh
===============
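
The year-update rule documented above can be pictured with a short standalone Python sketch. This is an editorial illustration only: the regex and the `bump_header` helper are simplified stand-ins for what copyright_header.py itself implements.
```
#!/usr/bin/env python3
# Simplified sketch of the documented "update" behaviour: bump the end year of a
# "The Bitcoin Core developers" header to the last year the file was changed.
import re

HEADER_RE = re.compile(r'(// Copyright \(c\) )(20\d\d)(?:-(20\d\d))?( The Bitcoin Core developers)')

def bump_header(line, last_change_year):
    m = HEADER_RE.search(line)
    if m is None:
        return line  # nothing updatable on this line
    start_year = m.group(2)
    end_year = m.group(3) or start_year
    if end_year == last_change_year:
        return line  # already up to date
    years = start_year if start_year == last_change_year else "%s-%s" % (start_year, last_change_year)
    return HEADER_RE.sub(r'\g<1>%s\g<4>' % years, line)

print(bump_header("// Copyright (c) 2009-2013 The Bitcoin Core developers", "2016"))
# -> // Copyright (c) 2009-2016 The Bitcoin Core developers
print(bump_header("// Copyright (c) 2015 The Bitcoin Core developers", "2016"))
# -> // Copyright (c) 2015-2016 The Bitcoin Core developers
```
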
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
new file mode 100755
index 0000000000..9f35c378bf
--- /dev/null
+++ b/contrib/devtools/copyright_header.py
@@ -0,0 +1,610 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+import re
+import fnmatch
+import sys
+import subprocess
+import datetime
+import os
+
+################################################################################
+# file filtering
+################################################################################
+
+EXCLUDE = [
+ # libsecp256k1:
+ 'src/secp256k1/include/secp256k1.h',
+ 'src/secp256k1/include/secp256k1_ecdh.h',
+ 'src/secp256k1/include/secp256k1_recovery.h',
+ 'src/secp256k1/include/secp256k1_schnorr.h',
+ 'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c',
+ 'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h',
+ 'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c',
+ 'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h',
+ # auto generated:
+ 'src/univalue/lib/univalue_escapes.h',
+ 'src/qt/bitcoinstrings.cpp',
+ 'src/chainparamsseeds.h',
+ # other external copyrights:
+ 'src/tinyformat.h',
+ 'src/leveldb/util/env_win.cc',
+ 'src/crypto/ctaes/bench.c',
+ 'qa/rpc-tests/test_framework/bignum.py',
+ # python init:
+ '*__init__.py',
+]
+EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
+
+INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
+INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
+
+def applies_to_file(filename):
+ return ((EXCLUDE_COMPILED.match(filename) is None) and
+ (INCLUDE_COMPILED.match(filename) is not None))
+
+################################################################################
+# obtain list of files in repo according to INCLUDE and EXCLUDE
+################################################################################
+
+GIT_LS_CMD = 'git ls-files'
+
+def call_git_ls():
+ out = subprocess.check_output(GIT_LS_CMD.split(' '))
+ return [f for f in out.decode("utf-8").split('\n') if f != '']
+
+def get_filenames_to_examine():
+ filenames = call_git_ls()
+ return sorted([filename for filename in filenames if
+ applies_to_file(filename)])
+
+################################################################################
+# define and compile regexes for the patterns we are looking for
+################################################################################
+
+
+COPYRIGHT_WITH_C = 'Copyright \(c\)'
+COPYRIGHT_WITHOUT_C = 'Copyright'
+ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
+
+YEAR = "20[0-9][0-9]"
+YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
+YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
+ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
+ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
+ ANY_YEAR_STYLE))
+
+ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
+
+def compile_copyright_regex(copyright_style, year_style, name):
+ return re.compile('%s %s %s' % (copyright_style, year_style, name))
+
+EXPECTED_HOLDER_NAMES = [
+ "Satoshi Nakamoto\n",
+ "The Bitcoin Core developers\n",
+ "The Bitcoin Core developers \n",
+ "Bitcoin Core Developers\n",
+ "the Bitcoin Core developers\n",
+ "The Bitcoin developers\n",
+ "The LevelDB Authors\. All rights reserved\.\n",
+ "BitPay Inc\.\n",
+ "BitPay, Inc\.\n",
+ "University of Illinois at Urbana-Champaign\.\n",
+ "MarcoFalke\n",
+ "Pieter Wuille\n",
+ "Pieter Wuille +\*\n",
+ "Pieter Wuille, Gregory Maxwell +\*\n",
+ "Pieter Wuille, Andrew Poelstra +\*\n",
+ "Andrew Poelstra +\*\n",
+ "Wladimir J. van der Laan\n",
+ "Jeff Garzik\n",
+ "Diederik Huys, Pieter Wuille +\*\n",
+ "Thomas Daede, Cory Fields +\*\n",
+ "Jan-Klaas Kollhof\n",
+ "Sam Rushing\n",
+ "ArtForz -- public domain half-a-node\n",
+]
+
+DOMINANT_STYLE_COMPILED = {}
+YEAR_LIST_STYLE_COMPILED = {}
+WITHOUT_C_STYLE_COMPILED = {}
+
+for holder_name in EXPECTED_HOLDER_NAMES:
+ DOMINANT_STYLE_COMPILED[holder_name] = (
+ compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
+ YEAR_LIST_STYLE_COMPILED[holder_name] = (
+ compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
+ WITHOUT_C_STYLE_COMPILED[holder_name] = (
+ compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
+ holder_name))
+
+################################################################################
+# search file contents for copyright message of particular category
+################################################################################
+
+def get_count_of_copyrights_of_any_style_any_holder(contents):
+ return len(ANY_COPYRIGHT_COMPILED.findall(contents))
+
+def file_has_dominant_style_copyright_for_holder(contents, holder_name):
+ match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
+ return match is not None
+
+def file_has_year_list_style_copyright_for_holder(contents, holder_name):
+ match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
+ return match is not None
+
+def file_has_without_c_style_copyright_for_holder(contents, holder_name):
+ match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
+ return match is not None
+
+################################################################################
+# get file info
+################################################################################
+
+def read_file(filename):
+ return open(os.path.abspath(filename), 'r').read()
+
+def gather_file_info(filename):
+ info = {}
+ info['filename'] = filename
+ c = read_file(filename)
+ info['contents'] = c
+
+ info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
+
+ info['classified_copyrights'] = 0
+ info['dominant_style'] = {}
+ info['year_list_style'] = {}
+ info['without_c_style'] = {}
+ for holder_name in EXPECTED_HOLDER_NAMES:
+ has_dominant_style = (
+ file_has_dominant_style_copyright_for_holder(c, holder_name))
+ has_year_list_style = (
+ file_has_year_list_style_copyright_for_holder(c, holder_name))
+ has_without_c_style = (
+ file_has_without_c_style_copyright_for_holder(c, holder_name))
+ info['dominant_style'][holder_name] = has_dominant_style
+ info['year_list_style'][holder_name] = has_year_list_style
+ info['without_c_style'][holder_name] = has_without_c_style
+ if has_dominant_style or has_year_list_style or has_without_c_style:
+ info['classified_copyrights'] = info['classified_copyrights'] + 1
+ return info
+
+################################################################################
+# report execution
+################################################################################
+
+SEPARATOR = '-'.join(['' for _ in range(80)])
+
+def print_filenames(filenames, verbose):
+ if not verbose:
+ return
+ for filename in filenames:
+ print("\t%s" % filename)
+
+def print_report(file_infos, verbose):
+ print(SEPARATOR)
+ examined = [i['filename'] for i in file_infos]
+ print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
+ len(examined))
+ print_filenames(examined, verbose)
+
+ print(SEPARATOR)
+ print('')
+ zero_copyrights = [i['filename'] for i in file_infos if
+ i['all_copyrights'] == 0]
+ print("%4d with zero copyrights" % len(zero_copyrights))
+ print_filenames(zero_copyrights, verbose)
+ one_copyright = [i['filename'] for i in file_infos if
+ i['all_copyrights'] == 1]
+ print("%4d with one copyright" % len(one_copyright))
+ print_filenames(one_copyright, verbose)
+ two_copyrights = [i['filename'] for i in file_infos if
+ i['all_copyrights'] == 2]
+ print("%4d with two copyrights" % len(two_copyrights))
+ print_filenames(two_copyrights, verbose)
+ three_copyrights = [i['filename'] for i in file_infos if
+ i['all_copyrights'] == 3]
+ print("%4d with three copyrights" % len(three_copyrights))
+ print_filenames(three_copyrights, verbose)
+ four_or_more_copyrights = [i['filename'] for i in file_infos if
+ i['all_copyrights'] >= 4]
+ print("%4d with four or more copyrights" % len(four_or_more_copyrights))
+ print_filenames(four_or_more_copyrights, verbose)
+ print('')
+ print(SEPARATOR)
+ print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
+ '"<year>" or "<startYear>-<endYear>":\n')
+ for holder_name in EXPECTED_HOLDER_NAMES:
+ dominant_style = [i['filename'] for i in file_infos if
+ i['dominant_style'][holder_name]]
+ if len(dominant_style) > 0:
+ print("%4d with '%s'" % (len(dominant_style),
+ holder_name.replace('\n', '\\n')))
+ print_filenames(dominant_style, verbose)
+ print('')
+ print(SEPARATOR)
+ print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
+ '"<year1>, <year2>, ...":\n')
+ for holder_name in EXPECTED_HOLDER_NAMES:
+ year_list_style = [i['filename'] for i in file_infos if
+ i['year_list_style'][holder_name]]
+ if len(year_list_style) > 0:
+ print("%4d with '%s'" % (len(year_list_style),
+ holder_name.replace('\n', '\\n')))
+ print_filenames(year_list_style, verbose)
+ print('')
+ print(SEPARATOR)
+ print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
+ '"<startYear>-<endYear>":\n')
+ for holder_name in EXPECTED_HOLDER_NAMES:
+ without_c_style = [i['filename'] for i in file_infos if
+ i['without_c_style'][holder_name]]
+ if len(without_c_style) > 0:
+ print("%4d with '%s'" % (len(without_c_style),
+ holder_name.replace('\n', '\\n')))
+ print_filenames(without_c_style, verbose)
+
+ print('')
+ print(SEPARATOR)
+
+ unclassified_copyrights = [i['filename'] for i in file_infos if
+ i['classified_copyrights'] < i['all_copyrights']]
+ print("%d with unexpected copyright holder names" %
+ len(unclassified_copyrights))
+ print_filenames(unclassified_copyrights, verbose)
+ print(SEPARATOR)
+
+def exec_report(base_directory, verbose):
+ original_cwd = os.getcwd()
+ os.chdir(base_directory)
+ filenames = get_filenames_to_examine()
+ file_infos = [gather_file_info(f) for f in filenames]
+ print_report(file_infos, verbose)
+ os.chdir(original_cwd)
+
+################################################################################
+# report cmd
+################################################################################
+
+REPORT_USAGE = """
+Produces a report of all copyright header notices found inside the source files
+of a repository.
+
+Usage:
+ $ ./copyright_header.py report <base_directory> [verbose]
+
+Arguments:
+ <base_directory> - The base directory of a bitcoin source code repository.
+ [verbose] - Includes a list of every file of each subcategory in the report.
+"""
+
+def report_cmd(argv):
+ if len(argv) == 2:
+ sys.exit(REPORT_USAGE)
+
+ base_directory = argv[2]
+ if not os.path.exists(base_directory):
+ sys.exit("*** bad <base_directory>: %s" % base_directory)
+
+ if len(argv) == 3:
+ verbose = False
+ elif argv[3] == 'verbose':
+ verbose = True
+ else:
+ sys.exit("*** unknown argument: %s" % argv[2])
+
+ exec_report(base_directory, verbose)
+
+################################################################################
+# query git for year of last change
+################################################################################
+
+GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
+
+def call_git_log(filename):
+ out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
+ return out.decode("utf-8").split('\n')
+
+def get_git_change_years(filename):
+ git_log_lines = call_git_log(filename)
+ if len(git_log_lines) == 0:
+ return [datetime.date.today().year]
+ # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
+ return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
+
+def get_most_recent_git_change_year(filename):
+ return max(get_git_change_years(filename))
+
+################################################################################
+# read and write to file
+################################################################################
+
+def read_file_lines(filename):
+ f = open(os.path.abspath(filename), 'r')
+ file_lines = f.readlines()
+ f.close()
+ return file_lines
+
+def write_file_lines(filename, file_lines):
+ f = open(os.path.abspath(filename), 'w')
+ f.write(''.join(file_lines))
+ f.close()
+
+################################################################################
+# update header years execution
+################################################################################
+
+COPYRIGHT = 'Copyright \(c\)'
+YEAR = "20[0-9][0-9]"
+YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
+HOLDER = 'The Bitcoin Core developers'
+UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
+
+def get_updatable_copyright_line(file_lines):
+ index = 0
+ for line in file_lines:
+ if UPDATEABLE_LINE_COMPILED.search(line) is not None:
+ return index, line
+ index = index + 1
+ return None, None
+
+def parse_year_range(year_range):
+ year_split = year_range.split('-')
+ start_year = year_split[0]
+ if len(year_split) == 1:
+ return start_year, start_year
+ return start_year, year_split[1]
+
+def year_range_to_str(start_year, end_year):
+ if start_year == end_year:
+ return start_year
+ return "%s-%s" % (start_year, end_year)
+
+def create_updated_copyright_line(line, last_git_change_year):
+ copyright_splitter = 'Copyright (c) '
+ copyright_split = line.split(copyright_splitter)
+ # Preserve characters on line that are ahead of the start of the copyright
+ # notice - they are part of the comment block and vary from file-to-file.
+ before_copyright = copyright_split[0]
+ after_copyright = copyright_split[1]
+
+ space_split = after_copyright.split(' ')
+ year_range = space_split[0]
+ start_year, end_year = parse_year_range(year_range)
+ if end_year == last_git_change_year:
+ return line
+ return (before_copyright + copyright_splitter +
+ year_range_to_str(start_year, last_git_change_year) + ' ' +
+ ' '.join(space_split[1:]))
+
+def update_updatable_copyright(filename):
+ file_lines = read_file_lines(filename)
+ index, line = get_updatable_copyright_line(file_lines)
+ if not line:
+ print_file_action_message(filename, "No updatable copyright.")
+ return
+ last_git_change_year = get_most_recent_git_change_year(filename)
+ new_line = create_updated_copyright_line(line, last_git_change_year)
+ if line == new_line:
+ print_file_action_message(filename, "Copyright up-to-date.")
+ return
+ file_lines[index] = new_line
+ write_file_lines(filename, file_lines)
+ print_file_action_message(filename,
+ "Copyright updated! -> %s" % last_git_change_year)
+
+def exec_update_header_year(base_directory):
+ original_cwd = os.getcwd()
+ os.chdir(base_directory)
+ for filename in get_filenames_to_examine():
+ update_updatable_copyright(filename)
+ os.chdir(original_cwd)
+
+################################################################################
+# update cmd
+################################################################################
+
+UPDATE_USAGE = """
+Updates all the copyright headers of "The Bitcoin Core developers" which were
+changed in a year more recent than is listed. For example:
+
+// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
+
+will be updated to:
+
+// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
+
+where <lastModifiedYear> is obtained from the 'git log' history.
+
+This subcommand also handles copyright headers that have only a single year. In those cases:
+
+// Copyright (c) <year> The Bitcoin Core developers
+
+will be updated to:
+
+// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
+
+where the update is appropriate.
+
+Usage:
+ $ ./copyright_header.py update <base_directory>
+
+Arguments:
+ <base_directory> - The base directory of a bitcoin source code repository.
+"""
+
+def print_file_action_message(filename, action):
+ print("%-52s %s" % (filename, action))
+
+def update_cmd(argv):
+ if len(argv) != 3:
+ sys.exit(UPDATE_USAGE)
+
+ base_directory = argv[2]
+ if not os.path.exists(base_directory):
+ sys.exit("*** bad base_directory: %s" % base_directory)
+ exec_update_header_year(base_directory)
+
+################################################################################
+# inserted copyright header format
+################################################################################
+
+def get_header_lines(header, start_year, end_year):
+ lines = header.split('\n')[1:-1]
+ lines[0] = lines[0] % year_range_to_str(start_year, end_year)
+ return [line + '\n' for line in lines]
+
+CPP_HEADER = '''
+// Copyright (c) %s The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+'''
+
+def get_cpp_header_lines_to_insert(start_year, end_year):
+ return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
+
+PYTHON_HEADER = '''
+# Copyright (c) %s The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+'''
+
+def get_python_header_lines_to_insert(start_year, end_year):
+ return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
+
+################################################################################
+# query git for year of last change
+################################################################################
+
+def get_git_change_year_range(filename):
+ years = get_git_change_years(filename)
+ return min(years), max(years)
+
+################################################################################
+# check for existing core copyright
+################################################################################
+
+def file_already_has_core_copyright(file_lines):
+ index, _ = get_updatable_copyright_line(file_lines)
+ return index != None
+
+################################################################################
+# insert header execution
+################################################################################
+
+def file_has_hashbang(file_lines):
+ if len(file_lines) < 1:
+ return False
+ if len(file_lines[0]) <= 2:
+ return False
+ return file_lines[0][:2] == '#!'
+
+def insert_python_header(filename, file_lines, start_year, end_year):
+ if file_has_hashbang(file_lines):
+ insert_idx = 1
+ else:
+ insert_idx = 0
+ header_lines = get_python_header_lines_to_insert(start_year, end_year)
+ for line in header_lines:
+ file_lines.insert(insert_idx, line)
+ write_file_lines(filename, file_lines)
+
+def insert_cpp_header(filename, file_lines, start_year, end_year):
+ header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
+ for line in header_lines:
+ file_lines.insert(0, line)
+ write_file_lines(filename, file_lines)
+
+def exec_insert_header(filename, style):
+ file_lines = read_file_lines(filename)
+ if file_already_has_core_copyright(file_lines):
+ sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
+ % (filename))
+ start_year, end_year = get_git_change_year_range(filename)
+ if style == 'python':
+ insert_python_header(filename, file_lines, start_year, end_year)
+ else:
+ insert_cpp_header(filename, file_lines, start_year, end_year)
+
+################################################################################
+# insert cmd
+################################################################################
+
+INSERT_USAGE = """
+Inserts a copyright header for "The Bitcoin Core developers" at the top of the
+file in either Python or C++ style as determined by the file extension. If the
+file is a Python file and it has a '#!' starting the first line, the header is
+inserted in the line below it.
+
+The copyright dates will be set to be:
+
+"<year_introduced>-<current_year>"
+
+where <year_introduced> is according to the 'git log' history. If
+<year_introduced> is equal to <current_year>, the date will be set to be:
+
+"<current_year>"
+
+If the file already has a copyright for "The Bitcoin Core developers", the
+script will exit.
+
+Usage:
+ $ ./copyright_header.py insert <file>
+
+Arguments:
+ <file> - A source file in the bitcoin repository.
+"""
+
+def insert_cmd(argv):
+ if len(argv) != 3:
+ sys.exit(INSERT_USAGE)
+
+ filename = argv[2]
+ if not os.path.isfile(filename):
+ sys.exit("*** bad filename: %s" % filename)
+ _, extension = os.path.splitext(filename)
+ if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
+ sys.exit("*** cannot insert for file extension %s" % extension)
+
+ if extension == '.py':
+ style = 'python'
+ else:
+ style = 'cpp'
+ exec_insert_header(filename, style)
+
+################################################################################
+# UI
+################################################################################
+
+USAGE = """
+copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
+Core developers' in repository source files.
+
+Usage:
+ $ ./copyright_header <subcommand>
+
+Subcommands:
+ report
+ update
+ insert
+
+To see subcommand usage, run them without arguments.
+"""
+
+SUBCOMMANDS = ['report', 'update', 'insert']
+
+if __name__ == "__main__":
+ if len(sys.argv) == 1:
+ sys.exit(USAGE)
+ subcommand = sys.argv[1]
+ if subcommand not in SUBCOMMANDS:
+ sys.exit(USAGE)
+ if subcommand == 'report':
+ report_cmd(sys.argv)
+ elif subcommand == 'update':
+ update_cmd(sys.argv)
+ elif subcommand == 'insert':
+ insert_cmd(sys.argv)
diff --git a/contrib/devtools/fix-copyright-headers.py b/contrib/devtools/fix-copyright-headers.py
deleted file mode 100755
index 54836bd83f..0000000000
--- a/contrib/devtools/fix-copyright-headers.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python3
-"""
-Run this script to update all the copyright headers of files
-that were changed this year.
-
-For example:
-
-// Copyright (c) 2009-2012 The Bitcoin Core developers
-
-it will change it to
-
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-"""
-import subprocess
-import time
-import re
-
-CMD_GIT_LIST_FILES = ['git', 'ls-files']
-CMD_GIT_DATE = ['git', 'log', '--format=%ad', '--date=short', '-1']
-CMD_PERL_REGEX = ['perl', '-pi', '-e']
-REGEX_TEMPLATE = 's/(20\\d\\d)(?:-20\\d\\d)? The Bitcoin/$1-%s The Bitcoin/'
-
-FOLDERS = ["qa/", "src/"]
-EXTENSIONS = [".cpp",".h", ".py"]
-
-
-def get_git_date(file_path):
- d = subprocess.run(CMD_GIT_DATE + [file_path],
- stdout=subprocess.PIPE,
- check=True,
- universal_newlines=True).stdout
- # yyyy-mm-dd
- return d.split('-')[0]
-
-
-def skip_file(file_path):
- for ext in EXTENSIONS:
- if file_path.endswith(ext):
- return False
- else:
- return True
-
-if __name__ == "__main__":
- year = str(time.gmtime()[0])
- regex_current = re.compile("%s The Bitcoin" % year)
- n = 1
- for folder in FOLDERS:
- for file_path in subprocess.run(
- CMD_GIT_LIST_FILES + [folder],
- stdout=subprocess.PIPE,
- check=True,
- universal_newlines=True
- ).stdout.split("\n"):
- if skip_file(file_path):
- # print(file_path, "(skip)")
- continue
- git_date = get_git_date(file_path)
- if not year == git_date:
- # print(file_path, year, "(skip)")
- continue
- if regex_current.search(open(file_path, "r").read()) is not None:
- # already up to date
- # print(file_path, year, "(skip)")
- continue
- print(n, file_path, "(update to %s)" % year)
- subprocess.run(CMD_PERL_REGEX + [REGEX_TEMPLATE % year, file_path], check=True)
- n = n + 1
diff --git a/contrib/devtools/github-merge.py b/contrib/devtools/github-merge.py
index f82362fe41..aae966a8f6 100755
--- a/contrib/devtools/github-merge.py
+++ b/contrib/devtools/github-merge.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2016 Bitcoin Core Developers
+# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index 985649619f..4e58bec74e 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,4 +1,4 @@
-OSX_MIN_VERSION=10.7
+OSX_MIN_VERSION=10.8
OSX_SDK_VERSION=10.11
OSX_SDK=$(SDK_PATH)/MacOSX$(OSX_SDK_VERSION).sdk
LD64_VERSION=253.9
diff --git a/doc/README.md b/doc/README.md
index e4fa49614a..8b9c0ea262 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -53,7 +53,6 @@ The Bitcoin repo's [root README](/README.md) contains relevant information on th
- [Source Code Documentation (External Link)](https://dev.visucore.com/bitcoin/doxygen/)
- [Translation Process](translation_process.md)
- [Translation Strings Policy](translation_strings_policy.md)
-- [Unit Tests](unit-tests.md)
- [Travis CI](travis-ci.md)
- [Unauthenticated REST Interface](REST-interface.md)
- [Shared Libraries](shared-libraries.md)
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 70c0690ba3..b0794e6d30 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -331,6 +331,32 @@ Strings and formatting
- *Rationale*: Bitcoin Core uses tinyformat, which is type safe. Leave them out to avoid confusion
+Variable names
+--------------
+
+The shadowing warning (`-Wshadow`) is enabled by default. It prevents issues rising
+from using a different variable with the same name.
+
+Please name variables so that their names do not shadow variables defined in the source code.
+
+E.g. in member initializers, prepend `_` to the argument name shadowing the
+member name:
+
+```c++
+class AddressBookPage
+{
+ Mode mode;
+}
+
+AddressBookPage::AddressBookPage(Mode _mode) :
+ mode(_mode)
+...
+```
+
+When using nested cycles, do not name the inner cycle variable the same as in
+upper cycle etc.
+
+
Threads and synchronization
----------------------------
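
The nested-loop naming advice above is language-agnostic; here is a small editorial Python illustration (throwaway data, not from the codebase) of why reusing the outer name is confusing.
```
# Clear: distinct names at each nesting level.
rows = [[1, 2], [3, 4]]
for row in rows:
    for value in row:
        print(row, value)

# Confusing: the inner loop reuses `row`, so the outer value is lost after it runs.
for row in rows:
    for row in row:
        pass
    print(row)  # prints 2 then 4, not the original sub-lists
```
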
diff --git a/doc/files.md b/doc/files.md
index f7eca57dcb..928977143b 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -10,6 +10,7 @@
* db.log: wallet database log file
* debug.log: contains debug information and general logging generated by bitcoind or bitcoin-qt
* fee_estimates.dat: stores statistics used to estimate minimum transaction fees and priorities required for confirmation; since 0.10.0
+* mempool.dat: dump of the mempool's transactions; since 0.14.0.
* peers.dat: peer IP address database (custom format); since 0.7.0
* wallet.dat: personal wallet (BDB) with keys and transactions
* .cookie: session RPC authentication cookie (written at start when cookie authentication is used, deleted on shutdown): since 0.12.0
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 0463cb8a61..f511fee22e 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -48,6 +48,15 @@ Low-level RPC changes
an optional third arg, which was always ignored. Make sure to never pass more
than two arguments.
+Removal of Priority Estimation
+------------------------------
+
+- Estimation of "priority" needed for a transaction to be included within a target
+ number of blocks has been removed. The rpc calls are deprecated and will either
+ return -1 or 1e24 appropriately. The format for fee_estimates.dat has also
+ changed to no longer save these priority estimates. It will automatically be
+ converted to the new format which is not readable by prior versions of the
+ software.
0.14.0 Change log
=================
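
As a hedged illustration of the sentinel values this note describes, a client could treat both placeholders as "no estimate". The RPC name `estimatepriority` and the comparison below are assumptions based on the note, not taken from this patch.
```
# Sketch: interpret the placeholder values (-1 and 1e24) that the deprecated
# priority-estimation RPCs are said to return after this change.
def usable_priority_estimate(rpc, target_blocks):
    est = rpc.estimatepriority(target_blocks)  # assumed call name
    if est == -1 or est >= 1e24:
        return None  # estimation removed; no meaningful value available
    return est
```
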
diff --git a/doc/release-process.md b/doc/release-process.md
index 63f75fb399..61f05b0771 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -12,6 +12,7 @@ Before every minor and major release:
* Update [bips.md](bips.md) to account for changes since the last release.
* Update version in sources (see below)
* Write release notes (see below)
+* Update `src/chainparams.cpp` nMinimumChainWork with information from the getblockchaininfo rpc.
Before every major release:
@@ -279,6 +280,8 @@ bitcoin.org (see below for bitcoin.org update instructions).
- Notify BlueMatt so that he can start building [the PPAs](https://launchpad.net/~bitcoin/+archive/ubuntu/bitcoin)
- - Add release notes for the new version to the directory `doc/release-notes` in git master
+ - Archive release notes for the new version to `doc/release-notes/` (branch `master` and branch of the release)
+
+ - Create a [new GitHub release](https://github.com/bitcoin/bitcoin/releases/new) with a link to the archived release notes.
- Celebrate
diff --git a/doc/unit-tests.md b/doc/unit-tests.md
deleted file mode 100644
index afaece829c..0000000000
--- a/doc/unit-tests.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Compiling/running unit tests
-------------------------------------
-
-Unit tests will be automatically compiled if dependencies were met in `./configure`
-and tests weren't explicitly disabled.
-
-After configuring, they can be run with `make check`.
-
-To run the bitcoind tests manually, launch `src/test/test_bitcoin`.
-
-To add more bitcoind tests, add `BOOST_AUTO_TEST_CASE` functions to the existing
-.cpp files in the `test/` directory or add new .cpp files that
-implement new BOOST_AUTO_TEST_SUITE sections.
-
-To run the bitcoin-qt tests manually, launch `src/qt/test/test_bitcoin-qt`
-
-To add more bitcoin-qt tests, add them to the `src/qt/test/` directory and
-the `src/qt/test/test_main.cpp` file.
diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index 778f8d8a77..58bd00fdfc 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -246,6 +246,10 @@ class RPCTestHandler:
self.test_list = test_list
self.flags = flags
self.num_running = 0
+ # In case there is a graveyard of zombie bitcoinds, we can apply a
+ # pseudorandom offset to hopefully jump over them.
+ # (625 is PORT_RANGE/MAX_NODES)
+ self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
@@ -253,7 +257,7 @@ class RPCTestHandler:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
- port_seed = ["--portseed=%s" % len(self.test_list)]
+ port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py
index ecd1e42169..1b4c8d90e7 100755
--- a/qa/rpc-tests/p2p-compactblocks.py
+++ b/qa/rpc-tests/p2p-compactblocks.py
@@ -716,6 +716,33 @@ class CompactBlocksTest(BitcoinTestFramework):
l.last_cmpctblock.header_and_shortids.header.calc_sha256()
assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
+ # Test that we don't get disconnected if we relay a compact block with valid header,
+ # but invalid transactions.
+ def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
+ assert(len(self.utxos))
+ utxo = self.utxos[0]
+
+ block = self.build_block_with_transactions(node, utxo, 5)
+ del block.vtx[3]
+ block.hashMerkleRoot = block.calc_merkle_root()
+ if use_segwit:
+ # If we're testing with segwit, also drop the coinbase witness,
+ # but include the witness commitment.
+ add_witness_commitment(block)
+ block.vtx[0].wit.vtxinwit = []
+ block.solve()
+
+ # Now send the compact block with all transactions prefilled, and
+ # verify that we don't get disconnected.
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
+ msg = msg_cmpctblock(comp_block.to_p2p())
+ test_node.send_and_ping(msg)
+
+ # Check that the tip didn't advance
+ assert(int(node.getbestblockhash(), 16) is not block.sha256)
+ test_node.sync_with_ping()
+
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
@@ -806,6 +833,11 @@ class CompactBlocksTest(BitcoinTestFramework):
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
+ print("\tTesting handling of invalid compact blocks...")
+ self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
+
# Advance to segwit activation
print ("\nAdvancing to segwit activation\n")
self.activate_segwit(self.nodes[1])
@@ -852,6 +884,11 @@ class CompactBlocksTest(BitcoinTestFramework):
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
+ print("\tTesting handling of invalid compact blocks...")
+ self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
+
print("\tTesting invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
diff --git a/qa/rpc-tests/preciousblock.py b/qa/rpc-tests/preciousblock.py
index 854dcc7251..3cefa51c0a 100755
--- a/qa/rpc-tests/preciousblock.py
+++ b/qa/rpc-tests/preciousblock.py
@@ -8,7 +8,12 @@
#
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
+from test_framework.util import (
+ assert_equal,
+ connect_nodes_bi,
+ sync_chain,
+ sync_blocks,
+)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
@@ -33,84 +38,82 @@ def node_sync_via_rpc(nodes):
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(BitcoinTestFramework):
- def setup_chain(self):
- print("Initializing test directory "+self.options.tmpdir)
- initialize_chain_clean(self.options.tmpdir, 3)
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+ self.extra_args = [["-debug"]] * self.num_nodes
def setup_network(self):
- self.nodes = []
- self.is_network_split = False
- self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
- self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
- self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
+ self.nodes = self.setup_nodes()
def run_test(self):
print("Ensure submitblock can in principle reorg to a competing chain")
self.nodes[0].generate(1)
- assert(self.nodes[0].getblockcount() == 1)
+ assert_equal(self.nodes[0].getblockcount(), 1)
(hashY, hashZ) = self.nodes[1].generate(2)
- assert(self.nodes[1].getblockcount() == 2)
+ assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
- assert(self.nodes[0].getbestblockhash() == hashZ)
+ assert_equal(self.nodes[0].getbestblockhash(), hashZ)
print("Mine blocks A-B-C on Node 0")
(hashA, hashB, hashC) = self.nodes[0].generate(3)
- assert(self.nodes[0].getblockcount() == 5)
+ assert_equal(self.nodes[0].getblockcount(), 5)
print("Mine competing blocks E-F-G on Node 1")
(hashE, hashF, hashG) = self.nodes[1].generate(3)
- assert(self.nodes[1].getblockcount() == 5)
+ assert_equal(self.nodes[1].getblockcount(), 5)
assert(hashC != hashG)
print("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
connect_nodes_bi(self.nodes,0,1)
- assert(self.nodes[0].getbestblockhash() == hashC)
- assert(self.nodes[1].getbestblockhash() == hashG)
+ assert_equal(self.nodes[0].getbestblockhash(), hashC)
+ assert_equal(self.nodes[1].getbestblockhash(), hashG)
print("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
- assert(self.nodes[0].getbestblockhash() == hashG)
+ assert_equal(self.nodes[0].getbestblockhash(), hashG)
print("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
- assert(self.nodes[0].getbestblockhash() == hashC)
+ assert_equal(self.nodes[0].getbestblockhash(), hashC)
print("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
- assert(self.nodes[1].getbestblockhash() == hashC)
+ assert_equal(self.nodes[1].getbestblockhash(), hashC)
print("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
- assert(self.nodes[1].getbestblockhash() == hashG)
+ assert_equal(self.nodes[1].getbestblockhash(), hashG)
print("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
- assert(self.nodes[0].getbestblockhash() == hashG)
+ assert_equal(self.nodes[0].getbestblockhash(), hashG)
print("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
- assert(self.nodes[1].getbestblockhash() == hashC)
+ assert_equal(self.nodes[1].getbestblockhash(), hashC)
print("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generate(1)
- assert(self.nodes[0].getblockcount() == 6)
+ assert_equal(self.nodes[0].getblockcount(), 6)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
- assert(self.nodes[1].getbestblockhash() == hashH)
+ assert_equal(self.nodes[1].getbestblockhash(), hashH)
print("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
- assert(self.nodes[1].getbestblockhash() == hashH)
+ assert_equal(self.nodes[1].getbestblockhash(), hashH)
print("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generate(4)
- assert(self.nodes[2].getblockcount() == 6)
+ assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
print("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[0:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
- assert(self.nodes[0].getbestblockhash() == hashH)
- assert(self.nodes[1].getbestblockhash() == hashH)
- assert(self.nodes[2].getbestblockhash() == hashL)
+ assert_equal(self.nodes[0].getbestblockhash(), hashH)
+ assert_equal(self.nodes[1].getbestblockhash(), hashH)
+ assert_equal(self.nodes[2].getbestblockhash(), hashL)
print("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
- assert(self.nodes[1].getbestblockhash() == hashL)
+ assert_equal(self.nodes[1].getbestblockhash(), hashL)
print("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
- assert(self.nodes[2].getbestblockhash() == hashH)
+ assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py
index d76fba4b07..74a74f679a 100755
--- a/qa/rpc-tests/smartfees.py
+++ b/qa/rpc-tests/smartfees.py
@@ -225,9 +225,9 @@ class EstimateFeeTest(BitcoinTestFramework):
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
- sync_mempools(self.nodes[0:3],.1)
+ sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
- sync_blocks(self.nodes[0:3],.1)
+ sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
@@ -259,7 +259,7 @@ class EstimateFeeTest(BitcoinTestFramework):
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
- sync_blocks(self.nodes[0:3],.1)
+ sync_blocks(self.nodes[0:3], wait=.1)
print("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
diff --git a/qa/rpc-tests/test_framework/authproxy.py b/qa/rpc-tests/test_framework/authproxy.py
index fd7f32b5c6..9bee1962e2 100644
--- a/qa/rpc-tests/test_framework/authproxy.py
+++ b/qa/rpc-tests/test_framework/authproxy.py
@@ -1,6 +1,6 @@
"""
- Copyright 2011 Jeff Garzik
+ Copyright (c) 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
@@ -42,6 +42,7 @@ import base64
import decimal
import json
import logging
+import socket
try:
import urllib.parse as urlparse
except ImportError:
@@ -161,7 +162,15 @@ class AuthServiceProxy(object):
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
- http_response = self.__conn.getresponse()
+ try:
+ http_response = self.__conn.getresponse()
+ except socket.timeout as e:
+ raise JSONRPCException({
+ 'code': -344,
+ 'message': '%r RPC took longer than %f seconds. Consider '
+ 'using larger timeout for calls that take '
+ 'longer to return.' % (self._service_name,
+ self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
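
The timeout path added above surfaces to callers as a JSONRPCException with code -344. A minimal caller-side sketch follows; the chosen RPC and the import path assume the qa/rpc-tests layout.
```
# Sketch: handling the new -344 "RPC took longer than the socket timeout" error.
from test_framework.authproxy import JSONRPCException

def best_block_or_none(node):
    try:
        return node.getbestblockhash()
    except JSONRPCException as e:
        if e.error['code'] == -344:
            # The call outlived the proxy's socket timeout; consider creating
            # the AuthServiceProxy with a larger timeout for slow RPCs.
            print("RPC timed out:", e.error['message'])
            return None
        raise
```
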
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index 4d238c08d9..495c6bdf35 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -41,6 +41,7 @@ from test_framework.siphash import siphash256
BIP0031_VERSION = 60000
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
+MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
@@ -951,6 +952,7 @@ class msg_version(object):
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
+ self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
@@ -960,21 +962,32 @@ class msg_version(object):
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
+
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
- if self.nVersion >= 209:
- self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
- else:
- self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
+ if self.nVersion >= 209:
+ self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
+ else:
+ self.nStartingHeight = None
+
+ if self.nVersion >= 70001:
+ # Relay field is optional for version 70001 onwards
+ try:
+ self.nRelay = struct.unpack("<b", f.read(1))[0]
+ except:
+ self.nRelay = 0
+ else:
+ self.nRelay = 0
+
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
@@ -985,13 +998,14 @@ class msg_version(object):
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
+ r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
- return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
+ return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
- self.strSubVer, self.nStartingHeight)
+ self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
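
A minimal sketch of the new relay flag from a test's point of view, using only names visible in this diff (`msg_version`, `nRelay`, `MY_RELAY`); it assumes qa/rpc-tests is on the Python path.
```
# Sketch: the version message now carries an fRelay byte (BIP 37). It defaults
# to MY_RELAY (1); clearing it asks the peer not to relay transactions.
from test_framework.mininode import msg_version, MY_RELAY

ver = msg_version()
assert ver.nRelay == MY_RELAY    # default set in __init__ above
ver.nRelay = 0                   # request no tx relay until a filter is loaded
assert ver.serialize()[-1] == 0  # nRelay is the final byte of the payload
```
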
diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py
index e6fc5fd8a2..e6d3e9ab9a 100755
--- a/qa/rpc-tests/test_framework/test_framework.py
+++ b/qa/rpc-tests/test_framework/test_framework.py
@@ -139,16 +139,11 @@ class BitcoinTestFramework(object):
success = False
try:
- if not os.path.isdir(self.options.tmpdir):
- os.makedirs(self.options.tmpdir)
+ os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
-
self.setup_network()
-
self.run_test()
-
success = True
-
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index c818af4bd7..b5ef0689b4 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -121,33 +121,35 @@ def hex_str_to_bytes(hex_str):
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
-def sync_blocks(rpc_connections, wait=1, timeout=60):
+def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip
"""
maxheight = 0
while timeout > 0:
- tips = [ x.waitforblockheight(maxheight, int(wait * 1000)) for x in rpc_connections ]
- heights = [ x["height"] for x in tips ]
- if tips == [ tips[0] ]*len(tips):
- return True
- if heights == [ heights[0] ]*len(heights): #heights are the same but hashes are not
- raise AssertionError("Block sync failed")
+ tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
+ heights = [t["height"] for t in tips]
+ if tips == [tips[0]] * len(tips):
+ return
+ if heights == [heights[0]] * len(heights):
+ raise AssertionError("Block sync failed: (Hashes don't match)")
timeout -= wait
maxheight = max(heights)
- raise AssertionError("Block sync failed")
+ raise AssertionError("Block sync failed with heights: {}".format(heights))
-def sync_chain(rpc_connections, wait=1):
+def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
- while True:
- counts = [ x.getbestblockhash() for x in rpc_connections ]
- if counts == [ counts[0] ]*len(counts):
- break
+ while timeout > 0:
+ best_hash = [x.getbestblockhash() for x in rpc_connections]
+ if best_hash == [best_hash[0]]*len(best_hash):
+ return
time.sleep(wait)
+ timeout -= wait
+ raise AssertionError("Chain sync failed: Best block hashes don't match")
-def sync_mempools(rpc_connections, wait=1, timeout=60):
+def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
@@ -159,7 +161,7 @@ def sync_mempools(rpc_connections, wait=1, timeout=60):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
- return True
+ return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
@@ -341,7 +343,7 @@ def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=
return proxy
-def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
+def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
@@ -350,7 +352,7 @@ def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
rpcs = []
try:
for i in range(num_nodes):
- rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
+ rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
diff --git a/qa/rpc-tests/wallet-dump.py b/qa/rpc-tests/wallet-dump.py
index a37096a40c..c6dc2e3d10 100755
--- a/qa/rpc-tests/wallet-dump.py
+++ b/qa/rpc-tests/wallet-dump.py
@@ -61,7 +61,11 @@ class WalletDumpTest(BitcoinTestFramework):
self.extra_args = [["-keypool=90"]]
def setup_network(self, split=False):
- self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+ # Use 1 minute timeout because the initial getnewaddress RPC can take
+ # longer than the default 30 seconds due to an expensive
+ # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
+ # the test often takes even longer.
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)
def run_test (self):
tmpdir = self.options.tmpdir
diff --git a/share/rpcuser/rpcuser.py b/share/rpcuser/rpcuser.py
index 9fd176908b..f806a810e0 100755
--- a/share/rpcuser/rpcuser.py
+++ b/share/rpcuser/rpcuser.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python2
-# Copyright (c) 2015 The Bitcoin Core developers
+# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/Makefile.am b/src/Makefile.am
index ab3104ec63..5a5e3abcfa 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -132,7 +132,7 @@ BITCOIN_CORE_H = \
support/allocators/secure.h \
support/allocators/zeroafterfree.h \
support/cleanse.h \
- support/pagelocker.h \
+ support/lockedpool.h \
sync.h \
threadsafety.h \
timedata.h \
@@ -310,7 +310,7 @@ libbitcoin_common_a_SOURCES = \
libbitcoin_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_util_a_SOURCES = \
- support/pagelocker.cpp \
+ support/lockedpool.cpp \
chainparamsbase.cpp \
clientversion.cpp \
compat/glibc_sanity.cpp \
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index c83432e91a..840d33c1b5 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -6,18 +6,25 @@ bin_PROGRAMS += bench/bench_bitcoin
BENCH_SRCDIR = bench
BENCH_BINARY = bench/bench_bitcoin$(EXEEXT)
+RAW_TEST_FILES = \
+ bench/data/block413567.raw
+GENERATED_TEST_FILES = $(RAW_TEST_FILES:.raw=.raw.h)
bench_bench_bitcoin_SOURCES = \
bench/bench_bitcoin.cpp \
bench/bench.cpp \
bench/bench.h \
+ bench/checkblock.cpp \
bench/Examples.cpp \
bench/rollingbloom.cpp \
bench/crypto_hash.cpp \
bench/ccoins_caching.cpp \
bench/mempool_eviction.cpp \
bench/verify_script.cpp \
- bench/base58.cpp
+ bench/base58.cpp \
+ bench/lockedpool.cpp
+
+nodist_bench_bench_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
bench_bench_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CLFAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/
bench_bench_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
@@ -44,10 +51,12 @@ endif
bench_bench_bitcoin_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
bench_bench_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-CLEAN_BITCOIN_BENCH = bench/*.gcda bench/*.gcno
+CLEAN_BITCOIN_BENCH = bench/*.gcda bench/*.gcno $(GENERATED_TEST_FILES)
CLEANFILES += $(CLEAN_BITCOIN_BENCH)
+bench/checkblock.cpp: bench/data/block413567.raw.h
+
bitcoin_bench: $(BENCH_BINARY)
bench: $(BENCH_BINARY) FORCE
@@ -55,3 +64,10 @@ bench: $(BENCH_BINARY) FORCE
bitcoin_bench_clean : FORCE
rm -f $(CLEAN_BITCOIN_BENCH) $(bench_bench_bitcoin_OBJECTS) $(BENCH_BINARY)
+
+%.raw.h: %.raw
+ @$(MKDIR_P) $(@D)
+ @echo "static unsigned const char $(*F)[] = {" >> $@
+ @$(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x  ,//g' >> $@
+ @echo "};" >> $@
+ @echo "Generated $@"
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 5ce1bbb896..fa610e300c 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -50,7 +50,6 @@ BITCOIN_TESTS =\
test/bip32_tests.cpp \
test/blockencodings_tests.cpp \
test/bloom_tests.cpp \
- test/Checkpoints_tests.cpp \
test/coins_tests.cpp \
test/compress_tests.cpp \
test/crypto_tests.cpp \
@@ -85,6 +84,7 @@ BITCOIN_TESTS =\
test/streams_tests.cpp \
test/test_bitcoin.cpp \
test/test_bitcoin.h \
+ test/test_random.h \
test/testutil.cpp \
test/testutil.h \
test/timedata_tests.cpp \
diff --git a/src/addrdb.h b/src/addrdb.h
index 62835a6fb4..339943ca5a 100644
--- a/src/addrdb.h
+++ b/src/addrdb.h
@@ -46,7 +46,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(this->nVersion);
READWRITE(nCreateTime);
READWRITE(nBanUntil);
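The two-argument SerializationOp seen here recurs throughout this diff: the serialization type and version now travel with the stream instead of being threaded through every call. A minimal sketch of the new pattern, assuming serialize.h's ADD_SERIALIZE_METHODS/READWRITE machinery and a stream that exposes GetType()/GetVersion() (as CDataStream and CHashWriter do after this change); CExample and its members are hypothetical:

    class CExample
    {
    public:
        int nVersion;
        uint256 hash;

        ADD_SERIALIZE_METHODS;

        template <typename Stream, typename Operation>
        inline void SerializationOp(Stream& s, Operation ser_action) {
            // No nType/nVersion parameters any more; ask the stream when needed.
            if (!(s.GetType() & SER_GETHASH))
                READWRITE(VARINT(this->nVersion));
            READWRITE(hash);
        }
    };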
diff --git a/src/addrman.h b/src/addrman.h
index e9e137c978..cabacbbea9 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -58,7 +58,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(*(CAddress*)this);
READWRITE(source);
READWRITE(nLastSuccess);
@@ -293,7 +293,7 @@ public:
* very little in common.
*/
template<typename Stream>
- void Serialize(Stream &s, int nType, int nVersionDummy) const
+ void Serialize(Stream &s) const
{
LOCK(cs);
@@ -343,7 +343,7 @@ public:
}
template<typename Stream>
- void Unserialize(Stream& s, int nType, int nVersionDummy)
+ void Unserialize(Stream& s)
{
LOCK(cs);
@@ -448,11 +448,6 @@ public:
Check();
}
- unsigned int GetSerializeSize(int nType, int nVersion) const
- {
- return (CSizeComputer(nType, nVersion) << *this).size();
- }
-
void Clear()
{
std::vector<int>().swap(vRandom);
diff --git a/src/amount.h b/src/amount.h
index 5e52f37f23..ba0c86040f 100644
--- a/src/amount.h
+++ b/src/amount.h
@@ -64,7 +64,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(nSatoshisPerK);
}
};
diff --git a/src/arith_uint256.cpp b/src/arith_uint256.cpp
index 2e61363576..a58ad01b5a 100644
--- a/src/arith_uint256.cpp
+++ b/src/arith_uint256.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2014 The Bitcoin developers
+// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index ba3d620158..5cc52f6e72 100644
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin developers
+// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/base58.cpp b/src/base58.cpp
index d1d60a6f1d..f7768b5a64 100644
--- a/src/base58.cpp
+++ b/src/base58.cpp
@@ -25,12 +25,14 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch)
psz++;
// Skip and count leading '1's.
int zeroes = 0;
+ int length = 0;
while (*psz == '1') {
zeroes++;
psz++;
}
// Allocate enough space in big-endian base256 representation.
- std::vector<unsigned char> b256(strlen(psz) * 733 / 1000 + 1); // log(58) / log(256), rounded up.
+ int size = strlen(psz) * 733 /1000 + 1; // log(58) / log(256), rounded up.
+ std::vector<unsigned char> b256(size);
// Process the characters.
while (*psz && !isspace(*psz)) {
// Decode base58 character
@@ -39,12 +41,14 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch)
return false;
// Apply "b256 = b256 * 58 + ch".
int carry = ch - pszBase58;
- for (std::vector<unsigned char>::reverse_iterator it = b256.rbegin(); it != b256.rend(); it++) {
+ int i = 0;
+ for (std::vector<unsigned char>::reverse_iterator it = b256.rbegin(); (carry != 0 || i < length) && (it != b256.rend()); ++it, ++i) {
carry += 58 * (*it);
*it = carry % 256;
carry /= 256;
}
assert(carry == 0);
+ length = i;
psz++;
}
// Skip trailing spaces.
@@ -53,7 +57,7 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch)
if (*psz != 0)
return false;
// Skip leading zeroes in b256.
- std::vector<unsigned char>::iterator it = b256.begin();
+ std::vector<unsigned char>::iterator it = b256.begin() + (size - length);
while (it != b256.end() && *it == 0)
it++;
// Copy result into output vector.
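The new length counter records how many trailing bytes of b256 are significant, so each multiply-and-add pass stops once the carry is exhausted instead of sweeping the entire buffer, and the final zero-skip can start at size - length. A standalone sketch of that inner step under the same representation (a big-endian byte vector), with hypothetical names; DecodeBase58 applies it once per input character with base 58:

    #include <cassert>
    #include <vector>

    // Multiply the big-endian number in `num` by `base` and add `digit`,
    // touching only bytes that can actually change. `length` is the count
    // of currently significant bytes at the tail of `num`.
    static int MulAdd(std::vector<unsigned char>& num, int length, int base, int digit)
    {
        int carry = digit;
        int i = 0;
        for (auto it = num.rbegin(); (carry != 0 || i < length) && it != num.rend(); ++it, ++i) {
            carry += base * (*it);
            *it = carry % 256;
            carry /= 256;
        }
        assert(carry == 0); // num must be sized large enough to hold the result
        return i;           // the new significant length
    }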
diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp
index 1279c3e7df..a791b5b7fa 100644
--- a/src/bench/base58.cpp
+++ b/src/bench/base58.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 the Bitcoin Core developers
+// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
new file mode 100644
index 0000000000..bb596ce7f9
--- /dev/null
+++ b/src/bench/checkblock.cpp
@@ -0,0 +1,55 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "bench.h"
+
+#include "chainparams.h"
+#include "main.h"
+#include "consensus/validation.h"
+
+namespace block_bench {
+#include "bench/data/block413567.raw.h"
+}
+
+// These are the two major time-sinks which happen after we have fully received
+// a block off the wire, but before we can relay the block on to peers using
+// compact block relay.
+
+static void DeserializeBlockTest(benchmark::State& state)
+{
+ CDataStream stream((const char*)block_bench::block413567,
+ (const char*)&block_bench::block413567[sizeof(block_bench::block413567)],
+ SER_NETWORK, PROTOCOL_VERSION);
+ char a;
+ stream.write(&a, 1); // Prevent compaction
+
+ while (state.KeepRunning()) {
+ CBlock block;
+ stream >> block;
+ assert(stream.Rewind(sizeof(block_bench::block413567)));
+ }
+}
+
+static void DeserializeAndCheckBlockTest(benchmark::State& state)
+{
+ CDataStream stream((const char*)block_bench::block413567,
+ (const char*)&block_bench::block413567[sizeof(block_bench::block413567)],
+ SER_NETWORK, PROTOCOL_VERSION);
+ char a;
+ stream.write(&a, 1); // Prevent compaction
+
+ Consensus::Params params = Params(CBaseChainParams::MAIN).GetConsensus();
+
+ while (state.KeepRunning()) {
+ CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here
+ stream >> block;
+ assert(stream.Rewind(sizeof(block_bench::block413567)));
+
+ CValidationState state;
+ assert(CheckBlock(block, state, params));
+ }
+}
+
+BENCHMARK(DeserializeBlockTest);
+BENCHMARK(DeserializeAndCheckBlockTest);
diff --git a/src/bench/data/block413567.raw b/src/bench/data/block413567.raw
new file mode 100644
index 0000000000..67d2d5d382
--- /dev/null
+++ b/src/bench/data/block413567.raw
Binary files differ
diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp
new file mode 100644
index 0000000000..5df5b1ac6e
--- /dev/null
+++ b/src/bench/lockedpool.cpp
@@ -0,0 +1,47 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "bench.h"
+
+#include "support/lockedpool.h"
+
+#include <iostream>
+#include <vector>
+
+#define ASIZE 2048
+#define BITER 5000
+#define MSIZE 2048
+
+static void LockedPool(benchmark::State& state)
+{
+ void *synth_base = reinterpret_cast<void*>(0x08000000);
+ const size_t synth_size = 1024*1024;
+ Arena b(synth_base, synth_size, 16);
+
+ std::vector<void*> addr;
+ for (int x=0; x<ASIZE; ++x)
+ addr.push_back(0);
+ uint32_t s = 0x12345678;
+ while (state.KeepRunning()) {
+ for (int x=0; x<BITER; ++x) {
+ int idx = s & (addr.size()-1);
+ if (s & 0x80000000) {
+ b.free(addr[idx]);
+ addr[idx] = 0;
+ } else if(!addr[idx]) {
+ addr[idx] = b.alloc((s >> 16) & (MSIZE-1));
+ }
+ bool lsb = s & 1;
+ s >>= 1;
+ if (lsb)
+ s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
+ }
+ }
+ for (void *ptr: addr)
+ b.free(ptr);
+ addr.clear();
+}
+
+BENCHMARK(LockedPool);
+
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 2d66448d80..392d1b9329 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -28,6 +28,7 @@ using namespace std;
static const char DEFAULT_RPCCONNECT[] = "127.0.0.1";
static const int DEFAULT_HTTP_CLIENT_TIMEOUT=900;
+static const int CONTINUE_EXECUTION=-1;
std::string HelpMessageCli()
{
@@ -67,7 +68,11 @@ public:
};
-static bool AppInitRPC(int argc, char* argv[])
+//
+// This function returns either one of EXIT_ codes when it's expected to stop the process or
+// CONTINUE_EXECUTION when it's expected to continue further.
+//
+static int AppInitRPC(int argc, char* argv[])
{
//
// Parameters
@@ -85,31 +90,35 @@ static bool AppInitRPC(int argc, char* argv[])
}
fprintf(stdout, "%s", strUsage.c_str());
- return false;
+ if (argc < 2) {
+ fprintf(stderr, "Error: too few parameters\n");
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
}
if (!boost::filesystem::is_directory(GetDataDir(false))) {
fprintf(stderr, "Error: Specified data directory \"%s\" does not exist.\n", mapArgs["-datadir"].c_str());
- return false;
+ return EXIT_FAILURE;
}
try {
ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME), mapArgs, mapMultiArgs);
} catch (const std::exception& e) {
fprintf(stderr,"Error reading configuration file: %s\n", e.what());
- return false;
+ return EXIT_FAILURE;
}
// Check for -testnet or -regtest parameter (BaseParams() calls are only valid after this clause)
try {
SelectBaseParams(ChainNameFromCommandLine());
} catch (const std::exception& e) {
fprintf(stderr, "Error: %s\n", e.what());
- return false;
+ return EXIT_FAILURE;
}
if (GetBoolArg("-rpcssl", false))
{
fprintf(stderr, "Error: SSL mode for RPC (-rpcssl) is no longer supported.\n");
- return false;
+ return EXIT_FAILURE;
}
- return true;
+ return CONTINUE_EXECUTION;
}
@@ -240,7 +249,7 @@ UniValue CallRPC(const string& strMethod, const UniValue& params)
event_base_free(base);
if (response.status == 0)
- throw CConnectionFailed(strprintf("couldn't connect to server (%d %s)", response.error, http_errorstring(response.error)));
+ throw CConnectionFailed(strprintf("couldn't connect to server\n(make sure server is running and you are connecting to the correct RPC port: %d %s)", response.error, http_errorstring(response.error)));
else if (response.status == HTTP_UNAUTHORIZED)
throw runtime_error("incorrect rpcuser or rpcpassword (authorization failed)");
else if (response.status >= 400 && response.status != HTTP_BAD_REQUEST && response.status != HTTP_NOT_FOUND && response.status != HTTP_INTERNAL_SERVER_ERROR)
@@ -354,8 +363,9 @@ int main(int argc, char* argv[])
}
try {
- if(!AppInitRPC(argc, argv))
- return EXIT_FAILURE;
+ int ret = AppInitRPC(argc, argv);
+ if (ret != CONTINUE_EXECUTION)
+ return ret;
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "AppInitRPC()");
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index e09afd632e..6c66efcc9c 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -30,8 +30,13 @@ using namespace std;
static bool fCreateBlank;
static map<string,UniValue> registers;
+static const int CONTINUE_EXECUTION=-1;
-static bool AppInitRawTx(int argc, char* argv[])
+//
+// This function returns either one of EXIT_ codes when it's expected to stop the process or
+// CONTINUE_EXECUTION when it's expected to continue further.
+//
+static int AppInitRawTx(int argc, char* argv[])
{
//
// Parameters
@@ -43,7 +48,7 @@ static bool AppInitRawTx(int argc, char* argv[])
SelectParams(ChainNameFromCommandLine());
} catch (const std::exception& e) {
fprintf(stderr, "Error: %s\n", e.what());
- return false;
+ return EXIT_FAILURE;
}
fCreateBlank = GetBoolArg("-create", false);
@@ -89,9 +94,13 @@ static bool AppInitRawTx(int argc, char* argv[])
strUsage += HelpMessageOpt("set=NAME:JSON-STRING", _("Set register NAME to given JSON-STRING"));
fprintf(stdout, "%s", strUsage.c_str());
- return false;
+ if (argc < 2) {
+ fprintf(stderr, "Error: too few parameters\n");
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
}
- return true;
+ return CONTINUE_EXECUTION;
}
static void RegisterSetJson(const string& key, const string& rawJson)
@@ -678,8 +687,9 @@ int main(int argc, char* argv[])
SetupEnvironment();
try {
- if(!AppInitRawTx(argc, argv))
- return EXIT_FAILURE;
+ int ret = AppInitRawTx(argc, argv);
+ if (ret != CONTINUE_EXECUTION)
+ return ret;
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "AppInitRawTx()");
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 351463c256..3352a76de6 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -92,7 +92,7 @@ bool AppInit(int argc, char* argv[])
}
fprintf(stdout, "%s", strUsage.c_str());
- return false;
+ return true;
}
try
@@ -126,7 +126,7 @@ bool AppInit(int argc, char* argv[])
if (fCommandLine)
{
fprintf(stderr, "Error: There is no RPC client functionality in bitcoind anymore. Use the bitcoin-cli utility instead.\n");
- exit(1);
+ exit(EXIT_FAILURE);
}
if (GetBoolArg("-daemon", false))
{
@@ -177,5 +177,5 @@ int main(int argc, char* argv[])
// Connect bitcoind signal handlers
noui_connect();
- return (AppInit(argc, argv) ? 0 : 1);
+ return (AppInit(argc, argv) ? EXIT_SUCCESS : EXIT_FAILURE);
}
diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
index 93d3fa372b..dbed90583d 100644
--- a/src/blockencodings.cpp
+++ b/src/blockencodings.cpp
@@ -131,7 +131,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
break;
}
- LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), cmpctblock.GetSerializeSize(SER_NETWORK, PROTOCOL_VERSION));
+ LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));
return READ_STATUS_OK;
}
@@ -167,7 +167,7 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
// check its own merkle root and cache that check.
if (state.CorruptionPossible())
return READ_STATUS_FAILED; // Possible Short ID collision
- return READ_STATUS_INVALID;
+ return READ_STATUS_CHECKBLOCK_FAILED;
}
LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool and %lu txn requested\n", header.GetHash().ToString(), prefilled_count, mempool_count, vtx_missing.size());
diff --git a/src/blockencodings.h b/src/blockencodings.h
index 99b1cb140d..1f9491867a 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -21,7 +21,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(tx); //TODO: Compress tx encoding
}
};
@@ -35,7 +35,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(blockhash);
uint64_t indexes_size = (uint64_t)indexes.size();
READWRITE(COMPACTSIZE(indexes_size));
@@ -81,7 +81,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(blockhash);
uint64_t txn_size = (uint64_t)txn.size();
READWRITE(COMPACTSIZE(txn_size));
@@ -109,7 +109,7 @@ struct PrefilledTransaction {
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
uint64_t idx = index;
READWRITE(COMPACTSIZE(idx));
if (idx > std::numeric_limits<uint16_t>::max())
@@ -124,6 +124,8 @@ typedef enum ReadStatus_t
READ_STATUS_OK,
READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap
READ_STATUS_FAILED, // Failed to process object
+ READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a
+ // failure in CheckBlock.
} ReadStatus;
class CBlockHeaderAndShortTxIDs {
@@ -155,7 +157,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(header);
READWRITE(nonce);
diff --git a/src/bloom.cpp b/src/bloom.cpp
index 2677652ada..d00befc61c 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -34,7 +34,7 @@ CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int
* See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas
*/
isFull(false),
- isEmpty(false),
+ isEmpty(true),
nHashFuncs(min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)),
nTweak(nTweakIn),
nFlags(nFlagsIn)
diff --git a/src/bloom.h b/src/bloom.h
index ad6de625d8..d3a017371f 100644
--- a/src/bloom.h
+++ b/src/bloom.h
@@ -73,7 +73,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(vData);
READWRITE(nHashFuncs);
READWRITE(nTweak);
diff --git a/src/chain.h b/src/chain.h
index 46a16a3061..0aac5de5c2 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -28,7 +28,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(VARINT(nBlocks));
READWRITE(VARINT(nSize));
READWRITE(VARINT(nUndoSize));
@@ -76,7 +76,7 @@ struct CDiskBlockPos
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(VARINT(nFile));
READWRITE(VARINT(nPos));
}
@@ -357,8 +357,9 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- if (!(nType & SER_GETHASH))
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH))
READWRITE(VARINT(nVersion));
READWRITE(VARINT(nHeight));
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 5850016ae2..a57ab632e4 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -96,6 +96,9 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 1479168000; // November 15th, 2016.
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1510704000; // November 15th, 2017.
+ // The best chain should have at least this much work.
+ consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000002cb971dd56d1c583c20f90");
+
/**
* The message start string is designed to be unlikely to occur in normal data.
* The characters are rarely used upper ASCII, not valid as UTF-8, and produce
@@ -191,6 +194,9 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 1462060800; // May 1st 2016
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1493596800; // May 1st 2017
+ // The best chain should have at least this much work.
+ consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000198b4def2baa9338d6");
+
pchMessageStart[0] = 0x0b;
pchMessageStart[1] = 0x11;
pchMessageStart[2] = 0x09;
@@ -224,6 +230,7 @@ public:
fRequireStandard = false;
fMineBlocksOnDemand = false;
+
checkpointData = (CCheckpointData) {
boost::assign::map_list_of
( 546, uint256S("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")),
@@ -265,6 +272,9 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 0;
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 999999999999ULL;
+ // The best chain should have at least this much work.
+ consensus.nMinimumChainWork = uint256S("0x00");
+
pchMessageStart[0] = 0xfa;
pchMessageStart[1] = 0xbf;
pchMessageStart[2] = 0xb5;
diff --git a/src/checkpoints.cpp b/src/checkpoints.cpp
index aefddce464..d22c188c16 100644
--- a/src/checkpoints.cpp
+++ b/src/checkpoints.cpp
@@ -55,16 +55,6 @@ namespace Checkpoints {
return fWorkBefore / (fWorkBefore + fWorkAfter);
}
- int GetTotalBlocksEstimate(const CCheckpointData& data)
- {
- const MapCheckpoints& checkpoints = data.mapCheckpoints;
-
- if (checkpoints.empty())
- return 0;
-
- return checkpoints.rbegin()->first;
- }
-
CBlockIndex* GetLastCheckpoint(const CCheckpointData& data)
{
const MapCheckpoints& checkpoints = data.mapCheckpoints;
diff --git a/src/checkpoints.h b/src/checkpoints.h
index cd25ea5379..04346f35ff 100644
--- a/src/checkpoints.h
+++ b/src/checkpoints.h
@@ -19,9 +19,6 @@ struct CCheckpointData;
namespace Checkpoints
{
-//! Return conservative estimate of total number of blocks, 0 if unknown
-int GetTotalBlocksEstimate(const CCheckpointData& data);
-
//! Returns last CBlockIndex* in mapBlockIndex that is a checkpoint
CBlockIndex* GetLastCheckpoint(const CCheckpointData& data);
diff --git a/src/coins.h b/src/coins.h
index 033651a435..d295b3c940 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -153,31 +153,8 @@ public:
return fCoinBase;
}
- unsigned int GetSerializeSize(int nType, int nVersion) const {
- unsigned int nSize = 0;
- unsigned int nMaskSize = 0, nMaskCode = 0;
- CalcMaskSize(nMaskSize, nMaskCode);
- bool fFirst = vout.size() > 0 && !vout[0].IsNull();
- bool fSecond = vout.size() > 1 && !vout[1].IsNull();
- assert(fFirst || fSecond || nMaskCode);
- unsigned int nCode = 8*(nMaskCode - (fFirst || fSecond ? 0 : 1)) + (fCoinBase ? 1 : 0) + (fFirst ? 2 : 0) + (fSecond ? 4 : 0);
- // version
- nSize += ::GetSerializeSize(VARINT(this->nVersion), nType, nVersion);
- // size of header code
- nSize += ::GetSerializeSize(VARINT(nCode), nType, nVersion);
- // spentness bitmask
- nSize += nMaskSize;
- // txouts themself
- for (unsigned int i = 0; i < vout.size(); i++)
- if (!vout[i].IsNull())
- nSize += ::GetSerializeSize(CTxOutCompressor(REF(vout[i])), nType, nVersion);
- // height
- nSize += ::GetSerializeSize(VARINT(nHeight), nType, nVersion);
- return nSize;
- }
-
template<typename Stream>
- void Serialize(Stream &s, int nType, int nVersion) const {
+ void Serialize(Stream &s) const {
unsigned int nMaskSize = 0, nMaskCode = 0;
CalcMaskSize(nMaskSize, nMaskCode);
bool fFirst = vout.size() > 0 && !vout[0].IsNull();
@@ -185,33 +162,33 @@ public:
assert(fFirst || fSecond || nMaskCode);
unsigned int nCode = 8*(nMaskCode - (fFirst || fSecond ? 0 : 1)) + (fCoinBase ? 1 : 0) + (fFirst ? 2 : 0) + (fSecond ? 4 : 0);
// version
- ::Serialize(s, VARINT(this->nVersion), nType, nVersion);
+ ::Serialize(s, VARINT(this->nVersion));
// header code
- ::Serialize(s, VARINT(nCode), nType, nVersion);
+ ::Serialize(s, VARINT(nCode));
// spentness bitmask
for (unsigned int b = 0; b<nMaskSize; b++) {
unsigned char chAvail = 0;
for (unsigned int i = 0; i < 8 && 2+b*8+i < vout.size(); i++)
if (!vout[2+b*8+i].IsNull())
chAvail |= (1 << i);
- ::Serialize(s, chAvail, nType, nVersion);
+ ::Serialize(s, chAvail);
}
// txouts themself
for (unsigned int i = 0; i < vout.size(); i++) {
if (!vout[i].IsNull())
- ::Serialize(s, CTxOutCompressor(REF(vout[i])), nType, nVersion);
+ ::Serialize(s, CTxOutCompressor(REF(vout[i])));
}
// coinbase height
- ::Serialize(s, VARINT(nHeight), nType, nVersion);
+ ::Serialize(s, VARINT(nHeight));
}
template<typename Stream>
- void Unserialize(Stream &s, int nType, int nVersion) {
+ void Unserialize(Stream &s) {
unsigned int nCode = 0;
// version
- ::Unserialize(s, VARINT(this->nVersion), nType, nVersion);
+ ::Unserialize(s, VARINT(this->nVersion));
// header code
- ::Unserialize(s, VARINT(nCode), nType, nVersion);
+ ::Unserialize(s, VARINT(nCode));
fCoinBase = nCode & 1;
std::vector<bool> vAvail(2, false);
vAvail[0] = (nCode & 2) != 0;
@@ -220,7 +197,7 @@ public:
// spentness bitmask
while (nMaskCode > 0) {
unsigned char chAvail = 0;
- ::Unserialize(s, chAvail, nType, nVersion);
+ ::Unserialize(s, chAvail);
for (unsigned int p = 0; p < 8; p++) {
bool f = (chAvail & (1 << p)) != 0;
vAvail.push_back(f);
@@ -232,10 +209,10 @@ public:
vout.assign(vAvail.size(), CTxOut());
for (unsigned int i = 0; i < vAvail.size(); i++) {
if (vAvail[i])
- ::Unserialize(s, REF(CTxOutCompressor(vout[i])), nType, nVersion);
+ ::Unserialize(s, REF(CTxOutCompressor(vout[i])));
}
// coinbase height
- ::Unserialize(s, VARINT(nHeight), nType, nVersion);
+ ::Unserialize(s, VARINT(nHeight));
Cleanup();
}
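With the hand-written GetSerializeSize member removed (here and in addrman.h above), callers obtain sizes through the generic helpers, which stream the object into a byte-counting CSizeComputer. A minimal sketch, assuming serialize.h's helpers; obj stands for any populated serializable value (for example a CCoins entry holding at least one unspent output):

    size_t nSize = ::GetSerializeSize(obj, SER_DISK, CLIENT_VERSION);

    // Equivalent long form: serialize into a stream that only counts bytes.
    CSizeComputer sc(SER_DISK, CLIENT_VERSION);
    sc << obj;
    assert(sc.size() == nSize);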
diff --git a/src/compat/byteswap.h b/src/compat/byteswap.h
index 899220bdc5..07ca535728 100644
--- a/src/compat/byteswap.h
+++ b/src/compat/byteswap.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2014 The Bitcoin developers
+// Copyright (c) 2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/compat/endian.h b/src/compat/endian.h
index 6bfae42c77..f7c1f9318a 100644
--- a/src/compat/endian.h
+++ b/src/compat/endian.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2014-2015 The Bitcoin developers
+// Copyright (c) 2014-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/compressor.h b/src/compressor.h
index fa702f0dfa..961365d261 100644
--- a/src/compressor.h
+++ b/src/compressor.h
@@ -55,16 +55,8 @@ protected:
public:
CScriptCompressor(CScript &scriptIn) : script(scriptIn) { }
- unsigned int GetSerializeSize(int nType, int nVersion) const {
- std::vector<unsigned char> compr;
- if (Compress(compr))
- return compr.size();
- unsigned int nSize = script.size() + nSpecialScripts;
- return script.size() + VARINT(nSize).GetSerializeSize(nType, nVersion);
- }
-
template<typename Stream>
- void Serialize(Stream &s, int nType, int nVersion) const {
+ void Serialize(Stream &s) const {
std::vector<unsigned char> compr;
if (Compress(compr)) {
s << CFlatData(compr);
@@ -76,7 +68,7 @@ public:
}
template<typename Stream>
- void Unserialize(Stream &s, int nType, int nVersion) {
+ void Unserialize(Stream &s) {
unsigned int nSize = 0;
s >> VARINT(nSize);
if (nSize < nSpecialScripts) {
@@ -112,7 +104,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
if (!ser_action.ForRead()) {
uint64_t nVal = CompressAmount(txout.nValue);
READWRITE(VARINT(nVal));
diff --git a/src/consensus/params.h b/src/consensus/params.h
index 0e73cace83..20efc68ade 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -61,6 +61,7 @@ struct Params {
int64_t nPowTargetSpacing;
int64_t nPowTargetTimespan;
int64_t DifficultyAdjustmentInterval() const { return nPowTargetTimespan / nPowTargetSpacing; }
+ uint256 nMinimumChainWork;
};
} // namespace Consensus
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 47bdb31b5b..4a79bbd17d 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -17,6 +17,9 @@
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
+static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64;
+static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024;
+
class dbwrapper_error : public std::runtime_error
{
public:
@@ -60,12 +63,12 @@ public:
void Write(const K& key, const V& value)
{
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(ssKey.GetSerializeSize(key));
+ ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- ssValue.reserve(ssValue.GetSerializeSize(value));
+ ssValue.reserve(DBWRAPPER_PREALLOC_VALUE_SIZE);
ssValue << value;
ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent));
leveldb::Slice slValue(&ssValue[0], ssValue.size());
@@ -77,7 +80,7 @@ public:
void Erase(const K& key)
{
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(ssKey.GetSerializeSize(key));
+ ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
@@ -107,7 +110,7 @@ public:
template<typename K> void Seek(const K& key) {
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(ssKey.GetSerializeSize(key));
+ ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
piter->Seek(slKey);
@@ -200,7 +203,7 @@ public:
bool Read(const K& key, V& value) const
{
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(ssKey.GetSerializeSize(key));
+ ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
@@ -234,7 +237,7 @@ public:
bool Exists(const K& key) const
{
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- ssKey.reserve(ssKey.GetSerializeSize(key));
+ ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
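Since streams no longer offer a GetSerializeSize member, the wrapper reserves fixed upper bounds rather than sizing each key and value exactly. A minimal sketch of why 64 bytes is comfortable for keys, assuming the usual one-character-prefix-plus-uint256 layout (for example the 'c' + txid coins key); streams.h, uint256.h and the constants added above are assumed:

    CDataStream ssKey(SER_DISK, CLIENT_VERSION);
    ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);   // one 64-byte reservation, no sizing pass
    ssKey << std::make_pair('c', uint256());      // 1 prefix byte + 32 hash bytes = 33 bytes
    assert(ssKey.size() <= DBWRAPPER_PREALLOC_KEY_SIZE);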
diff --git a/src/hash.h b/src/hash.h
index db4e130ae7..94e7f5ea6c 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -132,15 +132,17 @@ class CHashWriter
private:
CHash256 ctx;
+ const int nType;
+ const int nVersion;
public:
- int nType;
- int nVersion;
CHashWriter(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {}
- CHashWriter& write(const char *pch, size_t size) {
+ int GetType() const { return nType; }
+ int GetVersion() const { return nVersion; }
+
+ void write(const char *pch, size_t size) {
ctx.Write((const unsigned char*)pch, size);
- return (*this);
}
// invalidates the object
@@ -153,7 +155,7 @@ public:
template<typename T>
CHashWriter& operator<<(const T& obj) {
// Serialize to this stream
- ::Serialize(*this, obj, nType, nVersion);
+ ::Serialize(*this, obj);
return (*this);
}
};
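CHashWriter now carries its type and version privately and hands them out through GetType()/GetVersion(), which is what allows ::Serialize above to drop its extra parameters. A minimal usage sketch, where obj is any serializable value:

    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << obj;                    // ::Serialize(ss, obj); the stream supplies type/version
    uint256 hash = ss.GetHash();  // note: GetHash() invalidates the writer

    // Or, equivalently, the existing convenience wrapper from hash.h:
    uint256 hash2 = SerializeHash(obj);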
diff --git a/src/init.cpp b/src/init.cpp
index 84b0108ea8..31e3efb459 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -207,6 +207,7 @@ void Shutdown()
StopTorControl();
UnregisterNodeSignals(GetNodeSignals());
+ DumpMempool();
if (fFeeEstimatesInitialized)
{
@@ -659,6 +660,8 @@ void ThreadImport(std::vector<boost::filesystem::path> vImportFiles)
LogPrintf("Stopping after block import\n");
StartShutdown();
}
+
+ LoadMempool();
}
/** Sanity checks
@@ -1100,6 +1103,10 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
return false;
#endif
// ********************************************************* Step 6: network initialization
+ // Note that we absolutely cannot open any actual connections
+ // until the very end ("start node") as the UTXO/block state
+ // is not yet setup and may end up being set up twice if we
+ // need to reindex later.
assert(!g_connman);
g_connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max())));
@@ -1320,7 +1327,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
CleanupBlockRevFiles();
}
- if (!LoadBlockIndex()) {
+ if (!LoadBlockIndex(chainparams)) {
strLoadError = _("Error loading block database");
break;
}
@@ -1493,13 +1500,6 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait);
}
-#ifdef ENABLE_WALLET
- // Add wallet transactions that aren't already in a block to mempool
- // Do this here as mempool requires genesis block to be loaded
- if (pwalletMain)
- pwalletMain->ReacceptWalletTransactions();
-#endif
-
// ********************************************************* Step 11: start node
//// debug print
@@ -1537,10 +1537,8 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
uiInterface.InitMessage(_("Done loading"));
#ifdef ENABLE_WALLET
- if (pwalletMain) {
- // Run a thread to flush wallet periodically
- threadGroup.create_thread(boost::bind(&ThreadFlushWalletDB, boost::ref(pwalletMain->strWalletFile)));
- }
+ if (pwalletMain)
+ pwalletMain->postInitProcess(threadGroup);
#endif
return !fRequestShutdown;
diff --git a/src/key.cpp b/src/key.cpp
index aae9b042ac..b3ea98fb92 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -125,8 +125,8 @@ bool CKey::Check(const unsigned char *vch) {
void CKey::MakeNewKey(bool fCompressedIn) {
do {
- GetStrongRandBytes(vch, sizeof(vch));
- } while (!Check(vch));
+ GetStrongRandBytes(keydata.data(), keydata.size());
+ } while (!Check(keydata.data()));
fValid = true;
fCompressed = fCompressedIn;
}
@@ -224,20 +224,18 @@ bool CKey::Load(CPrivKey &privkey, CPubKey &vchPubKey, bool fSkipCheck=false) {
bool CKey::Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode& cc) const {
assert(IsValid());
assert(IsCompressed());
- unsigned char out[64];
- LockObject(out);
+ std::vector<unsigned char, secure_allocator<unsigned char>> vout(64);
if ((nChild >> 31) == 0) {
CPubKey pubkey = GetPubKey();
assert(pubkey.begin() + 33 == pubkey.end());
- BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin()+1, out);
+ BIP32Hash(cc, nChild, *pubkey.begin(), pubkey.begin()+1, vout.data());
} else {
assert(begin() + 32 == end());
- BIP32Hash(cc, nChild, 0, begin(), out);
+ BIP32Hash(cc, nChild, 0, begin(), vout.data());
}
- memcpy(ccChild.begin(), out+32, 32);
+ memcpy(ccChild.begin(), vout.data()+32, 32);
memcpy((unsigned char*)keyChild.begin(), begin(), 32);
- bool ret = secp256k1_ec_privkey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), out);
- UnlockObject(out);
+ bool ret = secp256k1_ec_privkey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), vout.data());
keyChild.fCompressed = true;
keyChild.fValid = ret;
return ret;
@@ -253,12 +251,10 @@ bool CExtKey::Derive(CExtKey &out, unsigned int _nChild) const {
void CExtKey::SetMaster(const unsigned char *seed, unsigned int nSeedLen) {
static const unsigned char hashkey[] = {'B','i','t','c','o','i','n',' ','s','e','e','d'};
- unsigned char out[64];
- LockObject(out);
- CHMAC_SHA512(hashkey, sizeof(hashkey)).Write(seed, nSeedLen).Finalize(out);
- key.Set(&out[0], &out[32], true);
- memcpy(chaincode.begin(), &out[32], 32);
- UnlockObject(out);
+ std::vector<unsigned char, secure_allocator<unsigned char>> vout(64);
+ CHMAC_SHA512(hashkey, sizeof(hashkey)).Write(seed, nSeedLen).Finalize(vout.data());
+ key.Set(&vout[0], &vout[32], true);
+ memcpy(chaincode.begin(), &vout[32], 32);
nDepth = 0;
nChild = 0;
memset(vchFingerprint, 0, sizeof(vchFingerprint));
@@ -308,12 +304,10 @@ void ECC_Start() {
{
// Pass in a random blinding seed to the secp256k1 context.
- unsigned char seed[32];
- LockObject(seed);
- GetRandBytes(seed, 32);
- bool ret = secp256k1_context_randomize(ctx, seed);
+ std::vector<unsigned char, secure_allocator<unsigned char>> vseed(32);
+ GetRandBytes(vseed.data(), 32);
+ bool ret = secp256k1_context_randomize(ctx, vseed.data());
assert(ret);
- UnlockObject(seed);
}
secp256k1_context_sign = ctx;
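The pattern here, continued in key.h just below, replaces LockObject/UnlockObject around fixed stack buffers with vectors backed by secure_allocator, which draws from the locked pool and wipes memory on free. A minimal sketch, assuming support/allocators/secure.h:

    #include "support/allocators/secure.h"
    #include <vector>

    void UseSecret()
    {
        // Backed by locked (non-swappable) pool memory; zeroed on deallocation,
        // so no manual LockObject()/UnlockObject() pair is needed.
        std::vector<unsigned char, secure_allocator<unsigned char>> secret(32);
        // ... fill `secret` with key material and use it ...
    }   // destructor wipes the bytes and returns them to the pool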
diff --git a/src/key.h b/src/key.h
index b589710bad..ff5252b7a0 100644
--- a/src/key.h
+++ b/src/key.h
@@ -43,9 +43,7 @@ private:
bool fCompressed;
//! The actual byte data
- unsigned char vch[32];
-
- static_assert(sizeof(vch) == 32, "vch must be 32 bytes in length to not break serialization");
+ std::vector<unsigned char, secure_allocator<unsigned char> > keydata;
//! Check whether the 32-byte array pointed to be vch is valid keydata.
bool static Check(const unsigned char* vch);
@@ -54,37 +52,30 @@ public:
//! Construct an invalid private key.
CKey() : fValid(false), fCompressed(false)
{
- LockObject(vch);
- }
-
- //! Copy constructor. This is necessary because of memlocking.
- CKey(const CKey& secret) : fValid(secret.fValid), fCompressed(secret.fCompressed)
- {
- LockObject(vch);
- memcpy(vch, secret.vch, sizeof(vch));
+ // Important: vch must be 32 bytes in length to not break serialization
+ keydata.resize(32);
}
//! Destructor (again necessary because of memlocking).
~CKey()
{
- UnlockObject(vch);
}
friend bool operator==(const CKey& a, const CKey& b)
{
return a.fCompressed == b.fCompressed &&
a.size() == b.size() &&
- memcmp(&a.vch[0], &b.vch[0], a.size()) == 0;
+ memcmp(a.keydata.data(), b.keydata.data(), a.size()) == 0;
}
//! Initialize using begin and end iterators to byte data.
template <typename T>
void Set(const T pbegin, const T pend, bool fCompressedIn)
{
- if (pend - pbegin != sizeof(vch)) {
+ if (size_t(pend - pbegin) != keydata.size()) {
fValid = false;
} else if (Check(&pbegin[0])) {
- memcpy(vch, (unsigned char*)&pbegin[0], sizeof(vch));
+ memcpy(keydata.data(), (unsigned char*)&pbegin[0], keydata.size());
fValid = true;
fCompressed = fCompressedIn;
} else {
@@ -93,9 +84,9 @@ public:
}
//! Simple read-only vector-like interface.
- unsigned int size() const { return (fValid ? sizeof(vch) : 0); }
- const unsigned char* begin() const { return vch; }
- const unsigned char* end() const { return vch + size(); }
+ unsigned int size() const { return (fValid ? keydata.size() : 0); }
+ const unsigned char* begin() const { return keydata.data(); }
+ const unsigned char* end() const { return keydata.data() + size(); }
//! Check whether this private key is valid.
bool IsValid() const { return fValid; }
@@ -171,7 +162,7 @@ struct CExtKey {
CExtPubKey Neuter() const;
void SetMaster(const unsigned char* seed, unsigned int nSeedLen);
template <typename Stream>
- void Serialize(Stream& s, int nType, int nVersion) const
+ void Serialize(Stream& s) const
{
unsigned int len = BIP32_EXTKEY_SIZE;
::WriteCompactSize(s, len);
@@ -180,7 +171,7 @@ struct CExtKey {
s.write((const char *)&code[0], len);
}
template <typename Stream>
- void Unserialize(Stream& s, int nType, int nVersion)
+ void Unserialize(Stream& s)
{
unsigned int len = ::ReadCompactSize(s);
unsigned char code[BIP32_EXTKEY_SIZE];
diff --git a/src/main.cpp b/src/main.cpp
index 55e3d934ea..e868e3c5f9 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -18,6 +18,7 @@
#include "init.h"
#include "merkleblock.h"
#include "net.h"
+#include "netbase.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "pow.h"
@@ -63,7 +64,7 @@ CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = NULL;
-int64_t nTimeBestReceived = 0;
+int64_t nTimeBestReceived = 0; // Used only to inform the wallet of when we last received a block
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
@@ -179,8 +180,10 @@ namespace {
* Sources of received blocks, saved to be able to send them reject
* messages or ban them when processing happens afterwards. Protected by
* cs_main.
+ * Set mapBlockSource[hash].second to false if the node should not be
+ * punished if the block is invalid.
*/
- map<uint256, NodeId> mapBlockSource;
+ map<uint256, std::pair<NodeId, bool>> mapBlockSource;
/**
* Filter for transactions that were recently rejected by
@@ -257,7 +260,7 @@ struct CBlockReject {
*/
struct CNodeState {
//! The peer's address
- CService address;
+ const CService address;
//! Whether we have a fully established connection.
bool fCurrentlyConnected;
//! Accumulated misbehaviour score for this peer.
@@ -265,7 +268,7 @@ struct CNodeState {
//! Whether this peer should be disconnected and banned (unless whitelisted).
bool fShouldBan;
//! String name of this peer (debugging/logging purposes).
- std::string name;
+ const std::string name;
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
@@ -309,7 +312,7 @@ struct CNodeState {
*/
bool fSupportsDesiredCmpctVersion;
- CNodeState() {
+ CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
fCurrentlyConnected = false;
nMisbehavior = 0;
fShouldBan = false;
@@ -354,11 +357,36 @@ void UpdatePreferredDownload(CNode* node, CNodeState* state)
nPreferredDownload += state->fPreferredDownload;
}
-void InitializeNode(NodeId nodeid, const CNode *pnode) {
- LOCK(cs_main);
- CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
- state.name = pnode->addrName;
- state.address = pnode->addr;
+void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
+{
+ ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
+ uint64_t nonce = pnode->GetLocalNonce();
+ int nNodeStartingHeight = pnode->GetMyStartingHeight();
+ NodeId nodeid = pnode->GetId();
+ CAddress addr = pnode->addr;
+
+ CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
+ CAddress addrMe = CAddress(CService(), nLocalNodeServices);
+
+ connman.PushMessageWithVersion(pnode, INIT_PROTO_VERSION, NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
+ nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes);
+
+ if (fLogIPs)
+ LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
+ else
+ LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+}
+
+void InitializeNode(CNode *pnode, CConnman& connman) {
+ CAddress addr = pnode->addr;
+ std::string addrName = pnode->addrName;
+ NodeId nodeid = pnode->GetId();
+ {
+ LOCK(cs_main);
+ mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
+ }
+ if(!pnode->fInbound)
+ PushNodeVersion(pnode, connman, GetTime());
}
void FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
@@ -501,15 +529,15 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pf
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
// As per BIP152, we only get 3 of our peers to announce
// blocks using compact encodings.
- bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
- pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
+ connman.PushMessage(pnodeStop, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
return true;
});
if(found)
lNodesAnnouncingHeaderAndIDs.pop_front();
}
fAnnounceUsingCMPCTBLOCK = true;
- pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
}
}
@@ -691,6 +719,16 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc
CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
+enum FlushStateMode {
+ FLUSH_STATE_NONE,
+ FLUSH_STATE_IF_NEEDED,
+ FLUSH_STATE_PERIODIC,
+ FLUSH_STATE_ALWAYS
+};
+
+// See definition for documentation
+bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode);
+
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
@@ -1066,7 +1104,7 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i
-bool CheckTransaction(const CTransaction& tx, CValidationState &state)
+bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fCheckDuplicateInputs)
{
// Basic checks that don't depend on any context
if (tx.vin.empty())
@@ -1090,13 +1128,14 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
- // Check for duplicate inputs
- set<COutPoint> vInOutPoints;
- for (const auto& txin : tx.vin)
- {
- if (vInOutPoints.count(txin.prevout))
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
- vInOutPoints.insert(txin.prevout);
+ // Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock
+ if (fCheckDuplicateInputs) {
+ set<COutPoint> vInOutPoints;
+ for (const auto& txin : tx.vin)
+ {
+ if (!vInOutPoints.insert(txin.prevout).second)
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
+ }
}
if (tx.IsCoinBase())
@@ -1135,7 +1174,7 @@ std::string FormatStateMessage(const CValidationState &state)
}
bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree,
- bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee,
+ bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount& nAbsurdFee,
std::vector<uint256>& vHashTxnToUncache)
{
const uint256 hash = tx.GetHash();
@@ -1308,7 +1347,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
}
}
- CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
+ CTxMemPoolEntry entry(tx, nFees, nAcceptTime, dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
unsigned int nSize = entry.GetTxSize();
// Check that the transaction doesn't have an excessive number of
@@ -1572,18 +1611,27 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
return true;
}
-bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
- bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
+bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
+ bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
std::vector<uint256> vHashTxToUncache;
- bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
+ bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
if (!res) {
BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache)
pcoinsTip->Uncache(hashTx);
}
+ // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
+ CValidationState stateDummy;
+ FlushStateToDisk(stateDummy, FLUSH_STATE_PERIODIC);
return res;
}
+bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
+ bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
+{
+ return AcceptToMemoryPoolWithTime(pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), fOverrideMempoolLimit, nAbsurdFee);
+}
+
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
@@ -1665,7 +1713,7 @@ bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHea
return error("WriteBlockToDisk: OpenBlockFile failed");
// Write index header
- unsigned int nSize = fileout.GetSerializeSize(block);
+ unsigned int nSize = GetSerializeSize(fileout, block);
fileout << FLATDATA(messageStart) << nSize;
// Write block
@@ -1740,13 +1788,14 @@ bool IsInitialBlockDownload()
return false;
if (fImporting || fReindex)
return true;
- if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
+ if (chainActive.Tip() == NULL)
+ return true;
+ if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
+ return true;
+ if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
- bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
- std::max(chainActive.Tip()->GetBlockTime(), pindexBestHeader->GetBlockTime()) < GetTime() - nMaxTipAge);
- if (!state)
- latchToFalse.store(true, std::memory_order_relaxed);
- return state;
+ latchToFalse.store(true, std::memory_order_relaxed);
+ return false;
}
bool fLargeWorkForkFound = false;
@@ -1774,7 +1823,7 @@ void CheckForkWarningConditions()
{
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
- // (we assume we don't get stuck on a fork before the last checkpoint)
+ // (we assume we don't get stuck on a fork before finishing our initial sync)
if (IsInitialBlockDownload())
return;
@@ -2054,7 +2103,7 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint
return error("%s: OpenUndoFile failed", __func__);
// Write index header
- unsigned int nSize = fileout.GetSerializeSize(blockundo);
+ unsigned int nSize = GetSerializeSize(fileout, blockundo);
fileout << FLATDATA(messageStart) << nSize;
// Write undo data
@@ -2558,13 +2607,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
return true;
}
-enum FlushStateMode {
- FLUSH_STATE_NONE,
- FLUSH_STATE_IF_NEEDED,
- FLUSH_STATE_PERIODIC,
- FLUSH_STATE_ALWAYS
-};
-
/**
* Update the on-disk chain state.
* The caches and indexes are flushed depending on the mode we're called with
@@ -2684,7 +2726,6 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
chainActive.SetTip(pindexNew);
// New best block
- nTimeBestReceived = GetTime();
mempool.AddTransactionsUpdated(1);
cvBlockChange.notify_all();
@@ -3421,7 +3462,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// Check transactions
for (const auto& tx : block.vtx)
- if (!CheckTransaction(tx, state))
+ if (!CheckTransaction(tx, state, false))
return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage()));
@@ -3669,6 +3710,8 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
if (ppindex)
*ppindex = pindex;
+ CheckBlockIndex(chainparams.GetConsensus());
+
return true;
}
@@ -3696,6 +3739,11 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
// not process unrequested blocks.
bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
+ // TODO: Decouple this function from the block download logic by removing fRequested
+ // This requires some new chain datastructure to efficiently look up if a
+ // block is in a chain leading to a candidate for best tip, despite not
+ // being such a candidate itself.
+
// TODO: deal better with return value and error conditions for duplicate
// and unrequested blocks.
if (fAlreadyHave) return true;
@@ -3740,19 +3788,17 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
return true;
}
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool fMayBanPeerIfInvalid)
{
{
LOCK(cs_main);
- bool fRequested = MarkBlockAsReceived(pblock->GetHash());
- fRequested |= fForceProcessing;
// Store to disk
CBlockIndex *pindex = NULL;
bool fNewBlock = false;
- bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock);
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, dbp, &fNewBlock);
if (pindex && pfrom) {
- mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
+ mapBlockSource[pindex->GetBlockHash()] = std::make_pair(pfrom->GetId(), fMayBanPeerIfInvalid);
if (fNewBlock) pfrom->nLastBlockTime = GetTime();
}
CheckBlockIndex(chainparams.GetConsensus());
@@ -3966,9 +4012,8 @@ CBlockIndex * InsertBlockIndex(uint256 hash)
return pindexNew;
}
-bool static LoadBlockIndexDB()
+bool static LoadBlockIndexDB(const CChainParams& chainparams)
{
- const CChainParams& chainparams = Params();
if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex))
return false;
@@ -4263,6 +4308,9 @@ bool RewindBlockIndex(const CChainParams& params)
return true;
}
+// May NOT be used after any connections are up, as much
+// of the peer-processing logic assumes a consistent
+// block index state
void UnloadBlockIndex()
{
LOCK(cs_main);
@@ -4273,18 +4321,12 @@ void UnloadBlockIndex()
mempool.clear();
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
- nSyncStarted = 0;
mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
nBlockSequenceId = 1;
- mapBlockSource.clear();
- mapBlocksInFlight.clear();
- nPreferredDownload = 0;
setDirtyBlockIndex.clear();
setDirtyFileInfo.clear();
- mapNodeState.clear();
- recentRejects.reset(NULL);
versionbitscache.Clear();
for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
warningcache[b].clear();
@@ -4297,10 +4339,10 @@ void UnloadBlockIndex()
fHavePruned = false;
}
-bool LoadBlockIndex()
+bool LoadBlockIndex(const CChainParams& chainparams)
{
// Load block index from databases
- if (!fReindex && !LoadBlockIndexDB())
+ if (!fReindex && !LoadBlockIndexDB(chainparams))
return false;
return true;
}
@@ -4309,9 +4351,6 @@ bool InitBlockIndex(const CChainParams& chainparams)
{
LOCK(cs_main);
- // Initialize global variables that cannot be constructed at startup.
- recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
-
// Check whether we're already initialized
if (chainActive.Genesis() != NULL)
return true;
@@ -4700,6 +4739,11 @@ std::string GetWarnings(const std::string& strFor)
// blockchain -> download logic notification
//
+PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {
+ // Initialize global variables that cannot be constructed at startup.
+ recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+}
+
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
const int nNewHeight = pindexNew->nHeight;
connman->SetBestHeight(nNewHeight);
@@ -4726,22 +4770,24 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB
}
});
}
+
+ nTimeBestReceived = GetTime();
}
void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
LOCK(cs_main);
const uint256 hash(block.GetHash());
- std::map<uint256, NodeId>::iterator it = mapBlockSource.find(hash);
+ std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
int nDoS = 0;
if (state.IsInvalid(nDoS)) {
- if (it != mapBlockSource.end() && State(it->second)) {
+ if (it != mapBlockSource.end() && State(it->second.first)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
- State(it->second)->rejects.push_back(reject);
- if (nDoS > 0)
- Misbehaving(it->second, nDoS);
+ State(it->second.first)->rejects.push_back(reject);
+ if (nDoS > 0 && it->second.second)
+ Misbehaving(it->second.first, nDoS);
}
}
if (it != mapBlockSource.end())
@@ -4883,9 +4929,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
- pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+ connman.PushMessageWithFlag(pfrom, SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
else if (inv.type == MSG_WITNESS_BLOCK)
- pfrom->PushMessage(NetMsgType::BLOCK, block);
+ connman.PushMessage(pfrom, NetMsgType::BLOCK, block);
else if (inv.type == MSG_FILTERED_BLOCK)
{
bool sendMerkleBlock = false;
@@ -4898,7 +4944,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
}
}
if (sendMerkleBlock) {
- pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
+ connman.PushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
@@ -4907,7 +4953,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
- pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
+ connman.PushMessageWithFlag(pfrom, SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
}
// else
// no response
@@ -4921,9 +4967,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
- pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+ connman.PushMessageWithFlag(pfrom, fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
} else
- pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+ connman.PushMessageWithFlag(pfrom, fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
@@ -4934,7 +4980,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// wait for other stuff first.
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
- pfrom->PushMessage(NetMsgType::INV, vInv);
+ connman.PushMessage(pfrom, NetMsgType::INV, vInv);
pfrom->hashContinue.SetNull();
}
}
@@ -4945,14 +4991,14 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
bool push = false;
auto mi = mapRelay.find(inv.hash);
if (mi != mapRelay.end()) {
- pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
+ connman.PushMessageWithFlag(pfrom, inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
push = true;
} else if (pfrom->timeLastMempoolReq) {
auto txinfo = mempool.info(inv.hash);
// To protect privacy, do not answer getdata using the mempool when
// that TX couldn't have been INVed in reply to a MEMPOOL request.
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
- pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
+ connman.PushMessageWithFlag(pfrom, inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
push = true;
}
}
@@ -4979,7 +5025,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
- pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound);
+ connman.PushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
}
}
@@ -5005,8 +5051,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
(strCommand == NetMsgType::FILTERLOAD ||
- strCommand == NetMsgType::FILTERADD ||
- strCommand == NetMsgType::FILTERCLEAR))
+ strCommand == NetMsgType::FILTERADD))
{
if (pfrom->nVersion >= NO_BLOOM_VERSION) {
LOCK(cs_main);
@@ -5030,7 +5075,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
+ connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
@@ -5050,7 +5095,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (pfrom->nServicesExpected & ~pfrom->nServices)
{
LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
+ connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
strprintf("Expected to offer services %08x", pfrom->nServicesExpected));
pfrom->fDisconnect = true;
return false;
@@ -5060,7 +5105,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
+ connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
pfrom->fDisconnect = true;
return false;
@@ -5101,7 +5146,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Be shy and don't send version until we hear
if (pfrom->fInbound)
- pfrom->PushVersion();
+ PushNodeVersion(pfrom, connman, GetAdjustedTime());
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
@@ -5118,8 +5163,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
// Change version
- pfrom->PushMessage(NetMsgType::VERACK);
- pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
+ connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::VERACK);
+ pfrom->SetSendVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
if (!pfrom->fInbound)
{
@@ -5142,7 +5187,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
{
- pfrom->PushMessage(NetMsgType::GETADDR);
+ connman.PushMessage(pfrom, NetMsgType::GETADDR);
pfrom->fGetAddr = true;
}
connman.MarkAddressGood(pfrom->addr);
@@ -5189,7 +5234,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
- pfrom->PushMessage(NetMsgType::SENDHEADERS);
+ connman.PushMessage(pfrom, NetMsgType::SENDHEADERS);
}
if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
// Tell our peer we are willing to provide version 1 or 2 cmpctblocks
@@ -5200,9 +5245,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 2;
if (pfrom->GetLocalServices() & NODE_WITNESS)
- pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
nCMPCTBLOCKVersion = 1;
- pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
}
}
@@ -5330,7 +5375,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// time the block arrives, the header chain leading up to it is already validated. Not
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
- pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
+ connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
CNodeState *nodestate = State(pfrom->GetId());
if (CanDirectFetch(chainparams.GetConsensus()) &&
nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
@@ -5366,7 +5411,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
if (!vToFetch.empty())
- pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
+ connman.PushMessage(pfrom, NetMsgType::GETDATA, vToFetch);
}
@@ -5478,7 +5523,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
resp.txn[i] = block.vtx[req.indexes[i]];
}
- pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
+ connman.PushMessageWithFlag(pfrom, State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
}
@@ -5527,7 +5572,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
- pfrom->PushMessage(NetMsgType::HEADERS, vHeaders);
+ connman.PushMessage(pfrom, NetMsgType::HEADERS, vHeaders);
}
@@ -5690,13 +5735,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->id,
FormatStateMessage(state));
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+ connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0) {
Misbehaving(pfrom->GetId(), nDoS);
}
}
- FlushStateToDisk(state, FLUSH_STATE_PERIODIC);
}
@@ -5710,7 +5754,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
if (!IsInitialBlockDownload())
- pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
+ connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
return true;
}
@@ -5743,7 +5787,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// so we just grab the block via normal getdata
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
- pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+ connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
}
return true;
}
@@ -5787,7 +5831,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Duplicate txindexes, the block is now in-flight, so just request it
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
- pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+ connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
return true;
}
@@ -5811,7 +5855,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
} else {
req.blockhash = pindex->GetBlockHash();
- pfrom->PushMessage(NetMsgType::GETBLOCKTXN, req);
+ connman.PushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
}
}
} else {
@@ -5820,7 +5864,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// mempool will probably be useless - request the block normally
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
- pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+ connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
return true;
} else {
// If this was an announce-cmpctblock, we want the same treatment as a header message
@@ -5832,8 +5876,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);
}
}
-
- CheckBlockIndex(chainparams.GetConsensus());
}
else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
@@ -5864,22 +5906,42 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Might have collided, fall back to getdata now :(
std::vector<CInv> invs;
invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
- pfrom->PushMessage(NetMsgType::GETDATA, invs);
- } else
+ connman.PushMessage(pfrom, NetMsgType::GETDATA, invs);
+ } else {
+ // Block is either okay, or possibly we received
+ // READ_STATUS_CHECKBLOCK_FAILED.
+ // Note that CheckBlock can only fail for one of a few reasons:
+ // 1. bad-proof-of-work (impossible here, because we've already
+ // accepted the header)
+ // 2. merkleroot doesn't match the transactions given (already
+ // caught in FillBlock with READ_STATUS_FAILED, so
+ // impossible here)
+ // 3. the block is otherwise invalid (eg invalid coinbase,
+ // block is too big, too many legacy sigops, etc).
+ // So if CheckBlock failed, #3 is the only possibility.
+ // Under BIP 152, we don't DoS-ban unless proof of work is
+ // invalid (we don't require all the stateless checks to have
+ // been run). This is handled below, so just treat this as
+ // though the block was successfully read, and rely on the
+ // handling in ProcessNewBlock to ensure the block index is
+ // updated, reject messages go out, etc.
+ MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
fBlockRead = true;
+ }
} // Don't hold cs_main when we call into ProcessNewBlock
if (fBlockRead) {
CValidationState state;
- ProcessNewBlock(state, chainparams, pfrom, &block, false, NULL);
+ // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
+ // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
+ // BIP 152 permits peers to relay compact blocks after validating
+ // the header only; we should not punish peers if the block turns
+ // out to be invalid.
+ ProcessNewBlock(state, chainparams, pfrom, &block, true, NULL, false);
int nDoS;
if (state.IsInvalid(nDoS)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+ connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
- if (nDoS > 0) {
- LOCK(cs_main);
- Misbehaving(pfrom->GetId(), nDoS);
- }
}
}
}
@@ -5922,7 +5984,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// nUnconnectingHeaders gets reset back to 0.
if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
nodestate->nUnconnectingHeaders++;
- pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
+ connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
@@ -5969,7 +6031,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
- pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
+ connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
}
bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
@@ -6022,12 +6084,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// In any case, we want to download using a compact block, not a regular one
vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
}
- pfrom->PushMessage(NetMsgType::GETDATA, vGetData);
+ connman.PushMessage(pfrom, NetMsgType::GETDATA, vGetData);
}
}
}
-
- CheckBlockIndex(chainparams.GetConsensus());
}
NotifyHeaderTip();
@@ -6046,11 +6106,17 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Such an unrequested block may still be processed, subject to the
// conditions in AcceptBlock().
bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
- ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL);
+ {
+ LOCK(cs_main);
+ // Also always process if we requested the block explicitly, as we may
+ // need it even though it is not a candidate for a new best tip.
+ forceProcessing |= MarkBlockAsReceived(block.GetHash());
+ }
+ ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL, true);
int nDoS;
if (state.IsInvalid(nDoS)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+ connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
if (nDoS > 0) {
LOCK(cs_main);
@@ -6127,7 +6193,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
- pfrom->PushMessage(NetMsgType::PONG, nonce);
+ connman.PushMessage(pfrom, NetMsgType::PONG, nonce);
}
}
@@ -6239,8 +6305,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
else if (strCommand == NetMsgType::FILTERCLEAR)
{
LOCK(pfrom->cs_filter);
- delete pfrom->pfilter;
- pfrom->pfilter = new CBloomFilter();
+ if (pfrom->GetLocalServices() & NODE_BLOOM) {
+ delete pfrom->pfilter;
+ pfrom->pfilter = new CBloomFilter();
+ }
pfrom->fRelayTxes = true;
}
@@ -6362,7 +6430,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
// Checksum
CDataStream& vRecv = msg.vRecv;
- uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
+ const uint256& hash = msg.GetMessageHash();
if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
{
LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
@@ -6381,7 +6449,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
}
catch (const std::ios_base::failure& e)
{
- pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
+ connman.PushMessageWithVersion(pfrom, INIT_PROTO_VERSION, NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
@@ -6470,11 +6538,11 @@ bool SendMessages(CNode* pto, CConnman& connman)
pto->nPingUsecStart = GetTimeMicros();
if (pto->nVersion > BIP0031_VERSION) {
pto->nPingNonceSent = nonce;
- pto->PushMessage(NetMsgType::PING, nonce);
+ connman.PushMessage(pto, NetMsgType::PING, nonce);
} else {
// Peer is too old to support ping command with nonce, pong will never arrive.
pto->nPingNonceSent = 0;
- pto->PushMessage(NetMsgType::PING);
+ connman.PushMessage(pto, NetMsgType::PING);
}
}
@@ -6505,14 +6573,14 @@ bool SendMessages(CNode* pto, CConnman& connman)
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
{
- pto->PushMessage(NetMsgType::ADDR, vAddr);
+ connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
- pto->PushMessage(NetMsgType::ADDR, vAddr);
+ connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
// we only send the big addr message once
if (pto->vAddrToSend.capacity() > 40)
pto->vAddrToSend.shrink_to_fit();
@@ -6535,7 +6603,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
}
BOOST_FOREACH(const CBlockReject& reject, state.rejects)
- pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
+ connman.PushMessage(pto, NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
state.rejects.clear();
// Start block sync
@@ -6558,7 +6626,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
- pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
+ connman.PushMessage(pto, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
}
}
@@ -6647,7 +6715,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
CBlock block;
assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
- pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+ connman.PushMessageWithFlag(pto, state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
if (vHeaders.size() > 1) {
@@ -6659,7 +6727,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->id);
}
- pto->PushMessage(NetMsgType::HEADERS, vHeaders);
+ connman.PushMessage(pto, NetMsgType::HEADERS, vHeaders);
state.pindexBestHeaderSent = pBestIndex;
} else
fRevertToInv = true;
@@ -6705,7 +6773,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
vInv.push_back(CInv(MSG_BLOCK, hash));
if (vInv.size() == MAX_INV_SZ) {
- pto->PushMessage(NetMsgType::INV, vInv);
+ connman.PushMessage(pto, NetMsgType::INV, vInv);
vInv.clear();
}
}
@@ -6751,7 +6819,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
pto->filterInventoryKnown.insert(hash);
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
- pto->PushMessage(NetMsgType::INV, vInv);
+ connman.PushMessage(pto, NetMsgType::INV, vInv);
vInv.clear();
}
}
@@ -6817,7 +6885,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
}
}
if (vInv.size() == MAX_INV_SZ) {
- pto->PushMessage(NetMsgType::INV, vInv);
+ connman.PushMessage(pto, NetMsgType::INV, vInv);
vInv.clear();
}
pto->filterInventoryKnown.insert(hash);
@@ -6825,7 +6893,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
}
}
if (!vInv.empty())
- pto->PushMessage(NetMsgType::INV, vInv);
+ connman.PushMessage(pto, NetMsgType::INV, vInv);
// Detect whether we're stalling
nNow = GetTimeMicros();
@@ -6886,7 +6954,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
- pto->PushMessage(NetMsgType::GETDATA, vGetData);
+ connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
vGetData.clear();
}
} else {
@@ -6896,7 +6964,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
- pto->PushMessage(NetMsgType::GETDATA, vGetData);
+ connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
//
// Message: feefilter
@@ -6909,7 +6977,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
if (timeNow > pto->nextSendTimeFeeFilter) {
CAmount filterToSend = filterRounder.round(currentFilter);
if (filterToSend != pto->lastSentFeeFilter) {
- pto->PushMessage(NetMsgType::FEEFILTER, filterToSend);
+ connman.PushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
pto->lastSentFeeFilter = filterToSend;
}
pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
@@ -6941,6 +7009,119 @@ int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::D
return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
}
+static const uint64_t MEMPOOL_DUMP_VERSION = 1;
+
+bool LoadMempool(void)
+{
+ int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
+ FILE* filestr = fopen((GetDataDir() / "mempool.dat").string().c_str(), "r");
+ CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
+ if (file.IsNull()) {
+ LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
+ return false;
+ }
+
+ int64_t count = 0;
+ int64_t skipped = 0;
+ int64_t failed = 0;
+ int64_t nNow = GetTime();
+
+ try {
+ uint64_t version;
+ file >> version;
+ if (version != MEMPOOL_DUMP_VERSION) {
+ return false;
+ }
+ uint64_t num;
+ file >> num;
+ double prioritydummy = 0;
+ while (num--) {
+ CTransaction tx;
+ int64_t nTime;
+ int64_t nFeeDelta;
+ file >> tx;
+ file >> nTime;
+ file >> nFeeDelta;
+
+ CAmount amountdelta = nFeeDelta;
+ if (amountdelta) {
+ mempool.PrioritiseTransaction(tx.GetHash(), tx.GetHash().ToString(), prioritydummy, amountdelta);
+ }
+ CValidationState state;
+ if (nTime + nExpiryTimeout > nNow) {
+ LOCK(cs_main);
+ AcceptToMemoryPoolWithTime(mempool, state, tx, true, NULL, nTime);
+ if (state.IsValid()) {
+ ++count;
+ } else {
+ ++failed;
+ }
+ } else {
+ ++skipped;
+ }
+ }
+ std::map<uint256, CAmount> mapDeltas;
+ file >> mapDeltas;
+
+ for (const auto& i : mapDeltas) {
+ mempool.PrioritiseTransaction(i.first, i.first.ToString(), prioritydummy, i.second);
+ }
+ } catch (const std::exception& e) {
+ LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
+ return false;
+ }
+
+ LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped);
+ return true;
+}
+
+void DumpMempool(void)
+{
+ int64_t start = GetTimeMicros();
+
+ std::map<uint256, CAmount> mapDeltas;
+ std::vector<TxMempoolInfo> vinfo;
+
+ {
+ LOCK(mempool.cs);
+ for (const auto &i : mempool.mapDeltas) {
+ mapDeltas[i.first] = i.second.first;
+ }
+ vinfo = mempool.infoAll();
+ }
+
+ int64_t mid = GetTimeMicros();
+
+ try {
+ FILE* filestr = fopen((GetDataDir() / "mempool.dat.new").string().c_str(), "w");
+ if (!filestr) {
+ return;
+ }
+
+ CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
+
+ uint64_t version = MEMPOOL_DUMP_VERSION;
+ file << version;
+
+ file << (uint64_t)vinfo.size();
+ for (const auto& i : vinfo) {
+ file << *(i.tx);
+ file << (int64_t)i.nTime;
+ file << (int64_t)i.nFeeDelta;
+ mapDeltas.erase(i.tx->GetHash());
+ }
+
+ file << mapDeltas;
+ FileCommit(file.Get());
+ file.fclose();
+ RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
+ int64_t last = GetTimeMicros();
+ LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001);
+ } catch (const std::exception& e) {
+ LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
+ }
+}
+
class CMainCleanup
{
public:
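The two routines added above also pin down an implicit file format for mempool.dat: a version word, an entry count, one (transaction, acceptance time, fee delta) triple per entry, and finally the map of fee deltas that were not consumed by the entries themselves. Below is a minimal reader sketch for that layout, using the same serialization facilities as the patch; the function name and the callback parameter are illustrative and not part of the change.

    // Sketch of a standalone reader for the mempool.dat layout written by DumpMempool():
    // version word, entry count, (tx, nTime, nFeeDelta) triples, then the leftover fee-delta map.
    // ReadMempoolDump and handleEntry are invented names; CAutoFile, CTransaction and CAmount
    // are the types used in the patch above.
    static bool ReadMempoolDump(const std::string& path,
                                std::function<void(const CTransaction&, int64_t, int64_t)> handleEntry)
    {
        FILE* filestr = fopen(path.c_str(), "rb");
        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
        if (file.IsNull())
            return false;
        try {
            uint64_t version = 0;
            file >> version;
            if (version != MEMPOOL_DUMP_VERSION)
                return false;                       // unknown layout, bail out

            uint64_t num = 0;
            file >> num;
            while (num--) {
                CTransaction tx;
                int64_t nTime = 0;
                int64_t nFeeDelta = 0;
                file >> tx;
                file >> nTime;
                file >> nFeeDelta;
                handleEntry(tx, nTime, nFeeDelta);  // one record per dumped mempool entry
            }

            std::map<uint256, CAmount> mapDeltas;   // deltas for txs that were not in the pool
            file >> mapDeltas;
        } catch (const std::exception&) {
            return false;                           // truncated or corrupted dump
        }
        return true;
    }
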
diff --git a/src/main.h b/src/main.h
index 3eab9b89da..21829b6c25 100644
--- a/src/main.h
+++ b/src/main.h
@@ -223,7 +223,7 @@ static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
* @param[out] dbp The already known disk position of pblock, or NULL if not yet stored.
* @return True if state.IsValid()
*/
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp);
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool fMayBanPeerIfInvalid);
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
/** Open a block file (blk?????.dat) */
@@ -237,7 +237,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
/** Initialize a new block tree database + block data on disk */
bool InitBlockIndex(const CChainParams& chainparams);
/** Load the block tree and coins database from disk */
-bool LoadBlockIndex();
+bool LoadBlockIndex(const CChainParams& chainparams);
/** Unload database information */
void UnloadBlockIndex();
/** Run an instance of the script checking thread */
@@ -291,6 +291,10 @@ void PruneAndFlush();
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
bool* pfMissingInputs, bool fOverrideMempoolLimit=false, const CAmount nAbsurdFee=0);
+/** (try to) add transaction to memory pool with a specified acceptance time **/
+bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
+ bool* pfMissingInputs, int64_t nAcceptTime, bool fOverrideMempoolLimit=false, const CAmount nAbsurdFee=0);
+
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state);
@@ -339,7 +343,7 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight);
/** Transaction validation functions */
/** Context-independent validity checks */
-bool CheckTransaction(const CTransaction& tx, CValidationState& state);
+bool CheckTransaction(const CTransaction& tx, CValidationState& state, bool fCheckDuplicateInputs=true);
namespace Consensus {
@@ -529,6 +533,12 @@ static const unsigned int REJECT_ALREADY_KNOWN = 0x101;
/** Transaction conflicts with a transaction already known */
static const unsigned int REJECT_CONFLICT = 0x102;
+/** Dump the mempool to disk. */
+void DumpMempool();
+
+/** Load the mempool from disk. */
+bool LoadMempool();
+
// The following things handle network-processing logic
// (and should be moved to a separate file)
@@ -542,7 +552,7 @@ private:
CConnman* connman;
public:
- PeerLogicValidation(CConnman* connmanIn) : connman(connmanIn) {}
+ PeerLogicValidation(CConnman* connmanIn);
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload);
virtual void BlockChecked(const CBlock& block, const CValidationState& state);
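Taken together, the header changes describe the new ownership model: PeerLogicValidation is constructed with the CConnman it serves (its constructor now also allocates recentRejects, as shown in main.cpp above), and ProcessNewBlock gains an explicit fMayBanPeerIfInvalid argument so the caller decides whether an invalid block may count against the submitting peer. A hedged wiring sketch follows; RegisterValidationInterface() is assumed to be the usual callback-registration helper and is not part of this hunk.

    // Sketch only: how a caller might attach the new peer-processing hooks.
    // RegisterValidationInterface() is an assumption; only the PeerLogicValidation
    // constructor and the extended ProcessNewBlock signature come from the patch above.
    void AttachPeerLogic(CConnman& connman, PeerLogicValidation*& peerLogicOut)
    {
        peerLogicOut = new PeerLogicValidation(&connman);   // also initializes recentRejects
        RegisterValidationInterface(peerLogicOut);
    }

    // Submitting a block that arrived via BIP 152 relay: force processing because we
    // requested it, but do not ban the peer if it turns out to be invalid.
    void SubmitCompactBlock(const CChainParams& chainparams, CNode* pfrom, const CBlock& block)
    {
        CValidationState state;
        ProcessNewBlock(state, chainparams, pfrom, &block, /*fForceProcessing=*/true,
                        /*dbp=*/NULL, /*fMayBanPeerIfInvalid=*/false);
    }
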
diff --git a/src/memusage.h b/src/memusage.h
index 3810bfad07..2e3c5a9b92 100644
--- a/src/memusage.h
+++ b/src/memusage.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 The Bitcoin developers
+// Copyright (c) 2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/merkleblock.h b/src/merkleblock.h
index 835cbcce55..17c33194a9 100644
--- a/src/merkleblock.h
+++ b/src/merkleblock.h
@@ -85,7 +85,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(nTransactions);
READWRITE(vHash);
std::vector<unsigned char> vBytes;
@@ -148,7 +148,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(header);
READWRITE(txn);
}
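The merkleblock.h hunks (like the netaddress.h ones further down) are mechanical fallout from the serialize.h rework: SerializationOp no longer receives nType/nVersion, because the stream itself now carries that state. For a class not shown in this diff, the post-change pattern looks roughly like the sketch below; the class and its fields are invented for illustration.

    // Illustrative only: the two-argument SerializationOp form used after this change.
    // CExample and its members are made up; ADD_SERIALIZE_METHODS and READWRITE are the
    // existing serialize.h macros.
    class CExample
    {
    public:
        uint32_t nValue;
        std::vector<unsigned char> vData;

        ADD_SERIALIZE_METHODS;

        template <typename Stream, typename Operation>
        inline void SerializationOp(Stream& s, Operation ser_action) {
            // nType/nVersion are gone from the parameter list; when needed, such state
            // is queried from the stream instead (e.g. s.GetVersion()).
            READWRITE(nValue);
            READWRITE(vData);
        }
    };
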
diff --git a/src/net.cpp b/src/net.cpp
index 48ba9588d9..e47a8bb168 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -64,6 +64,7 @@
const static std::string NET_MESSAGE_COMMAND_OTHER = "*other*";
static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8]
+static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8]
//
// Global state variables
//
@@ -389,18 +390,18 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
addrman.Attempt(addrConnect, fCountFailure);
// Add node
- CNode* pnode = new CNode(GetNewNodeId(), nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), pszDest ? pszDest : "", false);
- GetNodeSignals().InitializeNode(pnode->GetId(), pnode);
+ NodeId id = GetNewNodeId();
+ uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
+ CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, pszDest ? pszDest : "", false);
+ pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices);
+ pnode->nTimeConnected = GetTime();
pnode->AddRef();
-
+ GetNodeSignals().InitializeNode(pnode, *this);
{
LOCK(cs_vNodes);
vNodes.push_back(pnode);
}
- pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices);
- pnode->nTimeConnected = GetTime();
-
return pnode;
} else if (!proxyConnectionFailed) {
// If connecting to the node failed, and failure is not caused by a problem connecting to
@@ -446,23 +447,6 @@ void CNode::CloseSocketDisconnect()
vRecvMsg.clear();
}
-void CNode::PushVersion()
-{
- int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime());
- CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
- CAddress addrMe = CAddress(CService(), nLocalServices);
- if (fLogIPs)
- LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nMyStartingHeight, addrMe.ToString(), addrYou.ToString(), id);
- else
- LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nMyStartingHeight, addrMe.ToString(), id);
- PushMessage(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalServices, nTime, addrYou, addrMe,
- nLocalHostNonce, strSubVersion, nMyStartingHeight, ::fRelayTxes);
-}
-
-
-
-
-
void CConnman::ClearBanned()
{
{
@@ -758,12 +742,21 @@ int CNetMessage::readData(const char *pch, unsigned int nBytes)
vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
}
+ hasher.Write((const unsigned char*)pch, nCopy);
memcpy(&vRecv[nDataPos], pch, nCopy);
nDataPos += nCopy;
return nCopy;
}
+const uint256& CNetMessage::GetMessageHash() const
+{
+ assert(complete());
+ if (data_hash.IsNull())
+ hasher.Finalize(data_hash.begin());
+ return data_hash;
+}
+
@@ -825,7 +818,7 @@ struct NodeEvictionCandidate
int64_t nMinPingUsecTime;
int64_t nLastBlockTime;
int64_t nLastTXTime;
- bool fNetworkNode;
+ bool fRelevantServices;
bool fRelayTxes;
bool fBloomFilter;
CAddress addr;
@@ -850,7 +843,7 @@ static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvict
{
// There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block.
if (a.nLastBlockTime != b.nLastBlockTime) return a.nLastBlockTime < b.nLastBlockTime;
- if (a.fNetworkNode != b.fNetworkNode) return b.fNetworkNode;
+ if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
return a.nTimeConnected > b.nTimeConnected;
}
@@ -885,7 +878,8 @@ bool CConnman::AttemptToEvictConnection()
if (node->fDisconnect)
continue;
NodeEvictionCandidate candidate = {node->id, node->nTimeConnected, node->nMinPingUsecTime,
- node->nLastBlockTime, node->nLastTXTime, node->fNetworkNode,
+ node->nLastBlockTime, node->nLastTXTime,
+ (node->nServices & nRelevantServices) == nRelevantServices,
node->fRelayTxes, node->pfilter != NULL, node->addr, node->nKeyedNetGroup};
vEvictionCandidates.push_back(candidate);
}
@@ -1024,10 +1018,13 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
}
}
- CNode* pnode = new CNode(GetNewNodeId(), nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), "", true);
- GetNodeSignals().InitializeNode(pnode->GetId(), pnode);
+ NodeId id = GetNewNodeId();
+ uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
+
+ CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, "", true);
pnode->AddRef();
pnode->fWhitelisted = whitelisted;
+ GetNodeSignals().InitializeNode(pnode, *this);
LogPrint("net", "connection from %s accepted\n", addr.ToString());
@@ -1052,7 +1049,7 @@ void CConnman::ThreadSocketHandler()
BOOST_FOREACH(CNode* pnode, vNodesCopy)
{
if (pnode->fDisconnect ||
- (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0 && pnode->ssSend.empty()))
+ (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0))
{
// remove from vNodes
vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end());
@@ -1156,10 +1153,6 @@ void CConnman::ThreadSocketHandler()
{
TRY_LOCK(pnode->cs_vSend, lockSend);
if (lockSend) {
- if (pnode->nOptimisticBytesWritten) {
- RecordBytesSent(pnode->nOptimisticBytesWritten);
- pnode->nOptimisticBytesWritten = 0;
- }
if (!pnode->vSendMsg.empty()) {
FD_SET(pnode->hSocket, &fdsetSend);
continue;
@@ -2118,8 +2111,12 @@ bool CConnman::Start(boost::thread_group& threadGroup, CScheduler& scheduler, st
if (pnodeLocalHost == NULL) {
CNetAddr local;
LookupHost("127.0.0.1", local, false);
- pnodeLocalHost = new CNode(GetNewNodeId(), nLocalServices, GetBestHeight(), INVALID_SOCKET, CAddress(CService(local, 0), nLocalServices), 0);
- GetNodeSignals().InitializeNode(pnodeLocalHost->GetId(), pnodeLocalHost);
+
+ NodeId id = GetNewNodeId();
+ uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
+
+ pnodeLocalHost = new CNode(id, nLocalServices, GetBestHeight(), INVALID_SOCKET, CAddress(CService(local, 0), nLocalServices), 0, nonce);
+ GetNodeSignals().InitializeNode(pnodeLocalHost, *this);
}
//
@@ -2471,50 +2468,20 @@ int CConnman::GetBestHeight() const
return nBestHeight.load(std::memory_order_acquire);
}
-void CNode::Fuzz(int nChance)
-{
- if (!fSuccessfullyConnected) return; // Don't fuzz initial handshake
- if (GetRand(nChance) != 0) return; // Fuzz 1 of every nChance messages
-
- switch (GetRand(3))
- {
- case 0:
- // xor a random byte with a random value:
- if (!ssSend.empty()) {
- CDataStream::size_type pos = GetRand(ssSend.size());
- ssSend[pos] ^= (unsigned char)(GetRand(256));
- }
- break;
- case 1:
- // delete a random byte:
- if (!ssSend.empty()) {
- CDataStream::size_type pos = GetRand(ssSend.size());
- ssSend.erase(ssSend.begin()+pos);
- }
- break;
- case 2:
- // insert a random byte at a random position
- {
- CDataStream::size_type pos = GetRand(ssSend.size());
- char ch = (char)GetRand(256);
- ssSend.insert(ssSend.begin()+pos, ch);
- }
- break;
- }
- // Chance of more than one change half the time:
- // (more changes exponentially less likely):
- Fuzz(2);
-}
-
unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; }
unsigned int CConnman::GetSendBufferSize() const{ return nSendBufferMaxSize; }
-CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, const std::string& addrNameIn, bool fInboundIn) :
- ssSend(SER_NETWORK, INIT_PROTO_VERSION),
+CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string& addrNameIn, bool fInboundIn) :
addr(addrIn),
+ fInbound(fInboundIn),
+ id(idIn),
nKeyedNetGroup(nKeyedNetGroupIn),
addrKnown(5000, 0.001),
- filterInventoryKnown(50000, 0.000001)
+ filterInventoryKnown(50000, 0.000001),
+ nLocalHostNonce(nLocalHostNonceIn),
+ nLocalServices(nLocalServicesIn),
+ nMyStartingHeight(nMyStartingHeightIn),
+ nSendVersion(0)
{
nServices = NODE_NONE;
nServicesExpected = NODE_NONE;
@@ -2533,7 +2500,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
fOneShot = false;
fClient = false; // set by version message
fFeeler = false;
- fInbound = fInboundIn;
fNetworkNode = false;
fSuccessfullyConnected = false;
fDisconnect = false;
@@ -2562,12 +2528,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
minFeeFilter = 0;
lastSentFeeFilter = 0;
nextSendTimeFeeFilter = 0;
- id = idIn;
- nOptimisticBytesWritten = 0;
- nLocalServices = nLocalServicesIn;
-
- GetRandBytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
- nMyStartingHeight = nMyStartingHeightIn;
BOOST_FOREACH(const std::string &msg, getAllNetMessageTypes())
mapRecvBytesPerMsgCmd[msg] = 0;
@@ -2577,10 +2537,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
LogPrint("net", "Added connection to %s peer=%d\n", addrName, id);
else
LogPrint("net", "Added connection peer=%d\n", id);
-
- // Be shy and don't send version until we hear
- if (hSocket != INVALID_SOCKET && !fInbound)
- PushVersion();
}
CNode::~CNode()
@@ -2625,65 +2581,50 @@ void CNode::AskFor(const CInv& inv)
mapAskFor.insert(std::make_pair(nRequestTime, inv));
}
-void CNode::BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend)
+CDataStream CConnman::BeginMessage(CNode* pnode, int nVersion, int flags, const std::string& sCommand)
{
- ENTER_CRITICAL_SECTION(cs_vSend);
- assert(ssSend.size() == 0);
- ssSend << CMessageHeader(Params().MessageStart(), pszCommand, 0);
- LogPrint("net", "sending: %s ", SanitizeString(pszCommand));
+ return {SER_NETWORK, (nVersion ? nVersion : pnode->GetSendVersion()) | flags, CMessageHeader(Params().MessageStart(), sCommand.c_str(), 0) };
}
-void CNode::AbortMessage() UNLOCK_FUNCTION(cs_vSend)
+void CConnman::EndMessage(CDataStream& strm)
{
- ssSend.clear();
-
- LEAVE_CRITICAL_SECTION(cs_vSend);
+ // Set the size
+ assert(strm.size () >= CMessageHeader::HEADER_SIZE);
+ unsigned int nSize = strm.size() - CMessageHeader::HEADER_SIZE;
+ WriteLE32((uint8_t*)&strm[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize);
+ // Set the checksum
+ uint256 hash = Hash(strm.begin() + CMessageHeader::HEADER_SIZE, strm.end());
+ memcpy((char*)&strm[CMessageHeader::CHECKSUM_OFFSET], hash.begin(), CMessageHeader::CHECKSUM_SIZE);
- LogPrint("net", "(aborted)\n");
}
-void CNode::EndMessage(const char* pszCommand) UNLOCK_FUNCTION(cs_vSend)
+void CConnman::PushMessage(CNode* pnode, CDataStream& strm, const std::string& sCommand)
{
- // The -*messagestest options are intentionally not documented in the help message,
- // since they are only used during development to debug the networking code and are
- // not intended for end-users.
- if (mapArgs.count("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 2)) == 0)
- {
- LogPrint("net", "dropmessages DROPPING SEND MESSAGE\n");
- AbortMessage();
+ if(strm.empty())
return;
- }
- if (mapArgs.count("-fuzzmessagestest"))
- Fuzz(GetArg("-fuzzmessagestest", 10));
- if (ssSend.size() == 0)
- {
- LEAVE_CRITICAL_SECTION(cs_vSend);
- return;
- }
- // Set the size
- unsigned int nSize = ssSend.size() - CMessageHeader::HEADER_SIZE;
- WriteLE32((uint8_t*)&ssSend[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize);
-
- //log total amount of bytes per command
- mapSendBytesPerMsgCmd[std::string(pszCommand)] += nSize + CMessageHeader::HEADER_SIZE;
-
- // Set the checksum
- uint256 hash = Hash(ssSend.begin() + CMessageHeader::HEADER_SIZE, ssSend.end());
- assert(ssSend.size () >= CMessageHeader::CHECKSUM_OFFSET + CMessageHeader::CHECKSUM_SIZE);
- memcpy((char*)&ssSend[CMessageHeader::CHECKSUM_OFFSET], hash.begin(), CMessageHeader::CHECKSUM_SIZE);
-
- LogPrint("net", "(%d bytes) peer=%d\n", nSize, id);
+ unsigned int nSize = strm.size() - CMessageHeader::HEADER_SIZE;
+ LogPrint("net", "sending %s (%d bytes) peer=%d\n", SanitizeString(sCommand.c_str()), nSize, pnode->id);
- std::deque<CSerializeData>::iterator it = vSendMsg.insert(vSendMsg.end(), CSerializeData());
- ssSend.GetAndClear(*it);
- nSendSize += (*it).size();
+ size_t nBytesSent = 0;
+ {
+ LOCK(pnode->cs_vSend);
+ if(pnode->hSocket == INVALID_SOCKET) {
+ return;
+ }
+ bool optimisticSend(pnode->vSendMsg.empty());
+ pnode->vSendMsg.emplace_back(strm.begin(), strm.end());
- // If write queue empty, attempt "optimistic write"
- if (it == vSendMsg.begin())
- nOptimisticBytesWritten += SocketSendData(this);
+ //log total amount of bytes per command
+ pnode->mapSendBytesPerMsgCmd[sCommand] += strm.size();
+ pnode->nSendSize += strm.size();
- LEAVE_CRITICAL_SECTION(cs_vSend);
+ // If write queue empty, attempt "optimistic write"
+ if (optimisticSend == true)
+ nBytesSent = SocketSendData(pnode);
+ }
+ if (nBytesSent)
+ RecordBytesSent(nBytesSent);
}
bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
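A related detail on the receive path: CNetMessage now feeds every payload byte into a CHash256 instance as it is read (see readData above), so the checksum check in ProcessMessages can call GetMessageHash() instead of rehashing the whole buffer. The wire header stores only the first CHECKSUM_SIZE bytes of the double-SHA256, so the comparison is a prefix memcmp, roughly as sketched below; the helper name is illustrative.

    // Sketch of the receive-side checksum check enabled by GetMessageHash(). The helper
    // name is invented; msg must be complete(), which GetMessageHash() asserts.
    static bool ChecksumMatches(const CNetMessage& msg)
    {
        const uint256& hash = msg.GetMessageHash();   // finalized lazily, cached in data_hash
        return memcmp(hash.begin(), msg.hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) == 0;
    }
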
diff --git a/src/net.h b/src/net.h
index 58b492e592..25d9bbc02b 100644
--- a/src/net.h
+++ b/src/net.h
@@ -136,6 +136,33 @@ public:
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
+ template <typename... Args>
+ void PushMessageWithVersionAndFlag(CNode* pnode, int nVersion, int flag, const std::string& sCommand, Args&&... args)
+ {
+ auto msg(BeginMessage(pnode, nVersion, flag, sCommand));
+ ::SerializeMany(msg, std::forward<Args>(args)...);
+ EndMessage(msg);
+ PushMessage(pnode, msg, sCommand);
+ }
+
+ template <typename... Args>
+ void PushMessageWithFlag(CNode* pnode, int flag, const std::string& sCommand, Args&&... args)
+ {
+ PushMessageWithVersionAndFlag(pnode, 0, flag, sCommand, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ void PushMessageWithVersion(CNode* pnode, int nVersion, const std::string& sCommand, Args&&... args)
+ {
+ PushMessageWithVersionAndFlag(pnode, nVersion, 0, sCommand, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ void PushMessage(CNode* pnode, const std::string& sCommand, Args&&... args)
+ {
+ PushMessageWithVersionAndFlag(pnode, 0, 0, sCommand, std::forward<Args>(args)...);
+ }
+
template<typename Callable>
bool ForEachNodeContinueIf(Callable&& func)
{
@@ -345,6 +372,10 @@ private:
unsigned int GetReceiveFloodSize() const;
+ CDataStream BeginMessage(CNode* node, int nVersion, int flags, const std::string& sCommand);
+ void PushMessage(CNode* pnode, CDataStream& strm, const std::string& sCommand);
+ void EndMessage(CDataStream& strm);
+
// Network stats
void RecordBytesRecv(uint64_t bytes);
void RecordBytesSent(uint64_t bytes);
@@ -428,7 +459,7 @@ struct CNodeSignals
{
boost::signals2::signal<bool (CNode*, CConnman&), CombinerAll> ProcessMessages;
boost::signals2::signal<bool (CNode*, CConnman&), CombinerAll> SendMessages;
- boost::signals2::signal<void (NodeId, const CNode*)> InitializeNode;
+ boost::signals2::signal<void (CNode*, CConnman&)> InitializeNode;
boost::signals2::signal<void (NodeId, bool&)> FinalizeNode;
};
@@ -512,6 +543,9 @@ public:
class CNetMessage {
+private:
+ mutable CHash256 hasher;
+ mutable uint256 data_hash;
public:
bool in_data; // parsing header (false) or data (true)
@@ -539,6 +573,8 @@ public:
return (hdr.nMessageSize == nDataPos);
}
+ const uint256& GetMessageHash() const;
+
void SetVersion(int nVersionIn)
{
hdrbuf.SetVersion(nVersionIn);
@@ -553,15 +589,14 @@ public:
/** Information about a peer */
class CNode
{
+ friend class CConnman;
public:
// socket
ServiceFlags nServices;
ServiceFlags nServicesExpected;
SOCKET hSocket;
- CDataStream ssSend;
size_t nSendSize; // total size of all vSendMsg entries
size_t nSendOffset; // offset inside the first vSendMsg already sent
- uint64_t nOptimisticBytesWritten;
uint64_t nSendBytes;
std::deque<CSerializeData> vSendMsg;
CCriticalSection cs_vSend;
@@ -589,7 +624,7 @@ public:
bool fFeeler; // If true this node is being used as a short lived feeler.
bool fOneShot;
bool fClient;
- bool fInbound;
+ const bool fInbound;
bool fNetworkNode;
bool fSuccessfullyConnected;
bool fDisconnect;
@@ -603,7 +638,7 @@ public:
CCriticalSection cs_filter;
CBloomFilter* pfilter;
int nRefCount;
- NodeId id;
+ const NodeId id;
const uint64_t nKeyedNetGroup;
protected:
@@ -611,9 +646,6 @@ protected:
mapMsgCmdSize mapSendBytesPerMsgCmd;
mapMsgCmdSize mapRecvBytesPerMsgCmd;
- // Basic fuzz-testing
- void Fuzz(int nChance); // modifies ssSend
-
public:
uint256 hashContinue;
int nStartingHeight;
@@ -669,7 +701,7 @@ public:
CAmount lastSentFeeFilter;
int64_t nextSendTimeFeeFilter;
- CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, const std::string &addrNameIn = "", bool fInboundIn = false);
+ CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string &addrNameIn = "", bool fInboundIn = false);
~CNode();
private:
@@ -677,10 +709,11 @@ private:
void operator=(const CNode&);
- uint64_t nLocalHostNonce;
+ const uint64_t nLocalHostNonce;
// Services offered to this peer
- ServiceFlags nLocalServices;
- int nMyStartingHeight;
+ const ServiceFlags nLocalServices;
+ const int nMyStartingHeight;
+ int nSendVersion;
public:
NodeId GetId() const {
@@ -691,6 +724,10 @@ public:
return nLocalHostNonce;
}
+ int GetMyStartingHeight() const {
+ return nMyStartingHeight;
+ }
+
int GetRefCount()
{
assert(nRefCount >= 0);
@@ -716,6 +753,25 @@ public:
BOOST_FOREACH(CNetMessage &msg, vRecvMsg)
msg.SetVersion(nVersionIn);
}
+ void SetSendVersion(int nVersionIn)
+ {
+ // Send version may only be changed in the version message, and
+ // only one version message is allowed per session. We can therefore
+ // treat this value as const and even atomic as long as it's only used
+ // once the handshake is complete. Any attempt to set this twice is an
+ // error.
+ assert(nSendVersion == 0);
+ nSendVersion = nVersionIn;
+ }
+
+ int GetSendVersion() const
+ {
+ // The send version should always be explicitly set to
+ // INIT_PROTO_VERSION rather than using this value until the handshake
+ // is complete. See PushMessageWithVersion().
+ assert(nSendVersion != 0);
+ return nSendVersion;
+ }
CNode* AddRef()
{
@@ -778,193 +834,6 @@ public:
void AskFor(const CInv& inv);
- // TODO: Document the postcondition of this function. Is cs_vSend locked?
- void BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend);
-
- // TODO: Document the precondition of this function. Is cs_vSend locked?
- void AbortMessage() UNLOCK_FUNCTION(cs_vSend);
-
- // TODO: Document the precondition of this function. Is cs_vSend locked?
- void EndMessage(const char* pszCommand) UNLOCK_FUNCTION(cs_vSend);
-
- void PushVersion();
-
-
- void PushMessage(const char* pszCommand)
- {
- try
- {
- BeginMessage(pszCommand);
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1>
- void PushMessage(const char* pszCommand, const T1& a1)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- /** Send a message containing a1, serialized with flag flag. */
- template<typename T1>
- void PushMessageWithFlag(int flag, const char* pszCommand, const T1& a1)
- {
- try
- {
- BeginMessage(pszCommand);
- WithOrVersion(&ssSend, flag) << a1;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3, typename T4>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3 << a4;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3 << a4 << a5;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3 << a4 << a5 << a6;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7, const T8& a8)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7 << a8;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
- template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
- void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7, const T8& a8, const T9& a9)
- {
- try
- {
- BeginMessage(pszCommand);
- ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7 << a8 << a9;
- EndMessage(pszCommand);
- }
- catch (...)
- {
- AbortMessage();
- throw;
- }
- }
-
void CloseSocketDisconnect();
void copyStats(CNodeStats &stats);
diff --git a/src/netaddress.h b/src/netaddress.h
index 9330fe3328..9dffaa57e7 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -85,7 +85,7 @@ class CNetAddr
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(FLATDATA(ip));
}
@@ -122,7 +122,7 @@ class CSubNet
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(network);
READWRITE(FLATDATA(netmask));
READWRITE(FLATDATA(valid));
@@ -159,7 +159,7 @@ class CService : public CNetAddr
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(FLATDATA(ip));
unsigned short portN = htons(port);
READWRITE(FLATDATA(portN));
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index c07cd2eff8..7113390cdf 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin developers
+// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -14,10 +14,9 @@
#include "util.h"
void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets,
- unsigned int maxConfirms, double _decay, std::string _dataTypeString)
+ unsigned int maxConfirms, double _decay)
{
decay = _decay;
- dataTypeString = _dataTypeString;
for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
buckets.push_back(defaultBuckets[i]);
bucketMap[defaultBuckets[i]] = i;
@@ -87,10 +86,10 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
int maxbucketindex = buckets.size() - 1;
- // requireGreater means we are looking for the lowest fee/priority such that all higher
- // values pass, so we start at maxbucketindex (highest fee) and look at successively
+ // requireGreater means we are looking for the lowest feerate such that all higher
+ // values pass, so we start at maxbucketindex (highest feerate) and look at successively
// smaller buckets until we reach failure. Otherwise, we are looking for the highest
- // fee/priority such that all lower values fail, and we go in the opposite direction.
+ // feerate such that all lower values fail, and we go in the opposite direction.
unsigned int startbucket = requireGreater ? maxbucketindex : 0;
int step = requireGreater ? -1 : 1;
@@ -107,7 +106,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
bool foundAnswer = false;
unsigned int bins = unconfTxs.size();
- // Start counting from highest(default) or lowest fee/pri transactions
+ // Start counting from highest(default) or lowest feerate transactions
for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
curFarBucket = bucket;
nConf += confAvg[confTarget - 1][bucket];
@@ -145,8 +144,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
double median = -1;
double txSum = 0;
- // Calculate the "average" fee of the best bucket range that met success conditions
- // Find the bucket with the median transaction and then report the average fee from that bucket
+ // Calculate the "average" feerate of the best bucket range that met success conditions
+ // Find the bucket with the median transaction and then report the average feerate from that bucket
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
@@ -166,8 +165,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
}
}
- LogPrint("estimatefee", "%3d: For conf success %s %4.2f need %s %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
- confTarget, requireGreater ? ">" : "<", successBreakPoint, dataTypeString,
+ LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
+ confTarget, requireGreater ? ">" : "<", successBreakPoint,
requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
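A minimal standalone sketch of the scan direction described above (toy data only; the real EstimateMedianVal additionally merges adjacent buckets until sufficientTxVal is reached and tracks a near/far bucket range):

#include <cstdio>
#include <vector>

// passRate[i]: fraction of bucket i's transactions confirmed within the target;
// buckets are ordered from lowest to highest feerate.
int ScanBuckets(const std::vector<double>& passRate, double minSuccess, bool requireGreater) {
    int n = (int)passRate.size();
    int start = requireGreater ? n - 1 : 0;   // highest feerate first, or lowest first
    int step  = requireGreater ? -1 : 1;
    int found = -1;
    for (int b = start; b >= 0 && b < n; b += step) {
        // requireGreater: walk downwards while buckets keep passing and remember the
        // lowest passing one; otherwise walk upwards while buckets keep failing and
        // remember the highest failing one.
        bool cond = requireGreater ? passRate[b] >= minSuccess : passRate[b] < minSuccess;
        if (!cond) break;
        found = b;
    }
    return found;  // -1 if the first bucket examined already breaks the condition
}

int main() {
    std::vector<double> passRate = {0.10, 0.40, 0.80, 0.96, 0.99};
    std::printf("lowest passing bucket:  %d\n", ScanBuckets(passRate, 0.95, true));   // prints 3
    std::printf("highest failing bucket: %d\n", ScanBuckets(passRate, 0.95, false));  // prints 2
    return 0;
}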
@@ -200,10 +199,10 @@ void TxConfirmStats::Read(CAutoFile& filein)
filein >> fileBuckets;
numBuckets = fileBuckets.size();
if (numBuckets <= 1 || numBuckets > 1000)
- throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 fee/pri buckets");
+ throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
filein >> fileAvg;
if (fileAvg.size() != numBuckets)
- throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri average bucket count");
+ throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
filein >> fileTxCtAvg;
if (fileTxCtAvg.size() != numBuckets)
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
@@ -213,9 +212,9 @@ void TxConfirmStats::Read(CAutoFile& filein)
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
for (unsigned int i = 0; i < maxConfirms; i++) {
if (fileConfAvg[i].size() != numBuckets)
- throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri conf average bucket count");
+ throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
}
- // Now that we've processed the entire fee estimate data file and not
+ // Now that we've processed the entire feerate estimate data file and not
// thrown any errors, we can copy it to our data structures
decay = fileDecay;
buckets = fileBuckets;
@@ -242,8 +241,8 @@ void TxConfirmStats::Read(CAutoFile& filein)
for (unsigned int i = 0; i < buckets.size(); i++)
bucketMap[buckets[i]] = i;
- LogPrint("estimatefee", "Reading estimates: %u %s buckets counting confirms up to %u blocks\n",
- numBuckets, dataTypeString, maxConfirms);
+ LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
+ numBuckets, maxConfirms);
}
unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
@@ -251,7 +250,6 @@ unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
unsigned int blockIndex = nBlockHeight % unconfTxs.size();
unconfTxs[blockIndex][bucketindex]++;
- LogPrint("estimatefee", "adding to %s", dataTypeString);
return bucketindex;
}
@@ -291,12 +289,10 @@ void CBlockPolicyEstimator::removeTx(uint256 hash)
hash.ToString().c_str());
return;
}
- TxConfirmStats *stats = pos->second.stats;
unsigned int entryHeight = pos->second.blockHeight;
unsigned int bucketIndex = pos->second.bucketIndex;
- if (stats != NULL)
- stats->removeTx(entryHeight, nBestSeenHeight, bucketIndex);
+ feeStats.removeTx(entryHeight, nBestSeenHeight, bucketIndex);
mapMemPoolTxs.erase(hash);
}
@@ -309,45 +305,14 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee)
vfeelist.push_back(bucketBoundary);
}
vfeelist.push_back(INF_FEERATE);
- feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "FeeRate");
-
- minTrackedPriority = AllowFreeThreshold() < MIN_PRIORITY ? MIN_PRIORITY : AllowFreeThreshold();
- std::vector<double> vprilist;
- for (double bucketBoundary = minTrackedPriority; bucketBoundary <= MAX_PRIORITY; bucketBoundary *= PRI_SPACING) {
- vprilist.push_back(bucketBoundary);
- }
- vprilist.push_back(INF_PRIORITY);
- priStats.Initialize(vprilist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Priority");
-
- feeUnlikely = CFeeRate(0);
- feeLikely = CFeeRate(INF_FEERATE);
- priUnlikely = 0;
- priLikely = INF_PRIORITY;
-}
-
-bool CBlockPolicyEstimator::isFeeDataPoint(const CFeeRate &fee, double pri)
-{
- if ((pri < minTrackedPriority && fee >= minTrackedFee) ||
- (pri < priUnlikely && fee > feeLikely)) {
- return true;
- }
- return false;
-}
-
-bool CBlockPolicyEstimator::isPriDataPoint(const CFeeRate &fee, double pri)
-{
- if ((fee < minTrackedFee && pri >= minTrackedPriority) ||
- (fee < feeUnlikely && pri > priLikely)) {
- return true;
- }
- return false;
+ feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
}
void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate)
{
unsigned int txHeight = entry.GetHeight();
uint256 hash = entry.GetTx().GetHash();
- if (mapMemPoolTxs[hash].stats != NULL) {
+ if (mapMemPoolTxs.count(hash)) {
LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
hash.ToString().c_str());
return;
@@ -371,30 +336,11 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
return;
}
- // Fees are stored and reported as BTC-per-kb:
+ // Feerates are stored and reported as BTC-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
- // Want the priority of the tx at confirmation. However we don't know
- // what that will be and its too hard to continue updating it
- // so use starting priority as a proxy
- double curPri = entry.GetPriority(txHeight);
mapMemPoolTxs[hash].blockHeight = txHeight;
-
- LogPrint("estimatefee", "Blockpolicy mempool tx %s ", hash.ToString().substr(0,10));
- // Record this as a priority estimate
- if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
- mapMemPoolTxs[hash].stats = &priStats;
- mapMemPoolTxs[hash].bucketIndex = priStats.NewTx(txHeight, curPri);
- }
- // Record this as a fee estimate
- else if (isFeeDataPoint(feeRate, curPri)) {
- mapMemPoolTxs[hash].stats = &feeStats;
- mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
- }
- else {
- LogPrint("estimatefee", "not adding");
- }
- LogPrint("estimatefee", "\n");
+ mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
}
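A quick standalone illustration of the BTC-per-kb bookkeeping mentioned in the comment (a stand-in for CFeeRate's fee/size constructor, not the real class):

#include <cstdint>
#include <cstdio>

// Minimal equivalent of what CFeeRate(nFeePaid, nBytes).GetFeePerK() yields:
// the fee a 1000-byte transaction would have paid at the same rate.
int64_t FeePerK(int64_t nFeePaid, int64_t nBytes) {
    return nBytes > 0 ? nFeePaid * 1000 / nBytes : 0;
}

int main() {
    // A 250-byte transaction paying 50,000 satoshis is bucketed at 200,000 sat/kB
    // when feeStats.NewTx() is called above.
    std::printf("%lld sat/kB\n", (long long)FeePerK(50000, 250));
    return 0;
}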
void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry)
@@ -417,21 +363,10 @@ void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
return;
}
- // Fees are stored and reported as BTC-per-kb:
+ // Feerates are stored and reported as BTC-per-kb:
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
- // Want the priority of the tx at confirmation. The priority when it
- // entered the mempool could easily be very small and change quickly
- double curPri = entry.GetPriority(nBlockHeight);
-
- // Record this as a priority estimate
- if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) {
- priStats.Record(blocksToConfirm, curPri);
- }
- // Record this as a fee estimate
- else if (isFeeDataPoint(feeRate, curPri)) {
- feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
- }
+ feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
}
void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
@@ -452,41 +387,15 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
if (!fCurrentEstimate)
return;
- // Update the dynamic cutoffs
- // a fee/priority is "likely" the reason your tx was included in a block if >85% of such tx's
- // were confirmed in 2 blocks and is "unlikely" if <50% were confirmed in 10 blocks
- LogPrint("estimatefee", "Blockpolicy recalculating dynamic cutoffs:\n");
- priLikely = priStats.EstimateMedianVal(2, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBlockHeight);
- if (priLikely == -1)
- priLikely = INF_PRIORITY;
-
- double feeLikelyEst = feeStats.EstimateMedianVal(2, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBlockHeight);
- if (feeLikelyEst == -1)
- feeLikely = CFeeRate(INF_FEERATE);
- else
- feeLikely = CFeeRate(feeLikelyEst);
-
- priUnlikely = priStats.EstimateMedianVal(10, SUFFICIENT_PRITXS, UNLIKELY_PCT, false, nBlockHeight);
- if (priUnlikely == -1)
- priUnlikely = 0;
-
- double feeUnlikelyEst = feeStats.EstimateMedianVal(10, SUFFICIENT_FEETXS, UNLIKELY_PCT, false, nBlockHeight);
- if (feeUnlikelyEst == -1)
- feeUnlikely = CFeeRate(0);
- else
- feeUnlikely = CFeeRate(feeUnlikelyEst);
-
- // Clear the current block states
+ // Clear the current block state
feeStats.ClearCurrent(nBlockHeight);
- priStats.ClearCurrent(nBlockHeight);
// Repopulate the current block states
for (unsigned int i = 0; i < entries.size(); i++)
processBlockTx(nBlockHeight, entries[i]);
- // Update all exponential averages with the current block states
+ // Update all exponential averages with the current block state
feeStats.UpdateMovingAverages();
- priStats.UpdateMovingAverages();
LogPrint("estimatefee", "Blockpolicy after updating estimates for %u confirmed entries, new mempool map size %u\n",
entries.size(), mapMemPoolTxs.size());
@@ -522,7 +431,7 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget - 1;
- // If mempool is limiting txs , return at least the min fee from the mempool
+ // If mempool is limiting txs, return at least the min feerate from the mempool
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
if (minPoolFee > 0 && minPoolFee > median)
return CFeeRate(minPoolFee);
@@ -535,51 +444,38 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
double CBlockPolicyEstimator::estimatePriority(int confTarget)
{
- // Return failure if trying to analyze a target we're not tracking
- if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
- return -1;
-
- return priStats.EstimateMedianVal(confTarget, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
+ return -1;
}
double CBlockPolicyEstimator::estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool)
{
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget;
- // Return failure if trying to analyze a target we're not tracking
- if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms())
- return -1;
// If mempool is limiting txs, no priority txs are allowed
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
if (minPoolFee > 0)
return INF_PRIORITY;
- double median = -1;
- while (median < 0 && (unsigned int)confTarget <= priStats.GetMaxConfirms()) {
- median = priStats.EstimateMedianVal(confTarget++, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
- }
-
- if (answerFoundAtTarget)
- *answerFoundAtTarget = confTarget - 1;
-
- return median;
+ return -1;
}
void CBlockPolicyEstimator::Write(CAutoFile& fileout)
{
fileout << nBestSeenHeight;
feeStats.Write(fileout);
- priStats.Write(fileout);
}
-void CBlockPolicyEstimator::Read(CAutoFile& filein)
+void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion)
{
int nFileBestSeenHeight;
filein >> nFileBestSeenHeight;
feeStats.Read(filein);
- priStats.Read(filein);
nBestSeenHeight = nFileBestSeenHeight;
+ if (nFileVersion < 139900) {
+ TxConfirmStats priStats;
+ priStats.Read(filein);
+ }
}
FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee)
diff --git a/src/policy/fees.h b/src/policy/fees.h
index 2c1ac3b934..ea4c70e616 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin developers
+// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_POLICYESTIMATOR_H
@@ -19,60 +19,50 @@ class CTxMemPoolEntry;
class CTxMemPool;
/** \class CBlockPolicyEstimator
- * The BlockPolicyEstimator is used for estimating the fee or priority needed
+ * The BlockPolicyEstimator is used for estimating the feerate needed
* for a transaction to be included in a block within a certain number of
* blocks.
*
* At a high level the algorithm works by grouping transactions into buckets
- * based on having similar priorities or fees and then tracking how long it
+ * based on having similar feerates and then tracking how long it
* takes transactions in the various buckets to be mined. It operates under
- * the assumption that in general transactions of higher fee/priority will be
- * included in blocks before transactions of lower fee/priority. So for
- * example if you wanted to know what fee you should put on a transaction to
+ * the assumption that in general transactions of higher feerate will be
+ * included in blocks before transactions of lower feerate. So for
+ * example if you wanted to know what feerate you should put on a transaction to
* be included in a block within the next 5 blocks, you would start by looking
- * at the bucket with the highest fee transactions and verifying that a
+ * at the bucket with the highest feerate transactions and verifying that a
* sufficiently high percentage of them were confirmed within 5 blocks and
- * then you would look at the next highest fee bucket, and so on, stopping at
- * the last bucket to pass the test. The average fee of transactions in this
- * bucket will give you an indication of the lowest fee you can put on a
+ * then you would look at the next highest feerate bucket, and so on, stopping at
+ * the last bucket to pass the test. The average feerate of transactions in this
+ * bucket will give you an indication of the lowest feerate you can put on a
* transaction and still have a sufficiently high chance of being confirmed
* within your desired 5 blocks.
*
- * When a transaction enters the mempool or is included within a block we
- * decide whether it can be used as a data point for fee estimation, priority
- * estimation or neither. If the value of exactly one of those properties was
- * below the required minimum it can be used to estimate the other. In
- * addition, if a priori our estimation code would indicate that the
- * transaction would be much more quickly included in a block because of one
- * of the properties compared to the other, we can also decide to use it as
- * an estimate for that property.
- *
- * Here is a brief description of the implementation for fee estimation.
- * When a transaction that counts for fee estimation enters the mempool, we
+ * Here is a brief description of the implementation:
+ * When a transaction enters the mempool, we
* track the height of the block chain at entry. Whenever a block comes in,
- * we count the number of transactions in each bucket and the total amount of fee
+ * we count the number of transactions in each bucket and the total feerate
* paid in each bucket. Then we calculate how many blocks Y it took each
* transaction to be mined and we track an array of counters in each bucket
* for how long it took transactions to get confirmed from 1 to a max of 25
* and we increment all the counters from Y up to 25. This is because for any
* number Z>=Y the transaction was successfully mined within Z blocks. We
* want to save a history of this information, so at any time we have a
- * counter of the total number of transactions that happened in a given fee
+ * counter of the total number of transactions that happened in a given feerate
* bucket and the total number that were confirmed in each number 1-25 blocks
* or less for any bucket. We save this history by keeping an exponentially
* decaying moving average of each one of these stats. Furthermore we also
* keep track of the number of unmined (in mempool) transactions in each bucket
* and for how many blocks they have been outstanding and use that to increase
- * the number of transactions we've seen in that fee bucket when calculating
+ * the number of transactions we've seen in that feerate bucket when calculating
* an estimate for any number of confirmations below the number of blocks
* they've been outstanding.
*/
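A compressed standalone illustration of the bookkeeping described above, using toy sizes rather than the real 25-confirm, per-bucket TxConfirmStats arrays:

#include <cstdio>
#include <vector>

int main() {
    const unsigned maxConfirms = 5;         // the real tracker uses 25
    const double decay = 0.998;
    // confAvg[t-1]: decayed count of this bucket's txs confirmed within t blocks.
    std::vector<double> confAvg(maxConfirms, 0.0), curBlockConf(maxConfirms, 0.0);
    double txCtAvg = 0.0, curBlockTxCt = 0.0;

    auto record = [&](unsigned blocksToConfirm) {
        // Confirming within Y blocks implies confirming within Z blocks for every Z >= Y,
        // so all counters from Y up to the maximum are incremented.
        for (unsigned t = blocksToConfirm; t <= maxConfirms; ++t) curBlockConf[t - 1] += 1.0;
        curBlockTxCt += 1.0;
    };
    auto updateMovingAverages = [&]() {     // run once per connected block
        for (unsigned i = 0; i < maxConfirms; ++i) {
            confAvg[i] = confAvg[i] * decay + curBlockConf[i];
            curBlockConf[i] = 0.0;
        }
        txCtAvg = txCtAvg * decay + curBlockTxCt;
        curBlockTxCt = 0.0;
    };

    record(2); record(3); record(2);        // three txs from this bucket confirmed in the new block
    updateMovingAverages();
    for (unsigned t = 1; t <= maxConfirms; ++t)
        std::printf("confirmed within %u block(s): %.1f of %.1f tracked txs\n",
                    t, confAvg[t - 1], txCtAvg);
    return 0;
}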
/**
- * We will instantiate two instances of this class, one to track transactions
- * that were included in a block due to fee, and one for tx's included due to
- * priority. We will lump transactions into a bucket according to their approximate
- * fee or priority and then track how long it took for those txs to be included in a block
+ * We will instantiate an instance of this class to track transactions that were
+ * included in a block. We will lump transactions into a bucket according to their
+ * approximate feerate and then track how long it took for those txs to be included in a block
*
* The tracking of unconfirmed (mempool) transactions is completely independent of the
* historical tracking of transactions that have been confirmed in a block.
@@ -80,7 +70,7 @@ class CTxMemPool;
class TxConfirmStats
{
private:
- //Define the buckets we will group transactions into (both fee buckets and priority buckets)
+ //Define the buckets we will group transactions into
std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive)
std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
@@ -97,16 +87,15 @@ private:
// and calculate the totals for the current block to update the moving averages
std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X]
- // Sum the total priority/fee of all tx's in each bucket
+ // Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std::vector<double> avg;
// and calculate the total for the current block to update the moving average
std::vector<double> curBlockVal;
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
- // Combine the total value with the tx counts to calculate the avg fee/priority per bucket
+ // Combine the total value with the tx counts to calculate the avg feerate per bucket
- std::string dataTypeString;
double decay;
// Mempool counts of outstanding transactions
@@ -123,9 +112,8 @@ public:
* @param defaultBuckets contains the upper limits for the bucket boundaries
* @param maxConfirms max number of confirms to track
* @param decay how much to decay the historical moving average per block
- * @param dataTypeString for logging purposes
*/
- void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay, std::string dataTypeString);
+ void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay);
/** Clear the state of the curBlock variables to start counting for the new block */
void ClearCurrent(unsigned int nBlockHeight);
@@ -133,7 +121,7 @@ public:
/**
* Record a new transaction data point in the current block stats
* @param blocksToConfirm the number of blocks it took this transaction to confirm
- * @param val either the fee or the priority when entered of the transaction
+ * @param val the feerate of the transaction
* @warning blocksToConfirm is 1-based and has to be >= 1
*/
void Record(int blocksToConfirm, double val);
@@ -150,14 +138,14 @@ public:
void UpdateMovingAverages();
/**
- * Calculate a fee or priority estimate. Find the lowest value bucket (or range of buckets
+ * Calculate a feerate estimate. Find the lowest value bucket (or range of buckets
* to make sure we have enough data points) whose transactions still have sufficient likelihood
* of being confirmed within the target number of confirmations
* @param confTarget target number of confirmations
* @param sufficientTxVal required average number of transactions per block in a bucket range
* @param minSuccess the success probability we require
- * @param requireGreater return the lowest fee/pri such that all higher values pass minSuccess OR
- * return the highest fee/pri such that all lower values fail minSuccess
+ * @param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
+ * return the highest feerate such that all lower values fail minSuccess
* @param nBlockHeight the current block height
*/
double EstimateMedianVal(int confTarget, double sufficientTxVal,
@@ -184,35 +172,26 @@ static const unsigned int MAX_BLOCK_CONFIRMS = 25;
/** Decay of .998 is a half-life of 346 blocks or about 2.4 days */
static const double DEFAULT_DECAY = .998;
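The quoted half-life follows directly from the decay factor; a quick standalone check, assuming the usual ~10 minutes per block:

#include <cmath>
#include <cstdio>

int main() {
    const double decay = 0.998;
    double halfLifeBlocks = std::log(0.5) / std::log(decay);     // ~346.2 blocks
    double halfLifeDays = halfLifeBlocks * 10.0 / (60.0 * 24.0); // ~2.4 days at 10 min/block
    std::printf("half-life: %.1f blocks (~%.1f days)\n", halfLifeBlocks, halfLifeDays);
    return 0;
}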
-/** Require greater than 95% of X fee transactions to be confirmed within Y blocks for X to be big enough */
+/** Require greater than 95% of X feerate transactions to be confirmed within Y blocks for X to be big enough */
static const double MIN_SUCCESS_PCT = .95;
-static const double UNLIKELY_PCT = .5;
-/** Require an avg of 1 tx in the combined fee bucket per block to have stat significance */
+/** Require an avg of 1 tx in the combined feerate bucket per block to have stat significance */
static const double SUFFICIENT_FEETXS = 1;
-/** Require only an avg of 1 tx every 5 blocks in the combined pri bucket (way less pri txs) */
-static const double SUFFICIENT_PRITXS = .2;
-
-// Minimum and Maximum values for tracking fees and priorities
+// Minimum and Maximum values for tracking feerates
static const double MIN_FEERATE = 10;
static const double MAX_FEERATE = 1e7;
static const double INF_FEERATE = MAX_MONEY;
-static const double MIN_PRIORITY = 10;
-static const double MAX_PRIORITY = 1e16;
static const double INF_PRIORITY = 1e9 * MAX_MONEY;
-// We have to lump transactions into buckets based on fee or priority, but we want to be able
-// to give accurate estimates over a large range of potential fees and priorities
+// We have to lump transactions into buckets based on feerate, but we want to be able
+// to give accurate estimates over a large range of potential feerates
// Therefore it makes sense to exponentially space the buckets
/** Spacing of FeeRate buckets */
static const double FEE_SPACING = 1.1;
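A standalone sketch of what this spacing produces, mirroring the vfeelist construction loop in CBlockPolicyEstimator's constructor earlier in this patch:

#include <cstdio>

int main() {
    const double MIN_FEERATE = 10, MAX_FEERATE = 1e7, FEE_SPACING = 1.1;
    int count = 0;
    // Boundaries grow geometrically, so equal resolution is spent on relative
    // (not absolute) feerate differences.
    for (double b = MIN_FEERATE; b <= MAX_FEERATE; b *= FEE_SPACING) {
        if (count < 5) std::printf("bucket %d upper bound: %.2f sat/kB\n", count, b);
        ++count;
    }
    std::printf("... %d buckets in total, plus one catch-all INF_FEERATE bucket\n", count);
    return 0;
}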
-/** Spacing of Priority buckets */
-static const double PRI_SPACING = 2;
-
/**
- * We want to be able to estimate fees or priorities that are needed on tx's to be included in
+ * We want to be able to estimate feerates that are needed on tx's to be included in
* a certain number of blocks. Every time a block is added to the best chain, this class records
* stats on the transactions included in that block
*/
@@ -235,27 +214,26 @@ public:
/** Remove a transaction from the mempool tracking stats*/
void removeTx(uint256 hash);
- /** Is this transaction likely included in a block because of its fee?*/
- bool isFeeDataPoint(const CFeeRate &fee, double pri);
-
- /** Is this transaction likely included in a block because of its priority?*/
- bool isPriDataPoint(const CFeeRate &fee, double pri);
-
- /** Return a fee estimate */
+ /** Return a feerate estimate */
CFeeRate estimateFee(int confTarget);
- /** Estimate fee rate needed to get be included in a block within
+ /** Estimate feerate needed to be included in a block within
* confTarget blocks. If no answer can be given at confTarget, return an
* estimate at the lowest target where one can be given.
*/
CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool);
- /** Return a priority estimate */
+ /** Return a priority estimate.
+ * DEPRECATED
+ * Returns -1
+ */
double estimatePriority(int confTarget);
/** Estimate priority needed to be included in a block within
- * confTarget blocks. If no answer can be given at confTarget, return an
- * estimate at the lowest target where one can be given.
+ * confTarget blocks.
+ * DEPRECATED
+ * Returns -1 unless the mempool is currently limited, in which case it returns INF_PRIORITY
+ * answerFoundAtTarget is set to confTarget
*/
double estimateSmartPriority(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool);
@@ -263,29 +241,23 @@ public:
void Write(CAutoFile& fileout);
/** Read estimation data from a file */
- void Read(CAutoFile& filein);
+ void Read(CAutoFile& filein, int nFileVersion);
private:
CFeeRate minTrackedFee; //!< Passed to constructor to avoid dependency on main
- double minTrackedPriority; //!< Set to AllowFreeThreshold
unsigned int nBestSeenHeight;
struct TxStatsInfo
{
- TxConfirmStats *stats;
unsigned int blockHeight;
unsigned int bucketIndex;
- TxStatsInfo() : stats(NULL), blockHeight(0), bucketIndex(0) {}
+ TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
};
// map of txids to information about that transaction
std::map<uint256, TxStatsInfo> mapMemPoolTxs;
/** Classes to track historical data on transaction confirmations */
- TxConfirmStats feeStats, priStats;
-
- /** Breakpoints to help determine whether a transaction was confirmed by priority or Fee */
- CFeeRate feeLikely, feeUnlikely;
- double priLikely, priUnlikely;
+ TxConfirmStats feeStats;
};
class FeeFilterRounder
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index ae42b2bd74..a3eee474ab 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2016 The Bitcoin developers
+// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/policy/policy.h b/src/policy/policy.h
index 814e6c0b6f..764ee27806 100644
--- a/src/policy/policy.h
+++ b/src/policy/policy.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2016 The Bitcoin developers
+// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp
index 133cff611d..d9b47e71bb 100644
--- a/src/policy/rbf.cpp
+++ b/src/policy/rbf.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 The Bitcoin developers
+// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/policy/rbf.h b/src/policy/rbf.h
index 5a711dba07..139aec5760 100644
--- a/src/policy/rbf.h
+++ b/src/policy/rbf.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 The Bitcoin developers
+// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/primitives/block.h b/src/primitives/block.h
index 72dfed985a..d148aec1e0 100644
--- a/src/primitives/block.h
+++ b/src/primitives/block.h
@@ -36,7 +36,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(this->nVersion);
READWRITE(hashPrevBlock);
READWRITE(hashMerkleRoot);
@@ -92,7 +92,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(*(CBlockHeader*)this);
READWRITE(vtx);
}
@@ -137,8 +137,9 @@ struct CBlockLocator
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- if (!(nType & SER_GETHASH))
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH))
READWRITE(nVersion);
READWRITE(vHave);
}
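The mechanical change running through this patch is that SerializationOp no longer receives nType/nVersion as parameters and instead queries them from the stream. A self-contained sketch of that pattern with a mock stream (READWRITE and the real stream classes are not reproduced here):

#include <cstdio>

static const int SER_GETHASH = 1 << 2;   // illustrative; see serialize.h for the real enum

// Mock of the interface the new-style SerializationOp relies on: the stream
// itself knows its serialization type and version.
struct MockStream {
    int nType, nVersion;
    int GetType() const { return nType; }
    int GetVersion() const { return nVersion; }
};

template <typename Stream>
void LocatorSerializationOpSketch(Stream& s) {
    int nVersion = s.GetVersion();
    if (!(s.GetType() & SER_GETHASH))
        std::printf("would READWRITE(nVersion=%d)\n", nVersion);  // skipped when hashing
    std::printf("would READWRITE(vHave)\n");
}

int main() {
    MockStream disk{0, 70014};
    MockStream hashing{SER_GETHASH, 70014};
    LocatorSerializationOpSketch(disk);     // version field is serialized
    LocatorSerializationOpSketch(hashing);  // version field is skipped, as in CBlockLocator above
    return 0;
}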
diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp
index 4afbe99fd3..7acdac17f2 100644
--- a/src/primitives/transaction.cpp
+++ b/src/primitives/transaction.cpp
@@ -49,11 +49,6 @@ CTxOut::CTxOut(const CAmount& nValueIn, CScript scriptPubKeyIn)
scriptPubKey = scriptPubKeyIn;
}
-uint256 CTxOut::GetHash() const
-{
- return SerializeHash(*this);
-}
-
std::string CTxOut::ToString() const
{
return strprintf("CTxOut(nValue=%d.%08d, scriptPubKey=%s)", nValue / COIN, nValue % COIN, HexStr(scriptPubKey).substr(0, 30));
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 16c2e5c454..1d176e5d8c 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -28,7 +28,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(hash);
READWRITE(n);
}
@@ -104,7 +104,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(prevout);
READWRITE(*(CScriptBase*)(&scriptSig));
READWRITE(nSequence);
@@ -144,7 +144,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(nValue);
READWRITE(*(CScriptBase*)(&scriptPubKey));
}
@@ -160,8 +160,6 @@ public:
return (nValue == -1);
}
- uint256 GetHash() const;
-
CAmount GetDustThreshold(const CFeeRate &minRelayTxFee) const
{
// "Dust" is defined in terms of CTransaction::minRelayTxFee,
@@ -179,7 +177,7 @@ public:
if (scriptPubKey.IsUnspendable())
return 0;
- size_t nSize = GetSerializeSize(SER_DISK, 0);
+ size_t nSize = GetSerializeSize(*this, SER_DISK, 0);
int witnessversion = 0;
std::vector<unsigned char> witnessprogram;
@@ -221,7 +219,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
READWRITE(scriptWitness.stack);
}
@@ -257,7 +255,7 @@ public:
}
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
for (size_t n = 0; n < vtxinwit.size(); n++) {
READWRITE(vtxinwit[n]);
@@ -289,8 +287,8 @@ struct CMutableTransaction;
* - uint32_t nLockTime
*/
template<typename Stream, typename Operation, typename TxType>
-inline void SerializeTransaction(TxType& tx, Stream& s, Operation ser_action, int nType, int nVersion) {
- const bool fAllowWitness = !(nVersion & SERIALIZE_TRANSACTION_NO_WITNESS);
+inline void SerializeTransaction(TxType& tx, Stream& s, Operation ser_action) {
+ const bool fAllowWitness = !(s.GetVersion() & SERIALIZE_TRANSACTION_NO_WITNESS);
READWRITE(*const_cast<int32_t*>(&tx.nVersion));
unsigned char flags = 0;
@@ -387,8 +385,8 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- SerializeTransaction(*this, s, ser_action, nType, nVersion);
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ SerializeTransaction(*this, s, ser_action);
if (ser_action.ForRead()) {
UpdateHash();
}
@@ -458,8 +456,8 @@ struct CMutableTransaction
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- SerializeTransaction(*this, s, ser_action, nType, nVersion);
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ SerializeTransaction(*this, s, ser_action);
}
/** Compute the hash of this CMutableTransaction. This is computed on the
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 54ad62b1a2..87d6e06848 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -181,7 +181,11 @@ std::string CInv::GetCommand() const
std::string CInv::ToString() const
{
- return strprintf("%s %s", GetCommand(), hash.ToString());
+ try {
+ return strprintf("%s %s", GetCommand(), hash.ToString());
+ } catch(const std::out_of_range &) {
+ return strprintf("0x%08x %s", type, hash.ToString());
+ }
}
const std::vector<std::string> &getAllNetMessageTypes()
diff --git a/src/protocol.h b/src/protocol.h
index d19e0d3a5e..a52d9a67b0 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -48,7 +48,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
READWRITE(FLATDATA(pchMessageStart));
READWRITE(FLATDATA(pchCommand));
@@ -289,14 +289,15 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
if (ser_action.ForRead())
Init();
- if (nType & SER_DISK)
+ int nVersion = s.GetVersion();
+ if (s.GetType() & SER_DISK)
READWRITE(nVersion);
- if ((nType & SER_DISK) ||
- (nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH)))
+ if ((s.GetType() & SER_DISK) ||
+ (nVersion >= CADDR_TIME_VERSION && !(s.GetType() & SER_GETHASH)))
READWRITE(nTime);
uint64_t nServicesInt = nServices;
READWRITE(nServicesInt);
@@ -343,7 +344,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
READWRITE(type);
READWRITE(hash);
diff --git a/src/pubkey.h b/src/pubkey.h
index 3a554877f8..9499862210 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -116,19 +116,15 @@ public:
}
//! Implement serialization, as if this was a byte vector.
- unsigned int GetSerializeSize(int nType, int nVersion) const
- {
- return size() + 1;
- }
template <typename Stream>
- void Serialize(Stream& s, int nType, int nVersion) const
+ void Serialize(Stream& s) const
{
unsigned int len = size();
::WriteCompactSize(s, len);
s.write((char*)vch, len);
}
template <typename Stream>
- void Unserialize(Stream& s, int nType, int nVersion)
+ void Unserialize(Stream& s)
{
unsigned int len = ::ReadCompactSize(s);
if (len <= 65) {
@@ -214,12 +210,13 @@ struct CExtPubKey {
void Decode(const unsigned char code[BIP32_EXTKEY_SIZE]);
bool Derive(CExtPubKey& out, unsigned int nChild) const;
- unsigned int GetSerializeSize(int nType, int nVersion) const
+ void Serialize(CSizeComputer& s) const
{
- return BIP32_EXTKEY_SIZE+1; //add one byte for the size (compact int)
+ // Optimized implementation for ::GetSerializeSize that avoids copying.
+ s.seek(BIP32_EXTKEY_SIZE + 1); // add one byte for the size (compact int)
}
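The CSizeComputer overload above replaces the old GetSerializeSize(int, int) member: a size-only stream just advances a counter, and a type whose encoded length is known statically can seek() past it without serializing anything. A standalone sketch of that idea (not the real CSizeComputer):

#include <cstddef>
#include <cstdio>

// Minimal size-only "stream": write() and seek() only grow a counter.
struct SizeCounter {
    size_t nSize = 0;
    void write(const char*, size_t n) { nSize += n; }  // no data is ever copied
    void seek(size_t n) { nSize += n; }                // fast path: just claim n bytes
};

static const size_t EXTKEY_SIZE = 74;                  // 74 is BIP32_EXTKEY_SIZE in the real header

struct ExtPubKeySketch {
    // Generic path: actually walk the encoding (collapsed into two writes here).
    template <typename Stream> void Serialize(Stream& s) const {
        char compactSize = (char)EXTKEY_SIZE;
        s.write(&compactSize, 1);
        char body[EXTKEY_SIZE] = {};
        s.write(body, sizeof(body));
    }
    // Overload chosen for size computation: the length is known without copying.
    void Serialize(SizeCounter& s) const { s.seek(EXTKEY_SIZE + 1); }
};

int main() {
    SizeCounter s;
    ExtPubKeySketch key;
    key.Serialize(s);                                     // resolves to the SizeCounter overload
    std::printf("serialized size: %zu bytes\n", s.nSize); // 75
    return 0;
}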
template <typename Stream>
- void Serialize(Stream& s, int nType, int nVersion) const
+ void Serialize(Stream& s) const
{
unsigned int len = BIP32_EXTKEY_SIZE;
::WriteCompactSize(s, len);
@@ -228,7 +225,7 @@ struct CExtPubKey {
s.write((const char *)&code[0], len);
}
template <typename Stream>
- void Unserialize(Stream& s, int nType, int nVersion)
+ void Unserialize(Stream& s)
{
unsigned int len = ::ReadCompactSize(s);
unsigned char code[BIP32_EXTKEY_SIZE];
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 9986af4957..c828234f44 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -496,7 +496,7 @@ void BitcoinApplication::shutdownResult(int retval)
void BitcoinApplication::handleRunawayException(const QString &message)
{
QMessageBox::critical(0, "Runaway exception", BitcoinGUI::tr("A fatal error occurred. Bitcoin can no longer continue safely and will quit.") + QString("\n\n") + message);
- ::exit(1);
+ ::exit(EXIT_FAILURE);
}
WId BitcoinApplication::getMainWinId() const
@@ -573,13 +573,13 @@ int main(int argc, char *argv[])
{
HelpMessageDialog help(NULL, mapArgs.count("-version"));
help.showOrPrint();
- return 1;
+ return EXIT_SUCCESS;
}
/// 5. Now that settings and translations are available, ask user for data directory
// User language is set up: pick a data directory
if (!Intro::pickDataDirectory())
- return 0;
+ return EXIT_SUCCESS;
/// 6. Determine availability of data directory and parse bitcoin.conf
/// - Do not call GetDataDir(true) before this step finishes
@@ -587,14 +587,14 @@ int main(int argc, char *argv[])
{
QMessageBox::critical(0, QObject::tr(PACKAGE_NAME),
QObject::tr("Error: Specified data directory \"%1\" does not exist.").arg(QString::fromStdString(mapArgs["-datadir"])));
- return 1;
+ return EXIT_FAILURE;
}
try {
ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME), mapArgs, mapMultiArgs);
} catch (const std::exception& e) {
QMessageBox::critical(0, QObject::tr(PACKAGE_NAME),
QObject::tr("Error: Cannot parse configuration file: %1. Only use key=value syntax.").arg(e.what()));
- return false;
+ return EXIT_FAILURE;
}
/// 7. Determine network (and switch to network specific options)
@@ -608,7 +608,7 @@ int main(int argc, char *argv[])
SelectParams(ChainNameFromCommandLine());
} catch(std::exception &e) {
QMessageBox::critical(0, QObject::tr(PACKAGE_NAME), QObject::tr("Error: %1").arg(e.what()));
- return 1;
+ return EXIT_FAILURE;
}
#ifdef ENABLE_WALLET
// Parse URIs on command line -- this can affect Params()
@@ -630,7 +630,7 @@ int main(int argc, char *argv[])
// - Do this after creating app and setting up translations, so errors are
// translated properly.
if (PaymentServer::ipcSendCommandLine())
- exit(0);
+ exit(EXIT_SUCCESS);
// Start up the payment server early, too, so impatient users that click on
// bitcoin: links repeatedly have their payment requests routed to this process:
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index 87704c641d..f9caca6878 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -6,6 +6,7 @@
#include "bantablemodel.h"
#include "guiconstants.h"
+#include "guiutil.h"
#include "peertablemodel.h"
#include "chainparams.h"
@@ -208,7 +209,7 @@ QString ClientModel::formatClientStartupTime() const
QString ClientModel::dataDir() const
{
- return QString::fromStdString(GetDataDir().string());
+ return GUIUtil::boostPathToQString(GetDataDir());
}
void ClientModel::updateBanlist()
diff --git a/src/qt/forms/modaloverlay.ui b/src/qt/forms/modaloverlay.ui
index 27998f90c5..a37672ad53 100644
--- a/src/qt/forms/modaloverlay.ui
+++ b/src/qt/forms/modaloverlay.ui
@@ -130,7 +130,7 @@ QLabel { color: rgb(40,40,40); }</string>
<item>
<widget class="QLabel" name="infoText">
<property name="text">
- <string>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet. This means that recent transactions will not be visible, and the balance will not be up-to-date until this process has completed.</string>
+ <string>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. This information will be correct once your wallet has finished synchronizing with the bitcoin network, as detailed below.</string>
</property>
<property name="textFormat">
<enum>Qt::RichText</enum>
@@ -149,7 +149,7 @@ QLabel { color: rgb(40,40,40); }</string>
</font>
</property>
<property name="text">
- <string>Spending bitcoins may not be possible during that phase!</string>
+ <string>Attempting to spend bitcoins that are affected by not-yet-displayed transactions will not be accepted by the network.</string>
</property>
<property name="textFormat">
<enum>Qt::RichText</enum>
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 42dafa1175..9dc75c2e1a 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -291,17 +291,11 @@ void copyEntryData(QAbstractItemView *view, int column, int role)
}
}
-QVariant getEntryData(QAbstractItemView *view, int column, int role)
+QList<QModelIndex> getEntryData(QAbstractItemView *view, int column)
{
if(!view || !view->selectionModel())
- return QVariant();
- QModelIndexList selection = view->selectionModel()->selectedRows(column);
-
- if(!selection.isEmpty()) {
- // Return first item
- return (selection.at(0).data(role));
- }
- return QVariant();
+ return QList<QModelIndex>();
+ return view->selectionModel()->selectedRows(column);
}
QString getSaveFileName(QWidget *parent, const QString &caption, const QString &dir,
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index e28f68930f..64cbd51eb6 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -67,10 +67,9 @@ namespace GUIUtil
/** Return a field of the currently selected entry as a QString. Does nothing if nothing
is selected.
@param[in] column Data column to extract from the model
- @param[in] role Data role to extract from the model
@see TransactionView::copyLabel, TransactionView::copyAmount, TransactionView::copyAddress
*/
- QVariant getEntryData(QAbstractItemView *view, int column, int role);
+ QList<QModelIndex> getEntryData(QAbstractItemView *view, int column);
void setClipboard(const QString& str);
diff --git a/src/qt/paymentrequestplus.h b/src/qt/paymentrequestplus.h
index a73fe5f29d..a8bfcd2ac4 100644
--- a/src/qt/paymentrequestplus.h
+++ b/src/qt/paymentrequestplus.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2015 The Bitcoin developers
+// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp
index 9f23e77a13..478f5ccf12 100644
--- a/src/qt/paymentserver.cpp
+++ b/src/qt/paymentserver.cpp
@@ -80,7 +80,7 @@ static QString ipcServerName()
// Append a simple hash of the datadir
// Note that GetDataDir(true) returns a different path
// for -testnet versus main net
- QString ddir(QString::fromStdString(GetDataDir(true).string()));
+ QString ddir(GUIUtil::boostPathToQString(GetDataDir(true)));
name.append(QString::number(qHash(ddir)));
return name;
diff --git a/src/qt/recentrequeststablemodel.h b/src/qt/recentrequeststablemodel.h
index 0193e748d7..8ee2c9cbac 100644
--- a/src/qt/recentrequeststablemodel.h
+++ b/src/qt/recentrequeststablemodel.h
@@ -27,7 +27,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
unsigned int nDate = date.toTime_t();
READWRITE(this->nVersion);
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index f10dddf589..a9fef731e1 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -343,7 +343,6 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) :
ui(new Ui::RPCConsole),
clientModel(0),
historyPtr(0),
- cachedNodeid(-1),
platformStyle(_platformStyle),
peersTableContextMenu(0),
banTableContextMenu(0),
@@ -469,7 +468,7 @@ void RPCConsole::setClientModel(ClientModel *model)
ui->peerWidget->verticalHeader()->hide();
ui->peerWidget->setEditTriggers(QAbstractItemView::NoEditTriggers);
ui->peerWidget->setSelectionBehavior(QAbstractItemView::SelectRows);
- ui->peerWidget->setSelectionMode(QAbstractItemView::SingleSelection);
+ ui->peerWidget->setSelectionMode(QAbstractItemView::ExtendedSelection);
ui->peerWidget->setContextMenuPolicy(Qt::CustomContextMenu);
ui->peerWidget->setColumnWidth(PeerTableModel::Address, ADDRESS_COLUMN_WIDTH);
ui->peerWidget->setColumnWidth(PeerTableModel::Subversion, SUBVERSION_COLUMN_WIDTH);
@@ -477,11 +476,11 @@ void RPCConsole::setClientModel(ClientModel *model)
ui->peerWidget->horizontalHeader()->setStretchLastSection(true);
// create peer table context menu actions
- QAction* disconnectAction = new QAction(tr("&Disconnect Node"), this);
- QAction* banAction1h = new QAction(tr("Ban Node for") + " " + tr("1 &hour"), this);
- QAction* banAction24h = new QAction(tr("Ban Node for") + " " + tr("1 &day"), this);
- QAction* banAction7d = new QAction(tr("Ban Node for") + " " + tr("1 &week"), this);
- QAction* banAction365d = new QAction(tr("Ban Node for") + " " + tr("1 &year"), this);
+ QAction* disconnectAction = new QAction(tr("&Disconnect"), this);
+ QAction* banAction1h = new QAction(tr("Ban for") + " " + tr("1 &hour"), this);
+ QAction* banAction24h = new QAction(tr("Ban for") + " " + tr("1 &day"), this);
+ QAction* banAction7d = new QAction(tr("Ban for") + " " + tr("1 &week"), this);
+ QAction* banAction365d = new QAction(tr("Ban for") + " " + tr("1 &year"), this);
// create peer table context menu
peersTableContextMenu = new QMenu();
@@ -514,7 +513,9 @@ void RPCConsole::setClientModel(ClientModel *model)
this, SLOT(peerSelected(const QItemSelection &, const QItemSelection &)));
// peer table signal handling - update peer details when new nodes are added to the model
connect(model->getPeerTableModel(), SIGNAL(layoutChanged()), this, SLOT(peerLayoutChanged()));
-
+ // peer table signal handling - cache selected node ids
+ connect(model->getPeerTableModel(), SIGNAL(layoutAboutToBeChanged()), this, SLOT(peerLayoutAboutToChange()));
+
// set up ban table
ui->banlistWidget->setModel(model->getBanTableModel());
ui->banlistWidget->verticalHeader()->hide();
@@ -527,7 +528,7 @@ void RPCConsole::setClientModel(ClientModel *model)
ui->banlistWidget->horizontalHeader()->setStretchLastSection(true);
// create ban table context menu action
- QAction* unbanAction = new QAction(tr("&Unban Node"), this);
+ QAction* unbanAction = new QAction(tr("&Unban"), this);
// create ban table context menu
banTableContextMenu = new QMenu();
@@ -825,6 +826,17 @@ void RPCConsole::peerSelected(const QItemSelection &selected, const QItemSelecti
updateNodeDetail(stats);
}
+void RPCConsole::peerLayoutAboutToChange()
+{
+ QModelIndexList selected = ui->peerWidget->selectionModel()->selectedIndexes();
+ cachedNodeids.clear();
+ for(int i = 0; i < selected.size(); i++)
+ {
+ const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.at(i).row());
+ cachedNodeids.append(stats->nodeStats.nodeid);
+ }
+}
+
void RPCConsole::peerLayoutChanged()
{
if (!clientModel || !clientModel->getPeerTableModel())
@@ -834,7 +846,7 @@ void RPCConsole::peerLayoutChanged()
bool fUnselect = false;
bool fReselect = false;
- if (cachedNodeid == -1) // no node selected yet
+ if (cachedNodeids.empty()) // no node selected yet
return;
// find the currently selected row
@@ -846,7 +858,7 @@ void RPCConsole::peerLayoutChanged()
// check if our detail node has a row in the table (it may not necessarily
// be at selectedRow since its position can change after a layout change)
- int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeid);
+ int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.first());
if (detailNodeRow < 0)
{
@@ -872,7 +884,10 @@ void RPCConsole::peerLayoutChanged()
if (fReselect)
{
- ui->peerWidget->selectRow(detailNodeRow);
+ for(int i = 0; i < cachedNodeids.size(); i++)
+ {
+ ui->peerWidget->selectRow(clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.at(i)));
+ }
}
if (stats)
@@ -881,9 +896,6 @@ void RPCConsole::peerLayoutChanged()
void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
{
- // Update cached nodeid
- cachedNodeid = stats->nodeStats.nodeid;
-
// update the detail ui with latest node information
QString peerAddrDetails(QString::fromStdString(stats->nodeStats.addrName) + " ");
peerAddrDetails += tr("(node id: %1)").arg(QString::number(stats->nodeStats.nodeid));
@@ -973,33 +985,44 @@ void RPCConsole::disconnectSelectedNode()
{
if(!g_connman)
return;
- // Get currently selected peer address
- NodeId id = GUIUtil::getEntryData(ui->peerWidget, 0, PeerTableModel::NetNodeId).toInt();
- // Find the node, disconnect it and clear the selected node
- if(g_connman->DisconnectNode(id))
- clearSelectedNode();
+
+ // Get selected peer addresses
+ QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, 0);
+ for(int i = 0; i < nodes.count(); i++)
+ {
+ // Get currently selected peer address
+ NodeId id = nodes.at(i).data(PeerTableModel::NetNodeId).toInt();
+ // Find the node, disconnect it and clear the selected node
+ if(g_connman->DisconnectNode(id))
+ clearSelectedNode();
+ }
}
void RPCConsole::banSelectedNode(int bantime)
{
if (!clientModel || !g_connman)
return;
-
- if(cachedNodeid == -1)
- return;
-
- // Get currently selected peer address
- int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeid);
- if(detailNodeRow < 0)
- return;
-
- // Find possible nodes, ban it and clear the selected node
- const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
- if(stats) {
- g_connman->Ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime);
- clearSelectedNode();
- clientModel->getBanTableModel()->refresh();
+
+ // Get selected peer addresses
+ QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, 0);
+ for(int i = 0; i < nodes.count(); i++)
+ {
+ // Get currently selected peer address
+ NodeId id = nodes.at(i).data(PeerTableModel::NetNodeId).toInt();
+
+ // Find the row of this node in the peer table
+ int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(id);
+ if(detailNodeRow < 0)
+ return;
+
+ // Look up the node's stats and ban its address
+ const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
+ if(stats) {
+ g_connman->Ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime);
+ }
}
+ clearSelectedNode();
+ clientModel->getBanTableModel()->refresh();
}
void RPCConsole::unbanSelectedNode()
@@ -1007,22 +1030,27 @@ void RPCConsole::unbanSelectedNode()
if (!clientModel)
return;
- // Get currently selected ban address
- QString strNode = GUIUtil::getEntryData(ui->banlistWidget, 0, BanTableModel::Address).toString();
- CSubNet possibleSubnet;
-
- LookupSubNet(strNode.toStdString().c_str(), possibleSubnet);
- if (possibleSubnet.IsValid() && g_connman)
+ // Get selected ban addresses
+ QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->banlistWidget, 0);
+ for(int i = 0; i < nodes.count(); i++)
{
- g_connman->Unban(possibleSubnet);
- clientModel->getBanTableModel()->refresh();
+ // Get currently selected ban address
+ QString strNode = nodes.at(i).data(BanTableModel::Address).toString();
+ CSubNet possibleSubnet;
+
+ LookupSubNet(strNode.toStdString().c_str(), possibleSubnet);
+ if (possibleSubnet.IsValid() && g_connman)
+ {
+ g_connman->Unban(possibleSubnet);
+ clientModel->getBanTableModel()->refresh();
+ }
}
}
void RPCConsole::clearSelectedNode()
{
ui->peerWidget->selectionModel()->clearSelection();
- cachedNodeid = -1;
+ cachedNodeids.clear();
ui->detailWidget->hide();
ui->peerHeading->setText(tr("Select a peer to view detailed information."));
}
diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h
index 50224a1cc0..8e1d878ae5 100644
--- a/src/qt/rpcconsole.h
+++ b/src/qt/rpcconsole.h
@@ -98,6 +98,8 @@ public Q_SLOTS:
void scrollToEnd();
/** Handle selection of peer in peers list */
void peerSelected(const QItemSelection &selected, const QItemSelection &deselected);
+ /** Handle selection caching before update */
+ void peerLayoutAboutToChange();
/** Handle updated peer information */
void peerLayoutChanged();
/** Disconnect a selected node on the Peers tab */
@@ -135,7 +137,7 @@ private:
ClientModel *clientModel;
QStringList history;
int historyPtr;
- NodeId cachedNodeid;
+ QList<NodeId> cachedNodeids;
const PlatformStyle *platformStyle;
RPCTimerInterface *rpcTimerInterface;
QMenu *peersTableContextMenu;
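The peer-table slots above now act on every selected row instead of a single cached node id, and the new peerLayoutAboutToChange() slot is what repopulates cachedNodeids before the model is rebuilt. Its body is not part of this hunk; the following is only a sketch of what such a handler needs to do, reusing the getNodeStats() accessor already seen in rpcconsole.cpp:

    // Hedged sketch (not necessarily the exact implementation in this patch):
    // remember the node ids of the currently selected rows so that
    // peerLayoutChanged() can reselect them after the peer table is rebuilt.
    void RPCConsole::peerLayoutAboutToChange()
    {
        QModelIndexList selected = ui->peerWidget->selectionModel()->selectedIndexes();
        cachedNodeids.clear();
        for (int i = 0; i < selected.size(); i++)
        {
            const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.at(i).row());
            cachedNodeids.append(stats->nodeStats.nodeid);
        }
    }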
diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h
index 6a5670e378..eedf6e8cea 100644
--- a/src/qt/walletmodel.h
+++ b/src/qt/walletmodel.h
@@ -65,7 +65,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
std::string sAddress = address.toStdString();
std::string sLabel = label.toStdString();
std::string sMessage = message.toStdString();
diff --git a/src/rest.cpp b/src/rest.cpp
index b8b5420626..90cca6f480 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -50,7 +50,7 @@ struct CCoin {
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
READWRITE(nTxVer);
READWRITE(nHeight);
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index be0776ea22..8824898feb 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -132,8 +132,8 @@ UniValue generateBlocks(boost::shared_ptr<CReserveScript> coinbaseScript, int nG
continue;
}
CValidationState state;
- if (!ProcessNewBlock(state, Params(), NULL, pblock, true, NULL))
- throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted");
+ if (!ProcessNewBlock(state, Params(), NULL, pblock, true, NULL, false))
+ throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("ProcessNewBlock: block not accepted: %s", FormatStateMessage(state)));
++nHeight;
blockHashes.push_back(pblock->GetHash().GetHex());
@@ -757,7 +757,7 @@ UniValue submitblock(const JSONRPCRequest& request)
CValidationState state;
submitblock_StateCatcher sc(block.GetHash());
RegisterValidationInterface(&sc);
- bool fAccepted = ProcessNewBlock(state, Params(), NULL, &block, true, NULL);
+ bool fAccepted = ProcessNewBlock(state, Params(), NULL, &block, true, NULL, false);
UnregisterValidationInterface(&sc);
if (fBlockPresent)
{
@@ -810,7 +810,7 @@ UniValue estimatepriority(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw runtime_error(
"estimatepriority nblocks\n"
- "\nEstimates the approximate priority a zero-fee transaction needs to begin\n"
+ "\nDEPRECATED. Estimates the approximate priority a zero-fee transaction needs to begin\n"
"confirmation within nblocks blocks.\n"
"\nArguments:\n"
"1. nblocks (numeric)\n"
@@ -873,7 +873,7 @@ UniValue estimatesmartpriority(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw runtime_error(
"estimatesmartpriority nblocks\n"
- "\nWARNING: This interface is unstable and may disappear or change!\n"
+ "\nDEPRECATED. WARNING: This interface is unstable and may disappear or change!\n"
"\nEstimates the approximate priority a zero-fee transaction needs to begin\n"
"confirmation within nblocks blocks if possible and return the number of blocks\n"
"for which the estimate is valid.\n"
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index 850ecffb0e..3193985803 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -450,10 +450,53 @@ UniValue setmocktime(const JSONRPCRequest& request)
return NullUniValue;
}
+static UniValue RPCLockedMemoryInfo()
+{
+ LockedPool::Stats stats = LockedPoolManager::Instance().stats();
+ UniValue obj(UniValue::VOBJ);
+ obj.push_back(Pair("used", uint64_t(stats.used)));
+ obj.push_back(Pair("free", uint64_t(stats.free)));
+ obj.push_back(Pair("total", uint64_t(stats.total)));
+ obj.push_back(Pair("locked", uint64_t(stats.locked)));
+ obj.push_back(Pair("chunks_used", uint64_t(stats.chunks_used)));
+ obj.push_back(Pair("chunks_free", uint64_t(stats.chunks_free)));
+ return obj;
+}
+
+UniValue getmemoryinfo(const JSONRPCRequest& request)
+{
+ /* Please avoid using the word "pool" here in the RPC interface or help,
+ * as users will undoubtedly confuse it with the other "memory pool"
+ */
+ if (request.fHelp || request.params.size() != 0)
+ throw runtime_error(
+ "getmemoryinfo\n"
+ "Returns an object containing information about memory usage.\n"
+ "\nResult:\n"
+ "{\n"
+ " \"locked\": { (json object) Information about locked memory manager\n"
+ " \"used\": xxxxx, (numeric) Number of bytes used\n"
+ " \"free\": xxxxx, (numeric) Number of bytes available in current arenas\n"
+ " \"total\": xxxxxxx, (numeric) Total number of bytes managed\n"
+ " \"locked\": xxxxxx, (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n"
+ " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n"
+ " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n"
+ " }\n"
+ "}\n"
+ "\nExamples:\n"
+ + HelpExampleCli("getmemoryinfo", "")
+ + HelpExampleRpc("getmemoryinfo", "")
+ );
+ UniValue obj(UniValue::VOBJ);
+ obj.push_back(Pair("locked", RPCLockedMemoryInfo()));
+ return obj;
+}
+
static const CRPCCommand commands[] =
{ // category name actor (function) okSafeMode
// --------------------- ------------------------ ----------------------- ----------
{ "control", "getinfo", &getinfo, true }, /* uses wallet if enabled */
+ { "control", "getmemoryinfo", &getmemoryinfo, true },
{ "util", "validateaddress", &validateaddress, true }, /* uses wallet if enabled */
{ "util", "createmultisig", &createmultisig, true },
{ "util", "verifymessage", &verifymessage, true },
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 2b43f08f0b..bd68abdbb8 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -331,7 +331,7 @@ UniValue getnettotals(const JSONRPCRequest& request)
"{\n"
" \"totalbytesrecv\": n, (numeric) Total bytes received\n"
" \"totalbytessent\": n, (numeric) Total bytes sent\n"
- " \"timemillis\": t, (numeric) Total cpu time\n"
+ " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n"
" \"uploadtarget\":\n"
" {\n"
" \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n"
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 29d0bee1b2..164e0f00e2 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -147,6 +147,8 @@ uint256 ParseHashV(const UniValue& v, string strName)
strHex = v.get_str();
if (!IsHex(strHex)) // Note: IsHex("") is false
throw JSONRPCError(RPC_INVALID_PARAMETER, strName+" must be hexadecimal string (not '"+strHex+"')");
+ if (64 != strHex.length())
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be of length %d (not %d)", strName, 64, strHex.length()));
uint256 result;
result.SetHex(strHex);
return result;
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index b629f4278b..069ac55bfb 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -23,7 +23,7 @@ public:
m_remaining(txToLen)
{}
- TxInputStream& read(char* pch, size_t nSize)
+ void read(char* pch, size_t nSize)
{
if (nSize > m_remaining)
throw std::ios_base::failure(std::string(__func__) + ": end of data");
@@ -37,16 +37,17 @@ public:
memcpy(pch, m_data, nSize);
m_remaining -= nSize;
m_data += nSize;
- return *this;
}
template<typename T>
TxInputStream& operator>>(T& obj)
{
- ::Unserialize(*this, obj, m_type, m_version);
+ ::Unserialize(*this, obj);
return *this;
}
+ int GetVersion() const { return m_version; }
+ int GetType() const { return m_type; }
private:
const int m_type;
const int m_version;
@@ -69,17 +70,26 @@ struct ECCryptoClosure
ECCryptoClosure instance_of_eccryptoclosure;
}
+/** Check that all specified flags are part of the libconsensus interface. */
+static bool verify_flags(unsigned int flags)
+{
+ return (flags & ~(bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL)) == 0;
+}
+
static int verify_script(const unsigned char *scriptPubKey, unsigned int scriptPubKeyLen, CAmount amount,
const unsigned char *txTo , unsigned int txToLen,
unsigned int nIn, unsigned int flags, bitcoinconsensus_error* err)
{
+ if (!verify_flags(flags)) {
+ return bitcoinconsensus_ERR_INVALID_FLAGS;
+ }
try {
TxInputStream stream(SER_NETWORK, PROTOCOL_VERSION, txTo, txToLen);
CTransaction tx;
stream >> tx;
if (nIn >= tx.vin.size())
return set_error(err, bitcoinconsensus_ERR_TX_INDEX);
- if (tx.GetSerializeSize(SER_NETWORK, PROTOCOL_VERSION) != txToLen)
+ if (GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) != txToLen)
return set_error(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH);
// Regardless of the verification result, the tx did not error.
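With the nType/nVersion arguments gone from ::Serialize/::Unserialize, the type and version ride along on the stream object itself; TxInputStream above shows the minimal interface a custom stream must now provide. A generic sketch of that contract (the class name and members here are illustrative, not part of the patch):

    // Minimal input stream usable with the new ::Unserialize(s, obj) convention:
    // raw data access via read(), plus GetType()/GetVersion() so that
    // version-dependent serializers can still query the stream.
    #include <cstring>
    #include <ios>
    #include "serialize.h"

    class RawInputStream
    {
    public:
        RawInputStream(int type, int version, const unsigned char* data, size_t len)
            : m_type(type), m_version(version), m_data(data), m_remaining(len) {}

        void read(char* pch, size_t nSize)
        {
            if (nSize > m_remaining)
                throw std::ios_base::failure("RawInputStream::read: end of data");
            memcpy(pch, m_data, nSize);
            m_data += nSize;
            m_remaining -= nSize;
        }

        template<typename T>
        RawInputStream& operator>>(T& obj)
        {
            ::Unserialize(*this, obj); // dispatches on the stream, not on extra arguments
            return *this;
        }

        int GetType() const { return m_type; }
        int GetVersion() const { return m_version; }

    private:
        const int m_type;
        const int m_version;
        const unsigned char* m_data;
        size_t m_remaining;
    };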
diff --git a/src/script/bitcoinconsensus.h b/src/script/bitcoinconsensus.h
index 1d2d5c23e4..1bef4fe9e9 100644
--- a/src/script/bitcoinconsensus.h
+++ b/src/script/bitcoinconsensus.h
@@ -42,6 +42,7 @@ typedef enum bitcoinconsensus_error_t
bitcoinconsensus_ERR_TX_SIZE_MISMATCH,
bitcoinconsensus_ERR_TX_DESERIALIZE,
bitcoinconsensus_ERR_AMOUNT_REQUIRED,
+ bitcoinconsensus_ERR_INVALID_FLAGS,
} bitcoinconsensus_error;
/** Script verification flags */
@@ -54,6 +55,9 @@ enum
bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY = (1U << 9), // enable CHECKLOCKTIMEVERIFY (BIP65)
bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY = (1U << 10), // enable CHECKSEQUENCEVERIFY (BIP112)
bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS = (1U << 11), // enable WITNESS (BIP141)
+ bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL = bitcoinconsensus_SCRIPT_FLAGS_VERIFY_P2SH | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_DERSIG |
+ bitcoinconsensus_SCRIPT_FLAGS_VERIFY_NULLDUMMY | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY |
+ bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS
};
/// Returns 1 if the input nIn of the serialized transaction pointed to by
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 0e17ddc130..a6403f9363 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1069,7 +1069,7 @@ public:
/** Serialize the passed scriptCode, skipping OP_CODESEPARATORs */
template<typename S>
- void SerializeScriptCode(S &s, int nType, int nVersion) const {
+ void SerializeScriptCode(S &s) const {
CScript::const_iterator it = scriptCode.begin();
CScript::const_iterator itBegin = it;
opcodetype opcode;
@@ -1092,53 +1092,53 @@ public:
/** Serialize an input of txTo */
template<typename S>
- void SerializeInput(S &s, unsigned int nInput, int nType, int nVersion) const {
+ void SerializeInput(S &s, unsigned int nInput) const {
// In case of SIGHASH_ANYONECANPAY, only the input being signed is serialized
if (fAnyoneCanPay)
nInput = nIn;
// Serialize the prevout
- ::Serialize(s, txTo.vin[nInput].prevout, nType, nVersion);
+ ::Serialize(s, txTo.vin[nInput].prevout);
// Serialize the script
if (nInput != nIn)
// Blank out other inputs' signatures
- ::Serialize(s, CScriptBase(), nType, nVersion);
+ ::Serialize(s, CScriptBase());
else
- SerializeScriptCode(s, nType, nVersion);
+ SerializeScriptCode(s);
// Serialize the nSequence
if (nInput != nIn && (fHashSingle || fHashNone))
// let the others update at will
- ::Serialize(s, (int)0, nType, nVersion);
+ ::Serialize(s, (int)0);
else
- ::Serialize(s, txTo.vin[nInput].nSequence, nType, nVersion);
+ ::Serialize(s, txTo.vin[nInput].nSequence);
}
/** Serialize an output of txTo */
template<typename S>
- void SerializeOutput(S &s, unsigned int nOutput, int nType, int nVersion) const {
+ void SerializeOutput(S &s, unsigned int nOutput) const {
if (fHashSingle && nOutput != nIn)
// Do not lock-in the txout payee at other indices as txin
- ::Serialize(s, CTxOut(), nType, nVersion);
+ ::Serialize(s, CTxOut());
else
- ::Serialize(s, txTo.vout[nOutput], nType, nVersion);
+ ::Serialize(s, txTo.vout[nOutput]);
}
/** Serialize txTo */
template<typename S>
- void Serialize(S &s, int nType, int nVersion) const {
+ void Serialize(S &s) const {
// Serialize nVersion
- ::Serialize(s, txTo.nVersion, nType, nVersion);
+ ::Serialize(s, txTo.nVersion);
// Serialize vin
unsigned int nInputs = fAnyoneCanPay ? 1 : txTo.vin.size();
::WriteCompactSize(s, nInputs);
for (unsigned int nInput = 0; nInput < nInputs; nInput++)
- SerializeInput(s, nInput, nType, nVersion);
+ SerializeInput(s, nInput);
// Serialize vout
unsigned int nOutputs = fHashNone ? 0 : (fHashSingle ? nIn+1 : txTo.vout.size());
::WriteCompactSize(s, nOutputs);
for (unsigned int nOutput = 0; nOutput < nOutputs; nOutput++)
- SerializeOutput(s, nOutput, nType, nVersion);
+ SerializeOutput(s, nOutput);
// Serialize nLockTime
- ::Serialize(s, txTo.nLockTime, nType, nVersion);
+ ::Serialize(s, txTo.nLockTime);
}
};
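For context, this serializer is only ever driven through a hashing stream, and with the argument-free Serialize() the call site reduces to plain streaming. A sketch of the SignatureHash-style usage, assuming the CTransactionSignatureSerializer constructor earlier in this file and the usual CHashWriter from hash.h:

    // Sketch: hash the custom-serialized transaction plus the sighash type.
    // operator<< now ends up in obj.Serialize(s), with no nType/nVersion arguments.
    CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType);
    CHashWriter ss(SER_GETHASH, 0);
    ss << txTmp << nHashType;
    uint256 sighash = ss.GetHash();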
diff --git a/src/serialize.h b/src/serialize.h
index 1f51da82ff..91864e1b64 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -151,6 +151,8 @@ inline float ser_uint32_to_float(uint32_t y)
// i.e. anything that supports .read(char*, size_t) and .write(char*, size_t)
//
+class CSizeComputer;
+
enum
{
// primary actions
@@ -159,7 +161,8 @@ enum
SER_GETHASH = (1 << 2),
};
-#define READWRITE(obj) (::SerReadWrite(s, (obj), nType, nVersion, ser_action))
+#define READWRITE(obj) (::SerReadWrite(s, (obj), ser_action))
+#define READWRITEMANY(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__))
/**
* Implement three methods for serializable objects. These are actually wrappers over
@@ -167,63 +170,42 @@ enum
* code. Adding "ADD_SERIALIZE_METHODS" in the body of the class causes these wrappers to be
* added as members.
*/
-#define ADD_SERIALIZE_METHODS \
- size_t GetSerializeSize(int nType, int nVersion) const { \
- CSizeComputer s(nType, nVersion); \
- NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize(), nType, nVersion);\
- return s.size(); \
- } \
- template<typename Stream> \
- void Serialize(Stream& s, int nType, int nVersion) const { \
- NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize(), nType, nVersion);\
- } \
- template<typename Stream> \
- void Unserialize(Stream& s, int nType, int nVersion) { \
- SerializationOp(s, CSerActionUnserialize(), nType, nVersion); \
+#define ADD_SERIALIZE_METHODS \
+ template<typename Stream> \
+ void Serialize(Stream& s) const { \
+ NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \
+ } \
+ template<typename Stream> \
+ void Unserialize(Stream& s) { \
+ SerializationOp(s, CSerActionUnserialize()); \
}
-/*
- * Basic Types
- */
-inline unsigned int GetSerializeSize(char a, int, int=0) { return 1; }
-inline unsigned int GetSerializeSize(int8_t a, int, int=0) { return 1; }
-inline unsigned int GetSerializeSize(uint8_t a, int, int=0) { return 1; }
-inline unsigned int GetSerializeSize(int16_t a, int, int=0) { return 2; }
-inline unsigned int GetSerializeSize(uint16_t a, int, int=0) { return 2; }
-inline unsigned int GetSerializeSize(int32_t a, int, int=0) { return 4; }
-inline unsigned int GetSerializeSize(uint32_t a, int, int=0) { return 4; }
-inline unsigned int GetSerializeSize(int64_t a, int, int=0) { return 8; }
-inline unsigned int GetSerializeSize(uint64_t a, int, int=0) { return 8; }
-inline unsigned int GetSerializeSize(float a, int, int=0) { return 4; }
-inline unsigned int GetSerializeSize(double a, int, int=0) { return 8; }
-
-template<typename Stream> inline void Serialize(Stream& s, char a, int, int=0) { ser_writedata8(s, a); } // TODO Get rid of bare char
-template<typename Stream> inline void Serialize(Stream& s, int8_t a, int, int=0) { ser_writedata8(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, uint8_t a, int, int=0) { ser_writedata8(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, int16_t a, int, int=0) { ser_writedata16(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, uint16_t a, int, int=0) { ser_writedata16(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, int32_t a, int, int=0) { ser_writedata32(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, uint32_t a, int, int=0) { ser_writedata32(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, int64_t a, int, int=0) { ser_writedata64(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, uint64_t a, int, int=0) { ser_writedata64(s, a); }
-template<typename Stream> inline void Serialize(Stream& s, float a, int, int=0) { ser_writedata32(s, ser_float_to_uint32(a)); }
-template<typename Stream> inline void Serialize(Stream& s, double a, int, int=0) { ser_writedata64(s, ser_double_to_uint64(a)); }
-
-template<typename Stream> inline void Unserialize(Stream& s, char& a, int, int=0) { a = ser_readdata8(s); } // TODO Get rid of bare char
-template<typename Stream> inline void Unserialize(Stream& s, int8_t& a, int, int=0) { a = ser_readdata8(s); }
-template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a, int, int=0) { a = ser_readdata8(s); }
-template<typename Stream> inline void Unserialize(Stream& s, int16_t& a, int, int=0) { a = ser_readdata16(s); }
-template<typename Stream> inline void Unserialize(Stream& s, uint16_t& a, int, int=0) { a = ser_readdata16(s); }
-template<typename Stream> inline void Unserialize(Stream& s, int32_t& a, int, int=0) { a = ser_readdata32(s); }
-template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a, int, int=0) { a = ser_readdata32(s); }
-template<typename Stream> inline void Unserialize(Stream& s, int64_t& a, int, int=0) { a = ser_readdata64(s); }
-template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a, int, int=0) { a = ser_readdata64(s); }
-template<typename Stream> inline void Unserialize(Stream& s, float& a, int, int=0) { a = ser_uint32_to_float(ser_readdata32(s)); }
-template<typename Stream> inline void Unserialize(Stream& s, double& a, int, int=0) { a = ser_uint64_to_double(ser_readdata64(s)); }
-
-inline unsigned int GetSerializeSize(bool a, int, int=0) { return sizeof(char); }
-template<typename Stream> inline void Serialize(Stream& s, bool a, int, int=0) { char f=a; ser_writedata8(s, f); }
-template<typename Stream> inline void Unserialize(Stream& s, bool& a, int, int=0) { char f=ser_readdata8(s); a=f; }
+template<typename Stream> inline void Serialize(Stream& s, char a ) { ser_writedata8(s, a); } // TODO Get rid of bare char
+template<typename Stream> inline void Serialize(Stream& s, int8_t a ) { ser_writedata8(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, uint8_t a ) { ser_writedata8(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, int16_t a ) { ser_writedata16(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, uint16_t a) { ser_writedata16(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, int32_t a ) { ser_writedata32(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, uint32_t a) { ser_writedata32(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_writedata64(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); }
+template<typename Stream> inline void Serialize(Stream& s, float a ) { ser_writedata32(s, ser_float_to_uint32(a)); }
+template<typename Stream> inline void Serialize(Stream& s, double a ) { ser_writedata64(s, ser_double_to_uint64(a)); }
+
+template<typename Stream> inline void Unserialize(Stream& s, char& a ) { a = ser_readdata8(s); } // TODO Get rid of bare char
+template<typename Stream> inline void Unserialize(Stream& s, int8_t& a ) { a = ser_readdata8(s); }
+template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a ) { a = ser_readdata8(s); }
+template<typename Stream> inline void Unserialize(Stream& s, int16_t& a ) { a = ser_readdata16(s); }
+template<typename Stream> inline void Unserialize(Stream& s, uint16_t& a) { a = ser_readdata16(s); }
+template<typename Stream> inline void Unserialize(Stream& s, int32_t& a ) { a = ser_readdata32(s); }
+template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a) { a = ser_readdata32(s); }
+template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = ser_readdata64(s); }
+template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); }
+template<typename Stream> inline void Unserialize(Stream& s, float& a ) { a = ser_uint32_to_float(ser_readdata32(s)); }
+template<typename Stream> inline void Unserialize(Stream& s, double& a ) { a = ser_uint64_to_double(ser_readdata64(s)); }
+
+template<typename Stream> inline void Serialize(Stream& s, bool a) { char f=a; ser_writedata8(s, f); }
+template<typename Stream> inline void Unserialize(Stream& s, bool& a) { char f=ser_readdata8(s); a=f; }
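After the macro change above, a serializable class implements a single two-parameter SerializationOp and the stream carries the type and version. A minimal, hypothetical example of the new pattern (assumes serialize.h, <vector> and <stdint.h> are included):

    // Hypothetical struct, for illustration only. READWRITE() now expands to
    // ::SerReadWrite(s, obj, ser_action) with no nType/nVersion arguments.
    struct CExample
    {
        int32_t nValue;
        std::vector<unsigned char> vData;

        ADD_SERIALIZE_METHODS;

        template <typename Stream, typename Operation>
        inline void SerializationOp(Stream& s, Operation ser_action) {
            READWRITE(nValue);
            READWRITE(vData);
            // Version-dependent fields can still branch on the stream, e.g.
            // if (s.GetVersion() >= SOME_VERSION) READWRITE(nExtra);
        }
    };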
@@ -245,6 +227,8 @@ inline unsigned int GetSizeOfCompactSize(uint64_t nSize)
else return sizeof(unsigned char) + sizeof(uint64_t);
}
+inline void WriteCompactSize(CSizeComputer& os, uint64_t nSize);
+
template<typename Stream>
void WriteCompactSize(Stream& os, uint64_t nSize)
{
@@ -339,6 +323,9 @@ inline unsigned int GetSizeOfVarInt(I n)
return nRet;
}
+template<typename I>
+inline void WriteVarInt(CSizeComputer& os, I n);
+
template<typename Stream, typename I>
void WriteVarInt(Stream& os, I n)
{
@@ -402,19 +389,14 @@ public:
char* end() { return pend; }
const char* end() const { return pend; }
- unsigned int GetSerializeSize(int, int=0) const
- {
- return pend - pbegin;
- }
-
template<typename Stream>
- void Serialize(Stream& s, int, int=0) const
+ void Serialize(Stream& s) const
{
s.write(pbegin, pend - pbegin);
}
template<typename Stream>
- void Unserialize(Stream& s, int, int=0)
+ void Unserialize(Stream& s)
{
s.read(pbegin, pend - pbegin);
}
@@ -428,17 +410,13 @@ protected:
public:
CVarInt(I& nIn) : n(nIn) { }
- unsigned int GetSerializeSize(int, int) const {
- return GetSizeOfVarInt<I>(n);
- }
-
template<typename Stream>
- void Serialize(Stream &s, int, int) const {
+ void Serialize(Stream &s) const {
WriteVarInt<Stream,I>(s, n);
}
template<typename Stream>
- void Unserialize(Stream& s, int, int) {
+ void Unserialize(Stream& s) {
n = ReadVarInt<Stream,I>(s);
}
};
@@ -450,17 +428,13 @@ protected:
public:
CCompactSize(uint64_t& nIn) : n(nIn) { }
- unsigned int GetSerializeSize(int, int) const {
- return GetSizeOfCompactSize(n);
- }
-
template<typename Stream>
- void Serialize(Stream &s, int, int) const {
+ void Serialize(Stream &s) const {
WriteCompactSize<Stream>(s, n);
}
template<typename Stream>
- void Unserialize(Stream& s, int, int) {
+ void Unserialize(Stream& s) {
n = ReadCompactSize<Stream>(s);
}
};
@@ -471,10 +445,10 @@ class LimitedString
protected:
std::string& string;
public:
- LimitedString(std::string& string) : string(string) {}
+ LimitedString(std::string& _string) : string(_string) {}
template<typename Stream>
- void Unserialize(Stream& s, int, int=0)
+ void Unserialize(Stream& s)
{
size_t size = ReadCompactSize(s);
if (size > Limit) {
@@ -486,17 +460,12 @@ public:
}
template<typename Stream>
- void Serialize(Stream& s, int, int=0) const
+ void Serialize(Stream& s) const
{
WriteCompactSize(s, string.size());
if (!string.empty())
s.write((char*)&string[0], string.size());
}
-
- unsigned int GetSerializeSize(int, int=0) const
- {
- return GetSizeOfCompactSize(string.size()) + string.size();
- }
};
template<typename I>
@@ -509,58 +478,48 @@ CVarInt<I> WrapVarInt(I& n) { return CVarInt<I>(n); }
/**
* string
*/
-template<typename C> unsigned int GetSerializeSize(const std::basic_string<C>& str, int, int=0);
-template<typename Stream, typename C> void Serialize(Stream& os, const std::basic_string<C>& str, int, int=0);
-template<typename Stream, typename C> void Unserialize(Stream& is, std::basic_string<C>& str, int, int=0);
+template<typename Stream, typename C> void Serialize(Stream& os, const std::basic_string<C>& str);
+template<typename Stream, typename C> void Unserialize(Stream& is, std::basic_string<C>& str);
/**
* prevector
* prevectors of unsigned char are a special case and are intended to be serialized as a single opaque blob.
*/
-template<unsigned int N, typename T> unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const unsigned char&);
-template<unsigned int N, typename T, typename V> unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const V&);
-template<unsigned int N, typename T> inline unsigned int GetSerializeSize(const prevector<N, T>& v, int nType, int nVersion);
-template<typename Stream, unsigned int N, typename T> void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const unsigned char&);
-template<typename Stream, unsigned int N, typename T, typename V> void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const V&);
-template<typename Stream, unsigned int N, typename T> inline void Serialize(Stream& os, const prevector<N, T>& v, int nType, int nVersion);
-template<typename Stream, unsigned int N, typename T> void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const unsigned char&);
-template<typename Stream, unsigned int N, typename T, typename V> void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const V&);
-template<typename Stream, unsigned int N, typename T> inline void Unserialize(Stream& is, prevector<N, T>& v, int nType, int nVersion);
+template<typename Stream, unsigned int N, typename T> void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&);
+template<typename Stream, unsigned int N, typename T, typename V> void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&);
+template<typename Stream, unsigned int N, typename T> inline void Serialize(Stream& os, const prevector<N, T>& v);
+template<typename Stream, unsigned int N, typename T> void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&);
+template<typename Stream, unsigned int N, typename T, typename V> void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&);
+template<typename Stream, unsigned int N, typename T> inline void Unserialize(Stream& is, prevector<N, T>& v);
/**
* vector
* vectors of unsigned char are a special case and are intended to be serialized as a single opaque blob.
*/
-template<typename T, typename A> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&);
-template<typename T, typename A, typename V> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const V&);
-template<typename T, typename A> inline unsigned int GetSerializeSize(const std::vector<T, A>& v, int nType, int nVersion);
-template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&);
-template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const V&);
-template<typename Stream, typename T, typename A> inline void Serialize(Stream& os, const std::vector<T, A>& v, int nType, int nVersion);
-template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const unsigned char&);
-template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const V&);
-template<typename Stream, typename T, typename A> inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersion);
+template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&);
+template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&);
+template<typename Stream, typename T, typename A> inline void Serialize(Stream& os, const std::vector<T, A>& v);
+template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&);
+template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&);
+template<typename Stream, typename T, typename A> inline void Unserialize(Stream& is, std::vector<T, A>& v);
/**
* pair
*/
-template<typename K, typename T> unsigned int GetSerializeSize(const std::pair<K, T>& item, int nType, int nVersion);
-template<typename Stream, typename K, typename T> void Serialize(Stream& os, const std::pair<K, T>& item, int nType, int nVersion);
-template<typename Stream, typename K, typename T> void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion);
+template<typename Stream, typename K, typename T> void Serialize(Stream& os, const std::pair<K, T>& item);
+template<typename Stream, typename K, typename T> void Unserialize(Stream& is, std::pair<K, T>& item);
/**
* map
*/
-template<typename K, typename T, typename Pred, typename A> unsigned int GetSerializeSize(const std::map<K, T, Pred, A>& m, int nType, int nVersion);
-template<typename Stream, typename K, typename T, typename Pred, typename A> void Serialize(Stream& os, const std::map<K, T, Pred, A>& m, int nType, int nVersion);
-template<typename Stream, typename K, typename T, typename Pred, typename A> void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion);
+template<typename Stream, typename K, typename T, typename Pred, typename A> void Serialize(Stream& os, const std::map<K, T, Pred, A>& m);
+template<typename Stream, typename K, typename T, typename Pred, typename A> void Unserialize(Stream& is, std::map<K, T, Pred, A>& m);
/**
* set
*/
-template<typename K, typename Pred, typename A> unsigned int GetSerializeSize(const std::set<K, Pred, A>& m, int nType, int nVersion);
-template<typename Stream, typename K, typename Pred, typename A> void Serialize(Stream& os, const std::set<K, Pred, A>& m, int nType, int nVersion);
-template<typename Stream, typename K, typename Pred, typename A> void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion);
+template<typename Stream, typename K, typename Pred, typename A> void Serialize(Stream& os, const std::set<K, Pred, A>& m);
+template<typename Stream, typename K, typename Pred, typename A> void Unserialize(Stream& is, std::set<K, Pred, A>& m);
@@ -568,26 +527,17 @@ template<typename Stream, typename K, typename Pred, typename A> void Unserializ
/**
* If none of the specialized versions above matched, default to calling member function.
- * "int nType" is changed to "long nType" to keep from getting an ambiguous overload error.
- * The compiler will only cast int to long if none of the other templates matched.
- * Thanks to Boost serialization for this idea.
*/
-template<typename T>
-inline unsigned int GetSerializeSize(const T& a, long nType, int nVersion)
-{
- return a.GetSerializeSize((int)nType, nVersion);
-}
-
template<typename Stream, typename T>
-inline void Serialize(Stream& os, const T& a, long nType, int nVersion)
+inline void Serialize(Stream& os, const T& a)
{
- a.Serialize(os, (int)nType, nVersion);
+ a.Serialize(os);
}
template<typename Stream, typename T>
-inline void Unserialize(Stream& is, T& a, long nType, int nVersion)
+inline void Unserialize(Stream& is, T& a)
{
- a.Unserialize(is, (int)nType, nVersion);
+ a.Unserialize(is);
}
@@ -597,14 +547,8 @@ inline void Unserialize(Stream& is, T& a, long nType, int nVersion)
/**
* string
*/
-template<typename C>
-unsigned int GetSerializeSize(const std::basic_string<C>& str, int, int)
-{
- return GetSizeOfCompactSize(str.size()) + str.size() * sizeof(str[0]);
-}
-
template<typename Stream, typename C>
-void Serialize(Stream& os, const std::basic_string<C>& str, int, int)
+void Serialize(Stream& os, const std::basic_string<C>& str)
{
WriteCompactSize(os, str.size());
if (!str.empty())
@@ -612,7 +556,7 @@ void Serialize(Stream& os, const std::basic_string<C>& str, int, int)
}
template<typename Stream, typename C>
-void Unserialize(Stream& is, std::basic_string<C>& str, int, int)
+void Unserialize(Stream& is, std::basic_string<C>& str)
{
unsigned int nSize = ReadCompactSize(is);
str.resize(nSize);
@@ -625,30 +569,8 @@ void Unserialize(Stream& is, std::basic_string<C>& str, int, int)
/**
* prevector
*/
-template<unsigned int N, typename T>
-unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const unsigned char&)
-{
- return (GetSizeOfCompactSize(v.size()) + v.size() * sizeof(T));
-}
-
-template<unsigned int N, typename T, typename V>
-unsigned int GetSerializeSize_impl(const prevector<N, T>& v, int nType, int nVersion, const V&)
-{
- unsigned int nSize = GetSizeOfCompactSize(v.size());
- for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- nSize += GetSerializeSize((*vi), nType, nVersion);
- return nSize;
-}
-
-template<unsigned int N, typename T>
-inline unsigned int GetSerializeSize(const prevector<N, T>& v, int nType, int nVersion)
-{
- return GetSerializeSize_impl(v, nType, nVersion, T());
-}
-
-
template<typename Stream, unsigned int N, typename T>
-void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const unsigned char&)
+void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&)
{
WriteCompactSize(os, v.size());
if (!v.empty())
@@ -656,22 +578,22 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersio
}
template<typename Stream, unsigned int N, typename T, typename V>
-void Serialize_impl(Stream& os, const prevector<N, T>& v, int nType, int nVersion, const V&)
+void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&)
{
WriteCompactSize(os, v.size());
for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- ::Serialize(os, (*vi), nType, nVersion);
+ ::Serialize(os, (*vi));
}
template<typename Stream, unsigned int N, typename T>
-inline void Serialize(Stream& os, const prevector<N, T>& v, int nType, int nVersion)
+inline void Serialize(Stream& os, const prevector<N, T>& v)
{
- Serialize_impl(os, v, nType, nVersion, T());
+ Serialize_impl(os, v, T());
}
template<typename Stream, unsigned int N, typename T>
-void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const unsigned char&)
+void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)
{
// Limit size per read so bogus size value won't cause out of memory
v.clear();
@@ -687,7 +609,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, c
}
template<typename Stream, unsigned int N, typename T, typename V>
-void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, const V&)
+void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&)
{
v.clear();
unsigned int nSize = ReadCompactSize(is);
@@ -700,14 +622,14 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, int nType, int nVersion, c
nMid = nSize;
v.resize(nMid);
for (; i < nMid; i++)
- Unserialize(is, v[i], nType, nVersion);
+ Unserialize(is, v[i]);
}
}
template<typename Stream, unsigned int N, typename T>
-inline void Unserialize(Stream& is, prevector<N, T>& v, int nType, int nVersion)
+inline void Unserialize(Stream& is, prevector<N, T>& v)
{
- Unserialize_impl(is, v, nType, nVersion, T());
+ Unserialize_impl(is, v, T());
}
@@ -715,30 +637,8 @@ inline void Unserialize(Stream& is, prevector<N, T>& v, int nType, int nVersion)
/**
* vector
*/
-template<typename T, typename A>
-unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&)
-{
- return (GetSizeOfCompactSize(v.size()) + v.size() * sizeof(T));
-}
-
-template<typename T, typename A, typename V>
-unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const V&)
-{
- unsigned int nSize = GetSizeOfCompactSize(v.size());
- for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- nSize += GetSerializeSize((*vi), nType, nVersion);
- return nSize;
-}
-
-template<typename T, typename A>
-inline unsigned int GetSerializeSize(const std::vector<T, A>& v, int nType, int nVersion)
-{
- return GetSerializeSize_impl(v, nType, nVersion, T());
-}
-
-
template<typename Stream, typename T, typename A>
-void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&)
+void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&)
{
WriteCompactSize(os, v.size());
if (!v.empty())
@@ -746,22 +646,22 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVers
}
template<typename Stream, typename T, typename A, typename V>
-void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const V&)
+void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&)
{
WriteCompactSize(os, v.size());
for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- ::Serialize(os, (*vi), nType, nVersion);
+ ::Serialize(os, (*vi));
}
template<typename Stream, typename T, typename A>
-inline void Serialize(Stream& os, const std::vector<T, A>& v, int nType, int nVersion)
+inline void Serialize(Stream& os, const std::vector<T, A>& v)
{
- Serialize_impl(os, v, nType, nVersion, T());
+ Serialize_impl(os, v, T());
}
template<typename Stream, typename T, typename A>
-void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const unsigned char&)
+void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&)
{
// Limit size per read so bogus size value won't cause out of memory
v.clear();
@@ -777,7 +677,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion,
}
template<typename Stream, typename T, typename A, typename V>
-void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const V&)
+void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&)
{
v.clear();
unsigned int nSize = ReadCompactSize(is);
@@ -790,14 +690,14 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion,
nMid = nSize;
v.resize(nMid);
for (; i < nMid; i++)
- Unserialize(is, v[i], nType, nVersion);
+ Unserialize(is, v[i]);
}
}
template<typename Stream, typename T, typename A>
-inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersion)
+inline void Unserialize(Stream& is, std::vector<T, A>& v)
{
- Unserialize_impl(is, v, nType, nVersion, T());
+ Unserialize_impl(is, v, T());
}
@@ -805,24 +705,18 @@ inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersio
/**
* pair
*/
-template<typename K, typename T>
-unsigned int GetSerializeSize(const std::pair<K, T>& item, int nType, int nVersion)
-{
- return GetSerializeSize(item.first, nType, nVersion) + GetSerializeSize(item.second, nType, nVersion);
-}
-
template<typename Stream, typename K, typename T>
-void Serialize(Stream& os, const std::pair<K, T>& item, int nType, int nVersion)
+void Serialize(Stream& os, const std::pair<K, T>& item)
{
- Serialize(os, item.first, nType, nVersion);
- Serialize(os, item.second, nType, nVersion);
+ Serialize(os, item.first);
+ Serialize(os, item.second);
}
template<typename Stream, typename K, typename T>
-void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion)
+void Unserialize(Stream& is, std::pair<K, T>& item)
{
- Unserialize(is, item.first, nType, nVersion);
- Unserialize(is, item.second, nType, nVersion);
+ Unserialize(is, item.first);
+ Unserialize(is, item.second);
}
@@ -830,25 +724,16 @@ void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion)
/**
* map
*/
-template<typename K, typename T, typename Pred, typename A>
-unsigned int GetSerializeSize(const std::map<K, T, Pred, A>& m, int nType, int nVersion)
-{
- unsigned int nSize = GetSizeOfCompactSize(m.size());
- for (typename std::map<K, T, Pred, A>::const_iterator mi = m.begin(); mi != m.end(); ++mi)
- nSize += GetSerializeSize((*mi), nType, nVersion);
- return nSize;
-}
-
template<typename Stream, typename K, typename T, typename Pred, typename A>
-void Serialize(Stream& os, const std::map<K, T, Pred, A>& m, int nType, int nVersion)
+void Serialize(Stream& os, const std::map<K, T, Pred, A>& m)
{
WriteCompactSize(os, m.size());
for (typename std::map<K, T, Pred, A>::const_iterator mi = m.begin(); mi != m.end(); ++mi)
- Serialize(os, (*mi), nType, nVersion);
+ Serialize(os, (*mi));
}
template<typename Stream, typename K, typename T, typename Pred, typename A>
-void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion)
+void Unserialize(Stream& is, std::map<K, T, Pred, A>& m)
{
m.clear();
unsigned int nSize = ReadCompactSize(is);
@@ -856,7 +741,7 @@ void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion
for (unsigned int i = 0; i < nSize; i++)
{
std::pair<K, T> item;
- Unserialize(is, item, nType, nVersion);
+ Unserialize(is, item);
mi = m.insert(mi, item);
}
}
@@ -866,25 +751,16 @@ void Unserialize(Stream& is, std::map<K, T, Pred, A>& m, int nType, int nVersion
/**
* set
*/
-template<typename K, typename Pred, typename A>
-unsigned int GetSerializeSize(const std::set<K, Pred, A>& m, int nType, int nVersion)
-{
- unsigned int nSize = GetSizeOfCompactSize(m.size());
- for (typename std::set<K, Pred, A>::const_iterator it = m.begin(); it != m.end(); ++it)
- nSize += GetSerializeSize((*it), nType, nVersion);
- return nSize;
-}
-
template<typename Stream, typename K, typename Pred, typename A>
-void Serialize(Stream& os, const std::set<K, Pred, A>& m, int nType, int nVersion)
+void Serialize(Stream& os, const std::set<K, Pred, A>& m)
{
WriteCompactSize(os, m.size());
for (typename std::set<K, Pred, A>::const_iterator it = m.begin(); it != m.end(); ++it)
- Serialize(os, (*it), nType, nVersion);
+ Serialize(os, (*it));
}
template<typename Stream, typename K, typename Pred, typename A>
-void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion)
+void Unserialize(Stream& is, std::set<K, Pred, A>& m)
{
m.clear();
unsigned int nSize = ReadCompactSize(is);
@@ -892,7 +768,7 @@ void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion)
for (unsigned int i = 0; i < nSize; i++)
{
K key;
- Unserialize(is, key, nType, nVersion);
+ Unserialize(is, key);
it = m.insert(it, key);
}
}
@@ -904,23 +780,23 @@ void Unserialize(Stream& is, std::set<K, Pred, A>& m, int nType, int nVersion)
*/
struct CSerActionSerialize
{
- bool ForRead() const { return false; }
+ constexpr bool ForRead() const { return false; }
};
struct CSerActionUnserialize
{
- bool ForRead() const { return true; }
+ constexpr bool ForRead() const { return true; }
};
template<typename Stream, typename T>
-inline void SerReadWrite(Stream& s, const T& obj, int nType, int nVersion, CSerActionSerialize ser_action)
+inline void SerReadWrite(Stream& s, const T& obj, CSerActionSerialize ser_action)
{
- ::Serialize(s, obj, nType, nVersion);
+ ::Serialize(s, obj);
}
template<typename Stream, typename T>
-inline void SerReadWrite(Stream& s, T& obj, int nType, int nVersion, CSerActionUnserialize ser_action)
+inline void SerReadWrite(Stream& s, T& obj, CSerActionUnserialize ser_action)
{
- ::Unserialize(s, obj, nType, nVersion);
+ ::Unserialize(s, obj);
}
@@ -931,33 +807,122 @@ inline void SerReadWrite(Stream& s, T& obj, int nType, int nVersion, CSerActionU
+/* ::GetSerializeSize implementations
+ *
+ * Computing the serialized size of objects is done through a special stream
+ * object of type CSizeComputer, which only records the number of bytes written
+ * to it.
+ *
+ * If your Serialize or SerializationOp method has non-trivial overhead for
+ * serialization, it may be worthwhile to implement a specialized version for
+ * CSizeComputer, which uses the s.seek() method to record bytes that would
+ * be written instead.
+ */
class CSizeComputer
{
protected:
size_t nSize;
+ const int nType;
+ const int nVersion;
public:
- int nType;
- int nVersion;
-
CSizeComputer(int nTypeIn, int nVersionIn) : nSize(0), nType(nTypeIn), nVersion(nVersionIn) {}
- CSizeComputer& write(const char *psz, size_t nSize)
+ void write(const char *psz, size_t _nSize)
+ {
+ this->nSize += _nSize;
+ }
+
+ /** Pretend _nSize bytes are written, without specifying them. */
+ void seek(size_t _nSize)
{
- this->nSize += nSize;
- return *this;
+ this->nSize += _nSize;
}
template<typename T>
CSizeComputer& operator<<(const T& obj)
{
- ::Serialize(*this, obj, nType, nVersion);
+ ::Serialize(*this, obj);
return (*this);
}
size_t size() const {
return nSize;
}
+
+ int GetVersion() const { return nVersion; }
+ int GetType() const { return nType; }
};
+template<typename Stream>
+void SerializeMany(Stream& s)
+{
+}
+
+template<typename Stream, typename Arg>
+void SerializeMany(Stream& s, Arg&& arg)
+{
+ ::Serialize(s, std::forward<Arg>(arg));
+}
+
+template<typename Stream, typename Arg, typename... Args>
+void SerializeMany(Stream& s, Arg&& arg, Args&&... args)
+{
+ ::Serialize(s, std::forward<Arg>(arg));
+ ::SerializeMany(s, std::forward<Args>(args)...);
+}
+
+template<typename Stream>
+inline void UnserializeMany(Stream& s)
+{
+}
+
+template<typename Stream, typename Arg>
+inline void UnserializeMany(Stream& s, Arg& arg)
+{
+ ::Unserialize(s, arg);
+}
+
+template<typename Stream, typename Arg, typename... Args>
+inline void UnserializeMany(Stream& s, Arg& arg, Args&... args)
+{
+ ::Unserialize(s, arg);
+ ::UnserializeMany(s, args...);
+}
+
+template<typename Stream, typename... Args>
+inline void SerReadWriteMany(Stream& s, CSerActionSerialize ser_action, Args&&... args)
+{
+ ::SerializeMany(s, std::forward<Args>(args)...);
+}
+
+template<typename Stream, typename... Args>
+inline void SerReadWriteMany(Stream& s, CSerActionUnserialize ser_action, Args&... args)
+{
+ ::UnserializeMany(s, args...);
+}
+
+template<typename I>
+inline void WriteVarInt(CSizeComputer &s, I n)
+{
+ s.seek(GetSizeOfVarInt<I>(n));
+}
+
+inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize)
+{
+ s.seek(GetSizeOfCompactSize(nSize));
+}
+
+template <typename T>
+size_t GetSerializeSize(const T& t, int nType, int nVersion = 0)
+{
+ return (CSizeComputer(nType, nVersion) << t).size();
+}
+
+template <typename S, typename T>
+size_t GetSerializeSize(const S& s, const T& t)
+{
+ return (CSizeComputer(s.GetType(), s.GetVersion()) << t).size();
+}
+
#endif // BITCOIN_SERIALIZE_H
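Taken together, the additions at the end of serialize.h replace the old per-type GetSerializeSize overloads: size computation now runs the ordinary serializer against a CSizeComputer (with seek() short-cuts for var-ints and compact sizes), and the variadic helpers back the new READWRITEMANY macro. A short sketch of the helpers in use, assuming streams.h and version.h are included for CDataStream and PROTOCOL_VERSION:

    uint32_t a = 7;
    std::string b = "example";

    // What READWRITEMANY(a, b) expands to on the serializing path:
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
    ::SerializeMany(ss, a, b);

    // Size computation funnels through CSizeComputer instead of
    // GetSerializeSize(obj, nType, nVersion) member/overload lookups:
    size_t n1 = GetSerializeSize(b, SER_NETWORK, PROTOCOL_VERSION); // explicit type/version
    size_t n2 = GetSerializeSize(ss, b);                            // borrow the stream's type/version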
diff --git a/src/streams.h b/src/streams.h
index 7132364eb1..c3e7c9e9e4 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -26,17 +26,18 @@ template<typename Stream>
class OverrideStream
{
Stream* stream;
-public:
+
const int nType;
const int nVersion;
+public:
OverrideStream(Stream* stream_, int nType_, int nVersion_) : stream(stream_), nType(nType_), nVersion(nVersion_) {}
template<typename T>
OverrideStream<Stream>& operator<<(const T& obj)
{
// Serialize to this stream
- ::Serialize(*this->stream, obj, nType, nVersion);
+ ::Serialize(*this, obj);
return (*this);
}
@@ -44,9 +45,22 @@ public:
OverrideStream<Stream>& operator>>(T& obj)
{
// Unserialize from this stream
- ::Unserialize(*this->stream, obj, nType, nVersion);
+ ::Unserialize(*this, obj);
return (*this);
}
+
+ void write(const char* pch, size_t nSize)
+ {
+ stream->write(pch, nSize);
+ }
+
+ void read(char* pch, size_t nSize)
+ {
+ stream->read(pch, nSize);
+ }
+
+ int GetVersion() const { return nVersion; }
+ int GetType() const { return nType; }
};
template<typename S>
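OverrideStream previously forwarded straight to the wrapped stream with explicit nType/nVersion; after this change it is a stream in its own right (read/write plus GetType/GetVersion), so the overridden version is what nested Serialize calls observe. A hypothetical usage sketch (assumes streams.h, version.h and <cassert>):

    // Serialize with a different version without touching the inner stream's
    // own version; the bytes still land in `inner` via OverrideStream::write().
    CDataStream inner(SER_NETWORK, PROTOCOL_VERSION);
    OverrideStream<CDataStream> legacy(&inner, SER_NETWORK, 70002); // 70002 is an arbitrary example version
    uint32_t nValue = 42;
    legacy << nValue;
    assert(inner.size() == 4);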
@@ -66,9 +80,10 @@ protected:
typedef CSerializeData vector_type;
vector_type vch;
unsigned int nReadPos;
-public:
+
int nType;
int nVersion;
+public:
typedef vector_type::allocator_type allocator_type;
typedef vector_type::size_type size_type;
@@ -112,6 +127,13 @@ public:
Init(nTypeIn, nVersionIn);
}
+ template <typename... Args>
+ CDataStream(int nTypeIn, int nVersionIn, Args&&... args)
+ {
+ Init(nTypeIn, nVersionIn);
+ ::SerializeMany(*this, std::forward<Args>(args)...);
+ }
+
void Init(int nTypeIn, int nVersionIn)
{
nReadPos = 0;
@@ -244,13 +266,11 @@ public:
int in_avail() { return size(); }
void SetType(int n) { nType = n; }
- int GetType() { return nType; }
+ int GetType() const { return nType; }
void SetVersion(int n) { nVersion = n; }
- int GetVersion() { return nVersion; }
- void ReadVersion() { *this >> nVersion; }
- void WriteVersion() { *this << nVersion; }
+ int GetVersion() const { return nVersion; }
- CDataStream& read(char* pch, size_t nSize)
+ void read(char* pch, size_t nSize)
{
// Read from the beginning of the buffer
unsigned int nReadPosNext = nReadPos + nSize;
@@ -263,14 +283,13 @@ public:
memcpy(pch, &vch[nReadPos], nSize);
nReadPos = 0;
vch.clear();
- return (*this);
+ return;
}
memcpy(pch, &vch[nReadPos], nSize);
nReadPos = nReadPosNext;
- return (*this);
}
- CDataStream& ignore(int nSize)
+ void ignore(int nSize)
{
// Ignore from the beginning of the buffer
if (nSize < 0) {
@@ -283,21 +302,19 @@ public:
throw std::ios_base::failure("CDataStream::ignore(): end of data");
nReadPos = 0;
vch.clear();
- return (*this);
+ return;
}
nReadPos = nReadPosNext;
- return (*this);
}
- CDataStream& write(const char* pch, size_t nSize)
+ void write(const char* pch, size_t nSize)
{
// Write to the end of the buffer
vch.insert(vch.end(), pch, pch + nSize);
- return (*this);
}
template<typename Stream>
- void Serialize(Stream& s, int nType, int nVersion) const
+ void Serialize(Stream& s) const
{
// Special case: stream << stream concatenates like stream += stream
if (!vch.empty())
@@ -305,17 +322,10 @@ public:
}
template<typename T>
- unsigned int GetSerializeSize(const T& obj)
- {
- // Tells the size of the object if serialized to this stream
- return ::GetSerializeSize(obj, nType, nVersion);
- }
-
- template<typename T>
CDataStream& operator<<(const T& obj)
{
// Serialize to this stream
- ::Serialize(*this, obj, nType, nVersion);
+ ::Serialize(*this, obj);
return (*this);
}
@@ -323,7 +333,7 @@ public:
CDataStream& operator>>(T& obj)
{
// Unserialize from this stream
- ::Unserialize(*this, obj, nType, nVersion);
+ ::Unserialize(*this, obj);
return (*this);
}
@@ -378,17 +388,15 @@ private:
CAutoFile(const CAutoFile&);
CAutoFile& operator=(const CAutoFile&);
- int nType;
- int nVersion;
-
+ const int nType;
+ const int nVersion;
+
FILE* file;
public:
- CAutoFile(FILE* filenew, int nTypeIn, int nVersionIn)
+ CAutoFile(FILE* filenew, int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn)
{
file = filenew;
- nType = nTypeIn;
- nVersion = nVersionIn;
}
~CAutoFile()
@@ -423,23 +431,18 @@ public:
//
// Stream subset
//
- void SetType(int n) { nType = n; }
- int GetType() { return nType; }
- void SetVersion(int n) { nVersion = n; }
- int GetVersion() { return nVersion; }
- void ReadVersion() { *this >> nVersion; }
- void WriteVersion() { *this << nVersion; }
+ int GetType() const { return nType; }
+ int GetVersion() const { return nVersion; }
- CAutoFile& read(char* pch, size_t nSize)
+ void read(char* pch, size_t nSize)
{
if (!file)
throw std::ios_base::failure("CAutoFile::read: file handle is NULL");
if (fread(pch, 1, nSize, file) != nSize)
throw std::ios_base::failure(feof(file) ? "CAutoFile::read: end of file" : "CAutoFile::read: fread failed");
- return (*this);
}
- CAutoFile& ignore(size_t nSize)
+ void ignore(size_t nSize)
{
if (!file)
throw std::ios_base::failure("CAutoFile::ignore: file handle is NULL");
@@ -450,23 +453,14 @@ public:
throw std::ios_base::failure(feof(file) ? "CAutoFile::ignore: end of file" : "CAutoFile::read: fread failed");
nSize -= nNow;
}
- return (*this);
}
- CAutoFile& write(const char* pch, size_t nSize)
+ void write(const char* pch, size_t nSize)
{
if (!file)
throw std::ios_base::failure("CAutoFile::write: file handle is NULL");
if (fwrite(pch, 1, nSize, file) != nSize)
throw std::ios_base::failure("CAutoFile::write: write failed");
- return (*this);
- }
-
- template<typename T>
- unsigned int GetSerializeSize(const T& obj)
- {
- // Tells the size of the object if serialized to this stream
- return ::GetSerializeSize(obj, nType, nVersion);
}
template<typename T>
@@ -475,7 +469,7 @@ public:
// Serialize to this stream
if (!file)
throw std::ios_base::failure("CAutoFile::operator<<: file handle is NULL");
- ::Serialize(*this, obj, nType, nVersion);
+ ::Serialize(*this, obj);
return (*this);
}
@@ -485,7 +479,7 @@ public:
// Unserialize from this stream
if (!file)
throw std::ios_base::failure("CAutoFile::operator>>: file handle is NULL");
- ::Unserialize(*this, obj, nType, nVersion);
+ ::Unserialize(*this, obj);
return (*this);
}
};
@@ -503,8 +497,8 @@ private:
CBufferedFile(const CBufferedFile&);
CBufferedFile& operator=(const CBufferedFile&);
- int nType;
- int nVersion;
+ const int nType;
+ const int nVersion;
FILE *src; // source file
uint64_t nSrcPos; // how many bytes have been read from source
@@ -534,11 +528,9 @@ protected:
public:
CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) :
- nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0)
+ nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0)
{
src = fileIn;
- nType = nTypeIn;
- nVersion = nVersionIn;
}
~CBufferedFile()
@@ -546,6 +538,9 @@ public:
fclose();
}
+ int GetVersion() const { return nVersion; }
+ int GetType() const { return nType; }
+
void fclose()
{
if (src) {
@@ -560,7 +555,7 @@ public:
}
// read a number of bytes
- CBufferedFile& read(char *pch, size_t nSize) {
+ void read(char *pch, size_t nSize) {
if (nSize + nReadPos > nReadLimit)
throw std::ios_base::failure("Read attempted past buffer limit");
if (nSize + nRewind > vchBuf.size())
@@ -579,7 +574,6 @@ public:
pch += nNow;
nSize -= nNow;
}
- return (*this);
}
// return the current reading position
@@ -625,7 +619,7 @@ public:
template<typename T>
CBufferedFile& operator>>(T& obj) {
// Unserialize from this stream
- ::Unserialize(*this, obj, nType, nVersion);
+ ::Unserialize(*this, obj);
return (*this);
}
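The variadic CDataStream constructor added above serializes its trailing arguments immediately, collapsing the common construct-then-stream pattern into one expression. A small sketch (assumes uint256.h, version.h and <cassert> are included):

    uint256 hash;            // zero-initialized here; filled in elsewhere in practice
    uint32_t nIndex = 3;

    CDataStream ssBefore(SER_NETWORK, PROTOCOL_VERSION);
    ssBefore << hash << nIndex;                                       // old two-step pattern

    CDataStream ssAfter(SER_NETWORK, PROTOCOL_VERSION, hash, nIndex); // new variadic form
    assert(ssBefore.str() == ssAfter.str());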
diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h
index 1ec40fe830..67064314ef 100644
--- a/src/support/allocators/secure.h
+++ b/src/support/allocators/secure.h
@@ -6,7 +6,8 @@
#ifndef BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
#define BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
-#include "support/pagelocker.h"
+#include "support/lockedpool.h"
+#include "support/cleanse.h"
#include <string>
@@ -39,20 +40,15 @@ struct secure_allocator : public std::allocator<T> {
T* allocate(std::size_t n, const void* hint = 0)
{
- T* p;
- p = std::allocator<T>::allocate(n, hint);
- if (p != NULL)
- LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
- return p;
+ return static_cast<T*>(LockedPoolManager::Instance().alloc(sizeof(T) * n));
}
void deallocate(T* p, std::size_t n)
{
if (p != NULL) {
memory_cleanse(p, sizeof(T) * n);
- LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
}
- std::allocator<T>::deallocate(p, n);
+ LockedPoolManager::Instance().free(p);
}
};
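With this change every secure_allocator allocation is served from the locked pool and wiped before being returned, instead of the old allocate-then-LockRange dance. Typical consumers are container aliases built on the allocator, like the SecureString typedef elsewhere in this header; SecureBytes below is an assumed illustration:

    #include <vector>
    #include "support/allocators/secure.h"

    // Hypothetical alias: backing memory comes from
    // LockedPoolManager::Instance().alloc() and is cleansed on deallocate().
    typedef std::vector<unsigned char, secure_allocator<unsigned char> > SecureBytes;

    SecureBytes key(32);   // 32 bytes of key material, allocated from the locked pool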
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
new file mode 100644
index 0000000000..01273c9791
--- /dev/null
+++ b/src/support/lockedpool.cpp
@@ -0,0 +1,385 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "support/lockedpool.h"
+#include "support/cleanse.h"
+
+#if defined(HAVE_CONFIG_H)
+#include "config/bitcoin-config.h"
+#endif
+
+#ifdef WIN32
+#ifdef _WIN32_WINNT
+#undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0501
+#define WIN32_LEAN_AND_MEAN 1
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#else
+#include <sys/mman.h> // for mmap
+#include <sys/resource.h> // for getrlimit
+#include <limits.h> // for PAGESIZE
+#include <unistd.h> // for sysconf
+#endif
+
+#include <algorithm>
+
+LockedPoolManager* LockedPoolManager::_instance = NULL;
+std::once_flag LockedPoolManager::init_flag;
+
+/*******************************************************************************/
+// Utilities
+//
+/** Round up to a multiple of align, where align must be a power of 2 */
+static inline size_t align_up(size_t x, size_t align)
+{
+ return (x + align - 1) & ~(align - 1);
+}
+
+/*******************************************************************************/
+// Implementation: Arena
+
+Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
+ base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
+{
+ // Start with one free chunk that covers the entire arena
+ chunks_free.emplace(base, size_in);
+}
+
+Arena::~Arena()
+{
+}
+
+void* Arena::alloc(size_t size)
+{
+ // Round to next multiple of alignment
+ size = align_up(size, alignment);
+
+ // Don't handle zero-sized chunks
+ if (size == 0)
+ return nullptr;
+
+ // Pick a large enough free-chunk
+ auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
+ [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
+ if (it == chunks_free.end())
+ return nullptr;
+
+ // Create the used-chunk, taking its space from the end of the free-chunk
+ auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
+ if (!(it->second -= size))
+ chunks_free.erase(it);
+ return reinterpret_cast<void*>(alloced->first);
+}
+
+/* Extend the chunk pointed to by `it` if `other` begins exactly at its end */
+template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
+ if (it->first + it->second == other.first) {
+ it->second += other.second;
+ return true;
+ }
+ return false;
+}
+
+void Arena::free(void *ptr)
+{
+ // Freeing the NULL pointer is OK.
+ if (ptr == nullptr) {
+ return;
+ }
+
+ // Remove chunk from used map
+ auto i = chunks_used.find(static_cast<char*>(ptr));
+ if (i == chunks_used.end()) {
+ throw std::runtime_error("Arena: invalid or double free");
+ }
+ auto freed = *i;
+ chunks_used.erase(i);
+
+ // Add space to free map, coalescing contiguous chunks
+ auto next = chunks_free.upper_bound(freed.first);
+ auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
+ if (prev == chunks_free.end() || !extend(prev, freed))
+ prev = chunks_free.emplace_hint(next, freed);
+ if (next != chunks_free.end() && extend(prev, *next))
+ chunks_free.erase(next);
+}
+
+Arena::Stats Arena::stats() const
+{
+ Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
+ for (const auto& chunk: chunks_used)
+ r.used += chunk.second;
+ for (const auto& chunk: chunks_free)
+ r.free += chunk.second;
+ r.total = r.used + r.free;
+ return r;
+}
+
+#ifdef ARENA_DEBUG
+void printchunk(char* base, size_t sz, bool used) {
+ std::cout <<
+ "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
+ " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
+ " 0x" << used << std::endl;
+}
+void Arena::walk() const
+{
+ for (const auto& chunk: chunks_used)
+ printchunk(chunk.first, chunk.second, true);
+ std::cout << std::endl;
+ for (const auto& chunk: chunks_free)
+ printchunk(chunk.first, chunk.second, false);
+ std::cout << std::endl;
+}
+#endif
+
+/*******************************************************************************/
+// Implementation: Win32LockedPageAllocator
+
+#ifdef WIN32
+/** LockedPageAllocator specialized for Windows.
+ */
+class Win32LockedPageAllocator: public LockedPageAllocator
+{
+public:
+ Win32LockedPageAllocator();
+ void* AllocateLocked(size_t len, bool *lockingSuccess);
+ void FreeLocked(void* addr, size_t len);
+ size_t GetLimit();
+private:
+ size_t page_size;
+};
+
+Win32LockedPageAllocator::Win32LockedPageAllocator()
+{
+ // Determine system page size in bytes
+ SYSTEM_INFO sSysInfo;
+ GetSystemInfo(&sSysInfo);
+ page_size = sSysInfo.dwPageSize;
+}
+void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
+{
+ len = align_up(len, page_size);
+ void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ if (addr) {
+ // VirtualLock is used to attempt to keep keying material out of swap. Note
+ // that it does not provide this as a guarantee, but, in practice, memory
+ // that has been VirtualLock'd almost never gets written to the pagefile
+ // except in rare circumstances where memory is extremely low.
+ *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
+ }
+ return addr;
+}
+void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
+{
+ len = align_up(len, page_size);
+ memory_cleanse(addr, len);
+ VirtualUnlock(const_cast<void*>(addr), len);
+}
+
+size_t Win32LockedPageAllocator::GetLimit()
+{
+ // TODO is there a limit on windows, how to get it?
+ return std::numeric_limits<size_t>::max();
+}
+#endif
+
+/*******************************************************************************/
+// Implementation: PosixLockedPageAllocator
+
+#ifndef WIN32
+/** LockedPageAllocator specialized for OSes that don't try to be
+ * special snowflakes.
+ */
+class PosixLockedPageAllocator: public LockedPageAllocator
+{
+public:
+ PosixLockedPageAllocator();
+ void* AllocateLocked(size_t len, bool *lockingSuccess);
+ void FreeLocked(void* addr, size_t len);
+ size_t GetLimit();
+private:
+ size_t page_size;
+};
+
+PosixLockedPageAllocator::PosixLockedPageAllocator()
+{
+ // Determine system page size in bytes
+#if defined(PAGESIZE) // defined in limits.h
+ page_size = PAGESIZE;
+#else // assume some POSIX OS
+ page_size = sysconf(_SC_PAGESIZE);
+#endif
+}
+
+// Some systems (at least OS X) still do not define MAP_ANONYMOUS and only
+// provide the deprecated MAP_ANON
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
+{
+ void *addr;
+ len = align_up(len, page_size);
+ addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    if (addr == MAP_FAILED) { // mmap reports failure as MAP_FAILED, not NULL
+        return nullptr;
+    }
+    *lockingSuccess = mlock(addr, len) == 0;
+ return addr;
+}
+void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
+{
+ len = align_up(len, page_size);
+ memory_cleanse(addr, len);
+ munlock(addr, len);
+ munmap(addr, len);
+}
+size_t PosixLockedPageAllocator::GetLimit()
+{
+#ifdef RLIMIT_MEMLOCK
+ struct rlimit rlim;
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
+ if (rlim.rlim_cur != RLIM_INFINITY) {
+ return rlim.rlim_cur;
+ }
+ }
+#endif
+ return std::numeric_limits<size_t>::max();
+}
+#endif
+
+/*******************************************************************************/
+// Implementation: LockedPool
+
+LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
+ allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
+{
+}
+
+LockedPool::~LockedPool()
+{
+}
+void* LockedPool::alloc(size_t size)
+{
+ std::lock_guard<std::mutex> lock(mutex);
+
+ // Don't handle impossible sizes
+ if (size == 0 || size > ARENA_SIZE)
+ return nullptr;
+
+ // Try allocating from each current arena
+ for (auto &arena: arenas) {
+ void *addr = arena.alloc(size);
+ if (addr) {
+ return addr;
+ }
+ }
+ // If that fails, create a new one
+ if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
+ return arenas.back().alloc(size);
+ }
+ return nullptr;
+}
+
+void LockedPool::free(void *ptr)
+{
+ std::lock_guard<std::mutex> lock(mutex);
+ // TODO we can do better than this linear search by keeping a map of arena
+ // extents to arena, and looking up the address.
+ for (auto &arena: arenas) {
+ if (arena.addressInArena(ptr)) {
+ arena.free(ptr);
+ return;
+ }
+ }
+ throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
+}
+
+LockedPool::Stats LockedPool::stats() const
+{
+ std::lock_guard<std::mutex> lock(mutex);
+ LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
+ for (const auto &arena: arenas) {
+ Arena::Stats i = arena.stats();
+ r.used += i.used;
+ r.free += i.free;
+ r.total += i.total;
+ r.chunks_used += i.chunks_used;
+ r.chunks_free += i.chunks_free;
+ }
+ return r;
+}
+
+bool LockedPool::new_arena(size_t size, size_t align)
+{
+ bool locked;
+ // If this is the first arena, handle this specially: Cap the upper size
+ // by the process limit. This makes sure that the first arena will at least
+ // be locked. An exception to this is if the process limit is 0:
+ // in this case no memory can be locked at all so we'll skip past this logic.
+ if (arenas.empty()) {
+ size_t limit = allocator->GetLimit();
+ if (limit > 0) {
+ size = std::min(size, limit);
+ }
+ }
+ void *addr = allocator->AllocateLocked(size, &locked);
+ if (!addr) {
+ return false;
+ }
+ if (locked) {
+ cumulative_bytes_locked += size;
+ } else if (lf_cb) { // Call the locking-failed callback if locking failed
+ if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
+ allocator->FreeLocked(addr, size);
+ return false;
+ }
+ }
+ arenas.emplace_back(allocator.get(), addr, size, align);
+ return true;
+}
+
+LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
+ Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
+{
+}
+LockedPool::LockedPageArena::~LockedPageArena()
+{
+ allocator->FreeLocked(base, size);
+}
+
+/*******************************************************************************/
+// Implementation: LockedPoolManager
+//
+LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator):
+ LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed)
+{
+}
+
+bool LockedPoolManager::LockingFailed()
+{
+ // TODO: log something but how? without including util.h
+ return true;
+}
+
+void LockedPoolManager::CreateInstance()
+{
+ // Using a local static instance guarantees that the object is initialized
+ // when it's first needed and also deinitialized after all objects that use
+ // it are done with it. I can think of one unlikely scenario where we may
+ // have a static deinitialization order/problem, but the check in
+ // LockedPoolManagerBase's destructor helps us detect if that ever happens.
+#ifdef WIN32
+ std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
+#else
+ std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
+#endif
+ static LockedPoolManager instance(std::move(allocator));
+ LockedPoolManager::_instance = &instance;
+}
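Arena::alloc rounds every request up to the chunk alignment before carving it off the end of a free chunk, so the sizes reported by stats() are always multiples of the alignment. A small worked sketch of the rounding rule, consistent with the arena_tests case below that expects a 1000-byte request to occupy 1008 bytes at the default 16-byte alignment (align_up_example is just a restatement for illustration):

```
#include <cstddef>

// Minimal restatement of the rounding used by align_up (align must be a power of two).
constexpr std::size_t align_up_example(std::size_t x, std::size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
static_assert(align_up_example(1000, 16) == 1008, "a 1000-byte request occupies 1008 bytes");
static_assert(align_up_example(1024, 16) == 1024, "already aligned requests are unchanged");
static_assert(align_up_example(1, 16) == 16, "the smallest non-zero chunk is one alignment unit");
```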
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
new file mode 100644
index 0000000000..3403415436
--- /dev/null
+++ b/src/support/lockedpool.h
@@ -0,0 +1,231 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
+#define BITCOIN_SUPPORT_LOCKEDPOOL_H
+
+#include <stdint.h>
+#include <list>
+#include <map>
+#include <mutex>
+#include <memory>
+
+/**
+ * OS-dependent allocation and deallocation of locked/pinned memory pages.
+ * Abstract base class.
+ */
+class LockedPageAllocator
+{
+public:
+ virtual ~LockedPageAllocator() {}
+ /** Allocate and lock memory pages.
+ * If len is not a multiple of the system page size, it is rounded up.
+ * Returns 0 in case of allocation failure.
+ *
+ * If locking the memory pages could not be accomplished it will still
+ * return the memory, however the lockingSuccess flag will be false.
+ * lockingSuccess is undefined if the allocation fails.
+ */
+ virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;
+
+ /** Unlock and free memory pages.
+ * Clear the memory before unlocking.
+ */
+ virtual void FreeLocked(void* addr, size_t len) = 0;
+
+ /** Get the total limit on the amount of memory that may be locked by this
+ * process, in bytes. Return size_t max if there is no limit or the limit
+ * is unknown. Return 0 if no memory can be locked at all.
+ */
+ virtual size_t GetLimit() = 0;
+};
+
+/* An arena manages a contiguous region of memory by dividing it into
+ * chunks.
+ */
+class Arena
+{
+public:
+ Arena(void *base, size_t size, size_t alignment);
+ virtual ~Arena();
+
+ /** Memory statistics. */
+ struct Stats
+ {
+ size_t used;
+ size_t free;
+ size_t total;
+ size_t chunks_used;
+ size_t chunks_free;
+ };
+
+ /** Allocate size bytes from this arena.
+ * Returns pointer on success, or 0 if memory is full or
+ * the application tried to allocate 0 bytes.
+ */
+ void* alloc(size_t size);
+
+ /** Free a previously allocated chunk of memory.
+ * Freeing the zero pointer has no effect.
+ * Raises std::runtime_error in case of error.
+ */
+ void free(void *ptr);
+
+ /** Get arena usage statistics */
+ Stats stats() const;
+
+#ifdef ARENA_DEBUG
+ void walk() const;
+#endif
+
+ /** Return whether a pointer points inside this arena.
+ * This returns base <= ptr < (base+size) so only use it for (inclusive)
+ * chunk starting addresses.
+ */
+ bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
+private:
+ Arena(const Arena& other) = delete; // non construction-copyable
+ Arena& operator=(const Arena&) = delete; // non copyable
+
+ /** Map of chunk address to chunk information. This class makes use of the
+ * sorted order to merge previous and next chunks during deallocation.
+ */
+ std::map<char*, size_t> chunks_free;
+ std::map<char*, size_t> chunks_used;
+ /** Base address of arena */
+ char* base;
+ /** End address of arena */
+ char* end;
+ /** Minimum chunk alignment */
+ size_t alignment;
+};
+
+/** Pool for locked memory chunks.
+ *
+ * To prevent sensitive key data from being swapped to disk, the memory in this pool
+ * is locked/pinned.
+ *
+ * An arena manages a contiguous region of memory. The pool starts out with one arena
+ * but can grow to multiple arenas if the need arises.
+ *
+ * Unlike a normal C heap, the administrative structures are separate from the managed
+ * memory. This is done to conserve precious locked memory, which some operating systems
+ * only provide in small amounts; it is safe because the sizes and base addresses of
+ * objects are not in themselves sensitive information.
+ */
+class LockedPool
+{
+public:
+ /** Size of one arena of locked memory. This is a compromise.
+ * Do not set this too low, as managing many arenas will increase
+ * allocation and deallocation overhead. Setting it too high allocates
+ * more locked memory from the OS than strictly necessary.
+ */
+ static const size_t ARENA_SIZE = 256*1024;
+    /** Chunk alignment. Another compromise. Setting this too high wastes memory;
+     * setting it too low encourages fragmentation.
+ */
+ static const size_t ARENA_ALIGN = 16;
+
+ /** Callback when allocation succeeds but locking fails.
+ */
+ typedef bool (*LockingFailed_Callback)();
+
+ /** Memory statistics. */
+ struct Stats
+ {
+ size_t used;
+ size_t free;
+ size_t total;
+ size_t locked;
+ size_t chunks_used;
+ size_t chunks_free;
+ };
+
+    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator,
+     * so it can only be instantiated with LockedPool(std::move(...)).
+     *
+     * The second argument is an optional callback that is invoked when locking a newly allocated arena fails.
+     * If this callback is provided and returns false, the allocation fails (hard fail); if
+     * it returns true, the allocation proceeds, but the callback may have warned the user.
+ */
+ LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0);
+ ~LockedPool();
+
+    /** Allocate size bytes from this pool.
+ * Returns pointer on success, or 0 if memory is full or
+ * the application tried to allocate 0 bytes.
+ */
+ void* alloc(size_t size);
+
+ /** Free a previously allocated chunk of memory.
+ * Freeing the zero pointer has no effect.
+ * Raises std::runtime_error in case of error.
+ */
+ void free(void *ptr);
+
+ /** Get pool usage statistics */
+ Stats stats() const;
+private:
+ LockedPool(const LockedPool& other) = delete; // non construction-copyable
+ LockedPool& operator=(const LockedPool&) = delete; // non copyable
+
+ std::unique_ptr<LockedPageAllocator> allocator;
+
+ /** Create an arena from locked pages */
+ class LockedPageArena: public Arena
+ {
+ public:
+ LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
+ ~LockedPageArena();
+ private:
+ void *base;
+ size_t size;
+ LockedPageAllocator *allocator;
+ };
+
+ bool new_arena(size_t size, size_t align);
+
+ std::list<LockedPageArena> arenas;
+ LockingFailed_Callback lf_cb;
+ size_t cumulative_bytes_locked;
+ /** Mutex protects access to this pool's data structures, including arenas.
+ */
+ mutable std::mutex mutex;
+};
+
+/**
+ * Singleton class to keep track of locked (ie, non-swappable) memory, for use in
+ * std::allocator templates.
+ *
+ * Some implementations of the STL allocate memory in some constructors (i.e., see
+ * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
+ * Due to the unpredictable order of static initializers, we have to make sure the
+ * LockedPoolManager instance exists before any other STL-based objects that use
+ * secure_allocator are created. So instead of having LockedPoolManager also be
+ * static-initialized, it is created on demand.
+ */
+class LockedPoolManager : public LockedPool
+{
+public:
+ /** Return the current instance, or create it once */
+ static LockedPoolManager& Instance()
+ {
+ std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
+ return *LockedPoolManager::_instance;
+ }
+
+private:
+ LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);
+
+ /** Create a new LockedPoolManager specialized to the OS */
+ static void CreateInstance();
+ /** Called when locking fails, warn the user here */
+ static bool LockingFailed();
+
+ static LockedPoolManager* _instance;
+ static std::once_flag init_flag;
+};
+
+#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
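Callers normally do not construct a LockedPool themselves; they go through the LockedPoolManager singleton, exactly as secure_allocator does above. A minimal hedged sketch mirroring the lockedpool_tests_live case further down in this diff (the function name is illustrative):

```
#include "support/lockedpool.h"

#include <cstring>

void locked_scratch_example()
{
    LockedPoolManager& pool = LockedPoolManager::Instance();

    void* buf = pool.alloc(32);      // nullptr if no more memory can be locked
    if (!buf)
        return;                      // caller must handle exhaustion

    memset(buf, 0xAB, 32);           // use as scratch space for sensitive data

    pool.free(buf);                  // freeing twice throws std::runtime_error
}
```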
diff --git a/src/support/pagelocker.cpp b/src/support/pagelocker.cpp
deleted file mode 100644
index 7cea2d88c5..0000000000
--- a/src/support/pagelocker.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include "support/pagelocker.h"
-
-#if defined(HAVE_CONFIG_H)
-#include "config/bitcoin-config.h"
-#endif
-
-#ifdef WIN32
-#ifdef _WIN32_WINNT
-#undef _WIN32_WINNT
-#endif
-#define _WIN32_WINNT 0x0501
-#define WIN32_LEAN_AND_MEAN 1
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <windows.h>
-// This is used to attempt to keep keying material out of swap
-// Note that VirtualLock does not provide this as a guarantee on Windows,
-// but, in practice, memory that has been VirtualLock'd almost never gets written to
-// the pagefile except in rare circumstances where memory is extremely low.
-#else
-#include <sys/mman.h>
-#include <limits.h> // for PAGESIZE
-#include <unistd.h> // for sysconf
-#endif
-
-LockedPageManager* LockedPageManager::_instance = NULL;
-boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT;
-
-/** Determine system page size in bytes */
-static inline size_t GetSystemPageSize()
-{
- size_t page_size;
-#if defined(WIN32)
- SYSTEM_INFO sSysInfo;
- GetSystemInfo(&sSysInfo);
- page_size = sSysInfo.dwPageSize;
-#elif defined(PAGESIZE) // defined in limits.h
- page_size = PAGESIZE;
-#else // assume some POSIX OS
- page_size = sysconf(_SC_PAGESIZE);
-#endif
- return page_size;
-}
-
-bool MemoryPageLocker::Lock(const void* addr, size_t len)
-{
-#ifdef WIN32
- return VirtualLock(const_cast<void*>(addr), len) != 0;
-#else
- return mlock(addr, len) == 0;
-#endif
-}
-
-bool MemoryPageLocker::Unlock(const void* addr, size_t len)
-{
-#ifdef WIN32
- return VirtualUnlock(const_cast<void*>(addr), len) != 0;
-#else
- return munlock(addr, len) == 0;
-#endif
-}
-
-LockedPageManager::LockedPageManager() : LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
-{
-}
diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h
deleted file mode 100644
index 538bf39453..0000000000
--- a/src/support/pagelocker.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_SUPPORT_PAGELOCKER_H
-#define BITCOIN_SUPPORT_PAGELOCKER_H
-
-#include "support/cleanse.h"
-
-#include <map>
-
-#include <boost/thread/mutex.hpp>
-#include <boost/thread/once.hpp>
-
-/**
- * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
- *
- * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
- * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
- * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
- *
- * @note By using a map from each page base address to lock count, this class is optimized for
- * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
- * something like an interval tree would be the preferred data structure.
- */
-template <class Locker>
-class LockedPageManagerBase
-{
-public:
- LockedPageManagerBase(size_t _page_size) : page_size(_page_size)
- {
- // Determine bitmask for extracting page from address
- assert(!(_page_size & (_page_size - 1))); // size must be power of two
- page_mask = ~(_page_size - 1);
- }
-
- ~LockedPageManagerBase()
- {
- }
-
-
- // For all pages in affected range, increase lock count
- void LockRange(void* p, size_t size)
- {
- boost::mutex::scoped_lock lock(mutex);
- if (!size)
- return;
- const size_t base_addr = reinterpret_cast<size_t>(p);
- const size_t start_page = base_addr & page_mask;
- const size_t end_page = (base_addr + size - 1) & page_mask;
- for (size_t page = start_page; page <= end_page; page += page_size) {
- Histogram::iterator it = histogram.find(page);
- if (it == histogram.end()) // Newly locked page
- {
- locker.Lock(reinterpret_cast<void*>(page), page_size);
- histogram.insert(std::make_pair(page, 1));
- } else // Page was already locked; increase counter
- {
- it->second += 1;
- }
- }
- }
-
- // For all pages in affected range, decrease lock count
- void UnlockRange(void* p, size_t size)
- {
- boost::mutex::scoped_lock lock(mutex);
- if (!size)
- return;
- const size_t base_addr = reinterpret_cast<size_t>(p);
- const size_t start_page = base_addr & page_mask;
- const size_t end_page = (base_addr + size - 1) & page_mask;
- for (size_t page = start_page; page <= end_page; page += page_size) {
- Histogram::iterator it = histogram.find(page);
- assert(it != histogram.end()); // Cannot unlock an area that was not locked
- // Decrease counter for page, when it is zero, the page will be unlocked
- it->second -= 1;
- if (it->second == 0) // Nothing on the page anymore that keeps it locked
- {
- // Unlock page and remove the count from histogram
- locker.Unlock(reinterpret_cast<void*>(page), page_size);
- histogram.erase(it);
- }
- }
- }
-
- // Get number of locked pages for diagnostics
- int GetLockedPageCount()
- {
- boost::mutex::scoped_lock lock(mutex);
- return histogram.size();
- }
-
-private:
- Locker locker;
- boost::mutex mutex;
- size_t page_size, page_mask;
- // map of page base address to lock count
- typedef std::map<size_t, int> Histogram;
- Histogram histogram;
-};
-
-
-/**
- * OS-dependent memory page locking/unlocking.
- * Defined as policy class to make stubbing for test possible.
- */
-class MemoryPageLocker
-{
-public:
- /** Lock memory pages.
- * addr and len must be a multiple of the system page size
- */
- bool Lock(const void* addr, size_t len);
- /** Unlock memory pages.
- * addr and len must be a multiple of the system page size
- */
- bool Unlock(const void* addr, size_t len);
-};
-
-/**
- * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
- * std::allocator templates.
- *
- * Some implementations of the STL allocate memory in some constructors (i.e., see
- * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
- * Due to the unpredictable order of static initializers, we have to make sure the
- * LockedPageManager instance exists before any other STL-based objects that use
- * secure_allocator are created. So instead of having LockedPageManager also be
- * static-initialized, it is created on demand.
- */
-class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
-{
-public:
- static LockedPageManager& Instance()
- {
- boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
- return *LockedPageManager::_instance;
- }
-
-private:
- LockedPageManager();
-
- static void CreateInstance()
- {
- // Using a local static instance guarantees that the object is initialized
- // when it's first needed and also deinitialized after all objects that use
- // it are done with it. I can think of one unlikely scenario where we may
- // have a static deinitialization order/problem, but the check in
- // LockedPageManagerBase's destructor helps us detect if that ever happens.
- static LockedPageManager instance;
- LockedPageManager::_instance = &instance;
- }
-
- static LockedPageManager* _instance;
- static boost::once_flag init_flag;
-};
-
-//
-// Functions for directly locking/unlocking memory objects.
-// Intended for non-dynamically allocated structures.
-//
-template <typename T>
-void LockObject(const T& t)
-{
- LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
-}
-
-template <typename T>
-void UnlockObject(const T& t)
-{
- memory_cleanse((void*)(&t), sizeof(T));
- LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
-}
-
-#endif // BITCOIN_SUPPORT_PAGELOCKER_H
diff --git a/src/test/Checkpoints_tests.cpp b/src/test/Checkpoints_tests.cpp
deleted file mode 100644
index 1b7d368e13..0000000000
--- a/src/test/Checkpoints_tests.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-//
-// Unit tests for block-chain checkpoints
-//
-
-#include "checkpoints.h"
-
-#include "uint256.h"
-#include "test/test_bitcoin.h"
-#include "chainparams.h"
-
-#include <boost/test/unit_test.hpp>
-
-using namespace std;
-
-BOOST_FIXTURE_TEST_SUITE(Checkpoints_tests, BasicTestingSetup)
-
-BOOST_AUTO_TEST_CASE(sanity)
-{
- const CCheckpointData& checkpoints = Params(CBaseChainParams::MAIN).Checkpoints();
- BOOST_CHECK(Checkpoints::GetTotalBlocksEstimate(checkpoints) >= 134444);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 97abeb7211..6eed636080 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -48,8 +48,9 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
{
connman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, "", true);
- GetNodeSignals().InitializeNode(dummyNode1.GetId(), &dummyNode1);
+ CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, "", true);
+ dummyNode1.SetSendVersion(PROTOCOL_VERSION);
+ GetNodeSignals().InitializeNode(&dummyNode1, *connman);
dummyNode1.nVersion = 1;
Misbehaving(dummyNode1.GetId(), 100); // Should get banned
SendMessages(&dummyNode1, *connman);
@@ -57,8 +58,9 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
BOOST_CHECK(!connman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned
CAddress addr2(ip(0xa0b0c002), NODE_NONE);
- CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, "", true);
- GetNodeSignals().InitializeNode(dummyNode2.GetId(), &dummyNode2);
+ CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, "", true);
+ dummyNode2.SetSendVersion(PROTOCOL_VERSION);
+ GetNodeSignals().InitializeNode(&dummyNode2, *connman);
dummyNode2.nVersion = 1;
Misbehaving(dummyNode2.GetId(), 50);
SendMessages(&dummyNode2, *connman);
@@ -74,8 +76,9 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
connman->ClearBanned();
mapArgs["-banscore"] = "111"; // because 11 is my favorite number
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, "", true);
- GetNodeSignals().InitializeNode(dummyNode1.GetId(), &dummyNode1);
+ CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, 1, "", true);
+ dummyNode1.SetSendVersion(PROTOCOL_VERSION);
+ GetNodeSignals().InitializeNode(&dummyNode1, *connman);
dummyNode1.nVersion = 1;
Misbehaving(dummyNode1.GetId(), 100);
SendMessages(&dummyNode1, *connman);
@@ -96,8 +99,9 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
SetMockTime(nStartTime); // Overrides future calls to GetTime()
CAddress addr(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, "", true);
- GetNodeSignals().InitializeNode(dummyNode.GetId(), &dummyNode);
+ CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, "", true);
+ dummyNode.SetSendVersion(PROTOCOL_VERSION);
+ GetNodeSignals().InitializeNode(&dummyNode, *connman);
dummyNode.nVersion = 1;
Misbehaving(dummyNode.GetId(), 100);
diff --git a/src/test/README.md b/src/test/README.md
index 3afdefe5fc..8f99804e10 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -1,4 +1,36 @@
-# Notes
+### Compiling/running unit tests
+
+Unit tests will be automatically compiled if dependencies were met in `./configure`
+and tests weren't explicitly disabled.
+
+After configuring, they can be run with `make check`.
+
+To run the bitcoind tests manually, launch `src/test/test_bitcoin`.
+
+To add more bitcoind tests, add `BOOST_AUTO_TEST_CASE` functions to the existing
+.cpp files in the `test/` directory or add new .cpp files that
+implement new BOOST_AUTO_TEST_SUITE sections.
+
+To run the bitcoin-qt tests manually, launch `src/qt/test/test_bitcoin-qt`
+
+To add more bitcoin-qt tests, add them to the `src/qt/test/` directory and
+the `src/qt/test/test_main.cpp` file.
+
+### Running individual tests
+
+test_bitcoin has some built-in command-line arguments; for
+example, to run just the getarg_tests verbosely:
+
+ test_bitcoin --log_level=all --run_test=getarg_tests
+
+... or to run just the doubledash test:
+
+ test_bitcoin --run_test=getarg_tests/doubledash
+
+Run `test_bitcoin --help` for the full list.
+
+### Note on adding test cases
+
The sources in this directory are unit test cases. Boost includes a
unit testing framework, and since bitcoin already uses boost, it makes
sense to simply use this framework rather than require developers to
@@ -19,17 +51,6 @@ For further reading, I found the following website to be helpful in
explaining how the boost unit test framework works:
[http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/](http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/).
-test_bitcoin has some built-in command-line arguments; for
-example, to run just the getarg_tests verbosely:
-
- test_bitcoin --log_level=all --run_test=getarg_tests
-
-... or to run just the doubledash test:
-
- test_bitcoin --run_test=getarg_tests/doubledash
-
-Run `test_bitcoin --help` for the full list.
-
### bitcoin-util-test.py
The test directory also contains the bitcoin-util-test.py tool, which tests bitcoin utils (currently just bitcoin-tx). This test gets run automatically during the `make check` build process. It is also possible to run the test manually from the src directory:
@@ -37,4 +58,4 @@ The test directory also contains the bitcoin-util-test.py tool, which tests bitc
```
test/bitcoin-util-test.py --srcdir=[current directory]
-``` \ No newline at end of file
+```
diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp
index 613f6c12d7..77e9df5d82 100644
--- a/src/test/allocator_tests.cpp
+++ b/src/test/allocator_tests.cpp
@@ -11,110 +11,224 @@
BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup)
-// Dummy memory page locker for platform independent tests
-static const void *last_lock_addr, *last_unlock_addr;
-static size_t last_lock_len, last_unlock_len;
-class TestLocker
+BOOST_AUTO_TEST_CASE(arena_tests)
{
-public:
- bool Lock(const void *addr, size_t len)
+ // Fake memory base address for testing
+ // without actually using memory.
+ void *synth_base = reinterpret_cast<void*>(0x08000000);
+ const size_t synth_size = 1024*1024;
+ Arena b(synth_base, synth_size, 16);
+ void *chunk = b.alloc(1000);
+#ifdef ARENA_DEBUG
+ b.walk();
+#endif
+ BOOST_CHECK(chunk != nullptr);
+ BOOST_CHECK(b.stats().used == 1008); // Aligned to 16
+ BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared?
+ b.free(chunk);
+#ifdef ARENA_DEBUG
+ b.walk();
+#endif
+ BOOST_CHECK(b.stats().used == 0);
+ BOOST_CHECK(b.stats().free == synth_size);
+ try { // Test exception on double-free
+ b.free(chunk);
+ BOOST_CHECK(0);
+ } catch(std::runtime_error &)
{
- last_lock_addr = addr;
- last_lock_len = len;
- return true;
}
- bool Unlock(const void *addr, size_t len)
- {
- last_unlock_addr = addr;
- last_unlock_len = len;
- return true;
+
+ void *a0 = b.alloc(128);
+ void *a1 = b.alloc(256);
+ void *a2 = b.alloc(512);
+ BOOST_CHECK(b.stats().used == 896);
+ BOOST_CHECK(b.stats().total == synth_size);
+#ifdef ARENA_DEBUG
+ b.walk();
+#endif
+ b.free(a0);
+#ifdef ARENA_DEBUG
+ b.walk();
+#endif
+ BOOST_CHECK(b.stats().used == 768);
+ b.free(a1);
+ BOOST_CHECK(b.stats().used == 512);
+ void *a3 = b.alloc(128);
+#ifdef ARENA_DEBUG
+ b.walk();
+#endif
+ BOOST_CHECK(b.stats().used == 640);
+ b.free(a2);
+ BOOST_CHECK(b.stats().used == 128);
+ b.free(a3);
+ BOOST_CHECK(b.stats().used == 0);
+ BOOST_CHECK_EQUAL(b.stats().chunks_used, 0);
+ BOOST_CHECK(b.stats().total == synth_size);
+ BOOST_CHECK(b.stats().free == synth_size);
+ BOOST_CHECK_EQUAL(b.stats().chunks_free, 1);
+
+ std::vector<void*> addr;
+ BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
+#ifdef ARENA_DEBUG
+ b.walk();
+#endif
+    // Sweep: allocate all of the arena's memory in 1024-byte chunks
+ for (int x=0; x<1024; ++x)
+ addr.push_back(b.alloc(1024));
+ BOOST_CHECK(b.stats().free == 0);
+ BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
+ BOOST_CHECK(b.alloc(0) == nullptr);
+ for (int x=0; x<1024; ++x)
+ b.free(addr[x]);
+ addr.clear();
+ BOOST_CHECK(b.stats().total == synth_size);
+ BOOST_CHECK(b.stats().free == synth_size);
+
+ // Now in the other direction...
+ for (int x=0; x<1024; ++x)
+ addr.push_back(b.alloc(1024));
+ for (int x=0; x<1024; ++x)
+ b.free(addr[1023-x]);
+ addr.clear();
+
+    // Now allocate in smaller, unequal chunks, then deallocate haphazardly.
+    // Not all of the allocations will succeed, but freeing nullptr is
+    // allowed so that is no problem.
+ for (int x=0; x<2048; ++x)
+ addr.push_back(b.alloc(x+1));
+ for (int x=0; x<2048; ++x)
+ b.free(addr[((x*23)%2048)^242]);
+ addr.clear();
+
+ // Go entirely wild: free and alloc interleaved,
+ // generate targets and sizes using pseudo-randomness.
+ for (int x=0; x<2048; ++x)
+ addr.push_back(0);
+ uint32_t s = 0x12345678;
+ for (int x=0; x<5000; ++x) {
+ int idx = s & (addr.size()-1);
+ if (s & 0x80000000) {
+ b.free(addr[idx]);
+ addr[idx] = 0;
+ } else if(!addr[idx]) {
+ addr[idx] = b.alloc((s >> 16) & 2047);
+ }
+ bool lsb = s & 1;
+ s >>= 1;
+ if (lsb)
+ s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
}
-};
+ for (void *ptr: addr)
+ b.free(ptr);
+ addr.clear();
-BOOST_AUTO_TEST_CASE(test_LockedPageManagerBase)
+ BOOST_CHECK(b.stats().total == synth_size);
+ BOOST_CHECK(b.stats().free == synth_size);
+}
+
+/** Mock LockedPageAllocator for testing */
+class TestLockedPageAllocator: public LockedPageAllocator
{
- const size_t test_page_size = 4096;
- LockedPageManagerBase<TestLocker> lpm(test_page_size);
- size_t addr;
- last_lock_addr = last_unlock_addr = 0;
- last_lock_len = last_unlock_len = 0;
-
- /* Try large number of small objects */
- addr = 0;
- for(int i=0; i<1000; ++i)
- {
- lpm.LockRange(reinterpret_cast<void*>(addr), 33);
- addr += 33;
- }
- /* Try small number of page-sized objects, straddling two pages */
- addr = test_page_size*100 + 53;
- for(int i=0; i<100; ++i)
- {
- lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
- addr += test_page_size;
- }
- /* Try small number of page-sized objects aligned to exactly one page */
- addr = test_page_size*300;
- for(int i=0; i<100; ++i)
- {
- lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
- addr += test_page_size;
- }
- /* one very large object, straddling pages */
- lpm.LockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
- BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(600+500)));
- /* one very large object, page aligned */
- lpm.LockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
- BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(1200+500-1)));
-
- BOOST_CHECK(lpm.GetLockedPageCount() == (
- (1000*33+test_page_size-1)/test_page_size + // small objects
- 101 + 100 + // page-sized objects
- 501 + 500)); // large objects
- BOOST_CHECK((last_lock_len & (test_page_size-1)) == 0); // always lock entire pages
- BOOST_CHECK(last_unlock_len == 0); // nothing unlocked yet
-
- /* And unlock again */
- addr = 0;
- for(int i=0; i<1000; ++i)
+public:
+ TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
+ void* AllocateLocked(size_t len, bool *lockingSuccess)
{
- lpm.UnlockRange(reinterpret_cast<void*>(addr), 33);
- addr += 33;
+ *lockingSuccess = false;
+ if (count > 0) {
+ --count;
+
+ if (lockedcount > 0) {
+ --lockedcount;
+ *lockingSuccess = true;
+ }
+
+ return reinterpret_cast<void*>(0x08000000 + (count<<24)); // Fake address, do not actually use this memory
+ }
+ return 0;
}
- addr = test_page_size*100 + 53;
- for(int i=0; i<100; ++i)
+ void FreeLocked(void* addr, size_t len)
{
- lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
- addr += test_page_size;
}
- addr = test_page_size*300;
- for(int i=0; i<100; ++i)
+ size_t GetLimit()
{
- lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
- addr += test_page_size;
+ return std::numeric_limits<size_t>::max();
}
- lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
- lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
+private:
+ int count;
+ int lockedcount;
+};
- /* Check that everything is released */
- BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+BOOST_AUTO_TEST_CASE(lockedpool_tests_mock)
+{
+ // Test over three virtual arenas, of which one will succeed being locked
+ std::unique_ptr<LockedPageAllocator> x(new TestLockedPageAllocator(3, 1));
+ LockedPool pool(std::move(x));
+ BOOST_CHECK(pool.stats().total == 0);
+ BOOST_CHECK(pool.stats().locked == 0);
- /* A few and unlocks of size zero (should have no effect) */
- addr = 0;
- for(int i=0; i<1000; ++i)
- {
- lpm.LockRange(reinterpret_cast<void*>(addr), 0);
- addr += 1;
- }
- BOOST_CHECK(lpm.GetLockedPageCount() == 0);
- addr = 0;
- for(int i=0; i<1000; ++i)
+ // Ensure unreasonable requests are refused without allocating anything
+ void *invalid_toosmall = pool.alloc(0);
+ BOOST_CHECK(invalid_toosmall == nullptr);
+ BOOST_CHECK(pool.stats().used == 0);
+ BOOST_CHECK(pool.stats().free == 0);
+ void *invalid_toobig = pool.alloc(LockedPool::ARENA_SIZE+1);
+ BOOST_CHECK(invalid_toobig == nullptr);
+ BOOST_CHECK(pool.stats().used == 0);
+ BOOST_CHECK(pool.stats().free == 0);
+
+ void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+ BOOST_CHECK(a0);
+ BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
+ void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+ BOOST_CHECK(a1);
+ void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+ BOOST_CHECK(a2);
+ void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+ BOOST_CHECK(a3);
+ void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+ BOOST_CHECK(a4);
+ void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2);
+ BOOST_CHECK(a5);
+ // We've passed a count of three arenas, so this allocation should fail
+ void *a6 = pool.alloc(16);
+ BOOST_CHECK(!a6);
+
+ pool.free(a0);
+ pool.free(a2);
+ pool.free(a4);
+ pool.free(a1);
+ pool.free(a3);
+ pool.free(a5);
+ BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE);
+ BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
+ BOOST_CHECK(pool.stats().used == 0);
+}
+
+// These tests use the live LockedPoolManager object, which is also used by
+// other tests, so the conditions are somewhat less controllable and thus the
+// tests are somewhat more error-prone.
+BOOST_AUTO_TEST_CASE(lockedpool_tests_live)
+{
+ LockedPoolManager &pool = LockedPoolManager::Instance();
+ LockedPool::Stats initial = pool.stats();
+
+ void *a0 = pool.alloc(16);
+ BOOST_CHECK(a0);
+ // Test reading and writing the allocated memory
+ *((uint32_t*)a0) = 0x1234;
+ BOOST_CHECK(*((uint32_t*)a0) == 0x1234);
+
+ pool.free(a0);
+ try { // Test exception on double-free
+ pool.free(a0);
+ BOOST_CHECK(0);
+ } catch(std::runtime_error &)
{
- lpm.UnlockRange(reinterpret_cast<void*>(addr), 0);
- addr += 1;
}
- BOOST_CHECK(lpm.GetLockedPageCount() == 0);
- BOOST_CHECK((last_unlock_len & (test_page_size-1)) == 0); // always unlock entire pages
+ // If more than one new arena was allocated for the above tests, something is wrong
+ BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE));
+ // Usage must be back to where it started
+ BOOST_CHECK(pool.stats().used == initial.used);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/bctest.py b/src/test/bctest.py
index d801415c70..adc5d0e418 100644
--- a/src/test/bctest.py
+++ b/src/test/bctest.py
@@ -1,4 +1,5 @@
-# Copyright 2014 BitPay, Inc.
+# Copyright 2014 BitPay Inc.
+# Copyright 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
@@ -6,56 +7,115 @@ import subprocess
import os
import json
import sys
+import binascii
+import difflib
+import logging
+
+def parse_output(a, fmt):
+ """Parse the output according to specified format.
+
+ Raise an error if the output can't be parsed."""
+ if fmt == 'json': # json: compare parsed data
+ return json.loads(a)
+ elif fmt == 'hex': # hex: parse and compare binary data
+ return binascii.a2b_hex(a.strip())
+ else:
+ raise NotImplementedError("Don't know how to compare %s" % fmt)
def bctest(testDir, testObj, exeext):
+ """Runs a single test, comparing output and RC to expected output and RC.
+
+ Raises an error if input can't be read, executable fails, or output/RC
+ are not as expected. Error is caught by bctester() and reported.
+ """
+ # Get the exec names and arguments
+ execprog = testObj['exec'] + exeext
+ execargs = testObj['args']
+ execrun = [execprog] + execargs
+
+ # Read the input data (if there is any)
+ stdinCfg = None
+ inputData = None
+ if "input" in testObj:
+ filename = testDir + "/" + testObj['input']
+ inputData = open(filename).read()
+ stdinCfg = subprocess.PIPE
+
+ # Read the expected output data (if there is any)
+ outputFn = None
+ outputData = None
+ if "output_cmp" in testObj:
+ outputFn = testObj['output_cmp']
+ outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
+ try:
+ outputData = open(testDir + "/" + outputFn).read()
+ except:
+ logging.error("Output file " + outputFn + " can not be opened")
+ raise
+ if not outputData:
+ logging.error("Output data missing for " + outputFn)
+ raise Exception
+
+ # Run the test
+ proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True)
+ try:
+ outs = proc.communicate(input=inputData)
+ except OSError:
+ logging.error("OSError, Failed to execute " + execprog)
+ raise
+
+ if outputData:
+ # Parse command output and expected output
+ try:
+ a_parsed = parse_output(outs[0], outputType)
+ except Exception as e:
+ logging.error('Error parsing command output as %s: %s' % (outputType,e))
+ raise
+ try:
+ b_parsed = parse_output(outputData, outputType)
+ except Exception as e:
+ logging.error('Error parsing expected output %s as %s: %s' % (outputFn,outputType,e))
+ raise
+ # Compare data
+ if a_parsed != b_parsed:
+ logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
+ raise Exception
+ # Compare formatting
+ if outs[0] != outputData:
+ error_message = "Output formatting mismatch for " + outputFn + ":\n"
+ error_message += "".join(difflib.context_diff(outputData.splitlines(True),
+ outs[0].splitlines(True),
+ fromfile=outputFn,
+ tofile="returned"))
+ logging.error(error_message)
+ raise Exception
+
+ # Compare the return code to the expected return code
+ wantRC = 0
+ if "return_code" in testObj:
+ wantRC = testObj['return_code']
+ if proc.returncode != wantRC:
+ logging.error("Return code mismatch for " + outputFn)
+ raise Exception
+
+def bctester(testDir, input_basename, buildenv):
+ """ Loads and parses the input file, runs all tests and reports results"""
+ input_filename = testDir + "/" + input_basename
+ raw_data = open(input_filename).read()
+ input_data = json.loads(raw_data)
+
+ failed_testcases = []
- execprog = testObj['exec'] + exeext
- execargs = testObj['args']
- execrun = [execprog] + execargs
- stdinCfg = None
- inputData = None
- if "input" in testObj:
- filename = testDir + "/" + testObj['input']
- inputData = open(filename).read()
- stdinCfg = subprocess.PIPE
-
- outputFn = None
- outputData = None
- if "output_cmp" in testObj:
- outputFn = testObj['output_cmp']
- outputData = open(testDir + "/" + outputFn).read()
- if not outputData:
- print("Output data missing for " + outputFn)
- sys.exit(1)
- proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True)
- try:
- outs = proc.communicate(input=inputData)
- except OSError:
- print("OSError, Failed to execute " + execprog)
- sys.exit(1)
-
- if outputData and (outs[0] != outputData):
- print("Output data mismatch for " + outputFn)
- sys.exit(1)
-
- wantRC = 0
- if "return_code" in testObj:
- wantRC = testObj['return_code']
- if proc.returncode != wantRC:
- print("Return code mismatch for " + outputFn)
- sys.exit(1)
-
-def bctester(testDir, input_basename, buildenv, verbose = False):
- input_filename = testDir + "/" + input_basename
- raw_data = open(input_filename).read()
- input_data = json.loads(raw_data)
-
- for testObj in input_data:
- if verbose and "description" in testObj:
- print ("Testing: " + testObj["description"])
- bctest(testDir, testObj, buildenv.exeext)
- if verbose and "description" in testObj:
- print ("PASS")
-
- sys.exit(0)
+ for testObj in input_data:
+ try:
+ bctest(testDir, testObj, buildenv.exeext)
+ logging.info("PASSED: " + testObj["description"])
+ except:
+ logging.info("FAILED: " + testObj["description"])
+ failed_testcases.append(testObj["description"])
+ if failed_testcases:
+ logging.error("FAILED TESTCASES: [" + ", ".join(failed_testcases) + "]")
+ sys.exit(1)
+ else:
+ sys.exit(0)
diff --git a/src/test/bitcoin-util-test.py b/src/test/bitcoin-util-test.py
index 3099506d6d..4301b93b7c 100755
--- a/src/test/bitcoin-util-test.py
+++ b/src/test/bitcoin-util-test.py
@@ -1,12 +1,15 @@
#!/usr/bin/env python
-# Copyright 2014 BitPay, Inc.
+# Copyright 2014 BitPay Inc.
+# Copyright 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import os
+import sys
import bctest
import buildenv
import argparse
+import logging
help_text="""Test framework for bitcoin utils.
@@ -14,14 +17,16 @@ Runs automatically during `make check`.
Can also be run manually from the src directory by specifying the source directory:
-test/bitcoin-util-test.py --src=[srcdir]
+test/bitcoin-util-test.py --srcdir='srcdir' [--verbose]
"""
-
if __name__ == '__main__':
- verbose = False
+ # Try to get the source directory from the environment variables. This will
+    # be set for `make check` automated runs. If the environment variable is not set,
+ # then get the source directory from command line args.
try:
srcdir = os.environ["srcdir"]
+ verbose = False
except:
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-s', '--srcdir')
@@ -29,4 +34,13 @@ if __name__ == '__main__':
args = parser.parse_args()
srcdir = args.srcdir
verbose = args.verbose
- bctest.bctester(srcdir + "/test/data", "bitcoin-util-test.json", buildenv, verbose = verbose)
+
+ if verbose:
+ level = logging.DEBUG
+ else:
+ level = logging.ERROR
+ formatter = '%(asctime)s - %(levelname)s - %(message)s'
+ # Add the format/level to the logger
+ logging.basicConfig(format = formatter, level=level)
+
+ bctest.bctester(srcdir + "/test/data", "bitcoin-util-test.json", buildenv)
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index b0d9184816..0ed5d62ef6 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -129,7 +129,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(header);
READWRITE(nonce);
size_t shorttxids_size = shorttxids.size();
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 042fad42da..25fb9ea2b7 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -41,7 +41,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "BloomFilter doesn't contain just-inserted object (3)!");
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
- filter.Serialize(stream, SER_NETWORK, PROTOCOL_VERSION);
+ stream << filter;
vector<unsigned char> vch = ParseHex("03614e9b050000000000000001");
vector<char> expected(vch.size());
@@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "BloomFilter doesn't contain just-inserted object (3)!");
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
- filter.Serialize(stream, SER_NETWORK, PROTOCOL_VERSION);
+ stream << filter;
vector<unsigned char> vch = ParseHex("03ce4299050000000100008001");
vector<char> expected(vch.size());
@@ -100,7 +100,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
filter.insert(vector<unsigned char>(hash.begin(), hash.end()));
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
- filter.Serialize(stream, SER_NETWORK, PROTOCOL_VERSION);
+ stream << filter;
vector<unsigned char> vch = ParseHex("038fc16b080000000000000001");
vector<char> expected(vch.size());
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index b487686136..82de302053 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -3,11 +3,11 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "coins.h"
-#include "test_random.h"
#include "script/standard.h"
#include "uint256.h"
#include "utilstrencodings.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include "main.h"
#include "consensus/validation.h"
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index c7b4fb240c..7dcd548edf 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -9,9 +9,9 @@
#include "crypto/sha512.h"
#include "crypto/hmac_sha256.h"
#include "crypto/hmac_sha512.h"
-#include "test_random.h"
#include "utilstrencodings.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include <vector>
diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp
index d4d825d199..2d791ee18d 100644
--- a/src/test/dbwrapper_tests.cpp
+++ b/src/test/dbwrapper_tests.cpp
@@ -254,7 +254,7 @@ struct StringContentsSerializer {
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
if (ser_action.ForRead()) {
str.clear();
char c = 0;
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 555d36faac..a73dbe725c 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -547,7 +547,6 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
pool.addUnchecked(tx7.GetHash(), entry.Fee(9000LL).FromTx(tx7, &pool));
std::vector<CTransaction> vtx;
- std::vector<std::shared_ptr<const CTransaction>> conflicts;
SetMockTime(42);
SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + 1000);
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index 66ca381ea7..706d30f489 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -4,7 +4,7 @@
#include "consensus/merkle.h"
#include "test/test_bitcoin.h"
-#include "test_random.h"
+#include "test/test_random.h"
#include <boost/test/unit_test.hpp>
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index a94979fd77..2762cafa38 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -223,7 +223,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
pblock->nNonce = blockinfo[i].nonce;
CValidationState state;
- BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL));
+ BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL, false));
BOOST_CHECK(state.IsValid());
pblock->hashPrevBlock = pblock->GetHash();
}
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index f4b5768693..87cb38daac 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -17,7 +17,7 @@ using namespace std;
class CAddrManSerializationMock : public CAddrMan
{
public:
- virtual void Serialize(CDataStream& s, int nType, int nVersionDummy) const = 0;
+ virtual void Serialize(CDataStream& s) const = 0;
//! Ensure that bucket placement is always the same for testing purposes.
void MakeDeterministic()
@@ -30,16 +30,16 @@ public:
class CAddrManUncorrupted : public CAddrManSerializationMock
{
public:
- void Serialize(CDataStream& s, int nType, int nVersionDummy) const
+ void Serialize(CDataStream& s) const
{
- CAddrMan::Serialize(s, nType, nVersionDummy);
+ CAddrMan::Serialize(s);
}
};
class CAddrManCorrupted : public CAddrManSerializationMock
{
public:
- void Serialize(CDataStream& s, int nType, int nVersionDummy) const
+ void Serialize(CDataStream& s) const
{
// Produces corrupt output that claims addrman has 20 addrs when it only has one addr.
unsigned char nVersion = 1;
@@ -164,12 +164,12 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
bool fInboundIn = false;
// Test that fFeeler is false by default.
- CNode* pnode1 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, pszDest, fInboundIn);
+ CNode* pnode1 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, pszDest, fInboundIn);
BOOST_CHECK(pnode1->fInbound == false);
BOOST_CHECK(pnode1->fFeeler == false);
fInboundIn = true;
- CNode* pnode2 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, pszDest, fInboundIn);
+ CNode* pnode2 = new CNode(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, pszDest, fInboundIn);
BOOST_CHECK(pnode2->fInbound == true);
BOOST_CHECK(pnode2->fFeeler == false);
}
diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp
index b7f83d38f0..c773129640 100644
--- a/src/test/pmt_tests.cpp
+++ b/src/test/pmt_tests.cpp
@@ -9,8 +9,8 @@
#include "uint256.h"
#include "arith_uint256.h"
#include "version.h"
-#include "test_random.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include <vector>
diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp
index f57c24270c..38aaaba267 100644
--- a/src/test/policyestimator_tests.cpp
+++ b/src/test/policyestimator_tests.cpp
@@ -19,26 +19,18 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
CTxMemPool mpool(CFeeRate(1000));
TestMemPoolEntryHelper entry;
CAmount basefee(2000);
- double basepri = 1e6;
CAmount deltaFee(100);
- double deltaPri=5e5;
- std::vector<CAmount> feeV[2];
- std::vector<double> priV[2];
+ std::vector<CAmount> feeV;
- // Populate vectors of increasing fees or priorities
+ // Populate vectors of increasing fees
for (int j = 0; j < 10; j++) {
- //V[0] is for fee transactions
- feeV[0].push_back(basefee * (j+1));
- priV[0].push_back(0);
- //V[1] is for priority transactions
- feeV[1].push_back(CAmount(0));
- priV[1].push_back(basepri * pow(10, j+1));
+ feeV.push_back(basefee * (j+1));
}
// Store the hashes of transactions that have been
- // added to the mempool by their associate fee/pri
+ // added to the mempool by their associate fee
// txHashes[j] is populated with transactions either of
- // fee = basefee * (j+1) OR pri = 10^6 * 10^(j+1)
+ // fee = basefee * (j+1)
std::vector<uint256> txHashes[10];
// Create a transaction template
@@ -60,19 +52,19 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// At a decay .998 and 4 fee transactions per block
// This makes the tx count about 1.33 per bucket, above the 1 threshold
while (blocknum < 200) {
- for (int j = 0; j < 10; j++) { // For each fee/pri multiple
- for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+ for (int j = 0; j < 10; j++) { // For each fee
+ for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k; // make transaction unique
uint256 hash = tx.GetHash();
- mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
+ mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
txHashes[j].push_back(hash);
}
}
- //Create blocks where higher fee/pri txs are included more often
+ //Create blocks where higher fee txs are included more often
for (int h = 0; h <= blocknum%10; h++) {
- // 10/10 blocks add highest fee/pri transactions
+ // 10/10 blocks add highest fee transactions
// 9/10 blocks add 2nd highest and so on until ...
- // 1/10 blocks add lowest fee/pri transactions
+ // 1/10 blocks add lowest fee transactions
while (txHashes[9-h].size()) {
std::shared_ptr<const CTransaction> ptx = mpool.get(txHashes[9-h].back());
if (ptx)
@@ -100,7 +92,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
}
std::vector<CAmount> origFeeEst;
- std::vector<double> origPriEst;
// Highest feerate is 10*baseRate and gets in all blocks,
// second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%,
// third highest feerate is 8*base rate, and gets in 8/10 blocks = 80%,
@@ -109,16 +100,12 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
// so estimateFee(2) should return 9*baseRate etc...
for (int i = 1; i < 10;i++) {
origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK());
- origPriEst.push_back(mpool.estimatePriority(i));
if (i > 1) { // Fee estimates should be monotonically decreasing
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
- BOOST_CHECK(origPriEst[i-1] <= origPriEst[i-2]);
}
int mult = 11-i;
BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
- BOOST_CHECK(origPriEst[i-1] < pow(10,mult) * basepri + deltaPri);
- BOOST_CHECK(origPriEst[i-1] > pow(10,mult) * basepri - deltaPri);
}
// Mine 50 more blocks with no transactions happening, estimates shouldn't change
@@ -129,19 +116,17 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
for (int i = 1; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee);
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
- BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] + deltaPri);
- BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
}
// Mine 15 more blocks with lots of transactions happening and not getting mined
// Estimates should go up
while (blocknum < 265) {
- for (int j = 0; j < 10; j++) { // For each fee/pri multiple
- for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+ for (int j = 0; j < 10; j++) { // For each fee multiple
+ for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
uint256 hash = tx.GetHash();
- mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
+ mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
txHashes[j].push_back(hash);
}
}
@@ -152,8 +137,6 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
for (int i = 1; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i) == CFeeRate(0) || mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
BOOST_CHECK(mpool.estimateSmartFee(i, &answerFound).GetFeePerK() > origFeeEst[answerFound-1] - deltaFee);
- BOOST_CHECK(mpool.estimatePriority(i) == -1 || mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
- BOOST_CHECK(mpool.estimateSmartPriority(i, &answerFound) > origPriEst[answerFound-1] - deltaPri);
}
// Mine all those transactions
@@ -170,20 +153,20 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
block.clear();
for (int i = 1; i < 10;i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
- BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
}
// Mine 200 more blocks where everything is mined every block
// Estimates should be below original estimates
while (blocknum < 465) {
- for (int j = 0; j < 10; j++) { // For each fee/pri multiple
- for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+ for (int j = 0; j < 10; j++) { // For each fee multiple
+ for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
uint256 hash = tx.GetHash();
- mpool.addUnchecked(hash, entry.Fee(feeV[k/4][j]).Time(GetTime()).Priority(priV[k/4][j]).Height(blocknum).FromTx(tx, &mpool));
+ mpool.addUnchecked(hash, entry.Fee(feeV[j]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
std::shared_ptr<const CTransaction> ptx = mpool.get(hash);
if (ptx)
block.push_back(*ptx);
+
}
}
mpool.removeForBlock(block, ++blocknum);
@@ -191,15 +174,14 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
}
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
- BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] - deltaPri);
}
// Test that if the mempool is limited, estimateSmartFee won't return a value below the mempool min fee
// and that estimateSmartPriority returns essentially an infinite value
- mpool.addUnchecked(tx.GetHash(), entry.Fee(feeV[0][5]).Time(GetTime()).Priority(priV[1][5]).Height(blocknum).FromTx(tx, &mpool));
- // evict that transaction which should set a mempool min fee of minRelayTxFee + feeV[0][5]
+ mpool.addUnchecked(tx.GetHash(), entry.Fee(feeV[5]).Time(GetTime()).Priority(0).Height(blocknum).FromTx(tx, &mpool));
+ // evict that transaction which should set a mempool min fee of minRelayTxFee + feeV[5]
mpool.TrimToSize(1);
- BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[0][5]);
+ BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[5]);
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateSmartFee(i).GetFeePerK() >= mpool.estimateFee(i).GetFeePerK());
BOOST_CHECK(mpool.estimateSmartFee(i).GetFeePerK() >= mpool.GetMinFee(1).GetFeePerK());
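The restructured test above keeps only the fee dimension: feeV holds ten increasing feerates, four transactions are added per level per block, and higher fee levels are mined in more of every ten blocks. A small standalone sketch (not part of the test itself) of that inclusion pattern, showing why estimateFee(i) comes out monotonically decreasing:

#include <array>
#include <cstdio>

int main() {
    // Mirror of the test's block loop: in block b, fee levels 9, 8, ..., 9-(b%10) are mined.
    std::array<int, 10> confirmed{};   // blocks (out of 10) in which each fee level confirms
    for (int blocknum = 0; blocknum < 10; ++blocknum) {
        for (int h = 0; h <= blocknum % 10; ++h) {
            ++confirmed[9 - h];        // highest-fee levels are included most often
        }
    }
    for (int j = 0; j < 10; ++j) {
        std::printf("fee level %d confirmed in %d/10 blocks\n", j, confirmed[j]);
    }
    return 0;
}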
diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp
index 6cad02e738..1e5de2021c 100644
--- a/src/test/prevector_tests.cpp
+++ b/src/test/prevector_tests.cpp
@@ -4,12 +4,12 @@
#include <vector>
#include "prevector.h"
-#include "test_random.h"
#include "serialize.h"
#include "streams.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include <boost/test/unit_test.hpp>
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 561adb8ea2..3169afb13a 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -173,11 +173,14 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
#if defined(HAVE_CONSENSUS_LIB)
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << tx2;
- if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, flags, NULL) == expect, message);
- } else {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, flags, NULL) == expect, message);
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(begin_ptr(scriptPubKey), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, flags, NULL) == expect,message);
+ int libconsensus_flags = flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL;
+ if (libconsensus_flags == flags) {
+ if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message);
+ } else {
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(begin_ptr(scriptPubKey), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect,message);
+ }
}
#endif
}
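The new libconsensus_flags guard above only calls libbitcoinconsensus when every requested flag survives masking with bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL. A self-contained sketch of that guard; the flag values below are illustrative stand-ins, not the real bitcoinconsensus constants:

#include <cassert>

// Illustrative flag bits (stand-ins for the bitcoinconsensus_SCRIPT_FLAGS_* values).
static const unsigned int TOY_VERIFY_P2SH    = 1u << 0;
static const unsigned int TOY_VERIFY_WITNESS = 1u << 11;
static const unsigned int TOY_VERIFY_ALL     = TOY_VERIFY_P2SH | TOY_VERIFY_WITNESS;

// Only exercise the library when masking drops nothing, i.e. the library
// understands every flag the caller wants to verify with.
static bool ShouldCallConsensusLib(unsigned int flags) {
    unsigned int libconsensus_flags = flags & TOY_VERIFY_ALL;
    return libconsensus_flags == flags;
}

int main() {
    assert(ShouldCallConsensusLib(TOY_VERIFY_P2SH));
    assert(ShouldCallConsensusLib(TOY_VERIFY_P2SH | TOY_VERIFY_WITNESS));
    assert(!ShouldCallConsensusLib(TOY_VERIFY_P2SH | (1u << 20)));   // unknown flag: skip the lib
    return 0;
}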
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index bec2c7459d..bbadf57957 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -10,11 +10,54 @@
#include <stdint.h>
#include <boost/test/unit_test.hpp>
-
using namespace std;
BOOST_FIXTURE_TEST_SUITE(serialize_tests, BasicTestingSetup)
+class CSerializeMethodsTestSingle
+{
+protected:
+ int intval;
+ bool boolval;
+ std::string stringval;
+ const char* charstrval;
+ CTransaction txval;
+public:
+ CSerializeMethodsTestSingle() = default;
+ CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(txvalin){}
+ ADD_SERIALIZE_METHODS;
+
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ READWRITE(intval);
+ READWRITE(boolval);
+ READWRITE(stringval);
+ READWRITE(FLATDATA(charstrval));
+ READWRITE(txval);
+ }
+
+ bool operator==(const CSerializeMethodsTestSingle& rhs)
+ {
+ return intval == rhs.intval && \
+ boolval == rhs.boolval && \
+ stringval == rhs.stringval && \
+ strcmp(charstrval, rhs.charstrval) == 0 && \
+ txval == rhs.txval;
+ }
+};
+
+class CSerializeMethodsTestMany : public CSerializeMethodsTestSingle
+{
+public:
+ using CSerializeMethodsTestSingle::CSerializeMethodsTestSingle;
+ ADD_SERIALIZE_METHODS;
+
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ READWRITEMANY(intval, boolval, stringval, FLATDATA(charstrval), txval);
+ }
+};
+
BOOST_AUTO_TEST_CASE(sizes)
{
BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(char(0), 0));
@@ -297,4 +340,30 @@ BOOST_AUTO_TEST_CASE(insert_delete)
BOOST_CHECK_EQUAL(ss.size(), 0);
}
+BOOST_AUTO_TEST_CASE(class_methods)
+{
+ int intval(100);
+ bool boolval(true);
+ std::string stringval("testing");
+ const char* charstrval("testing charstr");
+ CMutableTransaction txval;
+ CSerializeMethodsTestSingle methodtest1(intval, boolval, stringval, charstrval, txval);
+ CSerializeMethodsTestMany methodtest2(intval, boolval, stringval, charstrval, txval);
+ CSerializeMethodsTestSingle methodtest3;
+ CSerializeMethodsTestMany methodtest4;
+ CDataStream ss(SER_DISK, PROTOCOL_VERSION);
+ BOOST_CHECK(methodtest1 == methodtest2);
+ ss << methodtest1;
+ ss >> methodtest4;
+ ss << methodtest2;
+ ss >> methodtest3;
+ BOOST_CHECK(methodtest1 == methodtest2);
+ BOOST_CHECK(methodtest2 == methodtest3);
+ BOOST_CHECK(methodtest3 == methodtest4);
+
+ CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, FLATDATA(charstrval), txval);
+ ss2 >> methodtest3;
+ BOOST_CHECK(methodtest3 == methodtest4);
+}
+
BOOST_AUTO_TEST_SUITE_END()
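The new class_methods test exercises READWRITEMANY and the variadic CDataStream constructor. A minimal, self-contained illustration of the underlying idea, forwarding an arbitrary member list through one variadic call, using a toy text stream rather than the real serialize.h machinery:

#include <cassert>
#include <sstream>
#include <string>

struct ToyWriter {
    std::ostringstream out;
    template <typename T>
    void Ser(const T& v) { out << v << '\n'; }   // toy stand-in for ::Serialize
};

// Serialize every argument in order with a single call, as READWRITEMANY does.
template <typename Stream>
void SerializeMany(Stream&) {}

template <typename Stream, typename Arg, typename... Args>
void SerializeMany(Stream& s, const Arg& arg, const Args&... rest) {
    s.Ser(arg);
    SerializeMany(s, rest...);
}

int main() {
    ToyWriter w;
    SerializeMany(w, 100, true, std::string("testing"));
    assert(w.out.str() == "100\n1\ntesting\n");
    return 0;
}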
diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp
index 0b1050d020..3bc8341b02 100644
--- a/src/test/sighash_tests.cpp
+++ b/src/test/sighash_tests.cpp
@@ -6,12 +6,12 @@
#include "data/sighash.json.h"
#include "hash.h"
#include "main.h" // For CheckTransaction
-#include "test_random.h"
#include "script/interpreter.h"
#include "script/script.h"
#include "serialize.h"
#include "streams.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include "util.h"
#include "utilstrencodings.h"
#include "version.h"
diff --git a/src/test/skiplist_tests.cpp b/src/test/skiplist_tests.cpp
index b19f8fbffb..d6835df71f 100644
--- a/src/test/skiplist_tests.cpp
+++ b/src/test/skiplist_tests.cpp
@@ -3,9 +3,9 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chain.h"
-#include "test_random.h"
#include "util.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include <vector>
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 98f4ed939f..3da0be8ca4 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -127,7 +127,7 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>&
while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce;
CValidationState state;
- ProcessNewBlock(state, chainparams, NULL, &block, true, NULL);
+ ProcessNewBlock(state, chainparams, NULL, &block, true, NULL, false);
CBlock result = block;
return result;
diff --git a/src/test/uint256_tests.cpp b/src/test/uint256_tests.cpp
index da0a3d73e0..2732948060 100644
--- a/src/test/uint256_tests.cpp
+++ b/src/test/uint256_tests.cpp
@@ -184,25 +184,25 @@ BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex begin() end() size() GetLow64 G
BOOST_CHECK(OneL.begin() + 32 == OneL.end());
BOOST_CHECK(MaxL.begin() + 32 == MaxL.end());
BOOST_CHECK(TmpL.begin() + 32 == TmpL.end());
- BOOST_CHECK(R1L.GetSerializeSize(0,PROTOCOL_VERSION) == 32);
- BOOST_CHECK(ZeroL.GetSerializeSize(0,PROTOCOL_VERSION) == 32);
+ BOOST_CHECK(GetSerializeSize(R1L, 0, PROTOCOL_VERSION) == 32);
+ BOOST_CHECK(GetSerializeSize(ZeroL, 0, PROTOCOL_VERSION) == 32);
- std::stringstream ss;
- R1L.Serialize(ss,0,PROTOCOL_VERSION);
+ CDataStream ss(0, PROTOCOL_VERSION);
+ ss << R1L;
BOOST_CHECK(ss.str() == std::string(R1Array,R1Array+32));
- TmpL.Unserialize(ss,0,PROTOCOL_VERSION);
+ ss >> TmpL;
BOOST_CHECK(R1L == TmpL);
- ss.str("");
- ZeroL.Serialize(ss,0,PROTOCOL_VERSION);
+ ss.clear();
+ ss << ZeroL;
BOOST_CHECK(ss.str() == std::string(ZeroArray,ZeroArray+32));
- TmpL.Unserialize(ss,0,PROTOCOL_VERSION);
+ ss >> TmpL;
BOOST_CHECK(ZeroL == TmpL);
- ss.str("");
- MaxL.Serialize(ss,0,PROTOCOL_VERSION);
+ ss.clear();
+ ss << MaxL;
BOOST_CHECK(ss.str() == std::string(MaxArray,MaxArray+32));
- TmpL.Unserialize(ss,0,PROTOCOL_VERSION);
+ ss >> TmpL;
BOOST_CHECK(MaxL == TmpL);
- ss.str("");
+ ss.clear();
BOOST_CHECK(R1S.GetHex() == R1S.ToString());
BOOST_CHECK(R2S.GetHex() == R2S.ToString());
@@ -230,24 +230,24 @@ BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex begin() end() size() GetLow64 G
BOOST_CHECK(OneS.begin() + 20 == OneS.end());
BOOST_CHECK(MaxS.begin() + 20 == MaxS.end());
BOOST_CHECK(TmpS.begin() + 20 == TmpS.end());
- BOOST_CHECK(R1S.GetSerializeSize(0,PROTOCOL_VERSION) == 20);
- BOOST_CHECK(ZeroS.GetSerializeSize(0,PROTOCOL_VERSION) == 20);
+ BOOST_CHECK(GetSerializeSize(R1S, 0, PROTOCOL_VERSION) == 20);
+ BOOST_CHECK(GetSerializeSize(ZeroS, 0, PROTOCOL_VERSION) == 20);
- R1S.Serialize(ss,0,PROTOCOL_VERSION);
+ ss << R1S;
BOOST_CHECK(ss.str() == std::string(R1Array,R1Array+20));
- TmpS.Unserialize(ss,0,PROTOCOL_VERSION);
+ ss >> TmpS;
BOOST_CHECK(R1S == TmpS);
- ss.str("");
- ZeroS.Serialize(ss,0,PROTOCOL_VERSION);
+ ss.clear();
+ ss << ZeroS;
BOOST_CHECK(ss.str() == std::string(ZeroArray,ZeroArray+20));
- TmpS.Unserialize(ss,0,PROTOCOL_VERSION);
+ ss >> TmpS;
BOOST_CHECK(ZeroS == TmpS);
- ss.str("");
- MaxS.Serialize(ss,0,PROTOCOL_VERSION);
+ ss.clear();
+ ss << MaxS;
BOOST_CHECK(ss.str() == std::string(MaxArray,MaxArray+20));
- TmpS.Unserialize(ss,0,PROTOCOL_VERSION);
+ ss >> TmpS;
BOOST_CHECK(MaxS == TmpS);
- ss.str("");
+ ss.clear();
}
BOOST_AUTO_TEST_CASE( conversion )
diff --git a/src/test/univalue_tests.cpp b/src/test/univalue_tests.cpp
index 45d480c816..7f794fcbe9 100644
--- a/src/test/univalue_tests.cpp
+++ b/src/test/univalue_tests.cpp
@@ -1,4 +1,4 @@
-// Copyright 2014 BitPay, Inc.
+// Copyright (c) 2014 BitPay Inc.
// Copyright (c) 2014-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 0f1c7ab222..bad72ffc0f 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -6,11 +6,11 @@
#include "clientversion.h"
#include "primitives/transaction.h"
-#include "test_random.h"
#include "sync.h"
#include "utilstrencodings.h"
#include "utilmoneystr.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include <stdint.h>
#include <vector>
diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp
index 784e796998..c05d593ed6 100644
--- a/src/test/versionbits_tests.cpp
+++ b/src/test/versionbits_tests.cpp
@@ -3,9 +3,9 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chain.h"
-#include "test_random.h"
#include "versionbits.h"
#include "test/test_bitcoin.h"
+#include "test/test_random.h"
#include "chainparams.h"
#include "main.h"
#include "consensus/params.h"
diff --git a/src/txdb.h b/src/txdb.h
index adb3f66327..687c686775 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -43,7 +43,7 @@ struct CDiskTxPos : public CDiskBlockPos
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(*(CDiskBlockPos*)this);
READWRITE(VARINT(nTxOffset));
}
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index e5d28ac2ea..45135a5f73 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -833,6 +833,10 @@ void CTxMemPool::queryHashes(vector<uint256>& vtxid)
}
}
+static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) {
+ return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize()), it->GetModifiedFee() - it->GetFee()};
+}
+
std::vector<TxMempoolInfo> CTxMemPool::infoAll() const
{
LOCK(cs);
@@ -841,7 +845,7 @@ std::vector<TxMempoolInfo> CTxMemPool::infoAll() const
std::vector<TxMempoolInfo> ret;
ret.reserve(mapTx.size());
for (auto it : iters) {
- ret.push_back(TxMempoolInfo{it->GetSharedTx(), it->GetTime(), CFeeRate(it->GetFee(), it->GetTxSize())});
+ ret.push_back(GetInfo(it));
}
return ret;
@@ -862,7 +866,7 @@ TxMempoolInfo CTxMemPool::info(const uint256& hash) const
indexed_transaction_set::const_iterator i = mapTx.find(hash);
if (i == mapTx.end())
return TxMempoolInfo();
- return TxMempoolInfo{i->GetSharedTx(), i->GetTime(), CFeeRate(i->GetFee(), i->GetTxSize())};
+ return GetInfo(i);
}
CFeeRate CTxMemPool::estimateFee(int nBlocks) const
@@ -891,7 +895,7 @@ CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const
{
try {
LOCK(cs);
- fileout << 109900; // version required to read: 0.10.99 or later
+ fileout << 139900; // version required to read: 0.13.99 or later
fileout << CLIENT_VERSION; // version that wrote the file
minerPolicyEstimator->Write(fileout);
}
@@ -910,9 +914,8 @@ CTxMemPool::ReadFeeEstimates(CAutoFile& filein)
filein >> nVersionRequired >> nVersionThatWrote;
if (nVersionRequired > CLIENT_VERSION)
return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired);
-
LOCK(cs);
- minerPolicyEstimator->Read(filein);
+ minerPolicyEstimator->Read(filein, nVersionThatWrote);
}
catch (const std::exception&) {
LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)\n");
diff --git a/src/txmempool.h b/src/txmempool.h
index bb2638c3b7..9b0ca4655e 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -329,6 +329,9 @@ struct TxMempoolInfo
/** Feerate of the transaction. */
CFeeRate feeRate;
+
+ /** The fee delta. */
+ int64_t nFeeDelta;
};
/**
diff --git a/src/uint256.h b/src/uint256.h
index dd8432d74c..86e7c0b6c6 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -78,11 +78,6 @@ public:
return sizeof(data);
}
- unsigned int GetSerializeSize(int nType, int nVersion) const
- {
- return sizeof(data);
- }
-
uint64_t GetUint64(int pos) const
{
const uint8_t* ptr = data + pos * 8;
@@ -97,13 +92,13 @@ public:
}
template<typename Stream>
- void Serialize(Stream& s, int nType, int nVersion) const
+ void Serialize(Stream& s) const
{
s.write((char*)data, sizeof(data));
}
template<typename Stream>
- void Unserialize(Stream& s, int nType, int nVersion)
+ void Unserialize(Stream& s)
{
s.read((char*)data, sizeof(data));
}
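After this change, uint256's Serialize/Unserialize depend only on the stream, not on nType/nVersion. A self-contained sketch of the same shape with toy stream types (not CDataStream) and a generic 32-byte blob standing in for uint256:

#include <cassert>
#include <cstddef>
#include <cstring>
#include <string>

struct WriteStream {
    std::string buf;
    void write(const char* p, std::size_t n) { buf.append(p, n); }
};

struct ReadStream {
    std::string buf;
    std::size_t pos;
    void read(char* p, std::size_t n) { std::memcpy(p, buf.data() + pos, n); pos += n; }
};

class Blob32 {
    unsigned char data[32];
public:
    Blob32() { std::memset(data, 0, sizeof(data)); }
    unsigned char& operator[](std::size_t i) { return data[i]; }
    bool operator==(const Blob32& o) const { return std::memcmp(data, o.data, sizeof(data)) == 0; }

    // Stream-only signatures, mirroring the new uint256 methods above.
    template <typename Stream>
    void Serialize(Stream& s) const { s.write((const char*)data, sizeof(data)); }
    template <typename Stream>
    void Unserialize(Stream& s) { s.read((char*)data, sizeof(data)); }
};

int main() {
    Blob32 a, b;
    a[0] = 0xab;
    WriteStream w;
    a.Serialize(w);
    ReadStream r{w.buf, 0};
    b.Unserialize(r);
    assert(a == b);
    return 0;
}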
diff --git a/src/undo.h b/src/undo.h
index d4fc84c90c..a5d276e7f3 100644
--- a/src/undo.h
+++ b/src/undo.h
@@ -27,29 +27,23 @@ public:
CTxInUndo() : txout(), fCoinBase(false), nHeight(0), nVersion(0) {}
CTxInUndo(const CTxOut &txoutIn, bool fCoinBaseIn = false, unsigned int nHeightIn = 0, int nVersionIn = 0) : txout(txoutIn), fCoinBase(fCoinBaseIn), nHeight(nHeightIn), nVersion(nVersionIn) { }
- unsigned int GetSerializeSize(int nType, int nVersion) const {
- return ::GetSerializeSize(VARINT(nHeight*2+(fCoinBase ? 1 : 0)), nType, nVersion) +
- (nHeight > 0 ? ::GetSerializeSize(VARINT(this->nVersion), nType, nVersion) : 0) +
- ::GetSerializeSize(CTxOutCompressor(REF(txout)), nType, nVersion);
- }
-
template<typename Stream>
- void Serialize(Stream &s, int nType, int nVersion) const {
- ::Serialize(s, VARINT(nHeight*2+(fCoinBase ? 1 : 0)), nType, nVersion);
+ void Serialize(Stream &s) const {
+ ::Serialize(s, VARINT(nHeight*2+(fCoinBase ? 1 : 0)));
if (nHeight > 0)
- ::Serialize(s, VARINT(this->nVersion), nType, nVersion);
- ::Serialize(s, CTxOutCompressor(REF(txout)), nType, nVersion);
+ ::Serialize(s, VARINT(this->nVersion));
+ ::Serialize(s, CTxOutCompressor(REF(txout)));
}
template<typename Stream>
- void Unserialize(Stream &s, int nType, int nVersion) {
+ void Unserialize(Stream &s) {
unsigned int nCode = 0;
- ::Unserialize(s, VARINT(nCode), nType, nVersion);
+ ::Unserialize(s, VARINT(nCode));
nHeight = nCode / 2;
fCoinBase = nCode & 1;
if (nHeight > 0)
- ::Unserialize(s, VARINT(this->nVersion), nType, nVersion);
- ::Unserialize(s, REF(CTxOutCompressor(REF(txout))), nType, nVersion);
+ ::Unserialize(s, VARINT(this->nVersion));
+ ::Unserialize(s, REF(CTxOutCompressor(REF(txout))));
}
};
@@ -63,7 +57,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(vprevout);
}
};
@@ -77,7 +71,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(vtxundo);
}
};
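CTxInUndo above keeps packing the coinbase flag into the low bit of the height before VARINT-encoding it (nCode = nHeight*2 + fCoinBase). A self-contained sketch of just that packing, independent of the serializer:

#include <cassert>

struct UndoMeta {
    unsigned int nHeight;
    bool fCoinBase;
};

static unsigned int EncodeCode(const UndoMeta& m) {
    return m.nHeight * 2 + (m.fCoinBase ? 1 : 0);   // low bit carries the coinbase flag
}

static UndoMeta DecodeCode(unsigned int nCode) {
    UndoMeta m;
    m.nHeight = nCode / 2;
    m.fCoinBase = (nCode & 1) != 0;
    return m;
}

int main() {
    UndoMeta in = {120000, true};
    UndoMeta out = DecodeCode(EncodeCode(in));
    assert(out.nHeight == in.nHeight && out.fCoinBase == in.fCoinBase);
    return 0;
}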
diff --git a/src/version.h b/src/version.h
index 87bd655066..87fb1a3a75 100644
--- a/src/version.h
+++ b/src/version.h
@@ -9,7 +9,7 @@
* network protocol versioning
*/
-static const int PROTOCOL_VERSION = 70014;
+static const int PROTOCOL_VERSION = 70015;
//! initial proto version, to be increased after version/verack negotiation
static const int INIT_PROTO_VERSION = 209;
@@ -42,4 +42,7 @@ static const int FEEFILTER_VERSION = 70013;
//! short-id-based block download starts with this version
static const int SHORT_IDS_BLOCKS_VERSION = 70014;
+//! not banning for invalid compact blocks starts with this version
+static const int INVALID_CB_NO_BAN_VERSION = 70015;
+
#endif // BITCOIN_VERSION_H
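A hedged sketch of how a constant like INVALID_CB_NO_BAN_VERSION is typically consulted: the helper below is illustrative only (not the actual net-processing code) and simply checks whether a peer negotiated at least the version that introduced the relaxed compact-block handling.

#include <cassert>

static const int TOY_INVALID_CB_NO_BAN_VERSION = 70015;   // mirrors the constant above

// Illustrative helper: peers below this version predate the relaxed rules,
// so callers may want to treat their invalid compact blocks differently.
static bool PeerHasInvalidCBNoBanVersion(int peer_version) {
    return peer_version >= TOY_INVALID_CB_NO_BAN_VERSION;
}

int main() {
    assert(!PeerHasInvalidCBNoBanVersion(70014));
    assert(PeerHasInvalidCBNoBanVersion(70015));
    return 0;
}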
diff --git a/src/wallet/crypter.cpp b/src/wallet/crypter.cpp
index 190f8ecf2a..31ee060677 100644
--- a/src/wallet/crypter.cpp
+++ b/src/wallet/crypter.cpp
@@ -48,12 +48,12 @@ bool CCrypter::SetKeyFromPassphrase(const SecureString& strKeyData, const std::v
int i = 0;
if (nDerivationMethod == 0)
- i = BytesToKeySHA512AES(chSalt, strKeyData, nRounds, chKey, chIV);
+ i = BytesToKeySHA512AES(chSalt, strKeyData, nRounds, vchKey.data(), vchIV.data());
if (i != (int)WALLET_CRYPTO_KEY_SIZE)
{
- memory_cleanse(chKey, sizeof(chKey));
- memory_cleanse(chIV, sizeof(chIV));
+ memory_cleanse(vchKey.data(), vchKey.size());
+ memory_cleanse(vchIV.data(), vchIV.size());
return false;
}
@@ -66,8 +66,8 @@ bool CCrypter::SetKey(const CKeyingMaterial& chNewKey, const std::vector<unsigne
if (chNewKey.size() != WALLET_CRYPTO_KEY_SIZE || chNewIV.size() != WALLET_CRYPTO_IV_SIZE)
return false;
- memcpy(&chKey[0], &chNewKey[0], sizeof chKey);
- memcpy(&chIV[0], &chNewIV[0], sizeof chIV);
+ memcpy(vchKey.data(), chNewKey.data(), chNewKey.size());
+ memcpy(vchIV.data(), chNewIV.data(), chNewIV.size());
fKeySet = true;
return true;
@@ -82,7 +82,7 @@ bool CCrypter::Encrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned
// n + AES_BLOCKSIZE bytes
vchCiphertext.resize(vchPlaintext.size() + AES_BLOCKSIZE);
- AES256CBCEncrypt enc(chKey, chIV, true);
+ AES256CBCEncrypt enc(vchKey.data(), vchIV.data(), true);
size_t nLen = enc.Encrypt(&vchPlaintext[0], vchPlaintext.size(), &vchCiphertext[0]);
if(nLen < vchPlaintext.size())
return false;
@@ -101,7 +101,7 @@ bool CCrypter::Decrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingM
vchPlaintext.resize(nLen);
- AES256CBCDecrypt dec(chKey, chIV, true);
+ AES256CBCDecrypt dec(vchKey.data(), vchIV.data(), true);
nLen = dec.Decrypt(&vchCiphertext[0], vchCiphertext.size(), &vchPlaintext[0]);
if(nLen == 0)
return false;
diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h
index 5d0a4a3305..e89c15b5d4 100644
--- a/src/wallet/crypter.h
+++ b/src/wallet/crypter.h
@@ -47,7 +47,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(vchCryptedKey);
READWRITE(vchSalt);
READWRITE(nDerivationMethod);
@@ -77,8 +77,8 @@ class CCrypter
{
friend class wallet_crypto::TestCrypter; // for test access to chKey/chIV
private:
- unsigned char chKey[WALLET_CRYPTO_KEY_SIZE];
- unsigned char chIV[WALLET_CRYPTO_IV_SIZE];
+ std::vector<unsigned char, secure_allocator<unsigned char>> vchKey;
+ std::vector<unsigned char, secure_allocator<unsigned char>> vchIV;
bool fKeySet;
int BytesToKeySHA512AES(const std::vector<unsigned char>& chSalt, const SecureString& strKeyData, int count, unsigned char *key,unsigned char *iv) const;
@@ -91,28 +91,21 @@ public:
void CleanKey()
{
- memory_cleanse(chKey, sizeof(chKey));
- memory_cleanse(chIV, sizeof(chIV));
+ memory_cleanse(vchKey.data(), vchKey.size());
+ memory_cleanse(vchIV.data(), vchIV.size());
fKeySet = false;
}
CCrypter()
{
fKeySet = false;
-
- // Try to keep the key data out of swap (and be a bit over-careful to keep the IV that we don't even use out of swap)
- // Note that this does nothing about suspend-to-disk (which will put all our key data on disk)
- // Note as well that at no point in this program is any attempt made to prevent stealing of keys by reading the memory of the running process.
- LockedPageManager::Instance().LockRange(&chKey[0], sizeof chKey);
- LockedPageManager::Instance().LockRange(&chIV[0], sizeof chIV);
+ vchKey.resize(WALLET_CRYPTO_KEY_SIZE);
+ vchIV.resize(WALLET_CRYPTO_IV_SIZE);
}
~CCrypter()
{
CleanKey();
-
- LockedPageManager::Instance().UnlockRange(&chKey[0], sizeof chKey);
- LockedPageManager::Instance().UnlockRange(&chIV[0], sizeof chIV);
}
};
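The crypter now keeps key material in secure_allocator-backed vectors and wipes them with memory_cleanse. A self-contained sketch of the wipe-on-clear pattern; std::allocator and the toy cleanse function below are stand-ins (the real secure_allocator additionally locks the pages in memory):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

// Stand-in for memory_cleanse(): zero the buffer and discourage the compiler
// from optimising the store away (GCC/Clang-style barrier).
static void ToyCleanse(void* ptr, std::size_t len) {
    std::memset(ptr, 0, len);
#if defined(__GNUC__)
    __asm__ __volatile__("" : : "r"(ptr) : "memory");
#endif
}

class ToyCrypter {
    std::vector<unsigned char> vchKey;   // secure_allocator-backed in the real class
    std::vector<unsigned char> vchIV;
public:
    ToyCrypter() : vchKey(32), vchIV(16) {}
    ~ToyCrypter() { CleanKey(); }
    void CleanKey() {
        ToyCleanse(vchKey.data(), vchKey.size());
        ToyCleanse(vchIV.data(), vchIV.size());
    }
    unsigned char& KeyByte(std::size_t i) { return vchKey[i]; }
};

int main() {
    ToyCrypter crypt;
    crypt.KeyByte(0) = 0x7f;
    crypt.CleanKey();
    assert(crypt.KeyByte(0) == 0);
    return 0;
}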
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index b638810e9d..bb5337c4ad 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -798,8 +798,8 @@ UniValue processImport(const UniValue& data) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey must be a hex string");
}
- std::vector<unsigned char> data(ParseHex(strPubKey));
- CPubKey pubKey(data.begin(), data.end());
+ std::vector<unsigned char> vData(ParseHex(strPubKey));
+ CPubKey pubKey(vData.begin(), vData.end());
if (!pubKey.IsFullyValid()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey is not a valid public key");
@@ -1017,7 +1017,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
bool fRunScan = false;
const int64_t minimumTimestamp = 1;
- int64_t nLowestTimestamp;
+ int64_t nLowestTimestamp = 0;
if (fRescan && chainActive.Tip()) {
nLowestTimestamp = chainActive.Tip()->GetBlockTime();
diff --git a/src/wallet/test/crypto_tests.cpp b/src/wallet/test/crypto_tests.cpp
index c5f55ef5f0..ce35c53c48 100644
--- a/src/wallet/test/crypto_tests.cpp
+++ b/src/wallet/test/crypto_tests.cpp
@@ -97,10 +97,10 @@ static void TestPassphraseSingle(const std::vector<unsigned char>& vchSalt, cons
OldSetKeyFromPassphrase(passphrase, vchSalt, rounds, 0, chKey, chIV);
- BOOST_CHECK_MESSAGE(memcmp(chKey, crypt.chKey, sizeof(chKey)) == 0, \
- HexStr(chKey, chKey+sizeof(chKey)) + std::string(" != ") + HexStr(crypt.chKey, crypt.chKey + (sizeof crypt.chKey)));
- BOOST_CHECK_MESSAGE(memcmp(chIV, crypt.chIV, sizeof(chIV)) == 0, \
- HexStr(chIV, chIV+sizeof(chIV)) + std::string(" != ") + HexStr(crypt.chIV, crypt.chIV + (sizeof crypt.chIV)));
+ BOOST_CHECK_MESSAGE(memcmp(chKey, crypt.vchKey.data(), crypt.vchKey.size()) == 0, \
+ HexStr(chKey, chKey+sizeof(chKey)) + std::string(" != ") + HexStr(crypt.vchKey));
+ BOOST_CHECK_MESSAGE(memcmp(chIV, crypt.vchIV.data(), crypt.vchIV.size()) == 0, \
+ HexStr(chIV, chIV+sizeof(chIV)) + std::string(" != ") + HexStr(crypt.vchIV));
if(!correctKey.empty())
BOOST_CHECK_MESSAGE(memcmp(chKey, &correctKey[0], sizeof(chKey)) == 0, \
@@ -127,7 +127,7 @@ static void TestDecrypt(const CCrypter& crypt, const std::vector<unsigned char>&
CKeyingMaterial vchDecrypted2;
int result1, result2;
result1 = crypt.Decrypt(vchCiphertext, vchDecrypted1);
- result2 = OldDecrypt(vchCiphertext, vchDecrypted2, crypt.chKey, crypt.chIV);
+ result2 = OldDecrypt(vchCiphertext, vchDecrypted2, crypt.vchKey.data(), crypt.vchIV.data());
BOOST_CHECK(result1 == result2);
// These two should be equal. However, OpenSSL 1.0.1j introduced a change
@@ -152,7 +152,7 @@ static void TestEncryptSingle(const CCrypter& crypt, const CKeyingMaterial& vchP
std::vector<unsigned char> vchCiphertext2;
int result1 = crypt.Encrypt(vchPlaintext, vchCiphertext1);
- int result2 = OldEncrypt(vchPlaintext, vchCiphertext2, crypt.chKey, crypt.chIV);
+ int result2 = OldEncrypt(vchPlaintext, vchCiphertext2, crypt.vchKey.data(), crypt.vchIV.data());
BOOST_CHECK(result1 == result2);
BOOST_CHECK(vchCiphertext1 == vchCiphertext2);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index c9e926266c..c2bac6e330 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -658,8 +658,79 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
DBErrors CWallet::ReorderTransactions()
{
+ LOCK(cs_wallet);
CWalletDB walletdb(strWalletFile);
- return walletdb.ReorderTransactions(this);
+
+ // Old wallets didn't have any defined order for transactions
+ // Probably a bad idea to change the output of this
+
+ // First: get all CWalletTx and CAccountingEntry into a sorted-by-time multimap.
+ typedef pair<CWalletTx*, CAccountingEntry*> TxPair;
+ typedef multimap<int64_t, TxPair > TxItems;
+ TxItems txByTime;
+
+ for (map<uint256, CWalletTx>::iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ {
+ CWalletTx* wtx = &((*it).second);
+ txByTime.insert(make_pair(wtx->nTimeReceived, TxPair(wtx, (CAccountingEntry*)0)));
+ }
+ list<CAccountingEntry> acentries;
+ walletdb.ListAccountCreditDebit("", acentries);
+ BOOST_FOREACH(CAccountingEntry& entry, acentries)
+ {
+ txByTime.insert(make_pair(entry.nTime, TxPair((CWalletTx*)0, &entry)));
+ }
+
+ nOrderPosNext = 0;
+ std::vector<int64_t> nOrderPosOffsets;
+ for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it)
+ {
+ CWalletTx *const pwtx = (*it).second.first;
+ CAccountingEntry *const pacentry = (*it).second.second;
+ int64_t& nOrderPos = (pwtx != 0) ? pwtx->nOrderPos : pacentry->nOrderPos;
+
+ if (nOrderPos == -1)
+ {
+ nOrderPos = nOrderPosNext++;
+ nOrderPosOffsets.push_back(nOrderPos);
+
+ if (pwtx)
+ {
+ if (!walletdb.WriteTx(*pwtx))
+ return DB_LOAD_FAIL;
+ }
+ else
+ if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
+ return DB_LOAD_FAIL;
+ }
+ else
+ {
+ int64_t nOrderPosOff = 0;
+ BOOST_FOREACH(const int64_t& nOffsetStart, nOrderPosOffsets)
+ {
+ if (nOrderPos >= nOffsetStart)
+ ++nOrderPosOff;
+ }
+ nOrderPos += nOrderPosOff;
+ nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);
+
+ if (!nOrderPosOff)
+ continue;
+
+ // Since we're changing the order, write it back
+ if (pwtx)
+ {
+ if (!walletdb.WriteTx(*pwtx))
+ return DB_LOAD_FAIL;
+ }
+ else
+ if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
+ return DB_LOAD_FAIL;
+ }
+ }
+ walletdb.WriteOrderPosNext(nOrderPosNext);
+
+ return DB_LOAD_OK;
}
int64_t CWallet::IncOrderPosNext(CWalletDB *pwalletdb)
@@ -3478,6 +3549,16 @@ bool CWallet::InitLoadWallet()
return true;
}
+void CWallet::postInitProcess(boost::thread_group& threadGroup)
+{
+ // Add wallet transactions that aren't already in a block to mempool
+ // Do this here as mempool requires genesis block to be loaded
+ ReacceptWalletTransactions();
+
+ // Run a thread to flush wallet periodically
+ threadGroup.create_thread(boost::bind(&ThreadFlushWalletDB, boost::ref(this->strWalletFile)));
+}
+
bool CWallet::ParameterInteraction()
{
if (GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET))
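CWallet::ReorderTransactions above (moved here from CWalletDB) assigns fresh positions to entries that have none (nOrderPos == -1) and shifts already-numbered entries past every newly inserted one. A self-contained sketch of just that numbering pass, using plain integers in place of wallet transactions and accounting entries:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    // -1 means "no order recorded yet"; other values are pre-existing positions.
    std::vector<long long> order = {-1, 0, 1, -1};
    long long nOrderPosNext = 0;
    std::vector<long long> nOrderPosOffsets;          // positions handed out to new entries

    for (long long& nOrderPos : order) {
        if (nOrderPos == -1) {
            nOrderPos = nOrderPosNext++;
            nOrderPosOffsets.push_back(nOrderPos);
        } else {
            long long nOrderPosOff = 0;
            for (long long nOffsetStart : nOrderPosOffsets) {
                if (nOrderPos >= nOffsetStart)
                    ++nOrderPosOff;                   // shift past each inserted entry before it
            }
            nOrderPos += nOrderPosOff;
            nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);
        }
    }

    for (long long nOrderPos : order)
        std::printf("%lld ", nOrderPos);              // prints: 0 1 2 3
    std::printf("\n");
    return 0;
}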
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index c33a6ca91f..a527c6d84e 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -27,6 +27,7 @@
#include <vector>
#include <boost/shared_ptr.hpp>
+#include <boost/thread.hpp>
extern CWallet* pwalletMain;
@@ -53,7 +54,7 @@ static const bool DEFAULT_SPEND_ZEROCONF_CHANGE = true;
//! Default for -sendfreetransactions
static const bool DEFAULT_SEND_FREE_TRANSACTIONS = false;
//! -txconfirmtarget default
-static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 2;
+static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 6;
//! -walletrbf default
static const bool DEFAULT_WALLET_RBF = false;
//! Largest (in bytes) free transaction we're willing to create
@@ -99,8 +100,9 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- if (!(nType & SER_GETHASH))
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH))
READWRITE(nVersion);
READWRITE(nTime);
READWRITE(vchPubKey);
@@ -133,7 +135,7 @@ struct CRecipient
typedef std::map<std::string, std::string> mapValue_t;
-static void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
+static inline void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
{
if (!mapValue.count("n"))
{
@@ -144,7 +146,7 @@ static void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
}
-static void WriteOrderPos(const int64_t& nOrderPos, mapValue_t& mapValue)
+static inline void WriteOrderPos(const int64_t& nOrderPos, mapValue_t& mapValue)
{
if (nOrderPos == -1)
return;
@@ -194,7 +196,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
std::vector<uint256> vMerkleBranch; // For compatibility with older versions.
READWRITE(*(CTransaction*)this);
READWRITE(hashBlock);
@@ -314,7 +316,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
if (ser_action.ForRead())
Init(NULL);
char fSpent = false;
@@ -447,8 +449,9 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- if (!(nType & SER_GETHASH))
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH))
READWRITE(nVersion);
READWRITE(vchPrivKey);
READWRITE(nTimeCreated);
@@ -492,8 +495,9 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- if (!(nType & SER_GETHASH))
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH))
READWRITE(nVersion);
//! Note: strAccount is serialized as part of the key, not here.
READWRITE(nCreditDebit);
@@ -506,7 +510,7 @@ public:
if (!(mapValue.empty() && _ssExtra.empty()))
{
- CDataStream ss(nType, nVersion);
+ CDataStream ss(s.GetType(), s.GetVersion());
ss.insert(ss.begin(), '\0');
ss << mapValue;
ss.insert(ss.end(), _ssExtra.begin(), _ssExtra.end());
@@ -522,7 +526,7 @@ public:
mapValue.clear();
if (std::string::npos != nSepPos)
{
- CDataStream ss(std::vector<char>(strComment.begin() + nSepPos + 1, strComment.end()), nType, nVersion);
+ CDataStream ss(std::vector<char>(strComment.begin() + nSepPos + 1, strComment.end()), s.GetType(), s.GetVersion());
ss >> mapValue;
_ssExtra = std::vector<char>(ss.begin(), ss.end());
}
@@ -912,6 +916,12 @@ public:
/* Initializes the wallet, returns a new CWallet instance or a null pointer in case of an error */
static bool InitLoadWallet();
+ /**
+ * Wallet post-init setup
+ * Gives the wallet a chance to register repetitive tasks and complete post-init tasks
+ */
+ void postInitProcess(boost::thread_group& threadGroup);
+
/* Wallets parameter interaction */
static bool ParameterInteraction();
@@ -979,8 +989,9 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- if (!(nType & SER_GETHASH))
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH))
READWRITE(nVersion);
READWRITE(vchPubKey);
}
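Throughout wallet.h, SerializationOp now asks the stream for its type and version instead of taking them as parameters, e.g. skipping the nVersion field when the stream is used for hashing. A self-contained sketch of that pattern with a toy stream that carries a type flag (a SER_GETHASH stand-in) and a version:

#include <cassert>
#include <sstream>
#include <string>

static const int TOY_SER_GETHASH = 1 << 2;   // illustrative stand-in for SER_GETHASH

struct ToyStream {
    int nType;
    int nVersion;
    std::ostringstream out;
    int GetType() const { return nType; }
    int GetVersion() const { return nVersion; }
    template <typename T> void Write(const T& v) { out << v << '\n'; }
};

struct ToyKeyPool {
    long long nTime = 1234;
    // Mirrors the pattern above: the version field is only serialized when the
    // stream is not being used for hashing.
    void SerializationOp(ToyStream& s) const {
        int nVersion = s.GetVersion();
        if (!(s.GetType() & TOY_SER_GETHASH))
            s.Write(nVersion);
        s.Write(nTime);
    }
};

int main() {
    ToyKeyPool pool;
    ToyStream disk{0, 139900};
    ToyStream hasher{TOY_SER_GETHASH, 139900};
    pool.SerializationOp(disk);
    pool.SerializationOp(hasher);
    assert(disk.out.str() == "139900\n1234\n");
    assert(hasher.out.str() == "1234\n");
    return 0;
}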
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 80bfe8255d..43fd6a20ad 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -251,82 +251,6 @@ void CWalletDB::ListAccountCreditDebit(const string& strAccount, list<CAccountin
pcursor->close();
}
-DBErrors CWalletDB::ReorderTransactions(CWallet* pwallet)
-{
- LOCK(pwallet->cs_wallet);
- // Old wallets didn't have any defined order for transactions
- // Probably a bad idea to change the output of this
-
- // First: get all CWalletTx and CAccountingEntry into a sorted-by-time multimap.
- typedef pair<CWalletTx*, CAccountingEntry*> TxPair;
- typedef multimap<int64_t, TxPair > TxItems;
- TxItems txByTime;
-
- for (map<uint256, CWalletTx>::iterator it = pwallet->mapWallet.begin(); it != pwallet->mapWallet.end(); ++it)
- {
- CWalletTx* wtx = &((*it).second);
- txByTime.insert(make_pair(wtx->nTimeReceived, TxPair(wtx, (CAccountingEntry*)0)));
- }
- list<CAccountingEntry> acentries;
- ListAccountCreditDebit("", acentries);
- BOOST_FOREACH(CAccountingEntry& entry, acentries)
- {
- txByTime.insert(make_pair(entry.nTime, TxPair((CWalletTx*)0, &entry)));
- }
-
- int64_t& nOrderPosNext = pwallet->nOrderPosNext;
- nOrderPosNext = 0;
- std::vector<int64_t> nOrderPosOffsets;
- for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it)
- {
- CWalletTx *const pwtx = (*it).second.first;
- CAccountingEntry *const pacentry = (*it).second.second;
- int64_t& nOrderPos = (pwtx != 0) ? pwtx->nOrderPos : pacentry->nOrderPos;
-
- if (nOrderPos == -1)
- {
- nOrderPos = nOrderPosNext++;
- nOrderPosOffsets.push_back(nOrderPos);
-
- if (pwtx)
- {
- if (!WriteTx(*pwtx))
- return DB_LOAD_FAIL;
- }
- else
- if (!WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
- return DB_LOAD_FAIL;
- }
- else
- {
- int64_t nOrderPosOff = 0;
- BOOST_FOREACH(const int64_t& nOffsetStart, nOrderPosOffsets)
- {
- if (nOrderPos >= nOffsetStart)
- ++nOrderPosOff;
- }
- nOrderPos += nOrderPosOff;
- nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);
-
- if (!nOrderPosOff)
- continue;
-
- // Since we're changing the order, write it back
- if (pwtx)
- {
- if (!WriteTx(*pwtx))
- return DB_LOAD_FAIL;
- }
- else
- if (!WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
- return DB_LOAD_FAIL;
- }
- }
- WriteOrderPosNext(nOrderPosNext);
-
- return DB_LOAD_OK;
-}
-
class CWalletScanState {
public:
unsigned int nKeys;
@@ -711,7 +635,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet)
WriteVersion(CLIENT_VERSION);
if (wss.fAnyUnordered)
- result = ReorderTransactions(pwallet);
+ result = pwallet->ReorderTransactions();
pwallet->laccentries.clear();
ListAccountCreditDebit("*", pwallet->laccentries);
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 9c9d4922a4..eb25ac613d 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -54,7 +54,7 @@ public:
CHDChain() { SetNull(); }
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
+ inline void SerializationOp(Stream& s, Operation ser_action)
{
READWRITE(this->nVersion);
READWRITE(nExternalChainCounter);
@@ -93,7 +93,7 @@ public:
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
+ inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(this->nVersion);
READWRITE(nCreateTime);
if (this->nVersion >= VERSION_WITH_HDDATA)
@@ -153,6 +153,7 @@ public:
/// This writes directly to the database, and will not update the CWallet's cached accounting entries!
/// Use wallet.AddAccountingEntry instead, to write *and* update its caches.
+ bool WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry& acentry);
bool WriteAccountingEntry_Backend(const CAccountingEntry& acentry);
bool ReadAccount(const std::string& strAccount, CAccount& account);
bool WriteAccount(const std::string& strAccount, const CAccount& account);
@@ -165,7 +166,6 @@ public:
CAmount GetAccountCreditDebit(const std::string& strAccount);
void ListAccountCreditDebit(const std::string& strAccount, std::list<CAccountingEntry>& acentries);
- DBErrors ReorderTransactions(CWallet* pwallet);
DBErrors LoadWallet(CWallet* pwallet);
DBErrors FindWalletTx(CWallet* pwallet, std::vector<uint256>& vTxHash, std::vector<CWalletTx>& vWtx);
DBErrors ZapWalletTx(CWallet* pwallet, std::vector<CWalletTx>& vWtx);
@@ -180,7 +180,6 @@ private:
CWalletDB(const CWalletDB&);
void operator=(const CWalletDB&);
- bool WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry& acentry);
};
void ThreadFlushWalletDB(const std::string& strFile);