-rw-r--r--  .travis.yml | 4
-rw-r--r--  CONTRIBUTING.md | 8
-rw-r--r--  Makefile.am | 4
-rw-r--r--  README.md | 2
-rwxr-xr-x  autogen.sh | 1
-rw-r--r--  configure.ac | 46
-rwxr-xr-x  contrib/devtools/circular-dependencies.py | 2
-rwxr-xr-x  contrib/devtools/clang-format-diff.py | 2
-rwxr-xr-x  contrib/devtools/copyright_header.py | 6
-rwxr-xr-x  contrib/devtools/gen-manpages.sh | 1
-rwxr-xr-x  contrib/devtools/github-merge.py | 2
-rwxr-xr-x  contrib/devtools/test-security-check.py | 2
-rwxr-xr-x  contrib/devtools/update-translations.py | 10
-rwxr-xr-x  contrib/filter-lcov.py | 4
-rwxr-xr-x  contrib/gitian-build.sh | 1
-rw-r--r--  contrib/init/README.md | 2
-rwxr-xr-x  contrib/install_db4.sh | 1
-rwxr-xr-x  contrib/linearize/linearize-data.py | 4
-rwxr-xr-x  contrib/linearize/linearize-hashes.py | 4
-rwxr-xr-x  contrib/macdeploy/detached-sig-apply.sh | 1
-rwxr-xr-x  contrib/macdeploy/detached-sig-create.sh | 1
-rwxr-xr-x  contrib/macdeploy/extract-osx-sdk.sh | 1
-rw-r--r--  contrib/qos/tc.sh | 1
-rwxr-xr-x  contrib/seeds/generate-seeds.py | 4
-rw-r--r--  contrib/verify-commits/README.md | 10
-rw-r--r--  contrib/verify-commits/allow-incorrect-sha512-commits | 2
-rw-r--r--  contrib/verify-commits/allow-unclean-merge-commits | 4
-rwxr-xr-x  contrib/verify-commits/gpg.sh | 3
-rwxr-xr-x  contrib/verify-commits/pre-push-hook.sh | 5
-rwxr-xr-x  contrib/verify-commits/verify-commits.py | 155
-rwxr-xr-x  contrib/verify-commits/verify-commits.sh | 153
-rwxr-xr-x  contrib/verifybinaries/verify.sh | 1
-rwxr-xr-x  contrib/windeploy/detached-sig-create.sh | 1
-rw-r--r--  depends/README.md | 6
-rw-r--r--  depends/description.md | 2
-rw-r--r--  doc/README.md | 4
-rw-r--r--  doc/README_osx.md | 10
-rw-r--r--  doc/build-freebsd.md | 2
-rw-r--r--  doc/build-osx.md | 6
-rw-r--r--  doc/build-unix.md | 14
-rw-r--r--  doc/developer-notes.md | 40
-rw-r--r--  doc/init.md | 6
-rw-r--r--  doc/release-notes-pr12892.md | 2
-rw-r--r--  doc/release-notes.md | 5
-rw-r--r--  doc/release-notes/release-notes-0.16.1.md | 145
-rw-r--r--  doc/release-process.md | 22
-rw-r--r--  doc/translation_process.md | 8
-rwxr-xr-x  share/genbuild.sh | 1
-rwxr-xr-x  share/qt/extract_strings_qt.py | 2
-rw-r--r--  src/Makefile.am | 40
-rw-r--r--  src/Makefile.bench.include | 4
-rw-r--r--  src/Makefile.qt.include | 2
-rw-r--r--  src/Makefile.qttest.include | 2
-rw-r--r--  src/Makefile.test.include | 6
-rw-r--r--  src/arith_uint256.h | 8
-rw-r--r--  src/bench/examples.cpp (renamed from src/bench/Examples.cpp) | 0
-rw-r--r--  src/bench/merkle_root.cpp | 8
-rw-r--r--  src/bitcoin-cli.cpp | 13
-rw-r--r--  src/bitcoin-tx.cpp | 6
-rw-r--r--  src/bitcoind.cpp | 3
-rw-r--r--  src/core_io.h | 1
-rw-r--r--  src/core_read.cpp | 8
-rw-r--r--  src/crypto/sha256.cpp | 114
-rw-r--r--  src/crypto/sha256_avx2.cpp | 4
-rw-r--r--  src/crypto/sha256_sse41.cpp | 4
-rw-r--r--  src/index/base.cpp | 278
-rw-r--r--  src/index/base.h | 98
-rw-r--r--  src/index/txindex.cpp | 419
-rw-r--r--  src/index/txindex.h | 74
-rw-r--r--  src/init.cpp | 57
-rw-r--r--  src/net_processing.cpp | 2
-rw-r--r--  src/policy/policy.cpp | 9
-rw-r--r--  src/policy/policy.h | 4
-rw-r--r--  src/primitives/transaction.h | 5
-rw-r--r--  src/qt/README.md | 8
-rw-r--r--  src/qt/forms/optionsdialog.ui | 67
-rw-r--r--  src/qt/optionsdialog.cpp | 19
-rw-r--r--  src/qt/optionsdialog.h | 1
-rw-r--r--  src/qt/optionsmodel.cpp | 26
-rw-r--r--  src/qt/optionsmodel.h | 2
-rwxr-xr-x  src/qt/res/movies/makespinner.sh | 1
-rw-r--r--  src/rest.cpp | 2
-rw-r--r--  src/rpc/blockchain.cpp | 26
-rw-r--r--  src/rpc/blockchain.h | 3
-rw-r--r--  src/rpc/client.cpp | 1
-rw-r--r--  src/rpc/mining.cpp | 2
-rw-r--r--  src/rpc/net.cpp | 2
-rw-r--r--  src/rpc/rawtransaction.cpp | 4
-rw-r--r--  src/script/sign.cpp | 8
-rw-r--r--  src/script/sign.h | 1
-rw-r--r--  src/script/standard.cpp | 1
-rw-r--r--  src/test/arith_uint256_tests.cpp | 7
-rw-r--r--  src/test/blockchain_tests.cpp | 58
-rw-r--r--  src/test/denialofservice_tests.cpp (renamed from src/test/DoS_tests.cpp) | 2
-rw-r--r--  src/test/mempool_tests.cpp | 175
-rw-r--r--  src/test/script_p2sh_tests.cpp (renamed from src/test/script_P2SH_tests.cpp) | 2
-rw-r--r--  src/test/script_standard_tests.cpp | 26
-rw-r--r--  src/test/transaction_tests.cpp | 8
-rw-r--r--  src/test/txindex_tests.cpp | 2
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 6
-rw-r--r--  src/txdb.cpp | 183
-rw-r--r--  src/txdb.h | 59
-rw-r--r--  src/txmempool.cpp | 37
-rw-r--r--  src/txmempool.h | 8
-rw-r--r--  src/util.cpp | 27
-rw-r--r--  src/util.h | 5
-rw-r--r--  src/validation.cpp | 30
-rw-r--r--  src/validation.h | 25
-rw-r--r--  src/wallet/rpcwallet.cpp | 105
-rw-r--r--  src/wallet/test/coinselector_tests.cpp | 16
-rw-r--r--  src/wallet/wallet.cpp | 43
-rw-r--r--  src/wallet/wallet.h | 5
-rwxr-xr-x  test/functional/combine_logs.py | 2
-rwxr-xr-x  test/functional/feature_includeconf.py | 19
-rwxr-xr-x  test/functional/feature_notifications.py | 6
-rwxr-xr-x  test/functional/feature_nulldummy.py | 2
-rwxr-xr-x  test/functional/feature_pruning.py | 7
-rwxr-xr-x  test/functional/feature_segwit.py | 31
-rwxr-xr-x  test/functional/p2p_segwit.py | 160
-rwxr-xr-x  test/functional/p2p_sendheaders.py | 16
-rwxr-xr-x  test/functional/rpc_blockchain.py | 1
-rwxr-xr-x  test/functional/rpc_deprecated.py | 5
-rwxr-xr-x  test/functional/rpc_getblockstats.py | 4
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 2
-rw-r--r--  test/functional/test_framework/util.py | 2
-rwxr-xr-x  test/functional/test_runner.py | 12
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 2
-rwxr-xr-x  test/functional/wallet_labels.py | 81
-rwxr-xr-x  test/functional/wallet_multiwallet.py | 2
-rwxr-xr-x  test/lint/check-doc.py | 2
-rwxr-xr-x  test/lint/check-rpc-mappings.py | 4
-rwxr-xr-x  test/lint/commit-script-check.sh | 1
-rwxr-xr-x  test/lint/git-subtree-check.sh | 1
-rwxr-xr-x  test/lint/lint-all.sh | 4
-rwxr-xr-x  test/lint/lint-filenames.sh | 21
-rwxr-xr-x  test/lint/lint-include-guards.sh | 1
-rwxr-xr-x  test/lint/lint-includes.sh | 90
-rwxr-xr-x  test/lint/lint-locale-dependence.sh | 230
-rwxr-xr-x  test/lint/lint-logs.sh | 2
-rwxr-xr-x  test/lint/lint-python-shebang.sh | 2
-rwxr-xr-x  test/lint/lint-python-utf8-encoding.sh | 20
-rwxr-xr-x  test/lint/lint-python.sh | 2
-rwxr-xr-x  test/lint/lint-shell-locale.sh | 24
-rwxr-xr-x  test/lint/lint-shell.sh | 4
-rwxr-xr-x  test/lint/lint-tests.sh | 1
-rwxr-xr-x  test/lint/lint-whitespace.sh | 1
-rwxr-xr-x  test/util/bitcoin-util-test.py | 8
-rwxr-xr-x  test/util/rpcauth-test.py | 2
148 files changed, 2364 insertions, 1305 deletions
diff --git a/.travis.yml b/.travis.yml
index 7cbe0b83f1..75df290187 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -40,7 +40,7 @@ env:
# x86_64 Linux, No wallet
- HOST=x86_64-unknown-linux-gnu PACKAGES="python3" DEP_OPTS="NO_WALLET=1" RUN_TESTS=true GOAL="install" BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports"
# Cross-Mac
- - HOST=x86_64-apple-darwin11 PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" OSX_SDK=10.11 GOAL="deploy"
+ - HOST=x86_64-apple-darwin11 PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" OSX_SDK=10.11 GOAL="all deploy"
before_install:
- export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g")
@@ -104,5 +104,5 @@ jobs:
- test/lint/lint-all.sh
- if [ "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_EVENT_TYPE" = "cron" ]; then
while read LINE; do travis_retry gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys &&
- travis_wait 30 contrib/verify-commits/verify-commits.sh;
+ travis_wait 50 contrib/verify-commits/verify-commits.py;
fi
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c390595abf..3ee5a04796 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -127,10 +127,10 @@ before it will be merged. The basic squashing workflow is shown below.
git checkout your_branch_name
git rebase -i HEAD~n
- # n is normally the number of commits in the pull
- # set commits from 'pick' to 'squash', save and quit
- # on the next screen, edit/refine commit messages
- # save and quit
+ # n is normally the number of commits in the pull request.
+ # Set commits (except the one in the first line) from 'pick' to 'squash', save and quit.
+ # On the next screen, edit/refine commit messages.
+ # Save and quit.
git push -f # (force push to GitHub)
If you have problems with squashing (or other workflows with `git`), you can
diff --git a/Makefile.am b/Makefile.am
index 0ed3dd289a..f3f3302fce 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -95,9 +95,9 @@ $(OSX_APP)/Contents/Resources/bitcoin.icns: $(OSX_INSTALLER_ICONS)
$(MKDIR_P) $(@D)
$(INSTALL_DATA) $< $@
-$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: $(BITCOIN_QT_BIN)
+$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: all-recursive
$(MKDIR_P) $(@D)
- STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $< $@
+ STRIPPROG="$(STRIP)" $(INSTALL_STRIP_PROGRAM) $(BITCOIN_QT_BIN) $@
$(OSX_APP)/Contents/Resources/Base.lproj/InfoPlist.strings:
$(MKDIR_P) $(@D)
diff --git a/README.md b/README.md
index acdbe46104..4e830109c2 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ There are also [regression and integration tests](/test), written
in Python, that are run automatically on the build server.
These tests can be run (if the [test dependencies](/test) are installed) with: `test/functional/test_runner.py`
-The Travis CI system makes sure that every pull request is built for Windows, Linux, and OS X, and that unit/sanity tests are run automatically.
+The Travis CI system makes sure that every pull request is built for Windows, Linux, and macOS, and that unit/sanity tests are run automatically.
### Manual Quality Assurance (QA) Testing
diff --git a/autogen.sh b/autogen.sh
index 27417daf76..0c05626ccc 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
set -e
srcdir="$(dirname $0)"
cd "$srcdir"
diff --git a/configure.ac b/configure.ac
index af60b28c71..926d0b8d00 100644
--- a/configure.ac
+++ b/configure.ac
@@ -243,6 +243,10 @@ AC_LANG_PUSH([C++])
AX_CHECK_COMPILE_FLAG([-Werror],[CXXFLAG_WERROR="-Werror"],[CXXFLAG_WERROR=""])
if test "x$enable_debug" = xyes; then
+ # Clear default -g -O2 flags
+ if test "x$CXXFLAGS_overridden" = xno; then
+ CXXFLAGS=""
+ fi
# Prefer -Og, fall back to -O0 if that is unavailable.
AX_CHECK_COMPILE_FLAG(
[-Og],
@@ -403,25 +407,25 @@ case $host in
use_pkgconfig=no
TARGET_OS=windows
- AC_CHECK_LIB([mingwthrd], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([kernel32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([gdi32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([comdlg32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([winspool], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([winmm], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([shell32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([comctl32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([ole32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([oleaut32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([uuid], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([rpcrt4], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([advapi32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([ws2_32], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([mswsock], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([shlwapi], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([iphlpapi], [main],, AC_MSG_ERROR(lib missing))
- AC_CHECK_LIB([crypt32], [main],, AC_MSG_ERROR(lib missing))
+ AC_CHECK_LIB([mingwthrd], [main],, AC_MSG_ERROR(libmingwthrd missing))
+ AC_CHECK_LIB([kernel32], [main],, AC_MSG_ERROR(libkernel32 missing))
+ AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(libuser32 missing))
+ AC_CHECK_LIB([gdi32], [main],, AC_MSG_ERROR(libgdi32 missing))
+ AC_CHECK_LIB([comdlg32], [main],, AC_MSG_ERROR(libcomdlg32 missing))
+ AC_CHECK_LIB([winspool], [main],, AC_MSG_ERROR(libwinspool missing))
+ AC_CHECK_LIB([winmm], [main],, AC_MSG_ERROR(libwinmm missing))
+ AC_CHECK_LIB([shell32], [main],, AC_MSG_ERROR(libshell32 missing))
+ AC_CHECK_LIB([comctl32], [main],, AC_MSG_ERROR(libcomctl32 missing))
+ AC_CHECK_LIB([ole32], [main],, AC_MSG_ERROR(libole32 missing))
+ AC_CHECK_LIB([oleaut32], [main],, AC_MSG_ERROR(liboleaut32 missing))
+ AC_CHECK_LIB([uuid], [main],, AC_MSG_ERROR(libuuid missing))
+ AC_CHECK_LIB([rpcrt4], [main],, AC_MSG_ERROR(librpcrt4 missing))
+ AC_CHECK_LIB([advapi32], [main],, AC_MSG_ERROR(libadvapi32 missing))
+ AC_CHECK_LIB([ws2_32], [main],, AC_MSG_ERROR(libws2_32 missing))
+ AC_CHECK_LIB([mswsock], [main],, AC_MSG_ERROR(libmswsock missing))
+ AC_CHECK_LIB([shlwapi], [main],, AC_MSG_ERROR(libshlwapi missing))
+ AC_CHECK_LIB([iphlpapi], [main],, AC_MSG_ERROR(libiphlpapi missing))
+ AC_CHECK_LIB([crypt32], [main],, AC_MSG_ERROR(libcrypt32 missing))
# -static is interpreted by libtool, where it has a different meaning.
# In libtool-speak, it's -all-static.
@@ -620,7 +624,7 @@ if test x$use_glibc_compat != xno; then
#glibc absorbed clock_gettime in 2.17. librt (its previous location) is safe to link
#in anyway for back-compat.
- AC_CHECK_LIB([rt],[clock_gettime],, AC_MSG_ERROR(lib missing))
+ AC_CHECK_LIB([rt],[clock_gettime],, AC_MSG_ERROR(librt missing))
#__fdelt_chk's params and return type have changed from long unsigned int to long int.
# See which one is present here.
@@ -682,7 +686,7 @@ if test x$use_hardening != xno; then
case $host in
*mingw*)
- AC_CHECK_LIB([ssp], [main],, AC_MSG_ERROR(lib missing))
+ AC_CHECK_LIB([ssp], [main],, AC_MSG_ERROR(libssp missing))
;;
esac
fi
diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py
index d544d5c371..abfa5ed5ae 100755
--- a/contrib/devtools/circular-dependencies.py
+++ b/contrib/devtools/circular-dependencies.py
@@ -37,7 +37,7 @@ for arg in sys.argv[1:]:
# TODO: implement support for multiple include directories
for arg in sorted(files.keys()):
module = files[arg]
- with open(arg, 'r') as f:
+ with open(arg, 'r', encoding="utf8") as f:
for line in f:
match = RE.match(line)
if match:
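
The explicit `encoding="utf8"` arguments added to `open()` calls throughout these scripts matter because Python's default text encoding follows the user's locale. A minimal sketch, not part of the patch, illustrating the difference:

```python
# Sketch: why open() should not rely on the locale-dependent default encoding.
import locale

# Under e.g. LC_ALL=C the preferred encoding is typically ASCII, so reading a
# UTF-8 file with plain open(path) can raise UnicodeDecodeError, while
# open(path, encoding="utf8") behaves identically on every machine.
print(locale.getpreferredencoding(False))
```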
diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py
index 5402870fba..77e845a9b4 100755
--- a/contrib/devtools/clang-format-diff.py
+++ b/contrib/devtools/clang-format-diff.py
@@ -152,7 +152,7 @@ def main():
sys.exit(p.returncode)
if not args.i:
- with open(filename) as f:
+ with open(filename, encoding="utf8") as f:
code = f.readlines()
formatted_code = io.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
index 82d3c19683..da7d74bdc4 100755
--- a/contrib/devtools/copyright_header.py
+++ b/contrib/devtools/copyright_header.py
@@ -146,7 +146,7 @@ def file_has_without_c_style_copyright_for_holder(contents, holder_name):
################################################################################
def read_file(filename):
- return open(os.path.abspath(filename), 'r').read()
+ return open(os.path.abspath(filename), 'r', encoding="utf8").read()
def gather_file_info(filename):
info = {}
@@ -325,13 +325,13 @@ def get_most_recent_git_change_year(filename):
################################################################################
def read_file_lines(filename):
- f = open(os.path.abspath(filename), 'r')
+ f = open(os.path.abspath(filename), 'r', encoding="utf8")
file_lines = f.readlines()
f.close()
return file_lines
def write_file_lines(filename, file_lines):
- f = open(os.path.abspath(filename), 'w')
+ f = open(os.path.abspath(filename), 'w', encoding="utf8")
f.write(''.join(file_lines))
f.close()
diff --git a/contrib/devtools/gen-manpages.sh b/contrib/devtools/gen-manpages.sh
index 27c80548c1..76d4adc81c 100755
--- a/contrib/devtools/gen-manpages.sh
+++ b/contrib/devtools/gen-manpages.sh
@@ -1,5 +1,6 @@
#!/bin/bash
+export LC_ALL=C
TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)}
BUILDDIR=${BUILDDIR:-$TOPDIR}
diff --git a/contrib/devtools/github-merge.py b/contrib/devtools/github-merge.py
index 187ef75fb7..4e90f85f50 100755
--- a/contrib/devtools/github-merge.py
+++ b/contrib/devtools/github-merge.py
@@ -191,7 +191,7 @@ def main():
merge_branch = 'pull/'+pull+'/merge'
local_merge_branch = 'pull/'+pull+'/local-merge'
- devnull = open(os.devnull,'w')
+ devnull = open(os.devnull, 'w', encoding="utf8")
try:
subprocess.check_call([GIT,'checkout','-q',branch])
except subprocess.CalledProcessError:
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 307e057736..9b6d6bf665 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -9,7 +9,7 @@ import subprocess
import unittest
def write_testcode(filename):
- with open(filename, 'w') as f:
+ with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py
index b36e6968bf..f0098cfcdf 100755
--- a/contrib/devtools/update-translations.py
+++ b/contrib/devtools/update-translations.py
@@ -30,6 +30,8 @@ SOURCE_LANG = 'bitcoin_en.ts'
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
+# Regexp to check for Bitcoin addresses
+ADDRESS_REGEXP = re.compile('([13]|bc1)[a-zA-Z0-9]{30,}')
def check_at_repository_root():
if not os.path.exists('.git'):
@@ -122,6 +124,12 @@ def escape_cdata(text):
text = text.replace('"', '&quot;')
return text
+def contains_bitcoin_addr(text, errors):
+ if text != None and ADDRESS_REGEXP.search(text) != None:
+ errors.append('Translation "%s" contains a bitcoin address. This will be removed.' % (text))
+ return True
+ return False
+
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
@@ -160,7 +168,7 @@ def postprocess_translations(reduce_diff_hacks=False):
if translation is None:
continue
errors = []
- valid = check_format_specifiers(source, translation, errors, numerus)
+ valid = check_format_specifiers(source, translation, errors, numerus) and not contains_bitcoin_addr(translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
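
A short sketch of how the new `ADDRESS_REGEXP` check behaves; it is not part of the patch, and the address-shaped string below is made up purely for illustration:

```python
# Sketch of the address check added above; the address-like string is fabricated.
import re

ADDRESS_REGEXP = re.compile('([13]|bc1)[a-zA-Z0-9]{30,}')

assert ADDRESS_REGEXP.search('Please enter a label') is None
assert ADDRESS_REGEXP.search('Donate to 1ExampleExampleExampleExampleExample') is not None
```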
diff --git a/contrib/filter-lcov.py b/contrib/filter-lcov.py
index 299377d691..df1db76e92 100755
--- a/contrib/filter-lcov.py
+++ b/contrib/filter-lcov.py
@@ -13,8 +13,8 @@ pattern = args.pattern
outfile = args.outfile
in_remove = False
-with open(tracefile, 'r') as f:
- with open(outfile, 'w') as wf:
+with open(tracefile, 'r', encoding="utf8") as f:
+ with open(outfile, 'w', encoding="utf8") as wf:
for line in f:
for p in pattern:
if line.startswith("SF:") and p in line:
diff --git a/contrib/gitian-build.sh b/contrib/gitian-build.sh
index 5a925f2282..8bfbf6d1a0 100755
--- a/contrib/gitian-build.sh
+++ b/contrib/gitian-build.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
# What to do
sign=false
verify=false
diff --git a/contrib/init/README.md b/contrib/init/README.md
index 1a949f3c07..8d3e57c526 100644
--- a/contrib/init/README.md
+++ b/contrib/init/README.md
@@ -5,7 +5,7 @@ Upstart: bitcoind.conf
OpenRC: bitcoind.openrc
bitcoind.openrcconf
CentOS: bitcoind.init
-OS X: org.bitcoin.bitcoind.plist
+macOS: org.bitcoin.bitcoind.plist
```
have been made available to assist packagers in creating node packages here.
diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh
index d315a7d3b7..4f74e67f2f 100755
--- a/contrib/install_db4.sh
+++ b/contrib/install_db4.sh
@@ -2,6 +2,7 @@
# Install libdb4.8 (Berkeley DB).
+export LC_ALL=C
set -e
if [ -z "${1}" ]; then
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py
index f8aea27342..b501388fd2 100755
--- a/contrib/linearize/linearize-data.py
+++ b/contrib/linearize/linearize-data.py
@@ -75,7 +75,7 @@ def get_blk_dt(blk_hdr):
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
- f = open(settings['hashlist'], "r")
+ f = open(settings['hashlist'], "r", encoding="utf8")
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
@@ -261,7 +261,7 @@ if __name__ == '__main__':
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
- f = open(sys.argv[1])
+ f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
diff --git a/contrib/linearize/linearize-hashes.py b/contrib/linearize/linearize-hashes.py
index 8e1266ae0b..bfd2171947 100755
--- a/contrib/linearize/linearize-hashes.py
+++ b/contrib/linearize/linearize-hashes.py
@@ -96,7 +96,7 @@ def get_block_hashes(settings, max_blocks_per_call=10000):
def get_rpc_cookie():
# Open the cookie file
- with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f:
+ with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
@@ -107,7 +107,7 @@ if __name__ == '__main__':
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
- f = open(sys.argv[1])
+ f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
diff --git a/contrib/macdeploy/detached-sig-apply.sh b/contrib/macdeploy/detached-sig-apply.sh
index 91674a92e6..f8503e4de8 100755
--- a/contrib/macdeploy/detached-sig-apply.sh
+++ b/contrib/macdeploy/detached-sig-apply.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
set -e
UNSIGNED="$1"
diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh
index 3379a4599c..5281ebcc47 100755
--- a/contrib/macdeploy/detached-sig-create.sh
+++ b/contrib/macdeploy/detached-sig-create.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
set -e
ROOTDIR=dist
diff --git a/contrib/macdeploy/extract-osx-sdk.sh b/contrib/macdeploy/extract-osx-sdk.sh
index ff9fbd58df..94122b56fc 100755
--- a/contrib/macdeploy/extract-osx-sdk.sh
+++ b/contrib/macdeploy/extract-osx-sdk.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
set -e
INPUTFILE="Xcode_7.3.1.dmg"
diff --git a/contrib/qos/tc.sh b/contrib/qos/tc.sh
index 0d1dd65b4f..738ea70dbe 100644
--- a/contrib/qos/tc.sh
+++ b/contrib/qos/tc.sh
@@ -2,6 +2,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
diff --git a/contrib/seeds/generate-seeds.py b/contrib/seeds/generate-seeds.py
index ace7d3534f..fe7cd1d597 100755
--- a/contrib/seeds/generate-seeds.py
+++ b/contrib/seeds/generate-seeds.py
@@ -127,10 +127,10 @@ def main():
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
- with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
+ with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
- with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
+ with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
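
For context on the "IPv4 ... wrapped inside an IPv6 address" comment written into the generated header, a small sketch (not part of the patch) of that mapping; the script performs an equivalent conversion itself when emitting `pnSeed6` entries:

```python
# Sketch of the IPv4-in-IPv6 mapping described in the generated header comment.
import ipaddress

v4 = ipaddress.ip_address('203.0.113.7')             # documentation-range example
mapped = ipaddress.ip_address('::ffff:' + str(v4))   # 16-byte IPv6-mapped form
print(mapped.packed.hex())                            # 00000000000000000000ffffcb007107
```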
diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md
index fa492fdd27..aa805ad1b9 100644
--- a/contrib/verify-commits/README.md
+++ b/contrib/verify-commits/README.md
@@ -7,18 +7,18 @@ are PGP signed (nearly always merge commits), as well as a script to verify
commits against a trusted keys list.
-Using verify-commits.sh safely
+Using verify-commits.py safely
------------------------------
Remember that you can't use an untrusted script to verify itself. This means
-that checking out code, then running `verify-commits.sh` against `HEAD` is
-_not_ safe, because the version of `verify-commits.sh` that you just ran could
+that checking out code, then running `verify-commits.py` against `HEAD` is
+_not_ safe, because the version of `verify-commits.py` that you just ran could
be backdoored. Instead, you need to use a trusted version of verify-commits
prior to checkout to make sure you're checking out only code signed by trusted
keys:
git fetch origin && \
- ./contrib/verify-commits/verify-commits.sh origin/master && \
+ ./contrib/verify-commits/verify-commits.py origin/master && \
git checkout origin/master
Note that the above isn't a good UI/UX yet, and needs significant improvements
@@ -42,6 +42,6 @@ said key. In order to avoid bumping the root-of-trust `trusted-git-root`
file, individual commits which were signed by such a key can be added to the
`allow-revsig-commits` file. That way, the PGP signatures are still verified
but no new commits can be signed by any expired/revoked key. To easily build a
-list of commits which need to be added, verify-commits.sh can be edited to test
+list of commits which need to be added, verify-commits.py can be edited to test
each commit with BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG set to both 1 and 0, and
those which need it set to 1 printed.
diff --git a/contrib/verify-commits/allow-incorrect-sha512-commits b/contrib/verify-commits/allow-incorrect-sha512-commits
new file mode 100644
index 0000000000..c572806f26
--- /dev/null
+++ b/contrib/verify-commits/allow-incorrect-sha512-commits
@@ -0,0 +1,2 @@
+f8feaa4636260b599294c7285bcf1c8b7737f74e
+8040ae6fc576e9504186f2ae3ff2c8125de1095c
diff --git a/contrib/verify-commits/allow-unclean-merge-commits b/contrib/verify-commits/allow-unclean-merge-commits
new file mode 100644
index 0000000000..7aab274b9a
--- /dev/null
+++ b/contrib/verify-commits/allow-unclean-merge-commits
@@ -0,0 +1,4 @@
+6052d509105790a26b3ad5df43dd61e7f1b24a12
+3798e5de334c3deb5f71302b782f6b8fbd5087f1
+326ffed09bfcc209a2efd6a2ebc69edf6bd200b5
+97d83739db0631be5d4ba86af3616014652c00ec
diff --git a/contrib/verify-commits/gpg.sh b/contrib/verify-commits/gpg.sh
index 8f3e4b8063..7a10ba7d7d 100755
--- a/contrib/verify-commits/gpg.sh
+++ b/contrib/verify-commits/gpg.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
INPUT=$(cat /dev/stdin)
VALID=false
REVSIG=false
@@ -57,7 +58,7 @@ if ! $VALID; then
exit 1
fi
if $VALID && $REVSIG; then
- printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null | grep "\[GNUPG:\] \(NEWSIG\|SIG_ID\|VALIDSIG\)"
+ printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null | grep "^\[GNUPG:\] \(NEWSIG\|SIG_ID\|VALIDSIG\)"
echo "$GOODREVSIG"
else
printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null
diff --git a/contrib/verify-commits/pre-push-hook.sh b/contrib/verify-commits/pre-push-hook.sh
index c21febb9e9..152f4e32e4 100755
--- a/contrib/verify-commits/pre-push-hook.sh
+++ b/contrib/verify-commits/pre-push-hook.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
if ! [[ "$2" =~ ^(git@)?(www.)?github.com(:|/)bitcoin/bitcoin(.git)?$ ]]; then
exit 0
fi
@@ -12,9 +13,9 @@ while read LINE; do
if [ "$4" != "refs/heads/master" ]; then
continue
fi
- if ! ./contrib/verify-commits/verify-commits.sh $3 > /dev/null 2>&1; then
+ if ! ./contrib/verify-commits/verify-commits.py $3 > /dev/null 2>&1; then
echo "ERROR: A commit is not signed, can't push"
- ./contrib/verify-commits/verify-commits.sh
+ ./contrib/verify-commits/verify-commits.py
exit 1
fi
done < /dev/stdin
diff --git a/contrib/verify-commits/verify-commits.py b/contrib/verify-commits/verify-commits.py
new file mode 100755
index 0000000000..a9e4977715
--- /dev/null
+++ b/contrib/verify-commits/verify-commits.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Verify commits against a trusted keys list."""
+import argparse
+import hashlib
+import os
+import subprocess
+import sys
+import time
+
+GIT = os.getenv('GIT', 'git')
+
+def tree_sha512sum(commit='HEAD'):
+ """Calculate the Tree-sha512 for the commit.
+
+ This is copied from github-merge.py."""
+
+ # request metadata for entire tree, recursively
+ files = []
+ blob_by_name = {}
+ for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
+ name_sep = line.index(b'\t')
+ metadata = line[:name_sep].split() # perms, 'blob', blobid
+ assert metadata[1] == b'blob'
+ name = line[name_sep + 1:]
+ files.append(name)
+ blob_by_name[name] = metadata[2]
+
+ files.sort()
+ # open connection to git-cat-file in batch mode to request data for all blobs
+ # this is much faster than launching it per file
+ p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ overall = hashlib.sha512()
+ for f in files:
+ blob = blob_by_name[f]
+ # request blob
+ p.stdin.write(blob + b'\n')
+ p.stdin.flush()
+ # read header: blob, "blob", size
+ reply = p.stdout.readline().split()
+ assert reply[0] == blob and reply[1] == b'blob'
+ size = int(reply[2])
+ # hash the blob data
+ intern = hashlib.sha512()
+ ptr = 0
+ while ptr < size:
+ bs = min(65536, size - ptr)
+ piece = p.stdout.read(bs)
+ if len(piece) == bs:
+ intern.update(piece)
+ else:
+ raise IOError('Premature EOF reading git cat-file output')
+ ptr += bs
+ dig = intern.hexdigest()
+ assert p.stdout.read(1) == b'\n' # ignore LF that follows blob data
+ # update overall hash with file hash
+ overall.update(dig.encode("utf-8"))
+ overall.update(" ".encode("utf-8"))
+ overall.update(f)
+ overall.update("\n".encode("utf-8"))
+ p.stdin.close()
+ if p.wait():
+ raise IOError('Non-zero return value executing git cat-file')
+ return overall.hexdigest()
+
+def main():
+ # Parse arguments
+ parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]')
+ parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check')
+ parser.add_argument('--clean-merge', type=float, dest='clean_merge', default=float('inf'), help='Only check clean merge after <NUMBER> days ago (default: %(default)s)', metavar='NUMBER')
+ parser.add_argument('commit', nargs='?', default='HEAD', help='Check clean merge up to commit <commit>')
+ args = parser.parse_args()
+
+ # get directory of this program and read data files
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ print("Using verify-commits data from " + dirname)
+ verified_root = open(dirname + "/trusted-git-root", "r", encoding="utf8").read().splitlines()[0]
+ verified_sha512_root = open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8").read().splitlines()[0]
+ revsig_allowed = open(dirname + "/allow-revsig-commits", "r", encoding="utf-8").read().splitlines()
+ unclean_merge_allowed = open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf-8").read().splitlines()
+ incorrect_sha512_allowed = open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf-8").read().splitlines()
+
+ # Set commit and branch and set variables
+ current_commit = args.commit
+ if ' ' in current_commit:
+ print("Commit must not contain spaces", file=sys.stderr)
+ sys.exit(1)
+ verify_tree = args.verify_tree
+ no_sha1 = True
+ prev_commit = ""
+ initial_commit = current_commit
+ branch = subprocess.check_output([GIT, 'show', '-s', '--format=%H', initial_commit], universal_newlines=True).splitlines()[0]
+
+ # Iterate through commits
+ while True:
+ if current_commit == verified_root:
+ print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root))
+ sys.exit(0)
+ if current_commit == verified_sha512_root:
+ if verify_tree:
+ print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr)
+ verify_tree = False
+ no_sha1 = False
+
+ os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1"
+ os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG'] = "1" if current_commit in revsig_allowed else "0"
+
+ # Check that the commit (and parents) was signed with a trusted key
+ if subprocess.call([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', current_commit], stdout=subprocess.DEVNULL):
+ if prev_commit != "":
+ print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr)
+ print("Parents are:", file=sys.stderr)
+ parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit], universal_newlines=True).splitlines()[0].split(' ')
+ for parent in parents:
+ subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr)
+ else:
+ print("{} was not signed with a trusted key!".format(current_commit), file=sys.stderr)
+ sys.exit(1)
+
+ # Check the Tree-SHA512
+ if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed:
+ tree_hash = tree_sha512sum(current_commit)
+ if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit], universal_newlines=True).splitlines():
+ print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr)
+ sys.exit(1)
+
+ # Merge commits should only have two parents
+ parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit], universal_newlines=True).splitlines()[0].split(' ')
+ if len(parents) > 2:
+ print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr)
+ sys.exit(1)
+
+ # Check that the merge commit is clean
+ commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit], universal_newlines=True).splitlines()[0])
+ check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days
+ allow_unclean = current_commit in unclean_merge_allowed
+ if len(parents) == 2 and check_merge and not allow_unclean:
+ current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit], universal_newlines=True).splitlines()[0]
+ subprocess.call([GIT, 'checkout', '--force', '--quiet', parents[0]])
+ subprocess.call([GIT, 'merge', '--no-ff', '--quiet', parents[1]], stdout=subprocess.DEVNULL)
+ recreated_tree = subprocess.check_output([GIT, 'show', '--format=format:%T', 'HEAD'], universal_newlines=True).splitlines()[0]
+ if current_tree != recreated_tree:
+ print("Merge commit {} is not clean".format(current_commit), file=sys.stderr)
+ subprocess.call([GIT, 'diff', current_commit])
+ subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
+ sys.exit(1)
+ subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
+
+ prev_commit = current_commit
+ current_commit = parents[0]
+
+if __name__ == '__main__':
+ main()
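
The Tree-SHA512 being checked is a SHA-512 over one "<sha512-of-blob> <path>\n" line per file in the commit, with paths sorted byte-wise. A rough, illustrative re-implementation from a plain checkout (assuming the working tree exactly matches the commit and contains no untracked files; the real script reads blobs via `git cat-file --batch` as above):

```python
# Illustrative only: recompute a Tree-SHA512-style digest from files on disk.
import hashlib
import os

def tree_sha512_from_dir(path='.'):
    overall = hashlib.sha512()
    names = []
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if d != '.git']  # skip git metadata
        for fn in files:
            rel = os.path.relpath(os.path.join(root, fn), path)
            names.append(rel.replace(os.sep, '/').encode())
    for name in sorted(names):  # byte-wise path order, matching the script above
        with open(os.path.join(path, name.decode()), 'rb') as f:
            digest = hashlib.sha512(f.read()).hexdigest()
        overall.update(digest.encode() + b' ' + name + b'\n')
    return overall.hexdigest()

print(tree_sha512_from_dir('.'))
```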
diff --git a/contrib/verify-commits/verify-commits.sh b/contrib/verify-commits/verify-commits.sh
deleted file mode 100755
index 6415eea4d5..0000000000
--- a/contrib/verify-commits/verify-commits.sh
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2014-2016 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-DIR=$(dirname "$0")
-[ "/${DIR#/}" != "$DIR" ] && DIR=$(dirname "$(pwd)/$0")
-
-echo "Using verify-commits data from ${DIR}"
-
-VERIFIED_ROOT=$(cat "${DIR}/trusted-git-root")
-VERIFIED_SHA512_ROOT=$(cat "${DIR}/trusted-sha512-root-commit")
-REVSIG_ALLOWED=$(cat "${DIR}/allow-revsig-commits")
-
-HAVE_GNU_SHA512=1
-[ ! -x "$(which sha512sum)" ] && HAVE_GNU_SHA512=0
-
-if [ x"$1" = "x" ]; then
- CURRENT_COMMIT="HEAD"
-else
- CURRENT_COMMIT="$1"
-fi
-
-if [ "${CURRENT_COMMIT#* }" != "$CURRENT_COMMIT" ]; then
- echo "Commit must not contain spaces?" > /dev/stderr
- exit 1
-fi
-
-VERIFY_TREE=0
-if [ x"$2" = "x--tree-checks" ]; then
- VERIFY_TREE=1
-fi
-
-NO_SHA1=1
-PREV_COMMIT=""
-INITIAL_COMMIT="${CURRENT_COMMIT}"
-
-BRANCH="$(git rev-parse --abbrev-ref HEAD)"
-
-while true; do
- if [ "$CURRENT_COMMIT" = $VERIFIED_ROOT ]; then
- echo "There is a valid path from \"$INITIAL_COMMIT\" to $VERIFIED_ROOT where all commits are signed!"
- exit 0
- fi
-
- if [ "$CURRENT_COMMIT" = $VERIFIED_SHA512_ROOT ]; then
- if [ "$VERIFY_TREE" = "1" ]; then
- echo "All Tree-SHA512s matched up to $VERIFIED_SHA512_ROOT" > /dev/stderr
- fi
- VERIFY_TREE=0
- NO_SHA1=0
- fi
-
- if [ "$NO_SHA1" = "1" ]; then
- export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=0
- else
- export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=1
- fi
-
- if [ "${REVSIG_ALLOWED#*$CURRENT_COMMIT}" != "$REVSIG_ALLOWED" ]; then
- export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=1
- else
- export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=0
- fi
-
- if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit "$CURRENT_COMMIT" > /dev/null; then
- if [ "$PREV_COMMIT" != "" ]; then
- echo "No parent of $PREV_COMMIT was signed with a trusted key!" > /dev/stderr
- echo "Parents are:" > /dev/stderr
- PARENTS=$(git show -s --format=format:%P $PREV_COMMIT)
- for PARENT in $PARENTS; do
- git show -s $PARENT > /dev/stderr
- done
- else
- echo "$CURRENT_COMMIT was not signed with a trusted key!" > /dev/stderr
- fi
- exit 1
- fi
-
- # We always verify the top of the tree
- if [ "$VERIFY_TREE" = 1 -o "$PREV_COMMIT" = "" ]; then
- IFS_CACHE="$IFS"
- IFS='
-'
- for LINE in $(git ls-tree --full-tree -r "$CURRENT_COMMIT"); do
- case "$LINE" in
- "12"*)
- echo "Repo contains symlinks" > /dev/stderr
- IFS="$IFS_CACHE"
- exit 1
- ;;
- esac
- done
- IFS="$IFS_CACHE"
-
- FILE_HASHES=""
- for FILE in $(git ls-tree --full-tree -r --name-only "$CURRENT_COMMIT" | LC_ALL=C sort); do
- if [ "$HAVE_GNU_SHA512" = 1 ]; then
- HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | sha512sum | { read FIRST _; echo $FIRST; } )
- else
- HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | shasum -a 512 | { read FIRST _; echo $FIRST; } )
- fi
- [ "$FILE_HASHES" != "" ] && FILE_HASHES="$FILE_HASHES"'
-'
- FILE_HASHES="$FILE_HASHES$HASH $FILE"
- done
-
- if [ "$HAVE_GNU_SHA512" = 1 ]; then
- TREE_HASH="$(echo "$FILE_HASHES" | sha512sum)"
- else
- TREE_HASH="$(echo "$FILE_HASHES" | shasum -a 512)"
- fi
- HASH_MATCHES=0
- MSG="$(git show -s --format=format:%B "$CURRENT_COMMIT" | tail -n1)"
-
- case "$MSG -" in
- "Tree-SHA512: $TREE_HASH")
- HASH_MATCHES=1;;
- esac
-
- if [ "$HASH_MATCHES" = "0" ]; then
- echo "Tree-SHA512 did not match for commit $CURRENT_COMMIT" > /dev/stderr
- exit 1
- fi
- fi
-
- PARENTS=$(git show -s --format=format:%P "$CURRENT_COMMIT")
- PARENT1=${PARENTS%% *}
- PARENT2=""
- if [ "x$PARENT1" != "x$PARENTS" ]; then
- PARENTX=${PARENTS#* }
- PARENT2=${PARENTX%% *}
- if [ "x$PARENT2" != "x$PARENTX" ]; then
- echo "Commit $CURRENT_COMMIT is an octopus merge" > /dev/stderr
- exit 1
- fi
- fi
- if [ "x$PARENT2" != "x" ]; then
- CURRENT_TREE="$(git show --format="%T" "$CURRENT_COMMIT")"
- git checkout --force --quiet "$PARENT1"
- git merge --no-ff --quiet "$PARENT2" >/dev/null
- RECREATED_TREE="$(git show --format="%T" HEAD)"
- if [ "$CURRENT_TREE" != "$RECREATED_TREE" ]; then
- echo "Merge commit $CURRENT_COMMIT is not clean" > /dev/stderr
- git diff "$CURRENT_COMMIT"
- git checkout --force --quiet "$BRANCH"
- exit 1
- fi
- git checkout --force --quiet "$BRANCH"
- fi
- PREV_COMMIT="$CURRENT_COMMIT"
- CURRENT_COMMIT="$PARENT1"
-done
diff --git a/contrib/verifybinaries/verify.sh b/contrib/verifybinaries/verify.sh
index e0266bf08a..42d9ffb752 100755
--- a/contrib/verifybinaries/verify.sh
+++ b/contrib/verifybinaries/verify.sh
@@ -11,6 +11,7 @@
### The script returns 0 if everything passes the checks. It returns 1 if either the
### signature check or the hash check doesn't pass. If an error occurs the return value is 2
+export LC_ALL=C
function clean_up {
for file in $*
do
diff --git a/contrib/windeploy/detached-sig-create.sh b/contrib/windeploy/detached-sig-create.sh
index bf4978d143..15f8108cf0 100755
--- a/contrib/windeploy/detached-sig-create.sh
+++ b/contrib/windeploy/detached-sig-create.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
if [ -z "$OSSLSIGNCODE" ]; then
OSSLSIGNCODE=osslsigncode
fi
diff --git a/depends/README.md b/depends/README.md
index 99eef1952c..482b94a64f 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -22,7 +22,7 @@ Common `host-platform-triplets` for cross compilation are:
- `i686-w64-mingw32` for Win32
- `x86_64-w64-mingw32` for Win64
-- `x86_64-apple-darwin11` for MacOSX
+- `x86_64-apple-darwin11` for macOS
- `arm-linux-gnueabihf` for Linux ARM 32 bit
- `aarch64-linux-gnu` for Linux ARM 64 bit
@@ -49,7 +49,7 @@ The following can be set when running make: make FOO=bar
SOURCES_PATH: downloaded sources will be placed here
BASE_CACHE: built packages will be placed here
- SDK_PATH: Path where sdk's can be found (used by OSX)
+ SDK_PATH: Path where sdk's can be found (used by macOS)
FALLBACK_DOWNLOAD_PATH: If a source file can't be fetched, try here before giving up
NO_QT: Don't download/build/cache qt and its dependencies
NO_WALLET: Don't download/build/cache libs needed to enable the wallet
@@ -64,7 +64,7 @@ options will be passed to bitcoin's configure. In this case, `--disable-wallet`.
Additional targets:
download: run 'make download' to fetch all sources without building them
- download-osx: run 'make download-osx' to fetch all sources needed for osx builds
+ download-osx: run 'make download-osx' to fetch all sources needed for macOS builds
download-win: run 'make download-win' to fetch all sources needed for win builds
download-linux: run 'make download-linux' to fetch all sources needed for linux builds
diff --git a/depends/description.md b/depends/description.md
index 74f9ef3f20..9fc7093be4 100644
--- a/depends/description.md
+++ b/depends/description.md
@@ -7,7 +7,7 @@ In theory, binaries for any target OS/architecture can be created, from a
builder running any OS/architecture. In practice, build-side tools must be
specified when the defaults don't fit, and packages must be amended to work
on new hosts. For now, a build architecture of x86_64 is assumed, either on
-Linux or OSX.
+Linux or macOS.
### No reliance on timestamps
diff --git a/doc/README.md b/doc/README.md
index ddb239f60c..45762b2374 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -22,7 +22,7 @@ Unpack the files into a directory and run:
Unpack the files into a directory, and then run bitcoin-qt.exe.
-### OS X
+### macOS
Drag Bitcoin-Core to your applications folder, and then run Bitcoin-Core.
@@ -38,7 +38,7 @@ Building
The following are developer notes on how to build Bitcoin on your native platform. They are not complete guides, but include notes on the necessary libraries, compile flags, etc.
- [Dependencies](dependencies.md)
-- [OS X Build Notes](build-osx.md)
+- [macOS Build Notes](build-osx.md)
- [Unix Build Notes](build-unix.md)
- [Windows Build Notes](build-windows.md)
- [OpenBSD Build Notes](build-openbsd.md)
diff --git a/doc/README_osx.md b/doc/README_osx.md
index 975be4be9e..739e22d634 100644
--- a/doc/README_osx.md
+++ b/doc/README_osx.md
@@ -1,12 +1,12 @@
-Deterministic OS X DMG Notes.
+Deterministic macOS DMG Notes.
-Working OS X DMGs are created in Linux by combining a recent clang,
+Working macOS DMGs are created in Linux by combining a recent clang,
the Apple binutils (ld, ar, etc) and DMG authoring tools.
Apple uses clang extensively for development and has upstreamed the necessary
functionality so that a vanilla clang can take advantage. It supports the use
of -F, -target, -mmacosx-version-min, and --sysroot, which are all necessary
-when building for OS X.
+when building for macOS.
Apple's version of binutils (called cctools) contains lots of functionality
missing in the FSF's binutils. In addition to extra linker options for
@@ -38,7 +38,7 @@ Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.1
Unfortunately, the usual linux tools (7zip, hpmount, loopback mount) are incapable of opening this file.
To create a tarball suitable for Gitian input, there are two options:
-Using Mac OS X, you can mount the dmg, and then create it with:
+Using macOS, you can mount the dmg, and then create it with:
```
$ hdiutil attach Xcode_7.3.1.dmg
$ tar -C /Volumes/Xcode/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ -czf MacOSX10.11.sdk.tar.gz MacOSX10.11.sdk
@@ -81,7 +81,7 @@ Background images and other features can be added to DMG files by inserting a
.DS_Store before creation. This is generated by the script
contrib/macdeploy/custom_dsstore.py.
-As of OS X Mavericks (10.9), using an Apple-blessed key to sign binaries is a
+As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a
requirement in order to satisfy the new Gatekeeper requirements. Because this
private key cannot be shared, we'll have to be a bit creative in order for the
build process to remain somewhat deterministic. Here's how it works:
diff --git a/doc/build-freebsd.md b/doc/build-freebsd.md
index c2e4e36dff..48746ce0c2 100644
--- a/doc/build-freebsd.md
+++ b/doc/build-freebsd.md
@@ -17,7 +17,7 @@ pkg install autoconf automake boost-libs git gmake libevent libtool openssl pkgc
For the wallet (optional):
```
./contrib/install_db4.sh `pwd`
-export BDB_PREFIX='$PWD/db4'
+export BDB_PREFIX="$PWD/db4"
```
See [dependencies.md](dependencies.md) for a complete overview.
diff --git a/doc/build-osx.md b/doc/build-osx.md
index e52a770ced..abd305cf9a 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -1,11 +1,11 @@
-Mac OS X Build Instructions and Notes
+macOS Build Instructions and Notes
====================================
The commands in this guide should be executed in a Terminal application.
The built-in one is located in `/Applications/Utilities/Terminal.app`.
Preparation
-----------
-Install the OS X command line tools:
+Install the macOS command line tools:
`xcode-select --install`
@@ -93,6 +93,6 @@ Other commands:
Notes
-----
-* Tested on OS X 10.8 through 10.13 on 64-bit Intel processors only.
+* Tested on OS X 10.8 Mountain Lion through macOS 10.13 High Sierra on 64-bit Intel processors only.
* Building with downloaded Qt binaries is not officially supported. See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714)
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 60d888a297..1ab5659511 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -70,19 +70,7 @@ tuned to conserve memory with additional CXXFLAGS:
Build requirements:
- sudo apt-get install build-essential libtool autotools-dev automake pkg-config libssl-dev libevent-dev bsdmainutils python3
-
-Options when installing required Boost library files:
-
-1. On at least Ubuntu 14.04+ and Debian 7+ there are generic names for the
-individual boost development packages, so the following can be used to only
-install necessary parts of boost:
-
- sudo apt-get install libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-program-options-dev libboost-test-dev libboost-thread-dev
-
-2. If that doesn't work, you can install all boost development packages with:
-
- sudo apt-get install libboost-all-dev
+ sudo apt-get install build-essential libtool autotools-dev automake pkg-config libssl-dev libevent-dev bsdmainutils python3 libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-program-options-dev libboost-test-dev libboost-thread-dev
BerkeleyDB is required for the wallet.
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 9081cab911..2fa91ecb02 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -499,7 +499,35 @@ Strings and formatting
- Use `ParseInt32`, `ParseInt64`, `ParseUInt32`, `ParseUInt64`, `ParseDouble` from `utilstrencodings.h` for number parsing
- - *Rationale*: These functions do overflow checking, and avoid pesky locale issues
+ - *Rationale*: These functions do overflow checking, and avoid pesky locale issues.
+
+- Avoid using locale dependent functions if possible. You can use the provided
+ [`lint-locale-dependence.sh`](/contrib/devtools/lint-locale-dependence.sh)
+ to check for accidental use of locale dependent functions.
+
+ - *Rationale*: Unnecessary locale dependence can cause bugs that are very tricky to isolate and fix.
+
+ - These functions are known to be locale dependent:
+ `alphasort`, `asctime`, `asprintf`, `atof`, `atoi`, `atol`, `atoll`, `atoq`,
+ `btowc`, `ctime`, `dprintf`, `fgetwc`, `fgetws`, `fprintf`, `fputwc`,
+ `fputws`, `fscanf`, `fwprintf`, `getdate`, `getwc`, `getwchar`, `isalnum`,
+ `isalpha`, `isblank`, `iscntrl`, `isdigit`, `isgraph`, `islower`, `isprint`,
+ `ispunct`, `isspace`, `isupper`, `iswalnum`, `iswalpha`, `iswblank`,
+ `iswcntrl`, `iswctype`, `iswdigit`, `iswgraph`, `iswlower`, `iswprint`,
+ `iswpunct`, `iswspace`, `iswupper`, `iswxdigit`, `isxdigit`, `mblen`,
+ `mbrlen`, `mbrtowc`, `mbsinit`, `mbsnrtowcs`, `mbsrtowcs`, `mbstowcs`,
+ `mbtowc`, `mktime`, `putwc`, `putwchar`, `scanf`, `snprintf`, `sprintf`,
+ `sscanf`, `stoi`, `stol`, `stoll`, `strcasecmp`, `strcasestr`, `strcoll`,
+ `strfmon`, `strftime`, `strncasecmp`, `strptime`, `strtod`, `strtof`,
+ `strtoimax`, `strtol`, `strtold`, `strtoll`, `strtoq`, `strtoul`,
+ `strtoull`, `strtoumax`, `strtouq`, `strxfrm`, `swprintf`, `tolower`,
+ `toupper`, `towctrans`, `towlower`, `towupper`, `ungetwc`, `vasprintf`,
+ `vdprintf`, `versionsort`, `vfprintf`, `vfscanf`, `vfwprintf`, `vprintf`,
+ `vscanf`, `vsnprintf`, `vsprintf`, `vsscanf`, `vswprintf`, `vwprintf`,
+ `wcrtomb`, `wcscasecmp`, `wcscoll`, `wcsftime`, `wcsncasecmp`, `wcsnrtombs`,
+ `wcsrtombs`, `wcstod`, `wcstof`, `wcstoimax`, `wcstol`, `wcstold`,
+ `wcstoll`, `wcstombs`, `wcstoul`, `wcstoull`, `wcstoumax`, `wcswidth`,
+ `wcsxfrm`, `wctob`, `wctomb`, `wctrans`, `wctype`, `wcwidth`, `wprintf`
- For `strprintf`, `LogPrint`, `LogPrintf` formatting characters don't need size specifiers
@@ -567,6 +595,12 @@ Source code organization
- *Rationale*: Shorter and simpler header files are easier to read, and reduce compile time
+- Use only the lowercase alphanumerics (`a-z0-9`), underscore (`_`) and hyphen (`-`) in source code filenames.
+
+ - *Rationale*: `grep`:ing and auto-completing filenames is easier when using a consistent
+ naming pattern. Potential problems when building on case-insensitive filesystems are
+ avoided when using only lowercase characters in source code filenames.
+
- Every `.cpp` and `.h` file should `#include` every header file it directly uses classes, functions or other
definitions from, even if those headers are already included indirectly through other headers.
@@ -594,8 +628,8 @@ namespace {
- *Rationale*: Avoids confusion about the namespace context
-- Prefer `#include <primitives/transaction.h>` bracket syntax instead of
- `#include "primitives/transactions.h"` quote syntax when possible.
+- Use `#include <primitives/transaction.h>` bracket syntax instead of
+ `#include "primitives/transactions.h"` quote syntax.
- *Rationale*: Bracket syntax is less ambiguous because the preprocessor
searches a fixed list of include directories without taking location of the
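
The locale hazard behind the guidance added above is not specific to C++; a small Python sketch (illustrative only, and assuming a German locale such as de_DE.UTF-8 is installed) shows how locale-dependent parsing changes behavior from machine to machine:

```python
# Illustrative only: locale-dependent parsing gives different results per machine.
import locale

locale.setlocale(locale.LC_NUMERIC, 'de_DE.UTF-8')  # assumption: this locale exists
print(locale.atof('1,5'))   # 1.5 -- the decimal separator is a comma in this locale
print(float('1.5'))         # locale-independent, behaves the same everywhere
```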
diff --git a/doc/init.md b/doc/init.md
index ffd13ae1f9..d04f7d186a 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -15,7 +15,7 @@ Service User
All three Linux startup configurations assume the existence of a "bitcoin" user
and group. They must be created before attempting to use these scripts.
-The OS X configuration assumes bitcoind will be set up for the current user.
+The macOS configuration assumes bitcoind will be set up for the current user.
Configuration
---------------------------------
@@ -65,7 +65,7 @@ reasons to make the configuration file and data directory only readable by the
bitcoin user and group. Access to bitcoin-cli and other bitcoind rpc clients
can then be controlled by group membership.
-### Mac OS X
+### macOS
Binary: `/usr/local/bin/bitcoind`
Configuration file: `~/Library/Application Support/Bitcoin/bitcoin.conf`
@@ -111,7 +111,7 @@ Using this script, you can adjust the path and flags to the bitcoind program by
setting the BITCOIND and FLAGS environment variables in the file
/etc/sysconfig/bitcoind. You can also use the DAEMONOPTS environment variable here.
-### Mac OS X
+### macOS
Copy org.bitcoin.bitcoind.plist into ~/Library/LaunchAgents. Load the launch agent by
running `launchctl load ~/Library/LaunchAgents/org.bitcoin.bitcoind.plist`.
diff --git a/doc/release-notes-pr12892.md b/doc/release-notes-pr12892.md
index 8105eca5c0..f4a95bd40f 100644
--- a/doc/release-notes-pr12892.md
+++ b/doc/release-notes-pr12892.md
@@ -18,7 +18,7 @@ Here are the changes to RPC methods:
| Deprecated Method | New Method | Notes |
| :---------------------- | :-------------------- | :-----------|
| `getaccount` | `getaddressinfo` | `getaddressinfo` returns a json object with address information instead of just the name of the account as a string. |
-| `getaccountaddress` | `getlabeladdress` | `getlabeladdress` throws an error by default if the label does not already exist, but provides a `force` option for compatibility with existing applications. |
+| `getaccountaddress` | n/a | There is no replacement for `getaccountaddress` since labels do not have an associated receive address. |
| `getaddressesbyaccount` | `getaddressesbylabel` | `getaddressesbylabel` returns a json object with the addresses as keys, instead of a list of strings. |
| `getreceivedbyaccount` | `getreceivedbylabel` | _no change in behavior_ |
| `listaccounts` | `listlabels` | `listlabels` does not return a balance or accept `minconf` and `watchonly` arguments. |
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 7a9a98bfec..e1bb84cca9 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -56,6 +56,11 @@ frequently tested on them.
Notable changes
===============
+GUI changes
+-----------
+
+- Block storage can be limited under Preferences, in the Main tab. Undoing this setting requires downloading the full blockchain again. This mode is incompatible with -txindex and -rescan.
+
RPC changes
------------
diff --git a/doc/release-notes/release-notes-0.16.1.md b/doc/release-notes/release-notes-0.16.1.md
new file mode 100644
index 0000000000..d99361ae1d
--- /dev/null
+++ b/doc/release-notes/release-notes-0.16.1.md
@@ -0,0 +1,145 @@
+Bitcoin Core version 0.16.1 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-0.16.1/>
+
+This is a new minor version release, with various bugfixes
+as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes for older versions), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+The first time you run version 0.15.0 or newer, your chainstate database will be converted to a
+new format, which will take anywhere from a few minutes to half an hour,
+depending on the speed of your machine.
+
+Note that the block database format also changed in version 0.8.0 and there is no
+automatic upgrade code from before version 0.8 to version 0.15.0 or higher. Upgrading
+directly from 0.7.x and earlier without re-downloading the blockchain is not supported.
+However, as usual, old wallet versions are still supported.
+
+Downgrading warning
+-------------------
+
+Wallets created in 0.16 and later are not compatible with versions prior to 0.16
+and will not work if you try to use newly created wallets in older versions. Existing
+wallets that were created with older versions are not affected by this.
+
+Compatibility
+==============
+
+Bitcoin Core is extensively tested on multiple operating systems using
+the Linux kernel, macOS 10.8+, and Windows Vista and later. Windows XP is not supported.
+
+Bitcoin Core should also work on most other Unix-like systems but is not
+frequently tested on them.
+
+Notable changes
+===============
+
+Miner block size removed
+------------------------
+
+The `-blockmaxsize` option for miners to limit their blocks' sizes was
+deprecated in version 0.15.1, and has now been removed. Miners should use the
+`-blockmaxweight` option if they want to limit the weight of their blocks.
+
+0.16.1 change log
+------------------
+
+### Policy
+- #11423 `d353dd1` [Policy] Several transaction standardness rules (jl2012)
+
+### Mining
+- #12756 `e802c22` [config] Remove blockmaxsize option (jnewbery)
+
+### Block and transaction handling
+- #13199 `c71e535` Bugfix: ensure consistency of m_failed_blocks after reconsiderblock (sdaftuar)
+- #13023 `bb79aaf` Fix some concurrency issues in ActivateBestChain() (skeees)
+
+### P2P protocol and network code
+- #12626 `f60e84d` Limit the number of IPs addrman learns from each DNS seeder (EthanHeilman)
+
+### Wallet
+- #13265 `5d8de76` Exit SyncMetaData if there are no transactions to sync (laanwj)
+- #13030 `5ff571e` Fix zapwallettxes/multiwallet interaction. (jnewbery)
+
+### GUI
+- #12999 `1720eb3` Show the Window when double clicking the taskbar icon (ken2812221)
+- #12650 `f118a7a` Fix issue: "default port not shown correctly in settings dialog" (251Labs)
+- #13251 `ea487f9` Rephrase Bech32 checkbox texts, and enable it with legacy address default (fanquake)
+
+### Build system
+- #12474 `b0f692f` Allow depends system to support armv7l (hkjn)
+- #12585 `72a3290` depends: Switch to downloading expat from GitHub (fanquake)
+- #12648 `46ca8f3` test: Update trusted git root (MarcoFalke)
+- #11995 `686cb86` depends: Fix Qt build with Xcode 9 (fanquake)
+- #12636 `845838c` backport: #11995 Fix Qt build with Xcode 9 (fanquake)
+- #12946 `e055bc0` depends: Fix Qt build with XCode 9.3 (fanquake)
+- #12998 `7847b92` Default to defining endian-conversion DECLs in compat w/o config (TheBlueMatt)
+
+### Tests and QA
+- #12447 `01f931b` Add missing signal.h header (laanwj)
+- #12545 `1286f3e` Use wait_until to ensure ping goes out (Empact)
+- #12804 `4bdb0ce` Fix intermittent rpc_net.py failure. (jnewbery)
+- #12553 `0e98f96` Prefer wait_until over polling with time.sleep (Empact)
+- #12486 `cfebd40` Round target fee to 8 decimals in assert_fee_amount (kallewoof)
+- #12843 `df38b13` Test starting bitcoind with -h and -version (jnewbery)
+- #12475 `41c29f6` Fix python TypeError in script.py (MarcoFalke)
+- #12638 `0a76ed2` Cache only chain and wallet for regtest datadir (MarcoFalke)
+- #12902 `7460945` Handle potential cookie race when starting node (sdaftuar)
+- #12904 `6c26df0` Ensure bitcoind processes are cleaned up when tests end (sdaftuar)
+- #13049 `9ea62a3` Backports (MarcoFalke)
+- #13201 `b8aacd6` Handle disconnect_node race (sdaftuar)
+
+### Miscellaneous
+- #12518 `a17fecf` Bump leveldb subtree (MarcoFalke)
+- #12442 `f3b8d85` devtools: Exclude patches from lint-whitespace (MarcoFalke)
+- #12988 `acdf433` Hold cs_main while calling UpdatedBlockTip() signal (skeees)
+- #12985 `0684cf9` Windows: Avoid launching as admin when NSIS installer ends. (JeremyRand)
+
+### Documentation
+- #12637 `60086dd` backport: #12556 fix version typo in getpeerinfo RPC call help (fanquake)
+- #13184 `4087dd0` RPC Docs: `gettxout*`: clarify bestblock and unspent counts (harding)
+- #13246 `6de7543` Bump to Ubuntu Bionic 18.04 in build-windows.md (ken2812221)
+- #12556 `e730b82` Fix version typo in getpeerinfo RPC call help (tamasblummer)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- 251
+- Ben Woosley
+- Chun Kuan Lee
+- David A. Harding
+- e0
+- fanquake
+- Henrik Jonsson
+- JeremyRand
+- Jesse Cohen
+- John Newbery
+- Johnson Lau
+- Karl-Johan Alm
+- Luke Dashjr
+- MarcoFalke
+- Matt Corallo
+- Pieter Wuille
+- Suhas Daftuar
+- Tamas Blummer
+- Wladimir J. van der Laan
+
+As well as everyone that helped translating on [Transifex](https://www.transifex.com/projects/p/bitcoin/).
diff --git a/doc/release-process.md b/doc/release-process.md
index fb6f08750d..912b620794 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -89,7 +89,7 @@ Ensure gitian-builder is up-to-date:
wget -P inputs http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz
popd
-Create the OS X SDK tarball, see the [OS X readme](README_osx.md) for details, and copy it into the inputs directory.
+Create the macOS SDK tarball, see the [macOS readme](README_osx.md) for details, and copy it into the inputs directory.
### Optional: Seed the Gitian sources cache and offline git repositories
@@ -111,7 +111,7 @@ NOTE: Offline builds must use the --url flag to ensure Gitian fetches only from
The gbuild invocations below <b>DO NOT DO THIS</b> by default.
-### Build and sign Bitcoin Core for Linux, Windows, and OS X:
+### Build and sign Bitcoin Core for Linux, Windows, and macOS:
pushd ./gitian-builder
./bin/gbuild --num-make 2 --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
@@ -134,7 +134,7 @@ Build output expected:
1. source tarball (`bitcoin-${VERSION}.tar.gz`)
2. linux 32-bit and 64-bit dist tarballs (`bitcoin-${VERSION}-linux[32|64].tar.gz`)
3. windows 32-bit and 64-bit unsigned installers and dist zips (`bitcoin-${VERSION}-win[32|64]-setup-unsigned.exe`, `bitcoin-${VERSION}-win[32|64].zip`)
- 4. OS X unsigned installer and dist tarball (`bitcoin-${VERSION}-osx-unsigned.dmg`, `bitcoin-${VERSION}-osx64.tar.gz`)
+ 4. macOS unsigned installer and dist tarball (`bitcoin-${VERSION}-osx-unsigned.dmg`, `bitcoin-${VERSION}-osx64.tar.gz`)
5. Gitian signatures (in `gitian.sigs/${VERSION}-<linux|{win,osx}-unsigned>/(your Gitian key)/`)
### Verify other gitian builders signatures to your own. (Optional)
@@ -161,13 +161,13 @@ Commit your signature to gitian.sigs:
git push # Assuming you can push to the gitian.sigs tree
popd
-Codesigner only: Create Windows/OS X detached signatures:
+Codesigner only: Create Windows/macOS detached signatures:
- Only one person handles codesigning. Everyone else should skip to the next step.
-- Only once the Windows/OS X builds each have 3 matching signatures may they be signed with their respective release keys.
+- Only once the Windows/macOS builds each have 3 matching signatures may they be signed with their respective release keys.
-Codesigner only: Sign the osx binary:
+Codesigner only: Sign the macOS binary:
- transfer bitcoin-osx-unsigned.tar.gz to osx for signing
+ transfer bitcoin-osx-unsigned.tar.gz to macOS for signing
tar xf bitcoin-osx-unsigned.tar.gz
./detached-sig-create.sh -s "Key ID"
Enter the keychain password and authorize the signature
@@ -192,12 +192,12 @@ Codesigner only: Commit the detached codesign payloads:
git tag -s v${VERSION} HEAD
git push the current branch and new tag
-Non-codesigners: wait for Windows/OS X detached signatures:
+Non-codesigners: wait for Windows/macOS detached signatures:
-- Once the Windows/OS X builds each have 3 matching signatures, they will be signed with their respective release keys.
+- Once the Windows/macOS builds each have 3 matching signatures, they will be signed with their respective release keys.
- Detached signatures will then be committed to the [bitcoin-detached-sigs](https://github.com/bitcoin-core/bitcoin-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries.
-Create (and optionally verify) the signed OS X binary:
+Create (and optionally verify) the signed macOS binary:
pushd ./gitian-builder
./bin/gbuild -i --commit signature=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml
@@ -216,7 +216,7 @@ Create (and optionally verify) the signed Windows binaries:
mv build/out/bitcoin-*win32-setup.exe ../bitcoin-${VERSION}-win32-setup.exe
popd
-Commit your signature for the signed OS X/Windows binaries:
+Commit your signature for the signed macOS/Windows binaries:
pushd gitian.sigs
git add ${VERSION}-osx-signed/"${SIGNER}"
diff --git a/doc/translation_process.md b/doc/translation_process.md
index 5a9c59914e..022d7bb00b 100644
--- a/doc/translation_process.md
+++ b/doc/translation_process.md
@@ -46,9 +46,7 @@ Visit the [Transifex Signup](https://www.transifex.com/signup/) page to create a
You can find the Bitcoin translation project at [https://www.transifex.com/projects/p/bitcoin/](https://www.transifex.com/projects/p/bitcoin/).
### Installing the Transifex client command-line tool
-The client it used to fetch updated translations. If you are having problems, or need more details, see [http://docs.transifex.com/developer/client/setup](http://docs.transifex.com/developer/client/setup)
-
-**For Linux and Mac**
+The client is used to fetch updated translations. If you are having problems, or need more details, see [https://docs.transifex.com/client/installing-the-client](https://docs.transifex.com/client/installing-the-client)
`pip install transifex-client`
@@ -64,10 +62,6 @@ token =
username = USERNAME
```
-**For Windows**
-
-Please see [http://docs.transifex.com/developer/client/setup#windows](http://docs.transifex.com/developer/client/setup#windows) for details on installation.
-
The Transifex Bitcoin project config file is included as part of the repo. It can be found at `.tx/config`, however you shouldn’t need to change anything.
### Synchronising translations
diff --git a/share/genbuild.sh b/share/genbuild.sh
index 419e0da0fd..38c9ba176c 100755
--- a/share/genbuild.sh
+++ b/share/genbuild.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
if [ $# -gt 1 ]; then
cd "$2" || exit 1
fi
diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py
index e8f0820ca8..e908071321 100755
--- a/share/qt/extract_strings_qt.py
+++ b/share/qt/extract_strings_qt.py
@@ -63,7 +63,7 @@ child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
messages = parse_po(out.decode('utf-8'))
-f = open(OUT_CPP, 'w')
+f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
diff --git a/src/Makefile.am b/src/Makefile.am
index 96e56915a6..e03c21f16e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -29,9 +29,7 @@ LIBBITCOIN_COMMON=libbitcoin_common.a
LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a
LIBBITCOIN_CLI=libbitcoin_cli.a
LIBBITCOIN_UTIL=libbitcoin_util.a
-LIBBITCOIN_CRYPTO=crypto/libbitcoin_crypto.a
-LIBBITCOIN_CRYPTO_SSE41=crypto/libbitcoin_crypto_sse41.a
-LIBBITCOIN_CRYPTO_AVX2=crypto/libbitcoin_crypto_avx2.a
+LIBBITCOIN_CRYPTO_BASE=crypto/libbitcoin_crypto_base.a
LIBBITCOINQT=qt/libbitcoinqt.a
LIBSECP256K1=secp256k1/libsecp256k1.la
@@ -45,6 +43,16 @@ if ENABLE_WALLET
LIBBITCOIN_WALLET=libbitcoin_wallet.a
endif
+LIBBITCOIN_CRYPTO= $(LIBBITCOIN_CRYPTO_BASE)
+if ENABLE_SSE41
+LIBBITCOIN_CRYPTO_SSE41 = crypto/libbitcoin_crypto_sse41.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_SSE41)
+endif
+if ENABLE_AVX2
+LIBBITCOIN_CRYPTO_AVX2 = crypto/libbitcoin_crypto_avx2.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_AVX2)
+endif
+
$(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
@@ -52,8 +60,6 @@ $(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
# But to build the less dependent modules first, we manually select their order here:
EXTRA_LIBRARIES += \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_CONSENSUS) \
@@ -107,6 +113,7 @@ BITCOIN_CORE_H = \
fs.h \
httprpc.h \
httpserver.h \
+ index/base.h \
index/txindex.h \
indirectmap.h \
init.h \
@@ -208,6 +215,7 @@ libbitcoin_server_a_SOURCES = \
consensus/tx_verify.cpp \
httprpc.cpp \
httpserver.cpp \
+ index/base.cpp \
index/txindex.cpp \
init.cpp \
dbwrapper.cpp \
@@ -268,9 +276,9 @@ libbitcoin_wallet_a_SOURCES = \
$(BITCOIN_CORE_H)
# crypto primitives library
-crypto_libbitcoin_crypto_a_CPPFLAGS = $(AM_CPPFLAGS)
-crypto_libbitcoin_crypto_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-crypto_libbitcoin_crypto_a_SOURCES = \
+crypto_libbitcoin_crypto_base_a_CPPFLAGS = $(AM_CPPFLAGS)
+crypto_libbitcoin_crypto_base_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+crypto_libbitcoin_crypto_base_a_SOURCES = \
crypto/aes.cpp \
crypto/aes.h \
crypto/chacha20.h \
@@ -290,23 +298,19 @@ crypto_libbitcoin_crypto_a_SOURCES = \
crypto/sha512.h
if USE_ASM
-crypto_libbitcoin_crypto_a_SOURCES += crypto/sha256_sse4.cpp
+crypto_libbitcoin_crypto_base_a_SOURCES += crypto/sha256_sse4.cpp
endif
crypto_libbitcoin_crypto_sse41_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_sse41_a_CPPFLAGS = $(AM_CPPFLAGS)
-if ENABLE_SSE41
crypto_libbitcoin_crypto_sse41_a_CXXFLAGS += $(SSE41_CXXFLAGS)
crypto_libbitcoin_crypto_sse41_a_CPPFLAGS += -DENABLE_SSE41
-endif
crypto_libbitcoin_crypto_sse41_a_SOURCES = crypto/sha256_sse41.cpp
crypto_libbitcoin_crypto_avx2_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS = $(AM_CPPFLAGS)
-if ENABLE_AVX2
crypto_libbitcoin_crypto_avx2_a_CXXFLAGS += $(AVX2_CXXFLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS += -DENABLE_AVX2
-endif
crypto_libbitcoin_crypto_avx2_a_SOURCES = crypto/sha256_avx2.cpp
# consensus: shared between all executables that validate any consensus rules.
@@ -431,8 +435,6 @@ bitcoind_LDADD = \
$(LIBBITCOIN_ZMQ) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) \
$(LIBMEMENV) \
@@ -454,9 +456,7 @@ bitcoin_cli_LDADD = \
$(LIBBITCOIN_CLI) \
$(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL) \
- $(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2)
+ $(LIBBITCOIN_CRYPTO)
bitcoin_cli_LDADD += $(BOOST_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(EVENT_LIBS)
#
@@ -477,8 +477,6 @@ bitcoin_tx_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBSECP256K1)
bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
@@ -487,7 +485,7 @@ bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
# bitcoinconsensus library #
if BUILD_BITCOIN_LIBS
include_HEADERS = script/bitcoinconsensus.h
-libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
+libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_base_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
if GLIBC_BACK_COMPAT
libbitcoinconsensus_la_SOURCES += compat/glibc_compat.cpp
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 804df3bf21..bc2c46f228 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -17,7 +17,7 @@ bench_bench_bitcoin_SOURCES = \
bench/bench.h \
bench/checkblock.cpp \
bench/checkqueue.cpp \
- bench/Examples.cpp \
+ bench/examples.cpp \
bench/rollingbloom.cpp \
bench/crypto_hash.cpp \
bench/ccoins_caching.cpp \
@@ -39,8 +39,6 @@ bench_bench_bitcoin_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) \
$(LIBMEMENV) \
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index f8c31be3d4..a84a11ac45 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -408,7 +408,7 @@ endif
if ENABLE_ZMQ
qt_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
+qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
$(BOOST_LIBS) $(QT_LIBS) $(QT_DBUS_LIBS) $(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
qt_bitcoin_qt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
diff --git a/src/Makefile.qttest.include b/src/Makefile.qttest.include
index a4356f1cbd..4b14212b2e 100644
--- a/src/Makefile.qttest.include
+++ b/src/Makefile.qttest.include
@@ -62,7 +62,7 @@ endif
if ENABLE_ZMQ
qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) $(LIBLEVELDB) \
+qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(QT_DBUS_LIBS) $(QT_TEST_LIBS) $(QT_LIBS) \
$(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index cbd63cd53d..a4d31795ec 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -46,7 +46,7 @@ BITCOIN_TESTS =\
test/compress_tests.cpp \
test/crypto_tests.cpp \
test/cuckoocache_tests.cpp \
- test/DoS_tests.cpp \
+ test/denialofservice_tests.cpp \
test/getarg_tests.cpp \
test/hash_tests.cpp \
test/key_io_tests.cpp \
@@ -71,7 +71,7 @@ BITCOIN_TESTS =\
test/rpc_tests.cpp \
test/sanity_tests.cpp \
test/scheduler_tests.cpp \
- test/script_P2SH_tests.cpp \
+ test/script_p2sh_tests.cpp \
test/script_tests.cpp \
test/script_standard_tests.cpp \
test/scriptnum_tests.cpp \
@@ -110,7 +110,7 @@ if ENABLE_WALLET
test_test_bitcoin_LDADD += $(LIBBITCOIN_WALLET)
endif
-test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) \
+test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) \
$(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS)
test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index 3f4cc8c2bf..e4c7575e2d 100644
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -64,14 +64,6 @@ public:
explicit base_uint(const std::string& str);
- bool operator!() const
- {
- for (int i = 0; i < WIDTH; i++)
- if (pn[i] != 0)
- return false;
- return true;
- }
-
const base_uint operator~() const
{
base_uint ret;
diff --git a/src/bench/Examples.cpp b/src/bench/examples.cpp
index b68c9cd156..b68c9cd156 100644
--- a/src/bench/Examples.cpp
+++ b/src/bench/examples.cpp
diff --git a/src/bench/merkle_root.cpp b/src/bench/merkle_root.cpp
index ae2a0a28dc..fab12da311 100644
--- a/src/bench/merkle_root.cpp
+++ b/src/bench/merkle_root.cpp
@@ -2,11 +2,11 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "bench.h"
+#include <bench/bench.h>
-#include "uint256.h"
-#include "random.h"
-#include "consensus/merkle.h"
+#include <uint256.h>
+#include <random.h>
+#include <consensus/merkle.h>
static void MerkleRoot(benchmark::State& state)
{
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index be5ce14480..b332b5e581 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -56,6 +56,18 @@ static void SetupCliArgs()
gArgs.AddArg("-help", "", false, OptionsCategory::HIDDEN);
}
+/** libevent event log callback */
+static void libevent_log_cb(int severity, const char *msg)
+{
+#ifndef EVENT_LOG_ERR // EVENT_LOG_ERR was added in 2.0.19; but before then _EVENT_LOG_ERR existed.
+# define EVENT_LOG_ERR _EVENT_LOG_ERR
+#endif
+ // Ignore everything other than errors
+ if (severity >= EVENT_LOG_ERR) {
+ throw std::runtime_error(strprintf("libevent error: %s", msg));
+ }
+}
+
//////////////////////////////////////////////////////////////////////////////
//
// Start
@@ -506,6 +518,7 @@ int main(int argc, char* argv[])
fprintf(stderr, "Error: Initializing networking failed\n");
return EXIT_FAILURE;
}
+ event_set_log_callback(&libevent_log_cb);
try {
int ret = AppInitRPC(argc, argv);
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 3fb505d739..e6eb723cf4 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -591,7 +591,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
if (!prevOut.checkObject(types))
throw std::runtime_error("prevtxs internal object typecheck fail");
- uint256 txid = ParseHashUV(prevOut["txid"], "txid");
+ uint256 txid = ParseHashStr(prevOut["txid"].get_str(), "txid");
int nOut = atoi(prevOut["vout"].getValStr());
if (nOut < 0)
@@ -637,7 +637,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
// Sign what we can:
for (unsigned int i = 0; i < mergedTx.vin.size(); i++) {
- const CTxIn& txin = mergedTx.vin[i];
+ CTxIn& txin = mergedTx.vin[i];
const Coin& coin = view.AccessCoin(txin.prevout);
if (coin.IsSpent()) {
continue;
@@ -652,7 +652,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
// ... and merge in other signatures:
sigdata = CombineSignatures(prevPubKey, MutableTransactionSignatureChecker(&mergedTx, i, amount), sigdata, DataFromTransaction(txv, i));
- UpdateTransaction(mergedTx, i, sigdata);
+ UpdateInput(txin, sigdata);
}
tx = mergedTx;
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index a9b952e5a4..4b9abb2a1b 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -62,9 +62,6 @@ static bool AppInit(int argc, char* argv[])
//
// If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's main()
SetupServerArgs();
-#if HAVE_DECL_DAEMON
- gArgs.AddArg("-daemon", "Run in the background as a daemon and accept commands", false, OptionsCategory::OPTIONS);
-#endif
std::string error;
if (!gArgs.ParseParameters(argc, argv, error)) {
fprintf(stderr, "Error parsing command line arguments: %s\n", error.c_str());
diff --git a/src/core_io.h b/src/core_io.h
index 377633ac77..1d87d21d40 100644
--- a/src/core_io.h
+++ b/src/core_io.h
@@ -22,7 +22,6 @@ CScript ParseScript(const std::string& s);
std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode = false);
bool DecodeHexTx(CMutableTransaction& tx, const std::string& hex_tx, bool try_no_witness = false, bool try_witness = true);
bool DecodeHexBlk(CBlock&, const std::string& strHexBlk);
-uint256 ParseHashUV(const UniValue& v, const std::string& strName);
uint256 ParseHashStr(const std::string&, const std::string& strName);
std::vector<unsigned char> ParseHexUV(const UniValue& v, const std::string& strName);
diff --git a/src/core_read.cpp b/src/core_read.cpp
index aade7e21ca..4d851610ef 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -160,14 +160,6 @@ bool DecodeHexBlk(CBlock& block, const std::string& strHexBlk)
return true;
}
-uint256 ParseHashUV(const UniValue& v, const std::string& strName)
-{
- std::string strHex;
- if (v.isStr())
- strHex = v.getValStr();
- return ParseHashStr(strHex, strName); // Note: ParseHashStr("") throws a runtime_error
-}
-
uint256 ParseHashStr(const std::string& strHex, const std::string& strName)
{
if (!IsHex(strHex)) // Note: IsHex("") is false
diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp
index 6ac51d11cd..100d0d1fac 100644
--- a/src/crypto/sha256.cpp
+++ b/src/crypto/sha256.cpp
@@ -446,39 +446,91 @@ void TransformD64Wrapper(unsigned char* out, const unsigned char* in)
WriteBE32(out + 28, s[7]);
}
-bool SelfTest(TransformType tr) {
- static const unsigned char in1[65] = {0, 0x80};
- static const unsigned char in2[129] = {
- 0,
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
- 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
- 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0
- };
- static const uint32_t init[8] = {0x6a09e667ul, 0xbb67ae85ul, 0x3c6ef372ul, 0xa54ff53aul, 0x510e527ful, 0x9b05688cul, 0x1f83d9abul, 0x5be0cd19ul};
- static const uint32_t out1[8] = {0xe3b0c442ul, 0x98fc1c14ul, 0x9afbf4c8ul, 0x996fb924ul, 0x27ae41e4ul, 0x649b934cul, 0xa495991bul, 0x7852b855ul};
- static const uint32_t out2[8] = {0xce4153b0ul, 0x147c2a86ul, 0x3ed4298eul, 0xe0676bc8ul, 0x79fc77a1ul, 0x2abe1f49ul, 0xb2b055dful, 0x1069523eul};
- uint32_t buf[8];
- memcpy(buf, init, sizeof(buf));
- // Process nothing, and check we remain in the initial state.
- tr(buf, nullptr, 0);
- if (memcmp(buf, init, sizeof(buf))) return false;
- // Process the padded empty string (unaligned)
- tr(buf, in1 + 1, 1);
- if (memcmp(buf, out1, sizeof(buf))) return false;
- // Process 64 spaces (unaligned)
- memcpy(buf, init, sizeof(buf));
- tr(buf, in2 + 1, 2);
- if (memcmp(buf, out2, sizeof(buf))) return false;
- return true;
-}
-
TransformType Transform = sha256::Transform;
TransformD64Type TransformD64 = sha256::TransformD64;
TransformD64Type TransformD64_4way = nullptr;
TransformD64Type TransformD64_8way = nullptr;
-#if defined(USE_ASM) && (defined(__x86_64__) || defined(__amd64__))
+bool SelfTest() {
+ // Input state (equal to the initial SHA256 state)
+ static const uint32_t init[8] = {
+ 0x6a09e667ul, 0xbb67ae85ul, 0x3c6ef372ul, 0xa54ff53aul, 0x510e527ful, 0x9b05688cul, 0x1f83d9abul, 0x5be0cd19ul
+ };
+ // Some random input data to test with
+ static const unsigned char data[641] = "-" // Intentionally not aligned
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
+ "eiusmod tempor incididunt ut labore et dolore magna aliqua. Et m"
+ "olestie ac feugiat sed lectus vestibulum mattis ullamcorper. Mor"
+ "bi blandit cursus risus at ultrices mi tempus imperdiet nulla. N"
+ "unc congue nisi vita suscipit tellus mauris. Imperdiet proin fer"
+ "mentum leo vel orci. Massa tempor nec feugiat nisl pretium fusce"
+ " id velit. Telus in metus vulputate eu scelerisque felis. Mi tem"
+ "pus imperdiet nulla malesuada pellentesque. Tristique magna sit.";
+ // Expected output state for hashing the i*64 first input bytes above (excluding SHA256 padding).
+ static const uint32_t result[9][8] = {
+ {0x6a09e667ul, 0xbb67ae85ul, 0x3c6ef372ul, 0xa54ff53aul, 0x510e527ful, 0x9b05688cul, 0x1f83d9abul, 0x5be0cd19ul},
+ {0x91f8ec6bul, 0x4da10fe3ul, 0x1c9c292cul, 0x45e18185ul, 0x435cc111ul, 0x3ca26f09ul, 0xeb954caeul, 0x402a7069ul},
+ {0xcabea5acul, 0x374fb97cul, 0x182ad996ul, 0x7bd69cbful, 0x450ff900ul, 0xc1d2be8aul, 0x6a41d505ul, 0xe6212dc3ul},
+ {0xbcff09d6ul, 0x3e76f36eul, 0x3ecb2501ul, 0x78866e97ul, 0xe1c1e2fdul, 0x32f4eafful, 0x8aa6c4e5ul, 0xdfc024bcul},
+ {0xa08c5d94ul, 0x0a862f93ul, 0x6b7f2f40ul, 0x8f9fae76ul, 0x6d40439ful, 0x79dcee0cul, 0x3e39ff3aul, 0xdc3bdbb1ul},
+ {0x216a0895ul, 0x9f1a3662ul, 0xe99946f9ul, 0x87ba4364ul, 0x0fb5db2cul, 0x12bed3d3ul, 0x6689c0c7ul, 0x292f1b04ul},
+ {0xca3067f8ul, 0xbc8c2656ul, 0x37cb7e0dul, 0x9b6b8b0ful, 0x46dc380bul, 0xf1287f57ul, 0xc42e4b23ul, 0x3fefe94dul},
+ {0x3e4c4039ul, 0xbb6fca8cul, 0x6f27d2f7ul, 0x301e44a4ul, 0x8352ba14ul, 0x5769ce37ul, 0x48a1155ful, 0xc0e1c4c6ul},
+ {0xfe2fa9ddul, 0x69d0862bul, 0x1ae0db23ul, 0x471f9244ul, 0xf55c0145ul, 0xc30f9c3bul, 0x40a84ea0ul, 0x5b8a266cul},
+ };
+ // Expected output for each of the individual 8 64-byte messages under full double SHA256 (including padding).
+ static const unsigned char result_d64[256] = {
+ 0x09, 0x3a, 0xc4, 0xd0, 0x0f, 0xf7, 0x57, 0xe1, 0x72, 0x85, 0x79, 0x42, 0xfe, 0xe7, 0xe0, 0xa0,
+ 0xfc, 0x52, 0xd7, 0xdb, 0x07, 0x63, 0x45, 0xfb, 0x53, 0x14, 0x7d, 0x17, 0x22, 0x86, 0xf0, 0x52,
+ 0x48, 0xb6, 0x11, 0x9e, 0x6e, 0x48, 0x81, 0x6d, 0xcc, 0x57, 0x1f, 0xb2, 0x97, 0xa8, 0xd5, 0x25,
+ 0x9b, 0x82, 0xaa, 0x89, 0xe2, 0xfd, 0x2d, 0x56, 0xe8, 0x28, 0x83, 0x0b, 0xe2, 0xfa, 0x53, 0xb7,
+ 0xd6, 0x6b, 0x07, 0x85, 0x83, 0xb0, 0x10, 0xa2, 0xf5, 0x51, 0x3c, 0xf9, 0x60, 0x03, 0xab, 0x45,
+ 0x6c, 0x15, 0x6e, 0xef, 0xb5, 0xac, 0x3e, 0x6c, 0xdf, 0xb4, 0x92, 0x22, 0x2d, 0xce, 0xbf, 0x3e,
+ 0xe9, 0xe5, 0xf6, 0x29, 0x0e, 0x01, 0x4f, 0xd2, 0xd4, 0x45, 0x65, 0xb3, 0xbb, 0xf2, 0x4c, 0x16,
+ 0x37, 0x50, 0x3c, 0x6e, 0x49, 0x8c, 0x5a, 0x89, 0x2b, 0x1b, 0xab, 0xc4, 0x37, 0xd1, 0x46, 0xe9,
+ 0x3d, 0x0e, 0x85, 0xa2, 0x50, 0x73, 0xa1, 0x5e, 0x54, 0x37, 0xd7, 0x94, 0x17, 0x56, 0xc2, 0xd8,
+ 0xe5, 0x9f, 0xed, 0x4e, 0xae, 0x15, 0x42, 0x06, 0x0d, 0x74, 0x74, 0x5e, 0x24, 0x30, 0xce, 0xd1,
+ 0x9e, 0x50, 0xa3, 0x9a, 0xb8, 0xf0, 0x4a, 0x57, 0x69, 0x78, 0x67, 0x12, 0x84, 0x58, 0xbe, 0xc7,
+ 0x36, 0xaa, 0xee, 0x7c, 0x64, 0xa3, 0x76, 0xec, 0xff, 0x55, 0x41, 0x00, 0x2a, 0x44, 0x68, 0x4d,
+ 0xb6, 0x53, 0x9e, 0x1c, 0x95, 0xb7, 0xca, 0xdc, 0x7f, 0x7d, 0x74, 0x27, 0x5c, 0x8e, 0xa6, 0x84,
+ 0xb5, 0xac, 0x87, 0xa9, 0xf3, 0xff, 0x75, 0xf2, 0x34, 0xcd, 0x1a, 0x3b, 0x82, 0x2c, 0x2b, 0x4e,
+ 0x6a, 0x46, 0x30, 0xa6, 0x89, 0x86, 0x23, 0xac, 0xf8, 0xa5, 0x15, 0xe9, 0x0a, 0xaa, 0x1e, 0x9a,
+ 0xd7, 0x93, 0x6b, 0x28, 0xe4, 0x3b, 0xfd, 0x59, 0xc6, 0xed, 0x7c, 0x5f, 0xa5, 0x41, 0xcb, 0x51
+ };
+
+
+ // Test Transform() for 0 through 8 transformations.
+ for (size_t i = 0; i <= 8; ++i) {
+ uint32_t state[8];
+ std::copy(init, init + 8, state);
+ Transform(state, data + 1, i);
+ if (!std::equal(state, state + 8, result[i])) return false;
+ }
+
+ // Test TransformD64
+ unsigned char out[32];
+ TransformD64(out, data + 1);
+ if (!std::equal(out, out + 32, result_d64)) return false;
+
+ // Test TransformD64_4way, if available.
+ if (TransformD64_4way) {
+ unsigned char out[128];
+ TransformD64_4way(out, data + 1);
+ if (!std::equal(out, out + 128, result_d64)) return false;
+ }
+
+ // Test TransformD64_8way, if available.
+ if (TransformD64_8way) {
+ unsigned char out[256];
+ TransformD64_8way(out, data + 1);
+ if (!std::equal(out, out + 256, result_d64)) return false;
+ }
+
+ return true;
+}
+
+
+#if defined(USE_ASM) && (defined(__x86_64__) || defined(__amd64__) || defined(__i386__))
// We can't use cpuid.h's __get_cpuid as it does not support subleafs.
void inline cpuid(uint32_t leaf, uint32_t subleaf, uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d)
{
@@ -491,12 +543,14 @@ void inline cpuid(uint32_t leaf, uint32_t subleaf, uint32_t& a, uint32_t& b, uin
std::string SHA256AutoDetect()
{
std::string ret = "standard";
-#if defined(USE_ASM) && (defined(__x86_64__) || defined(__amd64__))
+#if defined(USE_ASM) && (defined(__x86_64__) || defined(__amd64__) || defined(__i386__))
uint32_t eax, ebx, ecx, edx;
cpuid(1, 0, eax, ebx, ecx, edx);
if ((ecx >> 19) & 1) {
+#if defined(__x86_64__) || defined(__amd64__)
Transform = sha256_sse4::Transform;
TransformD64 = TransformD64Wrapper<sha256_sse4::Transform>;
+#endif
#if defined(ENABLE_SSE41) && !defined(BUILD_BITCOIN_INTERNAL)
TransformD64_4way = sha256d64_sse41::Transform_4way;
ret = "sse4(1way+4way)";
@@ -513,7 +567,7 @@ std::string SHA256AutoDetect()
}
#endif
- assert(SelfTest(Transform));
+ assert(SelfTest());
return ret;
}
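
For context (this note and sketch are not part of the patch): each 32-byte slice of `result_d64` above is simply the double SHA256 of the corresponding 64-byte message, so the vectors can be cross-checked against the ordinary `CSHA256` hasher. A minimal sketch, assuming only the standard `CSHA256` streaming API:

```cpp
// Hypothetical cross-check of one D64 test vector; not part of the diff.
// TransformD64(out, in) is expected to equal SHA256(SHA256(in)) for a
// single 64-byte input, which is what this helper recomputes.
#include <crypto/sha256.h>

#include <cassert>
#include <cstring>

static void CheckD64Vector(const unsigned char* msg64, const unsigned char* expected32)
{
    unsigned char inner[CSHA256::OUTPUT_SIZE];
    unsigned char outer[CSHA256::OUTPUT_SIZE];
    CSHA256().Write(msg64, 64).Finalize(inner);             // first SHA256 pass over the 64-byte message
    CSHA256().Write(inner, sizeof(inner)).Finalize(outer);  // second pass over the 32-byte digest
    assert(std::memcmp(outer, expected32, CSHA256::OUTPUT_SIZE) == 0);
}

// Usage (inside SelfTest, hypothetically): CheckD64Vector(data + 1, result_d64);
```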
diff --git a/src/crypto/sha256_avx2.cpp b/src/crypto/sha256_avx2.cpp
index f45c1d4ab6..b338b06927 100644
--- a/src/crypto/sha256_avx2.cpp
+++ b/src/crypto/sha256_avx2.cpp
@@ -7,8 +7,8 @@
#include <x86intrin.h>
#endif
-#include "crypto/sha256.h"
-#include "crypto/common.h"
+#include <crypto/sha256.h>
+#include <crypto/common.h>
namespace sha256d64_avx2 {
namespace {
diff --git a/src/crypto/sha256_sse41.cpp b/src/crypto/sha256_sse41.cpp
index a11d658c70..be71dd8fb8 100644
--- a/src/crypto/sha256_sse41.cpp
+++ b/src/crypto/sha256_sse41.cpp
@@ -7,8 +7,8 @@
#include <x86intrin.h>
#endif
-#include "crypto/sha256.h"
-#include "crypto/common.h"
+#include <crypto/sha256.h>
+#include <crypto/common.h>
namespace sha256d64_sse41 {
namespace {
diff --git a/src/index/base.cpp b/src/index/base.cpp
new file mode 100644
index 0000000000..738166dc94
--- /dev/null
+++ b/src/index/base.cpp
@@ -0,0 +1,278 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <index/base.h>
+#include <init.h>
+#include <tinyformat.h>
+#include <ui_interface.h>
+#include <util.h>
+#include <validation.h>
+#include <warnings.h>
+
+constexpr char DB_BEST_BLOCK = 'B';
+
+constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
+constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
+
+template<typename... Args>
+static void FatalError(const char* fmt, const Args&... args)
+{
+ std::string strMessage = tfm::format(fmt, args...);
+ SetMiscWarning(strMessage);
+ LogPrintf("*** %s\n", strMessage);
+ uiInterface.ThreadSafeMessageBox(
+ "Error: A fatal internal error occurred, see debug.log for details",
+ "", CClientUIInterface::MSG_ERROR);
+ StartShutdown();
+}
+
+BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
+ CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate)
+{}
+
+bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
+{
+ bool success = Read(DB_BEST_BLOCK, locator);
+ if (!success) {
+ locator.SetNull();
+ }
+ return success;
+}
+
+bool BaseIndex::DB::WriteBestBlock(const CBlockLocator& locator)
+{
+ return Write(DB_BEST_BLOCK, locator);
+}
+
+BaseIndex::~BaseIndex()
+{
+ Interrupt();
+ Stop();
+}
+
+bool BaseIndex::Init()
+{
+ CBlockLocator locator;
+ if (!GetDB().ReadBestBlock(locator)) {
+ locator.SetNull();
+ }
+
+ LOCK(cs_main);
+ m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
+ m_synced = m_best_block_index.load() == chainActive.Tip();
+ return true;
+}
+
+static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev)
+{
+ AssertLockHeld(cs_main);
+
+ if (!pindex_prev) {
+ return chainActive.Genesis();
+ }
+
+ const CBlockIndex* pindex = chainActive.Next(pindex_prev);
+ if (pindex) {
+ return pindex;
+ }
+
+ return chainActive.Next(chainActive.FindFork(pindex_prev));
+}
+
+void BaseIndex::ThreadSync()
+{
+ const CBlockIndex* pindex = m_best_block_index.load();
+ if (!m_synced) {
+ auto& consensus_params = Params().GetConsensus();
+
+ int64_t last_log_time = 0;
+ int64_t last_locator_write_time = 0;
+ while (true) {
+ if (m_interrupt) {
+ WriteBestBlock(pindex);
+ return;
+ }
+
+ {
+ LOCK(cs_main);
+ const CBlockIndex* pindex_next = NextSyncBlock(pindex);
+ if (!pindex_next) {
+ WriteBestBlock(pindex);
+ m_best_block_index = pindex;
+ m_synced = true;
+ break;
+ }
+ pindex = pindex_next;
+ }
+
+ int64_t current_time = GetTime();
+ if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
+ LogPrintf("Syncing %s with block chain from height %d\n",
+ GetName(), pindex->nHeight);
+ last_log_time = current_time;
+ }
+
+ if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
+ WriteBestBlock(pindex);
+ last_locator_write_time = current_time;
+ }
+
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
+ FatalError("%s: Failed to read block %s from disk",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+ if (!WriteBlock(block, pindex)) {
+ FatalError("%s: Failed to write block %s to index database",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+ }
+ }
+
+ if (pindex) {
+ LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
+ } else {
+ LogPrintf("%s is enabled\n", GetName());
+ }
+}
+
+bool BaseIndex::WriteBestBlock(const CBlockIndex* block_index)
+{
+ LOCK(cs_main);
+ if (!GetDB().WriteBestBlock(chainActive.GetLocator(block_index))) {
+ return error("%s: Failed to write locator to disk", __func__);
+ }
+ return true;
+}
+
+void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted)
+{
+ if (!m_synced) {
+ return;
+ }
+
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (!best_block_index) {
+ if (pindex->nHeight != 0) {
+ FatalError("%s: First block connected is not the genesis block (height=%d)",
+ __func__, pindex->nHeight);
+ return;
+ }
+ } else {
+ // Ensure block connects to an ancestor of the current best block. This should be the case
+ // most of the time, but may not be immediately after the sync thread catches up and sets
+ // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
+ // in the ValidationInterface queue backlog even after the sync thread has caught up to the
+ // new chain tip. In this unlikely event, log a warning and let the queue clear.
+ if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
+ LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
+ "known best chain (tip=%s); not updating index\n",
+ __func__, pindex->GetBlockHash().ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+ }
+
+ if (WriteBlock(*block, pindex)) {
+ m_best_block_index = pindex;
+ } else {
+ FatalError("%s: Failed to write block %s to index",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+}
+
+void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
+{
+ if (!m_synced) {
+ return;
+ }
+
+ const uint256& locator_tip_hash = locator.vHave.front();
+ const CBlockIndex* locator_tip_index;
+ {
+ LOCK(cs_main);
+ locator_tip_index = LookupBlockIndex(locator_tip_hash);
+ }
+
+ if (!locator_tip_index) {
+ FatalError("%s: First block (hash=%s) in locator was not found",
+ __func__, locator_tip_hash.ToString());
+ return;
+ }
+
+ // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
+ // immediately after the sync thread catches up and sets m_synced. Consider the case where
+ // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
+ // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
+ // event, log a warning and let the queue clear.
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
+ LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
+ "chain (tip=%s); not writing index locator\n",
+ __func__, locator_tip_hash.ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+
+ if (!GetDB().WriteBestBlock(locator)) {
+ error("%s: Failed to write locator to disk", __func__);
+ }
+}
+
+bool BaseIndex::BlockUntilSyncedToCurrentChain()
+{
+ AssertLockNotHeld(cs_main);
+
+ if (!m_synced) {
+ return false;
+ }
+
+ {
+ // Skip the queue-draining stuff if we know we're caught up with
+ // chainActive.Tip().
+ LOCK(cs_main);
+ const CBlockIndex* chain_tip = chainActive.Tip();
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
+ return true;
+ }
+ }
+
+ LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
+ SyncWithValidationInterfaceQueue();
+ return true;
+}
+
+void BaseIndex::Interrupt()
+{
+ m_interrupt();
+}
+
+void BaseIndex::Start()
+{
+ // Need to register this ValidationInterface before running Init(), so that
+ // callbacks are not missed if Init sets m_synced to true.
+ RegisterValidationInterface(this);
+ if (!Init()) {
+ FatalError("%s: %s failed to initialize", __func__, GetName());
+ return;
+ }
+
+ m_thread_sync = std::thread(&TraceThread<std::function<void()>>, GetName(),
+ std::bind(&BaseIndex::ThreadSync, this));
+}
+
+void BaseIndex::Stop()
+{
+ UnregisterValidationInterface(this);
+
+ if (m_thread_sync.joinable()) {
+ m_thread_sync.join();
+ }
+}
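
As an aside (not in the diff), the methods defined above are meant to be driven in a fixed order: `Start()` registers the validation interface and launches the sync thread, client code may call `BlockUntilSyncedToCurrentChain()` before querying, and shutdown goes through `Interrupt()` followed by `Stop()`. A minimal sketch, with the call site assumed rather than taken from this patch:

```cpp
// Illustrative lifecycle only; where these calls actually live (init code,
// RPC handlers, shutdown code) is outside this excerpt.
void RunIndex(BaseIndex& index)
{
    index.Start(); // register ValidationInterface callbacks and spawn ThreadSync

    // ... while the node runs, code that needs results consistent with the
    // current tip can call index.BlockUntilSyncedToCurrentChain() first ...

    index.Interrupt(); // ask ThreadSync to stop at its next interrupt check
    index.Stop();      // unregister callbacks and join the sync thread
}
```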
diff --git a/src/index/base.h b/src/index/base.h
new file mode 100644
index 0000000000..04ee6e6cc2
--- /dev/null
+++ b/src/index/base.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_INDEX_BASE_H
+#define BITCOIN_INDEX_BASE_H
+
+#include <dbwrapper.h>
+#include <primitives/block.h>
+#include <primitives/transaction.h>
+#include <threadinterrupt.h>
+#include <uint256.h>
+#include <validationinterface.h>
+
+class CBlockIndex;
+
+/**
+ * Base class for indices of blockchain data. This implements
+ * CValidationInterface and ensures blocks are indexed sequentially according
+ * to their position in the active chain.
+ */
+class BaseIndex : public CValidationInterface
+{
+protected:
+ class DB : public CDBWrapper
+ {
+ public:
+ DB(const fs::path& path, size_t n_cache_size,
+ bool f_memory = false, bool f_wipe = false, bool f_obfuscate = false);
+
+ /// Read block locator of the chain that the index is in sync with.
+ bool ReadBestBlock(CBlockLocator& locator) const;
+
+ /// Write block locator of the chain that the index is in sync with.
+ bool WriteBestBlock(const CBlockLocator& locator);
+ };
+
+private:
+ /// Whether the index is in sync with the main chain. The flag is flipped
+ /// from false to true once, after which point this starts processing
+ /// ValidationInterface notifications to stay in sync.
+ std::atomic<bool> m_synced{false};
+
+ /// The last block in the chain that the index is in sync with.
+ std::atomic<const CBlockIndex*> m_best_block_index{nullptr};
+
+ std::thread m_thread_sync;
+ CThreadInterrupt m_interrupt;
+
+ /// Sync the index with the block index starting from the current best block.
+ /// Intended to be run in its own thread, m_thread_sync, and can be
+ /// interrupted with m_interrupt. Once the index gets in sync, the m_synced
+ /// flag is set and the BlockConnected ValidationInterface callback takes
+ /// over and the sync thread exits.
+ void ThreadSync();
+
+ /// Write the current chain block locator to the DB.
+ bool WriteBestBlock(const CBlockIndex* block_index);
+
+protected:
+ void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted) override;
+
+ void ChainStateFlushed(const CBlockLocator& locator) override;
+
+ /// Initialize internal state from the database and block index.
+ virtual bool Init();
+
+ /// Write index entries for a newly connected block.
+ virtual bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) { return true; }
+
+ virtual DB& GetDB() const = 0;
+
+ /// Get the name of the index for display in logs.
+ virtual const char* GetName() const = 0;
+
+public:
+ /// Destructor interrupts sync thread if running and blocks until it exits.
+ virtual ~BaseIndex();
+
+ /// Blocks the current thread until the index is caught up to the current
+ /// state of the block chain. This only blocks if the index has gotten in
+ /// sync once and only needs to process blocks in the ValidationInterface
+ /// queue. If the index is catching up from far behind, this method does
+ /// not block and immediately returns false.
+ bool BlockUntilSyncedToCurrentChain();
+
+ void Interrupt();
+
+ /// Start initializes the sync state and registers the instance as a
+ /// ValidationInterface so that it stays in sync with blockchain updates.
+ void Start();
+
+ /// Stops the instance from staying in sync with blockchain updates.
+ void Stop();
+};
+
+#endif // BITCOIN_INDEX_BASE_H
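
To illustrate the interface above (purely hypothetical, not part of the patch): a concrete index only needs to supply a database, a name, and a `WriteBlock()` implementation; `BaseIndex` handles syncing and the ValidationInterface plumbing. A minimal sketch, with the class name, datadir path, and cache-size parameter all assumed:

```cpp
// Hypothetical minimal subclass of BaseIndex; only the overridden methods
// mirror the interface declared in index/base.h above.
#include <index/base.h>
#include <util.h> // GetDataDir()

#include <memory>

class ExampleIndex final : public BaseIndex
{
private:
    class DB : public BaseIndex::DB
    {
    public:
        explicit DB(size_t n_cache_size) :
            BaseIndex::DB(GetDataDir() / "indexes" / "example", n_cache_size) {}
    };

    std::unique_ptr<DB> m_db;

protected:
    /// Derive and persist whatever per-block data this index tracks.
    bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) override { return true; }

    BaseIndex::DB& GetDB() const override { return *m_db; }

    const char* GetName() const override { return "exampleindex"; }

public:
    explicit ExampleIndex(size_t n_cache_size) : m_db(std::make_unique<DB>(n_cache_size)) {}
};
```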
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 3ff16b7664..e106b9b420 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -2,258 +2,261 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <chainparams.h>
#include <index/txindex.h>
#include <init.h>
-#include <tinyformat.h>
#include <ui_interface.h>
#include <util.h>
#include <validation.h>
-#include <warnings.h>
-constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
-constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
+#include <boost/thread.hpp>
+
+constexpr char DB_BEST_BLOCK = 'B';
+constexpr char DB_TXINDEX = 't';
+constexpr char DB_TXINDEX_BLOCK = 'T';
std::unique_ptr<TxIndex> g_txindex;
-template<typename... Args>
-static void FatalError(const char* fmt, const Args&... args)
+struct CDiskTxPos : public CDiskBlockPos
{
- std::string strMessage = tfm::format(fmt, args...);
- SetMiscWarning(strMessage);
- LogPrintf("*** %s\n", strMessage);
- uiInterface.ThreadSafeMessageBox(
- "Error: A fatal internal error occurred, see debug.log for details",
- "", CClientUIInterface::MSG_ERROR);
- StartShutdown();
-}
+ unsigned int nTxOffset; // after header
-TxIndex::TxIndex(std::unique_ptr<TxIndexDB> db) :
- m_db(std::move(db)), m_synced(false), m_best_block_index(nullptr)
-{}
+ ADD_SERIALIZE_METHODS;
-TxIndex::~TxIndex()
-{
- Interrupt();
- Stop();
-}
-
-bool TxIndex::Init()
-{
- LOCK(cs_main);
-
- // Attempt to migrate txindex from the old database to the new one. Even if
- // chain_tip is null, the node could be reindexing and we still want to
- // delete txindex records in the old database.
- if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
- return false;
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ READWRITEAS(CDiskBlockPos, *this);
+ READWRITE(VARINT(nTxOffset));
}
- CBlockLocator locator;
- if (!m_db->ReadBestBlock(locator)) {
- locator.SetNull();
+ CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
}
- m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
- m_synced = m_best_block_index.load() == chainActive.Tip();
- return true;
-}
-
-static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev)
-{
- AssertLockHeld(cs_main);
-
- if (!pindex_prev) {
- return chainActive.Genesis();
+ CDiskTxPos() {
+ SetNull();
}
- const CBlockIndex* pindex = chainActive.Next(pindex_prev);
- if (pindex) {
- return pindex;
+ void SetNull() {
+ CDiskBlockPos::SetNull();
+ nTxOffset = 0;
}
-
- return chainActive.Next(chainActive.FindFork(pindex_prev));
-}
-
-void TxIndex::ThreadSync()
+};
+
+/**
+ * Access to the txindex database (indexes/txindex/)
+ *
+ * The database stores a block locator of the chain the database is synced to
+ * so that the TxIndex can efficiently determine the point it last stopped at.
+ * A locator is used instead of a simple hash of the chain tip because blocks
+ * and block index entries may not be flushed to disk until after this database
+ * is updated.
+ */
+class TxIndex::DB : public BaseIndex::DB
{
- const CBlockIndex* pindex = m_best_block_index.load();
- if (!m_synced) {
- auto& consensus_params = Params().GetConsensus();
-
- int64_t last_log_time = 0;
- int64_t last_locator_write_time = 0;
- while (true) {
- if (m_interrupt) {
- WriteBestBlock(pindex);
- return;
- }
+public:
+ explicit DB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
- {
- LOCK(cs_main);
- const CBlockIndex* pindex_next = NextSyncBlock(pindex);
- if (!pindex_next) {
- WriteBestBlock(pindex);
- m_best_block_index = pindex;
- m_synced = true;
- break;
- }
- pindex = pindex_next;
- }
+ /// Read the disk location of the transaction data with the given hash. Returns false if the
+ /// transaction hash is not indexed.
+ bool ReadTxPos(const uint256& txid, CDiskTxPos& pos) const;
- int64_t current_time = GetTime();
- if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
- LogPrintf("Syncing txindex with block chain from height %d\n", pindex->nHeight);
- last_log_time = current_time;
- }
+ /// Write a batch of transaction positions to the DB.
+ bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
- if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
- WriteBestBlock(pindex);
- last_locator_write_time = current_time;
- }
+ /// Migrate txindex data from the block tree DB, where it may still reside for older nodes
+ /// that have not been upgraded yet, to the new database.
+ bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
+};
- CBlock block;
- if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
- FatalError("%s: Failed to read block %s from disk",
- __func__, pindex->GetBlockHash().ToString());
- return;
- }
- if (!WriteBlock(block, pindex)) {
- FatalError("%s: Failed to write block %s to tx index database",
- __func__, pindex->GetBlockHash().ToString());
- return;
- }
- }
- }
+TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe) :
+ BaseIndex::DB(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe)
+{}
- if (pindex) {
- LogPrintf("txindex is enabled at height %d\n", pindex->nHeight);
- } else {
- LogPrintf("txindex is enabled\n");
- }
+bool TxIndex::DB::ReadTxPos(const uint256 &txid, CDiskTxPos& pos) const
+{
+ return Read(std::make_pair(DB_TXINDEX, txid), pos);
}
-bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+bool TxIndex::DB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos)
{
- CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
- std::vector<std::pair<uint256, CDiskTxPos>> vPos;
- vPos.reserve(block.vtx.size());
- for (const auto& tx : block.vtx) {
- vPos.emplace_back(tx->GetHash(), pos);
- pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ CDBBatch batch(*this);
+ for (const auto& tuple : v_pos) {
+ batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
}
- return m_db->WriteTxs(vPos);
+ return WriteBatch(batch);
}
-bool TxIndex::WriteBestBlock(const CBlockIndex* block_index)
+/*
+ * Safely persist a transfer of data from the old txindex database to the new one, and compact the
+ * range of keys updated. This is used internally by MigrateData.
+ */
+static void WriteTxIndexMigrationBatches(CDBWrapper& newdb, CDBWrapper& olddb,
+ CDBBatch& batch_newdb, CDBBatch& batch_olddb,
+ const std::pair<unsigned char, uint256>& begin_key,
+ const std::pair<unsigned char, uint256>& end_key)
{
- LOCK(cs_main);
- if (!m_db->WriteBestBlock(chainActive.GetLocator(block_index))) {
- return error("%s: Failed to write locator to disk", __func__);
- }
- return true;
+ // Sync new DB changes to disk before deleting from old DB.
+ newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
+ olddb.WriteBatch(batch_olddb);
+ olddb.CompactRange(begin_key, end_key);
+
+ batch_newdb.Clear();
+ batch_olddb.Clear();
}
-void TxIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted)
+bool TxIndex::DB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
{
- if (!m_synced) {
- return;
- }
-
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (!best_block_index) {
- if (pindex->nHeight != 0) {
- FatalError("%s: First block connected is not the genesis block (height=%d)",
- __func__, pindex->nHeight);
- return;
+ // The prior implementation of txindex was always in sync with block index
+ // and presence was indicated with a boolean DB flag. If the flag is set,
+ // this means the txindex from a previous version is valid and in sync with
+ // the chain tip. The first step of the migration is to unset the flag and
+ // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
+ // index entries are copied over in batches to the new database. Finally,
+ // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
+ // written to the new database.
+ //
+ // Unsetting the boolean flag ensures that if the node is downgraded to a
+ // previous version, it will not see a corrupted, partially migrated index
+ // -- it will see that the txindex is disabled. When the node is upgraded
+ // again, the migration will pick up where it left off and sync to the block
+ // with hash DB_TXINDEX_BLOCK.
+ bool f_legacy_flag = false;
+ block_tree_db.ReadFlag("txindex", f_legacy_flag);
+ if (f_legacy_flag) {
+ if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
+ return error("%s: cannot write block indicator", __func__);
}
- } else {
- // Ensure block connects to an ancestor of the current best block. This should be the case
- // most of the time, but may not be immediately after the sync thread catches up and sets
- // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
- // in the ValidationInterface queue backlog even after the sync thread has caught up to the
- // new chain tip. In this unlikely event, log a warning and let the queue clear.
- if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
- LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
- "known best chain (tip=%s); not updating txindex\n",
- __func__, pindex->GetBlockHash().ToString(),
- best_block_index->GetBlockHash().ToString());
- return;
+ if (!block_tree_db.WriteFlag("txindex", false)) {
+ return error("%s: cannot write block index db flag", __func__);
}
}
- if (WriteBlock(*block, pindex)) {
- m_best_block_index = pindex;
- } else {
- FatalError("%s: Failed to write block %s to txindex",
- __func__, pindex->GetBlockHash().ToString());
- return;
+ CBlockLocator locator;
+ if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
+ return true;
}
-}
-void TxIndex::ChainStateFlushed(const CBlockLocator& locator)
-{
- if (!m_synced) {
- return;
- }
+ int64_t count = 0;
+ LogPrintf("Upgrading txindex database... [0%%]\n");
+ uiInterface.ShowProgress(_("Upgrading txindex database"), 0, true);
+ int report_done = 0;
+ const size_t batch_size = 1 << 24; // 16 MiB
+
+ CDBBatch batch_newdb(*this);
+ CDBBatch batch_olddb(block_tree_db);
+
+ std::pair<unsigned char, uint256> key;
+ std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
+ std::pair<unsigned char, uint256> prev_key = begin_key;
+
+ bool interrupted = false;
+ std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
+ for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
+ boost::this_thread::interruption_point();
+ if (ShutdownRequested()) {
+ interrupted = true;
+ break;
+ }
- const uint256& locator_tip_hash = locator.vHave.front();
- const CBlockIndex* locator_tip_index;
- {
- LOCK(cs_main);
- locator_tip_index = LookupBlockIndex(locator_tip_hash);
- }
+ if (!cursor->GetKey(key)) {
+ return error("%s: cannot get key from valid cursor", __func__);
+ }
+ if (key.first != DB_TXINDEX) {
+ break;
+ }
- if (!locator_tip_index) {
- FatalError("%s: First block (hash=%s) in locator was not found",
- __func__, locator_tip_hash.ToString());
- return;
+ // Log progress every 10%.
+ if (++count % 256 == 0) {
+ // Since txids are uniformly random and traversed in increasing order, the high 16 bits
+ // of the hash can be used to estimate the current progress.
+ const uint256& txid = key.second;
+ uint32_t high_nibble =
+ (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
+ (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
+ int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
+
+ uiInterface.ShowProgress(_("Upgrading txindex database"), percentage_done, true);
+ if (report_done < percentage_done/10) {
+ LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
+ report_done = percentage_done/10;
+ }
+ }
+
+ CDiskTxPos value;
+ if (!cursor->GetValue(value)) {
+ return error("%s: cannot parse txindex record", __func__);
+ }
+ batch_newdb.Write(key, value);
+ batch_olddb.Erase(key);
+
+ if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
+ // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
+ // because LevelDB iterators are guaranteed to provide a consistent view of the
+ // underlying data, like a lightweight snapshot.
+ WriteTxIndexMigrationBatches(*this, block_tree_db,
+ batch_newdb, batch_olddb,
+ prev_key, key);
+ prev_key = key;
+ }
}
- // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
- // immediately after the sync thread catches up and sets m_synced. Consider the case where
- // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
- // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
- // event, log a warning and let the queue clear.
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
- LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
- "chain (tip=%s); not writing txindex locator\n",
- __func__, locator_tip_hash.ToString(),
- best_block_index->GetBlockHash().ToString());
- return;
+ // If these final DB batches complete the migration, write the best block
+ // hash marker to the new database and delete from the old one. This signals
+ // that the former is fully caught up to that point in the blockchain and
+ // that all txindex entries have been removed from the latter.
+ if (!interrupted) {
+ batch_olddb.Erase(DB_TXINDEX_BLOCK);
+ batch_newdb.Write(DB_BEST_BLOCK, locator);
}
- if (!m_db->WriteBestBlock(locator)) {
- error("%s: Failed to write locator to disk", __func__);
+ WriteTxIndexMigrationBatches(*this, block_tree_db,
+ batch_newdb, batch_olddb,
+ begin_key, key);
+
+ if (interrupted) {
+ LogPrintf("[CANCELLED].\n");
+ return false;
}
+
+ uiInterface.ShowProgress("", 100, false);
+
+ LogPrintf("[DONE].\n");
+ return true;
}
-bool TxIndex::BlockUntilSyncedToCurrentChain()
+TxIndex::TxIndex(size_t n_cache_size, bool f_memory, bool f_wipe)
+ : m_db(MakeUnique<TxIndex::DB>(n_cache_size, f_memory, f_wipe))
+{}
+
+TxIndex::~TxIndex() {}
+
+bool TxIndex::Init()
{
- AssertLockNotHeld(cs_main);
+ LOCK(cs_main);
- if (!m_synced) {
+ // Attempt to migrate txindex from the old database to the new one. Even if
+ // the chain tip is null (e.g. because the node is reindexing), we still want
+ // to delete any leftover txindex records in the old database.
+ if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
return false;
}
- {
- // Skip the queue-draining stuff if we know we're caught up with
- // chainActive.Tip().
- LOCK(cs_main);
- const CBlockIndex* chain_tip = chainActive.Tip();
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
- return true;
- }
- }
+ return BaseIndex::Init();
+}
- LogPrintf("%s: txindex is catching up on block notifications\n", __func__);
- SyncWithValidationInterfaceQueue();
- return true;
+bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+{
+ CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
+ std::vector<std::pair<uint256, CDiskTxPos>> vPos;
+ vPos.reserve(block.vtx.size());
+ for (const auto& tx : block.vtx) {
+ vPos.emplace_back(tx->GetHash(), pos);
+ pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ }
+ return m_db->WriteTxs(vPos);
}
+BaseIndex::DB& TxIndex::GetDB() const { return *m_db; }
+
bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRef& tx) const
{
CDiskTxPos postx;
@@ -281,31 +284,3 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe
block_hash = header.GetHash();
return true;
}
-
-void TxIndex::Interrupt()
-{
- m_interrupt();
-}
-
-void TxIndex::Start()
-{
- // Need to register this ValidationInterface before running Init(), so that
- // callbacks are not missed if Init sets m_synced to true.
- RegisterValidationInterface(this);
- if (!Init()) {
- FatalError("%s: txindex failed to initialize", __func__);
- return;
- }
-
- m_thread_sync = std::thread(&TraceThread<std::function<void()>>, "txindex",
- std::bind(&TxIndex::ThreadSync, this));
-}
-
-void TxIndex::Stop()
-{
- UnregisterValidationInterface(this);
-
- if (m_thread_sync.joinable()) {
- m_thread_sync.join();
- }
-}
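
The migration loop above estimates progress purely from the first two bytes of the txid under the cursor. A standalone sketch of that arithmetic follows (the sample bytes and the surrounding main() are invented for illustration):

    // Standalone check of the progress estimate used in MigrateData above.
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Suppose the txid currently under the cursor starts with bytes 0x40 0x00.
        // Txids are uniformly distributed and iterated in increasing key order, so
        // the high 16 bits (0x4000 = 16384) imply roughly 16384/65536 = 25% done.
        const unsigned char byte0 = 0x40;
        const unsigned char byte1 = 0x00;
        const uint32_t high_bits = (static_cast<uint32_t>(byte0) << 8) +
                                   (static_cast<uint32_t>(byte1) << 0);
        const int percentage_done = (int)(high_bits * 100.0 / 65536.0 + 0.5);
        std::printf("estimated progress: %d%%\n", percentage_done); // prints 25
        return 0;
    }
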
diff --git a/src/index/txindex.h b/src/index/txindex.h
index 4937bd64e9..8202c3c951 100644
--- a/src/index/txindex.h
+++ b/src/index/txindex.h
@@ -5,70 +5,39 @@
#ifndef BITCOIN_INDEX_TXINDEX_H
#define BITCOIN_INDEX_TXINDEX_H
-#include <primitives/block.h>
-#include <primitives/transaction.h>
-#include <threadinterrupt.h>
+#include <chain.h>
+#include <index/base.h>
#include <txdb.h>
-#include <uint256.h>
-#include <validationinterface.h>
-
-class CBlockIndex;
/**
* TxIndex is used to look up transactions included in the blockchain by hash.
* The index is written to a LevelDB database and records the filesystem
* location of each transaction by transaction hash.
*/
-class TxIndex final : public CValidationInterface
+class TxIndex final : public BaseIndex
{
-private:
- const std::unique_ptr<TxIndexDB> m_db;
-
- /// Whether the index is in sync with the main chain. The flag is flipped
- /// from false to true once, after which point this starts processing
- /// ValidationInterface notifications to stay in sync.
- std::atomic<bool> m_synced;
-
- /// The last block in the chain that the TxIndex is in sync with.
- std::atomic<const CBlockIndex*> m_best_block_index;
-
- std::thread m_thread_sync;
- CThreadInterrupt m_interrupt;
-
- /// Initialize internal state from the database and block index.
- bool Init();
+protected:
+ class DB;
- /// Sync the tx index with the block index starting from the current best
- /// block. Intended to be run in its own thread, m_thread_sync, and can be
- /// interrupted with m_interrupt. Once the txindex gets in sync, the
- /// m_synced flag is set and the BlockConnected ValidationInterface callback
- /// takes over and the sync thread exits.
- void ThreadSync();
+private:
+ const std::unique_ptr<DB> m_db;
- /// Write update index entries for a newly connected block.
- bool WriteBlock(const CBlock& block, const CBlockIndex* pindex);
+protected:
+ /// Override base class init to migrate from old database.
+ bool Init() override;
- /// Write the current chain block locator to the DB.
- bool WriteBestBlock(const CBlockIndex* block_index);
+ bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) override;
-protected:
- void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted) override;
+ BaseIndex::DB& GetDB() const override;
- void ChainStateFlushed(const CBlockLocator& locator) override;
+ const char* GetName() const override { return "txindex"; }
public:
- /// Constructs the TxIndex, which becomes available to be queried.
- explicit TxIndex(std::unique_ptr<TxIndexDB> db);
-
- /// Destructor interrupts sync thread if running and blocks until it exits.
- ~TxIndex();
+ /// Constructs the index, which becomes available to be queried.
+ explicit TxIndex(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
- /// Blocks the current thread until the transaction index is caught up to
- /// the current state of the block chain. This only blocks if the index has gotten in sync once
- /// and only needs to process blocks in the ValidationInterface queue. If the index is catching
- /// up from far behind, this method does not block and immediately returns false.
- bool BlockUntilSyncedToCurrentChain();
+ // Destructor is declared because this class contains a unique_ptr to an incomplete type.
+ ~TxIndex() override;
/// Look up a transaction by hash.
///
@@ -77,15 +46,6 @@ public:
/// @param[out] tx The transaction itself.
/// @return true if transaction is found, false otherwise
bool FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRef& tx) const;
-
- void Interrupt();
-
- /// Start initializes the sync state and registers the instance as a
- /// ValidationInterface so that it stays in sync with blockchain updates.
- void Start();
-
- /// Stops the instance from staying in sync with blockchain updates.
- void Stop();
};
/// The global transaction index, used in GetTransaction. May be null.
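
For orientation, a caller-side sketch of the reworked interface: the constructor now takes cache parameters directly (as in txindex_tests.cpp further down), while Start(), Stop() and BlockUntilSyncedToCurrentChain() are assumed to be inherited from BaseIndex rather than declared here.

    // Sketch only; assumes the usual Bitcoin Core headers and an initialized chain state.
    #include <index/txindex.h>

    void LookupExample(const uint256& txid)
    {
        TxIndex txindex(1 << 20 /* cache size */, true /* in-memory */);
        txindex.Start();                          // register the ValidationInterface and begin syncing
        txindex.BlockUntilSyncedToCurrentChain(); // wait for queued notifications to drain

        CTransactionRef tx;
        uint256 block_hash;
        if (txindex.FindTx(txid, block_hash, tx)) {
            // tx is the indexed transaction; block_hash identifies its block.
        }
        txindex.Stop();
    }
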
diff --git a/src/init.cpp b/src/init.cpp
index b4e2eec0d2..1b5507703c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -76,7 +76,7 @@ std::unique_ptr<PeerLogicValidation> peerLogic;
class DummyWalletInit : public WalletInitInterface {
public:
- void AddWalletOptions() const override {}
+ void AddWalletOptions() const override;
bool ParameterInteraction() const override {return true;}
void RegisterRPC(CRPCTable &) const override {}
bool Verify() const override {return true;}
@@ -87,6 +87,15 @@ public:
void Close() const override {}
};
+void DummyWalletInit::AddWalletOptions() const
+{
+ std::vector<std::string> opts = {"-addresstype", "-changetype", "-disablewallet", "-discardfee=<amt>", "-fallbackfee=<amt>",
+ "-keypool=<n>", "-mintxfee=<amt>", "-paytxfee=<amt>", "-rescan", "-salvagewallet", "-spendzeroconfchange", "-txconfirmtarget=<n>",
+ "-upgradewallet", "-wallet=<path>", "-walletbroadcast", "-walletdir=<dir>", "-walletnotify=<cmd>", "-walletrbf", "-zapwallettxes=<mode>",
+ "-dblogsize=<n>", "-flushwallet", "-privdb", "-walletrejectlongchains"};
+ gArgs.AddHiddenArgs(opts);
+}
+
const WalletInitInterface& g_wallet_init_interface = DummyWalletInit();
#endif
@@ -348,6 +357,12 @@ void SetupServerArgs()
const auto defaultChainParams = CreateChainParams(CBaseChainParams::MAIN);
const auto testnetChainParams = CreateChainParams(CBaseChainParams::TESTNET);
+ // Hidden Options
+ std::vector<std::string> hidden_args = {"-rpcssl", "-benchmark", "-h", "-help", "-socks", "-tor", "-debugnet", "-whitelistalwaysrelay",
+ "-prematurewitness", "-walletprematurewitness", "-promiscuousmempoolflags", "-blockminsize", "-dbcrashratio", "-forcecompactdb", "-usehd",
+ // GUI args. These will be overwritten by SetupUIArgs for the GUI
+ "-allowselfsignedrootcertificates", "-choosedatadir", "-lang=<lang>", "-min", "-resetguisettings", "-rootcertificates=<file>", "-splash", "-uiplatform"};
+
// Set all of the args and their help
// When adding new options to the categories, please keep and ensure alphabetical ordering.
gArgs.AddArg("-?", "Print this help message and exit", false, OptionsCategory::OPTIONS);
@@ -375,6 +390,8 @@ void SetupServerArgs()
gArgs.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), false, OptionsCategory::OPTIONS);
#ifndef WIN32
gArgs.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), false, OptionsCategory::OPTIONS);
+#else
+ hidden_args.emplace_back("-pid");
#endif
gArgs.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. "
"Warning: Reverting this setting requires re-downloading the entire blockchain. "
@@ -383,6 +400,8 @@ void SetupServerArgs()
gArgs.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks", false, OptionsCategory::OPTIONS);
#ifndef WIN32
gArgs.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", false, OptionsCategory::OPTIONS);
+#else
+ hidden_args.emplace_back("-sysperms");
#endif
gArgs.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), false, OptionsCategory::OPTIONS);
@@ -421,6 +440,8 @@ void SetupServerArgs()
#else
gArgs.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), false, OptionsCategory::CONNECTION);
#endif
+#else
+ hidden_args.emplace_back("-upnp");
#endif
gArgs.AddArg("-whitebind=<addr>", "Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6", false, OptionsCategory::CONNECTION);
gArgs.AddArg("-whitelist=<IP address or network>", "Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times."
@@ -433,6 +454,11 @@ void SetupServerArgs()
gArgs.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", false, OptionsCategory::ZMQ);
gArgs.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", false, OptionsCategory::ZMQ);
gArgs.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", false, OptionsCategory::ZMQ);
+#else
+ hidden_args.emplace_back("-zmqpubhashblock=<address>");
+ hidden_args.emplace_back("-zmqpubhashtx=<address>");
+ hidden_args.emplace_back("-zmqpubrawblock=<address>");
+ hidden_args.emplace_back("-zmqpubrawtx=<address>");
#endif
gArgs.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), true, OptionsCategory::DEBUG_TEST);
@@ -500,22 +526,14 @@ void SetupServerArgs()
gArgs.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), true, OptionsCategory::RPC);
gArgs.AddArg("-server", "Accept command line and JSON-RPC commands", false, OptionsCategory::RPC);
- // Hidden options
- gArgs.AddArg("-rpcssl", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-benchmark", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-h", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-help", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-socks", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-tor", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-debugnet", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-whitelistalwaysrelay", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-prematurewitness", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-walletprematurewitness", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-promiscuousmempoolflags", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-blockminsize", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-dbcrashratio", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-forcecompactdb", "", false, OptionsCategory::HIDDEN);
- gArgs.AddArg("-usehd", "", false, OptionsCategory::HIDDEN);
+#if HAVE_DECL_DAEMON
+ gArgs.AddArg("-daemon", "Run in the background as a daemon and accept commands", false, OptionsCategory::OPTIONS);
+#else
+ hidden_args.emplace_back("-daemon");
+#endif
+
+ // Add the hidden options
+ gArgs.AddHiddenArgs(hidden_args);
}
std::string LicenseInfo()
@@ -614,7 +632,7 @@ static void CleanupBlockRevFiles()
// keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
// start removing block files.
int nContigCounter = 0;
- for (const std::pair<std::string, fs::path>& item : mapBlockFiles) {
+ for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
if (atoi(item.first) == nContigCounter) {
nContigCounter++;
continue;
@@ -1606,8 +1624,7 @@ bool AppInitMain()
// ********************************************************* Step 8: start indexers
if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
- auto txindex_db = MakeUnique<TxIndexDB>(nTxIndexCache, false, fReindex);
- g_txindex = MakeUnique<TxIndex>(std::move(txindex_db));
+ g_txindex = MakeUnique<TxIndex>(nTxIndexCache, false, fReindex);
g_txindex->Start();
}
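
The pattern above replaces per-option AddArg(..., OptionsCategory::HIDDEN) calls: options that are compiled out on the current platform, or intentionally undocumented, are collected into a vector and registered once. A condensed sketch, using the -sysperms option from this hunk:

    std::vector<std::string> hidden_args;
    #ifndef WIN32
        gArgs.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", false, OptionsCategory::OPTIONS);
    #else
        hidden_args.emplace_back("-sysperms"); // still accepted on Windows, just not documented
    #endif
        gArgs.AddHiddenArgs(hidden_args);      // one call registers every hidden option
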
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index fc05dd2ad2..de456e87f4 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -561,7 +561,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
} // namespace
// This function is used for testing the stale tip eviction logic, see
-// DoS_tests.cpp
+// denialofservice_tests.cpp
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
{
LOCK(cs_main);
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index 5963bf371a..aac3fe5c14 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -54,7 +54,7 @@ bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
return (txout.nValue < GetDustThreshold(txout, dustRelayFeeIn));
}
-bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType, const bool witnessEnabled)
+bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType)
{
std::vector<std::vector<unsigned char> > vSolutions;
if (!Solver(scriptPubKey, whichType, vSolutions))
@@ -73,13 +73,10 @@ bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType, const bool w
(!fAcceptDatacarrier || scriptPubKey.size() > nMaxDatacarrierBytes))
return false;
- else if (!witnessEnabled && (whichType == TX_WITNESS_V0_KEYHASH || whichType == TX_WITNESS_V0_SCRIPTHASH))
- return false;
-
return whichType != TX_NONSTANDARD && whichType != TX_WITNESS_UNKNOWN;
}
-bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnessEnabled)
+bool IsStandardTx(const CTransaction& tx, std::string& reason)
{
if (tx.nVersion > CTransaction::MAX_STANDARD_VERSION || tx.nVersion < 1) {
reason = "version";
@@ -118,7 +115,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnes
unsigned int nDataOut = 0;
txnouttype whichType;
for (const CTxOut& txout : tx.vout) {
- if (!::IsStandard(txout.scriptPubKey, whichType, witnessEnabled)) {
+ if (!::IsStandard(txout.scriptPubKey, whichType)) {
reason = "scriptpubkey";
return false;
}
diff --git a/src/policy/policy.h b/src/policy/policy.h
index 5ce019df4c..035627bd60 100644
--- a/src/policy/policy.h
+++ b/src/policy/policy.h
@@ -79,12 +79,12 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFee);
bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFee);
-bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType, const bool witnessEnabled = false);
+bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType);
/**
* Check for standard transaction types
* @return True if all outputs (scriptPubKeys) use only standard transaction forms
*/
-bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnessEnabled = false);
+bool IsStandardTx(const CTransaction& tx, std::string& reason);
/**
* Check for standard transaction types
* @param[in] mapInputs Map of previous transactions that have outputs we're spending
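
Call sites simply drop the flag; with the removed branch gone, witness outputs are treated as standard unconditionally. A minimal caller sketch (tx and reason are assumed to exist in the caller):

    std::string reason;
    // Before: IsStandardTx(tx, reason, /* witnessEnabled */ true);
    if (!IsStandardTx(tx, reason)) {
        // reject; `reason` is e.g. "version" or "scriptpubkey"
    }
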
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 1c846d38ec..360615ec56 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -388,11 +388,6 @@ struct CMutableTransaction
*/
uint256 GetHash() const;
- friend bool operator==(const CMutableTransaction& a, const CMutableTransaction& b)
- {
- return a.GetHash() == b.GetHash();
- }
-
bool HasWitness() const
{
for (size_t i = 0; i < vin.size(); i++) {
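
Any code that relied on the removed comparison can reproduce it explicitly, since the old operator was defined purely in terms of GetHash() (sketch; the two transactions are hypothetical):

    CMutableTransaction tx_a;
    CMutableTransaction tx_b;
    const bool same = (tx_a.GetHash() == tx_b.GetHash()); // what the removed operator== did
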
diff --git a/src/qt/README.md b/src/qt/README.md
index d8acf96ceb..bf8139666c 100644
--- a/src/qt/README.md
+++ b/src/qt/README.md
@@ -4,7 +4,7 @@ The current precise version for Qt 5 is specified in [qt.mk](/depends/packages/q
## Compile and run
-See build instructions ([OSX](/doc/build-osx.md), [Windows](/doc/build-windows.md), [Unix](/doc/build-unix.md), etc).
+See build instructions ([macOS](/doc/build-osx.md), [Windows](/doc/build-windows.md), [Unix](/doc/build-unix.md), etc).
To run:
@@ -65,7 +65,7 @@ Represents the view to a single wallet.
* `guiconstants.h`: UI colors, app name, etc
* `guiutil.h`: several helper functions
* `macdockiconhandler.(h/cpp)`
-* `macnotificationhandler.(h/cpp)`: display notifications in OSX
+* `macnotificationhandler.(h/cpp)`: display notifications in macOS
## Contribute
@@ -81,9 +81,9 @@ the UI layout.
Download and install the community edition of [Qt Creator](https://www.qt.io/download/).
Uncheck everything except Qt Creator during the installation process.
-Instructions for OSX:
+Instructions for macOS:
-1. Make sure you installed everything through Homebrew mentioned in the [OSX build instructions](/doc/build-osx.md)
+1. Make sure you installed everything through Homebrew mentioned in the [macOS build instructions](/doc/build-osx.md)
2. Use `./configure` with the `--enable-debug` flag
3. In Qt Creator do "New Project" -> Import Project -> Import Existing Project
4. Enter "bitcoin-qt" as project name, enter src/qt as location
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index a3721991ee..8f34e6bc82 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -38,6 +38,69 @@
</widget>
</item>
<item>
+ <spacer name="horizontalSpacer_0_Main">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>5</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_Prune">
+ <item>
+ <widget class="QCheckBox" name="prune">
+ <property name="toolTip">
+ <string>Disables some advanced features but all blocks will still be fully validated. Reverting this setting requires re-downloading the entire blockchain. Actual disk usage may be somewhat higher.</string>
+ </property>
+ <property name="text">
+ <string>Prune &amp;block storage to</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QSpinBox" name="pruneSize"/>
+ </item>
+ <item>
+ <widget class="QLabel" name="pruneSizeUnitLabel">
+ <property name="text">
+ <string>GB</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <spacer name="horizontalSpacer_Main_Prune">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>20</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <widget class="QLabel" name="pruneWarning">
+ <property name="text">
+ <string>Reverting this setting requires re-downloading the entire blockchain.</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
<layout class="QHBoxLayout" name="horizontalLayout_2_Main">
<item>
<widget class="QLabel" name="databaseCacheLabel">
@@ -81,7 +144,7 @@
</layout>
</item>
<item>
- <layout class="QHBoxLayout" name="horizontalLayout_3_Main">
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_VerifyLabel">
<item>
<widget class="QLabel" name="threadsScriptVerifLabel">
<property name="text">
@@ -103,7 +166,7 @@
</widget>
</item>
<item>
- <spacer name="horizontalSpacer_3_Main">
+ <spacer name="horizontalSpacer_Main_Threads">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index c0ddb89b40..108aa4f99c 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -36,8 +36,17 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) :
/* Main elements init */
ui->databaseCache->setMinimum(nMinDbCache);
ui->databaseCache->setMaximum(nMaxDbCache);
+ static const uint64_t GiB = 1024 * 1024 * 1024;
+ static const uint64_t nMinDiskSpace = MIN_DISK_SPACE_FOR_BLOCK_FILES / GiB +
+ ((MIN_DISK_SPACE_FOR_BLOCK_FILES % GiB) ? 1 : 0);
+ ui->pruneSize->setMinimum(nMinDiskSpace);
ui->threadsScriptVerif->setMinimum(-GetNumCores());
ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS);
+ ui->pruneWarning->setVisible(false);
+ ui->pruneWarning->setStyleSheet("QLabel { color: red; }");
+
+ ui->pruneSize->setEnabled(false);
+ connect(ui->prune, SIGNAL(toggled(bool)), ui->pruneSize, SLOT(setEnabled(bool)));
/* Network elements init */
#ifndef USE_UPNP
@@ -157,6 +166,9 @@ void OptionsDialog::setModel(OptionsModel *_model)
/* warn when one of the following settings changes by user action (placed here so init via mapper doesn't trigger them) */
/* Main */
+ connect(ui->prune, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
+ connect(ui->prune, SIGNAL(clicked(bool)), this, SLOT(togglePruneWarning(bool)));
+ connect(ui->pruneSize, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
connect(ui->databaseCache, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
connect(ui->threadsScriptVerif, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
/* Wallet */
@@ -176,6 +188,8 @@ void OptionsDialog::setMapper()
mapper->addMapping(ui->bitcoinAtStartup, OptionsModel::StartAtStartup);
mapper->addMapping(ui->threadsScriptVerif, OptionsModel::ThreadsScriptVerif);
mapper->addMapping(ui->databaseCache, OptionsModel::DatabaseCache);
+ mapper->addMapping(ui->prune, OptionsModel::Prune);
+ mapper->addMapping(ui->pruneSize, OptionsModel::PruneSize);
/* Wallet */
mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange);
@@ -266,6 +280,11 @@ void OptionsDialog::on_hideTrayIcon_stateChanged(int fState)
}
}
+void OptionsDialog::togglePruneWarning(bool enabled)
+{
+ ui->pruneWarning->setVisible(!ui->pruneWarning->isVisible());
+}
+
void OptionsDialog::showRestartWarning(bool fPersistent)
{
ui->statusLabel->setStyleSheet("QLabel { color: red; }");
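
The spinbox minimum above rounds the minimum block-file disk space up to whole gigabytes. A self-contained check of that arithmetic, assuming the constant is roughly 550 MiB as defined in validation.h:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024; // assumed value
        const uint64_t GiB = 1024 * 1024 * 1024;
        const uint64_t min_gb = MIN_DISK_SPACE_FOR_BLOCK_FILES / GiB +
                                ((MIN_DISK_SPACE_FOR_BLOCK_FILES % GiB) ? 1 : 0);
        assert(min_gb == 1); // the prune spinbox cannot be set below 1 GB
        return 0;
    }
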
diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h
index faf9ff8959..5aad484ce7 100644
--- a/src/qt/optionsdialog.h
+++ b/src/qt/optionsdialog.h
@@ -53,6 +53,7 @@ private Q_SLOTS:
void on_hideTrayIcon_stateChanged(int fState);
+ void togglePruneWarning(bool enabled);
void showRestartWarning(bool fPersistent = false);
void clearStatusLabel();
void updateProxyValidationState();
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index cae9dace4c..31a85f4e23 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -88,6 +88,16 @@ void OptionsModel::Init(bool resetSettings)
// by command-line and show this in the UI.
// Main
+ if (!settings.contains("bPrune"))
+ settings.setValue("bPrune", false);
+ if (!settings.contains("nPruneSize"))
+ settings.setValue("nPruneSize", 2);
+ // Convert prune size to MB:
+ const uint64_t nPruneSizeMB = settings.value("nPruneSize").toInt() * 1000;
+ if (!m_node.softSetArg("-prune", settings.value("bPrune").toBool() ? std::to_string(nPruneSizeMB) : "0")) {
+ addOverriddenOption("-prune");
+ }
+
if (!settings.contains("nDatabaseCache"))
settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache);
if (!m_node.softSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString()))
@@ -281,6 +291,10 @@ QVariant OptionsModel::data(const QModelIndex & index, int role) const
return settings.value("language");
case CoinControlFeatures:
return fCoinControlFeatures;
+ case Prune:
+ return settings.value("bPrune");
+ case PruneSize:
+ return settings.value("nPruneSize");
case DatabaseCache:
return settings.value("nDatabaseCache");
case ThreadsScriptVerif:
@@ -405,6 +419,18 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
settings.setValue("fCoinControlFeatures", fCoinControlFeatures);
Q_EMIT coinControlFeaturesChanged(fCoinControlFeatures);
break;
+ case Prune:
+ if (settings.value("bPrune") != value) {
+ settings.setValue("bPrune", value);
+ setRestartRequired(true);
+ }
+ break;
+ case PruneSize:
+ if (settings.value("nPruneSize") != value) {
+ settings.setValue("nPruneSize", value);
+ setRestartRequired(true);
+ }
+ break;
case DatabaseCache:
if (settings.value("nDatabaseCache") != value) {
settings.setValue("nDatabaseCache", value);
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index fc1d119a71..2777cbeaf2 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -50,6 +50,8 @@ public:
Language, // QString
CoinControlFeatures, // bool
ThreadsScriptVerif, // int
+ Prune, // bool
+ PruneSize, // int
DatabaseCache, // int
SpendZeroConfChange, // bool
Listen, // bool
diff --git a/src/qt/res/movies/makespinner.sh b/src/qt/res/movies/makespinner.sh
index d0deb1238c..76e36e4f31 100755
--- a/src/qt/res/movies/makespinner.sh
+++ b/src/qt/res/movies/makespinner.sh
@@ -2,6 +2,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
FRAMEDIR=$(dirname $0)
for i in {0..35}
do
diff --git a/src/rest.cpp b/src/rest.cpp
index ffa75c241f..a5f164497d 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -217,7 +217,7 @@ static bool rest_block(HTTPRequest* req,
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
- if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0)
+ if (IsBlockPruned(pblockindex))
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
if (!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus()))
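
Both call sites (here and in GetBlockChecked below) now share a helper whose definition is outside this hunk; judging from the removed inline checks, it is presumably equivalent to the following sketch:

    // Presumed shape of the shared helper (likely declared in validation.h).
    bool IsBlockPruned(const CBlockIndex* pblockindex)
    {
        return fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0;
    }
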
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 24fb522e60..68279d7ffd 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -6,7 +6,6 @@
#include <rpc/blockchain.h>
#include <amount.h>
-#include <chain.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <coins.h>
@@ -49,17 +48,13 @@ static std::mutex cs_blockchange;
static std::condition_variable cond_blockchange;
static CUpdatedBlock latestblock;
-/* Calculate the difficulty for a given block index,
- * or the block index of the given chain.
+/* Calculate the difficulty for a given block index.
*/
-double GetDifficulty(const CChain& chain, const CBlockIndex* blockindex)
+double GetDifficulty(const CBlockIndex* blockindex)
{
if (blockindex == nullptr)
{
- if (chain.Tip() == nullptr)
- return 1.0;
- else
- blockindex = chain.Tip();
+ return 1.0;
}
int nShift = (blockindex->nBits >> 24) & 0xff;
@@ -80,11 +75,6 @@ double GetDifficulty(const CChain& chain, const CBlockIndex* blockindex)
return dDiff;
}
-double GetDifficulty(const CBlockIndex* blockindex)
-{
- return GetDifficulty(chainActive, blockindex);
-}
-
UniValue blockheaderToJSON(const CBlockIndex* blockindex)
{
AssertLockHeld(cs_main);
@@ -105,6 +95,7 @@ UniValue blockheaderToJSON(const CBlockIndex* blockindex)
result.pushKV("bits", strprintf("%08x", blockindex->nBits));
result.pushKV("difficulty", GetDifficulty(blockindex));
result.pushKV("chainwork", blockindex->nChainWork.GetHex());
+ result.pushKV("nTx", (uint64_t)blockindex->nTx);
if (blockindex->pprev)
result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex());
@@ -150,6 +141,7 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool tx
result.pushKV("bits", strprintf("%08x", block.nBits));
result.pushKV("difficulty", GetDifficulty(blockindex));
result.pushKV("chainwork", blockindex->nChainWork.GetHex());
+ result.pushKV("nTx", (uint64_t)blockindex->nTx);
if (blockindex->pprev)
result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex());
@@ -354,7 +346,7 @@ static UniValue getdifficulty(const JSONRPCRequest& request)
);
LOCK(cs_main);
- return GetDifficulty();
+ return GetDifficulty(chainActive.Tip());
}
static std::string EntryDescriptionString()
@@ -704,6 +696,7 @@ static UniValue getblockheader(const JSONRPCRequest& request)
" \"bits\" : \"1d00ffff\", (string) The bits\n"
" \"difficulty\" : x.xxx, (numeric) The difficulty\n"
" \"chainwork\" : \"0000...1f3\" (string) Expected number of hashes required to produce the current chain (in hex)\n"
+ " \"nTx\" : n, (numeric) The number of transactions in the block.\n"
" \"previousblockhash\" : \"hash\", (string) The hash of the previous block\n"
" \"nextblockhash\" : \"hash\", (string) The hash of the next block\n"
"}\n"
@@ -742,7 +735,7 @@ static UniValue getblockheader(const JSONRPCRequest& request)
static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
{
CBlock block;
- if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0) {
+ if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
}
@@ -792,6 +785,7 @@ static UniValue getblock(const JSONRPCRequest& request)
" \"bits\" : \"1d00ffff\", (string) The bits\n"
" \"difficulty\" : x.xxx, (numeric) The difficulty\n"
" \"chainwork\" : \"xxxx\", (string) Expected number of hashes required to produce the chain up to this block (in hex)\n"
+ " \"nTx\" : n, (numeric) The number of transactions in the block.\n"
" \"previousblockhash\" : \"hash\", (string) The hash of the previous block\n"
" \"nextblockhash\" : \"hash\" (string) The hash of the next block\n"
"}\n"
@@ -1240,7 +1234,7 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
obj.pushKV("blocks", (int)chainActive.Height());
obj.pushKV("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1);
obj.pushKV("bestblockhash", chainActive.Tip()->GetBlockHash().GetHex());
- obj.pushKV("difficulty", (double)GetDifficulty());
+ obj.pushKV("difficulty", (double)GetDifficulty(chainActive.Tip()));
obj.pushKV("mediantime", (int64_t)chainActive.Tip()->GetMedianTimePast());
obj.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), chainActive.Tip()));
obj.pushKV("initialblockdownload", IsInitialBlockDownload());
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index 960edfd56f..3aa8de2d2b 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -16,7 +16,7 @@ class UniValue;
* @return A floating point number that is a multiple of the main net minimum
* difficulty (4295032833 hashes).
*/
-double GetDifficulty(const CBlockIndex* blockindex = nullptr);
+double GetDifficulty(const CBlockIndex* blockindex);
/** Callback for when block tip changed. */
void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);
@@ -34,4 +34,3 @@ UniValue mempoolToJSON(bool fVerbose = false);
UniValue blockheaderToJSON(const CBlockIndex* blockindex);
#endif
-
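
With the CChain overload removed, every caller passes a block index explicitly, and a null index now simply yields the minimum difficulty. A caller-side sketch, with includes assumed from the surrounding RPC code:

    #include <rpc/blockchain.h>
    #include <validation.h> // chainActive, cs_main

    double TipDifficulty()
    {
        LOCK(cs_main);
        // Pass the tip explicitly; GetDifficulty(nullptr) returns 1.0.
        return GetDifficulty(chainActive.Tip());
    }
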
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index bb68f72ccc..0f35fd3770 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -52,7 +52,6 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "listreceivedbylabel", 0, "minconf" },
{ "listreceivedbylabel", 1, "include_empty" },
{ "listreceivedbylabel", 2, "include_watchonly" },
- { "getlabeladdress", 1, "force" },
{ "getbalance", 1, "minconf" },
{ "getbalance", 2, "include_watchonly" },
{ "getblockhash", 0, "height" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index b2d857108e..81c4fb040f 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -214,7 +214,7 @@ static UniValue getmininginfo(const JSONRPCRequest& request)
obj.pushKV("blocks", (int)chainActive.Height());
obj.pushKV("currentblockweight", (uint64_t)nLastBlockWeight);
obj.pushKV("currentblocktx", (uint64_t)nLastBlockTx);
- obj.pushKV("difficulty", (double)GetDifficulty());
+ obj.pushKV("difficulty", (double)GetDifficulty(chainActive.Tip()));
obj.pushKV("networkhashps", getnetworkhashps(request));
obj.pushKV("pooledtx", (uint64_t)mempool.size());
obj.pushKV("chain", Params().NetworkIDString());
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 1530d8578b..8fa56e9335 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -475,7 +475,7 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request)
UniValue localAddresses(UniValue::VARR);
{
LOCK(cs_mapLocalHost);
- for (const std::pair<CNetAddr, LocalServiceInfo> &item : mapLocalHost)
+ for (const std::pair<const CNetAddr, LocalServiceInfo> &item : mapLocalHost)
{
UniValue rec(UniValue::VOBJ);
rec.pushKV("address", item.first.ToString());
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index ad2d55afe7..3b3f43edea 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -748,7 +748,7 @@ static UniValue combinerawtransaction(const JSONRPCRequest& request)
}
}
- UpdateTransaction(mergedTx, i, sigdata);
+ UpdateInput(txin, sigdata);
}
return EncodeHexTx(mergedTx);
@@ -882,7 +882,7 @@ UniValue SignTransaction(CMutableTransaction& mtx, const UniValue& prevTxsUnival
}
sigdata = CombineSignatures(prevPubKey, TransactionSignatureChecker(&txConst, i, amount), sigdata, DataFromTransaction(mtx, i));
- UpdateTransaction(mtx, i, sigdata);
+ UpdateInput(txin, sigdata);
ScriptError serror = SCRIPT_ERR_OK;
if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount), &serror)) {
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index c5468f633b..6dbfbda029 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -199,12 +199,6 @@ void UpdateInput(CTxIn& input, const SignatureData& data)
input.scriptWitness = data.scriptWitness;
}
-void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const SignatureData& data)
-{
- assert(tx.vin.size() > nIn);
- UpdateInput(tx.vin[nIn], data);
-}
-
bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType)
{
assert(nIn < txTo.vin.size());
@@ -213,7 +207,7 @@ bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, C
SignatureData sigdata;
bool ret = ProduceSignature(provider, creator, fromPubKey, sigdata);
- UpdateTransaction(txTo, nIn, sigdata);
+ UpdateInput(txTo.vin.at(nIn), sigdata);
return ret;
}
diff --git a/src/script/sign.h b/src/script/sign.h
index a10366dcd1..8ef0306bfe 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -73,7 +73,6 @@ SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignature
/** Extract signature data from a transaction, and insert it. */
SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nIn);
-void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const SignatureData& data);
void UpdateInput(CTxIn& input, const SignatureData& data);
/* Check whether we know how to sign for an output like this, assuming we
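
Callers of the removed UpdateTransaction() helper migrate by indexing the input themselves, exactly as SignSignature() does above (sketch; txTo, nIn and sigdata come from the caller):

    // Before: UpdateTransaction(txTo, nIn, sigdata);
    UpdateInput(txTo.vin.at(nIn), sigdata); // copies the scriptSig/scriptWitness onto the input
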
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 53fcbe37de..d9269d6147 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -114,6 +114,7 @@ bool Solver(const CScript& scriptPubKey, txnouttype& typeRet, std::vector<std::v
vSolutionsRet.push_back(std::move(witnessprogram));
return true;
}
+ typeRet = TX_NONSTANDARD;
return false;
}
diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp
index 13ec19834a..8644aea371 100644
--- a/src/test/arith_uint256_tests.cpp
+++ b/src/test/arith_uint256_tests.cpp
@@ -198,13 +198,6 @@ BOOST_AUTO_TEST_CASE( shifts ) { // "<<" ">>" "<<=" ">>="
BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ -
{
- BOOST_CHECK(!ZeroL);
- BOOST_CHECK(!(!OneL));
- for (unsigned int i = 0; i < 256; ++i)
- BOOST_CHECK(!(!(OneL<<i)));
- BOOST_CHECK(!(!R1L));
- BOOST_CHECK(!(!MaxL));
-
BOOST_CHECK(~ZeroL == MaxL);
unsigned char TmpArray[32];
diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp
index 5b8df32158..7d8ae46fb8 100644
--- a/src/test/blockchain_tests.cpp
+++ b/src/test/blockchain_tests.cpp
@@ -1,9 +1,9 @@
#include <boost/test/unit_test.hpp>
-#include "stdlib.h"
+#include <stdlib.h>
-#include "rpc/blockchain.cpp"
-#include "test/test_bitcoin.h"
+#include <rpc/blockchain.h>
+#include <test/test_bitcoin.h>
/* Equality between doubles is imprecise. Comparison should be done
* with a small threshold of tolerance, rather than exact equality.
@@ -22,14 +22,6 @@ static CBlockIndex* CreateBlockIndexWithNbits(uint32_t nbits)
return block_index;
}
-static CChain CreateChainWithNbits(uint32_t nbits)
-{
- CBlockIndex* block_index = CreateBlockIndexWithNbits(nbits);
- CChain chain;
- chain.SetTip(block_index);
- return chain;
-}
-
static void RejectDifficultyMismatch(double difficulty, double expected_difficulty) {
BOOST_CHECK_MESSAGE(
DoubleEquals(difficulty, expected_difficulty, 0.00001),
@@ -43,12 +35,7 @@ static void RejectDifficultyMismatch(double difficulty, double expected_difficul
static void TestDifficulty(uint32_t nbits, double expected_difficulty)
{
CBlockIndex* block_index = CreateBlockIndexWithNbits(nbits);
- /* Since we are passing in block index explicitly,
- * there is no need to set up anything within the chain itself.
- */
- CChain chain;
-
- double difficulty = GetDifficulty(chain, block_index);
+ double difficulty = GetDifficulty(block_index);
delete block_index;
RejectDifficultyMismatch(difficulty, expected_difficulty);
@@ -84,43 +71,8 @@ BOOST_AUTO_TEST_CASE(get_difficulty_for_very_high_target)
// Verify that difficulty is 1.0 for an empty chain.
BOOST_AUTO_TEST_CASE(get_difficulty_for_null_tip)
{
- CChain chain;
- double difficulty = GetDifficulty(chain, nullptr);
+ double difficulty = GetDifficulty(nullptr);
RejectDifficultyMismatch(difficulty, 1.0);
}
-/* Verify that if difficulty is based upon the block index
- * in the chain, if no block index is explicitly specified.
- */
-BOOST_AUTO_TEST_CASE(get_difficulty_for_null_block_index)
-{
- CChain chain = CreateChainWithNbits(0x1df88f6f);
-
- double difficulty = GetDifficulty(chain, nullptr);
- delete chain.Tip();
-
- double expected_difficulty = 0.004023;
-
- RejectDifficultyMismatch(difficulty, expected_difficulty);
-}
-
-/* Verify that difficulty is based upon the explicitly specified
- * block index rather than being taken from the provided chain,
- * when both are present.
- */
-BOOST_AUTO_TEST_CASE(get_difficulty_for_block_index_overrides_tip)
-{
- CChain chain = CreateChainWithNbits(0x1df88f6f);
- /* This block index's nbits should be used
- * instead of the chain's when calculating difficulty.
- */
- CBlockIndex* override_block_index = CreateBlockIndexWithNbits(0x12345678);
-
- double difficulty = GetDifficulty(chain, override_block_index);
- delete chain.Tip();
- delete override_block_index;
-
- RejectDifficultyMismatch(difficulty, 5913134931067755359633408.0);
-}
-
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/DoS_tests.cpp b/src/test/denialofservice_tests.cpp
index 1868aed7dd..e5f914ba8a 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -42,7 +42,7 @@ static NodeId id = 0;
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds);
-BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup)
+BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
// Test eviction of an outbound peer whose chain never advances
// Mock a node connection, and use mocktime to simulate a peer
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 5ca243f42e..0264d29455 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -571,4 +571,179 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
SetMockTime(0);
}
+inline CTransactionRef make_tx(std::vector<CAmount>&& output_values, std::vector<CTransactionRef>&& inputs=std::vector<CTransactionRef>(), std::vector<uint32_t>&& input_indices=std::vector<uint32_t>())
+{
+ CMutableTransaction tx = CMutableTransaction();
+ tx.vin.resize(inputs.size());
+ tx.vout.resize(output_values.size());
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ tx.vin[i].prevout.hash = inputs[i]->GetHash();
+ tx.vin[i].prevout.n = input_indices.size() > i ? input_indices[i] : 0;
+ }
+ for (size_t i = 0; i < output_values.size(); ++i) {
+ tx.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
+ tx.vout[i].nValue = output_values[i];
+ }
+ return MakeTransactionRef(tx);
+}
+
+
+BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
+{
+ size_t ancestors, descendants;
+
+ CTxMemPool pool;
+ TestMemPoolEntryHelper entry;
+
+ /* Base transaction */
+ //
+ // [tx1]
+ //
+ CTransactionRef tx1 = make_tx(/* output_values */ {10 * COIN});
+ pool.addUnchecked(tx1->GetHash(), entry.Fee(10000LL).FromTx(tx1));
+
+ // Ancestors / descendants should be 1 / 1 (itself / itself)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 1ULL);
+
+ /* Child transaction */
+ //
+ // [tx1].0 <- [tx2]
+ //
+ CTransactionRef tx2 = make_tx(/* output_values */ {495 * CENT, 5 * COIN}, /* inputs */ {tx1});
+ pool.addUnchecked(tx2->GetHash(), entry.Fee(10000LL).FromTx(tx2));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 2 (tx1,2)
+ // tx2 2 (tx1,2) 2 (tx1,2)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 2ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 2ULL);
+
+ /* Grand-child 1 */
+ //
+ // [tx1].0 <- [tx2].0 <- [tx3]
+ //
+ CTransactionRef tx3 = make_tx(/* output_values */ {290 * CENT, 200 * CENT}, /* inputs */ {tx2});
+ pool.addUnchecked(tx3->GetHash(), entry.Fee(10000LL).FromTx(tx3));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 3 (tx1,2,3)
+ // tx2 2 (tx1,2) 3 (tx1,2,3)
+ // tx3 3 (tx1,2,3) 3 (tx1,2,3)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+
+ /* Grand-child 2 */
+ //
+ // [tx1].0 <- [tx2].0 <- [tx3]
+ // |
+ // \---1 <- [tx4]
+ //
+ CTransactionRef tx4 = make_tx(/* output_values */ {290 * CENT, 250 * CENT}, /* inputs */ {tx2}, /* input_indices */ {1});
+ pool.addUnchecked(tx4->GetHash(), entry.Fee(10000LL).FromTx(tx4));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 4 (tx1,2,3,4)
+ // tx2 2 (tx1,2) 4 (tx1,2,3,4)
+ // tx3 3 (tx1,2,3) 4 (tx1,2,3,4)
+ // tx4 3 (tx1,2,4) 4 (tx1,2,3,4)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+
+ /* Make an alternate branch that is longer and connect it to tx3 */
+ //
+ // [ty1].0 <- [ty2].0 <- [ty3].0 <- [ty4].0 <- [ty5].0
+ // |
+ // [tx1].0 <- [tx2].0 <- [tx3].0 <- [ty6] --->--/
+ // |
+ // \---1 <- [tx4]
+ //
+ CTransactionRef ty1, ty2, ty3, ty4, ty5;
+ CTransactionRef* ty[5] = {&ty1, &ty2, &ty3, &ty4, &ty5};
+ CAmount v = 5 * COIN;
+ for (uint64_t i = 0; i < 5; i++) {
+ CTransactionRef& tyi = *ty[i];
+ tyi = make_tx(/* output_values */ {v}, /* inputs */ i > 0 ? std::vector<CTransactionRef>{*ty[i - 1]} : std::vector<CTransactionRef>{});
+ v -= 50 * CENT;
+ pool.addUnchecked(tyi->GetHash(), entry.Fee(10000LL).FromTx(tyi));
+ pool.GetTransactionAncestry(tyi->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, i+1);
+ BOOST_CHECK_EQUAL(descendants, i+1);
+ }
+ CTransactionRef ty6 = make_tx(/* output_values */ {5 * COIN}, /* inputs */ {tx3, ty5});
+ pool.addUnchecked(ty6->GetHash(), entry.Fee(10000LL).FromTx(ty6));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =================== ===========
+ // tx1 1 (tx1) 5 (tx1,2,3,4, ty6)
+ // tx2 2 (tx1,2) 5 (tx1,2,3,4, ty6)
+ // tx3 3 (tx1,2,3) 5 (tx1,2,3,4, ty6)
+ // tx4 3 (tx1,2,4) 5 (tx1,2,3,4, ty6)
+ // ty1 1 (ty1) 6 (ty1,2,3,4,5,6)
+ // ty2 2 (ty1,2) 6 (ty1,2,3,4,5,6)
+ // ty3 3 (ty1,2,3) 6 (ty1,2,3,4,5,6)
+ // ty4 4 (y1234) 6 (ty1,2,3,4,5,6)
+ // ty5 5 (y12345) 6 (ty1,2,3,4,5,6)
+ // ty6 9 (tx123, ty123456) 6 (ty1,2,3,4,5,6)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(ty1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 4ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty5->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 5ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty6->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 9ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_p2sh_tests.cpp
index 63d211dd97..803a673fab 100644
--- a/src/test/script_P2SH_tests.cpp
+++ b/src/test/script_p2sh_tests.cpp
@@ -46,7 +46,7 @@ Verify(const CScript& scriptSig, const CScript& scriptPubKey, bool fStrict, Scri
}
-BOOST_FIXTURE_TEST_SUITE(script_P2SH_tests, BasicTestingSetup)
+BOOST_FIXTURE_TEST_SUITE(script_p2sh_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(sign)
{
diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp
index ff0bf6c66d..7ab0978228 100644
--- a/src/test/script_standard_tests.cpp
+++ b/src/test/script_standard_tests.cpp
@@ -726,6 +726,32 @@ BOOST_AUTO_TEST_CASE(script_standard_IsMine)
BOOST_CHECK(!isInvalid);
}
+ // witness unspendable
+ {
+ CBasicKeyStore keystore;
+ keystore.AddKey(keys[0]);
+
+ scriptPubKey.clear();
+ scriptPubKey << OP_0 << ToByteVector(ParseHex("aabb"));
+
+ result = IsMine(keystore, scriptPubKey, isInvalid);
+ BOOST_CHECK_EQUAL(result, ISMINE_NO);
+ BOOST_CHECK(!isInvalid);
+ }
+
+ // witness unknown
+ {
+ CBasicKeyStore keystore;
+ keystore.AddKey(keys[0]);
+
+ scriptPubKey.clear();
+ scriptPubKey << OP_16 << ToByteVector(ParseHex("aabb"));
+
+ result = IsMine(keystore, scriptPubKey, isInvalid);
+ BOOST_CHECK_EQUAL(result, ISMINE_NO);
+ BOOST_CHECK(!isInvalid);
+ }
+
// Nonstandard
{
CBasicKeyStore keystore;
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index cc72e96eb1..65c5b8ea1d 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -629,7 +629,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore2, scriptMulti, output2, input2, false);
CheckWithFlag(output2, input2, 0, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
// P2SH 2-of-2 multisig
@@ -640,7 +640,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
@@ -652,7 +652,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
@@ -664,7 +664,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
}
diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp
index 14158f2875..be7ee2428b 100644
--- a/src/test/txindex_tests.cpp
+++ b/src/test/txindex_tests.cpp
@@ -15,7 +15,7 @@ BOOST_AUTO_TEST_SUITE(txindex_tests)
BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
{
- TxIndex txindex(MakeUnique<TxIndexDB>(1 << 20, true));
+ TxIndex txindex(1 << 20, true);
CTransactionRef tx_disk;
uint256 block_hash;
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 06497667c3..d32d4b267c 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -102,7 +102,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
// should fail.
// Capture this interaction with the upgraded_nop argument: set it when evaluating
// any script flag that is implemented as an upgraded NOP code.
-static void ValidateCheckInputsForAllFlags(CMutableTransaction &tx, uint32_t failing_flags, bool add_to_cache)
+static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache)
{
PrecomputedTransactionData txdata(tx);
// If we add many more flags, this loop can get too expensive, but we can
@@ -315,7 +315,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
// Sign
SignatureData sigdata;
ProduceSignature(keystore, MutableTransactionSignatureCreator(&valid_with_witness_tx, 0, 11*CENT, SIGHASH_ALL), spend_tx.vout[1].scriptPubKey, sigdata);
- UpdateTransaction(valid_with_witness_tx, 0, sigdata);
+ UpdateInput(valid_with_witness_tx.vin[0], sigdata);
// This should be valid under all script flags.
ValidateCheckInputsForAllFlags(valid_with_witness_tx, 0, true);
@@ -343,7 +343,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
for (int i=0; i<2; ++i) {
SignatureData sigdata;
ProduceSignature(keystore, MutableTransactionSignatureCreator(&tx, i, 11*CENT, SIGHASH_ALL), spend_tx.vout[i].scriptPubKey, sigdata);
- UpdateTransaction(tx, i, sigdata);
+ UpdateInput(tx.vin[i], sigdata);
}
// This should be valid under all script flags
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 333d3596c1..b1d5879c83 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -21,8 +21,6 @@
static const char DB_COIN = 'C';
static const char DB_COINS = 'c';
static const char DB_BLOCK_FILES = 'f';
-static const char DB_TXINDEX = 't';
-static const char DB_TXINDEX_BLOCK = 'T';
static const char DB_BLOCK_INDEX = 'b';
static const char DB_BEST_BLOCK = 'B';
@@ -237,17 +235,6 @@ bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockF
return WriteBatch(batch, true);
}
-bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
- return Read(std::make_pair(DB_TXINDEX, txid), pos);
-}
-
-bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
- CDBBatch batch(*this);
- for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
- batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second);
- return WriteBatch(batch);
-}
-
bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0');
}
@@ -425,173 +412,3 @@ bool CCoinsViewDB::Upgrade() {
LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
return !ShutdownRequested();
}
-
-TxIndexDB::TxIndexDB(size_t n_cache_size, bool f_memory, bool f_wipe) :
- CDBWrapper(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe)
-{}
-
-bool TxIndexDB::ReadTxPos(const uint256 &txid, CDiskTxPos& pos) const
-{
- return Read(std::make_pair(DB_TXINDEX, txid), pos);
-}
-
-bool TxIndexDB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos)
-{
- CDBBatch batch(*this);
- for (const auto& tuple : v_pos) {
- batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
- }
- return WriteBatch(batch);
-}
-
-bool TxIndexDB::ReadBestBlock(CBlockLocator& locator) const
-{
- bool success = Read(DB_BEST_BLOCK, locator);
- if (!success) {
- locator.SetNull();
- }
- return success;
-}
-
-bool TxIndexDB::WriteBestBlock(const CBlockLocator& locator)
-{
- return Write(DB_BEST_BLOCK, locator);
-}
-
-/*
- * Safely persist a transfer of data from the old txindex database to the new one, and compact the
- * range of keys updated. This is used internally by MigrateData.
- */
-static void WriteTxIndexMigrationBatches(TxIndexDB& newdb, CBlockTreeDB& olddb,
- CDBBatch& batch_newdb, CDBBatch& batch_olddb,
- const std::pair<unsigned char, uint256>& begin_key,
- const std::pair<unsigned char, uint256>& end_key)
-{
- // Sync new DB changes to disk before deleting from old DB.
- newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
- olddb.WriteBatch(batch_olddb);
- olddb.CompactRange(begin_key, end_key);
-
- batch_newdb.Clear();
- batch_olddb.Clear();
-}
-
-bool TxIndexDB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
-{
- // The prior implementation of txindex was always in sync with block index
- // and presence was indicated with a boolean DB flag. If the flag is set,
- // this means the txindex from a previous version is valid and in sync with
- // the chain tip. The first step of the migration is to unset the flag and
- // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
- // index entries are copied over in batches to the new database. Finally,
- // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
- // written to the new database.
- //
- // Unsetting the boolean flag ensures that if the node is downgraded to a
- // previous version, it will not see a corrupted, partially migrated index
- // -- it will see that the txindex is disabled. When the node is upgraded
- // again, the migration will pick up where it left off and sync to the block
- // with hash DB_TXINDEX_BLOCK.
- bool f_legacy_flag = false;
- block_tree_db.ReadFlag("txindex", f_legacy_flag);
- if (f_legacy_flag) {
- if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
- return error("%s: cannot write block indicator", __func__);
- }
- if (!block_tree_db.WriteFlag("txindex", false)) {
- return error("%s: cannot write block index db flag", __func__);
- }
- }
-
- CBlockLocator locator;
- if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
- return true;
- }
-
- int64_t count = 0;
- LogPrintf("Upgrading txindex database... [0%%]\n");
- uiInterface.ShowProgress(_("Upgrading txindex database"), 0, true);
- int report_done = 0;
- const size_t batch_size = 1 << 24; // 16 MiB
-
- CDBBatch batch_newdb(*this);
- CDBBatch batch_olddb(block_tree_db);
-
- std::pair<unsigned char, uint256> key;
- std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
- std::pair<unsigned char, uint256> prev_key = begin_key;
-
- bool interrupted = false;
- std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
- for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
- boost::this_thread::interruption_point();
- if (ShutdownRequested()) {
- interrupted = true;
- break;
- }
-
- if (!cursor->GetKey(key)) {
- return error("%s: cannot get key from valid cursor", __func__);
- }
- if (key.first != DB_TXINDEX) {
- break;
- }
-
- // Log progress every 10%.
- if (++count % 256 == 0) {
- // Since txids are uniformly random and traversed in increasing order, the high 16 bits
- // of the hash can be used to estimate the current progress.
- const uint256& txid = key.second;
- uint32_t high_nibble =
- (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
- (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
- int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
-
- uiInterface.ShowProgress(_("Upgrading txindex database"), percentage_done, true);
- if (report_done < percentage_done/10) {
- LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
- report_done = percentage_done/10;
- }
- }
-
- CDiskTxPos value;
- if (!cursor->GetValue(value)) {
- return error("%s: cannot parse txindex record", __func__);
- }
- batch_newdb.Write(key, value);
- batch_olddb.Erase(key);
-
- if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
- // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
- // because LevelDB iterators are guaranteed to provide a consistent view of the
- // underlying data, like a lightweight snapshot.
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- prev_key, key);
- prev_key = key;
- }
- }
-
- // If these final DB batches complete the migration, write the best block
- // hash marker to the new database and delete from the old one. This signals
- // that the former is fully caught up to that point in the blockchain and
- // that all txindex entries have been removed from the latter.
- if (!interrupted) {
- batch_olddb.Erase(DB_TXINDEX_BLOCK);
- batch_newdb.Write(DB_BEST_BLOCK, locator);
- }
-
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- begin_key, key);
-
- if (interrupted) {
- LogPrintf("[CANCELLED].\n");
- return false;
- }
-
- uiInterface.ShowProgress("", 100, false);
-
- LogPrintf("[DONE].\n");
- return true;
-}
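
The migration routine removed above copies the legacy txindex out of the block tree DB in bounded batches, always syncing the new database before deleting from the old one, so an interruption can only leave entries present in both databases, never in neither. Below is a minimal standalone sketch of that pattern, using std::map stand-ins for the two LevelDB wrappers; the real code additionally compacts each deleted key range, reports progress, and checks for shutdown between batches.

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Toy stand-ins for the two databases; the real code uses CDBWrapper/CDBBatch.
using Db = std::map<std::string, std::string>;

// Flush pending writes to the new DB before erasing the same keys from the old
// one, mirroring WriteTxIndexMigrationBatches(): a crash in between duplicates
// some entries but never loses any.
static void FlushBatches(Db& newdb, Db& olddb,
                         std::map<std::string, std::string>& batch_new,
                         std::vector<std::string>& batch_old_erase)
{
    for (const auto& kv : batch_new) newdb.insert(kv);        // "sync" the new DB first
    for (const auto& key : batch_old_erase) olddb.erase(key); // then drop from the old DB
    batch_new.clear();
    batch_old_erase.clear();
}

// Copy every entry of olddb into newdb in bounded batches. LevelDB cursors see
// a consistent snapshot, which is why the real loop can erase behind its
// iterator; std::map makes no such promise, so this model iterates over a copy.
static void MigrateInBatches(Db& olddb, Db& newdb, std::size_t batch_size)
{
    const Db snapshot = olddb;
    std::map<std::string, std::string> batch_new;
    std::vector<std::string> batch_old_erase;
    for (const auto& kv : snapshot) {
        batch_new.insert(kv);
        batch_old_erase.push_back(kv.first);
        if (batch_new.size() >= batch_size) FlushBatches(newdb, olddb, batch_new, batch_old_erase);
    }
    FlushBatches(newdb, olddb, batch_new, batch_old_erase);
}

int main()
{
    Db olddb{{"t1", "pos1"}, {"t2", "pos2"}, {"t3", "pos3"}}, newdb;
    MigrateInBatches(olddb, newdb, /*batch_size=*/2);
    return (olddb.empty() && newdb.size() == 3) ? 0 : 1;
}
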
diff --git a/src/txdb.h b/src/txdb.h
index 4193f98de1..100adb428d 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -40,31 +40,6 @@ static const int64_t nMaxTxIndexCache = 1024;
//! Max memory allocated to coin DB specific cache (MiB)
static const int64_t nMaxCoinsDBCache = 8;
-struct CDiskTxPos : public CDiskBlockPos
-{
- unsigned int nTxOffset; // after header
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITEAS(CDiskBlockPos, *this);
- READWRITE(VARINT(nTxOffset));
- }
-
- CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
- }
-
- CDiskTxPos() {
- SetNull();
- }
-
- void SetNull() {
- CDiskBlockPos::SetNull();
- nTxOffset = 0;
- }
-};
-
/** CCoinsView backed by the coin database (chainstate/) */
class CCoinsViewDB final : public CCoinsView
{
@@ -118,43 +93,9 @@ public:
bool ReadLastBlockFile(int &nFile);
bool WriteReindexing(bool fReindexing);
bool ReadReindexing(bool &fReindexing);
- bool ReadTxIndex(const uint256 &txid, CDiskTxPos &pos);
- bool WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> > &vect);
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
};
-/**
- * Access to the txindex database (indexes/txindex/)
- *
- * The database stores a block locator of the chain the database is synced to
- * so that the TxIndex can efficiently determine the point it last stopped at.
- * A locator is used instead of a simple hash of the chain tip because blocks
- * and block index entries may not be flushed to disk until after this database
- * is updated.
- */
-class TxIndexDB : public CDBWrapper
-{
-public:
- explicit TxIndexDB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
-
- /// Read the disk location of the transaction data with the given hash. Returns false if the
- /// transaction hash is not indexed.
- bool ReadTxPos(const uint256& txid, CDiskTxPos& pos) const;
-
- /// Write a batch of transaction positions to the DB.
- bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
-
- /// Read block locator of the chain that the txindex is in sync with.
- bool ReadBestBlock(CBlockLocator& locator) const;
-
- /// Write block locator of the chain that the txindex is in sync with.
- bool WriteBestBlock(const CBlockLocator& locator);
-
- /// Migrate txindex data from the block tree DB, where it may be for older nodes that have not
- /// been upgraded yet to the new database.
- bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
-};
-
#endif // BITCOIN_TXDB_H
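
The removed header comment explains why the txindex database stores a CBlockLocator rather than a bare tip hash: the block the index last synced to may not have been flushed yet, or may have been reorged away, and a locator's back-off list of hashes still identifies the most recent block shared with the active chain. A standalone sketch of that resolution step, with plain integers standing in for block hashes rather than the real CChain/CBlockLocator types:

#include <cstdint>
#include <unordered_map>
#include <vector>

// hash -> height for blocks on the active chain (toy stand-in for CChain).
using ActiveChain = std::unordered_map<uint64_t, int>;

// A locator lists hashes from newest to oldest, spaced increasingly far apart.
// Resolving it means finding the first entry still on the active chain; the
// index then resumes syncing from the block after that height.
int FindForkHeight(const ActiveChain& chain, const std::vector<uint64_t>& locator)
{
    for (uint64_t hash : locator) {
        auto it = chain.find(hash);
        if (it != chain.end()) return it->second;
    }
    return -1;  // no common block: start over from genesis
}

int main()
{
    // Active chain: hashes 101,102,103,205 at heights 0..3 (the old height-3
    // block, hash 204, was reorged away).
    ActiveChain chain{{101, 0}, {102, 1}, {103, 2}, {205, 3}};
    std::vector<uint64_t> locator{204, 103, 102, 101};  // recorded when 204 was the tip
    return FindForkHeight(chain, locator) == 2 ? 0 : 1;  // resumes after height 2
}
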
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index bb585fc075..1c6dba0c9c 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -693,18 +693,18 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
// Check children against mapNextTx
CTxMemPool::setEntries setChildrenCheck;
auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetHash(), 0));
- int64_t childSizes = 0;
+ uint64_t child_sizes = 0;
for (; iter != mapNextTx.end() && iter->first->hash == it->GetTx().GetHash(); ++iter) {
txiter childit = mapTx.find(iter->second->GetHash());
assert(childit != mapTx.end()); // mapNextTx points to in-mempool transactions
if (setChildrenCheck.insert(childit).second) {
- childSizes += childit->GetTxSize();
+ child_sizes += childit->GetTxSize();
}
}
assert(setChildrenCheck == GetMemPoolChildren(it));
// Also check to make sure size is greater than sum with immediate children.
// just a sanity check, not definitive that this calc is correct...
- assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize());
+ assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize());
if (fDependsWait)
waitingOnDependants.push_back(&(*it));
@@ -1055,11 +1055,36 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint>* pvNoSpends
}
}
-bool CTxMemPool::TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const {
+uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const {
+ // find parent with highest descendant count
+ std::vector<txiter> candidates;
+ setEntries counted;
+ candidates.push_back(entry);
+ uint64_t maximum = 0;
+ while (candidates.size()) {
+ txiter candidate = candidates.back();
+ candidates.pop_back();
+ if (!counted.insert(candidate).second) continue;
+ const setEntries& parents = GetMemPoolParents(candidate);
+ if (parents.size() == 0) {
+ maximum = std::max(maximum, candidate->GetCountWithDescendants());
+ } else {
+ for (txiter i : parents) {
+ candidates.push_back(i);
+ }
+ }
+ }
+ return maximum;
+}
+
+void CTxMemPool::GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const {
LOCK(cs);
auto it = mapTx.find(txid);
- return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit &&
- it->GetCountWithDescendants() < chainLimit);
+ ancestors = descendants = 0;
+ if (it != mapTx.end()) {
+ ancestors = it->GetCountWithAncestors();
+ descendants = CalculateDescendantMaximum(it);
+ }
}
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
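
CalculateDescendantMaximum above walks up the in-mempool ancestor graph and returns the largest descendant count recorded at any parentless ancestor, which GetTransactionAncestry then reports as the transaction's descendant figure. A standalone model of the same traversal over a toy entry type (indices instead of txiter, a plain field instead of GetCountWithDescendants()):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <set>
#include <vector>

// Toy mempool entry: indices of in-mempool parents plus the precomputed
// "count with descendants" that CTxMemPoolEntry already tracks.
struct Entry {
    std::vector<std::size_t> parents;
    uint64_t count_with_descendants;
};

// Mirrors CTxMemPool::CalculateDescendantMaximum(): visit each ancestor once
// and take the maximum descendant count seen at any entry with no parents.
uint64_t DescendantMaximum(const std::vector<Entry>& pool, std::size_t start)
{
    std::vector<std::size_t> candidates(1, start);
    std::set<std::size_t> counted;
    uint64_t maximum = 0;
    while (!candidates.empty()) {
        const std::size_t cur = candidates.back();
        candidates.pop_back();
        if (!counted.insert(cur).second) continue;  // already visited
        if (pool[cur].parents.empty()) {
            maximum = std::max<uint64_t>(maximum, pool[cur].count_with_descendants);
        } else {
            for (std::size_t p : pool[cur].parents) candidates.push_back(p);
        }
    }
    return maximum;
}

int main()
{
    // Chain A <- B <- C plus an unrelated D. C's only parentless ancestor is A,
    // whose descendant count (A, B, C) is 3.
    std::vector<Entry> pool(4);
    pool[0].count_with_descendants = 3;
    pool[1].parents.push_back(0); pool[1].count_with_descendants = 2;
    pool[2].parents.push_back(1); pool[2].count_with_descendants = 1;
    pool[3].count_with_descendants = 1;
    return DescendantMaximum(pool, 2) == 3 ? 0 : 1;
}
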
diff --git a/src/txmempool.h b/src/txmempool.h
index ca7b1cd4be..bda812b42f 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -498,6 +498,7 @@ public:
const setEntries & GetMemPoolParents(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
const setEntries & GetMemPoolChildren(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ uint64_t CalculateDescendantMaximum(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
private:
typedef std::map<txiter, setEntries, CompareIteratorByHash> cacheMap;
@@ -619,8 +620,11 @@ public:
/** Expire all transaction (and their dependencies) in the mempool older than time. Return the number of removed transactions. */
int Expire(int64_t time);
- /** Returns false if the transaction is in the mempool and not within the chain limit specified. */
- bool TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const;
+ /**
+ * Calculate the ancestor and descendant count for the given transaction.
+ * The counts include the transaction itself.
+ */
+ void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const;
unsigned long size()
{
diff --git a/src/util.cpp b/src/util.cpp
index 34483d95b0..ab262b4063 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -459,9 +459,9 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin
if (it != m_override_args.end()) {
if (it->second.size() > 0) {
for (const auto& ic : it->second) {
- fprintf(stderr, "warning: -includeconf cannot be used from commandline; ignoring -includeconf=%s\n", ic.c_str());
+ error += "-includeconf cannot be used from commandline; -includeconf=" + ic + "\n";
}
- m_override_args.erase(it);
+ return false;
}
}
return true;
@@ -585,6 +585,13 @@ void ArgsManager::AddArg(const std::string& name, const std::string& help, const
assert(ret.second); // Make sure an insertion actually happened
}
+void ArgsManager::AddHiddenArgs(const std::vector<std::string>& names)
+{
+ for (const std::string& name : names) {
+ AddArg(name, "", false, OptionsCategory::HIDDEN);
+ }
+}
+
std::string ArgsManager::GetHelpMessage()
{
const bool show_debug = gArgs.GetBoolArg("-help-debug", false);
@@ -849,11 +856,12 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
// if there is an -includeconf in the override args, but it is empty, that means the user
// passed '-noincludeconf' on the command line, in which case we should not include anything
if (m_override_args.count("-includeconf") == 0) {
+ std::string chain_id = GetChainName();
std::vector<std::string> includeconf(GetArgs("-includeconf"));
{
// We haven't set m_network yet (that happens in SelectParams()), so manually check
// for network.includeconf args.
- std::vector<std::string> includeconf_net(GetArgs(std::string("-") + GetChainName() + ".includeconf"));
+ std::vector<std::string> includeconf_net(GetArgs(std::string("-") + chain_id + ".includeconf"));
includeconf.insert(includeconf.end(), includeconf_net.begin(), includeconf_net.end());
}
@@ -862,7 +870,7 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
{
LOCK(cs_args);
m_config_args.erase("-includeconf");
- m_config_args.erase(std::string("-") + GetChainName() + ".includeconf");
+ m_config_args.erase(std::string("-") + chain_id + ".includeconf");
}
for (const std::string& to_include : includeconf) {
@@ -873,15 +881,22 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
}
LogPrintf("Included configuration file %s\n", to_include.c_str());
} else {
- fprintf(stderr, "Failed to include configuration file %s\n", to_include.c_str());
+ error = "Failed to include configuration file " + to_include;
+ return false;
}
}
// Warn about recursive -includeconf
includeconf = GetArgs("-includeconf");
{
- std::vector<std::string> includeconf_net(GetArgs(std::string("-") + GetChainName() + ".includeconf"));
+ std::vector<std::string> includeconf_net(GetArgs(std::string("-") + chain_id + ".includeconf"));
includeconf.insert(includeconf.end(), includeconf_net.begin(), includeconf_net.end());
+ std::string chain_id_final = GetChainName();
+ if (chain_id_final != chain_id) {
+ // Also warn about recursive includeconf for the chain that was specified in one of the includeconfs
+ includeconf_net = GetArgs(std::string("-") + chain_id_final + ".includeconf");
+ includeconf.insert(includeconf.end(), includeconf_net.begin(), includeconf_net.end());
+ }
}
for (const std::string& to_include : includeconf) {
fprintf(stderr, "warning: -includeconf cannot be used from included files; ignoring -includeconf=%s\n", to_include.c_str());
diff --git a/src/util.h b/src/util.h
index efd8a4bd9d..8094d72d6b 100644
--- a/src/util.h
+++ b/src/util.h
@@ -264,6 +264,11 @@ public:
void AddArg(const std::string& name, const std::string& help, const bool debug_only, const OptionsCategory& cat);
/**
+ * Add many hidden arguments
+ */
+ void AddHiddenArgs(const std::vector<std::string>& args);
+
+ /**
* Clear available arguments
*/
void ClearArgs() { m_available_args.clear(); }
diff --git a/src/validation.cpp b/src/validation.cpp
index dbfbfc16a0..baf083b153 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -205,7 +205,7 @@ private:
void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state);
CBlockIndex* FindMostWorkChain();
- bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams);
+ void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams);
bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params);
@@ -577,15 +577,9 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (tx.IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "coinbase");
- // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
- bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus());
- if (!gArgs.GetBoolArg("-prematurewitness", false) && tx.HasWitness() && !witnessEnabled) {
- return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
- }
-
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
- if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
+ if (fRequireStandard && !IsStandardTx(tx, reason))
return state.DoS(0, false, REJECT_NONSTANDARD, reason);
// Do not work on transactions that are too small.
@@ -2704,6 +2698,9 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
// Block until the validation queue drains. This should largely
// never happen in normal operation, however may happen during
// reindex, causing memory blowup if we run too far ahead.
+ // Note that if a validationinterface callback ends up calling
+ // ActivateBestChain this may lead to a deadlock! We should
+ // probably have a DEBUG_LOCKORDER test for this in the future.
SyncWithValidationInterfaceQueue();
}
@@ -2956,7 +2953,7 @@ CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
-bool CChainState::ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
+void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
@@ -3000,8 +2997,6 @@ bool CChainState::ReceivedBlockTransactions(const CBlock &block, CValidationStat
mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
}
}
-
- return true;
}
static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
@@ -3536,8 +3531,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
return false;
}
- if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
- return error("AcceptBlock(): ReceivedBlockTransactions failed");
+ ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error: ") + e.what());
}
@@ -3840,7 +3834,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
// Calculate nChainWork
std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
- for (const std::pair<uint256, CBlockIndex*>& item : mapBlockIndex)
+ for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
{
CBlockIndex* pindex = item.second;
vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
@@ -3907,7 +3901,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
std::set<int> setBlkDataFiles;
- for (const std::pair<uint256, CBlockIndex*>& item : mapBlockIndex)
+ for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
{
CBlockIndex* pindex = item.second;
if (pindex->nStatus & BLOCK_HAVE_DATA) {
@@ -4006,7 +4000,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
reportDone = percentageDone/10;
}
uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
- if (pindex->nHeight < chainActive.Height()-nCheckDepth)
+ if (pindex->nHeight <= chainActive.Height()-nCheckDepth)
break;
if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
// If pruning, only go back as far as we have data.
@@ -4346,9 +4340,7 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
if (blockPos.IsNull())
return error("%s: writing genesis block to disk failed", __func__);
CBlockIndex *pindex = AddToBlockIndex(block);
- CValidationState state;
- if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
- return error("%s: genesis block not accepted (%s)", __func__, FormatStateMessage(state));
+ ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
} catch (const std::runtime_error& e) {
return error("%s: failed to write genesis block: %s", __func__, e.what());
}
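
Several loops in this change (here and in the wallet diffs below) switch the element type from const std::pair<uint256, CBlockIndex*>& to const std::pair<const uint256, CBlockIndex*>&. A std::map's value_type is std::pair<const Key, Value>, so the old spelling silently copy-converts every element into a temporary just to bind the reference; some compilers now warn about exactly this. A self-contained illustration with generic types:

#include <cstdio>
#include <map>
#include <string>

int main()
{
    std::map<std::string, int> m{{"a", 1}, {"b", 2}};

    // value_type is std::pair<const std::string, int>; this reference type
    // matches exactly, so no copies are made.
    for (const std::pair<const std::string, int>& item : m) {
        std::printf("%s=%d\n", item.first.c_str(), item.second);
    }

    // This also compiles, but each element is converted into a temporary
    // std::pair<std::string, int> (copying the key) just to bind the reference.
    for (const std::pair<std::string, int>& item : m) {
        std::printf("%s=%d\n", item.first.c_str(), item.second);
    }

    // The usual way to sidestep the pitfall entirely:
    for (const auto& item : m) {
        std::printf("%s=%d\n", item.first.c_str(), item.second);
    }
}
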
diff --git a/src/validation.h b/src/validation.h
index b5ab10786a..b9c2f6c023 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -233,7 +233,8 @@ static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
* Note that we guarantee that either the proof-of-work is valid on pblock, or
* (and possibly also) BlockChecked will have been called.
*
- * Call without cs_main held.
+ * May not be called with cs_main held. May not be called in a
+ * validationinterface callback.
*
* @param[in] pblock The block we want to process.
* @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
@@ -245,7 +246,8 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
/**
* Process incoming block headers.
*
- * Call without cs_main held.
+ * May not be called with cs_main held. May not be called in a
+ * validationinterface callback.
*
* @param[in] block The block headers themselves
* @param[out] state This may be set to an Error state if any error occurred processing them
@@ -278,7 +280,12 @@ void ThreadScriptCheck();
bool IsInitialBlockDownload();
/** Retrieve a transaction (from memory pool, or from disk, if possible) */
bool GetTransaction(const uint256& hash, CTransactionRef& tx, const Consensus::Params& params, uint256& hashBlock, bool fAllowSlow = false, CBlockIndex* blockIndex = nullptr);
-/** Find the best known block, and make it the tip of the block chain */
+/**
+ * Find the best known block, and make it the tip of the block chain
+ *
+ * May not be called with cs_main held. May not be called in a
+ * validationinterface callback.
+ */
bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>());
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
@@ -445,7 +452,11 @@ inline CBlockIndex* LookupBlockIndex(const uint256& hash)
/** Find the last common block between the parameter chain and a locator. */
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator);
-/** Mark a block as precious and reorganize. */
+/** Mark a block as precious and reorganize.
+ *
+ * May not be called with cs_main held. May not be called in a
+ * validationinterface callback.
+ */
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex);
/** Mark a block as invalid. */
@@ -497,4 +508,10 @@ bool DumpMempool();
/** Load the mempool from disk. */
bool LoadMempool();
+//! Check whether the block associated with this index entry is pruned or not.
+inline bool IsBlockPruned(const CBlockIndex* pblockindex)
+{
+ return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+}
+
#endif // BITCOIN_VALIDATION_H
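
The new IsBlockPruned() helper combines three conditions: pruning has happened at all (fHavePruned), this block's data is no longer on disk (BLOCK_HAVE_DATA unset), and nTx > 0, which distinguishes a block whose data was received and later pruned from one that was never downloaded. A small standalone mirror of the check; the field names and flag value here are illustrative stand-ins, not the real chain.h definitions.

#include <cstdint>

struct BlockIndexModel {
    uint32_t status;  // bit flags; BLOCK_HAVE_DATA set while block data is on disk
    uint32_t n_tx;    // transaction count; becomes non-zero once the block was received
};

static const uint32_t BLOCK_HAVE_DATA = 8;  // illustrative value
static bool have_pruned = false;            // stand-in for the global fHavePruned

// Pruned means: we have pruned something, this block's data is gone, and
// n_tx > 0 proves we once had the data rather than never downloading it.
bool IsBlockPrunedModel(const BlockIndexModel& index)
{
    return have_pruned && !(index.status & BLOCK_HAVE_DATA) && index.n_tx > 0;
}

int main()
{
    have_pruned = true;
    BlockIndexModel received_then_pruned{0, 2500};
    BlockIndexModel never_downloaded{0, 0};
    return (IsBlockPrunedModel(received_then_pruned) && !IsBlockPrunedModel(never_downloaded)) ? 0 : 1;
}
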
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 456f08bc14..fd9e23cb64 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -120,7 +120,7 @@ static void WalletTxToJSON(const CWalletTx& wtx, UniValue& entry)
}
entry.pushKV("bip125-replaceable", rbfStatus);
- for (const std::pair<std::string, std::string>& item : wtx.mapValue)
+ for (const std::pair<const std::string, std::string>& item : wtx.mapValue)
entry.pushKV(item.first, item.second);
}
@@ -198,7 +198,7 @@ CTxDestination GetLabelDestination(CWallet* const pwallet, const std::string& la
return dest;
}
-static UniValue getlabeladdress(const JSONRPCRequest& request)
+static UniValue getaccountaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
CWallet* const pwallet = wallet.get();
@@ -207,53 +207,36 @@ static UniValue getlabeladdress(const JSONRPCRequest& request)
return NullUniValue;
}
- if (!IsDeprecatedRPCEnabled("accounts") && request.strMethod == "getaccountaddress") {
+ if (!IsDeprecatedRPCEnabled("accounts")) {
if (request.fHelp) {
throw std::runtime_error("getaccountaddress (Deprecated, will be removed in V0.18. To use this command, start bitcoind with -deprecatedrpc=accounts)");
}
throw JSONRPCError(RPC_METHOD_DEPRECATED, "getaccountaddress is deprecated and will be removed in V0.18. To use this command, start bitcoind with -deprecatedrpc=accounts.");
}
- if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
+ if (request.fHelp || request.params.size() != 1)
throw std::runtime_error(
- "getlabeladdress \"label\" ( force ) \n"
- "\nReturns the default receiving address for this label. This will reset to a fresh address once there's a transaction that spends to it.\n"
+ "getaccountaddress \"account\"\n"
+ "\n\nDEPRECATED. Returns the current Bitcoin address for receiving payments to this account.\n"
"\nArguments:\n"
- "1. \"label\" (string, required) The label for the address. It can also be set to the empty string \"\" to represent the default label.\n"
- "2. \"force\" (bool, optional) Whether the label should be created if it does not yet exist. If False, the RPC will return an error if called with a label that doesn't exist.\n"
- " Defaults to false (unless the getaccountaddress method alias is being called, in which case defaults to true for backwards compatibility).\n"
+ "1. \"account\" (string, required) The account for the address. It can also be set to the empty string \"\" to represent the default account. The account does not need to exist, it will be created and a new address created if there is no account by the given name.\n"
"\nResult:\n"
- "\"address\" (string) The current receiving address for the label.\n"
+ "\"address\" (string) The account bitcoin address\n"
"\nExamples:\n"
- + HelpExampleCli("getlabeladdress", "")
- + HelpExampleCli("getlabeladdress", "\"\"")
- + HelpExampleCli("getlabeladdress", "\"mylabel\"")
- + HelpExampleRpc("getlabeladdress", "\"mylabel\"")
+ + HelpExampleCli("getaccountaddress", "")
+ + HelpExampleCli("getaccountaddress", "\"\"")
+ + HelpExampleCli("getaccountaddress", "\"myaccount\"")
+ + HelpExampleRpc("getaccountaddress", "\"myaccount\"")
);
LOCK2(cs_main, pwallet->cs_wallet);
- // Parse the label first so we don't generate a key if there's an error
- std::string label = LabelFromValue(request.params[0]);
- bool force = request.strMethod == "getaccountaddress";
- if (!request.params[1].isNull()) {
- force = request.params[1].get_bool();
- }
-
- bool label_found = false;
- for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
- if (item.second.name == label) {
- label_found = true;
- break;
- }
- }
- if (!force && !label_found) {
- throw JSONRPCError(RPC_WALLET_INVALID_LABEL_NAME, std::string("No addresses with label " + label));
- }
+ // Parse the account first so we don't generate a key if there's an error
+ std::string account = LabelFromValue(request.params[0]);
UniValue ret(UniValue::VSTR);
- ret = EncodeDestination(GetLabelDestination(pwallet, label));
+ ret = EncodeDestination(GetLabelDestination(pwallet, account));
return ret;
}
@@ -343,23 +326,33 @@ static UniValue setlabel(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address");
}
+ std::string old_label = pwallet->mapAddressBook[dest].name;
std::string label = LabelFromValue(request.params[1]);
if (IsMine(*pwallet, dest)) {
- // Detect when changing the label of an address that is the receiving address of another label:
- // If so, delete the account record for it. Labels, unlike addresses, can be deleted,
- // and if we wouldn't do this, the record would stick around forever.
- if (pwallet->mapAddressBook.count(dest)) {
- std::string old_label = pwallet->mapAddressBook[dest].name;
- if (old_label != label && dest == GetLabelDestination(pwallet, old_label)) {
- pwallet->DeleteLabel(old_label);
- }
- }
pwallet->SetAddressBook(dest, label, "receive");
+ if (request.strMethod == "setaccount" && old_label != label && dest == GetLabelDestination(pwallet, old_label)) {
+ // for setaccount, call GetLabelDestination so a new receive address is created for the old account
+ GetLabelDestination(pwallet, old_label, true);
+ }
} else {
pwallet->SetAddressBook(dest, label, "send");
}
+ // Detect when there are no addresses using this label.
+ // If so, delete the account record for it. Labels, unlike addresses, can be deleted,
+ // and if we didn't do this, the record would stick around forever.
+ bool found_address = false;
+ for (const std::pair<const CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
+ if (item.second.name == label) {
+ found_address = true;
+ break;
+ }
+ }
+ if (!found_address) {
+ pwallet->DeleteLabel(old_label);
+ }
+
return NullUniValue;
}
@@ -447,7 +440,7 @@ static UniValue getaddressesbyaccount(const JSONRPCRequest& request)
// Find all addresses that have the given account
UniValue ret(UniValue::VARR);
- for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
+ for (const std::pair<const CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
const CTxDestination& dest = item.first;
const std::string& strName = item.second.name;
if (strName == strAccount) {
@@ -760,7 +753,7 @@ static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
// Tally
CAmount nAmount = 0;
- for (const std::pair<uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
+ for (const std::pair<const uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
const CWalletTx& wtx = pairWtx.second;
if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx))
continue;
@@ -828,7 +821,7 @@ static UniValue getreceivedbylabel(const JSONRPCRequest& request)
// Tally
CAmount nAmount = 0;
- for (const std::pair<uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
+ for (const std::pair<const uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
const CWalletTx& wtx = pairWtx.second;
if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx))
continue;
@@ -1457,13 +1450,6 @@ static UniValue addwitnessaddress(const JSONRPCRequest& request)
"Projects should transition to using the address_type argument of getnewaddress, or option -addresstype=[bech32|p2sh-segwit] instead.\n");
}
- {
- LOCK(cs_main);
- if (!IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus()) && !gArgs.GetBoolArg("-walletprematurewitness", false)) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Segregated witness not enabled on network");
- }
- }
-
CTxDestination dest = DecodeDestination(request.params[0].get_str());
if (!IsValidDestination(dest)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address");
@@ -1541,7 +1527,7 @@ static UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bo
// Tally
std::map<CTxDestination, tallyitem> mapTally;
- for (const std::pair<uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
+ for (const std::pair<const uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
const CWalletTx& wtx = pairWtx.second;
if (wtx.IsCoinBase() || !CheckFinalTx(*wtx.tx))
@@ -2120,13 +2106,13 @@ static UniValue listaccounts(const JSONRPCRequest& request)
includeWatchonly = includeWatchonly | ISMINE_WATCH_ONLY;
std::map<std::string, CAmount> mapAccountBalances;
- for (const std::pair<CTxDestination, CAddressBookData>& entry : pwallet->mapAddressBook) {
+ for (const std::pair<const CTxDestination, CAddressBookData>& entry : pwallet->mapAddressBook) {
if (IsMine(*pwallet, entry.first) & includeWatchonly) { // This address belongs to me
mapAccountBalances[entry.second.name] = 0;
}
}
- for (const std::pair<uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
+ for (const std::pair<const uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
const CWalletTx& wtx = pairWtx.second;
CAmount nFee;
std::string strSentAccount;
@@ -2155,7 +2141,7 @@ static UniValue listaccounts(const JSONRPCRequest& request)
mapAccountBalances[entry.strAccount] += entry.nCreditDebit;
UniValue ret(UniValue::VOBJ);
- for (const std::pair<std::string, CAmount>& accountBalance : mapAccountBalances) {
+ for (const std::pair<const std::string, CAmount>& accountBalance : mapAccountBalances) {
ret.pushKV(accountBalance.first, ValueFromAmount(accountBalance.second));
}
return ret;
@@ -2264,7 +2250,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
UniValue transactions(UniValue::VARR);
- for (const std::pair<uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
+ for (const std::pair<const uint256, CWalletTx>& pairWtx : pwallet->mapWallet) {
CWalletTx tx = pairWtx.second;
if (depth == -1 || tx.GetDepthInMainChain() < depth) {
@@ -4201,7 +4187,7 @@ static UniValue getaddressesbylabel(const JSONRPCRequest& request)
// Find all addresses that have the given label
UniValue ret(UniValue::VOBJ);
- for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
+ for (const std::pair<const CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
if (item.second.name == label) {
ret.pushKV(EncodeDestination(item.first), AddressBookDataToJSON(item.second, false));
}
@@ -4254,7 +4240,7 @@ static UniValue listlabels(const JSONRPCRequest& request)
// Add to a set to sort by label name, then insert into Univalue array
std::set<std::string> label_set;
- for (const std::pair<CTxDestination, CAddressBookData>& entry : pwallet->mapAddressBook) {
+ for (const std::pair<const CTxDestination, CAddressBookData>& entry : pwallet->mapAddressBook) {
if (purpose.empty() || entry.second.purpose == purpose) {
label_set.insert(entry.second.name);
}
@@ -4404,7 +4390,7 @@ static const CRPCCommand commands[] =
{ "wallet", "sethdseed", &sethdseed, {"newkeypool","seed"} },
/** Account functions (deprecated) */
- { "wallet", "getaccountaddress", &getlabeladdress, {"account"} },
+ { "wallet", "getaccountaddress", &getaccountaddress, {"account"} },
{ "wallet", "getaccount", &getaccount, {"address"} },
{ "wallet", "getaddressesbyaccount", &getaddressesbyaccount, {"account"} },
{ "wallet", "getreceivedbyaccount", &getreceivedbylabel, {"account","minconf"} },
@@ -4414,7 +4400,6 @@ static const CRPCCommand commands[] =
{ "wallet", "move", &movecmd, {"fromaccount","toaccount","amount","minconf","comment"} },
/** Label functions (to replace non-balance account functions) */
- { "wallet", "getlabeladdress", &getlabeladdress, {"label","force"} },
{ "wallet", "getaddressesbylabel", &getaddressesbylabel, {"label"} },
{ "wallet", "getreceivedbylabel", &getreceivedbylabel, {"label","minconf"} },
{ "wallet", "listlabels", &listlabels, {"purpose"} },
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index e90370cf06..d90be33000 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -2,14 +2,14 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "wallet/wallet.h"
-#include "wallet/coinselection.h"
-#include "wallet/coincontrol.h"
-#include "amount.h"
-#include "primitives/transaction.h"
-#include "random.h"
-#include "test/test_bitcoin.h"
-#include "wallet/test/wallet_test_fixture.h"
+#include <wallet/wallet.h>
+#include <wallet/coinselection.h>
+#include <wallet/coincontrol.h>
+#include <amount.h>
+#include <primitives/transaction.h>
+#include <random.h>
+#include <test/test_bitcoin.h>
+#include <wallet/test/wallet_test_fixture.h>
#include <boost/test/unit_test.hpp>
#include <random>
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 3987e8e70a..014a59ce2b 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -923,11 +923,10 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
CWalletTx& wtx = (*ret.first).second;
wtx.BindWallet(this);
bool fInsertedNew = ret.second;
- if (fInsertedNew)
- {
+ if (fInsertedNew) {
wtx.nTimeReceived = GetAdjustedTime();
wtx.nOrderPos = IncOrderPosNext(&batch);
- wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
+ wtx.m_it_wtxOrdered = wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
wtx.nTimeSmart = ComputeTimeSmart(wtx);
AddToSpends(hash);
}
@@ -998,9 +997,12 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
bool CWallet::LoadToWallet(const CWalletTx& wtxIn)
{
uint256 hash = wtxIn.GetHash();
- CWalletTx& wtx = mapWallet.emplace(hash, wtxIn).first->second;
+ const auto& ins = mapWallet.emplace(hash, wtxIn);
+ CWalletTx& wtx = ins.first->second;
wtx.BindWallet(this);
- wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
+ if (/* insertion took place */ ins.second) {
+ wtx.m_it_wtxOrdered = wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
+ }
AddToSpends(hash);
for (const CTxIn& txin : wtx.tx->vin) {
auto it = mapWallet.find(txin.prevout.hash);
@@ -2469,8 +2471,11 @@ bool CWallet::OutputEligibleForSpending(const COutput& output, const CoinEligibi
if (output.nDepth < (output.tx->IsFromMe(ISMINE_ALL) ? eligibility_filter.conf_mine : eligibility_filter.conf_theirs))
return false;
- if (!mempool.TransactionWithinChainLimit(output.tx->GetHash(), eligibility_filter.max_ancestors))
+ size_t ancestors, descendants;
+ mempool.GetTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
+ if (ancestors > eligibility_filter.max_ancestors || descendants > eligibility_filter.max_descendants) {
return false;
+ }
return true;
}
@@ -2582,16 +2587,17 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
++it;
}
- size_t nMaxChainLength = std::min(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT), gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
+ size_t max_ancestors = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT));
+ size_t max_descendants = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
bool fRejectLongChains = gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS);
bool res = nTargetValue <= nValueFromPresetInputs ||
SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 6, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 1, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
(m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, 2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, nMaxChainLength/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, nMaxChainLength/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, nMaxChainLength), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
(m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max()), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used));
// because SelectCoinsMinConf clears the setCoinsRet, we now add the possible inputs to the coinset
@@ -2609,7 +2615,7 @@ bool CWallet::SignTransaction(CMutableTransaction &tx)
// sign the new tx
int nIn = 0;
- for (const auto& input : tx.vin) {
+ for (auto& input : tx.vin) {
std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(input.prevout.hash);
if(mi == mapWallet.end() || input.prevout.n >= mi->second.tx->vout.size()) {
return false;
@@ -2620,7 +2626,7 @@ bool CWallet::SignTransaction(CMutableTransaction &tx)
if (!ProduceSignature(*this, MutableTransactionSignatureCreator(&tx, nIn, amount, SIGHASH_ALL), scriptPubKey, sigdata)) {
return false;
}
- UpdateTransaction(tx, nIn, sigdata);
+ UpdateInput(input, sigdata);
nIn++;
}
return true;
@@ -3050,7 +3056,7 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransac
strFailReason = _("Signing transaction failed");
return false;
} else {
- UpdateTransaction(txNew, nIn, sigdata);
+ UpdateInput(txNew.vin.at(nIn), sigdata);
}
nIn++;
@@ -3206,8 +3212,11 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256
{
AssertLockHeld(cs_wallet); // mapWallet
DBErrors nZapSelectTxRet = WalletBatch(*database,"cr+").ZapSelectTx(vHashIn, vHashOut);
- for (uint256 hash : vHashOut)
- mapWallet.erase(hash);
+ for (uint256 hash : vHashOut) {
+ const auto& it = mapWallet.find(hash);
+ wtxOrdered.erase(it->second.m_it_wtxOrdered);
+ mapWallet.erase(it);
+ }
if (nZapSelectTxRet == DBErrors::NEED_REWRITE)
{
@@ -3280,7 +3289,7 @@ bool CWallet::DelAddressBook(const CTxDestination& address)
// Delete destdata tuples associated with address
std::string strAddress = EncodeDestination(address);
- for (const std::pair<std::string, std::string> &item : mapAddressBook[address].destdata)
+ for (const std::pair<const std::string, std::string> &item : mapAddressBook[address].destdata)
{
WalletBatch(*database).EraseDestData(strAddress, item.first);
}
@@ -3681,7 +3690,7 @@ std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) co
{
LOCK(cs_wallet);
std::set<CTxDestination> result;
- for (const std::pair<CTxDestination, CAddressBookData>& item : mapAddressBook)
+ for (const std::pair<const CTxDestination, CAddressBookData>& item : mapAddressBook)
{
const CTxDestination& address = item.first;
const std::string& strName = item.second.name;
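
AddToWallet and LoadToWallet above now stash the iterator returned by wtxOrdered.insert() in the wallet transaction itself (m_it_wtxOrdered), so ZapSelectTx can erase the ordered-list entry directly instead of scanning the multimap for it. A standalone sketch of that pattern with generic types; it relies on the guarantee that map/multimap iterators and element addresses stay valid until the element itself is erased.

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct Tx;
using Ordered = std::multimap<int64_t, Tx*>;  // stand-in for wtxOrdered

struct Tx {
    std::string id;
    int64_t order_pos;
    Ordered::const_iterator it_ordered;  // cached position in the ordered view
};

int main()
{
    std::map<std::string, Tx> wallet;  // primary container keyed by txid; node addresses are stable
    Ordered ordered;                   // secondary view sorted by order position

    auto add = [&](const std::string& id, int64_t pos) {
        Tx& tx = wallet.emplace(id, Tx{id, pos, {}}).first->second;
        // Remember where the entry landed; the iterator stays valid until this
        // exact element is erased, so the handle can be kept indefinitely.
        tx.it_ordered = ordered.emplace(tx.order_pos, &tx);
    };

    add("txA", 0);
    add("txB", 1);
    add("txC", 2);

    // "Zap" txB: drop its ordered entry via the cached iterator, no scan needed,
    // then erase the transaction itself.
    auto it = wallet.find("txB");
    ordered.erase(it->second.it_ordered);
    wallet.erase(it);

    for (const auto& entry : ordered) {
        std::printf("%lld %s\n", static_cast<long long>(entry.first), entry.second->id.c_str());
    }
    return 0;
}
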
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index f1761b0fcf..aacbdf81d6 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -339,6 +339,7 @@ public:
char fFromMe;
std::string strFromAccount;
int64_t nOrderPos; //!< position in ordered transaction list
+ std::multimap<int64_t, std::pair<CWalletTx*, CAccountingEntry*>>::const_iterator m_it_wtxOrdered;
// memory only
mutable bool fDebitCached;
@@ -662,8 +663,10 @@ struct CoinEligibilityFilter
const int conf_mine;
const int conf_theirs;
const uint64_t max_ancestors;
+ const uint64_t max_descendants;
- CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors) {}
+ CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_ancestors) {}
+ CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors, uint64_t max_descendants) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_descendants) {}
};
class WalletRescanReserver; //forward declarations for ScanForWalletTransactions/RescanFromTime
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
index d1bf9206b2..91b6415a7c 100755
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -63,7 +63,7 @@ def get_log_events(source, logfile):
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
- with open(logfile, 'r') as infile:
+ with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
diff --git a/test/functional/feature_includeconf.py b/test/functional/feature_includeconf.py
index 9ccb89af43..9a7a0ca103 100755
--- a/test/functional/feature_includeconf.py
+++ b/test/functional/feature_includeconf.py
@@ -41,14 +41,9 @@ class IncludeConfTest(BitcoinTestFramework):
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
- self.log.info("-includeconf cannot be used as command-line arg. subversion should still end with 'main; relative)/'")
+ self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
-
- self.start_node(0, extra_args=["-includeconf=relative2.conf"])
-
- subversion = self.nodes[0].getnetworkinfo()["subversion"]
- assert subversion.endswith("main; relative)/")
- self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from commandline; ignoring -includeconf=relative2.conf")
+ self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
@@ -59,8 +54,18 @@ class IncludeConfTest(BitcoinTestFramework):
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
+ self.log.info("-includeconf cannot contain invalid arg")
+ with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
+ f.write("foo=bar\n")
+ self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")
+
+ self.log.info("-includeconf cannot be invalid path")
+ os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
+ self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
+
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
+ # Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f:
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index 8964c8d64b..6d51f31e35 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -36,7 +36,7 @@ class NotificationsTest(BitcoinTestFramework):
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated blocks hashes
- with open(self.block_filename, 'r') as f:
+ with open(self.block_filename, 'r', encoding="utf-8") as f:
assert_equal(sorted(blocks), sorted(l.strip() for l in f.read().splitlines()))
self.log.info("test -walletnotify")
@@ -45,7 +45,7 @@ class NotificationsTest(BitcoinTestFramework):
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
- with open(self.tx_filename, 'r') as f:
+ with open(self.tx_filename, 'r', encoding="ascii") as f:
assert_equal(sorted(txids_rpc), sorted(l.strip() for l in f.read().splitlines()))
os.remove(self.tx_filename)
@@ -58,7 +58,7 @@ class NotificationsTest(BitcoinTestFramework):
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
- with open(self.tx_filename, 'r') as f:
+ with open(self.tx_filename, 'r', encoding="ascii") as f:
assert_equal(sorted(txids_rpc), sorted(l.strip() for l in f.read().splitlines()))
# Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py
index 7db6a03b45..6577d83f5c 100755
--- a/test/functional/feature_nulldummy.py
+++ b/test/functional/feature_nulldummy.py
@@ -42,7 +42,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
- self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]]
+ self.extra_args = [['-whitelist=127.0.0.1', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 11a52b9ee2..d400507a66 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -260,10 +260,17 @@ class PruneTest(BitcoinTestFramework):
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
+ # Save block transaction count before pruning, assert value
+ block1_details = node.getblock(node.getblockhash(1))
+ assert_equal(block1_details["nTx"], len(block1_details["tx"]))
+
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
+ # Pruned block should still know the number of transactions
+ assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
+
# negative heights should raise an exception
assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 3622e1834a..b10306b283 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -42,9 +42,9 @@ class SegWitTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
- self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
- ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
- ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]]
+ self.extra_args = [["-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
+ ["-blockversion=4", "-promiscuousmempoolflags=517", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
+ ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]]
def setup_network(self):
super().setup_network()
@@ -129,21 +129,6 @@ class SegWitTest(BitcoinTestFramework):
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
- self.log.info("Verify default node can't accept any witness format txs before fork")
- # unsigned, no scriptsig
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
- # unsigned with redeem script
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
- # signed
- self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
- self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
- self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
- self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
-
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
@@ -164,6 +149,16 @@ class SegWitTest(BitcoinTestFramework):
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
+ self.log.info("Verify default node can't accept txs with missing witness")
+ # unsigned, no scriptsig
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
+ # unsigned with redeem script
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
+
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index e80a764477..940d085e89 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -230,55 +230,9 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
- # Create a p2sh output -- this is so we can pass the standardness
- # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
- # in P2SH).
- p2sh_program = CScript([OP_TRUE])
- p2sh_pubkey = hash160(p2sh_program)
- scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
-
- # Now check that unnecessary witnesses can't be used to blind a node
- # to a transaction, eg by violating standardness checks.
- tx2 = CTransaction()
- tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
- tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
- tx2.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
- self.nodes[0].generate(1)
- sync_blocks(self.nodes)
-
- # We'll add an unnecessary witness to this transaction that would cause
- # it to be non-standard, to test that violating policy with a witness before
- # segwit activation doesn't blind a node to a transaction. Transactions
- # rejected for having a witness before segwit activation shouldn't be added
- # to the rejection cache.
- tx3 = CTransaction()
- tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
- tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
- tx3.wit.vtxinwit.append(CTxInWitness())
- tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
- tx3.rehash()
- # Note that this should be rejected for the premature witness reason,
- # rather than a policy check, since segwit hasn't activated yet.
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet')
-
- # If we send without witness, it should be accepted.
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True)
-
- # Now create a new anyone-can-spend utxo for the next test.
- tx4 = CTransaction()
- tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
- tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
- tx4.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True)
-
- self.nodes[0].generate(1)
- sync_blocks(self.nodes)
-
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
- self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
+ self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
# ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
# backdated so that it applies to all blocks, going back to the genesis
@@ -1119,7 +1073,7 @@ class SegWitTest(BitcoinTestFramework):
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
- # V0 segwit outputs should be standard after activation, but not before.
+ # V0 segwit outputs and inputs are always standard. V0 segwit inputs may only be mined after activation.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
@@ -1148,45 +1102,46 @@ class SegWitTest(BitcoinTestFramework):
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
+ tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=segwit_activated)
+ # This is always accepted, since the mempool policy is to consider segwit as always active
+ # and thus allow segwit outputs
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
- if segwit_activated:
- # if tx was accepted, then we spend the second output.
- tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
- tx2.vout = [CTxOut(7000, scriptPubKey)]
- tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
- else:
- # if tx wasn't accepted, we just re-spend the p2sh output we started with.
- tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
- tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
+ # tx was accepted, so we spend the second output.
+ tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
+ tx2.vout = [CTxOut(7000, scriptPubKey)]
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=segwit_activated)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
- if segwit_activated:
- # tx and tx2 were both accepted. Don't bother trying to reclaim the
- # P2PKH output; just send tx's first output back to an anyone-can-spend.
- sync_mempools([self.nodes[0], self.nodes[1]])
- tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
- tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
- tx3.wit.vtxinwit.append(CTxInWitness())
- tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
- tx3.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
- else:
- # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
- tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
- tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
+ # tx and tx2 were both accepted. Don't bother trying to reclaim the
+ # P2PKH output; just send tx's first output back to an anyone-can-spend.
+ sync_mempools([self.nodes[0], self.nodes[1]])
+ tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+ tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx3.rehash()
+ if not segwit_activated:
+ # Just check mempool acceptance, but don't add the transaction to the mempool, since witness data
+ # is disallowed in blocks before activation, making the tx impossible to mine right now.
+ assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
+ # Create the same output as tx3, but by reusing tx (which carries no witness) as the base transaction
+ tx3_out = tx3.vout[0]
+ tx3 = tx
+ tx3.vout = [tx3_out]
tx3.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
+ assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
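As used in the hunk above, testmempoolaccept reports whether the mempool would accept a raw transaction without broadcasting it; a minimal sketch, assuming the node and tx objects come from the test framework:

# Hedged sketch: the RPC takes a list of raw transaction hex strings and
# returns one result dict per transaction.
raw_hex = bytes_to_hex_str(tx.serialize_with_witness())  # framework helper, assumed in scope
result = node.testmempoolaccept([raw_hex])
assert result[0]['txid'] == tx.hash
assert result[0]['allowed'] is True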
@@ -1855,6 +1810,60 @@ class SegWitTest(BitcoinTestFramework):
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
+ def test_non_standard_witness_blinding(self):
+ self.log.info("Testing that unnecessary witnesses in transactions do not blind the node to the transaction")
+ assert (len(self.utxo) > 0)
+
+ # Create a p2sh output -- this is so we can pass the standardness
+ # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
+ # in P2SH).
+ p2sh_program = CScript([OP_TRUE])
+ p2sh_pubkey = hash160(p2sh_program)
+ scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+
+ # Now check that unnecessary witnesses can't be used to blind a node
+ # to a transaction, e.g. by violating standardness checks.
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, scriptPubKey))
+ tx.rehash()
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, False, True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # We'll add an unnecessary witness to this transaction that would cause
+ # it to be non-standard, to test that violating policy with a witness
+ # doesn't blind a node to a transaction. Transactions rejected for
+ # having a witness shouldn't be added to the rejection cache.
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, scriptPubKey))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
+ tx2.rehash()
+ # This will be rejected due to a policy check:
+ # no witness is allowed here, since the input spends a plain P2SH output, not a witness program
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, True, False, b'bad-witness-nonstandard')
+
+ # If we send without witness, it should be accepted.
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, False, True)
+
+ # Now create a new anyone-can-spend utxo for the next test.
+ tx3 = CTransaction()
+ tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
+ tx3.rehash()
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # Update our utxo list; we spent the first entry.
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
@@ -2023,6 +2032,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
+ self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index 095cc4b734..7ee8168e2f 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -288,6 +288,7 @@ class SendHeadersTest(BitcoinTestFramework):
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
+ self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
@@ -335,11 +336,13 @@ class SendHeadersTest(BitcoinTestFramework):
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
+ self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
+ self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for b in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
@@ -386,6 +389,7 @@ class SendHeadersTest(BitcoinTestFramework):
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
+ self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
@@ -412,23 +416,28 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
+ self.log.debug("Part 3.{}.{}: starting...".format(j, i))
+
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
- self.log.debug("Just get the data -- shouldn't cause headers announcements to resume")
+ # Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
- self.log.debug("Send a getheaders message that shouldn't trigger headers announcements to resume (best header sent will be too old)")
+ # Send a getheaders message that shouldn't trigger headers announcements
+ # to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
+ # This time, try sending either a getheaders to trigger resumption
+ # of headers announcements, or mine a new block and inv it, also
+ # triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
- self.log.debug("This time, try sending either a getheaders to trigger resumption of headers announcements, or mine a new block and inv it, also triggering resumption of headers announcements.")
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
@@ -532,6 +541,7 @@ class SendHeadersTest(BitcoinTestFramework):
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
+ self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 17e24453e5..7acc59c2c6 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -217,6 +217,7 @@ class BlockchainTest(BitcoinTestFramework):
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
+ assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py
index 2e0500e7c4..bc27c183b1 100755
--- a/test/functional/rpc_deprecated.py
+++ b/test/functional/rpc_deprecated.py
@@ -40,7 +40,6 @@ class DeprecatedRpcTest(BitcoinTestFramework):
#
# The following 'label' RPC methods are usable both with and without the
# -deprecatedrpc=accounts switch enabled.
- # - getlabeladdress
# - getaddressesbylabel
# - getreceivedbylabel
# - listlabels
@@ -69,10 +68,6 @@ class DeprecatedRpcTest(BitcoinTestFramework):
assert_raises_rpc_error(-32, "getaccountaddress is deprecated", self.nodes[0].getaccountaddress, "label0")
self.nodes[1].getaccountaddress("label1")
- self.log.info("- getlabeladdress")
- self.nodes[0].getlabeladdress("label0")
- self.nodes[1].getlabeladdress("label1")
-
self.log.info("- getaddressesbyaccount")
assert_raises_rpc_error(-32, "getaddressesbyaccount is deprecated", self.nodes[0].getaddressesbyaccount, "label0")
self.nodes[1].getaddressesbyaccount("label1")
diff --git a/test/functional/rpc_getblockstats.py b/test/functional/rpc_getblockstats.py
index 060a2373b1..f573faaf17 100755
--- a/test/functional/rpc_getblockstats.py
+++ b/test/functional/rpc_getblockstats.py
@@ -81,11 +81,11 @@ class GetblockstatsTest(BitcoinTestFramework):
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
- with open(filename, 'w') as f:
+ with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
- with open(filename, 'r') as f:
+ with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 75a8986793..5c2555c1ff 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -359,7 +359,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
- fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
+ fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index e016148f70..5e0b61b5e7 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -327,7 +327,7 @@ def get_auth_cookie(datadir):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
- with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
+ with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 65e4c0817e..36101d9f57 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -213,7 +213,7 @@ def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
- config.read_file(open(configfile))
+ config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
@@ -419,10 +419,6 @@ class TestHandler:
self.test_list = test_list
self.flags = flags
self.num_running = 0
- # In case there is a graveyard of zombie bitcoinds, we can apply a
- # pseudorandom offset to hopefully jump over them.
- # (625 is PORT_RANGE/MAX_NODES)
- self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
@@ -430,7 +426,7 @@ class TestHandler:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
- portseed = len(self.test_list) + self.portseed_offset
+ portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
@@ -594,7 +590,7 @@ class RPCCoverage():
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
- with open(coverage_ref_filename, 'r') as coverage_ref_file:
+ with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
@@ -603,7 +599,7 @@ class RPCCoverage():
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
- with open(filename, 'r') as coverage_file:
+ with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index fcc11abce0..f07041706a 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -31,7 +31,7 @@ class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
- self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
+ self.extra_args = [["-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
def run_test(self):
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index 705dd8985e..f5d7830fca 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -5,7 +5,7 @@
"""Test label RPCs.
RPCs tested are:
- - getlabeladdress
+ - getaccountaddress
- getaddressesbyaccount/getaddressesbylabel
- listaddressgroupings
- setlabel
@@ -92,17 +92,24 @@ class WalletLabelsTest(BitcoinTestFramework):
# recognize the label/address associations.
labels = [Label(name, accounts_api) for name in ("a", "b", "c", "d", "e")]
for label in labels:
- label.add_receive_address(node.getlabeladdress(label=label.name, force=True))
+ if accounts_api:
+ address = node.getaccountaddress(label.name)
+ else:
+ address = node.getnewaddress(label.name)
+ label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), [label.name for label in labels])
# Send a transaction to each label, and make sure this forces
- # getlabeladdress to generate a new receiving address.
+ # getaccountaddress to generate a new receiving address.
for label in labels:
- node.sendtoaddress(label.receive_address, amount_to_send)
- label.add_receive_address(node.getlabeladdress(label.name))
+ if accounts_api:
+ node.sendtoaddress(label.receive_address, amount_to_send)
+ label.add_receive_address(node.getaccountaddress(label.name))
+ else:
+ node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
@@ -115,10 +122,17 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that sendfrom label reduces listaccounts balances.
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
- node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+ if accounts_api:
+ node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+ else:
+ node.sendfrom(label.name, to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
- label.add_receive_address(node.getlabeladdress(label.name))
+ if accounts_api:
+ address = node.getaccountaddress(label.name)
+ else:
+ address = node.getnewaddress(label.name)
+ label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
if accounts_api:
@@ -134,12 +148,12 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that setlabel can assign a label to a new unused address.
for label in labels:
- address = node.getlabeladdress(label="", force=True)
+ address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
if accounts_api:
- assert(address not in node.getaddressesbyaccount(""))
+ assert address not in node.getaddressesbyaccount("")
else:
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
@@ -160,19 +174,20 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that setlabel can change the label of an address from a
# different label.
- change_label(node, labels[0].addresses[0], labels[0], labels[1])
-
- # Check that setlabel can change the label of an address which
- # is the receiving address of a different label.
- change_label(node, labels[0].receive_address, labels[0], labels[1])
+ change_label(node, labels[0].addresses[0], labels[0], labels[1], accounts_api)
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
- change_label(node, labels[2].addresses[0], labels[2], labels[2])
+ change_label(node, labels[2].addresses[0], labels[2], labels[2], accounts_api)
+
+ if accounts_api:
+ # Check that setaccount can change the label of an address which
+ # is the receiving address of a different label.
+ change_label(node, labels[0].receive_address, labels[0], labels[1], accounts_api)
- # Check that setlabel can set the label of an address which is
- # already the receiving address of the label. This is a no-op.
- change_label(node, labels[2].receive_address, labels[2], labels[2])
+ # Check that setaccount can set the label of an address which is
+ # already the receiving address of the label. This is a no-op.
+ change_label(node, labels[2].receive_address, labels[2], labels[2], accounts_api)
class Label:
def __init__(self, name, accounts_api):
@@ -192,12 +207,14 @@ class Label:
def add_receive_address(self, address):
self.add_address(address)
- self.receive_address = address
+ if self.accounts_api:
+ self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
- assert_equal(node.getlabeladdress(self.name), self.receive_address)
+ if self.accounts_api:
+ assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(
@@ -216,22 +233,26 @@ class Label:
assert_equal(set(node.getaddressesbyaccount(self.name)), set(self.addresses))
-def change_label(node, address, old_label, new_label):
+def change_label(node, address, old_label, new_label, accounts_api):
assert_equal(address in old_label.addresses, True)
- node.setlabel(address, new_label.name)
+ if accounts_api:
+ node.setaccount(address, new_label.name)
+ else:
+ node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
- # Calling setlabel on an address which was previously the receiving
- # address of a different label should reset the receiving address of
- # the old label, causing getlabeladdress to return a brand new
+ # Calling setaccount on an address which was previously the receiving
+ # address of a different account should reset the receiving address of
+ # the old account, causing getaccountaddress to return a brand new
# address.
- if old_label.name != new_label.name and address == old_label.receive_address:
- new_address = node.getlabeladdress(old_label.name)
- assert_equal(new_address not in old_label.addresses, True)
- assert_equal(new_address not in new_label.addresses, True)
- old_label.add_receive_address(new_address)
+ if accounts_api:
+ if old_label.name != new_label.name and address == old_label.receive_address:
+ new_address = node.getaccountaddress(old_label.name)
+ assert_equal(new_address not in old_label.addresses, True)
+ assert_equal(new_address not in new_label.addresses, True)
+ old_label.add_receive_address(new_address)
old_label.verify(node)
new_label.verify(node)
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index 53638615f6..a0fbc4a754 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -88,7 +88,7 @@ class MultiWalletTest(BitcoinTestFramework):
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
- open(not_a_dir, 'a').close()
+ open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -zapwallettxes with multiwallet")
diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py
index de5719eb29..89776b2a6b 100755
--- a/test/lint/check-doc.py
+++ b/test/lint/check-doc.py
@@ -22,7 +22,7 @@ CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
-SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
+SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
def main():
diff --git a/test/lint/check-rpc-mappings.py b/test/lint/check-rpc-mappings.py
index 7e96852c5c..c3cdeef580 100755
--- a/test/lint/check-rpc-mappings.py
+++ b/test/lint/check-rpc-mappings.py
@@ -44,7 +44,7 @@ def process_commands(fname):
"""Find and parse dispatch table in implementation file `fname`."""
cmds = []
in_rpcs = False
- with open(fname, "r") as f:
+ with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
@@ -70,7 +70,7 @@ def process_mapping(fname):
"""Find and parse conversion table in implementation file `fname`."""
cmds = []
in_rpcs = False
- with open(fname, "r") as f:
+ with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh
index 1c9dbc7f68..f1327469f3 100755
--- a/test/lint/commit-script-check.sh
+++ b/test/lint/commit-script-check.sh
@@ -11,6 +11,7 @@
# The resulting script should exactly transform the previous commit into the current
# one. Any remaining diff signals an error.
+export LC_ALL=C
if test "x$1" = "x"; then
echo "Usage: $0 <commit>..."
exit 1
diff --git a/test/lint/git-subtree-check.sh b/test/lint/git-subtree-check.sh
index 184951715e..85e8b841b6 100755
--- a/test/lint/git-subtree-check.sh
+++ b/test/lint/git-subtree-check.sh
@@ -3,6 +3,7 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+export LC_ALL=C
DIR="$1"
COMMIT="$2"
if [ -z "$COMMIT" ]; then
diff --git a/test/lint/lint-all.sh b/test/lint/lint-all.sh
index b6d86959c6..c9d4ec7199 100755
--- a/test/lint/lint-all.sh
+++ b/test/lint/lint-all.sh
@@ -7,6 +7,10 @@
# This script runs all contrib/devtools/lint-*.sh files, and fails if any exit
# with a non-zero status code.
+# This script is intentionally locale dependent by not setting "export LC_ALL=C"
+# in order to allow for the executed lint scripts to opt in or opt out of locale
+# dependence themselves.
+
set -u
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
diff --git a/test/lint/lint-filenames.sh b/test/lint/lint-filenames.sh
new file mode 100755
index 0000000000..61e978fe79
--- /dev/null
+++ b/test/lint/lint-filenames.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Make sure only lowercase alphanumerics (a-z0-9), underscores (_),
+# hyphens (-) and dots (.) are used in source code filenames.
+
+export LC_ALL=C
+
+EXIT_CODE=0
+OUTPUT=$(git ls-files -- "*.cpp" "*.h" "*.py" "*.sh" | grep -vE '^[a-z0-9_./-]+$' | grep -vE 'src/(secp256k1|univalue)/')
+if [[ ${OUTPUT} != "" ]]; then
+ echo "Use only lowercase alphanumerics (a-z0-9), underscores (_), hyphens (-) and dots (.)"
+ echo "in source code filenames:"
+ echo
+ echo "${OUTPUT}"
+ EXIT_CODE=1
+fi
+exit ${EXIT_CODE}
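The filename rule above can be mirrored in a short Python sketch (the filenames below are hypothetical, purely for illustration):

import re

# Only lowercase alphanumerics, underscores, hyphens, dots and path separators
# are allowed in tracked source filenames.
ALLOWED = re.compile(r"^[a-z0-9_./-]+$")
for name in ["src/foo_bar.cpp", "src/Foo_Bar.cpp"]:
    if not ALLOWED.match(name):
        print("bad filename:", name)  # the mixed-case name would be flagged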
diff --git a/test/lint/lint-include-guards.sh b/test/lint/lint-include-guards.sh
index 6a0dd556bb..ea791b0176 100755
--- a/test/lint/lint-include-guards.sh
+++ b/test/lint/lint-include-guards.sh
@@ -6,6 +6,7 @@
#
# Check include guards.
+export LC_ALL=C
HEADER_ID_PREFIX="BITCOIN_"
HEADER_ID_SUFFIX="_H"
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index f54be46b52..03350576a7 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -5,12 +5,18 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for duplicate includes.
+# Guard against accidental introduction of new Boost dependencies.
+# Check includes: Check for duplicate includes. Enforce bracket syntax includes.
+
+export LC_ALL=C
+IGNORE_REGEXP="/(leveldb|secp256k1|univalue)/"
filter_suffix() {
- git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "/(leveldb|secp256k1|univalue)/"
+ git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "${IGNORE_REGEXP}"
}
EXIT_CODE=0
+
for HEADER_FILE in $(filter_suffix h); do
DUPLICATE_INCLUDES_IN_HEADER_FILE=$(grep -E "^#include " < "${HEADER_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_HEADER_FILE} != "" ]]; then
@@ -20,6 +26,7 @@ for HEADER_FILE in $(filter_suffix h); do
EXIT_CODE=1
fi
done
+
for CPP_FILE in $(filter_suffix cpp); do
DUPLICATE_INCLUDES_IN_CPP_FILE=$(grep -E "^#include " < "${CPP_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_CPP_FILE} != "" ]]; then
@@ -29,4 +36,85 @@ for CPP_FILE in $(filter_suffix cpp); do
EXIT_CODE=1
fi
done
+
+INCLUDED_CPP_FILES=$(git grep -E "^#include [<\"][^>\"]+\.cpp[>\"]" -- "*.cpp" "*.h")
+if [[ ${INCLUDED_CPP_FILES} != "" ]]; then
+ echo "The following files #include .cpp files:"
+ echo "${INCLUDED_CPP_FILES}"
+ echo
+ EXIT_CODE=1
+fi
+
+EXPECTED_BOOST_INCLUDES=(
+ boost/algorithm/string.hpp
+ boost/algorithm/string/case_conv.hpp
+ boost/algorithm/string/classification.hpp
+ boost/algorithm/string/join.hpp
+ boost/algorithm/string/predicate.hpp
+ boost/algorithm/string/replace.hpp
+ boost/algorithm/string/split.hpp
+ boost/assign/std/vector.hpp
+ boost/bind.hpp
+ boost/chrono/chrono.hpp
+ boost/date_time/posix_time/posix_time.hpp
+ boost/filesystem.hpp
+ boost/filesystem/detail/utf8_codecvt_facet.hpp
+ boost/filesystem/fstream.hpp
+ boost/interprocess/sync/file_lock.hpp
+ boost/multi_index/hashed_index.hpp
+ boost/multi_index/ordered_index.hpp
+ boost/multi_index/sequenced_index.hpp
+ boost/multi_index_container.hpp
+ boost/optional.hpp
+ boost/preprocessor/cat.hpp
+ boost/preprocessor/stringize.hpp
+ boost/program_options/detail/config_file.hpp
+ boost/scoped_array.hpp
+ boost/signals2/connection.hpp
+ boost/signals2/last_value.hpp
+ boost/signals2/signal.hpp
+ boost/test/unit_test.hpp
+ boost/thread.hpp
+ boost/thread/condition_variable.hpp
+ boost/thread/mutex.hpp
+ boost/thread/thread.hpp
+ boost/variant.hpp
+ boost/variant/apply_visitor.hpp
+ boost/variant/static_visitor.hpp
+)
+
+for BOOST_INCLUDE in $(git grep '^#include <boost/' -- "*.cpp" "*.h" | cut -f2 -d: | cut -f2 -d'<' | cut -f1 -d'>' | sort -u); do
+ IS_EXPECTED_INCLUDE=0
+ for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
+ if [[ "${BOOST_INCLUDE}" == "${EXPECTED_BOOST_INCLUDE}" ]]; then
+ IS_EXPECTED_INCLUDE=1
+ break
+ fi
+ done
+ if [[ ${IS_EXPECTED_INCLUDE} == 0 ]]; then
+ EXIT_CODE=1
+ echo "A new Boost dependency in the form of \"${BOOST_INCLUDE}\" appears to have been introduced:"
+ git grep "${BOOST_INCLUDE}" -- "*.cpp" "*.h"
+ echo
+ fi
+done
+
+for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
+ if ! git grep -q "^#include <${EXPECTED_BOOST_INCLUDE}>" -- "*.cpp" "*.h"; then
+ echo "Good job! The Boost dependency \"${EXPECTED_BOOST_INCLUDE}\" is no longer used."
+ echo "Please remove it from EXPECTED_BOOST_INCLUDES in $0"
+ echo "to make sure this dependency is not accidentally reintroduced."
+ echo
+ EXIT_CODE=1
+ fi
+done
+
+QUOTE_SYNTAX_INCLUDES=$(git grep '^#include "' -- "*.cpp" "*.h" | grep -Ev "${IGNORE_REGEXP}")
+if [[ ${QUOTE_SYNTAX_INCLUDES} != "" ]]; then
+ echo "Please use bracket syntax includes (\"#include <foo.h>\") instead of quote syntax includes:"
+ echo "${QUOTE_SYNTAX_INCLUDES}"
+ echo
+ EXIT_CODE=1
+fi
+
exit ${EXIT_CODE}
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
new file mode 100755
index 0000000000..98814ce4b9
--- /dev/null
+++ b/test/lint/lint-locale-dependence.sh
@@ -0,0 +1,230 @@
+#!/bin/bash
+
+export LC_ALL=C
+KNOWN_VIOLATIONS=(
+ "src/base58.cpp:.*isspace"
+ "src/bitcoin-tx.cpp.*stoul"
+ "src/bitcoin-tx.cpp.*trim_right"
+ "src/bitcoin-tx.cpp:.*atoi"
+ "src/core_read.cpp.*is_digit"
+ "src/dbwrapper.cpp.*stoul"
+ "src/dbwrapper.cpp:.*vsnprintf"
+ "src/httprpc.cpp.*trim"
+ "src/init.cpp:.*atoi"
+ "src/netbase.cpp.*to_lower"
+ "src/qt/rpcconsole.cpp:.*atoi"
+ "src/qt/rpcconsole.cpp:.*isdigit"
+ "src/rest.cpp:.*strtol"
+ "src/rpc/server.cpp.*to_upper"
+ "src/test/dbwrapper_tests.cpp:.*snprintf"
+ "src/test/getarg_tests.cpp.*split"
+ "src/torcontrol.cpp:.*atoi"
+ "src/torcontrol.cpp:.*strtol"
+ "src/uint256.cpp:.*isspace"
+ "src/uint256.cpp:.*tolower"
+ "src/util.cpp:.*atoi"
+ "src/util.cpp:.*fprintf"
+ "src/util.cpp:.*tolower"
+ "src/utilmoneystr.cpp:.*isdigit"
+ "src/utilmoneystr.cpp:.*isspace"
+ "src/utilstrencodings.cpp:.*atoi"
+ "src/utilstrencodings.cpp:.*isspace"
+ "src/utilstrencodings.cpp:.*strtol"
+ "src/utilstrencodings.cpp:.*strtoll"
+ "src/utilstrencodings.cpp:.*strtoul"
+ "src/utilstrencodings.cpp:.*strtoull"
+ "src/utilstrencodings.h:.*atoi"
+)
+
+REGEXP_IGNORE_EXTERNAL_DEPENDENCIES="^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/)"
+
+LOCALE_DEPENDENT_FUNCTIONS=(
+ alphasort # LC_COLLATE (via strcoll)
+ asctime # LC_TIME (directly)
+ asprintf # (via vasprintf)
+ atof # LC_NUMERIC (via strtod)
+ atoi # LC_NUMERIC (via strtol)
+ atol # LC_NUMERIC (via strtol)
+ atoll # (via strtoll)
+ atoq
+ btowc # LC_CTYPE (directly)
+ ctime # (via asctime or localtime)
+ dprintf # (via vdprintf)
+ fgetwc
+ fgetws
+ fold_case # boost::locale::fold_case
+ fprintf # (via vfprintf)
+ fputwc
+ fputws
+ fscanf # (via __vfscanf)
+ fwprintf # (via __vfwprintf)
+ getdate # via __getdate_r => isspace // __localtime_r
+ getwc
+ getwchar
+ is_digit # boost::algorithm::is_digit
+ is_space # boost::algorithm::is_space
+ isalnum # LC_CTYPE
+ isalpha # LC_CTYPE
+ isblank # LC_CTYPE
+ iscntrl # LC_CTYPE
+ isctype # LC_CTYPE
+ isdigit # LC_CTYPE
+ isgraph # LC_CTYPE
+ islower # LC_CTYPE
+ isprint # LC_CTYPE
+ ispunct # LC_CTYPE
+ isspace # LC_CTYPE
+ isupper # LC_CTYPE
+ iswalnum # LC_CTYPE
+ iswalpha # LC_CTYPE
+ iswblank # LC_CTYPE
+ iswcntrl # LC_CTYPE
+ iswctype # LC_CTYPE
+ iswdigit # LC_CTYPE
+ iswgraph # LC_CTYPE
+ iswlower # LC_CTYPE
+ iswprint # LC_CTYPE
+ iswpunct # LC_CTYPE
+ iswspace # LC_CTYPE
+ iswupper # LC_CTYPE
+ iswxdigit # LC_CTYPE
+ isxdigit # LC_CTYPE
+ localeconv # LC_NUMERIC + LC_MONETARY
+ mblen # LC_CTYPE
+ mbrlen
+ mbrtowc
+ mbsinit
+ mbsnrtowcs
+ mbsrtowcs
+ mbstowcs # LC_CTYPE
+ mbtowc # LC_CTYPE
+ mktime
+ normalize # boost::locale::normalize
+# printf # LC_NUMERIC
+ putwc
+ putwchar
+ scanf # LC_NUMERIC
+ setlocale
+ snprintf
+ sprintf
+ sscanf
+ stod
+ stof
+ stoi
+ stol
+ stold
+ stoll
+ stoul
+ stoull
+ strcasecmp
+ strcasestr
+ strcoll # LC_COLLATE
+# strerror
+ strfmon
+ strftime # LC_TIME
+ strncasecmp
+ strptime
+ strtod # LC_NUMERIC
+ strtof
+ strtoimax
+ strtol # LC_NUMERIC
+ strtold
+ strtoll
+ strtoq
+ strtoul # LC_NUMERIC
+ strtoull
+ strtoumax
+ strtouq
+ strxfrm # LC_COLLATE
+ swprintf
+ to_lower # boost::locale::to_lower
+ to_title # boost::locale::to_title
+ to_upper # boost::locale::to_upper
+ tolower # LC_CTYPE
+ toupper # LC_CTYPE
+ towctrans
+ towlower # LC_CTYPE
+ towupper # LC_CTYPE
+ trim # boost::algorithm::trim
+ trim_left # boost::algorithm::trim_left
+ trim_right # boost::algorithm::trim_right
+ ungetwc
+ vasprintf
+ vdprintf
+ versionsort
+ vfprintf
+ vfscanf
+ vfwprintf
+ vprintf
+ vscanf
+ vsnprintf
+ vsprintf
+ vsscanf
+ vswprintf
+ vwprintf
+ wcrtomb
+ wcscasecmp
+ wcscoll # LC_COLLATE
+ wcsftime # LC_TIME
+ wcsncasecmp
+ wcsnrtombs
+ wcsrtombs
+ wcstod # LC_NUMERIC
+ wcstof
+ wcstoimax
+ wcstol # LC_NUMERIC
+ wcstold
+ wcstoll
+ wcstombs # LC_CTYPE
+ wcstoul # LC_NUMERIC
+ wcstoull
+ wcstoumax
+ wcswidth
+ wcsxfrm # LC_COLLATE
+ wctob
+ wctomb # LC_CTYPE
+ wctrans
+ wctype
+ wcwidth
+ wprintf
+)
+
+function join_array {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
+
+REGEXP_IGNORE_KNOWN_VIOLATIONS=$(join_array "|" "${KNOWN_VIOLATIONS[@]}")
+
+# Invoke "git grep" only once in order to minimize run-time
+REGEXP_LOCALE_DEPENDENT_FUNCTIONS=$(join_array "|" "${LOCALE_DEPENDENT_FUNCTIONS[@]}")
+GIT_GREP_OUTPUT=$(git grep -E "[^a-zA-Z0-9_\`'\"<>](${REGEXP_LOCALE_DEPENDENT_FUNCTIONS}(|_r|_s))[^a-zA-Z0-9_\`'\"<>]" -- "*.cpp" "*.h")
+
+EXIT_CODE=0
+for LOCALE_DEPENDENT_FUNCTION in "${LOCALE_DEPENDENT_FUNCTIONS[@]}"; do
+ MATCHES=$(grep -E "[^a-zA-Z0-9_\`'\"<>]${LOCALE_DEPENDENT_FUNCTION}(|_r|_s)[^a-zA-Z0-9_\`'\"<>]" <<< "${GIT_GREP_OUTPUT}" | \
+ grep -vE "\.(c|cpp|h):\s*(//|\*|/\*|\").*${LOCALE_DEPENDENT_FUNCTION}" | \
+ grep -vE 'fprintf\(.*(stdout|stderr)')
+ if [[ ${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES} != "" ]]; then
+ MATCHES=$(grep -vE "${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES}" <<< "${MATCHES}")
+ fi
+ if [[ ${REGEXP_IGNORE_KNOWN_VIOLATIONS} != "" ]]; then
+ MATCHES=$(grep -vE "${REGEXP_IGNORE_KNOWN_VIOLATIONS}" <<< "${MATCHES}")
+ fi
+ if [[ ${MATCHES} != "" ]]; then
+ echo "The locale dependent function ${LOCALE_DEPENDENT_FUNCTION}(...) appears to be used:"
+ echo "${MATCHES}"
+ echo
+ EXIT_CODE=1
+ fi
+done
+if [[ ${EXIT_CODE} != 0 ]]; then
+ echo "Unnecessary locale dependence can cause bugs that are very"
+ echo "tricky to isolate and fix. Please avoid using locale dependent"
+ echo "functions if possible."
+ echo
+ echo "Advice not applicable in this specific case? Add an exception"
+ echo "by updating the ignore list in $0"
+fi
+exit ${EXIT_CODE}
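The script above joins the whole function list into one alternation so that git grep runs only once; a rough Python rendering of that regex construction (using an illustrative subset of the functions) would be:

import re

# Hypothetical subset of the locale dependent functions listed above.
FUNCS = ["atoi", "isspace", "strtol", "tolower"]
# One alternation, optionally suffixed with _r or _s, bounded by non-identifier
# characters -- mirroring the pattern built by join_array in the shell script.
PATTERN = re.compile(r"[^A-Za-z0-9_`'\"<>](?:%s)(?:_r|_s)?[^A-Za-z0-9_`'\"<>]" % "|".join(FUNCS))
assert PATTERN.search(" x = atoi(s); ") is not None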
diff --git a/test/lint/lint-logs.sh b/test/lint/lint-logs.sh
index 35be13ec19..89e7fb8764 100755
--- a/test/lint/lint-logs.sh
+++ b/test/lint/lint-logs.sh
@@ -12,7 +12,7 @@
# There are some instances of LogPrintf() in comments. Those can be
# ignored
-
+export LC_ALL=C
UNTERMINATED_LOGS=$(git grep --extended-regexp "LogPrintf?\(" -- "*.cpp" | \
grep -v '\\n"' | \
grep -v "/\* Continued \*/" | \
diff --git a/test/lint/lint-python-shebang.sh b/test/lint/lint-python-shebang.sh
index f5c5971c03..031487a91a 100755
--- a/test/lint/lint-python-shebang.sh
+++ b/test/lint/lint-python-shebang.sh
@@ -1,5 +1,7 @@
#!/bin/bash
# Shebang must use python3 (not python or python2)
+
+export LC_ALL=C
EXIT_CODE=0
for PYTHON_FILE in $(git ls-files -- "*.py"); do
if [[ $(head -c 2 "${PYTHON_FILE}") == "#!" &&
diff --git a/test/lint/lint-python-utf8-encoding.sh b/test/lint/lint-python-utf8-encoding.sh
new file mode 100755
index 0000000000..d0ff38ea0a
--- /dev/null
+++ b/test/lint/lint-python-utf8-encoding.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Make sure we explicitly open all text files using UTF-8 (or ASCII) encoding to
+# avoid potential issues on the BSDs where the locale is not always set.
+
+export LC_ALL=C
+EXIT_CODE=0
+OUTPUT=$(git grep " open(" -- "*.py" | grep -vE "encoding=.(ascii|utf8|utf-8)." | grep -vE "open\([^,]*, ['\"][^'\"]*b[^'\"]*['\"]")
+if [[ ${OUTPUT} != "" ]]; then
+ echo "Python's open(...) seems to be used to open text files without explicitly"
+ echo "specifying encoding=\"utf8\":"
+ echo
+ echo "${OUTPUT}"
+ EXIT_CODE=1
+fi
+exit ${EXIT_CODE}
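The rule the new lint enforces matches the pattern applied throughout the Python changes in this diff; a minimal compliant sketch (the filename is hypothetical):

# Always pass an explicit encoding when opening text files so reads and writes
# do not depend on the platform locale; binary-mode opens are exempt.
with open("notes.txt", "w", encoding="utf8") as f:
    f.write("data\n")
with open("notes.txt", encoding="utf8") as f:
    data = f.read()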
diff --git a/test/lint/lint-python.sh b/test/lint/lint-python.sh
index 7d3555b6d4..d9d46d86d5 100755
--- a/test/lint/lint-python.sh
+++ b/test/lint/lint-python.sh
@@ -6,6 +6,8 @@
#
# Check for specified flake8 warnings in python files.
+export LC_ALL=C
+
# E101 indentation contains mixed spaces and tabs
# E112 expected an indented block
# E113 unexpected indentation
diff --git a/test/lint/lint-shell-locale.sh b/test/lint/lint-shell-locale.sh
new file mode 100755
index 0000000000..d78bac2d47
--- /dev/null
+++ b/test/lint/lint-shell-locale.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Make sure all shell scripts:
+# a.) explicitly opt out of locale dependence using "export LC_ALL=C", or
+# b.) explicitly opt in to locale dependence using the annotation below.
+
+export LC_ALL=C
+
+EXIT_CODE=0
+for SHELL_SCRIPT in $(git ls-files -- "*.sh" | grep -vE "src/(secp256k1|univalue)/"); do
+ if grep -q "# This script is intentionally locale dependent by not setting \"export LC_ALL=C\"" "${SHELL_SCRIPT}"; then
+ continue
+ fi
+ FIRST_NON_COMMENT_LINE=$(grep -vE '^(#.*|)$' "${SHELL_SCRIPT}" | head -1)
+ if [[ ${FIRST_NON_COMMENT_LINE} != "export LC_ALL=C" ]]; then
+ echo "Missing \"export LC_ALL=C\" (to avoid locale dependence) as first non-comment non-empty line in ${SHELL_SCRIPT}"
+ EXIT_CODE=1
+ fi
+done
+exit ${EXIT_CODE}
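A rough Python rendering of the check performed above (the path in the usage line is hypothetical): the first non-empty, non-comment line of every shell script must be the locale opt-out, unless the script carries the opt-in annotation.

# Roughly equivalent to the grep pipeline above: return the first line that is
# neither empty nor a comment, which the lint requires to be "export LC_ALL=C".
def first_effective_line(path):
    with open(path, encoding="utf8") as f:
        for line in f:
            stripped = line.strip()
            if stripped and not stripped.startswith("#"):
                return stripped
    return ""

assert first_effective_line("some-script.sh") == "export LC_ALL=C"  # hypothetical path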
diff --git a/test/lint/lint-shell.sh b/test/lint/lint-shell.sh
index 5f5fa9a925..bf56db90c6 100755
--- a/test/lint/lint-shell.sh
+++ b/test/lint/lint-shell.sh
@@ -6,6 +6,10 @@
#
# Check for shellcheck warnings in shell scripts.
+# This script is intentionally locale dependent by not setting "export LC_ALL=C"
+# to allow running certain versions of shellcheck that core dump when LC_ALL=C
+# is set.
+
# Disabled warnings:
# SC2001: See if you can use ${variable//search/replace} instead.
# SC2004: $/${} is unnecessary on arithmetic variables.
diff --git a/test/lint/lint-tests.sh b/test/lint/lint-tests.sh
index ffc0660551..9d83547df4 100755
--- a/test/lint/lint-tests.sh
+++ b/test/lint/lint-tests.sh
@@ -6,6 +6,7 @@
#
# Check the test suite naming conventions
+export LC_ALL=C
EXIT_CODE=0
NAMING_INCONSISTENCIES=$(git grep -E '^BOOST_FIXTURE_TEST_SUITE\(' -- \
diff --git a/test/lint/lint-whitespace.sh b/test/lint/lint-whitespace.sh
index c5d43043d5..128aa10da5 100755
--- a/test/lint/lint-whitespace.sh
+++ b/test/lint/lint-whitespace.sh
@@ -8,6 +8,7 @@
# We can't run this check unless we know the commit range for the PR.
+export LC_ALL=C
while getopts "?" opt; do
case $opt in
?)
diff --git a/test/util/bitcoin-util-test.py b/test/util/bitcoin-util-test.py
index 30bd13d0dc..16371a6234 100755
--- a/test/util/bitcoin-util-test.py
+++ b/test/util/bitcoin-util-test.py
@@ -28,7 +28,7 @@ import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
- config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini")))
+ config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
@@ -49,7 +49,7 @@ def main():
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
- raw_data = open(input_filename).read()
+ raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
@@ -86,7 +86,7 @@ def bctest(testDir, testObj, buildenv):
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
- inputData = open(filename).read()
+ inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
@@ -97,7 +97,7 @@ def bctest(testDir, testObj, buildenv):
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
- outputData = open(os.path.join(testDir, outputFn)).read()
+ outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
diff --git a/test/util/rpcauth-test.py b/test/util/rpcauth-test.py
index 2456feb102..46e9fbc739 100755
--- a/test/util/rpcauth-test.py
+++ b/test/util/rpcauth-test.py
@@ -18,7 +18,7 @@ class TestRPCAuth(unittest.TestCase):
config_path = os.path.abspath(
os.path.join(os.sep, os.path.abspath(os.path.dirname(__file__)),
"../config.ini"))
- with open(config_path) as config_file:
+ with open(config_path, encoding="utf8") as config_file:
config.read_file(config_file)
sys.path.insert(0, os.path.dirname(config['environment']['RPCAUTH']))
self.rpcauth = importlib.import_module('rpcauth')