-rw-r--r--  .travis.yml | 2
-rw-r--r--  README.md | 2
-rwxr-xr-x  contrib/devtools/update-translations.py | 10
-rw-r--r--  contrib/init/README.md | 2
-rw-r--r--  contrib/verify-commits/README.md | 10
-rw-r--r--  contrib/verify-commits/allow-incorrect-sha512-commits | 2
-rw-r--r--  contrib/verify-commits/allow-unclean-merge-commits | 4
-rwxr-xr-x  contrib/verify-commits/pre-push-hook.sh | 4
-rwxr-xr-x  contrib/verify-commits/verify-commits.py | 155
-rwxr-xr-x  contrib/verify-commits/verify-commits.sh | 153
-rw-r--r--  depends/README.md | 6
-rw-r--r--  depends/description.md | 2
-rw-r--r--  doc/README.md | 4
-rw-r--r--  doc/README_osx.md | 10
-rw-r--r--  doc/build-osx.md | 6
-rw-r--r--  doc/developer-notes.md | 40
-rw-r--r--  doc/init.md | 6
-rw-r--r--  doc/release-notes-pr12892.md | 2
-rw-r--r--  doc/release-notes.md | 5
-rw-r--r--  doc/release-process.md | 22
-rw-r--r--  doc/translation_process.md | 8
-rw-r--r--  src/Makefile.am | 40
-rw-r--r--  src/Makefile.bench.include | 4
-rw-r--r--  src/Makefile.qt.include | 2
-rw-r--r--  src/Makefile.qttest.include | 2
-rw-r--r--  src/Makefile.test.include | 6
-rw-r--r--  src/arith_uint256.h | 8
-rw-r--r--  src/bench/examples.cpp (renamed from src/bench/Examples.cpp) | 0
-rw-r--r--  src/bench/merkle_root.cpp | 8
-rw-r--r--  src/bitcoin-cli.cpp | 13
-rw-r--r--  src/bitcoin-tx.cpp | 4
-rw-r--r--  src/crypto/sha256_avx2.cpp | 4
-rw-r--r--  src/crypto/sha256_sse41.cpp | 4
-rw-r--r--  src/index/base.cpp | 278
-rw-r--r--  src/index/base.h | 98
-rw-r--r--  src/index/txindex.cpp | 419
-rw-r--r--  src/index/txindex.h | 74
-rw-r--r--  src/init.cpp | 3
-rw-r--r--  src/net_processing.cpp | 2
-rw-r--r--  src/policy/policy.cpp | 9
-rw-r--r--  src/policy/policy.h | 4
-rw-r--r--  src/qt/README.md | 8
-rw-r--r--  src/qt/forms/optionsdialog.ui | 67
-rw-r--r--  src/qt/optionsdialog.cpp | 19
-rw-r--r--  src/qt/optionsdialog.h | 1
-rw-r--r--  src/qt/optionsmodel.cpp | 26
-rw-r--r--  src/qt/optionsmodel.h | 2
-rw-r--r--  src/rest.cpp | 2
-rw-r--r--  src/rpc/blockchain.cpp | 22
-rw-r--r--  src/rpc/blockchain.h | 3
-rw-r--r--  src/rpc/client.cpp | 1
-rw-r--r--  src/rpc/mining.cpp | 2
-rw-r--r--  src/rpc/rawtransaction.cpp | 4
-rw-r--r--  src/script/sign.cpp | 8
-rw-r--r--  src/script/sign.h | 1
-rw-r--r--  src/script/standard.cpp | 1
-rw-r--r--  src/test/arith_uint256_tests.cpp | 7
-rw-r--r--  src/test/blockchain_tests.cpp | 58
-rw-r--r--  src/test/denialofservice_tests.cpp (renamed from src/test/DoS_tests.cpp) | 2
-rw-r--r--  src/test/mempool_tests.cpp | 178
-rw-r--r--  src/test/script_p2sh_tests.cpp (renamed from src/test/script_P2SH_tests.cpp) | 2
-rw-r--r--  src/test/script_standard_tests.cpp | 26
-rw-r--r--  src/test/transaction_tests.cpp | 8
-rw-r--r--  src/test/txindex_tests.cpp | 2
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 6
-rw-r--r--  src/txdb.cpp | 183
-rw-r--r--  src/txdb.h | 59
-rw-r--r--  src/txmempool.cpp | 37
-rw-r--r--  src/txmempool.h | 8
-rw-r--r--  src/util.cpp | 20
-rw-r--r--  src/validation.cpp | 8
-rw-r--r--  src/validation.h | 6
-rw-r--r--  src/wallet/rpcwallet.cpp | 83
-rw-r--r--  src/wallet/test/coinselector_tests.cpp | 16
-rw-r--r--  src/wallet/wallet.cpp | 20
-rw-r--r--  src/wallet/wallet.h | 4
-rwxr-xr-x  test/functional/combine_logs.py | 2
-rwxr-xr-x  test/functional/feature_includeconf.py | 19
-rwxr-xr-x  test/functional/feature_nulldummy.py | 2
-rwxr-xr-x  test/functional/feature_segwit.py | 31
-rwxr-xr-x  test/functional/p2p_segwit.py | 160
-rwxr-xr-x  test/functional/rpc_deprecated.py | 5
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 2
-rwxr-xr-x  test/functional/test_runner.py | 6
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 2
-rwxr-xr-x  test/functional/wallet_labels.py | 81
-rwxr-xr-x  test/lint/check-doc.py | 2
-rwxr-xr-x  test/lint/lint-includes.sh | 89
-rwxr-xr-x  test/lint/lint-locale-dependence.sh | 229
89 files changed, 1845 insertions, 1122 deletions
diff --git a/.travis.yml b/.travis.yml
index 7cbe0b83f1..42ea5fdc44 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -104,5 +104,5 @@ jobs:
- test/lint/lint-all.sh
- if [ "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_EVENT_TYPE" = "cron" ]; then
while read LINE; do travis_retry gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys &&
- travis_wait 30 contrib/verify-commits/verify-commits.sh;
+ travis_wait 30 contrib/verify-commits/verify-commits.py;
fi
diff --git a/README.md b/README.md
index acdbe46104..4e830109c2 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ There are also [regression and integration tests](/test), written
in Python, that are run automatically on the build server.
These tests can be run (if the [test dependencies](/test) are installed) with: `test/functional/test_runner.py`
-The Travis CI system makes sure that every pull request is built for Windows, Linux, and OS X, and that unit/sanity tests are run automatically.
+The Travis CI system makes sure that every pull request is built for Windows, Linux, and macOS, and that unit/sanity tests are run automatically.
### Manual Quality Assurance (QA) Testing
diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py
index b36e6968bf..f0098cfcdf 100755
--- a/contrib/devtools/update-translations.py
+++ b/contrib/devtools/update-translations.py
@@ -30,6 +30,8 @@ SOURCE_LANG = 'bitcoin_en.ts'
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
+# Regexp to check for Bitcoin addresses
+ADDRESS_REGEXP = re.compile('([13]|bc1)[a-zA-Z0-9]{30,}')
def check_at_repository_root():
if not os.path.exists('.git'):
@@ -122,6 +124,12 @@ def escape_cdata(text):
text = text.replace('"', '&quot;')
return text
+def contains_bitcoin_addr(text, errors):
+ if text != None and ADDRESS_REGEXP.search(text) != None:
+ errors.append('Translation "%s" contains a bitcoin address. This will be removed.' % (text))
+ return True
+ return False
+
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
@@ -160,7 +168,7 @@ def postprocess_translations(reduce_diff_hacks=False):
if translation is None:
continue
errors = []
- valid = check_format_specifiers(source, translation, errors, numerus)
+ valid = check_format_specifiers(source, translation, errors, numerus) and not contains_bitcoin_addr(translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
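
The address check added above is small and self-contained. A minimal sketch of how it behaves, with the regexp and message mirroring the diff (the sample strings are illustrative only, not from the commit):

```
import re

# Regexp and error message as added in the diff above.
ADDRESS_REGEXP = re.compile('([13]|bc1)[a-zA-Z0-9]{30,}')

def contains_bitcoin_addr(text, errors):
    if text is not None and ADDRESS_REGEXP.search(text) is not None:
        errors.append('Translation "%s" contains a bitcoin address. This will be removed.' % (text))
        return True
    return False

errors = []
# A translation that smuggles in an address is flagged...
assert contains_bitcoin_addr('Donate to 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2', errors)
# ...while ordinary UI strings pass.
assert not contains_bitcoin_addr('Amount to send', errors)
print(errors[0])
```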
diff --git a/contrib/init/README.md b/contrib/init/README.md
index 1a949f3c07..8d3e57c526 100644
--- a/contrib/init/README.md
+++ b/contrib/init/README.md
@@ -5,7 +5,7 @@ Upstart: bitcoind.conf
OpenRC: bitcoind.openrc
bitcoind.openrcconf
CentOS: bitcoind.init
-OS X: org.bitcoin.bitcoind.plist
+macOS: org.bitcoin.bitcoind.plist
```
have been made available to assist packagers in creating node packages here.
diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md
index fa492fdd27..aa805ad1b9 100644
--- a/contrib/verify-commits/README.md
+++ b/contrib/verify-commits/README.md
@@ -7,18 +7,18 @@ are PGP signed (nearly always merge commits), as well as a script to verify
commits against a trusted keys list.
-Using verify-commits.sh safely
+Using verify-commits.py safely
------------------------------
Remember that you can't use an untrusted script to verify itself. This means
-that checking out code, then running `verify-commits.sh` against `HEAD` is
-_not_ safe, because the version of `verify-commits.sh` that you just ran could
+that checking out code, then running `verify-commits.py` against `HEAD` is
+_not_ safe, because the version of `verify-commits.py` that you just ran could
be backdoored. Instead, you need to use a trusted version of verify-commits
prior to checkout to make sure you're checking out only code signed by trusted
keys:
git fetch origin && \
- ./contrib/verify-commits/verify-commits.sh origin/master && \
+ ./contrib/verify-commits/verify-commits.py origin/master && \
git checkout origin/master
Note that the above isn't a good UI/UX yet, and needs significant improvements
@@ -42,6 +42,6 @@ said key. In order to avoid bumping the root-of-trust `trusted-git-root`
file, individual commits which were signed by such a key can be added to the
`allow-revsig-commits` file. That way, the PGP signatures are still verified
but no new commits can be signed by any expired/revoked key. To easily build a
-list of commits which need to be added, verify-commits.sh can be edited to test
+list of commits which need to be added, verify-commits.py can be edited to test
each commit with BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG set to both 1 and 0, and
those which need it set to 1 printed.
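
The last paragraph describes building the `allow-revsig-commits` list by checking each commit with `BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG` set to 1 and then 0. A hypothetical helper sketching that workflow, reusing the `git -c gpg.program=.../gpg.sh verify-commit` invocation and environment variable that `verify-commits.py` uses below (the function name is made up):

```
import os
import subprocess

def needs_revsig_allowance(commit, gpg_wrapper='contrib/verify-commits/gpg.sh'):
    """Return True if the commit only verifies with BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=1,
    i.e. it is a candidate for the allow-revsig-commits file."""
    def verifies(allow_revsig):
        env = dict(os.environ, BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=allow_revsig)
        return subprocess.call(
            ['git', '-c', 'gpg.program=' + gpg_wrapper, 'verify-commit', commit],
            env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
    return verifies('1') and not verifies('0')
```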
diff --git a/contrib/verify-commits/allow-incorrect-sha512-commits b/contrib/verify-commits/allow-incorrect-sha512-commits
new file mode 100644
index 0000000000..c572806f26
--- /dev/null
+++ b/contrib/verify-commits/allow-incorrect-sha512-commits
@@ -0,0 +1,2 @@
+f8feaa4636260b599294c7285bcf1c8b7737f74e
+8040ae6fc576e9504186f2ae3ff2c8125de1095c
diff --git a/contrib/verify-commits/allow-unclean-merge-commits b/contrib/verify-commits/allow-unclean-merge-commits
new file mode 100644
index 0000000000..7aab274b9a
--- /dev/null
+++ b/contrib/verify-commits/allow-unclean-merge-commits
@@ -0,0 +1,4 @@
+6052d509105790a26b3ad5df43dd61e7f1b24a12
+3798e5de334c3deb5f71302b782f6b8fbd5087f1
+326ffed09bfcc209a2efd6a2ebc69edf6bd200b5
+97d83739db0631be5d4ba86af3616014652c00ec
diff --git a/contrib/verify-commits/pre-push-hook.sh b/contrib/verify-commits/pre-push-hook.sh
index c21febb9e9..1f14635f61 100755
--- a/contrib/verify-commits/pre-push-hook.sh
+++ b/contrib/verify-commits/pre-push-hook.sh
@@ -12,9 +12,9 @@ while read LINE; do
if [ "$4" != "refs/heads/master" ]; then
continue
fi
- if ! ./contrib/verify-commits/verify-commits.sh $3 > /dev/null 2>&1; then
+ if ! ./contrib/verify-commits/verify-commits.py $3 > /dev/null 2>&1; then
echo "ERROR: A commit is not signed, can't push"
- ./contrib/verify-commits/verify-commits.sh
+ ./contrib/verify-commits/verify-commits.py
exit 1
fi
done < /dev/stdin
diff --git a/contrib/verify-commits/verify-commits.py b/contrib/verify-commits/verify-commits.py
new file mode 100755
index 0000000000..80f0aa0bf1
--- /dev/null
+++ b/contrib/verify-commits/verify-commits.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Verify commits against a trusted keys list."""
+import argparse
+import hashlib
+import os
+import subprocess
+import sys
+import time
+
+GIT = os.getenv('GIT', 'git')
+
+def tree_sha512sum(commit='HEAD'):
+ """Calculate the Tree-sha512 for the commit.
+
+ This is copied from github-merge.py."""
+
+ # request metadata for entire tree, recursively
+ files = []
+ blob_by_name = {}
+ for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
+ name_sep = line.index(b'\t')
+ metadata = line[:name_sep].split() # perms, 'blob', blobid
+ assert metadata[1] == b'blob'
+ name = line[name_sep + 1:]
+ files.append(name)
+ blob_by_name[name] = metadata[2]
+
+ files.sort()
+ # open connection to git-cat-file in batch mode to request data for all blobs
+ # this is much faster than launching it per file
+ p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ overall = hashlib.sha512()
+ for f in files:
+ blob = blob_by_name[f]
+ # request blob
+ p.stdin.write(blob + b'\n')
+ p.stdin.flush()
+ # read header: blob, "blob", size
+ reply = p.stdout.readline().split()
+ assert reply[0] == blob and reply[1] == b'blob'
+ size = int(reply[2])
+ # hash the blob data
+ intern = hashlib.sha512()
+ ptr = 0
+ while ptr < size:
+ bs = min(65536, size - ptr)
+ piece = p.stdout.read(bs)
+ if len(piece) == bs:
+ intern.update(piece)
+ else:
+ raise IOError('Premature EOF reading git cat-file output')
+ ptr += bs
+ dig = intern.hexdigest()
+ assert p.stdout.read(1) == b'\n' # ignore LF that follows blob data
+ # update overall hash with file hash
+ overall.update(dig.encode("utf-8"))
+ overall.update(" ".encode("utf-8"))
+ overall.update(f)
+ overall.update("\n".encode("utf-8"))
+ p.stdin.close()
+ if p.wait():
+ raise IOError('Non-zero return value executing git cat-file')
+ return overall.hexdigest()
+
+def main():
+ # Parse arguments
+ parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]')
+ parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check')
+ parser.add_argument('--clean-merge', type=float, dest='clean_merge', default=float('inf'), help='Only check clean merge after <NUMBER> days ago (default: %(default)s)', metavar='NUMBER')
+ parser.add_argument('commit', nargs='?', default='HEAD', help='Check clean merge up to commit <commit>')
+ args = parser.parse_args()
+
+ # get directory of this program and read data files
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ print("Using verify-commits data from " + dirname)
+ verified_root = open(dirname + "/trusted-git-root", "r").read().splitlines()[0]
+ verified_sha512_root = open(dirname + "/trusted-sha512-root-commit", "r").read().splitlines()[0]
+ revsig_allowed = open(dirname + "/allow-revsig-commits", "r").read().splitlines()
+ unclean_merge_allowed = open(dirname + "/allow-unclean-merge-commits", "r").read().splitlines()
+ incorrect_sha512_allowed = open(dirname + "/allow-incorrect-sha512-commits", "r").read().splitlines()
+
+ # Set commit and branch and set variables
+ current_commit = args.commit
+ if ' ' in current_commit:
+ print("Commit must not contain spaces", file=sys.stderr)
+ sys.exit(1)
+ verify_tree = args.verify_tree
+ no_sha1 = True
+ prev_commit = ""
+ initial_commit = current_commit
+ branch = subprocess.check_output([GIT, 'show', '-s', '--format=%H', initial_commit], universal_newlines=True).splitlines()[0]
+
+ # Iterate through commits
+ while True:
+ if current_commit == verified_root:
+ print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root))
+ sys.exit(0)
+ if current_commit == verified_sha512_root:
+ if verify_tree:
+ print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr)
+ verify_tree = False
+ no_sha1 = False
+
+ os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1"
+ os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG'] = "1" if current_commit in revsig_allowed else "0"
+
+ # Check that the commit (and parents) was signed with a trusted key
+ if subprocess.call([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', current_commit], stdout=subprocess.DEVNULL):
+ if prev_commit != "":
+ print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr)
+ print("Parents are:", file=sys.stderr)
+ parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit], universal_newlines=True).splitlines()[0].split(' ')
+ for parent in parents:
+ subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr)
+ else:
+ print("{} was not signed with a trusted key!".format(current_commit), file=sys.stderr)
+ sys.exit(1)
+
+ # Check the Tree-SHA512
+ if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed:
+ tree_hash = tree_sha512sum(current_commit)
+ if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit], universal_newlines=True).splitlines():
+ print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr)
+ sys.exit(1)
+
+ # Merge commits should only have two parents
+ parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit], universal_newlines=True).splitlines()[0].split(' ')
+ if len(parents) > 2:
+ print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr)
+ sys.exit(1)
+
+ # Check that the merge commit is clean
+ commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit], universal_newlines=True).splitlines()[0])
+ check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days
+ allow_unclean = current_commit in unclean_merge_allowed
+ if len(parents) == 2 and check_merge and not allow_unclean:
+ current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit], universal_newlines=True).splitlines()[0]
+ subprocess.call([GIT, 'checkout', '--force', '--quiet', parents[0]])
+ subprocess.call([GIT, 'merge', '--no-ff', '--quiet', parents[1]], stdout=subprocess.DEVNULL)
+ recreated_tree = subprocess.check_output([GIT, 'show', '--format=format:%T', 'HEAD'], universal_newlines=True).splitlines()[0]
+ if current_tree != recreated_tree:
+ print("Merge commit {} is not clean".format(current_commit), file=sys.stderr)
+ subprocess.call([GIT, 'diff', current_commit])
+ subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
+ sys.exit(1)
+ subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
+
+ prev_commit = current_commit
+ current_commit = parents[0]
+
+if __name__ == '__main__':
+ main()
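
For illustration only (not part of the commit): the Tree-SHA512 computed by `tree_sha512sum()` above can be reproduced with a slower but more compact sketch that shells out to `git cat-file` once per blob, much like the deleted shell script below does. Assuming a repository without unusual filenames, it should yield the same digest as the batched version:

```
import hashlib
import subprocess

def simple_tree_sha512(commit='HEAD'):
    # One 'git cat-file' per blob; verify-commits.py batches these for speed.
    names = subprocess.check_output(
        ['git', 'ls-tree', '--full-tree', '-r', '--name-only', '-z', commit]).split(b'\x00')
    overall = hashlib.sha512()
    for name in sorted(n for n in names if n):
        blob = subprocess.check_output(['git', 'cat-file', 'blob', commit.encode() + b':' + name])
        overall.update(hashlib.sha512(blob).hexdigest().encode() + b' ' + name + b'\n')
    return overall.hexdigest()

# The commit message is then expected to carry a matching trailer,
# mirroring the check performed inside main() above.
body = subprocess.check_output(['git', 'show', '-s', '--format=format:%B', 'HEAD'],
                               universal_newlines=True).splitlines()
print('Tree-SHA512: {}'.format(simple_tree_sha512('HEAD')) in body)
```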
diff --git a/contrib/verify-commits/verify-commits.sh b/contrib/verify-commits/verify-commits.sh
deleted file mode 100755
index 6415eea4d5..0000000000
--- a/contrib/verify-commits/verify-commits.sh
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2014-2016 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-DIR=$(dirname "$0")
-[ "/${DIR#/}" != "$DIR" ] && DIR=$(dirname "$(pwd)/$0")
-
-echo "Using verify-commits data from ${DIR}"
-
-VERIFIED_ROOT=$(cat "${DIR}/trusted-git-root")
-VERIFIED_SHA512_ROOT=$(cat "${DIR}/trusted-sha512-root-commit")
-REVSIG_ALLOWED=$(cat "${DIR}/allow-revsig-commits")
-
-HAVE_GNU_SHA512=1
-[ ! -x "$(which sha512sum)" ] && HAVE_GNU_SHA512=0
-
-if [ x"$1" = "x" ]; then
- CURRENT_COMMIT="HEAD"
-else
- CURRENT_COMMIT="$1"
-fi
-
-if [ "${CURRENT_COMMIT#* }" != "$CURRENT_COMMIT" ]; then
- echo "Commit must not contain spaces?" > /dev/stderr
- exit 1
-fi
-
-VERIFY_TREE=0
-if [ x"$2" = "x--tree-checks" ]; then
- VERIFY_TREE=1
-fi
-
-NO_SHA1=1
-PREV_COMMIT=""
-INITIAL_COMMIT="${CURRENT_COMMIT}"
-
-BRANCH="$(git rev-parse --abbrev-ref HEAD)"
-
-while true; do
- if [ "$CURRENT_COMMIT" = $VERIFIED_ROOT ]; then
- echo "There is a valid path from \"$INITIAL_COMMIT\" to $VERIFIED_ROOT where all commits are signed!"
- exit 0
- fi
-
- if [ "$CURRENT_COMMIT" = $VERIFIED_SHA512_ROOT ]; then
- if [ "$VERIFY_TREE" = "1" ]; then
- echo "All Tree-SHA512s matched up to $VERIFIED_SHA512_ROOT" > /dev/stderr
- fi
- VERIFY_TREE=0
- NO_SHA1=0
- fi
-
- if [ "$NO_SHA1" = "1" ]; then
- export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=0
- else
- export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=1
- fi
-
- if [ "${REVSIG_ALLOWED#*$CURRENT_COMMIT}" != "$REVSIG_ALLOWED" ]; then
- export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=1
- else
- export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=0
- fi
-
- if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit "$CURRENT_COMMIT" > /dev/null; then
- if [ "$PREV_COMMIT" != "" ]; then
- echo "No parent of $PREV_COMMIT was signed with a trusted key!" > /dev/stderr
- echo "Parents are:" > /dev/stderr
- PARENTS=$(git show -s --format=format:%P $PREV_COMMIT)
- for PARENT in $PARENTS; do
- git show -s $PARENT > /dev/stderr
- done
- else
- echo "$CURRENT_COMMIT was not signed with a trusted key!" > /dev/stderr
- fi
- exit 1
- fi
-
- # We always verify the top of the tree
- if [ "$VERIFY_TREE" = 1 -o "$PREV_COMMIT" = "" ]; then
- IFS_CACHE="$IFS"
- IFS='
-'
- for LINE in $(git ls-tree --full-tree -r "$CURRENT_COMMIT"); do
- case "$LINE" in
- "12"*)
- echo "Repo contains symlinks" > /dev/stderr
- IFS="$IFS_CACHE"
- exit 1
- ;;
- esac
- done
- IFS="$IFS_CACHE"
-
- FILE_HASHES=""
- for FILE in $(git ls-tree --full-tree -r --name-only "$CURRENT_COMMIT" | LC_ALL=C sort); do
- if [ "$HAVE_GNU_SHA512" = 1 ]; then
- HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | sha512sum | { read FIRST _; echo $FIRST; } )
- else
- HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | shasum -a 512 | { read FIRST _; echo $FIRST; } )
- fi
- [ "$FILE_HASHES" != "" ] && FILE_HASHES="$FILE_HASHES"'
-'
- FILE_HASHES="$FILE_HASHES$HASH $FILE"
- done
-
- if [ "$HAVE_GNU_SHA512" = 1 ]; then
- TREE_HASH="$(echo "$FILE_HASHES" | sha512sum)"
- else
- TREE_HASH="$(echo "$FILE_HASHES" | shasum -a 512)"
- fi
- HASH_MATCHES=0
- MSG="$(git show -s --format=format:%B "$CURRENT_COMMIT" | tail -n1)"
-
- case "$MSG -" in
- "Tree-SHA512: $TREE_HASH")
- HASH_MATCHES=1;;
- esac
-
- if [ "$HASH_MATCHES" = "0" ]; then
- echo "Tree-SHA512 did not match for commit $CURRENT_COMMIT" > /dev/stderr
- exit 1
- fi
- fi
-
- PARENTS=$(git show -s --format=format:%P "$CURRENT_COMMIT")
- PARENT1=${PARENTS%% *}
- PARENT2=""
- if [ "x$PARENT1" != "x$PARENTS" ]; then
- PARENTX=${PARENTS#* }
- PARENT2=${PARENTX%% *}
- if [ "x$PARENT2" != "x$PARENTX" ]; then
- echo "Commit $CURRENT_COMMIT is an octopus merge" > /dev/stderr
- exit 1
- fi
- fi
- if [ "x$PARENT2" != "x" ]; then
- CURRENT_TREE="$(git show --format="%T" "$CURRENT_COMMIT")"
- git checkout --force --quiet "$PARENT1"
- git merge --no-ff --quiet "$PARENT2" >/dev/null
- RECREATED_TREE="$(git show --format="%T" HEAD)"
- if [ "$CURRENT_TREE" != "$RECREATED_TREE" ]; then
- echo "Merge commit $CURRENT_COMMIT is not clean" > /dev/stderr
- git diff "$CURRENT_COMMIT"
- git checkout --force --quiet "$BRANCH"
- exit 1
- fi
- git checkout --force --quiet "$BRANCH"
- fi
- PREV_COMMIT="$CURRENT_COMMIT"
- CURRENT_COMMIT="$PARENT1"
-done
diff --git a/depends/README.md b/depends/README.md
index 99eef1952c..482b94a64f 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -22,7 +22,7 @@ Common `host-platform-triplets` for cross compilation are:
- `i686-w64-mingw32` for Win32
- `x86_64-w64-mingw32` for Win64
-- `x86_64-apple-darwin11` for MacOSX
+- `x86_64-apple-darwin11` for macOS
- `arm-linux-gnueabihf` for Linux ARM 32 bit
- `aarch64-linux-gnu` for Linux ARM 64 bit
@@ -49,7 +49,7 @@ The following can be set when running make: make FOO=bar
SOURCES_PATH: downloaded sources will be placed here
BASE_CACHE: built packages will be placed here
- SDK_PATH: Path where sdk's can be found (used by OSX)
+ SDK_PATH: Path where sdk's can be found (used by macOS)
FALLBACK_DOWNLOAD_PATH: If a source file can't be fetched, try here before giving up
NO_QT: Don't download/build/cache qt and its dependencies
NO_WALLET: Don't download/build/cache libs needed to enable the wallet
@@ -64,7 +64,7 @@ options will be passed to bitcoin's configure. In this case, `--disable-wallet`.
Additional targets:
download: run 'make download' to fetch all sources without building them
- download-osx: run 'make download-osx' to fetch all sources needed for osx builds
+ download-osx: run 'make download-osx' to fetch all sources needed for macOS builds
download-win: run 'make download-win' to fetch all sources needed for win builds
download-linux: run 'make download-linux' to fetch all sources needed for linux builds
diff --git a/depends/description.md b/depends/description.md
index 74f9ef3f20..9fc7093be4 100644
--- a/depends/description.md
+++ b/depends/description.md
@@ -7,7 +7,7 @@ In theory, binaries for any target OS/architecture can be created, from a
builder running any OS/architecture. In practice, build-side tools must be
specified when the defaults don't fit, and packages must be amended to work
on new hosts. For now, a build architecture of x86_64 is assumed, either on
-Linux or OSX.
+Linux or macOS.
### No reliance on timestamps
diff --git a/doc/README.md b/doc/README.md
index ddb239f60c..45762b2374 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -22,7 +22,7 @@ Unpack the files into a directory and run:
Unpack the files into a directory, and then run bitcoin-qt.exe.
-### OS X
+### macOS
Drag Bitcoin-Core to your applications folder, and then run Bitcoin-Core.
@@ -38,7 +38,7 @@ Building
The following are developer notes on how to build Bitcoin on your native platform. They are not complete guides, but include notes on the necessary libraries, compile flags, etc.
- [Dependencies](dependencies.md)
-- [OS X Build Notes](build-osx.md)
+- [macOS Build Notes](build-osx.md)
- [Unix Build Notes](build-unix.md)
- [Windows Build Notes](build-windows.md)
- [OpenBSD Build Notes](build-openbsd.md)
diff --git a/doc/README_osx.md b/doc/README_osx.md
index 975be4be9e..739e22d634 100644
--- a/doc/README_osx.md
+++ b/doc/README_osx.md
@@ -1,12 +1,12 @@
-Deterministic OS X DMG Notes.
+Deterministic macOS DMG Notes.
-Working OS X DMGs are created in Linux by combining a recent clang,
+Working macOS DMGs are created in Linux by combining a recent clang,
the Apple binutils (ld, ar, etc) and DMG authoring tools.
Apple uses clang extensively for development and has upstreamed the necessary
functionality so that a vanilla clang can take advantage. It supports the use
of -F, -target, -mmacosx-version-min, and --sysroot, which are all necessary
-when building for OS X.
+when building for macOS.
Apple's version of binutils (called cctools) contains lots of functionality
missing in the FSF's binutils. In addition to extra linker options for
@@ -38,7 +38,7 @@ Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.1
Unfortunately, the usual linux tools (7zip, hpmount, loopback mount) are incapable of opening this file.
To create a tarball suitable for Gitian input, there are two options:
-Using Mac OS X, you can mount the dmg, and then create it with:
+Using macOS, you can mount the dmg, and then create it with:
```
$ hdiutil attach Xcode_7.3.1.dmg
$ tar -C /Volumes/Xcode/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ -czf MacOSX10.11.sdk.tar.gz MacOSX10.11.sdk
@@ -81,7 +81,7 @@ Background images and other features can be added to DMG files by inserting a
.DS_Store before creation. This is generated by the script
contrib/macdeploy/custom_dsstore.py.
-As of OS X Mavericks (10.9), using an Apple-blessed key to sign binaries is a
+As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a
requirement in order to satisfy the new Gatekeeper requirements. Because this
private key cannot be shared, we'll have to be a bit creative in order for the
build process to remain somewhat deterministic. Here's how it works:
diff --git a/doc/build-osx.md b/doc/build-osx.md
index e52a770ced..abd305cf9a 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -1,11 +1,11 @@
-Mac OS X Build Instructions and Notes
+macOS Build Instructions and Notes
====================================
The commands in this guide should be executed in a Terminal application.
The built-in one is located in `/Applications/Utilities/Terminal.app`.
Preparation
-----------
-Install the OS X command line tools:
+Install the macOS command line tools:
`xcode-select --install`
@@ -93,6 +93,6 @@ Other commands:
Notes
-----
-* Tested on OS X 10.8 through 10.13 on 64-bit Intel processors only.
+* Tested on OS X 10.8 Mountain Lion through macOS 10.13 High Sierra on 64-bit Intel processors only.
* Building with downloaded Qt binaries is not officially supported. See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714)
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 9081cab911..2fa91ecb02 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -499,7 +499,35 @@ Strings and formatting
- Use `ParseInt32`, `ParseInt64`, `ParseUInt32`, `ParseUInt64`, `ParseDouble` from `utilstrencodings.h` for number parsing
- - *Rationale*: These functions do overflow checking, and avoid pesky locale issues
+ - *Rationale*: These functions do overflow checking, and avoid pesky locale issues.
+
+- Avoid using locale dependent functions if possible. You can use the provided
+ [`lint-locale-dependence.sh`](/contrib/devtools/lint-locale-dependence.sh)
+ to check for accidental use of locale dependent functions.
+
+ - *Rationale*: Unnecessary locale dependence can cause bugs that are very tricky to isolate and fix.
+
+ - These functions are known to be locale dependent:
+ `alphasort`, `asctime`, `asprintf`, `atof`, `atoi`, `atol`, `atoll`, `atoq`,
+ `btowc`, `ctime`, `dprintf`, `fgetwc`, `fgetws`, `fprintf`, `fputwc`,
+ `fputws`, `fscanf`, `fwprintf`, `getdate`, `getwc`, `getwchar`, `isalnum`,
+ `isalpha`, `isblank`, `iscntrl`, `isdigit`, `isgraph`, `islower`, `isprint`,
+ `ispunct`, `isspace`, `isupper`, `iswalnum`, `iswalpha`, `iswblank`,
+ `iswcntrl`, `iswctype`, `iswdigit`, `iswgraph`, `iswlower`, `iswprint`,
+ `iswpunct`, `iswspace`, `iswupper`, `iswxdigit`, `isxdigit`, `mblen`,
+ `mbrlen`, `mbrtowc`, `mbsinit`, `mbsnrtowcs`, `mbsrtowcs`, `mbstowcs`,
+ `mbtowc`, `mktime`, `putwc`, `putwchar`, `scanf`, `snprintf`, `sprintf`,
+ `sscanf`, `stoi`, `stol`, `stoll`, `strcasecmp`, `strcasestr`, `strcoll`,
+ `strfmon`, `strftime`, `strncasecmp`, `strptime`, `strtod`, `strtof`,
+ `strtoimax`, `strtol`, `strtold`, `strtoll`, `strtoq`, `strtoul`,
+ `strtoull`, `strtoumax`, `strtouq`, `strxfrm`, `swprintf`, `tolower`,
+ `toupper`, `towctrans`, `towlower`, `towupper`, `ungetwc`, `vasprintf`,
+ `vdprintf`, `versionsort`, `vfprintf`, `vfscanf`, `vfwprintf`, `vprintf`,
+ `vscanf`, `vsnprintf`, `vsprintf`, `vsscanf`, `vswprintf`, `vwprintf`,
+ `wcrtomb`, `wcscasecmp`, `wcscoll`, `wcsftime`, `wcsncasecmp`, `wcsnrtombs`,
+ `wcsrtombs`, `wcstod`, `wcstof`, `wcstoimax`, `wcstol`, `wcstold`,
+ `wcstoll`, `wcstombs`, `wcstoul`, `wcstoull`, `wcstoumax`, `wcswidth`,
+ `wcsxfrm`, `wctob`, `wctomb`, `wctrans`, `wctype`, `wcwidth`, `wprintf`
- For `strprintf`, `LogPrint`, `LogPrintf` formatting characters don't need size specifiers
@@ -567,6 +595,12 @@ Source code organization
- *Rationale*: Shorter and simpler header files are easier to read, and reduce compile time
+- Use only the lowercase alphanumerics (`a-z0-9`), underscore (`_`) and hyphen (`-`) in source code filenames.
+
+ - *Rationale*: `grep`:ing and auto-completing filenames is easier when using a consistent
+ naming pattern. Potential problems when building on case-insensitive filesystems are
+ avoided when using only lowercase characters in source code filenames.
+
- Every `.cpp` and `.h` file should `#include` every header file it directly uses classes, functions or other
definitions from, even if those headers are already included indirectly through other headers.
@@ -594,8 +628,8 @@ namespace {
- *Rationale*: Avoids confusion about the namespace context
-- Prefer `#include <primitives/transaction.h>` bracket syntax instead of
- `#include "primitives/transactions.h"` quote syntax when possible.
+- Use `#include <primitives/transaction.h>` bracket syntax instead of
+ `#include "primitives/transactions.h"` quote syntax.
- *Rationale*: Bracket syntax is less ambiguous because the preprocessor
searches a fixed list of include directories without taking location of the
diff --git a/doc/init.md b/doc/init.md
index ffd13ae1f9..d04f7d186a 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -15,7 +15,7 @@ Service User
All three Linux startup configurations assume the existence of a "bitcoin" user
and group. They must be created before attempting to use these scripts.
-The OS X configuration assumes bitcoind will be set up for the current user.
+The macOS configuration assumes bitcoind will be set up for the current user.
Configuration
---------------------------------
@@ -65,7 +65,7 @@ reasons to make the configuration file and data directory only readable by the
bitcoin user and group. Access to bitcoin-cli and other bitcoind rpc clients
can then be controlled by group membership.
-### Mac OS X
+### macOS
Binary: `/usr/local/bin/bitcoind`
Configuration file: `~/Library/Application Support/Bitcoin/bitcoin.conf`
@@ -111,7 +111,7 @@ Using this script, you can adjust the path and flags to the bitcoind program by
setting the BITCOIND and FLAGS environment variables in the file
/etc/sysconfig/bitcoind. You can also use the DAEMONOPTS environment variable here.
-### Mac OS X
+### macOS
Copy org.bitcoin.bitcoind.plist into ~/Library/LaunchAgents. Load the launch agent by
running `launchctl load ~/Library/LaunchAgents/org.bitcoin.bitcoind.plist`.
diff --git a/doc/release-notes-pr12892.md b/doc/release-notes-pr12892.md
index 8105eca5c0..f4a95bd40f 100644
--- a/doc/release-notes-pr12892.md
+++ b/doc/release-notes-pr12892.md
@@ -18,7 +18,7 @@ Here are the changes to RPC methods:
| Deprecated Method | New Method | Notes |
| :---------------------- | :-------------------- | :-----------|
| `getaccount` | `getaddressinfo` | `getaddressinfo` returns a json object with address information instead of just the name of the account as a string. |
-| `getaccountaddress` | `getlabeladdress` | `getlabeladdress` throws an error by default if the label does not already exist, but provides a `force` option for compatibility with existing applications. |
+| `getaccountaddress` | n/a | There is no replacement for `getaccountaddress` since labels do not have an associated receive address. |
| `getaddressesbyaccount` | `getaddressesbylabel` | `getaddressesbylabel` returns a json object with the addresses as keys, instead of a list of strings. |
| `getreceivedbyaccount` | `getreceivedbylabel` | _no change in behavior_ |
| `listaccounts` | `listlabels` | `listlabels` does not return a balance or accept `minconf` and `watchonly` arguments. |
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 7a9a98bfec..e1bb84cca9 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -56,6 +56,11 @@ frequently tested on them.
Notable changes
===============
+GUI changes
+-----------
+
+- Block storage can be limited under Preferences, in the Main tab. Undoing this setting requires downloading the full blockchain again. This mode is incompatible with -txindex and -rescan.
+
RPC changes
------------
diff --git a/doc/release-process.md b/doc/release-process.md
index fb6f08750d..912b620794 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -89,7 +89,7 @@ Ensure gitian-builder is up-to-date:
wget -P inputs http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz
popd
-Create the OS X SDK tarball, see the [OS X readme](README_osx.md) for details, and copy it into the inputs directory.
+Create the macOS SDK tarball, see the [macOS readme](README_osx.md) for details, and copy it into the inputs directory.
### Optional: Seed the Gitian sources cache and offline git repositories
@@ -111,7 +111,7 @@ NOTE: Offline builds must use the --url flag to ensure Gitian fetches only from
The gbuild invocations below <b>DO NOT DO THIS</b> by default.
-### Build and sign Bitcoin Core for Linux, Windows, and OS X:
+### Build and sign Bitcoin Core for Linux, Windows, and macOS:
pushd ./gitian-builder
./bin/gbuild --num-make 2 --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
@@ -134,7 +134,7 @@ Build output expected:
1. source tarball (`bitcoin-${VERSION}.tar.gz`)
2. linux 32-bit and 64-bit dist tarballs (`bitcoin-${VERSION}-linux[32|64].tar.gz`)
3. windows 32-bit and 64-bit unsigned installers and dist zips (`bitcoin-${VERSION}-win[32|64]-setup-unsigned.exe`, `bitcoin-${VERSION}-win[32|64].zip`)
- 4. OS X unsigned installer and dist tarball (`bitcoin-${VERSION}-osx-unsigned.dmg`, `bitcoin-${VERSION}-osx64.tar.gz`)
+ 4. macOS unsigned installer and dist tarball (`bitcoin-${VERSION}-osx-unsigned.dmg`, `bitcoin-${VERSION}-osx64.tar.gz`)
5. Gitian signatures (in `gitian.sigs/${VERSION}-<linux|{win,osx}-unsigned>/(your Gitian key)/`)
### Verify other gitian builders signatures to your own. (Optional)
@@ -161,13 +161,13 @@ Commit your signature to gitian.sigs:
git push # Assuming you can push to the gitian.sigs tree
popd
-Codesigner only: Create Windows/OS X detached signatures:
+Codesigner only: Create Windows/macOS detached signatures:
- Only one person handles codesigning. Everyone else should skip to the next step.
-- Only once the Windows/OS X builds each have 3 matching signatures may they be signed with their respective release keys.
+- Only once the Windows/macOS builds each have 3 matching signatures may they be signed with their respective release keys.
-Codesigner only: Sign the osx binary:
+Codesigner only: Sign the macOS binary:
- transfer bitcoin-osx-unsigned.tar.gz to osx for signing
+ transfer bitcoin-osx-unsigned.tar.gz to macOS for signing
tar xf bitcoin-osx-unsigned.tar.gz
./detached-sig-create.sh -s "Key ID"
Enter the keychain password and authorize the signature
@@ -192,12 +192,12 @@ Codesigner only: Commit the detached codesign payloads:
git tag -s v${VERSION} HEAD
git push the current branch and new tag
-Non-codesigners: wait for Windows/OS X detached signatures:
+Non-codesigners: wait for Windows/macOS detached signatures:
-- Once the Windows/OS X builds each have 3 matching signatures, they will be signed with their respective release keys.
+- Once the Windows/macOS builds each have 3 matching signatures, they will be signed with their respective release keys.
- Detached signatures will then be committed to the [bitcoin-detached-sigs](https://github.com/bitcoin-core/bitcoin-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries.
-Create (and optionally verify) the signed OS X binary:
+Create (and optionally verify) the signed macOS binary:
pushd ./gitian-builder
./bin/gbuild -i --commit signature=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml
@@ -216,7 +216,7 @@ Create (and optionally verify) the signed Windows binaries:
mv build/out/bitcoin-*win32-setup.exe ../bitcoin-${VERSION}-win32-setup.exe
popd
-Commit your signature for the signed OS X/Windows binaries:
+Commit your signature for the signed macOS/Windows binaries:
pushd gitian.sigs
git add ${VERSION}-osx-signed/"${SIGNER}"
diff --git a/doc/translation_process.md b/doc/translation_process.md
index 5a9c59914e..022d7bb00b 100644
--- a/doc/translation_process.md
+++ b/doc/translation_process.md
@@ -46,9 +46,7 @@ Visit the [Transifex Signup](https://www.transifex.com/signup/) page to create a
You can find the Bitcoin translation project at [https://www.transifex.com/projects/p/bitcoin/](https://www.transifex.com/projects/p/bitcoin/).
### Installing the Transifex client command-line tool
-The client it used to fetch updated translations. If you are having problems, or need more details, see [http://docs.transifex.com/developer/client/setup](http://docs.transifex.com/developer/client/setup)
-
-**For Linux and Mac**
+The client is used to fetch updated translations. If you are having problems, or need more details, see [https://docs.transifex.com/client/installing-the-client](https://docs.transifex.com/client/installing-the-client)
`pip install transifex-client`
@@ -64,10 +62,6 @@ token =
username = USERNAME
```
-**For Windows**
-
-Please see [http://docs.transifex.com/developer/client/setup#windows](http://docs.transifex.com/developer/client/setup#windows) for details on installation.
-
The Transifex Bitcoin project config file is included as part of the repo. It can be found at `.tx/config`, however you shouldn’t need change anything.
### Synchronising translations
diff --git a/src/Makefile.am b/src/Makefile.am
index 96e56915a6..e03c21f16e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -29,9 +29,7 @@ LIBBITCOIN_COMMON=libbitcoin_common.a
LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a
LIBBITCOIN_CLI=libbitcoin_cli.a
LIBBITCOIN_UTIL=libbitcoin_util.a
-LIBBITCOIN_CRYPTO=crypto/libbitcoin_crypto.a
-LIBBITCOIN_CRYPTO_SSE41=crypto/libbitcoin_crypto_sse41.a
-LIBBITCOIN_CRYPTO_AVX2=crypto/libbitcoin_crypto_avx2.a
+LIBBITCOIN_CRYPTO_BASE=crypto/libbitcoin_crypto_base.a
LIBBITCOINQT=qt/libbitcoinqt.a
LIBSECP256K1=secp256k1/libsecp256k1.la
@@ -45,6 +43,16 @@ if ENABLE_WALLET
LIBBITCOIN_WALLET=libbitcoin_wallet.a
endif
+LIBBITCOIN_CRYPTO= $(LIBBITCOIN_CRYPTO_BASE)
+if ENABLE_SSE41
+LIBBITCOIN_CRYPTO_SSE41 = crypto/libbitcoin_crypto_sse41.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_SSE41)
+endif
+if ENABLE_AVX2
+LIBBITCOIN_CRYPTO_AVX2 = crypto/libbitcoin_crypto_avx2.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_AVX2)
+endif
+
$(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
@@ -52,8 +60,6 @@ $(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
# But to build the less dependent modules first, we manually select their order here:
EXTRA_LIBRARIES += \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_CONSENSUS) \
@@ -107,6 +113,7 @@ BITCOIN_CORE_H = \
fs.h \
httprpc.h \
httpserver.h \
+ index/base.h \
index/txindex.h \
indirectmap.h \
init.h \
@@ -208,6 +215,7 @@ libbitcoin_server_a_SOURCES = \
consensus/tx_verify.cpp \
httprpc.cpp \
httpserver.cpp \
+ index/base.cpp \
index/txindex.cpp \
init.cpp \
dbwrapper.cpp \
@@ -268,9 +276,9 @@ libbitcoin_wallet_a_SOURCES = \
$(BITCOIN_CORE_H)
# crypto primitives library
-crypto_libbitcoin_crypto_a_CPPFLAGS = $(AM_CPPFLAGS)
-crypto_libbitcoin_crypto_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-crypto_libbitcoin_crypto_a_SOURCES = \
+crypto_libbitcoin_crypto_base_a_CPPFLAGS = $(AM_CPPFLAGS)
+crypto_libbitcoin_crypto_base_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+crypto_libbitcoin_crypto_base_a_SOURCES = \
crypto/aes.cpp \
crypto/aes.h \
crypto/chacha20.h \
@@ -290,23 +298,19 @@ crypto_libbitcoin_crypto_a_SOURCES = \
crypto/sha512.h
if USE_ASM
-crypto_libbitcoin_crypto_a_SOURCES += crypto/sha256_sse4.cpp
+crypto_libbitcoin_crypto_base_a_SOURCES += crypto/sha256_sse4.cpp
endif
crypto_libbitcoin_crypto_sse41_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_sse41_a_CPPFLAGS = $(AM_CPPFLAGS)
-if ENABLE_SSE41
crypto_libbitcoin_crypto_sse41_a_CXXFLAGS += $(SSE41_CXXFLAGS)
crypto_libbitcoin_crypto_sse41_a_CPPFLAGS += -DENABLE_SSE41
-endif
crypto_libbitcoin_crypto_sse41_a_SOURCES = crypto/sha256_sse41.cpp
crypto_libbitcoin_crypto_avx2_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS = $(AM_CPPFLAGS)
-if ENABLE_AVX2
crypto_libbitcoin_crypto_avx2_a_CXXFLAGS += $(AVX2_CXXFLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS += -DENABLE_AVX2
-endif
crypto_libbitcoin_crypto_avx2_a_SOURCES = crypto/sha256_avx2.cpp
# consensus: shared between all executables that validate any consensus rules.
@@ -431,8 +435,6 @@ bitcoind_LDADD = \
$(LIBBITCOIN_ZMQ) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) \
$(LIBMEMENV) \
@@ -454,9 +456,7 @@ bitcoin_cli_LDADD = \
$(LIBBITCOIN_CLI) \
$(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL) \
- $(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2)
+ $(LIBBITCOIN_CRYPTO)
bitcoin_cli_LDADD += $(BOOST_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(EVENT_LIBS)
#
@@ -477,8 +477,6 @@ bitcoin_tx_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBSECP256K1)
bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
@@ -487,7 +485,7 @@ bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
# bitcoinconsensus library #
if BUILD_BITCOIN_LIBS
include_HEADERS = script/bitcoinconsensus.h
-libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
+libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_base_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
if GLIBC_BACK_COMPAT
libbitcoinconsensus_la_SOURCES += compat/glibc_compat.cpp
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 804df3bf21..bc2c46f228 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -17,7 +17,7 @@ bench_bench_bitcoin_SOURCES = \
bench/bench.h \
bench/checkblock.cpp \
bench/checkqueue.cpp \
- bench/Examples.cpp \
+ bench/examples.cpp \
bench/rollingbloom.cpp \
bench/crypto_hash.cpp \
bench/ccoins_caching.cpp \
@@ -39,8 +39,6 @@ bench_bench_bitcoin_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) \
$(LIBMEMENV) \
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index f8c31be3d4..a84a11ac45 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -408,7 +408,7 @@ endif
if ENABLE_ZMQ
qt_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
+qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
$(BOOST_LIBS) $(QT_LIBS) $(QT_DBUS_LIBS) $(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
qt_bitcoin_qt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
diff --git a/src/Makefile.qttest.include b/src/Makefile.qttest.include
index a4356f1cbd..4b14212b2e 100644
--- a/src/Makefile.qttest.include
+++ b/src/Makefile.qttest.include
@@ -62,7 +62,7 @@ endif
if ENABLE_ZMQ
qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) $(LIBLEVELDB) \
+qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(QT_DBUS_LIBS) $(QT_TEST_LIBS) $(QT_LIBS) \
$(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index cbd63cd53d..a4d31795ec 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -46,7 +46,7 @@ BITCOIN_TESTS =\
test/compress_tests.cpp \
test/crypto_tests.cpp \
test/cuckoocache_tests.cpp \
- test/DoS_tests.cpp \
+ test/denialofservice_tests.cpp \
test/getarg_tests.cpp \
test/hash_tests.cpp \
test/key_io_tests.cpp \
@@ -71,7 +71,7 @@ BITCOIN_TESTS =\
test/rpc_tests.cpp \
test/sanity_tests.cpp \
test/scheduler_tests.cpp \
- test/script_P2SH_tests.cpp \
+ test/script_p2sh_tests.cpp \
test/script_tests.cpp \
test/script_standard_tests.cpp \
test/scriptnum_tests.cpp \
@@ -110,7 +110,7 @@ if ENABLE_WALLET
test_test_bitcoin_LDADD += $(LIBBITCOIN_WALLET)
endif
-test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) \
+test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) \
$(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS)
test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index 3f4cc8c2bf..e4c7575e2d 100644
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -64,14 +64,6 @@ public:
explicit base_uint(const std::string& str);
- bool operator!() const
- {
- for (int i = 0; i < WIDTH; i++)
- if (pn[i] != 0)
- return false;
- return true;
- }
-
const base_uint operator~() const
{
base_uint ret;
diff --git a/src/bench/Examples.cpp b/src/bench/examples.cpp
index b68c9cd156..b68c9cd156 100644
--- a/src/bench/Examples.cpp
+++ b/src/bench/examples.cpp
diff --git a/src/bench/merkle_root.cpp b/src/bench/merkle_root.cpp
index ae2a0a28dc..fab12da311 100644
--- a/src/bench/merkle_root.cpp
+++ b/src/bench/merkle_root.cpp
@@ -2,11 +2,11 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "bench.h"
+#include <bench/bench.h>
-#include "uint256.h"
-#include "random.h"
-#include "consensus/merkle.h"
+#include <uint256.h>
+#include <random.h>
+#include <consensus/merkle.h>
static void MerkleRoot(benchmark::State& state)
{
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index be5ce14480..b332b5e581 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -56,6 +56,18 @@ static void SetupCliArgs()
gArgs.AddArg("-help", "", false, OptionsCategory::HIDDEN);
}
+/** libevent event log callback */
+static void libevent_log_cb(int severity, const char *msg)
+{
+#ifndef EVENT_LOG_ERR // EVENT_LOG_ERR was added in 2.0.19; but before then _EVENT_LOG_ERR existed.
+# define EVENT_LOG_ERR _EVENT_LOG_ERR
+#endif
+ // Ignore everything other than errors
+ if (severity >= EVENT_LOG_ERR) {
+ throw std::runtime_error(strprintf("libevent error: %s", msg));
+ }
+}
+
//////////////////////////////////////////////////////////////////////////////
//
// Start
@@ -506,6 +518,7 @@ int main(int argc, char* argv[])
fprintf(stderr, "Error: Initializing networking failed\n");
return EXIT_FAILURE;
}
+ event_set_log_callback(&libevent_log_cb);
try {
int ret = AppInitRPC(argc, argv);
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 3fb505d739..2a594c3051 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -637,7 +637,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
// Sign what we can:
for (unsigned int i = 0; i < mergedTx.vin.size(); i++) {
- const CTxIn& txin = mergedTx.vin[i];
+ CTxIn& txin = mergedTx.vin[i];
const Coin& coin = view.AccessCoin(txin.prevout);
if (coin.IsSpent()) {
continue;
@@ -652,7 +652,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
// ... and merge in other signatures:
sigdata = CombineSignatures(prevPubKey, MutableTransactionSignatureChecker(&mergedTx, i, amount), sigdata, DataFromTransaction(txv, i));
- UpdateTransaction(mergedTx, i, sigdata);
+ UpdateInput(txin, sigdata);
}
tx = mergedTx;
diff --git a/src/crypto/sha256_avx2.cpp b/src/crypto/sha256_avx2.cpp
index f45c1d4ab6..b338b06927 100644
--- a/src/crypto/sha256_avx2.cpp
+++ b/src/crypto/sha256_avx2.cpp
@@ -7,8 +7,8 @@
#include <x86intrin.h>
#endif
-#include "crypto/sha256.h"
-#include "crypto/common.h"
+#include <crypto/sha256.h>
+#include <crypto/common.h>
namespace sha256d64_avx2 {
namespace {
diff --git a/src/crypto/sha256_sse41.cpp b/src/crypto/sha256_sse41.cpp
index a11d658c70..be71dd8fb8 100644
--- a/src/crypto/sha256_sse41.cpp
+++ b/src/crypto/sha256_sse41.cpp
@@ -7,8 +7,8 @@
#include <x86intrin.h>
#endif
-#include "crypto/sha256.h"
-#include "crypto/common.h"
+#include <crypto/sha256.h>
+#include <crypto/common.h>
namespace sha256d64_sse41 {
namespace {
diff --git a/src/index/base.cpp b/src/index/base.cpp
new file mode 100644
index 0000000000..738166dc94
--- /dev/null
+++ b/src/index/base.cpp
@@ -0,0 +1,278 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <index/base.h>
+#include <init.h>
+#include <tinyformat.h>
+#include <ui_interface.h>
+#include <util.h>
+#include <validation.h>
+#include <warnings.h>
+
+constexpr char DB_BEST_BLOCK = 'B';
+
+constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
+constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
+
+template<typename... Args>
+static void FatalError(const char* fmt, const Args&... args)
+{
+ std::string strMessage = tfm::format(fmt, args...);
+ SetMiscWarning(strMessage);
+ LogPrintf("*** %s\n", strMessage);
+ uiInterface.ThreadSafeMessageBox(
+ "Error: A fatal internal error occurred, see debug.log for details",
+ "", CClientUIInterface::MSG_ERROR);
+ StartShutdown();
+}
+
+BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
+ CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate)
+{}
+
+bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
+{
+ bool success = Read(DB_BEST_BLOCK, locator);
+ if (!success) {
+ locator.SetNull();
+ }
+ return success;
+}
+
+bool BaseIndex::DB::WriteBestBlock(const CBlockLocator& locator)
+{
+ return Write(DB_BEST_BLOCK, locator);
+}
+
+BaseIndex::~BaseIndex()
+{
+ Interrupt();
+ Stop();
+}
+
+bool BaseIndex::Init()
+{
+ CBlockLocator locator;
+ if (!GetDB().ReadBestBlock(locator)) {
+ locator.SetNull();
+ }
+
+ LOCK(cs_main);
+ m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
+ m_synced = m_best_block_index.load() == chainActive.Tip();
+ return true;
+}
+
+static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev)
+{
+ AssertLockHeld(cs_main);
+
+ if (!pindex_prev) {
+ return chainActive.Genesis();
+ }
+
+ const CBlockIndex* pindex = chainActive.Next(pindex_prev);
+ if (pindex) {
+ return pindex;
+ }
+
+ return chainActive.Next(chainActive.FindFork(pindex_prev));
+}
+
+void BaseIndex::ThreadSync()
+{
+ const CBlockIndex* pindex = m_best_block_index.load();
+ if (!m_synced) {
+ auto& consensus_params = Params().GetConsensus();
+
+ int64_t last_log_time = 0;
+ int64_t last_locator_write_time = 0;
+ while (true) {
+ if (m_interrupt) {
+ WriteBestBlock(pindex);
+ return;
+ }
+
+ {
+ LOCK(cs_main);
+ const CBlockIndex* pindex_next = NextSyncBlock(pindex);
+ if (!pindex_next) {
+ WriteBestBlock(pindex);
+ m_best_block_index = pindex;
+ m_synced = true;
+ break;
+ }
+ pindex = pindex_next;
+ }
+
+ int64_t current_time = GetTime();
+ if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
+ LogPrintf("Syncing %s with block chain from height %d\n",
+ GetName(), pindex->nHeight);
+ last_log_time = current_time;
+ }
+
+ if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
+ WriteBestBlock(pindex);
+ last_locator_write_time = current_time;
+ }
+
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
+ FatalError("%s: Failed to read block %s from disk",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+ if (!WriteBlock(block, pindex)) {
+ FatalError("%s: Failed to write block %s to index database",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+ }
+ }
+
+ if (pindex) {
+ LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
+ } else {
+ LogPrintf("%s is enabled\n", GetName());
+ }
+}
+
+bool BaseIndex::WriteBestBlock(const CBlockIndex* block_index)
+{
+ LOCK(cs_main);
+ if (!GetDB().WriteBestBlock(chainActive.GetLocator(block_index))) {
+ return error("%s: Failed to write locator to disk", __func__);
+ }
+ return true;
+}
+
+void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted)
+{
+ if (!m_synced) {
+ return;
+ }
+
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (!best_block_index) {
+ if (pindex->nHeight != 0) {
+ FatalError("%s: First block connected is not the genesis block (height=%d)",
+ __func__, pindex->nHeight);
+ return;
+ }
+ } else {
+ // Ensure block connects to an ancestor of the current best block. This should be the case
+ // most of the time, but may not be immediately after the sync thread catches up and sets
+ // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
+ // in the ValidationInterface queue backlog even after the sync thread has caught up to the
+ // new chain tip. In this unlikely event, log a warning and let the queue clear.
+ if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
+ LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
+ "known best chain (tip=%s); not updating index\n",
+ __func__, pindex->GetBlockHash().ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+ }
+
+ if (WriteBlock(*block, pindex)) {
+ m_best_block_index = pindex;
+ } else {
+ FatalError("%s: Failed to write block %s to index",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+}
+
+void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
+{
+ if (!m_synced) {
+ return;
+ }
+
+ const uint256& locator_tip_hash = locator.vHave.front();
+ const CBlockIndex* locator_tip_index;
+ {
+ LOCK(cs_main);
+ locator_tip_index = LookupBlockIndex(locator_tip_hash);
+ }
+
+ if (!locator_tip_index) {
+ FatalError("%s: First block (hash=%s) in locator was not found",
+ __func__, locator_tip_hash.ToString());
+ return;
+ }
+
+ // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
+ // immediately after the sync thread catches up and sets m_synced. Consider the case where
+ // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
+ // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
+ // event, log a warning and let the queue clear.
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
+ LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
+ "chain (tip=%s); not writing index locator\n",
+ __func__, locator_tip_hash.ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+
+ if (!GetDB().WriteBestBlock(locator)) {
+ error("%s: Failed to write locator to disk", __func__);
+ }
+}
+
+bool BaseIndex::BlockUntilSyncedToCurrentChain()
+{
+ AssertLockNotHeld(cs_main);
+
+ if (!m_synced) {
+ return false;
+ }
+
+ {
+ // Skip the queue-draining stuff if we know we're caught up with
+ // chainActive.Tip().
+ LOCK(cs_main);
+ const CBlockIndex* chain_tip = chainActive.Tip();
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
+ return true;
+ }
+ }
+
+ LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
+ SyncWithValidationInterfaceQueue();
+ return true;
+}
+
+void BaseIndex::Interrupt()
+{
+ m_interrupt();
+}
+
+void BaseIndex::Start()
+{
+ // Need to register this ValidationInterface before running Init(), so that
+ // callbacks are not missed if Init sets m_synced to true.
+ RegisterValidationInterface(this);
+ if (!Init()) {
+ FatalError("%s: %s failed to initialize", __func__, GetName());
+ return;
+ }
+
+ m_thread_sync = std::thread(&TraceThread<std::function<void()>>, GetName(),
+ std::bind(&BaseIndex::ThreadSync, this));
+}
+
+void BaseIndex::Stop()
+{
+ UnregisterValidationInterface(this);
+
+ if (m_thread_sync.joinable()) {
+ m_thread_sync.join();
+ }
+}
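
The sync loop above throttles its side effects: progress is logged at most once per SYNC_LOG_INTERVAL, and the best-block locator is persisted at most once per SYNC_LOCATOR_WRITE_INTERVAL, so a long initial sync neither floods debug.log nor rewrites the locator on every block. A minimal standalone sketch of that throttling pattern, using std::time in place of GetTime() and placeholder work in place of WriteBlock/WriteBestBlock:

    #include <cstdio>
    #include <ctime>

    int main()
    {
        const std::time_t LOG_INTERVAL = 30;     // mirrors SYNC_LOG_INTERVAL
        const std::time_t PERSIST_INTERVAL = 30; // mirrors SYNC_LOCATOR_WRITE_INTERVAL

        std::time_t last_log_time = 0;
        std::time_t last_persist_time = 0;

        for (int height = 0; height < 100; ++height) { // stand-in for the NextSyncBlock() walk
            const std::time_t now = std::time(nullptr);
            if (last_log_time + LOG_INTERVAL < now) {
                std::printf("synced to height %d\n", height);
                last_log_time = now;
            }
            if (last_persist_time + PERSIST_INTERVAL < now) {
                // stand-in for WriteBestBlock(pindex): persist the locator so a restart resumes near here
                last_persist_time = now;
            }
            // ... per-block indexing work (WriteBlock in the real loop) ...
        }
        return 0;
    }

The periodic locator write is what lets an interrupted sync resume close to where it stopped rather than from the last full flush.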
diff --git a/src/index/base.h b/src/index/base.h
new file mode 100644
index 0000000000..04ee6e6cc2
--- /dev/null
+++ b/src/index/base.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_INDEX_BASE_H
+#define BITCOIN_INDEX_BASE_H
+
+#include <dbwrapper.h>
+#include <primitives/block.h>
+#include <primitives/transaction.h>
+#include <threadinterrupt.h>
+#include <uint256.h>
+#include <validationinterface.h>
+
+class CBlockIndex;
+
+/**
+ * Base class for indices of blockchain data. This implements
+ * CValidationInterface and ensures blocks are indexed sequentially according
+ * to their position in the active chain.
+ */
+class BaseIndex : public CValidationInterface
+{
+protected:
+ class DB : public CDBWrapper
+ {
+ public:
+ DB(const fs::path& path, size_t n_cache_size,
+ bool f_memory = false, bool f_wipe = false, bool f_obfuscate = false);
+
+ /// Read block locator of the chain that the txindex is in sync with.
+ bool ReadBestBlock(CBlockLocator& locator) const;
+
+ /// Write block locator of the chain that the txindex is in sync with.
+ bool WriteBestBlock(const CBlockLocator& locator);
+ };
+
+private:
+ /// Whether the index is in sync with the main chain. The flag is flipped
+ /// from false to true once, after which point this starts processing
+ /// ValidationInterface notifications to stay in sync.
+ std::atomic<bool> m_synced{false};
+
+ /// The last block in the chain that the index is in sync with.
+ std::atomic<const CBlockIndex*> m_best_block_index{nullptr};
+
+ std::thread m_thread_sync;
+ CThreadInterrupt m_interrupt;
+
+ /// Sync the index with the block index starting from the current best block.
+ /// Intended to be run in its own thread, m_thread_sync, and can be
+ /// interrupted with m_interrupt. Once the index gets in sync, the m_synced
+ /// flag is set and the BlockConnected ValidationInterface callback takes
+ /// over and the sync thread exits.
+ void ThreadSync();
+
+ /// Write the current chain block locator to the DB.
+ bool WriteBestBlock(const CBlockIndex* block_index);
+
+protected:
+ void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted) override;
+
+ void ChainStateFlushed(const CBlockLocator& locator) override;
+
+ /// Initialize internal state from the database and block index.
+ virtual bool Init();
+
+ /// Write/update index entries for a newly connected block.
+ virtual bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) { return true; }
+
+ virtual DB& GetDB() const = 0;
+
+ /// Get the name of the index for display in logs.
+ virtual const char* GetName() const = 0;
+
+public:
+ /// Destructor interrupts sync thread if running and blocks until it exits.
+ virtual ~BaseIndex();
+
+ /// Blocks the current thread until the index is caught up to the current
+ /// state of the block chain. This only blocks if the index has gotten in
+ /// sync once and only needs to process blocks in the ValidationInterface
+ /// queue. If the index is catching up from far behind, this method does
+ /// not block and immediately returns false.
+ bool BlockUntilSyncedToCurrentChain();
+
+ void Interrupt();
+
+ /// Start initializes the sync state and registers the instance as a
+ /// ValidationInterface so that it stays in sync with blockchain updates.
+ void Start();
+
+ /// Stops the instance from staying in sync with blockchain updates.
+ void Stop();
+};
+
+#endif // BITCOIN_INDEX_BASE_H
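
As the class comment says, BaseIndex supplies the sequential-sync and notification plumbing while concrete indexes only supply storage. A much-simplified standalone analogue of that split, with made-up names (IndexBase, TxLocationIndex, Block) standing in for BaseIndex, TxIndex, and the real chain types:

    #include <cstdio>
    #include <string>

    // Stand-ins for the real chain types; the actual class consumes CBlock/CBlockIndex.
    struct Block { int height; std::string hash; };

    // Simplified analogue of BaseIndex: the base owns the bookkeeping, subclasses own storage.
    class IndexBase {
    public:
        virtual ~IndexBase() = default;

        // Drive the index forward one block; BaseIndex does this from ThreadSync/BlockConnected.
        bool Append(const Block& block)
        {
            if (!WriteBlock(block)) return false;
            m_best_height = block.height;
            return true;
        }
        int BestHeight() const { return m_best_height; }

    protected:
        virtual bool WriteBlock(const Block& block) = 0; // like BaseIndex::WriteBlock
        virtual const char* GetName() const = 0;         // like BaseIndex::GetName

    private:
        int m_best_height{-1};
    };

    // Hypothetical subclass for illustration only.
    class TxLocationIndex final : public IndexBase {
    protected:
        bool WriteBlock(const Block& block) override
        {
            std::printf("%s: indexed %s at height %d\n", GetName(), block.hash.c_str(), block.height);
            return true;
        }
        const char* GetName() const override { return "txlocationindex"; }
    };

    int main()
    {
        TxLocationIndex index;
        index.Append({0, "genesis"});
        index.Append({1, "block-1"});
        std::printf("best height: %d\n", index.BestHeight());
        return 0;
    }

In the real class the driving calls come from ThreadSync and the BlockConnected callback rather than from a plain Append().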
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 3ff16b7664..e106b9b420 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -2,258 +2,261 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <chainparams.h>
#include <index/txindex.h>
#include <init.h>
-#include <tinyformat.h>
#include <ui_interface.h>
#include <util.h>
#include <validation.h>
-#include <warnings.h>
-constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
-constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
+#include <boost/thread.hpp>
+
+constexpr char DB_BEST_BLOCK = 'B';
+constexpr char DB_TXINDEX = 't';
+constexpr char DB_TXINDEX_BLOCK = 'T';
std::unique_ptr<TxIndex> g_txindex;
-template<typename... Args>
-static void FatalError(const char* fmt, const Args&... args)
+struct CDiskTxPos : public CDiskBlockPos
{
- std::string strMessage = tfm::format(fmt, args...);
- SetMiscWarning(strMessage);
- LogPrintf("*** %s\n", strMessage);
- uiInterface.ThreadSafeMessageBox(
- "Error: A fatal internal error occurred, see debug.log for details",
- "", CClientUIInterface::MSG_ERROR);
- StartShutdown();
-}
+ unsigned int nTxOffset; // after header
-TxIndex::TxIndex(std::unique_ptr<TxIndexDB> db) :
- m_db(std::move(db)), m_synced(false), m_best_block_index(nullptr)
-{}
+ ADD_SERIALIZE_METHODS;
-TxIndex::~TxIndex()
-{
- Interrupt();
- Stop();
-}
-
-bool TxIndex::Init()
-{
- LOCK(cs_main);
-
- // Attempt to migrate txindex from the old database to the new one. Even if
- // chain_tip is null, the node could be reindexing and we still want to
- // delete txindex records in the old database.
- if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
- return false;
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ READWRITEAS(CDiskBlockPos, *this);
+ READWRITE(VARINT(nTxOffset));
}
- CBlockLocator locator;
- if (!m_db->ReadBestBlock(locator)) {
- locator.SetNull();
+ CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
}
- m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
- m_synced = m_best_block_index.load() == chainActive.Tip();
- return true;
-}
-
-static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev)
-{
- AssertLockHeld(cs_main);
-
- if (!pindex_prev) {
- return chainActive.Genesis();
+ CDiskTxPos() {
+ SetNull();
}
- const CBlockIndex* pindex = chainActive.Next(pindex_prev);
- if (pindex) {
- return pindex;
+ void SetNull() {
+ CDiskBlockPos::SetNull();
+ nTxOffset = 0;
}
-
- return chainActive.Next(chainActive.FindFork(pindex_prev));
-}
-
-void TxIndex::ThreadSync()
+};
+
+/**
+ * Access to the txindex database (indexes/txindex/)
+ *
+ * The database stores a block locator of the chain the database is synced to
+ * so that the TxIndex can efficiently determine the point it last stopped at.
+ * A locator is used instead of a simple hash of the chain tip because blocks
+ * and block index entries may not be flushed to disk until after this database
+ * is updated.
+ */
+class TxIndex::DB : public BaseIndex::DB
{
- const CBlockIndex* pindex = m_best_block_index.load();
- if (!m_synced) {
- auto& consensus_params = Params().GetConsensus();
-
- int64_t last_log_time = 0;
- int64_t last_locator_write_time = 0;
- while (true) {
- if (m_interrupt) {
- WriteBestBlock(pindex);
- return;
- }
+public:
+ explicit DB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
- {
- LOCK(cs_main);
- const CBlockIndex* pindex_next = NextSyncBlock(pindex);
- if (!pindex_next) {
- WriteBestBlock(pindex);
- m_best_block_index = pindex;
- m_synced = true;
- break;
- }
- pindex = pindex_next;
- }
+ /// Read the disk location of the transaction data with the given hash. Returns false if the
+ /// transaction hash is not indexed.
+ bool ReadTxPos(const uint256& txid, CDiskTxPos& pos) const;
- int64_t current_time = GetTime();
- if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
- LogPrintf("Syncing txindex with block chain from height %d\n", pindex->nHeight);
- last_log_time = current_time;
- }
+ /// Write a batch of transaction positions to the DB.
+ bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
- if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
- WriteBestBlock(pindex);
- last_locator_write_time = current_time;
- }
+ /// Migrate txindex data from the block tree DB, where it may still reside on older nodes that
+ /// have not yet been upgraded to the new database.
+ bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
+};
- CBlock block;
- if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
- FatalError("%s: Failed to read block %s from disk",
- __func__, pindex->GetBlockHash().ToString());
- return;
- }
- if (!WriteBlock(block, pindex)) {
- FatalError("%s: Failed to write block %s to tx index database",
- __func__, pindex->GetBlockHash().ToString());
- return;
- }
- }
- }
+TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe) :
+ BaseIndex::DB(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe)
+{}
- if (pindex) {
- LogPrintf("txindex is enabled at height %d\n", pindex->nHeight);
- } else {
- LogPrintf("txindex is enabled\n");
- }
+bool TxIndex::DB::ReadTxPos(const uint256 &txid, CDiskTxPos& pos) const
+{
+ return Read(std::make_pair(DB_TXINDEX, txid), pos);
}
-bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+bool TxIndex::DB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos)
{
- CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
- std::vector<std::pair<uint256, CDiskTxPos>> vPos;
- vPos.reserve(block.vtx.size());
- for (const auto& tx : block.vtx) {
- vPos.emplace_back(tx->GetHash(), pos);
- pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ CDBBatch batch(*this);
+ for (const auto& tuple : v_pos) {
+ batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
}
- return m_db->WriteTxs(vPos);
+ return WriteBatch(batch);
}
-bool TxIndex::WriteBestBlock(const CBlockIndex* block_index)
+/*
+ * Safely persist a transfer of data from the old txindex database to the new one, and compact the
+ * range of keys updated. This is used internally by MigrateData.
+ */
+static void WriteTxIndexMigrationBatches(CDBWrapper& newdb, CDBWrapper& olddb,
+ CDBBatch& batch_newdb, CDBBatch& batch_olddb,
+ const std::pair<unsigned char, uint256>& begin_key,
+ const std::pair<unsigned char, uint256>& end_key)
{
- LOCK(cs_main);
- if (!m_db->WriteBestBlock(chainActive.GetLocator(block_index))) {
- return error("%s: Failed to write locator to disk", __func__);
- }
- return true;
+ // Sync new DB changes to disk before deleting from old DB.
+ newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
+ olddb.WriteBatch(batch_olddb);
+ olddb.CompactRange(begin_key, end_key);
+
+ batch_newdb.Clear();
+ batch_olddb.Clear();
}
-void TxIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted)
+bool TxIndex::DB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
{
- if (!m_synced) {
- return;
- }
-
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (!best_block_index) {
- if (pindex->nHeight != 0) {
- FatalError("%s: First block connected is not the genesis block (height=%d)",
- __func__, pindex->nHeight);
- return;
+ // The prior implementation of txindex was always in sync with block index
+ // and presence was indicated with a boolean DB flag. If the flag is set,
+ // this means the txindex from a previous version is valid and in sync with
+ // the chain tip. The first step of the migration is to unset the flag and
+ // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
+ // index entries are copied over in batches to the new database. Finally,
+ // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
+ // written to the new database.
+ //
+ // Unsetting the boolean flag ensures that if the node is downgraded to a
+ // previous version, it will not see a corrupted, partially migrated index
+ // -- it will see that the txindex is disabled. When the node is upgraded
+ // again, the migration will pick up where it left off and sync to the block
+ // with hash DB_TXINDEX_BLOCK.
+ bool f_legacy_flag = false;
+ block_tree_db.ReadFlag("txindex", f_legacy_flag);
+ if (f_legacy_flag) {
+ if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
+ return error("%s: cannot write block indicator", __func__);
}
- } else {
- // Ensure block connects to an ancestor of the current best block. This should be the case
- // most of the time, but may not be immediately after the sync thread catches up and sets
- // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
- // in the ValidationInterface queue backlog even after the sync thread has caught up to the
- // new chain tip. In this unlikely event, log a warning and let the queue clear.
- if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
- LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
- "known best chain (tip=%s); not updating txindex\n",
- __func__, pindex->GetBlockHash().ToString(),
- best_block_index->GetBlockHash().ToString());
- return;
+ if (!block_tree_db.WriteFlag("txindex", false)) {
+ return error("%s: cannot write block index db flag", __func__);
}
}
- if (WriteBlock(*block, pindex)) {
- m_best_block_index = pindex;
- } else {
- FatalError("%s: Failed to write block %s to txindex",
- __func__, pindex->GetBlockHash().ToString());
- return;
+ CBlockLocator locator;
+ if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
+ return true;
}
-}
-void TxIndex::ChainStateFlushed(const CBlockLocator& locator)
-{
- if (!m_synced) {
- return;
- }
+ int64_t count = 0;
+ LogPrintf("Upgrading txindex database... [0%%]\n");
+ uiInterface.ShowProgress(_("Upgrading txindex database"), 0, true);
+ int report_done = 0;
+ const size_t batch_size = 1 << 24; // 16 MiB
+
+ CDBBatch batch_newdb(*this);
+ CDBBatch batch_olddb(block_tree_db);
+
+ std::pair<unsigned char, uint256> key;
+ std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
+ std::pair<unsigned char, uint256> prev_key = begin_key;
+
+ bool interrupted = false;
+ std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
+ for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
+ boost::this_thread::interruption_point();
+ if (ShutdownRequested()) {
+ interrupted = true;
+ break;
+ }
- const uint256& locator_tip_hash = locator.vHave.front();
- const CBlockIndex* locator_tip_index;
- {
- LOCK(cs_main);
- locator_tip_index = LookupBlockIndex(locator_tip_hash);
- }
+ if (!cursor->GetKey(key)) {
+ return error("%s: cannot get key from valid cursor", __func__);
+ }
+ if (key.first != DB_TXINDEX) {
+ break;
+ }
- if (!locator_tip_index) {
- FatalError("%s: First block (hash=%s) in locator was not found",
- __func__, locator_tip_hash.ToString());
- return;
+ // Log progress every 10%.
+ if (++count % 256 == 0) {
+ // Since txids are uniformly random and traversed in increasing order, the high 16 bits
+ // of the hash can be used to estimate the current progress.
+ const uint256& txid = key.second;
+ uint32_t high_nibble =
+ (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
+ (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
+ int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
+
+ uiInterface.ShowProgress(_("Upgrading txindex database"), percentage_done, true);
+ if (report_done < percentage_done/10) {
+ LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
+ report_done = percentage_done/10;
+ }
+ }
+
+ CDiskTxPos value;
+ if (!cursor->GetValue(value)) {
+ return error("%s: cannot parse txindex record", __func__);
+ }
+ batch_newdb.Write(key, value);
+ batch_olddb.Erase(key);
+
+ if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
+ // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
+ // because LevelDB iterators are guaranteed to provide a consistent view of the
+ // underlying data, like a lightweight snapshot.
+ WriteTxIndexMigrationBatches(*this, block_tree_db,
+ batch_newdb, batch_olddb,
+ prev_key, key);
+ prev_key = key;
+ }
}
- // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
- // immediately after the sync thread catches up and sets m_synced. Consider the case where
- // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
- // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
- // event, log a warning and let the queue clear.
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
- LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
- "chain (tip=%s); not writing txindex locator\n",
- __func__, locator_tip_hash.ToString(),
- best_block_index->GetBlockHash().ToString());
- return;
+ // If these final DB batches complete the migration, write the best block
+ // hash marker to the new database and delete from the old one. This signals
+ // that the former is fully caught up to that point in the blockchain and
+ // that all txindex entries have been removed from the latter.
+ if (!interrupted) {
+ batch_olddb.Erase(DB_TXINDEX_BLOCK);
+ batch_newdb.Write(DB_BEST_BLOCK, locator);
}
- if (!m_db->WriteBestBlock(locator)) {
- error("%s: Failed to write locator to disk", __func__);
+ WriteTxIndexMigrationBatches(*this, block_tree_db,
+ batch_newdb, batch_olddb,
+ begin_key, key);
+
+ if (interrupted) {
+ LogPrintf("[CANCELLED].\n");
+ return false;
}
+
+ uiInterface.ShowProgress("", 100, false);
+
+ LogPrintf("[DONE].\n");
+ return true;
}
-bool TxIndex::BlockUntilSyncedToCurrentChain()
+TxIndex::TxIndex(size_t n_cache_size, bool f_memory, bool f_wipe)
+ : m_db(MakeUnique<TxIndex::DB>(n_cache_size, f_memory, f_wipe))
+{}
+
+TxIndex::~TxIndex() {}
+
+bool TxIndex::Init()
{
- AssertLockNotHeld(cs_main);
+ LOCK(cs_main);
- if (!m_synced) {
+ // Attempt to migrate txindex from the old database to the new one. Even if
+ // chain_tip is null, the node could be reindexing and we still want to
+ // delete txindex records in the old database.
+ if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
return false;
}
- {
- // Skip the queue-draining stuff if we know we're caught up with
- // chainActive.Tip().
- LOCK(cs_main);
- const CBlockIndex* chain_tip = chainActive.Tip();
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
- return true;
- }
- }
+ return BaseIndex::Init();
+}
- LogPrintf("%s: txindex is catching up on block notifications\n", __func__);
- SyncWithValidationInterfaceQueue();
- return true;
+bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+{
+ CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
+ std::vector<std::pair<uint256, CDiskTxPos>> vPos;
+ vPos.reserve(block.vtx.size());
+ for (const auto& tx : block.vtx) {
+ vPos.emplace_back(tx->GetHash(), pos);
+ pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ }
+ return m_db->WriteTxs(vPos);
}
+BaseIndex::DB& TxIndex::GetDB() const { return *m_db; }
+
bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRef& tx) const
{
CDiskTxPos postx;
@@ -281,31 +284,3 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe
block_hash = header.GetHash();
return true;
}
-
-void TxIndex::Interrupt()
-{
- m_interrupt();
-}
-
-void TxIndex::Start()
-{
- // Need to register this ValidationInterface before running Init(), so that
- // callbacks are not missed if Init sets m_synced to true.
- RegisterValidationInterface(this);
- if (!Init()) {
- FatalError("%s: txindex failed to initialize", __func__);
- return;
- }
-
- m_thread_sync = std::thread(&TraceThread<std::function<void()>>, "txindex",
- std::bind(&TxIndex::ThreadSync, this));
-}
-
-void TxIndex::Stop()
-{
- UnregisterValidationInterface(this);
-
- if (m_thread_sync.joinable()) {
- m_thread_sync.join();
- }
-}
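
The migration loop above estimates its progress from the first two bytes of each txid: txids are uniformly distributed and the iterator visits keys in ascending order, so the 16-bit prefix divided by 65536 approximates the fraction of the keyspace already copied. A standalone sketch of the same arithmetic, with example prefixes chosen purely for illustration:

    #include <cstdint>
    #include <cstdio>

    // Estimate migration progress from the first two serialized bytes of a txid,
    // matching the arithmetic in TxIndex::DB::MigrateData.
    static int EstimateProgressPercent(uint8_t byte0, uint8_t byte1)
    {
        const uint32_t prefix = (static_cast<uint32_t>(byte0) << 8) | static_cast<uint32_t>(byte1);
        return static_cast<int>(prefix * 100.0 / 65536.0 + 0.5);
    }

    int main()
    {
        // A key starting 0x80 0x00 sits roughly halfway through a uniformly
        // distributed keyspace; 0xff 0xff is at the very end.
        std::printf("%d%%\n", EstimateProgressPercent(0x80, 0x00)); // 50%
        std::printf("%d%%\n", EstimateProgressPercent(0xff, 0xff)); // 100%
        return 0;
    }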
diff --git a/src/index/txindex.h b/src/index/txindex.h
index 4937bd64e9..8202c3c951 100644
--- a/src/index/txindex.h
+++ b/src/index/txindex.h
@@ -5,70 +5,39 @@
#ifndef BITCOIN_INDEX_TXINDEX_H
#define BITCOIN_INDEX_TXINDEX_H
-#include <primitives/block.h>
-#include <primitives/transaction.h>
-#include <threadinterrupt.h>
+#include <chain.h>
+#include <index/base.h>
#include <txdb.h>
-#include <uint256.h>
-#include <validationinterface.h>
-
-class CBlockIndex;
/**
* TxIndex is used to look up transactions included in the blockchain by hash.
* The index is written to a LevelDB database and records the filesystem
* location of each transaction by transaction hash.
*/
-class TxIndex final : public CValidationInterface
+class TxIndex final : public BaseIndex
{
-private:
- const std::unique_ptr<TxIndexDB> m_db;
-
- /// Whether the index is in sync with the main chain. The flag is flipped
- /// from false to true once, after which point this starts processing
- /// ValidationInterface notifications to stay in sync.
- std::atomic<bool> m_synced;
-
- /// The last block in the chain that the TxIndex is in sync with.
- std::atomic<const CBlockIndex*> m_best_block_index;
-
- std::thread m_thread_sync;
- CThreadInterrupt m_interrupt;
-
- /// Initialize internal state from the database and block index.
- bool Init();
+protected:
+ class DB;
- /// Sync the tx index with the block index starting from the current best
- /// block. Intended to be run in its own thread, m_thread_sync, and can be
- /// interrupted with m_interrupt. Once the txindex gets in sync, the
- /// m_synced flag is set and the BlockConnected ValidationInterface callback
- /// takes over and the sync thread exits.
- void ThreadSync();
+private:
+ const std::unique_ptr<DB> m_db;
- /// Write update index entries for a newly connected block.
- bool WriteBlock(const CBlock& block, const CBlockIndex* pindex);
+protected:
+ /// Override base class init to migrate from old database.
+ bool Init() override;
- /// Write the current chain block locator to the DB.
- bool WriteBestBlock(const CBlockIndex* block_index);
+ bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) override;
-protected:
- void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted) override;
+ BaseIndex::DB& GetDB() const override;
- void ChainStateFlushed(const CBlockLocator& locator) override;
+ const char* GetName() const override { return "txindex"; }
public:
- /// Constructs the TxIndex, which becomes available to be queried.
- explicit TxIndex(std::unique_ptr<TxIndexDB> db);
-
- /// Destructor interrupts sync thread if running and blocks until it exits.
- ~TxIndex();
+ /// Constructs the index, which becomes available to be queried.
+ explicit TxIndex(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
- /// Blocks the current thread until the transaction index is caught up to
- /// the current state of the block chain. This only blocks if the index has gotten in sync once
- /// and only needs to process blocks in the ValidationInterface queue. If the index is catching
- /// up from far behind, this method does not block and immediately returns false.
- bool BlockUntilSyncedToCurrentChain();
+ // Destructor is declared because this class contains a unique_ptr to an incomplete type.
+ virtual ~TxIndex() override;
/// Look up a transaction by hash.
///
@@ -77,15 +46,6 @@ public:
/// @param[out] tx The transaction itself.
/// @return true if transaction is found, false otherwise
bool FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRef& tx) const;
-
- void Interrupt();
-
- /// Start initializes the sync state and registers the instance as a
- /// ValidationInterface so that it stays in sync with blockchain updates.
- void Start();
-
- /// Stops the instance from staying in sync with blockchain updates.
- void Stop();
};
/// The global transaction index, used in GetTransaction. May be null.
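
TxIndex answers hash-to-transaction lookups, and GetTransaction consults the global g_txindex when it is non-null. A hedged sketch of a typical lookup against this interface (node-internal code; it assumes g_txindex was constructed and Start()ed during init, as init.cpp does below):

    #include <index/txindex.h>

    // Sketch only: look up a transaction by txid via the global index.
    // Returns nullptr if the node runs without -txindex or the txid is not indexed.
    static CTransactionRef LookupTx(const uint256& txid, uint256& block_hash)
    {
        if (!g_txindex) return nullptr;              // index disabled
        g_txindex->BlockUntilSyncedToCurrentChain(); // drain queued notifications if already synced
        CTransactionRef tx;
        if (!g_txindex->FindTx(txid, block_hash, tx)) return nullptr;
        return tx;
    }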
diff --git a/src/init.cpp b/src/init.cpp
index b4e2eec0d2..9246f6e71c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1606,8 +1606,7 @@ bool AppInitMain()
// ********************************************************* Step 8: start indexers
if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
- auto txindex_db = MakeUnique<TxIndexDB>(nTxIndexCache, false, fReindex);
- g_txindex = MakeUnique<TxIndex>(std::move(txindex_db));
+ g_txindex = MakeUnique<TxIndex>(nTxIndexCache, false, fReindex);
g_txindex->Start();
}
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index fc05dd2ad2..de456e87f4 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -561,7 +561,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
} // namespace
// This function is used for testing the stale tip eviction logic, see
-// DoS_tests.cpp
+// denialofservice_tests.cpp
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
{
LOCK(cs_main);
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index 5963bf371a..aac3fe5c14 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -54,7 +54,7 @@ bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
return (txout.nValue < GetDustThreshold(txout, dustRelayFeeIn));
}
-bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType, const bool witnessEnabled)
+bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType)
{
std::vector<std::vector<unsigned char> > vSolutions;
if (!Solver(scriptPubKey, whichType, vSolutions))
@@ -73,13 +73,10 @@ bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType, const bool w
(!fAcceptDatacarrier || scriptPubKey.size() > nMaxDatacarrierBytes))
return false;
- else if (!witnessEnabled && (whichType == TX_WITNESS_V0_KEYHASH || whichType == TX_WITNESS_V0_SCRIPTHASH))
- return false;
-
return whichType != TX_NONSTANDARD && whichType != TX_WITNESS_UNKNOWN;
}
-bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnessEnabled)
+bool IsStandardTx(const CTransaction& tx, std::string& reason)
{
if (tx.nVersion > CTransaction::MAX_STANDARD_VERSION || tx.nVersion < 1) {
reason = "version";
@@ -118,7 +115,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnes
unsigned int nDataOut = 0;
txnouttype whichType;
for (const CTxOut& txout : tx.vout) {
- if (!::IsStandard(txout.scriptPubKey, whichType, witnessEnabled)) {
+ if (!::IsStandard(txout.scriptPubKey, whichType)) {
reason = "scriptpubkey";
return false;
}
diff --git a/src/policy/policy.h b/src/policy/policy.h
index 5ce019df4c..035627bd60 100644
--- a/src/policy/policy.h
+++ b/src/policy/policy.h
@@ -79,12 +79,12 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFee);
bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFee);
-bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType, const bool witnessEnabled = false);
+bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType);
/**
* Check for standard transaction types
* @return True if all outputs (scriptPubKeys) use only standard transaction forms
*/
-bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnessEnabled = false);
+bool IsStandardTx(const CTransaction& tx, std::string& reason);
/**
* Check for standard transaction types
* @param[in] mapInputs Map of previous transactions that have outputs we're spending
diff --git a/src/qt/README.md b/src/qt/README.md
index d8acf96ceb..bf8139666c 100644
--- a/src/qt/README.md
+++ b/src/qt/README.md
@@ -4,7 +4,7 @@ The current precise version for Qt 5 is specified in [qt.mk](/depends/packages/q
## Compile and run
-See build instructions ([OSX](/doc/build-osx.md), [Windows](/doc/build-windows.md), [Unix](/doc/build-unix.md), etc).
+See build instructions ([macOS](/doc/build-osx.md), [Windows](/doc/build-windows.md), [Unix](/doc/build-unix.md), etc).
To run:
@@ -65,7 +65,7 @@ Represents the view to a single wallet.
* `guiconstants.h`: UI colors, app name, etc
* `guiutil.h`: several helper functions
* `macdockiconhandler.(h/cpp)`
-* `macnotificationhandler.(h/cpp)`: display notifications in OSX
+* `macnotificationhandler.(h/cpp)`: display notifications in macOS
## Contribute
@@ -81,9 +81,9 @@ the UI layout.
Download and install the community edition of [Qt Creator](https://www.qt.io/download/).
Uncheck everything except Qt Creator during the installation process.
-Instructions for OSX:
+Instructions for macOS:
-1. Make sure you installed everything through Homebrew mentioned in the [OSX build instructions](/doc/build-osx.md)
+1. Make sure you installed everything through Homebrew mentioned in the [macOS build instructions](/doc/build-osx.md)
2. Use `./configure` with the `--enable-debug` flag
3. In Qt Creator do "New Project" -> Import Project -> Import Existing Project
4. Enter "bitcoin-qt" as project name, enter src/qt as location
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index a3721991ee..8f34e6bc82 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -38,6 +38,69 @@
</widget>
</item>
<item>
+ <spacer name="horizontalSpacer_0_Main">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>5</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_Prune">
+ <item>
+ <widget class="QCheckBox" name="prune">
+ <property name="toolTip">
+ <string>Disables some advanced features but all blocks will still be fully validated. Reverting this setting requires re-downloading the entire blockchain. Actual disk usage may be somewhat higher.</string>
+ </property>
+ <property name="text">
+ <string>Prune &amp;block storage to</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QSpinBox" name="pruneSize"/>
+ </item>
+ <item>
+ <widget class="QLabel" name="pruneSizeUnitLabel">
+ <property name="text">
+ <string>GB</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <spacer name="horizontalSpacer_Main_Prune">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>20</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <widget class="QLabel" name="pruneWarning">
+ <property name="text">
+ <string>Reverting this setting requires re-downloading the entire blockchain.</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
<layout class="QHBoxLayout" name="horizontalLayout_2_Main">
<item>
<widget class="QLabel" name="databaseCacheLabel">
@@ -81,7 +144,7 @@
</layout>
</item>
<item>
- <layout class="QHBoxLayout" name="horizontalLayout_3_Main">
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_VerifyLabel">
<item>
<widget class="QLabel" name="threadsScriptVerifLabel">
<property name="text">
@@ -103,7 +166,7 @@
</widget>
</item>
<item>
- <spacer name="horizontalSpacer_3_Main">
+ <spacer name="horizontalSpacer_Main_Threads">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index c0ddb89b40..108aa4f99c 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -36,8 +36,17 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) :
/* Main elements init */
ui->databaseCache->setMinimum(nMinDbCache);
ui->databaseCache->setMaximum(nMaxDbCache);
+ static const uint64_t GiB = 1024 * 1024 * 1024;
+ static const uint64_t nMinDiskSpace = MIN_DISK_SPACE_FOR_BLOCK_FILES / GiB +
+ (MIN_DISK_SPACE_FOR_BLOCK_FILES % GiB ? 1 : 0);
+ ui->pruneSize->setMinimum(nMinDiskSpace);
ui->threadsScriptVerif->setMinimum(-GetNumCores());
ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS);
+ ui->pruneWarning->setVisible(false);
+ ui->pruneWarning->setStyleSheet("QLabel { color: red; }");
+
+ ui->pruneSize->setEnabled(false);
+ connect(ui->prune, SIGNAL(toggled(bool)), ui->pruneSize, SLOT(setEnabled(bool)));
/* Network elements init */
#ifndef USE_UPNP
@@ -157,6 +166,9 @@ void OptionsDialog::setModel(OptionsModel *_model)
/* warn when one of the following settings changes by user action (placed here so init via mapper doesn't trigger them) */
/* Main */
+ connect(ui->prune, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
+ connect(ui->prune, SIGNAL(clicked(bool)), this, SLOT(togglePruneWarning(bool)));
+ connect(ui->pruneSize, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
connect(ui->databaseCache, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
connect(ui->threadsScriptVerif, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
/* Wallet */
@@ -176,6 +188,8 @@ void OptionsDialog::setMapper()
mapper->addMapping(ui->bitcoinAtStartup, OptionsModel::StartAtStartup);
mapper->addMapping(ui->threadsScriptVerif, OptionsModel::ThreadsScriptVerif);
mapper->addMapping(ui->databaseCache, OptionsModel::DatabaseCache);
+ mapper->addMapping(ui->prune, OptionsModel::Prune);
+ mapper->addMapping(ui->pruneSize, OptionsModel::PruneSize);
/* Wallet */
mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange);
@@ -266,6 +280,11 @@ void OptionsDialog::on_hideTrayIcon_stateChanged(int fState)
}
}
+void OptionsDialog::togglePruneWarning(bool enabled)
+{
+ ui->pruneWarning->setVisible(!ui->pruneWarning->isVisible());
+}
+
void OptionsDialog::showRestartWarning(bool fPersistent)
{
ui->statusLabel->setStyleSheet("QLabel { color: red; }");
diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h
index faf9ff8959..5aad484ce7 100644
--- a/src/qt/optionsdialog.h
+++ b/src/qt/optionsdialog.h
@@ -53,6 +53,7 @@ private Q_SLOTS:
void on_hideTrayIcon_stateChanged(int fState);
+ void togglePruneWarning(bool enabled);
void showRestartWarning(bool fPersistent = false);
void clearStatusLabel();
void updateProxyValidationState();
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index cae9dace4c..31a85f4e23 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -88,6 +88,16 @@ void OptionsModel::Init(bool resetSettings)
// by command-line and show this in the UI.
// Main
+ if (!settings.contains("bPrune"))
+ settings.setValue("bPrune", false);
+ if (!settings.contains("nPruneSize"))
+ settings.setValue("nPruneSize", 2);
+ // Convert prune size from GB to MB:
+ const uint64_t nPruneSizeMB = settings.value("nPruneSize").toInt() * 1000;
+ if (!m_node.softSetArg("-prune", settings.value("bPrune").toBool() ? std::to_string(nPruneSizeMB) : "0")) {
+ addOverriddenOption("-prune");
+ }
+
if (!settings.contains("nDatabaseCache"))
settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache);
if (!m_node.softSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString()))
@@ -281,6 +291,10 @@ QVariant OptionsModel::data(const QModelIndex & index, int role) const
return settings.value("language");
case CoinControlFeatures:
return fCoinControlFeatures;
+ case Prune:
+ return settings.value("bPrune");
+ case PruneSize:
+ return settings.value("nPruneSize");
case DatabaseCache:
return settings.value("nDatabaseCache");
case ThreadsScriptVerif:
@@ -405,6 +419,18 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
settings.setValue("fCoinControlFeatures", fCoinControlFeatures);
Q_EMIT coinControlFeaturesChanged(fCoinControlFeatures);
break;
+ case Prune:
+ if (settings.value("bPrune") != value) {
+ settings.setValue("bPrune", value);
+ setRestartRequired(true);
+ }
+ break;
+ case PruneSize:
+ if (settings.value("nPruneSize") != value) {
+ settings.setValue("nPruneSize", value);
+ setRestartRequired(true);
+ }
+ break;
case DatabaseCache:
if (settings.value("nDatabaseCache") != value) {
settings.setValue("nDatabaseCache", value);
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index fc1d119a71..2777cbeaf2 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -50,6 +50,8 @@ public:
Language, // QString
CoinControlFeatures, // bool
ThreadsScriptVerif, // int
+ Prune, // bool
+ PruneSize, // int
DatabaseCache, // int
SpendZeroConfChange, // bool
Listen, // bool
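
Two small conversions drive the new prune controls: the spin box minimum is MIN_DISK_SPACE_FOR_BLOCK_FILES rounded up to whole gigabytes, and the stored nPruneSize (in GB) is multiplied by 1000 to form the -prune value handed to the node. A standalone sketch of both, with the 550 MiB block-file floor assumed here purely for illustration:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Assumed for illustration; the real constant lives in validation.h.
        const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550ull * 1024 * 1024;
        const uint64_t GiB = 1024ull * 1024 * 1024;

        // Spin box minimum: round the block-file floor up to whole GiB, as OptionsDialog does.
        const uint64_t min_prune_gb =
            MIN_DISK_SPACE_FOR_BLOCK_FILES / GiB + (MIN_DISK_SPACE_FOR_BLOCK_FILES % GiB ? 1 : 0);
        std::printf("minimum prune target: %llu GB\n", (unsigned long long)min_prune_gb); // 1 GB

        // Settings value -> -prune argument: nPruneSize is stored in GB, multiplied by 1000 in OptionsModel.
        const int nPruneSize = 2; // default written by OptionsModel::Init
        const uint64_t nPruneSizeMB = uint64_t(nPruneSize) * 1000;
        std::printf("-prune=%llu\n", (unsigned long long)nPruneSizeMB); // -prune=2000
        return 0;
    }

With those numbers the spin box bottoms out at 1 GB and the default 2 GB setting becomes -prune=2000.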
diff --git a/src/rest.cpp b/src/rest.cpp
index ffa75c241f..a5f164497d 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -217,7 +217,7 @@ static bool rest_block(HTTPRequest* req,
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
- if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0)
+ if (IsBlockPruned(pblockindex))
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
if (!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus()))
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 24fb522e60..f70d506e13 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -6,7 +6,6 @@
#include <rpc/blockchain.h>
#include <amount.h>
-#include <chain.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <coins.h>
@@ -49,17 +48,13 @@ static std::mutex cs_blockchange;
static std::condition_variable cond_blockchange;
static CUpdatedBlock latestblock;
-/* Calculate the difficulty for a given block index,
- * or the block index of the given chain.
+/* Calculate the difficulty for a given block index.
*/
-double GetDifficulty(const CChain& chain, const CBlockIndex* blockindex)
+double GetDifficulty(const CBlockIndex* blockindex)
{
if (blockindex == nullptr)
{
- if (chain.Tip() == nullptr)
- return 1.0;
- else
- blockindex = chain.Tip();
+ return 1.0;
}
int nShift = (blockindex->nBits >> 24) & 0xff;
@@ -80,11 +75,6 @@ double GetDifficulty(const CChain& chain, const CBlockIndex* blockindex)
return dDiff;
}
-double GetDifficulty(const CBlockIndex* blockindex)
-{
- return GetDifficulty(chainActive, blockindex);
-}
-
UniValue blockheaderToJSON(const CBlockIndex* blockindex)
{
AssertLockHeld(cs_main);
@@ -354,7 +344,7 @@ static UniValue getdifficulty(const JSONRPCRequest& request)
);
LOCK(cs_main);
- return GetDifficulty();
+ return GetDifficulty(chainActive.Tip());
}
static std::string EntryDescriptionString()
@@ -742,7 +732,7 @@ static UniValue getblockheader(const JSONRPCRequest& request)
static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
{
CBlock block;
- if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0) {
+ if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
}
@@ -1240,7 +1230,7 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
obj.pushKV("blocks", (int)chainActive.Height());
obj.pushKV("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1);
obj.pushKV("bestblockhash", chainActive.Tip()->GetBlockHash().GetHex());
- obj.pushKV("difficulty", (double)GetDifficulty());
+ obj.pushKV("difficulty", (double)GetDifficulty(chainActive.Tip()));
obj.pushKV("mediantime", (int64_t)chainActive.Tip()->GetMedianTimePast());
obj.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), chainActive.Tip()));
obj.pushKV("initialblockdownload", IsInitialBlockDownload());
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index 960edfd56f..3aa8de2d2b 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -16,7 +16,7 @@ class UniValue;
* @return A floating point number that is a multiple of the main net minimum
* difficulty (4295032833 hashes).
*/
-double GetDifficulty(const CBlockIndex* blockindex = nullptr);
+double GetDifficulty(const CBlockIndex* blockindex);
/** Callback for when block tip changed. */
void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);
@@ -34,4 +34,3 @@ UniValue mempoolToJSON(bool fVerbose = false);
UniValue blockheaderToJSON(const CBlockIndex* blockindex);
#endif
-
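
GetDifficulty now takes an explicit block index (callers pass chainActive.Tip()) and returns 1.0 for a null pointer instead of falling back to a chain tip. For reference, the value itself comes from the compact nBits target; a standalone sketch of that conversion, mirroring the arithmetic in blockchain.cpp:

    #include <cstdint>
    #include <cstdio>

    // Convert a compact nBits target into difficulty relative to the minimum
    // (mainnet genesis) target, mirroring GetDifficulty's arithmetic.
    static double DifficultyFromBits(uint32_t nBits)
    {
        int nShift = (nBits >> 24) & 0xff;
        double dDiff = (double)0x0000ffff / (double)(nBits & 0x00ffffff);
        while (nShift < 29) { dDiff *= 256.0; ++nShift; }
        while (nShift > 29) { dDiff /= 256.0; --nShift; }
        return dDiff;
    }

    int main()
    {
        std::printf("%g\n", DifficultyFromBits(0x1d00ffff)); // genesis nBits -> difficulty 1
        return 0;
    }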
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index bb68f72ccc..0f35fd3770 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -52,7 +52,6 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "listreceivedbylabel", 0, "minconf" },
{ "listreceivedbylabel", 1, "include_empty" },
{ "listreceivedbylabel", 2, "include_watchonly" },
- { "getlabeladdress", 1, "force" },
{ "getbalance", 1, "minconf" },
{ "getbalance", 2, "include_watchonly" },
{ "getblockhash", 0, "height" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 203fac39e2..85b864e6b9 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -214,7 +214,7 @@ static UniValue getmininginfo(const JSONRPCRequest& request)
obj.pushKV("blocks", (int)chainActive.Height());
obj.pushKV("currentblockweight", (uint64_t)nLastBlockWeight);
obj.pushKV("currentblocktx", (uint64_t)nLastBlockTx);
- obj.pushKV("difficulty", (double)GetDifficulty());
+ obj.pushKV("difficulty", (double)GetDifficulty(chainActive.Tip()));
obj.pushKV("networkhashps", getnetworkhashps(request));
obj.pushKV("pooledtx", (uint64_t)mempool.size());
obj.pushKV("chain", Params().NetworkIDString());
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index ad2d55afe7..3b3f43edea 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -748,7 +748,7 @@ static UniValue combinerawtransaction(const JSONRPCRequest& request)
}
}
- UpdateTransaction(mergedTx, i, sigdata);
+ UpdateInput(txin, sigdata);
}
return EncodeHexTx(mergedTx);
@@ -882,7 +882,7 @@ UniValue SignTransaction(CMutableTransaction& mtx, const UniValue& prevTxsUnival
}
sigdata = CombineSignatures(prevPubKey, TransactionSignatureChecker(&txConst, i, amount), sigdata, DataFromTransaction(mtx, i));
- UpdateTransaction(mtx, i, sigdata);
+ UpdateInput(txin, sigdata);
ScriptError serror = SCRIPT_ERR_OK;
if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount), &serror)) {
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index c5468f633b..6dbfbda029 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -199,12 +199,6 @@ void UpdateInput(CTxIn& input, const SignatureData& data)
input.scriptWitness = data.scriptWitness;
}
-void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const SignatureData& data)
-{
- assert(tx.vin.size() > nIn);
- UpdateInput(tx.vin[nIn], data);
-}
-
bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType)
{
assert(nIn < txTo.vin.size());
@@ -213,7 +207,7 @@ bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, C
SignatureData sigdata;
bool ret = ProduceSignature(provider, creator, fromPubKey, sigdata);
- UpdateTransaction(txTo, nIn, sigdata);
+ UpdateInput(txTo.vin.at(nIn), sigdata);
return ret;
}
diff --git a/src/script/sign.h b/src/script/sign.h
index a10366dcd1..8ef0306bfe 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -73,7 +73,6 @@ SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignature
/** Extract signature data from a transaction, and insert it. */
SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nIn);
-void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const SignatureData& data);
void UpdateInput(CTxIn& input, const SignatureData& data);
/* Check whether we know how to sign for an output like this, assuming we
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 53fcbe37de..d9269d6147 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -114,6 +114,7 @@ bool Solver(const CScript& scriptPubKey, txnouttype& typeRet, std::vector<std::v
vSolutionsRet.push_back(std::move(witnessprogram));
return true;
}
+ typeRet = TX_NONSTANDARD;
return false;
}
diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp
index 13ec19834a..8644aea371 100644
--- a/src/test/arith_uint256_tests.cpp
+++ b/src/test/arith_uint256_tests.cpp
@@ -198,13 +198,6 @@ BOOST_AUTO_TEST_CASE( shifts ) { // "<<" ">>" "<<=" ">>="
BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ -
{
- BOOST_CHECK(!ZeroL);
- BOOST_CHECK(!(!OneL));
- for (unsigned int i = 0; i < 256; ++i)
- BOOST_CHECK(!(!(OneL<<i)));
- BOOST_CHECK(!(!R1L));
- BOOST_CHECK(!(!MaxL));
-
BOOST_CHECK(~ZeroL == MaxL);
unsigned char TmpArray[32];
diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp
index 5b8df32158..7d8ae46fb8 100644
--- a/src/test/blockchain_tests.cpp
+++ b/src/test/blockchain_tests.cpp
@@ -1,9 +1,9 @@
#include <boost/test/unit_test.hpp>
-#include "stdlib.h"
+#include <stdlib.h>
-#include "rpc/blockchain.cpp"
-#include "test/test_bitcoin.h"
+#include <rpc/blockchain.h>
+#include <test/test_bitcoin.h>
/* Equality between doubles is imprecise. Comparison should be done
* with a small threshold of tolerance, rather than exact equality.
@@ -22,14 +22,6 @@ static CBlockIndex* CreateBlockIndexWithNbits(uint32_t nbits)
return block_index;
}
-static CChain CreateChainWithNbits(uint32_t nbits)
-{
- CBlockIndex* block_index = CreateBlockIndexWithNbits(nbits);
- CChain chain;
- chain.SetTip(block_index);
- return chain;
-}
-
static void RejectDifficultyMismatch(double difficulty, double expected_difficulty) {
BOOST_CHECK_MESSAGE(
DoubleEquals(difficulty, expected_difficulty, 0.00001),
@@ -43,12 +35,7 @@ static void RejectDifficultyMismatch(double difficulty, double expected_difficul
static void TestDifficulty(uint32_t nbits, double expected_difficulty)
{
CBlockIndex* block_index = CreateBlockIndexWithNbits(nbits);
- /* Since we are passing in block index explicitly,
- * there is no need to set up anything within the chain itself.
- */
- CChain chain;
-
- double difficulty = GetDifficulty(chain, block_index);
+ double difficulty = GetDifficulty(block_index);
delete block_index;
RejectDifficultyMismatch(difficulty, expected_difficulty);
@@ -84,43 +71,8 @@ BOOST_AUTO_TEST_CASE(get_difficulty_for_very_high_target)
// Verify that difficulty is 1.0 for an empty chain.
BOOST_AUTO_TEST_CASE(get_difficulty_for_null_tip)
{
- CChain chain;
- double difficulty = GetDifficulty(chain, nullptr);
+ double difficulty = GetDifficulty(nullptr);
RejectDifficultyMismatch(difficulty, 1.0);
}
-/* Verify that if difficulty is based upon the block index
- * in the chain, if no block index is explicitly specified.
- */
-BOOST_AUTO_TEST_CASE(get_difficulty_for_null_block_index)
-{
- CChain chain = CreateChainWithNbits(0x1df88f6f);
-
- double difficulty = GetDifficulty(chain, nullptr);
- delete chain.Tip();
-
- double expected_difficulty = 0.004023;
-
- RejectDifficultyMismatch(difficulty, expected_difficulty);
-}
-
-/* Verify that difficulty is based upon the explicitly specified
- * block index rather than being taken from the provided chain,
- * when both are present.
- */
-BOOST_AUTO_TEST_CASE(get_difficulty_for_block_index_overrides_tip)
-{
- CChain chain = CreateChainWithNbits(0x1df88f6f);
- /* This block index's nbits should be used
- * instead of the chain's when calculating difficulty.
- */
- CBlockIndex* override_block_index = CreateBlockIndexWithNbits(0x12345678);
-
- double difficulty = GetDifficulty(chain, override_block_index);
- delete chain.Tip();
- delete override_block_index;
-
- RejectDifficultyMismatch(difficulty, 5913134931067755359633408.0);
-}
-
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/DoS_tests.cpp b/src/test/denialofservice_tests.cpp
index 1868aed7dd..e5f914ba8a 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -42,7 +42,7 @@ static NodeId id = 0;
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds);
-BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup)
+BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
// Test eviction of an outbound peer whose chain never advances
// Mock a node connection, and use mocktime to simulate a peer
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 5ca243f42e..4070642537 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -571,4 +571,182 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
SetMockTime(0);
}
+inline CTransactionRef make_tx(std::vector<CAmount>&& output_values, std::vector<CTransactionRef>&& inputs=std::vector<CTransactionRef>(), std::vector<uint32_t>&& input_indices=std::vector<uint32_t>())
+{
+ CMutableTransaction tx = CMutableTransaction();
+ tx.vin.resize(inputs.size());
+ tx.vout.resize(output_values.size());
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ tx.vin[i].prevout.hash = inputs[i]->GetHash();
+ tx.vin[i].prevout.n = input_indices.size() > i ? input_indices[i] : 0;
+ }
+ for (size_t i = 0; i < output_values.size(); ++i) {
+ tx.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
+ tx.vout[i].nValue = output_values[i];
+ }
+ return MakeTransactionRef(tx);
+}
+
+#define MK_OUTPUTS(amounts...) std::vector<CAmount>{amounts}
+#define MK_INPUTS(txs...) std::vector<CTransactionRef>{txs}
+#define MK_INPUT_IDX(idxes...) std::vector<uint32_t>{idxes}
+
+BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
+{
+ size_t ancestors, descendants;
+
+ CTxMemPool pool;
+ TestMemPoolEntryHelper entry;
+
+ /* Base transaction */
+ //
+ // [tx1]
+ //
+ CTransactionRef tx1 = make_tx(MK_OUTPUTS(10 * COIN));
+ pool.addUnchecked(tx1->GetHash(), entry.Fee(10000LL).FromTx(tx1));
+
+ // Ancestors / descendants should be 1 / 1 (itself / itself)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 1ULL);
+
+ /* Child transaction */
+ //
+ // [tx1].0 <- [tx2]
+ //
+ CTransactionRef tx2 = make_tx(MK_OUTPUTS(495 * CENT, 5 * COIN), MK_INPUTS(tx1));
+ pool.addUnchecked(tx2->GetHash(), entry.Fee(10000LL).FromTx(tx2));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 2 (tx1,2)
+ // tx2 2 (tx1,2) 2 (tx1,2)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 2ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 2ULL);
+
+ /* Grand-child 1 */
+ //
+ // [tx1].0 <- [tx2].0 <- [tx3]
+ //
+ CTransactionRef tx3 = make_tx(MK_OUTPUTS(290 * CENT, 200 * CENT), MK_INPUTS(tx2));
+ pool.addUnchecked(tx3->GetHash(), entry.Fee(10000LL).FromTx(tx3));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 3 (tx1,2,3)
+ // tx2 2 (tx1,2) 3 (tx1,2,3)
+ // tx3 3 (tx1,2,3) 3 (tx1,2,3)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+
+ /* Grand-child 2 */
+ //
+ // [tx1].0 <- [tx2].0 <- [tx3]
+ // |
+ // \---1 <- [tx4]
+ //
+ CTransactionRef tx4 = make_tx(MK_OUTPUTS(290 * CENT, 250 * CENT), MK_INPUTS(tx2), MK_INPUT_IDX(1));
+ pool.addUnchecked(tx4->GetHash(), entry.Fee(10000LL).FromTx(tx4));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 4 (tx1,2,3,4)
+ // tx2 2 (tx1,2) 4 (tx1,2,3,4)
+ // tx3 3 (tx1,2,3) 4 (tx1,2,3,4)
+ // tx4 3 (tx1,2,4) 4 (tx1,2,3,4)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+
+ /* Make an alternate branch that is longer and connect it to tx3 */
+ //
+ // [ty1].0 <- [ty2].0 <- [ty3].0 <- [ty4].0 <- [ty5].0
+ // |
+ // [tx1].0 <- [tx2].0 <- [tx3].0 <- [ty6] --->--/
+ // |
+ // \---1 <- [tx4]
+ //
+ CTransactionRef ty1, ty2, ty3, ty4, ty5;
+ CTransactionRef* ty[5] = {&ty1, &ty2, &ty3, &ty4, &ty5};
+ CAmount v = 5 * COIN;
+ for (uint64_t i = 0; i < 5; i++) {
+ CTransactionRef& tyi = *ty[i];
+ tyi = make_tx(MK_OUTPUTS(v), i > 0 ? MK_INPUTS(*ty[i-1]) : std::vector<CTransactionRef>());
+ v -= 50 * CENT;
+ pool.addUnchecked(tyi->GetHash(), entry.Fee(10000LL).FromTx(tyi));
+ pool.GetTransactionAncestry(tyi->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, i+1);
+ BOOST_CHECK_EQUAL(descendants, i+1);
+ }
+ CTransactionRef ty6 = make_tx(MK_OUTPUTS(5 * COIN), MK_INPUTS(tx3, ty5));
+ pool.addUnchecked(ty6->GetHash(), entry.Fee(10000LL).FromTx(ty6));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =================== ===========
+ // tx1 1 (tx1) 5 (tx1,2,3,4, ty6)
+ // tx2 2 (tx1,2) 5 (tx1,2,3,4, ty6)
+ // tx3 3 (tx1,2,3) 5 (tx1,2,3,4, ty6)
+ // tx4 3 (tx1,2,4) 5 (tx1,2,3,4, ty6)
+ // ty1 1 (ty1) 6 (ty1,2,3,4,5,6)
+ // ty2 2 (ty1,2) 6 (ty1,2,3,4,5,6)
+ // ty3 3 (ty1,2,3) 6 (ty1,2,3,4,5,6)
+ // ty4 4 (y1234) 6 (ty1,2,3,4,5,6)
+ // ty5 5 (y12345) 6 (ty1,2,3,4,5,6)
+ // ty6 9 (tx123, ty123456) 6 (ty1,2,3,4,5,6)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(ty1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 4ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty5->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 5ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty6->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 9ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+}
+
BOOST_AUTO_TEST_SUITE_END()
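The expectations tabulated in this test follow from two rules: a transaction's ancestor count is the size of its in-mempool ancestor set including itself, and the descendant count reported for it is taken from the ancestor with the largest descendant set. The standalone sketch below is a toy model of those rules (string ids stand in for txids; it is not Core code) and reproduces the tx1..tx4 numbers from the first table above.

    // Toy model of the counts GetTransactionAncestry() reports for the tx1..tx4 diamond.
    #include <cassert>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    using Graph = std::map<std::string, std::vector<std::string>>; // tx -> parents (or children)

    static void Closure(const Graph& g, const std::string& tx, std::set<std::string>& out)
    {
        if (!out.insert(tx).second) return; // already visited (handles the tx2 diamond)
        auto it = g.find(tx);
        if (it == g.end()) return;
        for (const auto& next : it->second) Closure(g, next, out);
    }

    int main()
    {
        Graph parents{{"tx1", {}}, {"tx2", {"tx1"}}, {"tx3", {"tx2"}}, {"tx4", {"tx2"}}};
        Graph children;
        for (const auto& [tx, ps] : parents)
            for (const auto& p : ps) children[p].push_back(tx);

        // Ancestor count includes the transaction itself: tx4 -> {tx1, tx2, tx4}.
        std::set<std::string> anc;
        Closure(parents, "tx4", anc);
        assert(anc.size() == 3);

        // The descendant count comes from the ancestor with the most descendants;
        // for every transaction here that is tx1, whose descendant set is all four.
        std::set<std::string> desc;
        Closure(children, "tx1", desc);
        assert(desc.size() == 4);
        return 0;
    }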
diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_p2sh_tests.cpp
index 63d211dd97..803a673fab 100644
--- a/src/test/script_P2SH_tests.cpp
+++ b/src/test/script_p2sh_tests.cpp
@@ -46,7 +46,7 @@ Verify(const CScript& scriptSig, const CScript& scriptPubKey, bool fStrict, Scri
}
-BOOST_FIXTURE_TEST_SUITE(script_P2SH_tests, BasicTestingSetup)
+BOOST_FIXTURE_TEST_SUITE(script_p2sh_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(sign)
{
diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp
index ff0bf6c66d..7ab0978228 100644
--- a/src/test/script_standard_tests.cpp
+++ b/src/test/script_standard_tests.cpp
@@ -726,6 +726,32 @@ BOOST_AUTO_TEST_CASE(script_standard_IsMine)
BOOST_CHECK(!isInvalid);
}
+ // witness unspendable
+ {
+ CBasicKeyStore keystore;
+ keystore.AddKey(keys[0]);
+
+ scriptPubKey.clear();
+ scriptPubKey << OP_0 << ToByteVector(ParseHex("aabb"));
+
+ result = IsMine(keystore, scriptPubKey, isInvalid);
+ BOOST_CHECK_EQUAL(result, ISMINE_NO);
+ BOOST_CHECK(!isInvalid);
+ }
+
+ // witness unknown
+ {
+ CBasicKeyStore keystore;
+ keystore.AddKey(keys[0]);
+
+ scriptPubKey.clear();
+ scriptPubKey << OP_16 << ToByteVector(ParseHex("aabb"));
+
+ result = IsMine(keystore, scriptPubKey, isInvalid);
+ BOOST_CHECK_EQUAL(result, ISMINE_NO);
+ BOOST_CHECK(!isInvalid);
+ }
+
// Nonstandard
{
CBasicKeyStore keystore;
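The two new cases above pin down IsMine() for witness outputs the wallet cannot spend: a v0 program that is not 20 or 32 bytes, and a program with a higher witness version. The sketch below is a rough illustration of that distinction only, not Core's Solver/IsMine, and it assumes BIP141's length rule for v0 programs.

    #include <cstdint>
    #include <vector>

    enum class WitnessClass { NonWitness, V0KeyHash, V0ScriptHash, V0Unspendable, Unknown };

    // Classify a (version, program) pair the way the two test cases expect:
    // both end up as something the wallet reports ISMINE_NO for.
    WitnessClass Classify(int version, const std::vector<uint8_t>& program)
    {
        if (version < 0 || version > 16) return WitnessClass::NonWitness;
        if (version == 0) {
            if (program.size() == 20) return WitnessClass::V0KeyHash;
            if (program.size() == 32) return WitnessClass::V0ScriptHash;
            return WitnessClass::V0Unspendable; // OP_0 <0xaabb>: invalid v0 program length
        }
        return WitnessClass::Unknown;           // OP_16 <0xaabb>: future witness version
    }

    int main()
    {
        const std::vector<uint8_t> prog{0xaa, 0xbb};
        return (Classify(0, prog) == WitnessClass::V0Unspendable &&
                Classify(16, prog) == WitnessClass::Unknown) ? 0 : 1;
    }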
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index cc72e96eb1..65c5b8ea1d 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -629,7 +629,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore2, scriptMulti, output2, input2, false);
CheckWithFlag(output2, input2, 0, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
// P2SH 2-of-2 multisig
@@ -640,7 +640,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
@@ -652,7 +652,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
@@ -664,7 +664,7 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
- UpdateTransaction(input1, 0, CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
+ UpdateInput(input1.vin[0], CombineSignatures(output1->vout[0].scriptPubKey, MutableTransactionSignatureChecker(&input1, 0, output1->vout[0].nValue), DataFromTransaction(input1, 0), DataFromTransaction(input2, 0)));
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
}
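The mechanical change in these hunks (and in the validation-cache and wallet hunks later in this patch) is that signature data is now applied through an input-level helper rather than a transaction-plus-index one. A minimal sketch of the new call shape follows; the structs are toy stand-ins for the real CTxIn/SignatureData, and the real helper also copies the script witness, which is omitted here.

    #include <string>
    #include <vector>

    struct SignatureData { std::string scriptSig; };         // toy stand-in
    struct CTxIn { std::string scriptSig; };                 // toy stand-in
    struct CMutableTransaction { std::vector<CTxIn> vin; };  // toy stand-in

    // Old shape: UpdateTransaction(tx, nIn, sigdata);
    // New shape: the helper only needs the one input it modifies.
    void UpdateInput(CTxIn& input, const SignatureData& data)
    {
        input.scriptSig = data.scriptSig;
    }

    int main()
    {
        CMutableTransaction tx;
        tx.vin.resize(2);
        SignatureData sigdata{"<sig> <pubkey>"};
        for (auto& input : tx.vin) UpdateInput(input, sigdata); // vin is iterated non-const now
        return 0;
    }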
diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp
index 14158f2875..be7ee2428b 100644
--- a/src/test/txindex_tests.cpp
+++ b/src/test/txindex_tests.cpp
@@ -15,7 +15,7 @@ BOOST_AUTO_TEST_SUITE(txindex_tests)
BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
{
- TxIndex txindex(MakeUnique<TxIndexDB>(1 << 20, true));
+ TxIndex txindex(1 << 20, true);
CTransactionRef tx_disk;
uint256 block_hash;
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 06497667c3..d32d4b267c 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -102,7 +102,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
// should fail.
// Capture this interaction with the upgraded_nop argument: set it when evaluating
// any script flag that is implemented as an upgraded NOP code.
-static void ValidateCheckInputsForAllFlags(CMutableTransaction &tx, uint32_t failing_flags, bool add_to_cache)
+static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache)
{
PrecomputedTransactionData txdata(tx);
// If we add many more flags, this loop can get too expensive, but we can
@@ -315,7 +315,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
// Sign
SignatureData sigdata;
ProduceSignature(keystore, MutableTransactionSignatureCreator(&valid_with_witness_tx, 0, 11*CENT, SIGHASH_ALL), spend_tx.vout[1].scriptPubKey, sigdata);
- UpdateTransaction(valid_with_witness_tx, 0, sigdata);
+ UpdateInput(valid_with_witness_tx.vin[0], sigdata);
// This should be valid under all script flags.
ValidateCheckInputsForAllFlags(valid_with_witness_tx, 0, true);
@@ -343,7 +343,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
for (int i=0; i<2; ++i) {
SignatureData sigdata;
ProduceSignature(keystore, MutableTransactionSignatureCreator(&tx, i, 11*CENT, SIGHASH_ALL), spend_tx.vout[i].scriptPubKey, sigdata);
- UpdateTransaction(tx, i, sigdata);
+ UpdateInput(tx.vin[i], sigdata);
}
// This should be valid under all script flags
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 333d3596c1..b1d5879c83 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -21,8 +21,6 @@
static const char DB_COIN = 'C';
static const char DB_COINS = 'c';
static const char DB_BLOCK_FILES = 'f';
-static const char DB_TXINDEX = 't';
-static const char DB_TXINDEX_BLOCK = 'T';
static const char DB_BLOCK_INDEX = 'b';
static const char DB_BEST_BLOCK = 'B';
@@ -237,17 +235,6 @@ bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockF
return WriteBatch(batch, true);
}
-bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
- return Read(std::make_pair(DB_TXINDEX, txid), pos);
-}
-
-bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
- CDBBatch batch(*this);
- for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
- batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second);
- return WriteBatch(batch);
-}
-
bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0');
}
@@ -425,173 +412,3 @@ bool CCoinsViewDB::Upgrade() {
LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
return !ShutdownRequested();
}
-
-TxIndexDB::TxIndexDB(size_t n_cache_size, bool f_memory, bool f_wipe) :
- CDBWrapper(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe)
-{}
-
-bool TxIndexDB::ReadTxPos(const uint256 &txid, CDiskTxPos& pos) const
-{
- return Read(std::make_pair(DB_TXINDEX, txid), pos);
-}
-
-bool TxIndexDB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos)
-{
- CDBBatch batch(*this);
- for (const auto& tuple : v_pos) {
- batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
- }
- return WriteBatch(batch);
-}
-
-bool TxIndexDB::ReadBestBlock(CBlockLocator& locator) const
-{
- bool success = Read(DB_BEST_BLOCK, locator);
- if (!success) {
- locator.SetNull();
- }
- return success;
-}
-
-bool TxIndexDB::WriteBestBlock(const CBlockLocator& locator)
-{
- return Write(DB_BEST_BLOCK, locator);
-}
-
-/*
- * Safely persist a transfer of data from the old txindex database to the new one, and compact the
- * range of keys updated. This is used internally by MigrateData.
- */
-static void WriteTxIndexMigrationBatches(TxIndexDB& newdb, CBlockTreeDB& olddb,
- CDBBatch& batch_newdb, CDBBatch& batch_olddb,
- const std::pair<unsigned char, uint256>& begin_key,
- const std::pair<unsigned char, uint256>& end_key)
-{
- // Sync new DB changes to disk before deleting from old DB.
- newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
- olddb.WriteBatch(batch_olddb);
- olddb.CompactRange(begin_key, end_key);
-
- batch_newdb.Clear();
- batch_olddb.Clear();
-}
-
-bool TxIndexDB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
-{
- // The prior implementation of txindex was always in sync with block index
- // and presence was indicated with a boolean DB flag. If the flag is set,
- // this means the txindex from a previous version is valid and in sync with
- // the chain tip. The first step of the migration is to unset the flag and
- // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
- // index entries are copied over in batches to the new database. Finally,
- // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
- // written to the new database.
- //
- // Unsetting the boolean flag ensures that if the node is downgraded to a
- // previous version, it will not see a corrupted, partially migrated index
- // -- it will see that the txindex is disabled. When the node is upgraded
- // again, the migration will pick up where it left off and sync to the block
- // with hash DB_TXINDEX_BLOCK.
- bool f_legacy_flag = false;
- block_tree_db.ReadFlag("txindex", f_legacy_flag);
- if (f_legacy_flag) {
- if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
- return error("%s: cannot write block indicator", __func__);
- }
- if (!block_tree_db.WriteFlag("txindex", false)) {
- return error("%s: cannot write block index db flag", __func__);
- }
- }
-
- CBlockLocator locator;
- if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
- return true;
- }
-
- int64_t count = 0;
- LogPrintf("Upgrading txindex database... [0%%]\n");
- uiInterface.ShowProgress(_("Upgrading txindex database"), 0, true);
- int report_done = 0;
- const size_t batch_size = 1 << 24; // 16 MiB
-
- CDBBatch batch_newdb(*this);
- CDBBatch batch_olddb(block_tree_db);
-
- std::pair<unsigned char, uint256> key;
- std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
- std::pair<unsigned char, uint256> prev_key = begin_key;
-
- bool interrupted = false;
- std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
- for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
- boost::this_thread::interruption_point();
- if (ShutdownRequested()) {
- interrupted = true;
- break;
- }
-
- if (!cursor->GetKey(key)) {
- return error("%s: cannot get key from valid cursor", __func__);
- }
- if (key.first != DB_TXINDEX) {
- break;
- }
-
- // Log progress every 10%.
- if (++count % 256 == 0) {
- // Since txids are uniformly random and traversed in increasing order, the high 16 bits
- // of the hash can be used to estimate the current progress.
- const uint256& txid = key.second;
- uint32_t high_nibble =
- (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
- (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
- int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
-
- uiInterface.ShowProgress(_("Upgrading txindex database"), percentage_done, true);
- if (report_done < percentage_done/10) {
- LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
- report_done = percentage_done/10;
- }
- }
-
- CDiskTxPos value;
- if (!cursor->GetValue(value)) {
- return error("%s: cannot parse txindex record", __func__);
- }
- batch_newdb.Write(key, value);
- batch_olddb.Erase(key);
-
- if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
- // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
- // because LevelDB iterators are guaranteed to provide a consistent view of the
- // underlying data, like a lightweight snapshot.
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- prev_key, key);
- prev_key = key;
- }
- }
-
- // If these final DB batches complete the migration, write the best block
- // hash marker to the new database and delete from the old one. This signals
- // that the former is fully caught up to that point in the blockchain and
- // that all txindex entries have been removed from the latter.
- if (!interrupted) {
- batch_olddb.Erase(DB_TXINDEX_BLOCK);
- batch_newdb.Write(DB_BEST_BLOCK, locator);
- }
-
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- begin_key, key);
-
- if (interrupted) {
- LogPrintf("[CANCELLED].\n");
- return false;
- }
-
- uiInterface.ShowProgress("", 100, false);
-
- LogPrintf("[DONE].\n");
- return true;
-}
diff --git a/src/txdb.h b/src/txdb.h
index 4193f98de1..100adb428d 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -40,31 +40,6 @@ static const int64_t nMaxTxIndexCache = 1024;
//! Max memory allocated to coin DB specific cache (MiB)
static const int64_t nMaxCoinsDBCache = 8;
-struct CDiskTxPos : public CDiskBlockPos
-{
- unsigned int nTxOffset; // after header
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITEAS(CDiskBlockPos, *this);
- READWRITE(VARINT(nTxOffset));
- }
-
- CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
- }
-
- CDiskTxPos() {
- SetNull();
- }
-
- void SetNull() {
- CDiskBlockPos::SetNull();
- nTxOffset = 0;
- }
-};
-
/** CCoinsView backed by the coin database (chainstate/) */
class CCoinsViewDB final : public CCoinsView
{
@@ -118,43 +93,9 @@ public:
bool ReadLastBlockFile(int &nFile);
bool WriteReindexing(bool fReindexing);
bool ReadReindexing(bool &fReindexing);
- bool ReadTxIndex(const uint256 &txid, CDiskTxPos &pos);
- bool WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> > &vect);
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
};
-/**
- * Access to the txindex database (indexes/txindex/)
- *
- * The database stores a block locator of the chain the database is synced to
- * so that the TxIndex can efficiently determine the point it last stopped at.
- * A locator is used instead of a simple hash of the chain tip because blocks
- * and block index entries may not be flushed to disk until after this database
- * is updated.
- */
-class TxIndexDB : public CDBWrapper
-{
-public:
- explicit TxIndexDB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
-
- /// Read the disk location of the transaction data with the given hash. Returns false if the
- /// transaction hash is not indexed.
- bool ReadTxPos(const uint256& txid, CDiskTxPos& pos) const;
-
- /// Write a batch of transaction positions to the DB.
- bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
-
- /// Read block locator of the chain that the txindex is in sync with.
- bool ReadBestBlock(CBlockLocator& locator) const;
-
- /// Write block locator of the chain that the txindex is in sync with.
- bool WriteBestBlock(const CBlockLocator& locator);
-
- /// Migrate txindex data from the block tree DB, where it may be for older nodes that have not
- /// been upgraded yet to the new database.
- bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
-};
-
#endif // BITCOIN_TXDB_H
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index bb585fc075..1c6dba0c9c 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -693,18 +693,18 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
// Check children against mapNextTx
CTxMemPool::setEntries setChildrenCheck;
auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetHash(), 0));
- int64_t childSizes = 0;
+ uint64_t child_sizes = 0;
for (; iter != mapNextTx.end() && iter->first->hash == it->GetTx().GetHash(); ++iter) {
txiter childit = mapTx.find(iter->second->GetHash());
assert(childit != mapTx.end()); // mapNextTx points to in-mempool transactions
if (setChildrenCheck.insert(childit).second) {
- childSizes += childit->GetTxSize();
+ child_sizes += childit->GetTxSize();
}
}
assert(setChildrenCheck == GetMemPoolChildren(it));
// Also check to make sure size is greater than sum with immediate children.
// just a sanity check, not definitive that this calc is correct...
- assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize());
+ assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize());
if (fDependsWait)
waitingOnDependants.push_back(&(*it));
@@ -1055,11 +1055,36 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint>* pvNoSpends
}
}
-bool CTxMemPool::TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const {
+uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const {
+ // find the ancestor with no in-mempool parents that has the highest descendant count
+ std::vector<txiter> candidates;
+ setEntries counted;
+ candidates.push_back(entry);
+ uint64_t maximum = 0;
+ while (candidates.size()) {
+ txiter candidate = candidates.back();
+ candidates.pop_back();
+ if (!counted.insert(candidate).second) continue;
+ const setEntries& parents = GetMemPoolParents(candidate);
+ if (parents.size() == 0) {
+ maximum = std::max(maximum, candidate->GetCountWithDescendants());
+ } else {
+ for (txiter i : parents) {
+ candidates.push_back(i);
+ }
+ }
+ }
+ return maximum;
+}
+
+void CTxMemPool::GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const {
LOCK(cs);
auto it = mapTx.find(txid);
- return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit &&
- it->GetCountWithDescendants() < chainLimit);
+ ancestors = descendants = 0;
+ if (it != mapTx.end()) {
+ ancestors = it->GetCountWithAncestors();
+ descendants = CalculateDescendantMaximum(it);
+ }
}
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
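CalculateDescendantMaximum() walks up from the entry to every ancestor that has no in-mempool parents and takes the largest cached descendant count it finds; the `counted` set keeps diamond-shaped ancestries (tx3 and tx4 both reaching tx1 through tx2) from being traversed twice. Below is a self-contained sketch of that walk over a toy pool, with string ids and a precomputed per-entry descendant count instead of the real txiter machinery.

    #include <algorithm>
    #include <cstdint>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    struct Entry {
        std::vector<std::string> parents;  // in-mempool parents only
        uint64_t count_with_descendants;   // maintained incrementally in the real mempool
    };

    uint64_t DescendantMaximum(const std::map<std::string, Entry>& pool, const std::string& start)
    {
        std::vector<std::string> candidates{start};
        std::set<std::string> counted;
        uint64_t maximum = 0;
        while (!candidates.empty()) {
            std::string cur = candidates.back();
            candidates.pop_back();
            if (!counted.insert(cur).second) continue; // already visited
            const Entry& e = pool.at(cur);
            if (e.parents.empty()) {
                // A parentless ancestor: its descendant count covers the whole cluster below it.
                maximum = std::max(maximum, e.count_with_descendants);
            } else {
                candidates.insert(candidates.end(), e.parents.begin(), e.parents.end());
            }
        }
        return maximum;
    }

    int main()
    {
        // tx1 <- tx2 <- {tx3, tx4}: tx1 is the only parentless ancestor and counts 4 descendants.
        std::map<std::string, Entry> pool{
            {"tx1", {{}, 4}}, {"tx2", {{"tx1"}, 3}}, {"tx3", {{"tx2"}, 1}}, {"tx4", {{"tx2"}, 1}}};
        return DescendantMaximum(pool, "tx4") == 4 ? 0 : 1;
    }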
diff --git a/src/txmempool.h b/src/txmempool.h
index ca7b1cd4be..bda812b42f 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -498,6 +498,7 @@ public:
const setEntries & GetMemPoolParents(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
const setEntries & GetMemPoolChildren(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ uint64_t CalculateDescendantMaximum(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
private:
typedef std::map<txiter, setEntries, CompareIteratorByHash> cacheMap;
@@ -619,8 +620,11 @@ public:
/** Expire all transactions (and their dependencies) in the mempool older than time. Return the number of removed transactions. */
int Expire(int64_t time);
- /** Returns false if the transaction is in the mempool and not within the chain limit specified. */
- bool TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const;
+ /**
+ * Calculate the ancestor and descendant count for the given transaction.
+ * The counts include the transaction itself.
+ */
+ void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const;
unsigned long size()
{
diff --git a/src/util.cpp b/src/util.cpp
index 34483d95b0..48d64e3eec 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -459,9 +459,9 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin
if (it != m_override_args.end()) {
if (it->second.size() > 0) {
for (const auto& ic : it->second) {
- fprintf(stderr, "warning: -includeconf cannot be used from commandline; ignoring -includeconf=%s\n", ic.c_str());
+ error += "-includeconf cannot be used from commandline; -includeconf=" + ic + "\n";
}
- m_override_args.erase(it);
+ return false;
}
}
return true;
@@ -849,11 +849,12 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
// if there is an -includeconf in the override args, but it is empty, that means the user
// passed '-noincludeconf' on the command line, in which case we should not include anything
if (m_override_args.count("-includeconf") == 0) {
+ std::string chain_id = GetChainName();
std::vector<std::string> includeconf(GetArgs("-includeconf"));
{
// We haven't set m_network yet (that happens in SelectParams()), so manually check
// for network.includeconf args.
- std::vector<std::string> includeconf_net(GetArgs(std::string("-") + GetChainName() + ".includeconf"));
+ std::vector<std::string> includeconf_net(GetArgs(std::string("-") + chain_id + ".includeconf"));
includeconf.insert(includeconf.end(), includeconf_net.begin(), includeconf_net.end());
}
@@ -862,7 +863,7 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
{
LOCK(cs_args);
m_config_args.erase("-includeconf");
- m_config_args.erase(std::string("-") + GetChainName() + ".includeconf");
+ m_config_args.erase(std::string("-") + chain_id + ".includeconf");
}
for (const std::string& to_include : includeconf) {
@@ -873,15 +874,22 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
}
LogPrintf("Included configuration file %s\n", to_include.c_str());
} else {
- fprintf(stderr, "Failed to include configuration file %s\n", to_include.c_str());
+ error = "Failed to include configuration file " + to_include;
+ return false;
}
}
// Warn about recursive -includeconf
includeconf = GetArgs("-includeconf");
{
- std::vector<std::string> includeconf_net(GetArgs(std::string("-") + GetChainName() + ".includeconf"));
+ std::vector<std::string> includeconf_net(GetArgs(std::string("-") + chain_id + ".includeconf"));
includeconf.insert(includeconf.end(), includeconf_net.begin(), includeconf_net.end());
+ std::string chain_id_final = GetChainName();
+ if (chain_id_final != chain_id) {
+ // Also warn about recursive includeconf for the chain that was specified in one of the includeconfs
+ includeconf_net = GetArgs(std::string("-") + chain_id_final + ".includeconf");
+ includeconf.insert(includeconf.end(), includeconf_net.begin(), includeconf_net.end());
+ }
}
for (const std::string& to_include : includeconf) {
fprintf(stderr, "warning: -includeconf cannot be used from included files; ignoring -includeconf=%s\n", to_include.c_str());
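Taken together, the ArgsManager hunks above tighten the include rules: -includeconf on the command line is now a hard parse error, a missing included file fails ReadConfigFiles() with an error string instead of printing to stderr, and -includeconf found inside an included file is only warned about. The sketch below is a rough in-memory model of those rules, not the real ArgsManager API; ApplyIncludes() and its parameters are made up for illustration.

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    using Config = std::multimap<std::string, std::string>;

    // cli: settings parsed from argv; conf: settings from the base config file;
    // files: contents of the files that -includeconf may name.
    bool ApplyIncludes(const Config& cli, Config& conf,
                       const std::map<std::string, Config>& files, std::string& error)
    {
        if (cli.count("includeconf")) {
            error = "-includeconf cannot be used from commandline";
            return false;
        }
        std::vector<std::string> to_include;
        auto range = conf.equal_range("includeconf");
        for (auto it = range.first; it != range.second; ++it) to_include.push_back(it->second);
        conf.erase("includeconf");
        for (const auto& name : to_include) {
            auto file = files.find(name);
            if (file == files.end()) {
                error = "Failed to include configuration file " + name;
                return false;
            }
            for (const auto& [key, value] : file->second) {
                if (key == "includeconf") {
                    std::cerr << "warning: -includeconf cannot be used from included files\n";
                    continue; // warn, do not recurse
                }
                conf.emplace(key, value);
            }
        }
        return true;
    }

    int main()
    {
        std::map<std::string, Config> files{
            {"relative.conf", {{"uacomment", "relative"}, {"includeconf", "relative2.conf"}}}};
        Config cli;
        Config conf{{"includeconf", "relative.conf"}};
        std::string error;
        if (!ApplyIncludes(cli, conf, files, error)) std::cerr << error << '\n';
        return 0;
    }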
diff --git a/src/validation.cpp b/src/validation.cpp
index 9791d6e2d8..60b48d38c3 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -577,15 +577,9 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (tx.IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "coinbase");
- // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
- bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus());
- if (!gArgs.GetBoolArg("-prematurewitness", false) && tx.HasWitness() && !witnessEnabled) {
- return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
- }
-
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
- if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
+ if (fRequireStandard && !IsStandardTx(tx, reason))
return state.DoS(0, false, REJECT_NONSTANDARD, reason);
// Do not work on transactions that are too small.
diff --git a/src/validation.h b/src/validation.h
index b5ab10786a..04f5b6cb80 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -497,4 +497,10 @@ bool DumpMempool();
/** Load the mempool from disk. */
bool LoadMempool();
+//! Check whether the block associated with this index entry is pruned or not.
+inline bool IsBlockPruned(const CBlockIndex* pblockindex)
+{
+ return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+}
+
#endif // BITCOIN_VALIDATION_H
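The new IsBlockPruned() helper bundles the three-part test: pruning has happened, the index entry has no block data on disk, yet its transaction count shows the block was once fully received. A small sketch of the intended call pattern follows; the flag value, the global, the struct and ReadBlockChecked() are toy stand-ins, not Core declarations.

    #include <stdexcept>

    static const unsigned int BLOCK_HAVE_DATA = 8; // toy flag value
    static bool fHavePruned = true;                // toy stand-in for the global

    struct CBlockIndex { unsigned int nStatus; unsigned int nTx; };

    inline bool IsBlockPruned(const CBlockIndex* pblockindex)
    {
        return fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0;
    }

    // Callers that are about to read block data check the index entry first and
    // report a clean error instead of failing on a missing block file.
    void ReadBlockChecked(const CBlockIndex* pindex)
    {
        if (IsBlockPruned(pindex)) {
            throw std::runtime_error("Block not available (pruned data)");
        }
        // ... read the block from disk here ...
    }

    int main()
    {
        CBlockIndex pruned{/*nStatus=*/0, /*nTx=*/100};
        try { ReadBlockChecked(&pruned); } catch (const std::exception&) { /* expected */ }
        return 0;
    }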
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 456f08bc14..94c35e2cb1 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -198,7 +198,7 @@ CTxDestination GetLabelDestination(CWallet* const pwallet, const std::string& la
return dest;
}
-static UniValue getlabeladdress(const JSONRPCRequest& request)
+static UniValue getaccountaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
CWallet* const pwallet = wallet.get();
@@ -207,53 +207,36 @@ static UniValue getlabeladdress(const JSONRPCRequest& request)
return NullUniValue;
}
- if (!IsDeprecatedRPCEnabled("accounts") && request.strMethod == "getaccountaddress") {
+ if (!IsDeprecatedRPCEnabled("accounts")) {
if (request.fHelp) {
throw std::runtime_error("getaccountaddress (Deprecated, will be removed in V0.18. To use this command, start bitcoind with -deprecatedrpc=accounts)");
}
throw JSONRPCError(RPC_METHOD_DEPRECATED, "getaccountaddress is deprecated and will be removed in V0.18. To use this command, start bitcoind with -deprecatedrpc=accounts.");
}
- if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
+ if (request.fHelp || request.params.size() != 1)
throw std::runtime_error(
- "getlabeladdress \"label\" ( force ) \n"
- "\nReturns the default receiving address for this label. This will reset to a fresh address once there's a transaction that spends to it.\n"
+ "getaccountaddress \"account\"\n"
+ "\n\nDEPRECATED. Returns the current Bitcoin address for receiving payments to this account.\n"
"\nArguments:\n"
- "1. \"label\" (string, required) The label for the address. It can also be set to the empty string \"\" to represent the default label.\n"
- "2. \"force\" (bool, optional) Whether the label should be created if it does not yet exist. If False, the RPC will return an error if called with a label that doesn't exist.\n"
- " Defaults to false (unless the getaccountaddress method alias is being called, in which case defaults to true for backwards compatibility).\n"
+ "1. \"account\" (string, required) The account for the address. It can also be set to the empty string \"\" to represent the default account. The account does not need to exist, it will be created and a new address created if there is no account by the given name.\n"
"\nResult:\n"
- "\"address\" (string) The current receiving address for the label.\n"
+ "\"address\" (string) The account bitcoin address\n"
"\nExamples:\n"
- + HelpExampleCli("getlabeladdress", "")
- + HelpExampleCli("getlabeladdress", "\"\"")
- + HelpExampleCli("getlabeladdress", "\"mylabel\"")
- + HelpExampleRpc("getlabeladdress", "\"mylabel\"")
+ + HelpExampleCli("getaccountaddress", "")
+ + HelpExampleCli("getaccountaddress", "\"\"")
+ + HelpExampleCli("getaccountaddress", "\"myaccount\"")
+ + HelpExampleRpc("getaccountaddress", "\"myaccount\"")
);
LOCK2(cs_main, pwallet->cs_wallet);
- // Parse the label first so we don't generate a key if there's an error
- std::string label = LabelFromValue(request.params[0]);
- bool force = request.strMethod == "getaccountaddress";
- if (!request.params[1].isNull()) {
- force = request.params[1].get_bool();
- }
-
- bool label_found = false;
- for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
- if (item.second.name == label) {
- label_found = true;
- break;
- }
- }
- if (!force && !label_found) {
- throw JSONRPCError(RPC_WALLET_INVALID_LABEL_NAME, std::string("No addresses with label " + label));
- }
+ // Parse the account first so we don't generate a key if there's an error
+ std::string account = LabelFromValue(request.params[0]);
UniValue ret(UniValue::VSTR);
- ret = EncodeDestination(GetLabelDestination(pwallet, label));
+ ret = EncodeDestination(GetLabelDestination(pwallet, account));
return ret;
}
@@ -343,23 +326,33 @@ static UniValue setlabel(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address");
}
+ std::string old_label = pwallet->mapAddressBook[dest].name;
std::string label = LabelFromValue(request.params[1]);
if (IsMine(*pwallet, dest)) {
- // Detect when changing the label of an address that is the receiving address of another label:
- // If so, delete the account record for it. Labels, unlike addresses, can be deleted,
- // and if we wouldn't do this, the record would stick around forever.
- if (pwallet->mapAddressBook.count(dest)) {
- std::string old_label = pwallet->mapAddressBook[dest].name;
- if (old_label != label && dest == GetLabelDestination(pwallet, old_label)) {
- pwallet->DeleteLabel(old_label);
- }
- }
pwallet->SetAddressBook(dest, label, "receive");
+ if (request.strMethod == "setaccount" && old_label != label && dest == GetLabelDestination(pwallet, old_label)) {
+ // for setaccount, call GetLabelDestination so a new receive address is created for the old account
+ GetLabelDestination(pwallet, old_label, true);
+ }
} else {
pwallet->SetAddressBook(dest, label, "send");
}
+ // Detect when there are no addresses using this label.
+ // If so, delete the account record for it. Labels, unlike addresses, can be deleted,
+ // and if we wouldn't do this, the record would stick around forever.
+ bool found_address = false;
+ for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
+ if (item.second.name == label) {
+ found_address = true;
+ break;
+ }
+ }
+ if (!found_address) {
+ pwallet->DeleteLabel(old_label);
+ }
+
return NullUniValue;
}
@@ -1457,13 +1450,6 @@ static UniValue addwitnessaddress(const JSONRPCRequest& request)
"Projects should transition to using the address_type argument of getnewaddress, or option -addresstype=[bech32|p2sh-segwit] instead.\n");
}
- {
- LOCK(cs_main);
- if (!IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus()) && !gArgs.GetBoolArg("-walletprematurewitness", false)) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Segregated witness not enabled on network");
- }
- }
-
CTxDestination dest = DecodeDestination(request.params[0].get_str());
if (!IsValidDestination(dest)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address");
@@ -4404,7 +4390,7 @@ static const CRPCCommand commands[] =
{ "wallet", "sethdseed", &sethdseed, {"newkeypool","seed"} },
/** Account functions (deprecated) */
- { "wallet", "getaccountaddress", &getlabeladdress, {"account"} },
+ { "wallet", "getaccountaddress", &getaccountaddress, {"account"} },
{ "wallet", "getaccount", &getaccount, {"address"} },
{ "wallet", "getaddressesbyaccount", &getaddressesbyaccount, {"account"} },
{ "wallet", "getreceivedbyaccount", &getreceivedbylabel, {"account","minconf"} },
@@ -4414,7 +4400,6 @@ static const CRPCCommand commands[] =
{ "wallet", "move", &movecmd, {"fromaccount","toaccount","amount","minconf","comment"} },
/** Label functions (to replace non-balance account functions) */
- { "wallet", "getlabeladdress", &getlabeladdress, {"label","force"} },
{ "wallet", "getaddressesbylabel", &getaddressesbylabel, {"label"} },
{ "wallet", "getreceivedbylabel", &getreceivedbylabel, {"label","minconf"} },
{ "wallet", "listlabels", &listlabels, {"purpose"} },
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index e90370cf06..d90be33000 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -2,14 +2,14 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "wallet/wallet.h"
-#include "wallet/coinselection.h"
-#include "wallet/coincontrol.h"
-#include "amount.h"
-#include "primitives/transaction.h"
-#include "random.h"
-#include "test/test_bitcoin.h"
-#include "wallet/test/wallet_test_fixture.h"
+#include <wallet/wallet.h>
+#include <wallet/coinselection.h>
+#include <wallet/coincontrol.h>
+#include <amount.h>
+#include <primitives/transaction.h>
+#include <random.h>
+#include <test/test_bitcoin.h>
+#include <wallet/test/wallet_test_fixture.h>
#include <boost/test/unit_test.hpp>
#include <random>
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 3987e8e70a..78cacc0206 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2469,8 +2469,11 @@ bool CWallet::OutputEligibleForSpending(const COutput& output, const CoinEligibi
if (output.nDepth < (output.tx->IsFromMe(ISMINE_ALL) ? eligibility_filter.conf_mine : eligibility_filter.conf_theirs))
return false;
- if (!mempool.TransactionWithinChainLimit(output.tx->GetHash(), eligibility_filter.max_ancestors))
+ size_t ancestors, descendants;
+ mempool.GetTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
+ if (ancestors > eligibility_filter.max_ancestors || descendants > eligibility_filter.max_descendants) {
return false;
+ }
return true;
}
@@ -2582,16 +2585,17 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
++it;
}
- size_t nMaxChainLength = std::min(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT), gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
+ size_t max_ancestors = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT));
+ size_t max_descendants = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
bool fRejectLongChains = gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS);
bool res = nTargetValue <= nValueFromPresetInputs ||
SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 6, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 1, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
(m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, 2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, nMaxChainLength/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, nMaxChainLength/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, nMaxChainLength), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
(m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max()), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used));
// because SelectCoinsMinConf clears the setCoinsRet, we now add the possible inputs to the coinset
@@ -2609,7 +2613,7 @@ bool CWallet::SignTransaction(CMutableTransaction &tx)
// sign the new tx
int nIn = 0;
- for (const auto& input : tx.vin) {
+ for (auto& input : tx.vin) {
std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(input.prevout.hash);
if(mi == mapWallet.end() || input.prevout.n >= mi->second.tx->vout.size()) {
return false;
@@ -2620,7 +2624,7 @@ bool CWallet::SignTransaction(CMutableTransaction &tx)
if (!ProduceSignature(*this, MutableTransactionSignatureCreator(&tx, nIn, amount, SIGHASH_ALL), scriptPubKey, sigdata)) {
return false;
}
- UpdateTransaction(tx, nIn, sigdata);
+ UpdateInput(input, sigdata);
nIn++;
}
return true;
@@ -3050,7 +3054,7 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransac
strFailReason = _("Signing transaction failed");
return false;
} else {
- UpdateTransaction(txNew, nIn, sigdata);
+ UpdateInput(txNew.vin.at(nIn), sigdata);
}
nIn++;
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index f1761b0fcf..1ec2a9e771 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -662,8 +662,10 @@ struct CoinEligibilityFilter
const int conf_mine;
const int conf_theirs;
const uint64_t max_ancestors;
+ const uint64_t max_descendants;
- CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors) {}
+ CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_ancestors) {}
+ CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors, uint64_t max_descendants) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_descendants) {}
};
class WalletRescanReserver; //forward declarations for ScanForWalletTransactions/RescanFromTime
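With the extra max_descendants field, coin selection can reject a candidate output on either side of its mempool chain, matching the GetTransactionAncestry() call in OutputEligibleForSpending(). The three-argument constructor keeps the old behaviour by reusing the ancestor limit for descendants. The sketch below mirrors the declaration above; WithinChainLimits() is a made-up helper for illustration, not a wallet function.

    #include <cstddef>
    #include <cstdint>

    struct CoinEligibilityFilter {
        const int conf_mine;
        const int conf_theirs;
        const uint64_t max_ancestors;
        const uint64_t max_descendants;
        CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors)
            : conf_mine(conf_mine), conf_theirs(conf_theirs),
              max_ancestors(max_ancestors), max_descendants(max_ancestors) {}
        CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors, uint64_t max_descendants)
            : conf_mine(conf_mine), conf_theirs(conf_theirs),
              max_ancestors(max_ancestors), max_descendants(max_descendants) {}
    };

    // ancestors/descendants as CTxMemPool::GetTransactionAncestry() would report them.
    bool WithinChainLimits(const CoinEligibilityFilter& filter, size_t ancestors, size_t descendants)
    {
        return ancestors <= filter.max_ancestors && descendants <= filter.max_descendants;
    }

    int main()
    {
        CoinEligibilityFilter filter(0, 1, 25); // a single limit applies to both counts
        // A coin with 26 in-mempool descendants exceeds the limit of 25 and is skipped.
        bool eligible = WithinChainLimits(filter, /*ancestors=*/10, /*descendants=*/26);
        return eligible ? 1 : 0; // returns 0 here: the coin is not eligible
    }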
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
index d1bf9206b2..91b6415a7c 100755
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -63,7 +63,7 @@ def get_log_events(source, logfile):
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
- with open(logfile, 'r') as infile:
+ with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
diff --git a/test/functional/feature_includeconf.py b/test/functional/feature_includeconf.py
index 9ccb89af43..9a7a0ca103 100755
--- a/test/functional/feature_includeconf.py
+++ b/test/functional/feature_includeconf.py
@@ -41,14 +41,9 @@ class IncludeConfTest(BitcoinTestFramework):
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
- self.log.info("-includeconf cannot be used as command-line arg. subversion should still end with 'main; relative)/'")
+ self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
-
- self.start_node(0, extra_args=["-includeconf=relative2.conf"])
-
- subversion = self.nodes[0].getnetworkinfo()["subversion"]
- assert subversion.endswith("main; relative)/")
- self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from commandline; ignoring -includeconf=relative2.conf")
+ self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
@@ -59,8 +54,18 @@ class IncludeConfTest(BitcoinTestFramework):
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
+ self.log.info("-includeconf cannot contain invalid arg")
+ with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
+ f.write("foo=bar\n")
+ self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")
+
+ self.log.info("-includeconf cannot be invalid path")
+ os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
+ self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
+
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
+ # Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f:
diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py
index 7db6a03b45..6577d83f5c 100755
--- a/test/functional/feature_nulldummy.py
+++ b/test/functional/feature_nulldummy.py
@@ -42,7 +42,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
- self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]]
+ self.extra_args = [['-whitelist=127.0.0.1', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 3622e1834a..b10306b283 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -42,9 +42,9 @@ class SegWitTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
- self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
- ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
- ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]]
+ self.extra_args = [["-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
+ ["-blockversion=4", "-promiscuousmempoolflags=517", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
+ ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]]
def setup_network(self):
super().setup_network()
@@ -129,21 +129,6 @@ class SegWitTest(BitcoinTestFramework):
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
- self.log.info("Verify default node can't accept any witness format txs before fork")
- # unsigned, no scriptsig
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
- # unsigned with redeem script
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
- self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
- # signed
- self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
- self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
- self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
- self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
-
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
@@ -164,6 +149,16 @@ class SegWitTest(BitcoinTestFramework):
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
+ self.log.info("Verify default node can't accept txs with missing witness")
+ # unsigned, no scriptsig
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
+ # unsigned with redeem script
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
+
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index e80a764477..940d085e89 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -230,55 +230,9 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
- # Create a p2sh output -- this is so we can pass the standardness
- # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
- # in P2SH).
- p2sh_program = CScript([OP_TRUE])
- p2sh_pubkey = hash160(p2sh_program)
- scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
-
- # Now check that unnecessary witnesses can't be used to blind a node
- # to a transaction, eg by violating standardness checks.
- tx2 = CTransaction()
- tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
- tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
- tx2.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
- self.nodes[0].generate(1)
- sync_blocks(self.nodes)
-
- # We'll add an unnecessary witness to this transaction that would cause
- # it to be non-standard, to test that violating policy with a witness before
- # segwit activation doesn't blind a node to a transaction. Transactions
- # rejected for having a witness before segwit activation shouldn't be added
- # to the rejection cache.
- tx3 = CTransaction()
- tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
- tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
- tx3.wit.vtxinwit.append(CTxInWitness())
- tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
- tx3.rehash()
- # Note that this should be rejected for the premature witness reason,
- # rather than a policy check, since segwit hasn't activated yet.
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet')
-
- # If we send without witness, it should be accepted.
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True)
-
- # Now create a new anyone-can-spend utxo for the next test.
- tx4 = CTransaction()
- tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
- tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
- tx4.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True)
-
- self.nodes[0].generate(1)
- sync_blocks(self.nodes)
-
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
- self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
+ self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
# ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
# backdated so that it applies to all blocks, going back to the genesis
@@ -1119,7 +1073,7 @@ class SegWitTest(BitcoinTestFramework):
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
- # V0 segwit outputs should be standard after activation, but not before.
+ # V0 segwit outputs and inputs are always standard, but V0 segwit inputs may only be mined after activation.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
@@ -1148,45 +1102,46 @@ class SegWitTest(BitcoinTestFramework):
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
+ tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=segwit_activated)
+ # This is always accepted, since the mempool policy is to consider segwit as always active
+ # and thus allow segwit outputs
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
- if segwit_activated:
- # if tx was accepted, then we spend the second output.
- tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
- tx2.vout = [CTxOut(7000, scriptPubKey)]
- tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
- else:
- # if tx wasn't accepted, we just re-spend the p2sh output we started with.
- tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
- tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
+ # tx was accepted, so we spend the second output.
+ tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
+ tx2.vout = [CTxOut(7000, scriptPubKey)]
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
- test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=segwit_activated)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
- if segwit_activated:
- # tx and tx2 were both accepted. Don't bother trying to reclaim the
- # P2PKH output; just send tx's first output back to an anyone-can-spend.
- sync_mempools([self.nodes[0], self.nodes[1]])
- tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
- tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
- tx3.wit.vtxinwit.append(CTxInWitness())
- tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
- tx3.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
- else:
- # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
- tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
- tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
+ # tx and tx2 were both accepted. Don't bother trying to reclaim the
+ # P2PKH output; just send tx's first output back to an anyone-can-spend.
+ sync_mempools([self.nodes[0], self.nodes[1]])
+ tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+ tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx3.rehash()
+ if not segwit_activated:
+ # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
+ # in blocks and the tx is impossible to mine right now.
+ assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
+ # Create the same output as tx3, but by replacing tx
+ tx3_out = tx3.vout[0]
+ tx3 = tx
+ tx3.vout = [tx3_out]
tx3.rehash()
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
+ assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
@@ -1855,6 +1810,60 @@ class SegWitTest(BitcoinTestFramework):
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
+ def test_non_standard_witness_blinding(self):
+ self.log.info("Testing behavior of unnecessary witnesses in transactions does not blind the node for the transaction")
+ assert (len(self.utxo) > 0)
+
+ # Create a p2sh output -- this is so we can pass the standardness
+ # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
+ # in P2SH).
+ p2sh_program = CScript([OP_TRUE])
+ p2sh_pubkey = hash160(p2sh_program)
+ scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+
+ # Now check that unnecessary witnesses can't be used to blind a node
+ # to a transaction, eg by violating standardness checks.
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, scriptPubKey))
+ tx.rehash()
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, False, True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # We'll add an unnecessary witness to this transaction that would cause
+ # it to be non-standard, to test that violating policy with a witness
+ # doesn't blind a node to a transaction. Transactions
+ # rejected for having a witness shouldn't be added
+ # to the rejection cache.
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, scriptPubKey))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
+ tx2.rehash()
+ # This will be rejected due to a policy check:
+ # No witness is allowed, since it is not a witness program but a p2sh program
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, True, False, b'bad-witness-nonstandard')
+
+ # If we send without witness, it should be accepted.
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, False, True)
+
+ # Now create a new anyone-can-spend utxo for the next test.
+ tx3 = CTransaction()
+ tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
+ tx3.rehash()
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # Update our utxo list; we spent the first entry.
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
@@ -2023,6 +2032,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
+ self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
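The pre-activation path above leans on testmempoolaccept to probe policy without broadcasting. As a hedged illustration (not part of the patch), the RPC takes a list of raw transaction hex strings and returns one result dict per entry; the helper below assumes a test-framework RPC proxy `node` and a rehash()'d mininode CTransaction `tx`:

    # Sketch only: `node` and `tx` are assumed inputs, not defined in this patch.
    from test_framework.util import assert_equal, bytes_to_hex_str

    def check_policy_acceptance(node, tx, expect_allowed=True):
        # Dry-run the transaction against mempool policy without relaying it.
        raw_hex = bytes_to_hex_str(tx.serialize_with_witness())
        result = node.testmempoolaccept([raw_hex])[0]
        assert_equal(result['txid'], tx.hash)
        assert_equal(result['allowed'], expect_allowed)
        return result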
diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py
index 2e0500e7c4..bc27c183b1 100755
--- a/test/functional/rpc_deprecated.py
+++ b/test/functional/rpc_deprecated.py
@@ -40,7 +40,6 @@ class DeprecatedRpcTest(BitcoinTestFramework):
#
# The following 'label' RPC methods are usable both with and without the
# -deprecatedrpc=accounts switch enabled.
- # - getlabeladdress
# - getaddressesbylabel
# - getreceivedbylabel
# - listlabels
@@ -69,10 +68,6 @@ class DeprecatedRpcTest(BitcoinTestFramework):
assert_raises_rpc_error(-32, "getaccountaddress is deprecated", self.nodes[0].getaccountaddress, "label0")
self.nodes[1].getaccountaddress("label1")
- self.log.info("- getlabeladdress")
- self.nodes[0].getlabeladdress("label0")
- self.nodes[1].getlabeladdress("label1")
-
self.log.info("- getaddressesbyaccount")
assert_raises_rpc_error(-32, "getaddressesbyaccount is deprecated", self.nodes[0].getaddressesbyaccount, "label0")
self.nodes[1].getaddressesbyaccount("label1")
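For context, the assertions above exercise the -deprecatedrpc=accounts switch: a node without it rejects the account-era calls with RPC_METHOD_DEPRECATED (-32), while a node started with the switch still serves them. A minimal sketch, assuming two test-framework RPC proxies `node_default` and `node_accounts` (the second started with -deprecatedrpc=accounts):

    # Sketch only: the node handles are assumptions, not defined in this patch.
    from test_framework.util import assert_raises_rpc_error

    def check_accounts_deprecation(node_default, node_accounts):
        # Without the switch, the account-era call fails with error code -32 ...
        assert_raises_rpc_error(-32, "getaccountaddress is deprecated",
                                node_default.getaccountaddress, "label0")
        # ... while the node started with -deprecatedrpc=accounts still serves it.
        node_accounts.getaccountaddress("label1")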
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 75a8986793..5c2555c1ff 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -359,7 +359,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
- fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
+ fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
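The change above only adds an explicit encoding to the file handler. A minimal standalone sketch of the same dual-handler setup using only the standard library (the log path here is a placeholder, not the framework's tmpdir):

    import logging
    import sys

    log = logging.getLogger('TestFramework')
    log.setLevel(logging.DEBUG)

    # File handler: capture everything, and force UTF-8 so non-ASCII log lines
    # don't raise UnicodeEncodeError under a narrow default locale.
    fh = logging.FileHandler('/tmp/test_framework.log', encoding='utf-8')
    fh.setLevel(logging.DEBUG)

    # Console handler: only errors by default.
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.ERROR)

    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    log.addHandler(fh)
    log.addHandler(ch)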
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 65e4c0817e..5b3a4df0f9 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -419,10 +419,6 @@ class TestHandler:
self.test_list = test_list
self.flags = flags
self.num_running = 0
- # In case there is a graveyard of zombie bitcoinds, we can apply a
- # pseudorandom offset to hopefully jump over them.
- # (625 is PORT_RANGE/MAX_NODES)
- self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
@@ -430,7 +426,7 @@ class TestHandler:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
- portseed = len(self.test_list) + self.portseed_offset
+ portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
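With the time-based offset removed, the portseed is just the number of tests still queued, so a given test list always maps to the same ports. A rough sketch of why distinct portseeds keep parallel jobs from colliding; the constants and helper below are illustrative assumptions, not the framework's actual port layout:

    # Illustrative constants only; the real framework defines its own layout.
    PORT_MIN = 11000
    PORT_RANGE = 5000
    MAX_NODES = 8

    def p2p_port(node_index, portseed):
        # Nearby distinct portseeds select disjoint windows of MAX_NODES ports,
        # so concurrently running jobs do not bind the same port.
        return PORT_MIN + node_index + (MAX_NODES * portseed) % (PORT_RANGE - MAX_NODES)

    # Example: jobs started with portseed 3 and 4 get non-overlapping windows.
    assert {p2p_port(i, 3) for i in range(MAX_NODES)}.isdisjoint(
        {p2p_port(i, 4) for i in range(MAX_NODES)})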
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index fcc11abce0..f07041706a 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -31,7 +31,7 @@ class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
- self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
+ self.extra_args = [["-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
def run_test(self):
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index 705dd8985e..f5d7830fca 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -5,7 +5,7 @@
"""Test label RPCs.
RPCs tested are:
- - getlabeladdress
+ - getaccountaddress
- getaddressesbyaccount/getaddressesbylabel
- listaddressgroupings
- setlabel
@@ -92,17 +92,24 @@ class WalletLabelsTest(BitcoinTestFramework):
# recognize the label/address associations.
labels = [Label(name, accounts_api) for name in ("a", "b", "c", "d", "e")]
for label in labels:
- label.add_receive_address(node.getlabeladdress(label=label.name, force=True))
+ if accounts_api:
+ address = node.getaccountaddress(label.name)
+ else:
+ address = node.getnewaddress(label.name)
+ label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), [label.name for label in labels])
# Send a transaction to each label, and make sure this forces
- # getlabeladdress to generate a new receiving address.
+ # getaccountaddress to generate a new receiving address.
for label in labels:
- node.sendtoaddress(label.receive_address, amount_to_send)
- label.add_receive_address(node.getlabeladdress(label.name))
+ if accounts_api:
+ node.sendtoaddress(label.receive_address, amount_to_send)
+ label.add_receive_address(node.getaccountaddress(label.name))
+ else:
+ node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
@@ -115,10 +122,17 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that sendfrom label reduces listaccounts balances.
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
- node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+ if accounts_api:
+ node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+ else:
+ node.sendfrom(label.name, to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
- label.add_receive_address(node.getlabeladdress(label.name))
+ if accounts_api:
+ address = node.getaccountaddress(label.name)
+ else:
+ address = node.getnewaddress(label.name)
+ label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
if accounts_api:
@@ -134,12 +148,12 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that setlabel can assign a label to a new unused address.
for label in labels:
- address = node.getlabeladdress(label="", force=True)
+ address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
if accounts_api:
- assert(address not in node.getaddressesbyaccount(""))
+ assert address not in node.getaddressesbyaccount("")
else:
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
@@ -160,19 +174,20 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that setlabel can change the label of an address from a
# different label.
- change_label(node, labels[0].addresses[0], labels[0], labels[1])
-
- # Check that setlabel can change the label of an address which
- # is the receiving address of a different label.
- change_label(node, labels[0].receive_address, labels[0], labels[1])
+ change_label(node, labels[0].addresses[0], labels[0], labels[1], accounts_api)
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
- change_label(node, labels[2].addresses[0], labels[2], labels[2])
+ change_label(node, labels[2].addresses[0], labels[2], labels[2], accounts_api)
+
+ if accounts_api:
+ # Check that setaccount can change the label of an address which
+ # is the receiving address of a different label.
+ change_label(node, labels[0].receive_address, labels[0], labels[1], accounts_api)
- # Check that setlabel can set the label of an address which is
- # already the receiving address of the label. This is a no-op.
- change_label(node, labels[2].receive_address, labels[2], labels[2])
+ # Check that setaccount can set the label of an address which is
+ # already the receiving address of the label. This is a no-op.
+ change_label(node, labels[2].receive_address, labels[2], labels[2], accounts_api)
class Label:
def __init__(self, name, accounts_api):
@@ -192,12 +207,14 @@ class Label:
def add_receive_address(self, address):
self.add_address(address)
- self.receive_address = address
+ if self.accounts_api:
+ self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
- assert_equal(node.getlabeladdress(self.name), self.receive_address)
+ if self.accounts_api:
+ assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(
@@ -216,22 +233,26 @@ class Label:
assert_equal(set(node.getaddressesbyaccount(self.name)), set(self.addresses))
-def change_label(node, address, old_label, new_label):
+def change_label(node, address, old_label, new_label, accounts_api):
assert_equal(address in old_label.addresses, True)
- node.setlabel(address, new_label.name)
+ if accounts_api:
+ node.setaccount(address, new_label.name)
+ else:
+ node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
- # Calling setlabel on an address which was previously the receiving
- # address of a different label should reset the receiving address of
- # the old label, causing getlabeladdress to return a brand new
+ # Calling setaccount on an address which was previously the receiving
+ # address of a different account should reset the receiving address of
+ # the old account, causing getaccountaddress to return a brand new
# address.
- if old_label.name != new_label.name and address == old_label.receive_address:
- new_address = node.getlabeladdress(old_label.name)
- assert_equal(new_address not in old_label.addresses, True)
- assert_equal(new_address not in new_label.addresses, True)
- old_label.add_receive_address(new_address)
+ if accounts_api:
+ if old_label.name != new_label.name and address == old_label.receive_address:
+ new_address = node.getaccountaddress(old_label.name)
+ assert_equal(new_address not in old_label.addresses, True)
+ assert_equal(new_address not in new_label.addresses, True)
+ old_label.add_receive_address(new_address)
old_label.verify(node)
new_label.verify(node)
diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py
index de5719eb29..89776b2a6b 100755
--- a/test/lint/check-doc.py
+++ b/test/lint/check-doc.py
@@ -22,7 +22,7 @@ CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
-SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
+SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
def main():
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index f54be46b52..f5daf4f9ac 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -5,12 +5,17 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for duplicate includes.
+# Guard against accidental introduction of new Boost dependencies.
+# Enforce bracket syntax includes and disallow #include'd .cpp files.
+
+IGNORE_REGEXP="/(leveldb|secp256k1|univalue)/"
filter_suffix() {
- git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "/(leveldb|secp256k1|univalue)/"
+ git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "${IGNORE_REGEXP}"
}
EXIT_CODE=0
+
for HEADER_FILE in $(filter_suffix h); do
DUPLICATE_INCLUDES_IN_HEADER_FILE=$(grep -E "^#include " < "${HEADER_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_HEADER_FILE} != "" ]]; then
@@ -20,6 +25,7 @@ for HEADER_FILE in $(filter_suffix h); do
EXIT_CODE=1
fi
done
+
for CPP_FILE in $(filter_suffix cpp); do
DUPLICATE_INCLUDES_IN_CPP_FILE=$(grep -E "^#include " < "${CPP_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_CPP_FILE} != "" ]]; then
@@ -29,4 +35,85 @@ for CPP_FILE in $(filter_suffix cpp); do
EXIT_CODE=1
fi
done
+
+INCLUDED_CPP_FILES=$(git grep -E "^#include [<\"][^>\"]+\.cpp[>\"]" -- "*.cpp" "*.h")
+if [[ ${INCLUDED_CPP_FILES} != "" ]]; then
+ echo "The following files #include .cpp files:"
+ echo "${INCLUDED_CPP_FILES}"
+ echo
+ EXIT_CODE=1
+fi
+
+EXPECTED_BOOST_INCLUDES=(
+ boost/algorithm/string.hpp
+ boost/algorithm/string/case_conv.hpp
+ boost/algorithm/string/classification.hpp
+ boost/algorithm/string/join.hpp
+ boost/algorithm/string/predicate.hpp
+ boost/algorithm/string/replace.hpp
+ boost/algorithm/string/split.hpp
+ boost/assign/std/vector.hpp
+ boost/bind.hpp
+ boost/chrono/chrono.hpp
+ boost/date_time/posix_time/posix_time.hpp
+ boost/filesystem.hpp
+ boost/filesystem/detail/utf8_codecvt_facet.hpp
+ boost/filesystem/fstream.hpp
+ boost/interprocess/sync/file_lock.hpp
+ boost/multi_index/hashed_index.hpp
+ boost/multi_index/ordered_index.hpp
+ boost/multi_index/sequenced_index.hpp
+ boost/multi_index_container.hpp
+ boost/optional.hpp
+ boost/preprocessor/cat.hpp
+ boost/preprocessor/stringize.hpp
+ boost/program_options/detail/config_file.hpp
+ boost/scoped_array.hpp
+ boost/signals2/connection.hpp
+ boost/signals2/last_value.hpp
+ boost/signals2/signal.hpp
+ boost/test/unit_test.hpp
+ boost/thread.hpp
+ boost/thread/condition_variable.hpp
+ boost/thread/mutex.hpp
+ boost/thread/thread.hpp
+ boost/variant.hpp
+ boost/variant/apply_visitor.hpp
+ boost/variant/static_visitor.hpp
+)
+
+for BOOST_INCLUDE in $(git grep '^#include <boost/' -- "*.cpp" "*.h" | cut -f2 -d: | cut -f2 -d'<' | cut -f1 -d'>' | sort -u); do
+ IS_EXPECTED_INCLUDE=0
+ for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
+ if [[ "${BOOST_INCLUDE}" == "${EXPECTED_BOOST_INCLUDE}" ]]; then
+ IS_EXPECTED_INCLUDE=1
+ break
+ fi
+ done
+ if [[ ${IS_EXPECTED_INCLUDE} == 0 ]]; then
+ EXIT_CODE=1
+ echo "A new Boost dependency in the form of \"${BOOST_INCLUDE}\" appears to have been introduced:"
+ git grep "${BOOST_INCLUDE}" -- "*.cpp" "*.h"
+ echo
+ fi
+done
+
+for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
+ if ! git grep -q "^#include <${EXPECTED_BOOST_INCLUDE}>" -- "*.cpp" "*.h"; then
+ echo "Good job! The Boost dependency \"${EXPECTED_BOOST_INCLUDE}\" is no longer used."
+ echo "Please remove it from EXPECTED_BOOST_INCLUDES in $0"
+ echo "to make sure this dependency is not accidentally reintroduced."
+ echo
+ EXIT_CODE=1
+ fi
+done
+
+QUOTE_SYNTAX_INCLUDES=$(git grep '^#include "' -- "*.cpp" "*.h" | grep -Ev "${IGNORE_REGEXP}")
+if [[ ${QUOTE_SYNTAX_INCLUDES} != "" ]]; then
+ echo "Please use bracket syntax includes (\"#include <foo.h>\") instead of quote syntax includes:"
+ echo "${QUOTE_SYNTAX_INCLUDES}"
+ echo
+ EXIT_CODE=1
+fi
+
exit ${EXIT_CODE}
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
new file mode 100755
index 0000000000..3144f2c841
--- /dev/null
+++ b/test/lint/lint-locale-dependence.sh
@@ -0,0 +1,229 @@
+#!/bin/bash
+
+KNOWN_VIOLATIONS=(
+ "src/base58.cpp:.*isspace"
+ "src/bitcoin-tx.cpp.*stoul"
+ "src/bitcoin-tx.cpp.*trim_right"
+ "src/bitcoin-tx.cpp:.*atoi"
+ "src/core_read.cpp.*is_digit"
+ "src/dbwrapper.cpp.*stoul"
+ "src/dbwrapper.cpp:.*vsnprintf"
+ "src/httprpc.cpp.*trim"
+ "src/init.cpp:.*atoi"
+ "src/netbase.cpp.*to_lower"
+ "src/qt/rpcconsole.cpp:.*atoi"
+ "src/qt/rpcconsole.cpp:.*isdigit"
+ "src/rest.cpp:.*strtol"
+ "src/rpc/server.cpp.*to_upper"
+ "src/test/dbwrapper_tests.cpp:.*snprintf"
+ "src/test/getarg_tests.cpp.*split"
+ "src/torcontrol.cpp:.*atoi"
+ "src/torcontrol.cpp:.*strtol"
+ "src/uint256.cpp:.*isspace"
+ "src/uint256.cpp:.*tolower"
+ "src/util.cpp:.*atoi"
+ "src/util.cpp:.*fprintf"
+ "src/util.cpp:.*tolower"
+ "src/utilmoneystr.cpp:.*isdigit"
+ "src/utilmoneystr.cpp:.*isspace"
+ "src/utilstrencodings.cpp:.*atoi"
+ "src/utilstrencodings.cpp:.*isspace"
+ "src/utilstrencodings.cpp:.*strtol"
+ "src/utilstrencodings.cpp:.*strtoll"
+ "src/utilstrencodings.cpp:.*strtoul"
+ "src/utilstrencodings.cpp:.*strtoull"
+ "src/utilstrencodings.h:.*atoi"
+)
+
+REGEXP_IGNORE_EXTERNAL_DEPENDENCIES="^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/)"
+
+LOCALE_DEPENDENT_FUNCTIONS=(
+ alphasort # LC_COLLATE (via strcoll)
+ asctime # LC_TIME (directly)
+ asprintf # (via vasprintf)
+ atof # LC_NUMERIC (via strtod)
+ atoi # LC_NUMERIC (via strtol)
+ atol # LC_NUMERIC (via strtol)
+ atoll # (via strtoll)
+ atoq
+ btowc # LC_CTYPE (directly)
+ ctime # (via asctime or localtime)
+ dprintf # (via vdprintf)
+ fgetwc
+ fgetws
+ fold_case # boost::locale::fold_case
+ fprintf # (via vfprintf)
+ fputwc
+ fputws
+ fscanf # (via __vfscanf)
+ fwprintf # (via __vfwprintf)
+ getdate # via __getdate_r => isspace // __localtime_r
+ getwc
+ getwchar
+ is_digit # boost::algorithm::is_digit
+ is_space # boost::algorithm::is_space
+ isalnum # LC_CTYPE
+ isalpha # LC_CTYPE
+ isblank # LC_CTYPE
+ iscntrl # LC_CTYPE
+ isctype # LC_CTYPE
+ isdigit # LC_CTYPE
+ isgraph # LC_CTYPE
+ islower # LC_CTYPE
+ isprint # LC_CTYPE
+ ispunct # LC_CTYPE
+ isspace # LC_CTYPE
+ isupper # LC_CTYPE
+ iswalnum # LC_CTYPE
+ iswalpha # LC_CTYPE
+ iswblank # LC_CTYPE
+ iswcntrl # LC_CTYPE
+ iswctype # LC_CTYPE
+ iswdigit # LC_CTYPE
+ iswgraph # LC_CTYPE
+ iswlower # LC_CTYPE
+ iswprint # LC_CTYPE
+ iswpunct # LC_CTYPE
+ iswspace # LC_CTYPE
+ iswupper # LC_CTYPE
+ iswxdigit # LC_CTYPE
+ isxdigit # LC_CTYPE
+ localeconv # LC_NUMERIC + LC_MONETARY
+ mblen # LC_CTYPE
+ mbrlen
+ mbrtowc
+ mbsinit
+ mbsnrtowcs
+ mbsrtowcs
+ mbstowcs # LC_CTYPE
+ mbtowc # LC_CTYPE
+ mktime
+ normalize # boost::locale::normalize
+# printf # LC_NUMERIC
+ putwc
+ putwchar
+ scanf # LC_NUMERIC
+ setlocale
+ snprintf
+ sprintf
+ sscanf
+ stod
+ stof
+ stoi
+ stol
+ stold
+ stoll
+ stoul
+ stoull
+ strcasecmp
+ strcasestr
+ strcoll # LC_COLLATE
+# strerror
+ strfmon
+ strftime # LC_TIME
+ strncasecmp
+ strptime
+ strtod # LC_NUMERIC
+ strtof
+ strtoimax
+ strtol # LC_NUMERIC
+ strtold
+ strtoll
+ strtoq
+ strtoul # LC_NUMERIC
+ strtoull
+ strtoumax
+ strtouq
+ strxfrm # LC_COLLATE
+ swprintf
+ to_lower # boost::locale::to_lower
+ to_title # boost::locale::to_title
+ to_upper # boost::locale::to_upper
+ tolower # LC_CTYPE
+ toupper # LC_CTYPE
+ towctrans
+ towlower # LC_CTYPE
+ towupper # LC_CTYPE
+ trim # boost::algorithm::trim
+ trim_left # boost::algorithm::trim_left
+ trim_right # boost::algorithm::trim_right
+ ungetwc
+ vasprintf
+ vdprintf
+ versionsort
+ vfprintf
+ vfscanf
+ vfwprintf
+ vprintf
+ vscanf
+ vsnprintf
+ vsprintf
+ vsscanf
+ vswprintf
+ vwprintf
+ wcrtomb
+ wcscasecmp
+ wcscoll # LC_COLLATE
+ wcsftime # LC_TIME
+ wcsncasecmp
+ wcsnrtombs
+ wcsrtombs
+ wcstod # LC_NUMERIC
+ wcstof
+ wcstoimax
+ wcstol # LC_NUMERIC
+ wcstold
+ wcstoll
+ wcstombs # LC_CTYPE
+ wcstoul # LC_NUMERIC
+ wcstoull
+ wcstoumax
+ wcswidth
+ wcsxfrm # LC_COLLATE
+ wctob
+ wctomb # LC_CTYPE
+ wctrans
+ wctype
+ wcwidth
+ wprintf
+)
+
+function join_array {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
+
+REGEXP_IGNORE_KNOWN_VIOLATIONS=$(join_array "|" "${KNOWN_VIOLATIONS[@]}")
+
+# Invoke "git grep" only once in order to minimize run-time
+REGEXP_LOCALE_DEPENDENT_FUNCTIONS=$(join_array "|" "${LOCALE_DEPENDENT_FUNCTIONS[@]}")
+GIT_GREP_OUTPUT=$(git grep -E "[^a-zA-Z0-9_\`'\"<>](${REGEXP_LOCALE_DEPENDENT_FUNCTIONS}(|_r|_s))[^a-zA-Z0-9_\`'\"<>]" -- "*.cpp" "*.h")
+
+EXIT_CODE=0
+for LOCALE_DEPENDENT_FUNCTION in "${LOCALE_DEPENDENT_FUNCTIONS[@]}"; do
+ MATCHES=$(grep -E "[^a-zA-Z0-9_\`'\"<>]${LOCALE_DEPENDENT_FUNCTION}(|_r|_s)[^a-zA-Z0-9_\`'\"<>]" <<< "${GIT_GREP_OUTPUT}" | \
+ grep -vE "\.(c|cpp|h):\s*(//|\*|/\*|\").*${LOCALE_DEPENDENT_FUNCTION}" | \
+ grep -vE 'fprintf\(.*(stdout|stderr)')
+ if [[ ${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES} != "" ]]; then
+ MATCHES=$(grep -vE "${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES}" <<< "${MATCHES}")
+ fi
+ if [[ ${REGEXP_IGNORE_KNOWN_VIOLATIONS} != "" ]]; then
+ MATCHES=$(grep -vE "${REGEXP_IGNORE_KNOWN_VIOLATIONS}" <<< "${MATCHES}")
+ fi
+ if [[ ${MATCHES} != "" ]]; then
+ echo "The locale dependent function ${LOCALE_DEPENDENT_FUNCTION}(...) appears to be used:"
+ echo "${MATCHES}"
+ echo
+ EXIT_CODE=1
+ fi
+done
+if [[ ${EXIT_CODE} != 0 ]]; then
+ echo "Unnecessary locale dependence can cause bugs that are very"
+ echo "tricky to isolate and fix. Please avoid using locale dependent"
+ echo "functions if possible."
+ echo
+ echo "Advice not applicable in this specific case? Add an exception"
+ echo "by updating the ignore list in $0"
+fi
+exit ${EXIT_CODE}
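To see the class of bug this lint guards against, consider how locale settings change number parsing and formatting. A small Python illustration (using the locale module rather than the C functions the script scans for; the de_DE.UTF-8 locale is assumed to be installed):

    import locale

    try:
        # de_DE uses ',' as decimal point and '.' as thousands separator.
        locale.setlocale(locale.LC_NUMERIC, 'de_DE.UTF-8')
    except locale.Error:
        raise SystemExit("de_DE.UTF-8 locale not installed; sketch not applicable")

    print(float("1.5"))                        # locale-independent: 1.5
    print(locale.format_string('%.1f', 1.5))   # locale-dependent output: '1,5'
    print(locale.atof("1.5"))                  # silently parsed as 15.0, not 1.5

Silent misparsing like the last line is exactly the sort of hard-to-isolate bug the warning text above refers to.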