146 files changed, 3511 insertions(+), 2263 deletions(-)
diff --git a/.travis.yml b/.travis.yml index 6c3a13d73d..69397c26bf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,8 +8,6 @@ cache: - depends/built - depends/sdk-sources - $HOME/.ccache -git: - depth: 1 env: global: - MAKEJOBS=-j3 @@ -80,7 +78,7 @@ script: - ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false) - make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL V=1 ; false ) - export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib - - if [ "$RUN_TESTS" = "true" ]; then travis_wait 30 make $MAKEJOBS check VERBOSE=1; fi + - if [ "$RUN_TESTS" = "true" ]; then travis_wait 50 make $MAKEJOBS check VERBOSE=1; fi - if [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then extended="--extended --exclude feature_pruning,feature_dbcrash"; fi - if [ "$RUN_TESTS" = "true" ]; then test/functional/test_runner.py --combinedlogslen=4000 --coverage --quiet ${extended}; fi after_script: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8381bd2448..ea475f8cfb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,6 +15,24 @@ merging pull requests as well as a "lead maintainer" who is responsible for the release cycle, overall merging, moderation and appointment of maintainers. +Communication Channels +---------------------- + +Most communication about Bitcoin Core development happens on IRC, in the +#bitcoin-core-dev channel on Freenode. The easiest way to participate on IRC is +with the web client, [webchat.freenode.net](https://webchat.freenode.net/). Chat +history logs can be found +on [botbot.me](https://botbot.me/freenode/bitcoin-core-dev/). + +Discussion about code base improvements happens in GitHub issues and on pull +requests. + +The developer +[mailing list](https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev) +should be used to discuss complicated or controversial changes before working on +a patch set. + + Contributor Workflow -------------------- @@ -33,12 +33,6 @@ regularly to indicate new official, stable release versions of Bitcoin Core. The contribution workflow is described in [CONTRIBUTING.md](CONTRIBUTING.md). -The developer [mailing list](https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev) -should be used to discuss complicated or controversial changes before working -on a patch set. - -Developer IRC can be found on Freenode at #bitcoin-core-dev. - Testing ------- diff --git a/configure.ac b/configure.ac index 4f57750a1a..c422914a26 100644 --- a/configure.ac +++ b/configure.ac @@ -219,6 +219,12 @@ AC_ARG_ENABLE([debug], [enable_debug=$enableval], [enable_debug=no]) +# Enable different -fsanitize options +AC_ARG_WITH([sanitizers], + [AS_HELP_STRING([--with-sanitizers], + [comma separated list of extra sanitizers to build with (default is none enabled)])], + [use_sanitizers=$withval]) + # Enable gprof profiling AC_ARG_ENABLE([gprof], [AS_HELP_STRING([--enable-gprof], @@ -247,6 +253,26 @@ if test "x$enable_debug" = xyes; then fi fi +if test x$use_sanitizers != x; then + # First check if the compiler accepts flags. If an incompatible pair like + # -fsanitize=address,thread is used here, this check will fail. This will also + # fail if a bad argument is passed, e.g. -fsanitize=undfeined + AX_CHECK_COMPILE_FLAG( + [[-fsanitize=$use_sanitizers]], + [[SANITIZER_CXXFLAGS=-fsanitize=$use_sanitizers]], + [AC_MSG_ERROR([compiler did not accept requested flags])]) + + # Some compilers (e.g. GCC) require additional libraries like libasan, + # libtsan, libubsan, etc. 
Make sure linking still works with the sanitize + # flag. This is a separate check so we can give a better error message when + # the sanitize flags are supported by the compiler but the actual sanitizer + # libs are missing. + AX_CHECK_LINK_FLAG( + [[-fsanitize=$use_sanitizers]], + [[SANITIZER_LDFLAGS=-fsanitize=$use_sanitizers]], + [AC_MSG_ERROR([linker did not accept requested flags, you are missing required libraries])]) +fi + ERROR_CXXFLAGS= if test "x$enable_werror" = "xyes"; then if test "x$CXXFLAG_WERROR" = "x"; then @@ -1258,6 +1284,8 @@ AC_SUBST(HARDENED_CPPFLAGS) AC_SUBST(HARDENED_LDFLAGS) AC_SUBST(PIC_FLAGS) AC_SUBST(PIE_FLAGS) +AC_SUBST(SANITIZER_CXXFLAGS) +AC_SUBST(SANITIZER_LDFLAGS) AC_SUBST(SSE42_CXXFLAGS) AC_SUBST(LIBTOOL_APP_LDFLAGS) AC_SUBST(USE_UPNP) diff --git a/contrib/devtools/check-doc.py b/contrib/devtools/check-doc.py index f164ea9322..0c2e1a24be 100755 --- a/contrib/devtools/check-doc.py +++ b/contrib/devtools/check-doc.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -16,31 +16,33 @@ import sys FOLDER_GREP = 'src' FOLDER_TEST = 'src/test/' -CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP -CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST) -CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR) -REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"') -REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")') +REGEX_ARG = '(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"' +REGEX_DOC = 'HelpMessageOpt\("(-[^"=]+?)(?:=|")' +CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP) +CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST) +CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR) # list unsupported, deprecated and duplicate args as they need no documentation SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd']) + def main(): - used = check_output(CMD_GREP_ARGS, shell=True) - docd = check_output(CMD_GREP_DOCS, shell=True) - - args_used = set(re.findall(REGEX_ARG,used)) - args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL) - args_need_doc = args_used.difference(args_docd) - args_unknown = args_docd.difference(args_used) - - print "Args used : %s" % len(args_used) - print "Args documented : %s" % len(args_docd) - print "Args undocumented: %s" % len(args_need_doc) - print args_need_doc - print "Args unknown : %s" % len(args_unknown) - print args_unknown - - sys.exit(len(args_need_doc)) + used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True) + docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True) + + args_used = set(re.findall(re.compile(REGEX_ARG), used)) + args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL) + args_need_doc = args_used.difference(args_docd) + args_unknown = args_docd.difference(args_used) + + print("Args used : {}".format(len(args_used))) + print("Args 
documented : {}".format(len(args_docd))) + print("Args undocumented: {}".format(len(args_need_doc))) + print(args_need_doc) + print("Args unknown : {}".format(len(args_unknown))) + print(args_unknown) + + sys.exit(len(args_need_doc)) + if __name__ == "__main__": main() diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py index 7ea49b65e1..5402870fba 100755 --- a/contrib/devtools/clang-format-diff.py +++ b/contrib/devtools/clang-format-diff.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # #===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# # @@ -69,10 +69,9 @@ Example usage for git/svn users: import argparse import difflib +import io import re -import string import subprocess -import StringIO import sys @@ -133,9 +132,9 @@ def main(): ['-lines', str(start_line) + ':' + str(end_line)]) # Reformat files containing changes in place. - for filename, lines in lines_by_file.iteritems(): + for filename, lines in lines_by_file.items(): if args.i and args.verbose: - print 'Formatting', filename + print('Formatting {}'.format(filename)) command = [binary, filename] if args.i: command.append('-i') @@ -143,8 +142,11 @@ def main(): command.append('-sort-includes') command.extend(lines) command.extend(['-style=file', '-fallback-style=none']) - p = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=None, stdin=subprocess.PIPE) + p = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=None, + stdin=subprocess.PIPE, + universal_newlines=True) stdout, stderr = p.communicate() if p.returncode != 0: sys.exit(p.returncode) @@ -152,11 +154,11 @@ def main(): if not args.i: with open(filename) as f: code = f.readlines() - formatted_code = StringIO.StringIO(stdout).readlines() + formatted_code = io.StringIO(stdout).readlines() diff = difflib.unified_diff(code, formatted_code, filename, filename, '(before formatting)', '(after formatting)') - diff_string = string.join(diff, '') + diff_string = ''.join(diff) if len(diff_string) > 0: sys.stdout.write(diff_string) diff --git a/contrib/devtools/github-merge.py b/contrib/devtools/github-merge.py index 2941d2cb6d..9c666673cf 100755 --- a/contrib/devtools/github-merge.py +++ b/contrib/devtools/github-merge.py @@ -46,7 +46,7 @@ def git_config_get(option, default=None): ''' try: return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8') - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: return default def retrieve_pr_info(repo,pull): @@ -193,23 +193,23 @@ def main(): devnull = open(os.devnull,'w') try: subprocess.check_call([GIT,'checkout','-q',branch]) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Cannot check out branch %s." % (branch), file=stderr) sys.exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*', '+refs/heads/'+branch+':refs/heads/'+base_branch]) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Cannot find pull request #%s or branch %s on %s." % (pull,branch,host_repo), file=stderr) sys.exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Cannot find head of pull request #%s on %s." 
% (pull,host_repo), file=stderr) sys.exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr) sys.exit(3) subprocess.check_call([GIT,'checkout','-q',base_branch]) @@ -230,7 +230,7 @@ def main(): message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n' try: subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch]) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Cannot be merged cleanly.",file=stderr) subprocess.check_call([GIT,'merge','--abort']) sys.exit(4) @@ -249,12 +249,12 @@ def main(): try: first_sha512 = tree_sha512sum() message += '\n\nTree-SHA512: ' + first_sha512 - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Unable to compute tree hash") sys.exit(4) try: subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')]) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("ERROR: Cannot update message.", file=stderr) sys.exit(4) @@ -299,7 +299,7 @@ def main(): try: subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit']) break - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: print("Error while signing, asking again.",file=stderr) elif reply == 'x': print("Not signing off on merge, exiting.",file=stderr) diff --git a/contrib/devtools/lint-python.sh b/contrib/devtools/lint-python.sh index e2c9d775a6..1469ce1640 100755 --- a/contrib/devtools/lint-python.sh +++ b/contrib/devtools/lint-python.sh @@ -52,6 +52,7 @@ # F822 undefined name name in __all__ # F823 local variable name … referenced before assignment # F831 duplicate argument name in function definition +# F841 local variable 'foo' is assigned to but never used # W292 no newline at end of file # W504 line break after binary operator # W601 .has_key() is deprecated, use "in" @@ -60,4 +61,4 @@ # W604 backticks are deprecated, use "repr()" # W605 invalid escape sequence "x" -flake8 --ignore=B,C,E,F,I,N,W --select=E112,E113,E115,E116,E125,E131,E133,E223,E224,E271,E272,E273,E274,E275,E304,E306,E502,E702,E703,E714,E721,E741,E742,E743,F401,F402,F404,F406,F407,F601,F602,F621,F622,F631,F701,F702,F703,F704,F705,F706,F707,F811,F812,F822,F823,F831,W292,W504,W601,W602,W603,W604,W605 . +flake8 --ignore=B,C,E,F,I,N,W --select=E112,E113,E115,E116,E125,E131,E133,E223,E224,E271,E272,E273,E274,E275,E304,E306,E502,E702,E703,E714,E721,E741,E742,E743,F401,F402,F404,F406,F407,F601,F602,F621,F622,F631,F701,F702,F703,F704,F705,F706,F707,F811,F812,F822,F823,F831,F841,W292,W504,W601,W602,W603,W604,W605 . diff --git a/contrib/devtools/lint-tests.sh b/contrib/devtools/lint-tests.sh new file mode 100755 index 0000000000..dd1a3ebdc4 --- /dev/null +++ b/contrib/devtools/lint-tests.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# +# Copyright (c) 2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+# +# Check the test suite naming convention + +NAMING_INCONSISTENCIES=$(git grep -E '^BOOST_FIXTURE_TEST_SUITE\(' -- \ + "src/test/**.cpp" "src/wallet/test/**.cpp" | \ + grep -vE '/(.*?)\.cpp:BOOST_FIXTURE_TEST_SUITE\(\1, .*\)$') +if [[ ${NAMING_INCONSISTENCIES} != "" ]]; then + echo "The test suite in file src/test/foo_tests.cpp should be named" + echo "\"foo_tests\". Please make sure the following test suites follow" + echo "that convention:" + echo + echo "${NAMING_INCONSISTENCIES}" + exit 1 +fi diff --git a/contrib/devtools/optimize-pngs.py b/contrib/devtools/optimize-pngs.py index 5cb3bb6f75..565b199125 100755 --- a/contrib/devtools/optimize-pngs.py +++ b/contrib/devtools/optimize-pngs.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -10,7 +10,7 @@ import os import sys import subprocess import hashlib -from PIL import Image +from PIL import Image # pip3 install Pillow def file_hash(filename): '''Return hash of raw file contents''' @@ -27,7 +27,7 @@ def content_hash(filename): pngcrush = 'pngcrush' git = 'git' folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"] -basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n') +basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel'], universal_newlines=True).rstrip('\n') totalSaveBytes = 0 noHashChange = True @@ -37,42 +37,40 @@ for folder in folders: for file in os.listdir(absFolder): extension = os.path.splitext(file)[1] if extension.lower() == '.png': - print("optimizing "+file+"..."), + print("optimizing {}...".format(file), end =' ') file_path = os.path.join(absFolder, file) fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)} fileMetaMap['contentHashPre'] = content_hash(file_path) - pngCrushOutput = "" try: - pngCrushOutput = subprocess.check_output( - [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path], - stderr=subprocess.STDOUT).rstrip('\n') + subprocess.call([pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path], + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except: - print "pngcrush is not installed, aborting..." 
+ print("pngcrush is not installed, aborting...") sys.exit(0) #verify - if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT): - print "PNG file "+file+" is corrupted after crushing, check out pngcursh version" + if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT, universal_newlines=True): + print("PNG file "+file+" is corrupted after crushing, check out pngcursh version") sys.exit(1) fileMetaMap['sha256New'] = file_hash(file_path) fileMetaMap['contentHashPost'] = content_hash(file_path) if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']: - print "Image contents of PNG file "+file+" before and after crushing don't match" + print("Image contents of PNG file {} before and after crushing don't match".format(file)) sys.exit(1) fileMetaMap['psize'] = os.path.getsize(file_path) outputArray.append(fileMetaMap) - print("done\n"), + print("done") -print "summary:\n+++++++++++++++++" +print("summary:\n+++++++++++++++++") for fileDict in outputArray: oldHash = fileDict['sha256Old'] newHash = fileDict['sha256New'] totalSaveBytes += fileDict['osize'] - fileDict['psize'] noHashChange = noHashChange and (oldHash == newHash) - print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n" + print(fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n") -print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes" +print("completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes") diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index 1613f704df..0f2099953f 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -8,7 +8,6 @@ Exit status will be 0 if successful, and the program will be silent. Otherwise the exit status will be 1 and it will log which executables failed which checks. Needs `readelf` (for ELF) and `objdump` (for PE). ''' -from __future__ import division,print_function,unicode_literals import subprocess import sys import os @@ -21,38 +20,38 @@ def check_ELF_PIE(executable): ''' Check for position independent executable (PIE), allowing for address space randomization. 
''' - p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') ok = False - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): line = line.split() - if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN': + if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN': ok = True return ok def get_ELF_program_headers(executable): '''Return type and flags for ELF program headers''' - p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') in_headers = False count = 0 headers = [] - for line in stdout.split(b'\n'): - if line.startswith(b'Program Headers:'): + for line in stdout.splitlines(): + if line.startswith('Program Headers:'): in_headers = True - if line == b'': + if line == '': in_headers = False if in_headers: if count == 1: # header line - ofs_typ = line.find(b'Type') - ofs_offset = line.find(b'Offset') - ofs_flags = line.find(b'Flg') - ofs_align = line.find(b'Align') + ofs_typ = line.find('Type') + ofs_offset = line.find('Offset') + ofs_flags = line.find('Flg') + ofs_align = line.find('Align') if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1: raise ValueError('Cannot parse elfread -lW output') elif count > 1: @@ -69,9 +68,9 @@ def check_ELF_NX(executable): have_wx = False have_gnu_stack = False for (typ, flags) in get_ELF_program_headers(executable): - if typ == b'GNU_STACK': + if typ == 'GNU_STACK': have_gnu_stack = True - if b'W' in flags and b'E' in flags: # section is both writable and executable + if 'W' in flags and 'E' in flags: # section is both writable and executable have_wx = True return have_gnu_stack and not have_wx @@ -88,17 +87,17 @@ def check_ELF_RELRO(executable): # However, the dynamic linker need to write to this area so these are RW. # Glibc itself takes care of mprotecting this area R after relocations are finished. 
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347 - if typ == b'GNU_RELRO': + if typ == 'GNU_RELRO': have_gnu_relro = True have_bindnow = False - p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): tokens = line.split() - if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]): + if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]): have_bindnow = True return have_gnu_relro and have_bindnow @@ -106,13 +105,13 @@ def check_ELF_Canary(executable): ''' Check for use of stack canary ''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') ok = False - for line in stdout.split(b'\n'): - if b'__stack_chk_fail' in line: + for line in stdout.splitlines(): + if '__stack_chk_fail' in line: ok = True return ok @@ -122,13 +121,13 @@ def get_PE_dll_characteristics(executable): Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386' and bits is the DllCharacteristics value. ''' - p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') arch = '' bits = 0 - for line in stdout.split('\n'): + for line in stdout.splitlines(): tokens = line.split() if len(tokens)>=2 and tokens[0] == 'architecture:': arch = tokens[1].rstrip(',') diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 98daa1bd76..3a67319eaa 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2014 Wladimir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -11,7 +11,6 @@ Example usage: find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py ''' -from __future__ import division, print_function, unicode_literals import subprocess import re import sys @@ -47,28 +46,28 @@ MAX_VERSIONS = { # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used' +'_edata', '_end', '_init', '__bss_start', '_fini', '_IO_stdin_used' } READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') # Allowed NEEDED libraries ALLOWED_LIBRARIES = { # bitcoind and bitcoin-qt -b'libgcc_s.so.1', # GCC base support -b'libc.so.6', # C library -b'libpthread.so.0', # threading -b'libanl.so.1', # DNS resolve -b'libm.so.6', # math library -b'librt.so.1', # real-time (clock) -b'ld-linux-x86-64.so.2', # 64-bit dynamic linker -b'ld-linux.so.2', # 32-bit dynamic linker +'libgcc_s.so.1', # GCC base support +'libc.so.6', # C library +'libpthread.so.0', # threading +'libanl.so.1', # DNS resolve +'libm.so.6', # math library +'librt.so.1', # real-time (clock) +'ld-linux-x86-64.so.2', # 64-bit dynamic linker +'ld-linux.so.2', # 32-bit dynamic linker # bitcoin-qt only -b'libX11-xcb.so.1', # part of X11 -b'libX11.so.6', # part of X11 -b'libxcb.so.1', # part of X11 -b'libfontconfig.so.1', # font support -b'libfreetype.so.6', # font parsing -b'libdl.so.2' # programming interface to dynamic linker +'libX11-xcb.so.1', # part of X11 +'libX11.so.6', # part of X11 +'libxcb.so.1', # part of X11 +'libfontconfig.so.1', # font support +'libfreetype.so.6', # font parsing +'libdl.so.2' # programming interface to dynamic linker } class CPPFilt(object): @@ -78,10 +77,10 @@ class CPPFilt(object): Use a pipe to the 'c++filt' command. ''' def __init__(self): - self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) def __call__(self, mangled): - self.proc.stdin.write(mangled + b'\n') + self.proc.stdin.write(mangled + '\n') self.proc.stdin.flush() return self.proc.stdout.readline().rstrip() @@ -95,43 +94,43 @@ def read_symbols(executable, imports=True): Parse an ELF executable and return a list of (symbol,version) tuples for dynamic, imported symbols. 
''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip())) syms = [] - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): line = line.split() - if len(line)>7 and re.match(b'[0-9]+:$', line[0]): - (sym, _, version) = line[7].partition(b'@') - is_import = line[6] == b'UND' - if version.startswith(b'@'): + if len(line)>7 and re.match('[0-9]+:$', line[0]): + (sym, _, version) = line[7].partition('@') + is_import = line[6] == 'UND' + if version.startswith('@'): version = version[1:] if is_import == imports: syms.append((sym, version)) return syms def check_version(max_versions, version): - if b'_' in version: - (lib, _, ver) = version.rpartition(b'_') + if '_' in version: + (lib, _, ver) = version.rpartition('_') else: lib = version ver = '0' - ver = tuple([int(x) for x in ver.split(b'.')]) + ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False return ver <= max_versions[lib] def read_libraries(filename): - p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') libraries = [] - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): tokens = line.split() - if len(tokens)>2 and tokens[1] == b'(NEEDED)': - match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:])) + if len(tokens)>2 and tokens[1] == '(NEEDED)': + match = re.match('^Shared library: \[(.*)\]$', ' '.join(tokens[2:])) if match: libraries.append(match.group(1)) else: @@ -145,18 +144,18 @@ if __name__ == '__main__': # Check imported symbols for sym,version in read_symbols(filename, True): if version and not check_version(MAX_VERSIONS, version): - print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8'))) + print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version)) retval = 1 # Check exported symbols for sym,version in read_symbols(filename, False): if sym in IGNORE_EXPORTS: continue - print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8'))) + print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym))) retval = 1 # Check dependency libraries for library_name in read_libraries(filename): if library_name not in ALLOWED_LIBRARIES: - print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8'))) + print('%s: NEEDED library %s is not allowed' % (filename, library_name)) retval = 1 sys.exit(retval) diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py index e1924749d2..b36e6968bf 100755 --- a/contrib/devtools/update-translations.py +++ b/contrib/devtools/update-translations.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2014 Wladimir J. 
van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -15,7 +15,6 @@ It will do the following automatically: TODO: - auto-add new translations to the build system according to the translation process ''' -from __future__ import division, print_function import subprocess import re import sys diff --git a/contrib/macdeploy/custom_dsstore.py b/contrib/macdeploy/custom_dsstore.py index e6ecabace1..b29fc71765 100755 --- a/contrib/macdeploy/custom_dsstore.py +++ b/contrib/macdeploy/custom_dsstore.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2013-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -from __future__ import division,print_function,unicode_literals import biplist from ds_store import DSStore from mac_alias import Alias diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 23a568ad13..17ce6c44f9 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -1,5 +1,4 @@ -#!/usr/bin/env python -from __future__ import division, print_function, unicode_literals +#!/usr/bin/env python3 # # Copyright (C) 2011 Patrick "p2k" Schneider <me@p2k-network.org> # @@ -203,7 +202,7 @@ def getFrameworks(binaryPath, verbose): if verbose >= 3: print("Inspecting with otool: " + binaryPath) otoolbin=os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) o_stdout, o_stderr = otool.communicate() if otool.returncode != 0: if verbose >= 1: @@ -211,7 +210,7 @@ def getFrameworks(binaryPath, verbose): sys.stderr.flush() raise RuntimeError("otool failed with return code %d" % otool.returncode) - otoolLines = o_stdout.decode().split("\n") + otoolLines = o_stdout.split("\n") otoolLines.pop(0) # First line is the inspected binary if ".framework" in binaryPath or binaryPath.endswith(".dylib"): otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency. 
@@ -714,22 +713,6 @@ elif config.sign: if config.dmg is not None: - #Patch in check_output for Python 2.6 - if "check_output" not in dir( subprocess ): - def f(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise CalledProcessError(retcode, cmd) - return output - subprocess.check_output = f - def runHDIUtil(verb, image_basename, **kwargs): hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"] if "capture_stdout" in kwargs: @@ -747,7 +730,7 @@ if config.dmg is not None: if not value is True: hdiutil_args.append(str(value)) - return run(hdiutil_args) + return run(hdiutil_args, universal_newlines=True) if verbose >= 2: if fancy is None: @@ -789,7 +772,7 @@ if config.dmg is not None: except subprocess.CalledProcessError as e: sys.exit(e.returncode) - m = re.search("/Volumes/(.+$)", output.decode()) + m = re.search("/Volumes/(.+$)", output) disk_root = m.group(0) disk_name = m.group(1) diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py index 816d40b49c..0dbb79a707 100644 --- a/contrib/testgen/base58.py +++ b/contrib/testgen/base58.py @@ -28,7 +28,9 @@ def b58encode(v): """ long_value = 0 for (i, c) in enumerate(v[::-1]): - long_value += (256**i) * ord(c) + if isinstance(c, str): + c = ord(c) + long_value += (256**i) * c result = '' while long_value >= __b58base: @@ -41,7 +43,7 @@ def b58encode(v): # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: - if c == '\0': nPad += 1 + if c == 0: nPad += 1 else: break return (__b58chars[0]*nPad) + result @@ -50,8 +52,10 @@ def b58decode(v, length = None): """ decode v into a string of len bytes """ long_value = 0 - for (i, c) in enumerate(v[::-1]): - long_value += __b58chars.find(c) * (__b58base**i) + for i, c in enumerate(v[::-1]): + pos = __b58chars.find(c) + assert pos != -1 + long_value += pos * (__b58base**i) result = bytes() while long_value >= 256: @@ -62,10 +66,12 @@ def b58decode(v, length = None): nPad = 0 for c in v: - if c == __b58chars[0]: nPad += 1 - else: break + if c == __b58chars[0]: + nPad += 1 + continue + break - result = chr(0)*nPad + result + result = bytes(nPad) + result if length is not None and len(result) != length: return None diff --git a/contrib/testgen/gen_base58_test_vectors.py b/contrib/testgen/gen_base58_test_vectors.py index 8e6a5d5819..4351605786 100755 --- a/contrib/testgen/gen_base58_test_vectors.py +++ b/contrib/testgen/gen_base58_test_vectors.py @@ -1,11 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Generate valid and invalid base58 address and private key test vectors. 
-Usage: +Usage: gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json ''' @@ -46,8 +46,8 @@ def is_valid(v): if result is None: return False for template in templates: - prefix = str(bytearray(template[0])) - suffix = str(bytearray(template[2])) + prefix = bytearray(template[0]) + suffix = bytearray(template[2]) if result.startswith(prefix) and result.endswith(suffix): if (len(result) - len(prefix) - len(suffix)) == template[1]: return True @@ -57,20 +57,23 @@ def gen_valid_vectors(): '''Generate valid test vectors''' while True: for template in templates: - prefix = str(bytearray(template[0])) - payload = os.urandom(template[1]) - suffix = str(bytearray(template[2])) + prefix = bytearray(template[0]) + payload = bytearray(os.urandom(template[1])) + suffix = bytearray(template[2]) rv = b58encode_chk(prefix + payload + suffix) assert is_valid(rv) - metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None]) - yield (rv, b2a_hex(payload), metadata) + metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None} + hexrepr = b2a_hex(payload) + if isinstance(hexrepr, bytes): + hexrepr = hexrepr.decode('utf8') + yield (rv, hexrepr, metadata) def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix): '''Generate possibly invalid vector''' if corrupt_prefix: prefix = os.urandom(1) else: - prefix = str(bytearray(template[0])) + prefix = bytearray(template[0]) if randomize_payload_size: payload = os.urandom(max(int(random.expovariate(0.5)), 50)) @@ -80,7 +83,7 @@ def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt if corrupt_suffix: suffix = os.urandom(len(template[2])) else: - suffix = str(bytearray(template[2])) + suffix = bytearray(template[2]) return b58encode_chk(prefix + payload + suffix) diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md index e9e3f65da2..fa492fdd27 100644 --- a/contrib/verify-commits/README.md +++ b/contrib/verify-commits/README.md @@ -24,3 +24,24 @@ keys: Note that the above isn't a good UI/UX yet, and needs significant improvements to make it more convenient and reduce the chance of errors; pull-reqs improving this process would be much appreciated. + +Configuration files +------------------- + +* `trusted-git-root`: This file should contain a single git commit hash which is the first unsigned git commit (hence it is the "root of trust"). +* `trusted-sha512-root-commit`: This file should contain a single git commit hash which is the first commit without a SHA512 root commitment. +* `trusted-keys`: This file should contain a \n-delimited list of all PGP fingerprints of authorized commit signers (primary, not subkeys). +* `allow-revsig-commits`: This file should contain a \n-delimited list of git commit hashes. See next section for more info. + +Key expiry/revocation +--------------------- + +When a key (or subkey) which has signed old commits expires or is revoked, +verify-commits will start failing to verify all commits which were signed by +said key. In order to avoid bumping the root-of-trust `trusted-git-root` +file, individual commits which were signed by such a key can be added to the +`allow-revsig-commits` file. That way, the PGP signatures are still verified +but no new commits can be signed by any expired/revoked key. 
To easily build a +list of commits which need to be added, verify-commits.sh can be edited to test +each commit with BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG set to both 1 and 0, and +those which need it set to 1 printed. diff --git a/contrib/verify-commits/allow-revsig-commits b/contrib/verify-commits/allow-revsig-commits index f0088cdca4..3abf82e529 100644 --- a/contrib/verify-commits/allow-revsig-commits +++ b/contrib/verify-commits/allow-revsig-commits @@ -102,3 +102,403 @@ bafd075c5e6a1088ef0f1aa0b0b224e026a3d3e0 c8d2473e6cb042e7275a10c49d3f6a4a91bf0166 386f4385ab04b0b2c3d47bddc0dc0f2de7354964 9f33dba05c01ecc5c56eb1284ab7d64d42f55171 +7466a26cab5d66665991433947964a638f5b957e +b43aba89e356ff95b706e80d4802f60fc46a569a +02b7e8319aef2a870264ad4fa2e3bb18664dcc36 +f686002a8eba820a40ac2f34a6e8f57b2b5cc54c +2b1c50b9352ab1dc40b0f877db23c1fa4048fae3 +2405ce1df043f778b8efb9205009500cbc17313a +4ad3b3c72c73d61e0a0cab541dca20acf651320d +4ba3d4f4393d81148422d24d222fe7ed00130194 +8ee5c7b747171e335793c74cd9d2f7491da58164 +872c921c0a208b04bd0713758e52fcab5b7c1684 +00d1680498c5550e7db1f359202d3433a092fafd +585db41e9ab7a6fb262c8bad7f427cdbdc497188 +18462960c0f13bd07d8f52b61e7d7bc17e991eea +0630974647dacaf25e7fcb7f9cbb785bb078ede6 +0f58d7f3d62f012f2584f5e781fc73de4763dd9e +3d16f581538b0974853e820508e8b3093269d2fd +66e91420ab233cf1dac64504e0dc129019bf8c0d +d8d9162f5bad39b2720dd2b2da237c6159e4755f +29fad97c320c892ab6a480c81e2078ec22ab354b +791c3ea61b4e49fd46a1a71b84ca99ddf69d2ff7 +a312e201ba56742499a5480b5f2115f01505c217 +ce56fdd2e8cdf94fd0ab76d71adbfa755e23ce7d +480f42630cbd598c04fa59ee0e406f56904ecffb +6012f1caf744ac9b53383d7d10a8f1b70ca2c0e1 +ded6a2afa549f693dcabb430ce0862f8631360c8 +07090c5339436f856e79a8036d1c85deeb453803 +0e265916d1c6a63e4a3821dab9db597b5ec64b46 +e4ffcacc2187d3419c8ea12b82fb06d82d8751d2 +e117cfe45eee9169409e74a44ef4a866be25bc35 +dcfe218626b05204e9fbc95ba5d95ca0eb72ec9b +23481fa50301201ef5a60675ef899aa6ce94ca03 +27c59dc502f29cf1d76290556c21e366145e3b2e +4a62ddd01873d18dbca96c81d756be1020249b45 +a233fb4f1d037e68ff70eef3a9f5b7bf1d631918 +b2089c51cc4af2f7e1c0ec75be9449ee222b1d69 +c997f8808256521397f1c003bb1e9896fee6eaa0 +5dc00f68c49c46a380a98d06233f90528b8e2557 +fe53d5f3636aed064823bc220d828c7ff08d1d52 +935eb8de039dec65669a96a1c3b86f4b03a1b86c +0277173b1defb63216d40a8d8805ae6d5d563c26 +2a30e67d20f76bbcd9a7d445f616f005316e0a1a +d32528e733f2711b34dbc41fbb2bb0f153bf7e9a +4cad91663df381d0dff8526f3b4aa74569dfb626 +1b06ed136f17b526360617a70026aed5ded5746c +895fbd768f0c89cea3f78acac58b233d4e3a145e +f0295becbf3ef1fb78095306408789253fe0c114 +8d573198638e52e2dbd9abc609861430f9d2bcc3 +9d9c4185fadaf243bb97c226e2fef16b65299699 +eebe4580bc8d6484d79ecb24dd87412221cf2ea7 +9cf6393a4f82b9c81d3b4b468a17a89db10531a2 +598a9c4e4dcd03c6d80fba005de729a6a3aeba7e +6970b30c6f1d2be7947295fe18f2390649b17a4b +f359afcc410432ed5d30001acda0c66741ee8935 +126000ba9e7ff16271be2f4eef3df99ade8d624f +b5e4b9b5100ec15217d43edb5f4149439f4b20a5 +b987ca4ee495a7fff82f0ac14ef0753bfb7586e2 +b03013396cb2f4bf25746388b3982a2c3616e16b +9a97f39afaa890caa7987c6bc001b9a66e3e74e8 +cad504bf4c302f7a72e0a0e191f3fdbafda7340f +45cf8a03cb57b8639a8d47323bde46ba22d9eeaf +b7450cdbd89a1c862f4d4d8bf093f8a0b5448f9c +0910cbe4ef31eb95fd76c7c2f820419fe64a3150 +92a810d04b906722c9efe60e3997243c71ff3d4c +45173fa6fca9537abb0a0554f731d14b9f89c456 +fd4ca17360e6fc0c9bb76bf6b5b07c9102c12728 +ddff3447f29b62d79a33f728791f42fa9436216e +36a5a4404836da323c755523fbd27563a8e84f94 +c991b304dee368f506cfee27ddaa333f1f82c518 +d38d1a3e75aa97ffa8755ddd431754a6d0942964 
+a332a7d5a15214015f9553fdb2bcf80a1a4b8dc0 +604e08c83cf58ca7e7cda2ab284c1ace7bb12977 +18a1bbad98bd4321f15e7921d9aec91661499d90 +8049241e226c16bd07b029c0cb4b62ac40f0c923 +797441ee995aac59f55d59a93ecb55e8ecbe7dbc +62fdf9b07087b80d2142799bdd2324f61483359d +f60b4ad57912b78a96af08046a503f7905610a8c +13e31dd6548d64a5992f439e74bb424bf88aca04 +fbce66a982679b5409a295be5c99a2eef429cabf +9f2c2dba21855b8cb9b193b1819be73fa4a23a99 +a89221873a3ee2451c73b41bbe2d99d36f439d31 +3d6ad407770e13958e157bf026cae0bfb9254899 +901ba3e3819405306414628306746552b0aa1d28 +7a43fbb959c38e025e558e472ad57de357539894 +0d89fa0877930c6c8a539a656c1009ad8ab6755b +54aedc013744c86b11157423fa3cffc9a51eef02 +f0c1f8abb0182da557d07372b938f3a0a4bb906f +4ed818060ecf4a38a02c8cb48f6cbc78d2ee7708 +3bdf242fc68a8d767932c6214455d4d413effbc9 +5e468994fbb349e8eefc996954a31a67a34aaa15 +41aa9c4a801a01eca1fad22a7095372d23dace60 +2adbddb03840ad71e843c6c4a207a13e871cd1d4 +13e352dc53dec0127c5f94a60055d0ca829420dc +95e14dc81dd30ee0d396ad08dca9a6980d16eee1 +61fb80660f73e5aa5b69302ecc7ac33da206ba5a +05a761932edd05cf94ffe938908baf058f38632a +ee92243e66f2df03b3a759a8ffb75dc06f0cea0d +22cdf93c062eeaa0f8f9d6220f01b67240073dfb +76b33491596736ca804e3a29bd8398d7a1516ab7 +6e4e98ee8ce2da3cca2e2fd210e9e8dbc9b1c936 +c838283ecdfb9490425bb071b7c22e542de46c7c +5e3f5e4f25b65b583d3bfefac9e1148035781089 +f7388e93d3dd91a90239aedac4ec58404f103a2e +0a2f46b0158b6fc7244a585913b0925c0acf707f +dd561667cb7ccbbfed3134b05a565971ef6f5873 +6f01dcf63873a5e42798635ab4026c9a5f9fa213 +70fec9e36bcd1a3d93df019be084aaf89cecd7d7 +f9b74ef3fc74fd7d2aa94560820341f03cda8e12 +998c3046fab2b52bc9f141cfb588a18c05506a86 +89cc4f905e30b913ca20e4192d538cc5cbe2c38d +87d90efd69b64f769116956a5db89e536e9e3714 +5aeaa9ccd1568a77e075dbe2bd2435bd60c87c91 +bfb270acfa30713dc8c968bb9ee40cf5a2360359 +1b8c88451b0554502435d3883c528ad0aad1b09b +57ee73990f1ce29916adfd99f93eae1ccea1a43b +808c84f89d0edcef9ddaab0b849a382719f6ec9e +14b860bf64020451ced823b859da8cb912278ab9 +c63364610f4a041df1c1bd81d01b1f6856160749 +92eadc395071876d77f3babddc056b4325bdbabc +e93fff1463ae906fc986bf98c3b118c82f171546 +9ccafb1d7bdd172a9b963444072a844da379c4f7 +b4a509a3f817121c3df98ddfd96b2769e18a3e5a +dbc4ae03963014ab4b7957d62ba59dbd8f938c33 +8ddf60db7ad636b6a31b590251c671ded635fa1d +f199b8a33d9443a258a1f49a1a29674cd9ee9a20 +e542728cde676f218c552d841d0af29b92f9800b +763231051596b8e3455b839911ad6a3a1f1c3c74 +ff4cd6075b12fb32b9a906deea3ed033e3f9560a +9c3c9cdae3e20b5bdea91a0631edac5116bbc89f +93d20a734d2ee873832bed8ca5c05cf8e539c53c +ef8340d25f7c5dd5682bdecea97ce84cfce1493c +69c7ecef405d168f658a9cc7996da84c17f61e66 +4ce2f3d0d33346e9f0e96851689ee6550b2a72e3 +44e1fd926cfb0df0fbd8c41de8cd65ed8d5d6e18 +d6d2c8503c4039b682196d83a67dc28359c10c5c +ae233c4ec3d14a97c6195059f52873cdba2b4755 +0f399a9ff227896265cafab9b2e9fab6cdb9b5b9 +f4ed44ab4a8f9a87ba678d5fd1449fbf636103dc +7fcd61b2613c211bb042a82a889655178be6a212 +42973f834445d7735738bdba8847812ba3c34d95 +8df48b36ed3201d938b9974ecbee455d7dc2fb84 +96ac26e56627f0c24213fcd3a1cce9fc95f1f661 +cce94c518a46b7b0006f984bbe4d69e8749182d2 +801dd40666d1e6009920ad3ff755c7bb993b2a62 +ce829855cfca103dde55661fa1524e66b139d063 +b148803b181e30213e8a7f3bd89c8239e9dcb866 +c377feaad87f8109f85da6caf62602b30c20effc +b37cab65c63e051ebc5b491da9bd687581df94df +16e41844e7d6c5876d2caaeef6010656950c6ec5 +ee50c9e48786dea0d9df2e45805c25565c100fe3 +11dacc6154c42bc6fe3ba94c1823f8a46e4fe81a +791a0e6ddade27d1b69f4861a6640de60b9553cf +638e6c59da4fad987c437592174b188510193b2e +52f8877525d5238f3440e73710507be889d14127 
+2a56baf395bf11835d784c4f8634f4525deed6a1 +bc561b4b7d6a3f71649d37d5eb9047c29efa2b13 +31809d6f8514c4a8d5677e947e3f1ebb0db210b9 +a31e9ad4f027955d43c04a05517244647e250161 +777519bd96f68c18150a0f5942f8f97a91937f5e +4eb1f39d421024d9666cec61deaf96715ffae4c6 +50fae68d416b4b8ec4ca192923dfd5ae9ea42773 +ce665863b137ac4a7470cf006a92aa7694faca71 +81f8c0378b2ab5ea0d7b65635cb529bd3c69127c +108222b9c323a05cc9339368f10ddd0859f62b43 +28f788e47e58f2b462351d6989348a4e1a241b2b +d81dccf191a48a6b59c3747d7b4ccbe3535dde40 +a90e6d2bffc422ddcdb771c53aac0bceb970a2c4 +91e49c51f1aecc9e1d75457f4920d52a4b0a133c +60dd9cc470584960431de425e2a9ffbed0e8034a +ede386c2193fc31351e193b3a8cf30030d6be62c +a084767b40c0d3ba8fa8f8d60f1e8d99a9dc3457 +3f726c99f819f97f2ab21b94d34c6b3129cd883a +77fc469fc78cdd87c29f398d46ac58dbb9ef62c0 +4ae6d0fbef60ccbecf8f23bb482e201b3678f7a3 +8858b6ddd3bce9daa08da6e05de3ca863a399c15 +22e301a3d56dc9e6878380ee92c7d19ca43119d2 +c484ec6c9b85ca4e331e395c564ae232fd0681dd +a46a671e253528e450bd57645c400bf761da07ab +655970d9c60ae6850daf452457e14e21047c0e1b +b6a48914c50631914192aa11b19205436a9c664d +7db65c363a0cc6ca7cdb04de9a973ab70013baad +6366941275344dac7e2130b0c972e90117d37ed0 +4fb2586661471a1572c2df2a5a091011d45eb7c4 +d7be7b39fa1021ec4518186afe145ee948e12a94 +85aec87b11ec41295558175c63f1f5a849460fdf +aeb31756276034dd506fdf97c8aaade0e7e584f5 +ac016e17d20253129a0287cee7e1d06b7ef15966 +bf74d377fb8e20140da6eac1407414928384bcea +2c811e08db651a4aed6ea0f7c1972d60de6de8ab +e5d26e47c7a482c072a7fe47bb84c56854734184 +96a63a3e0cefe920819bd42add0041837b1214a1 +e526ca6284b9e13be1b912b80dd73a34e739b539 +ecd21357f16106e541e9c2854ead2a906659b938 +4b5a7ce0c301ad971f383eb60f61bf9b4026efda +929fd7276c0f0c30b9416f61a6f5f35d763d81e4 +fa8a0639f7b0ce04030b72b4d5be4f0aa36fc5cb +f1f1605c22a6283bbfd757055fcf2b584a857709 +0c173a15ca1bf20999f74987988985508c9de463 +df0793f324e33066cc746c0cb1d053d35733d626 +2b0179d8a9b75397937126b36114df0dddeab40c +bf0a08be281dc42241e7f264c2a20515eb4781bb +3895e25a77363ae8b49358fb793f50fa8b271e2d +1fc783fc08bc078239537535f174ab8a489772c0 +1d4805ce04645f3203b0cfd3d66ea710e7433eb4 +d3b58704d1d325875fc605580c1c02b825c1bbcc +ed88e3194c4bc43aeafef929da7b419d03dea1ad +dd07f47b79628668e29cc0143b21e790100ee445 +65cc7aacfbfc7b747926375280a1d839e88d576b +080ec5209172ac9605f1434559dbb3c1e012b10a +416af3edf5b5ab265acf95568f2bc9eabd3d96de +e0a7801223fd573863939e76cb633f1dcc2d22c4 +4bc853b50fd9127687eb9e4f3b679dd261a4fa96 +c68a9a69278aa194fed96bd9733d32af3690a11e +c38f540298f0e188df5ed68fd56c623b9ac8331b +643fa0b22d70e459d7f7ec3d728ae4811dc5158f +e053e05c130549f43953f1d70e724dc9ce3e1b85 +75e898c094eea533d1dfaf141c6afccc3072c49f +2805d606bc46bf5589093a1b92d3542c13ce50c2 +32751807c9c06011eb689cba56b401a6302699c0 +30853e16d332816752dafcfca92147c7ffef5b54 +bea5b00cfe95cd37832305c0f93c339a22a7d79d +c871f323b418fac27bf834843ca26985010df53f +329fc1dce7a1c372c8b10c2f2f8732b2c60daff0 +1aefc94dd78d6e0c9209cb09fc16f53dedf42108 +8e5725666b519b61fcdc3141da5c6a57c1959909 +a4ca0b042365061020627a8c045cddacea3312ec +8bd16ee12fc8ef6723e0572c29b979c15b92b4f4 +87abe20fc118721cc5efdbd94a8462468cd1da2b +4b766fcdd4ca16399075d1e081a321b3b05ce516 +f6241b3e420e19f3f0507cbbc872fe9218916a02 +7ee523604851af62c0a47c07ee023a8710ef32f7 +776ba233e939fe41a74c6b2632b93a0679a32c71 +6a796b2b53fe542e0f340f250f4f20d69efed8d0 +23d78c4dd01bc74ba35db3e3df95280f6f1b2e22 +f4b15e2de97c4f8cdbb40bef4c9d0ab2807974d9 +fff72de5bf8ac7b70208e655f237b80e70e18851 +170bc2c381f86a523de2fc8b71d62ade66303c0d +314ebdfcb38d4b4c977579f787d5e1a20d068c94 
+e9274839bf316b1972d80d28e45759f898edbf86 +75171f099e82e3527d7c3469b15891bd92227ec2 +3c5e6c94caf40395e031fbde44a0cca46fdd76ec +dc8fc0c73bebbc1c48ac5540026030c9cc00ec23 +492d22f92919d8d9d59568318c26c1e2ac4890cc +80c3a734298e824f9321c4efdd446086a3baad89 +47535d7c3ec79c5978cdcc03a5351ddbbb22538d +1b25b6df0f08f7474228c5b6ed13b58682e1e440 +c530c15180631cea95e9c292cf7fabde9dca9db3 +2723bcdce3248417e98e6c43207bef74d34076c1 +ed22eb4a62bd8d5369aaec87d4cbdc03c9f16368 +9111df9673beb6d6616d491a5478f09b5f14d040 +d86bb075bf6d1e78c1e4f3dd38b0ea828ef5ecfe +50a1cc0f0aef1514b917a5a3f4476967170b429d +6ce733747e160ca699711f2c47e686284ca9aa07 +b44adf92342ad4f9c343ba29c081a91687932936 +88799ea1b1c08f4bc1a487c9e3c2effd5e1650ae +080d7c700fc3291560d79fc590e05b8e2bad984f +12af74b289f8cdc6caf850dc6c802f9936b1e8b3 +8e4f7e72410df3ba430082c7cf385f26fd75b033 +8ac80412867118172dc4172494304e19969e9489 +f2734c2828f69d9cfd535e5eab0592a7674b2b61 +0b9fb682890b8fe10cec54072b809a5efe57d33d +5b029aaedb5fcf7cadd249607dd28eb3f233ab8c +79af9fbd8c3c0e54702a9c92b171f134bd4466c8 +c412fd805ddf3282dc2e1f28e30f51ffcb1f1da2 +111849345bb5140f86b48e730ceab4bff45fa2e9 +a0b1e57b20a17177ed5a9a54e4a8aab597a546b4 +ca209230c8e73745cf8cfc79f500c9c46e103306 +a230b0588788dbe1ac84622aea169c577b381241 +dfef6b6af08097f0676a2323085558fbbd3c48c6 +3192e5278abca7c1f3b4a2a7f77a0ce941c73985 +7c7ddd9ead99a8b5033a1a5d4698032c9e2b3a92 +10b930dde8f14e9cb661810e97a33bbf144fc55c +9225de2cf652fe2bf6e50636824cdb641546f57d +598ef9c44b3ea2cc142c175f077b493f39f5ba22 +c49355c7170a64bdd7864cc3ba9a64916b67fe7c +857d1e171e051b254a617f27b39f6a551054cee2 +21833f9456f6ad5bc06321ad6d9590f42ce0195c +8910b4717e5bb946ee6988f7fe9fd461f53a5935 +5703dff0939f05c7457cebd6fc61d88ab13afe41 +8bfa13b15b84cb372950fb7b25a1080173060b6a +ac23a7c1f19b3d8c326ffe75c8e13edf285f90fe +19be26afe3d04783a92d032b55bf3fb1e2ae63cc +f7ec7cfd38b543ba81ac7bed5b77f9a19739460b +36afd4db4442c45d4078b1a7ad16a1872b5bee0d +88c2ae3ed2bb5d367dd408c9255cd8f1e7a36c7d +a13a417cdcfdfd1f1b3bf997bb6ffe6e69b096b9 +d6064a89ac97dc0d2ce9da3982e1a4e25afaeda8 +7146d96de3e15a80cafbab2af48ff6f65d8e41bb +5628c70f2a44567695e5331fe2293c5b7f35b629 +7ff4a538a8682cdf02a4bcd6f15499c841001b73 +aa5fa642b0e7ce2ea55e2298886f212f11a8894e +8efd1c820b9a782d8608d54d924658536178295c +50a226563cd8d7c0a5e8448e87fede0eb72a8354 +b860915f8b0dae98e57a254d11575ea41f5c5a79 +d304fef3746039183f51b3ac8f4774dcf3a64f59 +53ab12d9318d5d195ccc77028b0e3ae66dc6e1fd +668de70be039a4f1ffcf20aeae2a22ee71fc55a8 +0fea960ca917b73aff853fe88476174c8a313863 +f89502306dcf6393a2c7b0efbb0fa728fc582137 +ff58b1c3bdff5e5f687f10f9e40ce495ca49674e +0b96abc35f1a9d46a27eeddd7df418d107c29c57 +b0b57a17306a7e963a4fe463f84e2b150a00a859 +4105cb6fd964ad13099ca83b1fdf3d35f3961f74 +23281a4dc3afc42a001346caec4dbb8193f0bb53 +8daf103fa138f9a184448ebf1c2e03b9dbd96f21 +02e5308c1b9f3771bbe49bc5036215fa2bd66aa9 +a65ced1a66575c652baf5084644b8647f531be8c +2456a835f0bc7796d9ff71f64837fa6790e2b7cc +9ec1330b455c1ab2eb6b89f8a2ab885677d4ae8a +0b738075bd43fbd4410e30a51e0498cbfd2b7513 +98c80e374b84e5a9c2d5c36889a0b1ebed5b814b +25720fc394e27a951bcad26095fb5a711bfacb8f +4cfd57d2e38207d78722ce8c9274ba8dd700d1cc +0fc1c31a878e93d938c67db3f958e82e3c39659f +df1ab5b4d67b46b5e9e840b1fbe0ff02520831f9 +5bc3b6cede8dabdf3f4f27ddb03723cbb7cde51a +c2ea1e6561caba3abffce361abc800822b9e0efe +caa2f106d704ec3ade63498031dd58d34510bc76 +dce853ef76ef90c46d84294225088d595467d08c +dbc8a8c86ae50059fddb2d6834fa5f0c9bbf9b71 +0f921e6a0492c4e9f037a9ed91f474885032d68c +041331e1da23e4136fd046ed870cdcc177464176 
+e6ba5068f107ac234576e77cedbd748b665369c2 +76fcd9d5034143a5b041766552670d19f926097d +72bf1b3d0962304850a3ef5fe375db4bff1d0a39 +919db037f1f5cc73cdcaef92dd9cb0e7f5c8dec3 +c36229b0b2e9d4554053f5c9fc451ac29a493b1f +9e4bb312e6958d2baa309ba670e5eed1523c6f47 +d7ba4a233bd5a6f8fadee681c68a995e23fe36d7 +98514988a3d3e8b7dbf0463884a5c38f5ed5562d +5412c08c3cf13577566064edd04da021c37b7cbe +31bcc667863f368157efa1143a78623a5db8f0d1 +7bd1aa566fb4a4fe194f209085649f2c722b0cff +c4522e71c7e1d8ecfd70112e9375b9d00d6733a8 +e22f409f18881b63a8e747036584a71217f40e6e +97ec6e5c9098a1240655cfcab05b6cd5eedb6cd1 +bc121b0eb19713ec72002b5be03ba5ac35903a17 +c98f6b3d93a2cc1b49a6db425ea2b661089d0f9e +0de7fd36de57a68e543b4c1f184fba192c398c73 +e662d281b837c25b2b70525aa8fe8af894339823 +44adf683ad232db8ce0cb89b3e236a1f5944cfb0 +cb2ed300a89ebf9f0654da869ced665ed8b2abe7 +0a6d48d9ed60b0b02177059ab116f8f46d2cbed3 +b42291334651fff46dbfe5947a726f65cb9d7dfe +e5364991daecb73aca3bb5ac37f2619d7a89211b +4a2b170c075ce703cbdc82519a48016a9ee3f99c +924de0bd75a7f75df65d7d15f9d1587a2e794abf +1253f8692fc3a11be9430685cd405236a68df6c3 +2b799ae9e1e0a540f9a5971ddf27d83254668279 +c9bdf9a75f9fde8cd011e4aa94be4ed4347078a3 +3d69ecb4edeb80003a1a41442e320898a30dbd9c +f08222e882b18c1f279308636e03beceece2dbf1 +23e03f8d26d7bd03273a5dcbdcfe3905dfb49ffb +03dd707dc027fbf6f24120213f8eb66571600374 +d0754799698de2c032abcb8198ee5d5401063213 +072116fceb2294b97d1c40f79305f2e3ff71812b +e66cc1d58e16bf1650dd6479fed64ecaca8c6098 +f137753a2dcd8229f89d1d1ac28039364e5850b4 +61d191fbf953700ba8aeadc9c8cf4c195efbd10c +76f3c02fb01a6df98fbd8c16ac21d159d4649d37 +6013c73b3312e11b447ed387426749014716f820 +6faffb8a83db3f209a303a4464dbdd597faad5a4 +cc9e8aca5f950c78dcfeff63c441ba993c1fe12f +8ca69a2a88a77eb06149fa049ab1a7e6de38b321 +2f71490d21796594ca6f55e375558944de9db5a0 +08cc5fd666456cb476467473ed1880c90c92dedb +e31a43c725ebe641d7c219c3886eee18eebf0bb8 +52b5a8785de760a204b2b0aab19dfaf79c2c3ff0 +483e8e4f4875a1a621ec9e9df2880d3037d95ed7 +1e5799c52535a3fc20e885916f1e7ed33ecc7f46 +a82e5d8220bbc8b5d786bed99b0876f530b9b7cc +7fe6c5c993706e8395cdaf7977bee793c06f48f3 +2a0836f6d5e7c1d7e97bedb0e0ea33dcaf981f77 +ddc308068d69c6c9aa629ee3c4ce75e1d1cf08b5 +ec139a5621a9c9f03e1988391a3c7c6c5d849776 +c01a6c48b982d625fd9f4f69005878781d3d56fa +95a983d56dbda457e3bf8766d59bac74c7aa5699 +760741a00833876976389ed7a6b73f36ee5b4c13 +6e5e5abba6f8bbbe61c22795df440dfafcfdc378 +cf2cecb18779ce83de9adebf382dff1c19b12840 +af9b7a9f2f73b1a2f9728106774dd13e8d1cdd8d +115735d547fdeade822f547eb3e8c8f9961a9b07 +c2c69edf37b5c02aafa01d0407dadbf5ef8751b5 +a072d1a83787e786d074a4b5871b0b961781f7c6 +ed2cd59e258f756b2eaed7909a60956ade6ef7ee +ae5575ba41c8a782805afb1c08730343cfc22397 +6ff2c8d29f6b5a5c2ce63f0a16f3bb0dbd049451 +a80de15113166354cdf208e3d8b6e25f4511a591 +06bd4f637f15e769f088d9051a5af94bbb0217a3 +6700cc993cc07fb0f5b8b577ff8c4afcf0b18274 +37f9a1f627c0995d89b62923e75cd092600894f9 +8844ef15ded02d5ed86fb95aaf251235fcef2396 +1b87e5b5b184a0a6c683eda23b36393822b57f03 +e2bf830bb6c1bfa038c943dd6f5d92a406bd723f +423ca302a3ee87000530da3c105f269b8fabece7 +4e14afe42fdd468d5de11df8cc13defdcb8e83f8 +3e90fe6534206412ea22beaa445cf20d28fbe718 +88b77c7da0a672c89e24df37ea6e9085b4e2a05c +0ad104190465d8d65c2344bbe10dcf3df025d86c +5c7df7022bcd360e6af00b9458b1a3fd54e1cc9a +59ad56851a342d2c62f6b38bf15002b23ab439e1 diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root index e560b98d02..c60f8ab695 100644 --- a/contrib/verify-commits/trusted-git-root +++ b/contrib/verify-commits/trusted-git-root @@ -1 +1 @@ 
-11049f4fe62606d1b0380a9ef800ac130f0fbadf +82bcf405f6db1d55b684a1f63a4aabad376cdad7 diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 77ab9cccbe..540f2e8b84 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -1,6 +1,43 @@ Developer Notes =============== +<!-- markdown-toc start --> +**Table of Contents** + +- [Developer Notes](#developer-notes) + - [Coding Style](#coding-style) + - [Doxygen comments](#doxygen-comments) + - [Development tips and tricks](#development-tips-and-tricks) + - [Compiling for debugging](#compiling-for-debugging) + - [Compiling for gprof profiling](#compiling-for-gprof-profiling) + - [debug.log](#debuglog) + - [Testnet and Regtest modes](#testnet-and-regtest-modes) + - [DEBUG_LOCKORDER](#debug_lockorder) + - [Valgrind suppressions file](#valgrind-suppressions-file) + - [Compiling for test coverage](#compiling-for-test-coverage) + - [Locking/mutex usage notes](#lockingmutex-usage-notes) + - [Threads](#threads) + - [Ignoring IDE/editor files](#ignoring-ideeditor-files) +- [Development guidelines](#development-guidelines) + - [General Bitcoin Core](#general-bitcoin-core) + - [Wallet](#wallet) + - [General C++](#general-c) + - [C++ data structures](#c-data-structures) + - [Strings and formatting](#strings-and-formatting) + - [Variable names](#variable-names) + - [Threads and synchronization](#threads-and-synchronization) + - [Source code organization](#source-code-organization) + - [GUI](#gui) + - [Subtrees](#subtrees) + - [Git and GitHub tips](#git-and-github-tips) + - [Scripted diffs](#scripted-diffs) + - [RPC interface guidelines](#rpc-interface-guidelines) + +<!-- markdown-toc end --> + +Coding Style +--------------- + Various coding styles have been used during the history of the codebase, and the result is not very consistent. However, we're now trying to converge to a single style, which is specified below. When writing patches, favor the new @@ -34,11 +71,14 @@ code. - Constant names are all uppercase, and use `_` to separate words. - Class names, function names and method names are UpperCamelCase (PascalCase). Do not prefix class names with `C`. + - Test suite naming convention: The Boost test suite in file + `src/test/foo_tests.cpp` should be named `foo_tests`. - **Miscellaneous** - `++i` is preferred over `i++`. - `nullptr` is preferred over `NULL` or `(void*)0`. - `static_assert` is preferred over `assert` where possible. Generally; compile-time checking is preferred over run-time checking. + - `enum class` is preferred over `enum` where possible. Scoped enumerations avoid two potential pitfalls/problems with traditional C++ enumerations: implicit conversions to int, and name clashes due to enumerators being exported to the surrounding scope. Block style example: ```c++ @@ -137,43 +177,44 @@ Documentation can be generated with `make docs` and cleaned up with `make clean- Development tips and tricks --------------------------- -**compiling for debugging** +### Compiling for debugging -Run configure with the --enable-debug option, then make. Or run configure with -CXXFLAGS="-g -ggdb -O0" or whatever debug flags you need. +Run configure with `--enable-debug` to add additional compiler flags that +produce better debugging builds. -**compiling for gprof profiling** +### Compiling for gprof profiling -Run configure with the --enable-gprof option, then make. +Run configure with the `--enable-gprof` option, then make. 
-**debug.log** +### debug.log If the code is behaving strangely, take a look in the debug.log file in the data directory; error and debugging messages are written there. -The -debug=... command-line option controls debugging; running with just -debug or -debug=1 will turn +The `-debug=...` command-line option controls debugging; running with just `-debug` or `-debug=1` will turn on all categories (and give you a very large debug.log file). -The Qt code routes qDebug() output to debug.log under category "qt": run with -debug=qt +The Qt code routes `qDebug()` output to debug.log under category "qt": run with `-debug=qt` to see it. -**testnet and regtest modes** +### Testnet and Regtest modes -Run with the -testnet option to run with "play bitcoins" on the test network, if you +Run with the `-testnet` option to run with "play bitcoins" on the test network, if you are testing multi-machine code that needs to operate across the internet. -If you are testing something that can run on one machine, run with the -regtest option. -In regression test mode, blocks can be created on-demand; see test/functional/ for tests -that run in -regtest mode. +If you are testing something that can run on one machine, run with the `-regtest` option. +In regression test mode, blocks can be created on-demand; see [test/functional/](/test/functional) for tests +that run in `-regtest` mode. -**DEBUG_LOCKORDER** +### DEBUG_LOCKORDER -Bitcoin Core is a multithreaded application, and deadlocks or other multithreading bugs -can be very difficult to track down. Compiling with -DDEBUG_LOCKORDER (configure -CXXFLAGS="-DDEBUG_LOCKORDER -g") inserts run-time checks to keep track of which locks -are held, and adds warnings to the debug.log file if inconsistencies are detected. +Bitcoin Core is a multi-threaded application, and deadlocks or other +multi-threading bugs can be very difficult to track down. The `--enable-debug` +configure option adds `-DDEBUG_LOCKORDER` to the compiler flags. This inserts +run-time checks to keep track of which locks are held, and adds warnings to the +debug.log file if inconsistencies are detected. -**Valgrind suppressions file** +### Valgrind suppressions file Valgrind is a programming tool for memory debugging, memory leak detection, and profiling. The repo contains a Valgrind suppressions file @@ -188,7 +229,7 @@ $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ $ valgrind -v --leak-check=full src/bitcoind -printtoconsole ``` -**compiling for test coverage** +### Compiling for test coverage LCOV can be used to generate a test coverage report based upon `make check` execution. LCOV must be installed on your system (e.g. the `lcov` package @@ -204,22 +245,73 @@ make cov # A coverage report will now be accessible at `./test_bitcoin.coverage/index.html`. ``` +**Sanitizers** + +Bitcoin can be compiled with various "sanitizers" enabled, which add +instrumentation for issues regarding things like memory safety, thread race +conditions, or undefined behavior. This is controlled with the +`--with-sanitizers` configure flag, which should be a comma separated list of +sanitizers to enable. The sanitizer list should correspond to supported +`-fsanitize=` options in your compiler. These sanitizers have runtime overhead, +so they are most useful when testing changes or producing debugging builds. 
+ +Some examples: + +```bash +# Enable both the address sanitizer and the undefined behavior sanitizer +./configure --with-sanitizers=address,undefined + +# Enable the thread sanitizer +./configure --with-sanitizers=thread +``` + +If you are compiling with GCC you will typically need to install corresponding +"san" libraries to actually compile with these flags, e.g. libasan for the +address sanitizer, libtsan for the thread sanitizer, and libubsan for the +undefined sanitizer. If you are missing required libraries, the configure script +will fail with a linker error when testing the sanitizer flags. + +The test suite should pass cleanly with the `thread` and `undefined` sanitizers, +but there are a number of known problems when using the `address` sanitizer. The +address sanitizer is known to fail in +[sha256_sse4::Transform](/src/crypto/sha256_sse4.cpp) which makes it unusable +unless you also use `--disable-asm` when running configure. We would like to fix +sanitizer issues, so please send pull requests if you can fix any errors found +by the address sanitizer (or any other sanitizer). + +Not all sanitizer options can be enabled at the same time, e.g. trying to build +with `--with-sanitizers=address,thread` will fail in the configure script as +these sanitizers are mutually incompatible. Refer to your compiler manual to +learn more about these options and which sanitizers are supported by your +compiler. + +Additional resources: + + * [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html) + * [LeakSanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) + * [MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html) + * [ThreadSanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html) + * [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html) + * [GCC Instrumentation Options](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html) + * [Google Sanitizers Wiki](https://github.com/google/sanitizers/wiki) + * [Issue #12691: Enable -fsanitize flags in Travis](https://github.com/bitcoin/bitcoin/issues/12691) + Locking/mutex usage notes ------------------------- The code is multi-threaded, and uses mutexes and the -LOCK/TRY_LOCK macros to protect data structures. +`LOCK` and `TRY_LOCK` macros to protect data structures. -Deadlocks due to inconsistent lock ordering (thread 1 locks cs_main -and then cs_wallet, while thread 2 locks them in the opposite order: -result, deadlock as each waits for the other to release its lock) are -a problem. Compile with -DDEBUG_LOCKORDER to get lock order -inconsistencies reported in the debug.log file. +Deadlocks due to inconsistent lock ordering (thread 1 locks `cs_main` and then +`cs_wallet`, while thread 2 locks them in the opposite order: result, deadlock +as each waits for the other to release its lock) are a problem. Compile with +`-DDEBUG_LOCKORDER` (or use `--enable-debug`) to get lock order inconsistencies +reported in the debug.log file. Re-architecting the core code so there are better-defined interfaces between the various components is a goal, with any necessary locking -done by the components (e.g. see the self-contained CKeyStore class -and its cs_KeyStore lock for example). +done by the components (e.g. see the self-contained `CBasicKeyStore` class +and its `cs_KeyStore` lock for example). Threads ------- @@ -552,7 +644,10 @@ its upstream repository. 
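As a companion to the lock-ordering notes above, here is a small, self-contained sketch (plain `std::mutex` and hypothetical names rather than Bitcoin Core's `LOCK`/`TRY_LOCK` macros) of the discipline that avoids the described deadlock: every thread acquires the two locks in the same order.

```c++
// Hypothetical sketch: two mutexes standing in for cs_main and cs_wallet.
// Both threads take them in the same order, so neither can end up waiting
// for a lock the other already holds.
#include <mutex>
#include <thread>

std::mutex cs_main_demo;    // stands in for cs_main
std::mutex cs_wallet_demo;  // stands in for cs_wallet

void ThreadA()
{
    std::lock_guard<std::mutex> lock_main(cs_main_demo);     // chain state first
    std::lock_guard<std::mutex> lock_wallet(cs_wallet_demo); // then wallet state
    // ... work that needs both ...
}

void ThreadB()
{
    // Same order as ThreadA. Taking cs_wallet_demo first here would create the
    // inconsistent ordering that -DDEBUG_LOCKORDER reports in debug.log.
    std::lock_guard<std::mutex> lock_main(cs_main_demo);
    std::lock_guard<std::mutex> lock_wallet(cs_wallet_demo);
    // ... work that needs both ...
}

int main()
{
    std::thread a(ThreadA);
    std::thread b(ThreadB);
    a.join();
    b.join();
    return 0;
}
```

When the real locks are used with `--enable-debug` (and therefore `-DDEBUG_LOCKORDER`), any inconsistent acquisition order is reported in debug.log as described above.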
Current subtrees include: - src/leveldb - - Upstream at https://github.com/google/leveldb ; Maintained by Google, but open important PRs to Core to avoid delay + - Upstream at https://github.com/google/leveldb ; Maintained by Google, but + open important PRs to Core to avoid delay. + - **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when + merging upstream changes to the leveldb subtree. - src/libsecp256k1 - Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintained by Core contributors. @@ -563,6 +658,52 @@ Current subtrees include: - src/univalue - Upstream at https://github.com/jgarzik/univalue ; report important PRs to Core to avoid delay. +Upgrading LevelDB +--------------------- + +Extra care must be taken when upgrading LevelDB. This section explains issues +you must be aware of. + +### File Descriptor Counts + +In most configurations we use the default LevelDB value for `max_open_files`, +which is 1000 at the time of this writing. If LevelDB actually uses this many +file descriptors it will cause problems with Bitcoin's `select()` loop, because +it may cause new sockets to be created where the fd value is >= 1024. For this +reason, on 64-bit Unix systems we rely on an internal LevelDB optimization that +uses `mmap()` + `close()` to open table files without actually retaining +references to the table file descriptors. If you are upgrading LevelDB, you must +sanity check the changes to make sure that this assumption remains valid. + +In addition to reviewing the upstream changes in `env_posix.cc`, you can use `lsof` to +check this. For example, on Linux this command will show open `.ldb` file counts: + +```bash +$ lsof -p $(pidof bitcoind) |\ + awk 'BEGIN { fd=0; mem=0; } /ldb$/ { if ($4 == "mem") mem++; else fd++ } END { printf "mem = %s, fd = %s\n", mem, fd}' +mem = 119, fd = 0 +``` + +The `mem` value shows how many files are mmap'ed, and the `fd` value shows how +many file descriptors these files are using. You should check that `fd` is a +small number (usually 0 on 64-bit hosts). + +See the notes in the `SetMaxOpenFiles()` function in `dbwrapper.cc` for more +details. + +### Consensus Compatibility + +It is possible for LevelDB changes to inadvertently change consensus +compatibility between nodes. This happened in Bitcoin 0.8 (when LevelDB was +first introduced). When upgrading LevelDB you should review the upstream changes +to check for issues affecting consensus compatibility. + +For example, if LevelDB had a bug that accidentally prevented a key from being +returned in an edge case, and that bug was fixed upstream, the bug "fix" would +be an incompatible consensus change. In this situation the correct behavior +would be to revert the upstream fix before applying the updates to Bitcoin's +copy of LevelDB. In general you should be wary of any upstream changes affecting +what data is returned from LevelDB queries. Git and GitHub tips --------------------- diff --git a/doc/release-notes.md b/doc/release-notes.md index a79012722f..48ee364c18 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -63,6 +63,17 @@ RPC changes - The `createrawtransaction` RPC will now accept an array or dictionary (kept for compatibility) for the `outputs` parameter. This means the order of transaction outputs can be specified by the client. - The `fundrawtransaction` RPC will reject the previously deprecated `reserveChangeKey` option.
+- Wallet `getnewaddress` and `addmultisigaddress` RPC `account` named + parameters have been renamed to `label` with no change in behavior. +- Wallet `getlabeladdress`, `getreceivedbylabel`, `listreceivedbylabel`, and + `setlabel` RPCs have been added to replace `getaccountaddress`, + `getreceivedbyaccount`, `listreceivedbyaccount`, and `setaccount` RPCs, + which are now deprecated. There is no change in behavior between the + new RPCs and deprecated RPCs. +- Wallet `listreceivedbylabel`, `listreceivedbyaccount` and `listunspent` RPCs + add `label` fields to returned JSON objects that previously only had + `account` fields. +- `sendmany` now shuffles outputs to improve privacy, so any previously expected behavior with regards to output ordering can no longer be relied upon. External wallet files --------------------- @@ -98,6 +109,19 @@ Low-level RPC changes - The log timestamp format is now ISO 8601 (e.g. "2018-02-28T12:34:56Z"). +Miner block size removed +------------------------ + +The `-blockmaxsize` option for miners to limit their blocks' sizes was +deprecated in V0.15.1, and has now been removed. Miners should use the +`-blockmaxweight` option if they want to limit the weight of their +blocks. + +Python Support +-------------- + +Support for Python 2 has been discontinued for all test files and tools. + Credits ======= diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py index fbf3ea3436..e8f0820ca8 100755 --- a/share/qt/extract_strings_qt.py +++ b/share/qt/extract_strings_qt.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -6,7 +6,6 @@ Extract _("...") strings for translation and convert to Qt stringdefs so that they can be picked up by Qt linguist. ''' -from __future__ import division,print_function,unicode_literals from subprocess import Popen, PIPE import operator import os diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py index 7baad85582..d6580281d4 100755 --- a/share/rpcauth/rpcauth.py +++ b/share/rpcauth/rpcauth.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
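The `sendmany` change in the release notes above (outputs are shuffled to improve privacy) amounts to randomizing the order of a transaction's outputs so that their position carries no information. A minimal, hypothetical sketch of the idea, not the wallet's actual implementation:

```c++
// Hypothetical sketch of output shuffling: recipients (and any change output)
// are randomly reordered before the transaction is built, so nothing can be
// inferred from output positions.
#include <algorithm>
#include <random>
#include <string>
#include <vector>

struct DemoOutput {
    std::string address;
    long long amount_satoshis;
};

int main()
{
    std::vector<DemoOutput> outputs = {
        {"payment_address", 150000},
        {"change_address", 42000},
        {"second_payment_address", 99000},
    };

    // Shuffle with a non-deterministically seeded generator so the final
    // ordering is unpredictable.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::shuffle(outputs.begin(), outputs.end(), gen);
    return 0;
}
```

This is also why the release note warns that any previously expected output ordering can no longer be relied upon.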
diff --git a/src/Makefile.am b/src/Makefile.am index 7c2fe56d9d..72e5cdb95d 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -4,8 +4,8 @@ DIST_SUBDIRS = secp256k1 univalue -AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) -AM_CXXFLAGS = $(HARDENED_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS) +AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) $(SANITIZER_LDFLAGS) +AM_CXXFLAGS = $(HARDENED_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS) $(SANITIZER_CXXFLAGS) AM_CPPFLAGS = $(HARDENED_CPPFLAGS) EXTRA_LIBRARIES = @@ -162,6 +162,7 @@ BITCOIN_CORE_H = \ validation.h \ validationinterface.h \ versionbits.h \ + walletinitinterface.h \ wallet/coincontrol.h \ wallet/crypter.h \ wallet/db.h \ diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 675f3c28af..e4620e63c6 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -22,8 +22,8 @@ bool SerializeDB(Stream& stream, const Data& data) // Write and commit header, data try { CHashWriter hasher(SER_DISK, CLIENT_VERSION); - stream << FLATDATA(Params().MessageStart()) << data; - hasher << FLATDATA(Params().MessageStart()) << data; + stream << Params().MessageStart() << data; + hasher << Params().MessageStart() << data; stream << hasher.GetHash(); } catch (const std::exception& e) { return error("%s: Serialize or I/O error - %s", __func__, e.what()); @@ -66,7 +66,7 @@ bool DeserializeDB(Stream& stream, Data& data, bool fCheckSum = true) CHashVerifier<Stream> verifier(&stream); // de-serialize file header (network specific magic number) and .. unsigned char pchMsgTmp[4]; - verifier >> FLATDATA(pchMsgTmp); + verifier >> pchMsgTmp; // ... verify the network matches ours if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp))) return error("%s: Invalid network magic number", __func__); diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp index ca30d81e59..55a00318c1 100644 --- a/src/bench/lockedpool.cpp +++ b/src/bench/lockedpool.cpp @@ -43,4 +43,4 @@ static void BenchLockedPool(benchmark::State& state) addr.clear(); } -BENCHMARK(BenchLockedPool, 530); +BENCHMARK(BenchLockedPool, 1300); diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp index 29dedeef0b..705fa368a5 100644 --- a/src/bench/verify_script.cpp +++ b/src/bench/verify_script.cpp @@ -75,7 +75,7 @@ static void VerifyScriptBench(benchmark::State& state) CMutableTransaction txSpend = BuildSpendingTransaction(scriptSig, txCredit); CScriptWitness& witness = txSpend.vin[0].scriptWitness; witness.stack.emplace_back(); - key.Sign(SignatureHash(witScriptPubkey, txSpend, 0, SIGHASH_ALL, txCredit.vout[0].nValue, SIGVERSION_WITNESS_V0), witness.stack.back(), 0); + key.Sign(SignatureHash(witScriptPubkey, txSpend, 0, SIGHASH_ALL, txCredit.vout[0].nValue, SigVersion::WITNESS_V0), witness.stack.back(), 0); witness.stack.back().push_back(static_cast<unsigned char>(SIGHASH_ALL)); witness.stack.push_back(ToByteVector(pubkey)); diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index d3eb60725f..df7fad89c2 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -18,6 +18,10 @@ #include <httpserver.h> #include <httprpc.h> #include <utilstrencodings.h> +#if ENABLE_WALLET +#include <wallet/init.h> +#endif +#include <walletinitinterface.h> #include <boost/thread.hpp> @@ -59,6 +63,12 @@ bool AppInit(int argc, char* argv[]) { bool fRet = false; +#if ENABLE_WALLET + g_wallet_init_interface.reset(new WalletInit); +#else + g_wallet_init_interface.reset(new DummyWalletInit); +#endif + // // Parameters // @@ -79,7 +89,7 
@@ bool AppInit(int argc, char* argv[]) strUsage += "\n" + _("Usage:") + "\n" + " bitcoind [options] " + strprintf(_("Start %s Daemon"), _(PACKAGE_NAME)) + "\n"; - strUsage += "\n" + HelpMessage(HMM_BITCOIND); + strUsage += "\n" + HelpMessage(HelpMessageMode::BITCOIND); } fprintf(stdout, "%s", strUsage.c_str()); diff --git a/src/chainparams.cpp b/src/chainparams.cpp index c2b3480f9d..6067503b0b 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -135,6 +135,7 @@ public: vSeeds.emplace_back("seed.bitcoinstats.com"); // Christian Decker, supports x1 - xf vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch"); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.org"); // Peter Todd, only supports x1, x5, x9, and xd + vSeeds.emplace_back("seed.bitcoin.sprovoost.nl"); // Sjors Provoost base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0); base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5); diff --git a/src/compressor.cpp b/src/compressor.cpp index 86de2900e9..da639a01af 100644 --- a/src/compressor.cpp +++ b/src/compressor.cpp @@ -9,7 +9,15 @@ #include <pubkey.h> #include <script/standard.h> -bool CScriptCompressor::IsToKeyID(CKeyID &hash) const +/* + * These check for scripts for which a special case with a shorter encoding is defined. + * They are implemented separately from the CScript test, as these test for exact byte + * sequence correspondences, and are more strict. For example, IsToPubKey also verifies + * whether the public key is valid (as invalid ones cannot be represented in compressed + * form). + */ + +static bool IsToKeyID(const CScript& script, CKeyID &hash) { if (script.size() == 25 && script[0] == OP_DUP && script[1] == OP_HASH160 && script[2] == 20 && script[23] == OP_EQUALVERIFY @@ -20,7 +28,7 @@ bool CScriptCompressor::IsToKeyID(CKeyID &hash) const return false; } -bool CScriptCompressor::IsToScriptID(CScriptID &hash) const +static bool IsToScriptID(const CScript& script, CScriptID &hash) { if (script.size() == 23 && script[0] == OP_HASH160 && script[1] == 20 && script[22] == OP_EQUAL) { @@ -30,7 +38,7 @@ bool CScriptCompressor::IsToScriptID(CScriptID &hash) const return false; } -bool CScriptCompressor::IsToPubKey(CPubKey &pubkey) const +static bool IsToPubKey(const CScript& script, CPubKey &pubkey) { if (script.size() == 35 && script[0] == 33 && script[34] == OP_CHECKSIG && (script[1] == 0x02 || script[1] == 0x03)) { @@ -45,24 +53,24 @@ bool CScriptCompressor::IsToPubKey(CPubKey &pubkey) const return false; } -bool CScriptCompressor::Compress(std::vector<unsigned char> &out) const +bool CompressScript(const CScript& script, std::vector<unsigned char> &out) { CKeyID keyID; - if (IsToKeyID(keyID)) { + if (IsToKeyID(script, keyID)) { out.resize(21); out[0] = 0x00; memcpy(&out[1], &keyID, 20); return true; } CScriptID scriptID; - if (IsToScriptID(scriptID)) { + if (IsToScriptID(script, scriptID)) { out.resize(21); out[0] = 0x01; memcpy(&out[1], &scriptID, 20); return true; } CPubKey pubkey; - if (IsToPubKey(pubkey)) { + if (IsToPubKey(script, pubkey)) { out.resize(33); memcpy(&out[1], &pubkey[1], 32); if (pubkey[0] == 0x02 || pubkey[0] == 0x03) { @@ -76,7 +84,7 @@ bool CScriptCompressor::Compress(std::vector<unsigned char> &out) const return false; } -unsigned int CScriptCompressor::GetSpecialSize(unsigned int nSize) const +unsigned int GetSpecialScriptSize(unsigned int nSize) { if (nSize == 0 || nSize == 1) return 20; @@ -85,7 +93,7 @@ unsigned int CScriptCompressor::GetSpecialSize(unsigned int nSize) 
const return 0; } -bool CScriptCompressor::Decompress(unsigned int nSize, const std::vector<unsigned char> &in) +bool DecompressScript(CScript& script, unsigned int nSize, const std::vector<unsigned char> &in) { switch(nSize) { case 0x00: @@ -139,7 +147,7 @@ bool CScriptCompressor::Decompress(unsigned int nSize, const std::vector<unsigne // * if e==9, we only know the resulting number is not zero, so output 1 + 10*(n - 1) + 9 // (this is decodable, as d is in [1-9] and e is in [0-9]) -uint64_t CTxOutCompressor::CompressAmount(uint64_t n) +uint64_t CompressAmount(uint64_t n) { if (n == 0) return 0; @@ -158,7 +166,7 @@ uint64_t CTxOutCompressor::CompressAmount(uint64_t n) } } -uint64_t CTxOutCompressor::DecompressAmount(uint64_t x) +uint64_t DecompressAmount(uint64_t x) { // x = 0 OR x = 1+10*(9*n + d - 1) + e OR x = 1+10*(n - 1) + 9 if (x == 0) diff --git a/src/compressor.h b/src/compressor.h index 6fcecd27e9..561c8e66d0 100644 --- a/src/compressor.h +++ b/src/compressor.h @@ -14,6 +14,13 @@ class CKeyID; class CPubKey; class CScriptID; +bool CompressScript(const CScript& script, std::vector<unsigned char> &out); +unsigned int GetSpecialScriptSize(unsigned int nSize); +bool DecompressScript(CScript& script, unsigned int nSize, const std::vector<unsigned char> &out); + +uint64_t CompressAmount(uint64_t nAmount); +uint64_t DecompressAmount(uint64_t nAmount); + /** Compact serializer for scripts. * * It detects common cases and encodes them much more efficiently. @@ -37,28 +44,13 @@ private: static const unsigned int nSpecialScripts = 6; CScript &script; -protected: - /** - * These check for scripts for which a special case with a shorter encoding is defined. - * They are implemented separately from the CScript test, as these test for exact byte - * sequence correspondences, and are more strict. For example, IsToPubKey also verifies - * whether the public key is valid (as invalid ones cannot be represented in compressed - * form). - */ - bool IsToKeyID(CKeyID &hash) const; - bool IsToScriptID(CScriptID &hash) const; - bool IsToPubKey(CPubKey &pubkey) const; - - bool Compress(std::vector<unsigned char> &out) const; - unsigned int GetSpecialSize(unsigned int nSize) const; - bool Decompress(unsigned int nSize, const std::vector<unsigned char> &out); public: explicit CScriptCompressor(CScript &scriptIn) : script(scriptIn) { } template<typename Stream> void Serialize(Stream &s) const { std::vector<unsigned char> compr; - if (Compress(compr)) { + if (CompressScript(script, compr)) { s << CFlatData(compr); return; } @@ -72,9 +64,9 @@ public: unsigned int nSize = 0; s >> VARINT(nSize); if (nSize < nSpecialScripts) { - std::vector<unsigned char> vch(GetSpecialSize(nSize), 0x00); + std::vector<unsigned char> vch(GetSpecialScriptSize(nSize), 0x00); s >> CFlatData(vch); - Decompress(nSize, vch); + DecompressScript(script, nSize, vch); return; } nSize -= nSpecialScripts; @@ -96,9 +88,6 @@ private: CTxOut &txout; public: - static uint64_t CompressAmount(uint64_t nAmount); - static uint64_t DecompressAmount(uint64_t nAmount); - explicit CTxOutCompressor(CTxOut &txoutIn) : txout(txoutIn) { } ADD_SERIALIZE_METHODS; diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index fb0d4215a2..752f985bc0 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -71,6 +71,31 @@ public: } }; +static void SetMaxOpenFiles(leveldb::Options *options) { + // On most platforms the default setting of max_open_files (which is 1000) + // is optimal. 
On Windows using a large file count is OK because the handles + // do not interfere with select() loops. On 64-bit Unix hosts this value is + // also OK, because up to that amount LevelDB will use an mmap + // implementation that does not use extra file descriptors (the fds are + // closed after being mmaped). + // + // Increasing the value beyond the default is dangerous because LevelDB will + // fall back to a non-mmap implementation when the file count is too large. + // On 32-bit Unix host we should decrease the value because the handles use + // up real fds, and we want to avoid fd exhaustion issues. + // + // See PR #12495 for further discussion. + + int default_open_files = options->max_open_files; +#ifndef WIN32 + if (sizeof(void*) < 8) { + options->max_open_files = 64; + } +#endif + LogPrint(BCLog::LEVELDB, "LevelDB using max_open_files=%d (default=%d)\n", + options->max_open_files, default_open_files); +} + static leveldb::Options GetOptions(size_t nCacheSize) { leveldb::Options options; @@ -78,13 +103,13 @@ static leveldb::Options GetOptions(size_t nCacheSize) options.write_buffer_size = nCacheSize / 4; // up to two write buffers may be held in memory simultaneously options.filter_policy = leveldb::NewBloomFilterPolicy(10); options.compression = leveldb::kNoCompression; - options.max_open_files = 64; options.info_log = new CBitcoinLevelDBLogger(); if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) { // LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error // on corruption in later versions. options.paranoid_checks = true; } + SetMaxOpenFiles(&options); return options; } @@ -159,12 +184,12 @@ bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync) const bool log_memory = LogAcceptCategory(BCLog::LEVELDB); double mem_before = 0; if (log_memory) { - mem_before = DynamicMemoryUsage() / 1024 / 1024; + mem_before = DynamicMemoryUsage() / 1024.0 / 1024; } leveldb::Status status = pdb->Write(fSync ? 
syncoptions : writeoptions, &batch.batch); dbwrapper_private::HandleError(status); if (log_memory) { - double mem_after = DynamicMemoryUsage() / 1024 / 1024; + double mem_after = DynamicMemoryUsage() / 1024.0 / 1024; LogPrint(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n", m_name, mem_before, mem_after); } diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 82ae733006..0a70619ba6 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -158,8 +158,9 @@ static bool HTTPReq_JSONRPC(HTTPRequest* req, const std::string &) } JSONRPCRequest jreq; + jreq.peerAddr = req->GetPeer().ToString(); if (!RPCAuthorized(authHeader.second, jreq.authUser)) { - LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", req->GetPeer().ToString()); + LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr); /* Deter brute-forcing If this results in a DoS the user really @@ -252,6 +253,9 @@ void StopHTTPRPC() { LogPrint(BCLog::RPC, "Stopping HTTP RPC server\n"); UnregisterHTTPHandler("/", true); +#ifdef ENABLE_WALLET + UnregisterHTTPHandler("/wallet/", false); +#endif if (httpRPCTimerInterface) { RPCUnsetTimerInterface(httpRPCTimerInterface.get()); httpRPCTimerInterface.reset(); diff --git a/src/init.cpp b/src/init.cpp index e0efe5c0a2..f6f522da66 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -43,10 +43,8 @@ #include <util.h> #include <utilmoneystr.h> #include <validationinterface.h> -#ifdef ENABLE_WALLET -#include <wallet/init.h> -#endif #include <warnings.h> +#include <walletinitinterface.h> #include <stdint.h> #include <stdio.h> #include <memory> @@ -74,6 +72,7 @@ static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false; std::unique_ptr<CConnman> g_connman; std::unique_ptr<PeerLogicValidation> peerLogic; +std::unique_ptr<WalletInitInterface> g_wallet_init_interface; #if ENABLE_ZMQ static CZMQNotificationInterface* pzmqNotificationInterface = nullptr; @@ -116,7 +115,6 @@ static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat"; // std::atomic<bool> fRequestShutdown(false); -std::atomic<bool> fDumpMempoolLater(false); void StartShutdown() { @@ -189,9 +187,7 @@ void Shutdown() StopREST(); StopRPC(); StopHTTPServer(); -#ifdef ENABLE_WALLET - FlushWallets(); -#endif + g_wallet_init_interface->Flush(); StopMapPort(); // Because these depend on each-other, we make sure that neither can be @@ -208,7 +204,7 @@ void Shutdown() threadGroup.interrupt_all(); threadGroup.join_all(); - if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { + if (g_is_mempool_loaded && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { DumpMempool(); } @@ -249,9 +245,7 @@ void Shutdown() pcoinsdbview.reset(); pblocktree.reset(); } -#ifdef ENABLE_WALLET - StopWallets(); -#endif + g_wallet_init_interface->Stop(); #if ENABLE_ZMQ if (pzmqNotificationInterface) { @@ -271,9 +265,8 @@ void Shutdown() UnregisterAllValidationInterfaces(); GetMainSignals().UnregisterBackgroundSignalScheduler(); GetMainSignals().UnregisterWithMempoolSignals(mempool); -#ifdef ENABLE_WALLET - CloseWallets(); -#endif + g_wallet_init_interface->Close(); + g_wallet_init_interface.reset(); globalVerifyHandle.reset(); ECC_Stop(); LogPrintf("%s: done\n", __func__); @@ -333,12 +326,13 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-version", _("Print version and exit")); strUsage += HelpMessageOpt("-alertnotify=<cmd>", _("Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)")); 
strUsage +=HelpMessageOpt("-assumevalid=<hex>", strprintf(_("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)"), defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex())); + strUsage += HelpMessageOpt("-blocksdir=<dir>", _("Specify blocks directory (default: <datadir>/blocks)")); strUsage += HelpMessageOpt("-blocknotify=<cmd>", _("Execute command when the best block changes (%s in cmd is replaced by block hash)")); strUsage += HelpMessageOpt("-blockreconstructionextratxn=<n>", strprintf(_("Extra transactions to keep in memory for compact block reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN)); if (showDebug) strUsage += HelpMessageOpt("-blocksonly", strprintf(_("Whether to operate in a blocks only mode (default: %u)"), DEFAULT_BLOCKSONLY)); strUsage += HelpMessageOpt("-conf=<file>", strprintf(_("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)"), BITCOIN_CONF_FILENAME)); - if (mode == HMM_BITCOIND) + if (mode == HelpMessageMode::BITCOIND) { #if HAVE_DECL_DAEMON strUsage += HelpMessageOpt("-daemon", _("Run in the background as a daemon and accept commands")); @@ -415,9 +409,7 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-whitelist=<IP address or network>", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway")); -#ifdef ENABLE_WALLET - strUsage += GetWalletHelpString(showDebug); -#endif + strUsage += g_wallet_init_interface->GetHelpString(showDebug); #if ENABLE_ZMQ strUsage += HelpMessageGroup(_("ZeroMQ notification options:")); @@ -428,18 +420,16 @@ std::string HelpMessage(HelpMessageMode mode) #endif strUsage += HelpMessageGroup(_("Debugging/Testing options:")); - if (showDebug) - { + if (showDebug) { strUsage += HelpMessageOpt("-checkblocks=<n>", strprintf(_("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=<n>", strprintf(_("How thorough the block verification of -checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); - strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. Also sets -checkmempool (default: %u)", defaultChainParams->DefaultConsistencyChecks())); + strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. 
(default: %u)", defaultChainParams->DefaultConsistencyChecks())); strUsage += HelpMessageOpt("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u)", defaultChainParams->DefaultConsistencyChecks())); strUsage += HelpMessageOpt("-checkpoints", strprintf("Disable expensive verification for known chain history (default: %u)", DEFAULT_CHECKPOINTS_ENABLED)); strUsage += HelpMessageOpt("-disablesafemode", strprintf("Disable safemode, override a real safe mode event (default: %u)", DEFAULT_DISABLE_SAFEMODE)); strUsage += HelpMessageOpt("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used"); strUsage += HelpMessageOpt("-testsafemode", strprintf("Force safe mode (default: %u)", DEFAULT_TESTSAFEMODE)); strUsage += HelpMessageOpt("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages"); - strUsage += HelpMessageOpt("-fuzzmessagestest=<n>", "Randomly fuzz 1 of every <n> network messages"); strUsage += HelpMessageOpt("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT)); strUsage += HelpMessageOpt("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT)); @@ -491,8 +481,6 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY)); strUsage += HelpMessageGroup(_("Block creation options:")); - if (showDebug) - strUsage += HelpMessageOpt("-blockmaxsize=<n>", "Set maximum BIP141 block weight to this * 4. Deprecated, use blockmaxweight"); strUsage += HelpMessageOpt("-blockmaxweight=<n>", strprintf(_("Set maximum BIP141 block weight (default: %d)"), DEFAULT_BLOCK_MAX_WEIGHT)); strUsage += HelpMessageOpt("-blockmintxfee=<amt>", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE))); if (showDebug) @@ -596,7 +584,7 @@ void CleanupBlockRevFiles() // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. 
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n"); - fs::path blocksdir = GetDataDir() / "blocks"; + fs::path blocksdir = GetBlocksDir(); for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { if (fs::is_regular_file(*it) && it->path().filename().string().length() == 12 && @@ -693,8 +681,8 @@ void ThreadImport(std::vector<fs::path> vImportFiles) } // End scope of CImportingNow if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { LoadMempool(); - fDumpMempoolLater = !fRequestShutdown; } + g_is_mempool_loaded = !fRequestShutdown; } /** Sanity checks @@ -798,15 +786,6 @@ void InitParameterInteraction() if (gArgs.SoftSetBoolArg("-whitelistrelay", true)) LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__); } - - if (gArgs.IsArgSet("-blockmaxsize")) { - unsigned int max_size = gArgs.GetArg("-blockmaxsize", 0); - if (gArgs.SoftSetArg("blockmaxweight", strprintf("%d", max_size * WITNESS_SCALE_FACTOR))) { - LogPrintf("%s: parameter interaction: -blockmaxsize=%d -> setting -blockmaxweight=%d (-blockmaxsize is deprecated!)\n", __func__, max_size, max_size * WITNESS_SCALE_FACTOR); - } else { - LogPrintf("%s: Ignoring blockmaxsize setting which is overridden by blockmaxweight", __func__); - } - } } static std::string ResolveErrMsg(const char * const optname, const std::string& strBind) @@ -908,6 +887,10 @@ bool AppInitParameterInteraction() // also see: InitParameterInteraction() + if (!fs::is_directory(GetBlocksDir(false))) { + return InitError(strprintf(_("Specified blocks directory \"%s\" does not exist.\n"), gArgs.GetArg("-blocksdir", "").c_str())); + } + // if using block pruning, then disallow txindex if (gArgs.GetArg("-prune", 0)) { if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) @@ -1093,10 +1076,7 @@ bool AppInitParameterInteraction() return InitError(strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp); -#ifdef ENABLE_WALLET - if (!WalletParameterInteraction()) - return false; -#endif + if (!g_wallet_init_interface->ParameterInteraction()) return false; fIsBareMultisigStd = gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); fAcceptDatacarrier = gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); @@ -1165,6 +1145,9 @@ static bool LockDataDirectory(bool probeOnly) { // Make sure only a single Bitcoin process is using the data directory. fs::path datadir = GetDataDir(); + if (!DirIsWritable(datadir)) { + return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), datadir.string())); + } if (!LockDirectory(datadir, ".lock", probeOnly)) { return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), datadir.string(), _(PACKAGE_NAME))); } @@ -1259,9 +1242,7 @@ bool AppInitMain() * available in the GUI RPC console even if external calls are disabled. */ RegisterAllCoreRPCCommands(tableRPC); -#ifdef ENABLE_WALLET - RegisterWalletRPC(tableRPC); -#endif + g_wallet_init_interface->RegisterRPC(tableRPC); /* Start the RPC server already. 
It will be started in "warmup" mode * and not really process calls already (but it will signify connections @@ -1278,10 +1259,8 @@ bool AppInitMain() int64_t nStart; // ********************************************************* Step 5: verify wallet database integrity -#ifdef ENABLE_WALLET - if (!VerifyWallets()) - return false; -#endif + if (!g_wallet_init_interface->Verify()) return false; + // ********************************************************* Step 6: network initialization // Note that we absolutely cannot open any actual connections // until the very end ("start node") as the UTXO/block state @@ -1599,12 +1578,7 @@ bool AppInitMain() fFeeEstimatesInitialized = true; // ********************************************************* Step 8: load wallet -#ifdef ENABLE_WALLET - if (!OpenWallets()) - return false; -#else - LogPrintf("No wallet support compiled in!\n"); -#endif + if (!g_wallet_init_interface->Open()) return false; // ********************************************************* Step 9: data directory maintenance @@ -1630,7 +1604,7 @@ bool AppInitMain() // ********************************************************* Step 10: import blocks - if (!CheckDiskSpace()) + if (!CheckDiskSpace() && !CheckDiskSpace(0, true)) return false; // Either install a handler to notify us when genesis activates, or set fHaveGenesis directly. @@ -1750,9 +1724,7 @@ bool AppInitMain() SetRPCWarmupFinished(); uiInterface.InitMessage(_("Done loading")); -#ifdef ENABLE_WALLET - StartWallets(scheduler); -#endif + g_wallet_init_interface->Start(scheduler); return true; } diff --git a/src/init.h b/src/init.h index 33f97a55a5..c93a210154 100644 --- a/src/init.h +++ b/src/init.h @@ -6,11 +6,15 @@ #ifndef BITCOIN_INIT_H #define BITCOIN_INIT_H +#include <memory> #include <string> class CScheduler; class CWallet; +class WalletInitInterface; +extern std::unique_ptr<WalletInitInterface> g_wallet_init_interface; + namespace boost { class thread_group; @@ -57,9 +61,9 @@ bool AppInitLockDataDirectory(); bool AppInitMain(); /** The help message mode determines what help message to show */ -enum HelpMessageMode { - HMM_BITCOIND, - HMM_BITCOIN_QT +enum class HelpMessageMode { + BITCOIND, + BITCOIN_QT }; /** Help for options shared between UI and daemon (for -help) */ diff --git a/src/keystore.cpp b/src/keystore.cpp index fab1b81c9a..dfdfa5ea9f 100644 --- a/src/keystore.cpp +++ b/src/keystore.cpp @@ -7,10 +7,6 @@ #include <util.h> -bool CKeyStore::AddKey(const CKey &key) { - return AddKeyPubKey(key, key.GetPubKey()); -} - void CBasicKeyStore::ImplicitlyLearnRelatedKeyScripts(const CPubKey& pubkey) { AssertLockHeld(cs_KeyStore); diff --git a/src/keystore.h b/src/keystore.h index ffd3238fd6..fa912cb195 100644 --- a/src/keystore.h +++ b/src/keystore.h @@ -9,35 +9,27 @@ #include <key.h> #include <pubkey.h> #include <script/script.h> +#include <script/sign.h> #include <script/standard.h> #include <sync.h> #include <boost/signals2/signal.hpp> /** A virtual base class for key stores */ -class CKeyStore +class CKeyStore : public SigningProvider { -protected: - mutable CCriticalSection cs_KeyStore; - public: - virtual ~CKeyStore() {} - //! Add a key to the store. virtual bool AddKeyPubKey(const CKey &key, const CPubKey &pubkey) =0; - virtual bool AddKey(const CKey &key); //! Check whether a key corresponding to a given address is present in the store. 
virtual bool HaveKey(const CKeyID &address) const =0; - virtual bool GetKey(const CKeyID &address, CKey& keyOut) const =0; virtual std::set<CKeyID> GetKeys() const =0; - virtual bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const =0; //! Support for BIP 0013 : see https://github.com/bitcoin/bips/blob/master/bip-0013.mediawiki virtual bool AddCScript(const CScript& redeemScript) =0; virtual bool HaveCScript(const CScriptID &hash) const =0; virtual std::set<CScriptID> GetCScripts() const =0; - virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const =0; //! Support for Watch-only addresses virtual bool AddWatchOnly(const CScript &dest) =0; @@ -55,6 +47,8 @@ typedef std::set<CScript> WatchOnlySet; class CBasicKeyStore : public CKeyStore { protected: + mutable CCriticalSection cs_KeyStore; + KeyMap mapKeys; WatchKeyMap mapWatchKeys; ScriptMap mapScripts; @@ -64,6 +58,7 @@ protected: public: bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override; + bool AddKey(const CKey &key) { return AddKeyPubKey(key, key.GetPubKey()); } bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override; bool HaveKey(const CKeyID &address) const override; std::set<CKeyID> GetKeys() const override; diff --git a/src/miner.cpp b/src/miner.cpp index 4b86446774..0660df928c 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -68,9 +68,7 @@ BlockAssembler::BlockAssembler(const CChainParams& params, const Options& option static BlockAssembler::Options DefaultOptions(const CChainParams& params) { // Block resource limits - // If neither -blockmaxsize or -blockmaxweight is given, limit to DEFAULT_BLOCK_MAX_* - // If only one is given, only restrict the specified resource. - // If both are given, restrict both. + // If -blockmaxweight is not given, limit to DEFAULT_BLOCK_MAX_WEIGHT BlockAssembler::Options options; options.nBlockMaxWeight = gArgs.GetArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT); if (gArgs.IsArgSet("-blockmintxfee")) { diff --git a/src/netaddress.h b/src/netaddress.h index 93bbb66491..ad6b55eb58 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -93,7 +93,7 @@ class CNetAddr template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(FLATDATA(ip)); + READWRITE(ip); } friend class CSubNet; @@ -131,8 +131,8 @@ class CSubNet template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(network); - READWRITE(FLATDATA(netmask)); - READWRITE(FLATDATA(valid)); + READWRITE(netmask); + READWRITE(valid); } }; @@ -166,7 +166,7 @@ class CService : public CNetAddr template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(FLATDATA(ip)); + READWRITE(ip); unsigned short portN = htons(port); READWRITE(FLATDATA(portN)); if (ser_action.ForRead()) diff --git a/src/policy/fees.h b/src/policy/fees.h index 5d35e276e1..afb4746def 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -68,7 +68,7 @@ class TxConfirmStats; /* Identifier for each of the 3 different TxConfirmStats which will track * history over different time horizons. 
*/ -enum FeeEstimateHorizon { +enum class FeeEstimateHorizon { SHORT_HALFLIFE = 0, MED_HALFLIFE = 1, LONG_HALFLIFE = 2 diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index db1ad4f26d..c3f65fb2ab 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -179,7 +179,7 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { std::vector<std::vector<unsigned char> > stack; // convert the scriptSig into a stack, so we can inspect the redeemScript - if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SIGVERSION_BASE)) + if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SigVersion::BASE)) return false; if (stack.empty()) return false; @@ -215,7 +215,7 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) // If the scriptPubKey is P2SH, we try to extract the redeemScript casually by converting the scriptSig // into a stack. We do not check IsPushOnly nor compare the hash as these will be done later anyway. // If the check fails at this stage, we know that this txid must be a bad one. - if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SIGVERSION_BASE)) + if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SigVersion::BASE)) return false; if (stack.empty()) return false; diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp index c5a1741608..81b2a7fadc 100644 --- a/src/policy/rbf.cpp +++ b/src/policy/rbf.cpp @@ -22,13 +22,13 @@ RBFTransactionState IsRBFOptIn(const CTransaction &tx, CTxMemPool &pool) // First check the transaction itself. if (SignalsOptInRBF(tx)) { - return RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125; + return RBFTransactionState::REPLACEABLE_BIP125; } // If this transaction is not in our mempool, then we can't be sure // we will know about all its inputs. 
if (!pool.exists(tx.GetHash())) { - return RBF_TRANSACTIONSTATE_UNKNOWN; + return RBFTransactionState::UNKNOWN; } // If all the inputs have nSequence >= maxint-1, it still might be @@ -40,8 +40,8 @@ RBFTransactionState IsRBFOptIn(const CTransaction &tx, CTxMemPool &pool) for (CTxMemPool::txiter it : setAncestors) { if (SignalsOptInRBF(it->GetTx())) { - return RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125; + return RBFTransactionState::REPLACEABLE_BIP125; } } - return RBF_TRANSACTIONSTATE_FINAL; + return RBFTransactionState::FINAL; } diff --git a/src/policy/rbf.h b/src/policy/rbf.h index 72f51b0f03..b10532addf 100644 --- a/src/policy/rbf.h +++ b/src/policy/rbf.h @@ -9,10 +9,10 @@ static const uint32_t MAX_BIP125_RBF_SEQUENCE = 0xfffffffd; -enum RBFTransactionState { - RBF_TRANSACTIONSTATE_UNKNOWN, - RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125, - RBF_TRANSACTIONSTATE_FINAL +enum class RBFTransactionState { + UNKNOWN, + REPLACEABLE_BIP125, + FINAL }; // Check whether the sequence numbers on this transaction are signaling diff --git a/src/protocol.h b/src/protocol.h index e518d11944..a07c5ea862 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -48,10 +48,10 @@ public: template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(FLATDATA(pchMessageStart)); - READWRITE(FLATDATA(pchCommand)); + READWRITE(pchMessageStart); + READWRITE(pchCommand); READWRITE(nMessageSize); - READWRITE(FLATDATA(pchChecksum)); + READWRITE(pchChecksum); } char pchMessageStart[MESSAGE_START_SIZE]; diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index ab381bfb5d..f853c04617 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -34,8 +34,10 @@ #include <warnings.h> #ifdef ENABLE_WALLET +#include <wallet/init.h> #include <wallet/wallet.h> #endif +#include <walletinitinterface.h> #include <stdint.h> @@ -251,7 +253,7 @@ private: QTimer *pollShutdownTimer; #ifdef ENABLE_WALLET PaymentServer* paymentServer; - WalletModel *walletModel; + std::vector<WalletModel*> m_wallet_models; #endif int returnValue; const PlatformStyle *platformStyle; @@ -333,7 +335,7 @@ BitcoinApplication::BitcoinApplication(int &argc, char **argv): pollShutdownTimer(0), #ifdef ENABLE_WALLET paymentServer(0), - walletModel(0), + m_wallet_models(), #endif returnValue(0) { @@ -451,8 +453,10 @@ void BitcoinApplication::requestShutdown() #ifdef ENABLE_WALLET window->removeAllWallets(); - delete walletModel; - walletModel = 0; + for (WalletModel *walletModel : m_wallet_models) { + delete walletModel; + } + m_wallet_models.clear(); #endif delete clientModel; clientModel = 0; @@ -481,16 +485,20 @@ void BitcoinApplication::initializeResult(bool success) window->setClientModel(clientModel); #ifdef ENABLE_WALLET - // TODO: Expose secondary wallets - if (!vpwallets.empty()) - { - walletModel = new WalletModel(platformStyle, vpwallets[0], optionsModel); + bool fFirstWallet = true; + for (CWalletRef pwallet : vpwallets) { + WalletModel * const walletModel = new WalletModel(platformStyle, pwallet, optionsModel); - window->addWallet(BitcoinGUI::DEFAULT_WALLET, walletModel); - window->setCurrentWallet(BitcoinGUI::DEFAULT_WALLET); + window->addWallet(walletModel); + if (fFirstWallet) { + window->setCurrentWallet(walletModel->getWalletName()); + fFirstWallet = false; + } connect(walletModel, SIGNAL(coinsSent(CWallet*,SendCoinsRecipient,QByteArray)), paymentServer, SLOT(fetchPaymentACK(CWallet*,const SendCoinsRecipient&,QByteArray))); + + m_wallet_models.push_back(walletModel); } #endif @@ -617,7 +625,7 @@ int 
main(int argc, char *argv[]) if (!Intro::pickDataDirectory()) return EXIT_SUCCESS; - /// 6. Determine availability of data directory and parse bitcoin.conf + /// 6. Determine availability of data and blocks directory and parse bitcoin.conf /// - Do not call GetDataDir(true) before this step finishes if (!fs::is_directory(GetDataDir(false))) { @@ -671,6 +679,11 @@ int main(int argc, char *argv[]) // Start up the payment server early, too, so impatient users that click on // bitcoin: links repeatedly have their payment requests routed to this process: app.createPaymentServer(); + + // Hook up the wallet init interface + g_wallet_init_interface.reset(new WalletInit); +#else + g_wallet_init_interface.reset(new DummyWalletInit); #endif /// 9. Main GUI initialization @@ -690,7 +703,7 @@ int main(int argc, char *argv[]) // Allow parameter interaction before we create the options model app.parameterSetup(); // Load GUI settings from QSettings - app.createOptionsModel(gArgs.IsArgSet("-resetguisettings")); + app.createOptionsModel(gArgs.GetBoolArg("-resetguisettings", false)); // Subscribe to global signals from core uiInterface.InitMessage.connect(InitMessage); diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index 427eb95a84..e4207fce99 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -21,6 +21,7 @@ #ifdef ENABLE_WALLET #include <qt/walletframe.h> #include <qt/walletmodel.h> +#include <qt/walletview.h> #endif // ENABLE_WALLET #ifdef Q_OS_MAC @@ -36,6 +37,7 @@ #include <QAction> #include <QApplication> +#include <QComboBox> #include <QDateTime> #include <QDesktopWidget> #include <QDragEnterEvent> @@ -70,10 +72,6 @@ const std::string BitcoinGUI::DEFAULT_UIPLATFORM = #endif ; -/** Display name for default wallet name. Uses tilde to avoid name - * collisions in the future with additional wallets */ -const QString BitcoinGUI::DEFAULT_WALLET = "~Default"; - BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle *networkStyle, QWidget *parent) : QMainWindow(parent), enableWallet(false), @@ -88,6 +86,7 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle * progressBar(0), progressDialog(0), appMenuBar(0), + appToolBar(0), overviewAction(0), historyAction(0), quitAction(0), @@ -455,6 +454,7 @@ void BitcoinGUI::createToolBars() if(walletFrame) { QToolBar *toolbar = addToolBar(tr("Tabs toolbar")); + appToolBar = toolbar; toolbar->setContextMenuPolicy(Qt::PreventContextMenu); toolbar->setMovable(false); toolbar->setToolButtonStyle(Qt::ToolButtonTextBesideIcon); @@ -463,6 +463,15 @@ void BitcoinGUI::createToolBars() toolbar->addAction(receiveCoinsAction); toolbar->addAction(historyAction); overviewAction->setChecked(true); + +#ifdef ENABLE_WALLET + QWidget *spacer = new QWidget(); + spacer->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding); + toolbar->addWidget(spacer); + + m_wallet_selector = new QComboBox(); + connect(m_wallet_selector, SIGNAL(currentIndexChanged(const QString&)), this, SLOT(setCurrentWallet(const QString&))); +#endif } } @@ -529,12 +538,22 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel) } #ifdef ENABLE_WALLET -bool BitcoinGUI::addWallet(const QString& name, WalletModel *walletModel) +bool BitcoinGUI::addWallet(WalletModel *walletModel) { if(!walletFrame) return false; + const QString name = walletModel->getWalletName(); setWalletActionsEnabled(true); - return walletFrame->addWallet(name, walletModel); + m_wallet_selector->addItem(name); + if (m_wallet_selector->count() == 2) { + 
m_wallet_selector_label = new QLabel(); + m_wallet_selector_label->setText(tr("Wallet:") + " "); + m_wallet_selector_label->setBuddy(m_wallet_selector); + appToolBar->addWidget(m_wallet_selector_label); + appToolBar->addWidget(m_wallet_selector); + } + rpcConsole->addWallet(walletModel); + return walletFrame->addWallet(walletModel); } bool BitcoinGUI::setCurrentWallet(const QString& name) @@ -780,7 +799,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer // Acquire current block source enum BlockSource blockSource = clientModel->getBlockSource(); switch (blockSource) { - case BLOCK_SOURCE_NETWORK: + case BlockSource::NETWORK: if (header) { updateHeadersSyncProgressLabel(); return; @@ -788,17 +807,17 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer progressBarLabel->setText(tr("Synchronizing with network...")); updateHeadersSyncProgressLabel(); break; - case BLOCK_SOURCE_DISK: + case BlockSource::DISK: if (header) { progressBarLabel->setText(tr("Indexing blocks on disk...")); } else { progressBarLabel->setText(tr("Processing blocks on disk...")); } break; - case BLOCK_SOURCE_REINDEX: + case BlockSource::REINDEX: progressBarLabel->setText(tr("Reindexing blocks on disk...")); break; - case BLOCK_SOURCE_NONE: + case BlockSource::NONE: if (header) { return; } @@ -983,12 +1002,15 @@ void BitcoinGUI::showEvent(QShowEvent *event) } #ifdef ENABLE_WALLET -void BitcoinGUI::incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label) +void BitcoinGUI::incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label, const QString& walletName) { // On new transaction, make an info balloon QString msg = tr("Date: %1\n").arg(date) + - tr("Amount: %1\n").arg(BitcoinUnits::formatWithUnit(unit, amount, true)) + - tr("Type: %1\n").arg(type); + tr("Amount: %1\n").arg(BitcoinUnits::formatWithUnit(unit, amount, true)); + if (WalletModel::isMultiwallet() && !walletName.isEmpty()) { + msg += tr("Wallet: %1\n").arg(walletName); + } + msg += tr("Type: %1\n").arg(type); if (!label.isEmpty()) msg += tr("Label: %1\n").arg(label); else if (!address.isEmpty()) @@ -1079,6 +1101,20 @@ void BitcoinGUI::setEncryptionStatus(int status) break; } } + +void BitcoinGUI::updateWalletStatus() +{ + if (!walletFrame) { + return; + } + WalletView * const walletView = walletFrame->currentWalletView(); + if (!walletView) { + return; + } + WalletModel * const walletModel = walletView->getWalletModel(); + setEncryptionStatus(walletModel->getEncryptionStatus()); + setHDStatus(walletModel->hdEnabled()); +} #endif // ENABLE_WALLET void BitcoinGUI::showNormalIfMinimized(bool fToggleHidden) diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h index ddb7ecb76a..b9e92f2d5b 100644 --- a/src/qt/bitcoingui.h +++ b/src/qt/bitcoingui.h @@ -33,6 +33,7 @@ class ModalOverlay; QT_BEGIN_NAMESPACE class QAction; +class QComboBox; class QProgressBar; class QProgressDialog; QT_END_NAMESPACE @@ -46,7 +47,6 @@ class BitcoinGUI : public QMainWindow Q_OBJECT public: - static const QString DEFAULT_WALLET; static const std::string DEFAULT_UIPLATFORM; explicit BitcoinGUI(const PlatformStyle *platformStyle, const NetworkStyle *networkStyle, QWidget *parent = 0); @@ -62,8 +62,7 @@ public: The wallet model represents a bitcoin wallet, and offers access to the list of transactions, address book and sending functionality. 
*/ - bool addWallet(const QString& name, WalletModel *walletModel); - bool setCurrentWallet(const QString& name); + bool addWallet(WalletModel *walletModel); void removeAllWallets(); #endif // ENABLE_WALLET bool enableWallet; @@ -90,6 +89,7 @@ private: QProgressDialog *progressDialog; QMenuBar *appMenuBar; + QToolBar *appToolBar; QAction *overviewAction; QAction *historyAction; QAction *quitAction; @@ -112,6 +112,9 @@ private: QAction *openAction; QAction *showHelpMessageAction; + QLabel *m_wallet_selector_label; + QComboBox *m_wallet_selector; + QSystemTrayIcon *trayIcon; QMenu *trayIconMenu; Notificator *notificator; @@ -171,6 +174,12 @@ public Q_SLOTS: void message(const QString &title, const QString &message, unsigned int style, bool *ret = nullptr); #ifdef ENABLE_WALLET + bool setCurrentWallet(const QString& name); + /** Set the UI status indicators based on the currently selected wallet. + */ + void updateWalletStatus(); + +private: /** Set the encryption status as shown in the UI. @param[in] status current encryption status @see WalletModel::EncryptionStatus @@ -183,10 +192,11 @@ public Q_SLOTS: */ void setHDStatus(int hdEnabled); +public Q_SLOTS: bool handlePaymentRequest(const SendCoinsRecipient& recipient); /** Show incoming transaction notification for new transactions. */ - void incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label); + void incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label, const QString& walletName); #endif // ENABLE_WALLET private Q_SLOTS: diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index eaf2896bc3..40661d9ec3 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -177,13 +177,13 @@ bool ClientModel::inInitialBlockDownload() const enum BlockSource ClientModel::getBlockSource() const { if (fReindex) - return BLOCK_SOURCE_REINDEX; + return BlockSource::REINDEX; else if (fImporting) - return BLOCK_SOURCE_DISK; + return BlockSource::DISK; else if (getNumConnections() > 0) - return BLOCK_SOURCE_NETWORK; + return BlockSource::NETWORK; - return BLOCK_SOURCE_NONE; + return BlockSource::NONE; } void ClientModel::setNetworkActive(bool active) diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h index 99ec2365a9..1118bc31b3 100644 --- a/src/qt/clientmodel.h +++ b/src/qt/clientmodel.h @@ -20,11 +20,11 @@ QT_BEGIN_NAMESPACE class QTimer; QT_END_NAMESPACE -enum BlockSource { - BLOCK_SOURCE_NONE, - BLOCK_SOURCE_REINDEX, - BLOCK_SOURCE_DISK, - BLOCK_SOURCE_NETWORK +enum class BlockSource { + NONE, + REINDEX, + DISK, + NETWORK }; enum NumConnections { diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index bba822882e..695ed61228 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -413,6 +413,22 @@ <number>4</number> </property> <item> + <widget class="QLabel" name="WalletSelectorLabel"> + <property name="text"> + <string>Wallet: </string> + </property> + </widget> + </item> + <item> + <widget class="QComboBox" name="WalletSelector"> + <item> + <property name="text"> + <string>(none)</string> + </property> + </item> + </widget> + </item> + <item> <spacer name="horizontalSpacer"> <property name="orientation"> <enum>Qt::Horizontal</enum> diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 16ef27a36a..7b653a99da 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -137,15 +137,6 @@ void 
setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent) widget->setCheckValidator(new BitcoinAddressCheckValidator(parent)); } -void setupAmountWidget(QLineEdit *widget, QWidget *parent) -{ - QDoubleValidator *amountValidator = new QDoubleValidator(parent); - amountValidator->setDecimals(8); - amountValidator->setBottom(0.0); - widget->setValidator(amountValidator); - widget->setAlignment(Qt::AlignRight|Qt::AlignVCenter); -} - bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out) { // return if URI is not valid or is no bitcoin: URI diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index 4b856986da..bbbeaf2c43 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -40,9 +40,8 @@ namespace GUIUtil // Return a monospace font QFont fixedPitchFont(); - // Set up widgets for address and amounts + // Set up widget for address void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent); - void setupAmountWidget(QLineEdit *widget, QWidget *parent); // Parse "bitcoin:" URI into recipient object, return true on successful parsing bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out); diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index 05d3d36a74..65ef250440 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -639,8 +639,6 @@ void PaymentServer::fetchPaymentACK(CWallet* wallet, const SendCoinsRecipient& r payment.add_transactions(transaction.data(), transaction.size()); // Create a new refund address, or re-use: - QString account = tr("Refund from %1").arg(recipient.authenticatedMerchant); - std::string strAccount = account.toStdString(); CPubKey newKey; if (wallet->GetKeyFromPool(newKey)) { // BIP70 requests encode the scriptPubKey directly, so we are not restricted to address @@ -651,7 +649,8 @@ void PaymentServer::fetchPaymentACK(CWallet* wallet, const SendCoinsRecipient& r const OutputType change_type = wallet->m_default_change_type != OutputType::NONE ? 
wallet->m_default_change_type : wallet->m_default_address_type; wallet->LearnRelatedScripts(newKey, change_type); CTxDestination dest = GetDestinationForKey(newKey, change_type); - wallet->SetAddressBook(dest, strAccount, "refund"); + std::string label = tr("Refund from %1").arg(recipient.authenticatedMerchant).toStdString(); + wallet->SetAddressBook(dest, label, "refund"); CScript s = GetScriptForDestination(dest); payments::Output* refund_to = payment.add_refund_to(); diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp index 132fb54d66..c8b6366db0 100644 --- a/src/qt/receivecoinsdialog.cpp +++ b/src/qt/receivecoinsdialog.cpp @@ -153,7 +153,7 @@ void ReceiveCoinsDialog::on_receiveButton_clicked() ui->reqAmount->value(), ui->reqMessage->text()); ReceiveRequestDialog *dialog = new ReceiveRequestDialog(this); dialog->setAttribute(Qt::WA_DeleteOnClose); - dialog->setModel(model->getOptionsModel()); + dialog->setModel(model); dialog->setInfo(info); dialog->show(); clear(); @@ -166,7 +166,7 @@ void ReceiveCoinsDialog::on_recentRequestsView_doubleClicked(const QModelIndex & { const RecentRequestsTableModel *submodel = model->getRecentRequestsTableModel(); ReceiveRequestDialog *dialog = new ReceiveRequestDialog(this); - dialog->setModel(model->getOptionsModel()); + dialog->setModel(model); dialog->setInfo(submodel->entry(index.row()).recipient); dialog->setAttribute(Qt::WA_DeleteOnClose); dialog->show(); diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp index d4cb0e5ba2..75146e2214 100644 --- a/src/qt/receiverequestdialog.cpp +++ b/src/qt/receiverequestdialog.cpp @@ -108,12 +108,12 @@ ReceiveRequestDialog::~ReceiveRequestDialog() delete ui; } -void ReceiveRequestDialog::setModel(OptionsModel *_model) +void ReceiveRequestDialog::setModel(WalletModel *_model) { this->model = _model; if (_model) - connect(_model, SIGNAL(displayUnitChanged(int)), this, SLOT(update())); + connect(_model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(update())); // update the display unit if necessary update(); @@ -143,11 +143,14 @@ void ReceiveRequestDialog::update() html += "<a href=\""+uri+"\">" + GUIUtil::HtmlEscape(uri) + "</a><br>"; html += "<b>"+tr("Address")+"</b>: " + GUIUtil::HtmlEscape(info.address) + "<br>"; if(info.amount) - html += "<b>"+tr("Amount")+"</b>: " + BitcoinUnits::formatHtmlWithUnit(model->getDisplayUnit(), info.amount) + "<br>"; + html += "<b>"+tr("Amount")+"</b>: " + BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), info.amount) + "<br>"; if(!info.label.isEmpty()) html += "<b>"+tr("Label")+"</b>: " + GUIUtil::HtmlEscape(info.label) + "<br>"; if(!info.message.isEmpty()) html += "<b>"+tr("Message")+"</b>: " + GUIUtil::HtmlEscape(info.message) + "<br>"; + if(model->isMultiwallet()) { + html += "<b>"+tr("Wallet")+"</b>: " + GUIUtil::HtmlEscape(model->getWalletName()) + "<br>"; + } ui->outUri->setText(html); #ifdef USE_QRCODE diff --git a/src/qt/receiverequestdialog.h b/src/qt/receiverequestdialog.h index 21bbf1edb7..23c5529535 100644 --- a/src/qt/receiverequestdialog.h +++ b/src/qt/receiverequestdialog.h @@ -12,8 +12,6 @@ #include <QLabel> #include <QPainter> -class OptionsModel; - namespace Ui { class ReceiveRequestDialog; } @@ -53,7 +51,7 @@ public: explicit ReceiveRequestDialog(QWidget *parent = 0); ~ReceiveRequestDialog(); - void setModel(OptionsModel *model); + void setModel(WalletModel *model); void setInfo(const SendCoinsRecipient &info); private Q_SLOTS: @@ -64,7 +62,7 @@ private Q_SLOTS: 
private: Ui::ReceiveRequestDialog *ui; - OptionsModel *model; + WalletModel *model; SendCoinsRecipient info; }; diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp index 0dd7d46960..f045053c3b 100644 --- a/src/qt/recentrequeststablemodel.cpp +++ b/src/qt/recentrequeststablemodel.cpp @@ -139,10 +139,9 @@ bool RecentRequestsTableModel::removeRows(int row, int count, const QModelIndex if(count > 0 && row >= 0 && (row+count) <= list.size()) { - const RecentRequestEntry *rec; for (int i = 0; i < count; ++i) { - rec = &list[row+i]; + const RecentRequestEntry* rec = &list[row+i]; if (!walletModel->saveReceiveRequest(rec->recipient.address.toStdString(), rec->id, "")) return false; } diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 1aa4de03ca..c41e19f6f5 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -12,6 +12,7 @@ #include <qt/bantablemodel.h> #include <qt/clientmodel.h> #include <qt/platformstyle.h> +#include <qt/walletmodel.h> #include <chainparams.h> #include <netbase.h> #include <rpc/server.h> @@ -84,7 +85,7 @@ class RPCExecutor : public QObject Q_OBJECT public Q_SLOTS: - void request(const QString &command); + void request(const QString &command, const QString &walletID); Q_SIGNALS: void reply(int category, const QString &command); @@ -145,7 +146,7 @@ public: * @param[out] pstrFilteredOut Command line, filtered to remove any sensitive data */ -bool RPCConsole::RPCParseCommandLine(std::string &strResult, const std::string &strCommand, const bool fExecute, std::string * const pstrFilteredOut) +bool RPCConsole::RPCParseCommandLine(std::string &strResult, const std::string &strCommand, const bool fExecute, std::string * const pstrFilteredOut, const std::string *walletID) { std::vector< std::vector<std::string> > stack; stack.push_back(std::vector<std::string>()); @@ -303,10 +304,8 @@ bool RPCConsole::RPCParseCommandLine(std::string &strResult, const std::string & req.params = RPCConvertValues(stack.back()[0], std::vector<std::string>(stack.back().begin() + 1, stack.back().end())); req.strMethod = stack.back()[0]; #ifdef ENABLE_WALLET - // TODO: Move this logic to WalletModel - if (!vpwallets.empty()) { - // in Qt, use always the wallet with index 0 when running with multiple wallets - QByteArray encodedName = QUrl::toPercentEncoding(QString::fromStdString(vpwallets[0]->GetName())); + if (walletID && !walletID->empty()) { + QByteArray encodedName = QUrl::toPercentEncoding(QString::fromStdString(*walletID)); req.URI = "/wallet/"+std::string(encodedName.constData(), encodedName.length()); } #endif @@ -385,7 +384,7 @@ bool RPCConsole::RPCParseCommandLine(std::string &strResult, const std::string & } } -void RPCExecutor::request(const QString &command) +void RPCExecutor::request(const QString &command, const QString &walletID) { try { @@ -416,7 +415,8 @@ void RPCExecutor::request(const QString &command) " example: getblock(getblockhash(0),true)[tx][0]\n\n"))); return; } - if(!RPCConsole::RPCExecuteCommandLine(result, executableCommand)) + std::string wallet_id = walletID.toStdString(); + if(!RPCConsole::RPCExecuteCommandLine(result, executableCommand, nullptr, &wallet_id)) { Q_EMIT reply(RPCConsole::CMD_ERROR, QString("Parse error: unbalanced ' or \"")); return; @@ -478,6 +478,10 @@ RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) : connect(ui->fontSmallerButton, SIGNAL(clicked()), this, SLOT(fontSmaller())); connect(ui->btnClearTrafficGraph, SIGNAL(clicked()), ui->trafficGraph, SLOT(clear())); + // 
disable the wallet selector by default + ui->WalletSelector->setVisible(false); + ui->WalletSelectorLabel->setVisible(false); + // set library version labels #ifdef ENABLE_WALLET ui->berkeleyDBVersion->setText(DbEnv::version(0, 0, 0)); @@ -687,6 +691,23 @@ void RPCConsole::setClientModel(ClientModel *model) } } +#ifdef ENABLE_WALLET +void RPCConsole::addWallet(WalletModel * const walletModel) +{ + const QString name = walletModel->getWalletName(); + // use name for text and internal data object (to allow to move to a wallet id later) + ui->WalletSelector->addItem(name, name); + if (ui->WalletSelector->count() == 2 && !isVisible()) { + // First wallet added, set to default so long as the window isn't presently visible (and potentially in use) + ui->WalletSelector->setCurrentIndex(1); + } + if (ui->WalletSelector->count() > 2) { + ui->WalletSelector->setVisible(true); + ui->WalletSelectorLabel->setVisible(true); + } +} +#endif + static QString categoryClass(int category) { switch(category) @@ -874,8 +895,25 @@ void RPCConsole::on_lineEdit_returnPressed() cmdBeforeBrowsing = QString(); + QString walletID; +#ifdef ENABLE_WALLET + const int wallet_index = ui->WalletSelector->currentIndex(); + if (wallet_index > 0) { + walletID = (QString)ui->WalletSelector->itemData(wallet_index).value<QString>(); + } + + if (m_last_wallet_id != walletID) { + if (walletID.isEmpty()) { + message(CMD_REQUEST, tr("Executing command without any wallet")); + } else { + message(CMD_REQUEST, tr("Executing command using \"%1\" wallet").arg(walletID)); + } + m_last_wallet_id = walletID; + } +#endif + message(CMD_REQUEST, QString::fromStdString(strFilteredCmd)); - Q_EMIT cmdRequest(cmd); + Q_EMIT cmdRequest(cmd, walletID); cmd = QString::fromStdString(strFilteredCmd); @@ -923,7 +961,7 @@ void RPCConsole::startExecutor() // Replies from executor object must go to this object connect(executor, SIGNAL(reply(int,QString)), this, SLOT(message(int,QString))); // Requests from this object must go to executor - connect(this, SIGNAL(cmdRequest(QString)), executor, SLOT(request(QString))); + connect(this, SIGNAL(cmdRequest(QString, QString)), executor, SLOT(request(QString, QString))); // On stopExecutor signal // - quit the Qt event loop in the execution thread diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index c41cbb0933..c97260b2c3 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -17,6 +17,7 @@ class ClientModel; class PlatformStyle; class RPCTimerInterface; +class WalletModel; namespace Ui { class RPCConsole; @@ -36,12 +37,13 @@ public: explicit RPCConsole(const PlatformStyle *platformStyle, QWidget *parent); ~RPCConsole(); - static bool RPCParseCommandLine(std::string &strResult, const std::string &strCommand, bool fExecute, std::string * const pstrFilteredOut = nullptr); - static bool RPCExecuteCommandLine(std::string &strResult, const std::string &strCommand, std::string * const pstrFilteredOut = nullptr) { - return RPCParseCommandLine(strResult, strCommand, true, pstrFilteredOut); + static bool RPCParseCommandLine(std::string &strResult, const std::string &strCommand, bool fExecute, std::string * const pstrFilteredOut = nullptr, const std::string *walletID = nullptr); + static bool RPCExecuteCommandLine(std::string &strResult, const std::string &strCommand, std::string * const pstrFilteredOut = nullptr, const std::string *walletID = nullptr) { + return RPCParseCommandLine(strResult, strCommand, true, pstrFilteredOut, walletID); } void setClientModel(ClientModel *model); + void addWallet(WalletModel * 
const walletModel); enum MessageClass { MC_ERROR, @@ -120,7 +122,7 @@ public Q_SLOTS: Q_SIGNALS: // For RPC command executor void stopExecutor(); - void cmdRequest(const QString &command); + void cmdRequest(const QString &command, const QString &walletID); private: void startExecutor(); @@ -151,6 +153,7 @@ private: int consoleFontSize; QCompleter *autoCompleter; QThread thread; + QString m_last_wallet_id; /** Update UI with latest network info from model. */ void updateNetworkState(); diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index ec7edd48cd..8a52aadbb0 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -277,8 +277,11 @@ void SendCoinsDialog::on_sendButton_clicked() QStringList formatted; for (const SendCoinsRecipient &rcp : currentTransaction.getRecipients()) { - // generate bold amount string + // generate bold amount string with wallet name in case of multiwallet QString amount = "<b>" + BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), rcp.amount); + if (model->isMultiwallet()) { + amount.append(" <u>"+tr("from wallet %1").arg(GUIUtil::HtmlEscape(model->getWalletName()))+"</u> "); + } amount.append("</b>"); // generate monospace address string QString address = "<span style='font-family: monospace;'>" + rcp.address; diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index 65e7775f2b..c19e6aae78 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -77,7 +77,7 @@ HelpMessageDialog::HelpMessageDialog(QWidget *parent, bool about) : cursor.insertText(header); cursor.insertBlock(); - std::string strUsage = HelpMessage(HMM_BITCOIN_QT); + std::string strUsage = HelpMessage(HelpMessageMode::BITCOIN_QT); const bool showDebug = gArgs.GetBoolArg("-help-debug", false); strUsage += HelpMessageGroup(tr("UI Options:").toStdString()); if (showDebug) { diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp index c0b9d04269..5b13353d7b 100644 --- a/src/qt/walletframe.cpp +++ b/src/qt/walletframe.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
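The console changes above route each command to a specific wallet by percent-encoding the selected wallet name into the request URI. A self-contained sketch of that construction, assuming only Qt Core; WalletEndpoint() and main() are illustrative names, not code from the patch:

    #include <QByteArray>
    #include <QString>
    #include <QUrl>
    #include <iostream>
    #include <string>

    // Build the per-wallet RPC endpoint path from a wallet name, mirroring the
    // QUrl::toPercentEncoding() call in RPCConsole::RPCParseCommandLine above.
    static std::string WalletEndpoint(const QString& wallet_name)
    {
        const QByteArray encoded = QUrl::toPercentEncoding(wallet_name);
        return "/wallet/" + std::string(encoded.constData(), encoded.length());
    }

    int main()
    {
        std::cout << WalletEndpoint(QStringLiteral("my wallet.dat")) << '\n';
        // prints: /wallet/my%20wallet.dat
    }

The encoding step matters because wallet names come from user-supplied -wallet arguments and may contain characters that are not valid in a URI path.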
#include <qt/walletframe.h> +#include <qt/walletmodel.h> #include <qt/bitcoingui.h> #include <qt/walletview.h> @@ -39,10 +40,16 @@ void WalletFrame::setClientModel(ClientModel *_clientModel) this->clientModel = _clientModel; } -bool WalletFrame::addWallet(const QString& name, WalletModel *walletModel) +bool WalletFrame::addWallet(WalletModel *walletModel) { - if (!gui || !clientModel || !walletModel || mapWalletViews.count(name) > 0) + if (!gui || !clientModel || !walletModel) { return false; + } + + const QString name = walletModel->getWalletName(); + if (mapWalletViews.count(name) > 0) { + return false; + } WalletView *walletView = new WalletView(platformStyle, this); walletView->setBitcoinGUI(gui); diff --git a/src/qt/walletframe.h b/src/qt/walletframe.h index 42ce69fea1..6eedcf370c 100644 --- a/src/qt/walletframe.h +++ b/src/qt/walletframe.h @@ -36,7 +36,7 @@ public: void setClientModel(ClientModel *clientModel); - bool addWallet(const QString& name, WalletModel *walletModel); + bool addWallet(WalletModel *walletModel); bool setCurrentWallet(const QString& name); bool removeWallet(const QString &name); void removeAllWallets(); @@ -59,6 +59,7 @@ private: const PlatformStyle *platformStyle; +public: WalletView *currentWalletView(); public Q_SLOTS: diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index d092ebedfb..795302be58 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -110,8 +110,9 @@ void WalletModel::updateStatus() { EncryptionStatus newEncryptionStatus = getEncryptionStatus(); - if(cachedEncryptionStatus != newEncryptionStatus) - Q_EMIT encryptionStatusChanged(newEncryptionStatus); + if(cachedEncryptionStatus != newEncryptionStatus) { + Q_EMIT encryptionStatusChanged(); + } } void WalletModel::pollBalanceChanged() @@ -743,3 +744,14 @@ int WalletModel::getDefaultConfirmTarget() const { return nTxConfirmTarget; } + +QString WalletModel::getWalletName() const +{ + LOCK(wallet->cs_wallet); + return QString::fromStdString(wallet->GetName()); +} + +bool WalletModel::isMultiwallet() +{ + return gArgs.GetArgs("-wallet").size() > 1; +} diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h index 64813e0f5a..ff4b38a804 100644 --- a/src/qt/walletmodel.h +++ b/src/qt/walletmodel.h @@ -136,6 +136,8 @@ public: TransactionTableModel *getTransactionTableModel(); RecentRequestsTableModel *getRecentRequestsTableModel(); + CWallet *getWallet() const { return wallet; }; + CAmount getBalance(const CCoinControl *coinControl = nullptr) const; CAmount getUnconfirmedBalance() const; CAmount getImmatureBalance() const; @@ -225,6 +227,9 @@ public: int getDefaultConfirmTarget() const; + QString getWalletName() const; + + static bool isMultiwallet(); private: CWallet *wallet; bool fHaveWatchOnly; @@ -260,7 +265,7 @@ Q_SIGNALS: const CAmount& watchOnlyBalance, const CAmount& watchUnconfBalance, const CAmount& watchImmatureBalance); // Encryption status of wallet changed - void encryptionStatusChanged(int status); + void encryptionStatusChanged(); // Signal emitted when wallet needs to be unlocked // It is valid behaviour for listeners to keep the wallet locked after this signal; diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp index 64497a3431..cc4300a7a1 100644 --- a/src/qt/walletview.cpp +++ b/src/qt/walletview.cpp @@ -101,13 +101,13 @@ void WalletView::setBitcoinGUI(BitcoinGUI *gui) connect(this, SIGNAL(message(QString,QString,unsigned int)), gui, SLOT(message(QString,QString,unsigned int))); // Pass through encryption status changed signals - connect(this, 
SIGNAL(encryptionStatusChanged(int)), gui, SLOT(setEncryptionStatus(int))); + connect(this, SIGNAL(encryptionStatusChanged()), gui, SLOT(updateWalletStatus())); // Pass through transaction notifications - connect(this, SIGNAL(incomingTransaction(QString,int,CAmount,QString,QString,QString)), gui, SLOT(incomingTransaction(QString,int,CAmount,QString,QString,QString))); + connect(this, SIGNAL(incomingTransaction(QString,int,CAmount,QString,QString,QString,QString)), gui, SLOT(incomingTransaction(QString,int,CAmount,QString,QString,QString,QString))); // Connect HD enabled state signal - connect(this, SIGNAL(hdEnabledStatusChanged(int)), gui, SLOT(setHDStatus(int))); + connect(this, SIGNAL(hdEnabledStatusChanged()), gui, SLOT(updateWalletStatus())); } } @@ -137,11 +137,11 @@ void WalletView::setWalletModel(WalletModel *_walletModel) connect(_walletModel, SIGNAL(message(QString,QString,unsigned int)), this, SIGNAL(message(QString,QString,unsigned int))); // Handle changes in encryption status - connect(_walletModel, SIGNAL(encryptionStatusChanged(int)), this, SIGNAL(encryptionStatusChanged(int))); + connect(_walletModel, SIGNAL(encryptionStatusChanged()), this, SIGNAL(encryptionStatusChanged())); updateEncryptionStatus(); // update HD status - Q_EMIT hdEnabledStatusChanged(_walletModel->hdEnabled()); + Q_EMIT hdEnabledStatusChanged(); // Balloon pop-up for new transaction connect(_walletModel->getTransactionTableModel(), SIGNAL(rowsInserted(QModelIndex,int,int)), @@ -172,7 +172,7 @@ void WalletView::processNewTransaction(const QModelIndex& parent, int start, int QString address = ttm->data(index, TransactionTableModel::AddressRole).toString(); QString label = ttm->data(index, TransactionTableModel::LabelRole).toString(); - Q_EMIT incomingTransaction(date, walletModel->getOptionsModel()->getDisplayUnit(), amount, type, address, label); + Q_EMIT incomingTransaction(date, walletModel->getOptionsModel()->getDisplayUnit(), amount, type, address, label, walletModel->getWalletName()); } void WalletView::gotoOverviewPage() @@ -234,7 +234,7 @@ void WalletView::showOutOfSyncWarning(bool fShow) void WalletView::updateEncryptionStatus() { - Q_EMIT encryptionStatusChanged(walletModel->getEncryptionStatus()); + Q_EMIT encryptionStatusChanged(); } void WalletView::encryptWallet(bool status) diff --git a/src/qt/walletview.h b/src/qt/walletview.h index 30d68e4eff..878a5966d6 100644 --- a/src/qt/walletview.h +++ b/src/qt/walletview.h @@ -44,6 +44,7 @@ public: The client model represents the part of the core that communicates with the P2P network, and is wallet-agnostic. */ void setClientModel(ClientModel *clientModel); + WalletModel *getWalletModel() { return walletModel; } /** Set the wallet model. The wallet model represents a bitcoin wallet, and offers access to the list of transactions, address book and sending functionality. 
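The signal rewiring above relies on Qt's string-based connect(): the SIGNAL()/SLOT() strings are normalized and compared at runtime, so every connection naming a changed signal (for example incomingTransaction gaining the wallet-name QString) has to be updated in lockstep, or the connection fails with only a console warning. A minimal runnable sketch of that mechanism using stock Qt classes:

    #include <QCoreApplication>
    #include <QObject>
    #include <QTimer>

    int main(int argc, char* argv[])
    {
        QCoreApplication app(argc, argv);
        QTimer timer;
        // The signature strings must match the normalized signal/slot signatures
        // exactly; a stale parameter list makes connect() return false at runtime
        // and emit only a console warning, without any compile-time error.
        QObject::connect(&timer, SIGNAL(timeout()), &app, SLOT(quit()));
        timer.start(0);
        return app.exec();
    }

This is why the signals above were simplified to take no arguments where possible (encryptionStatusChanged, hdEnabledStatusChanged): the receiver now pulls the current state from the wallet model instead of depending on a payload whose signature string must be kept in sync everywhere.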
@@ -119,11 +120,11 @@ Q_SIGNALS: /** Fired when a message should be reported to the user */ void message(const QString &title, const QString &message, unsigned int style); /** Encryption status of wallet changed */ - void encryptionStatusChanged(int status); + void encryptionStatusChanged(); /** HD-Enabled status of wallet changed (only possible during startup) */ - void hdEnabledStatusChanged(int hdEnabled); + void hdEnabledStatusChanged(); /** Notify that a new transaction appeared */ - void incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label); + void incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label, const QString& walletName); /** Notify that the out of sync warning icon has been pressed */ void outOfSyncWarningClicked(); }; diff --git a/src/random.h b/src/random.h index 5fe8f2fd04..1d6b13a537 100644 --- a/src/random.h +++ b/src/random.h @@ -11,6 +11,7 @@ #include <uint256.h> #include <stdint.h> +#include <limits> /* Seed OpenSSL PRNG with additional entropy data */ void RandAddSeed(); @@ -121,6 +122,12 @@ public: /** Generate a random boolean. */ bool randbool() { return randbits(1); } + + // Compatibility with the C++11 UniformRandomBitGenerator concept + typedef uint64_t result_type; + static constexpr uint64_t min() { return 0; } + static constexpr uint64_t max() { return std::numeric_limits<uint64_t>::max(); } + inline uint64_t operator()() { return rand64(); } }; /* Number of random bytes returned by GetOSRand. diff --git a/src/rest.cpp b/src/rest.cpp index f47b40343b..5871b554a6 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -24,21 +24,21 @@ static const size_t MAX_GETUTXOS_OUTPOINTS = 15; //allow a max of 15 outpoints to be queried at once -enum RetFormat { - RF_UNDEF, - RF_BINARY, - RF_HEX, - RF_JSON, +enum class RetFormat { + UNDEF, + BINARY, + HEX, + JSON, }; static const struct { enum RetFormat rf; const char* name; } rf_names[] = { - {RF_UNDEF, ""}, - {RF_BINARY, "bin"}, - {RF_HEX, "hex"}, - {RF_JSON, "json"}, + {RetFormat::UNDEF, ""}, + {RetFormat::BINARY, "bin"}, + {RetFormat::HEX, "hex"}, + {RetFormat::JSON, "json"}, }; struct CCoin { @@ -162,20 +162,20 @@ static bool rest_headers(HTTPRequest* req, } switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { std::string binaryHeader = ssHeader.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryHeader); return true; } - case RF_HEX: { + case RetFormat::HEX: { std::string strHex = HexStr(ssHeader.begin(), ssHeader.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue jsonHeaders(UniValue::VARR); { LOCK(cs_main); @@ -227,21 +227,21 @@ static bool rest_block(HTTPRequest* req, ssBlock << block; switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { std::string binaryBlock = ssBlock.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryBlock); return true; } - case RF_HEX: { + case RetFormat::HEX: { std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue objBlock; { LOCK(cs_main); @@ -280,7 +280,7 @@ static bool rest_chaininfo(HTTPRequest* req, const std::string& strURIPart) 
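The random.h addition above is what lets FastRandomContext satisfy the standard UniformRandomBitGenerator requirements (result_type, min(), max(), operator()). A short sketch of the intended use; it compiles only inside the Bitcoin Core tree because it includes the project's own random.h, and ShuffleExample() is an invented name:

    #include <random.h>    // FastRandomContext
    #include <algorithm>
    #include <vector>

    void ShuffleExample()
    {
        std::vector<int> order{1, 2, 3, 4, 5};
        FastRandomContext rng;
        // std::shuffle accepts any UniformRandomBitGenerator, which the new
        // result_type/min()/max()/operator() members provide.
        std::shuffle(order.begin(), order.end(), rng);
    }

This mirrors how std::mt19937 is normally passed to the standard algorithms, but draws its bits from FastRandomContext's rand64().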
const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { - case RF_JSON: { + case RetFormat::JSON: { JSONRPCRequest jsonRequest; jsonRequest.params = UniValue(UniValue::VARR); UniValue chainInfoObject = getblockchaininfo(jsonRequest); @@ -303,7 +303,7 @@ static bool rest_mempool_info(HTTPRequest* req, const std::string& strURIPart) const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { - case RF_JSON: { + case RetFormat::JSON: { UniValue mempoolInfoObject = mempoolInfoToJSON(); std::string strJSON = mempoolInfoObject.write() + "\n"; @@ -325,7 +325,7 @@ static bool rest_mempool_contents(HTTPRequest* req, const std::string& strURIPar const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { - case RF_JSON: { + case RetFormat::JSON: { UniValue mempoolObject = mempoolToJSON(true); std::string strJSON = mempoolObject.write() + "\n"; @@ -359,21 +359,21 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart) ssTx << tx; switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { std::string binaryTx = ssTx.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryTx); return true; } - case RF_HEX: { + case RetFormat::HEX: { std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, hashBlock, objTx); std::string strJSON = objTx.write() + "\n"; @@ -440,13 +440,13 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) } switch (rf) { - case RF_HEX: { + case RetFormat::HEX: { // convert hex to bin, continue then with bin part std::vector<unsigned char> strRequestV = ParseHex(strRequestMutable); strRequestMutable.assign(strRequestV.begin(), strRequestV.end()); } - case RF_BINARY: { + case RetFormat::BINARY: { try { //deserialize only if user sent a request if (strRequestMutable.size() > 0) @@ -466,7 +466,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) break; } - case RF_JSON: { + case RetFormat::JSON: { if (!fInputParsed) return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); break; @@ -487,33 +487,35 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) std::vector<bool> hits; bitmap.resize((vOutPoints.size() + 7) / 8); { - LOCK2(cs_main, mempool.cs); - - CCoinsView viewDummy; - CCoinsViewCache view(&viewDummy); - - CCoinsViewCache& viewChain = *pcoinsTip; - CCoinsViewMemPool viewMempool(&viewChain, mempool); - - if (fCheckMemPool) - view.SetBackend(viewMempool); // switch cache backend to db+mempool in case user likes to query mempool - - for (size_t i = 0; i < vOutPoints.size(); i++) { - bool hit = false; - Coin coin; - if (view.GetCoin(vOutPoints[i], coin) && !mempool.isSpent(vOutPoints[i])) { - hit = true; - outs.emplace_back(std::move(coin)); + auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool& mempool) { + for (const COutPoint& vOutPoint : vOutPoints) { + Coin coin; + bool hit = !mempool.isSpent(vOutPoint) && view.GetCoin(vOutPoint, coin); + hits.push_back(hit); + if (hit) outs.emplace_back(std::move(coin)); } + }; + + if (fCheckMemPool) { + // use db+mempool as cache backend in case user likes to query mempool + LOCK2(cs_main, mempool.cs); + CCoinsViewCache& viewChain = *pcoinsTip; + CCoinsViewMemPool viewMempool(&viewChain, mempool); + process_utxos(viewMempool, mempool); + } 
else { + LOCK(cs_main); // no need to lock mempool! + process_utxos(*pcoinsTip, CTxMemPool()); + } - hits.push_back(hit); + for (size_t i = 0; i < hits.size(); ++i) { + const bool hit = hits[i]; bitmapStringRepresentation.append(hit ? "1" : "0"); // form a binary string representation (human-readable for json output) bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { // serialize data // use exact same output as mentioned in Bip64 CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); @@ -525,7 +527,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) return true; } - case RF_HEX: { + case RetFormat::HEX: { CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string strHex = HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) + "\n"; @@ -535,7 +537,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue objGetUTXOResponse(UniValue::VOBJ); // pack in some essentials diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 169caddc59..31cbec4c86 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1120,20 +1120,20 @@ static UniValue BIP9SoftForkDesc(const Consensus::Params& consensusParams, Conse UniValue rv(UniValue::VOBJ); const ThresholdState thresholdState = VersionBitsTipState(consensusParams, id); switch (thresholdState) { - case THRESHOLD_DEFINED: rv.pushKV("status", "defined"); break; - case THRESHOLD_STARTED: rv.pushKV("status", "started"); break; - case THRESHOLD_LOCKED_IN: rv.pushKV("status", "locked_in"); break; - case THRESHOLD_ACTIVE: rv.pushKV("status", "active"); break; - case THRESHOLD_FAILED: rv.pushKV("status", "failed"); break; + case ThresholdState::DEFINED: rv.pushKV("status", "defined"); break; + case ThresholdState::STARTED: rv.pushKV("status", "started"); break; + case ThresholdState::LOCKED_IN: rv.pushKV("status", "locked_in"); break; + case ThresholdState::ACTIVE: rv.pushKV("status", "active"); break; + case ThresholdState::FAILED: rv.pushKV("status", "failed"); break; } - if (THRESHOLD_STARTED == thresholdState) + if (ThresholdState::STARTED == thresholdState) { rv.pushKV("bit", consensusParams.vDeployments[id].bit); } rv.pushKV("startTime", consensusParams.vDeployments[id].nStartTime); rv.pushKV("timeout", consensusParams.vDeployments[id].nTimeout); rv.pushKV("since", VersionBitsTipStateSinceHeight(consensusParams, id)); - if (THRESHOLD_STARTED == thresholdState) + if (ThresholdState::STARTED == thresholdState) { UniValue statsUV(UniValue::VOBJ); BIP9Stats statsStruct = VersionBitsTipStatistics(consensusParams, id); @@ -1607,13 +1607,17 @@ UniValue savemempool(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "savemempool\n" - "\nDumps the mempool to disk.\n" + "\nDumps the mempool to disk. 
It will fail until the previous dump is fully loaded.\n" "\nExamples:\n" + HelpExampleCli("savemempool", "") + HelpExampleRpc("savemempool", "") ); } + if (!g_is_mempool_loaded) { + throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet"); + } + if (!DumpMempool()) { throw JSONRPCError(RPC_MISC_ERROR, "Unable to dump mempool to disk"); } diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 0eeb3f98b3..e12685da65 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -40,6 +40,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "settxfee", 0, "amount" }, { "getreceivedbyaddress", 1, "minconf" }, { "getreceivedbyaccount", 1, "minconf" }, + { "getreceivedbylabel", 1, "minconf" }, { "listreceivedbyaddress", 0, "minconf" }, { "listreceivedbyaddress", 1, "include_empty" }, { "listreceivedbyaddress", 2, "include_watchonly" }, @@ -47,6 +48,9 @@ static const CRPCConvertParam vRPCConvertParams[] = { "listreceivedbyaccount", 0, "minconf" }, { "listreceivedbyaccount", 1, "include_empty" }, { "listreceivedbyaccount", 2, "include_watchonly" }, + { "listreceivedbylabel", 0, "minconf" }, + { "listreceivedbylabel", 1, "include_empty" }, + { "listreceivedbylabel", 2, "include_watchonly" }, { "getbalance", 1, "minconf" }, { "getbalance", 2, "include_watchonly" }, { "getblockhash", 0, "height" }, diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 0537628763..06882c0dfd 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -532,7 +532,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request) pblock->nNonce = 0; // NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration - const bool fPreSegWit = (THRESHOLD_ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache)); + const bool fPreSegWit = (ThresholdState::ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache)); UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); @@ -593,15 +593,15 @@ UniValue getblocktemplate(const JSONRPCRequest& request) Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); switch (state) { - case THRESHOLD_DEFINED: - case THRESHOLD_FAILED: + case ThresholdState::DEFINED: + case ThresholdState::FAILED: // Not exposed to GBT at all break; - case THRESHOLD_LOCKED_IN: + case ThresholdState::LOCKED_IN: // Ensure bit is set in block version pblock->nVersion |= VersionBitsMask(consensusParams, pos); // FALL THROUGH to get vbavailable set... - case THRESHOLD_STARTED: + case ThresholdState::STARTED: { const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.pushKV(gbt_vb_name(pos), consensusParams.vDeployments[pos].bit); @@ -613,7 +613,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request) } break; } - case THRESHOLD_ACTIVE: + case ThresholdState::ACTIVE: { // Add to rules only const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; diff --git a/src/rpc/protocol.h b/src/rpc/protocol.h index 4051b3d603..4a265735d2 100644 --- a/src/rpc/protocol.h +++ b/src/rpc/protocol.h @@ -76,7 +76,7 @@ enum RPCErrorCode //! Wallet errors RPC_WALLET_ERROR = -4, //!< Unspecified problem with wallet (key not found etc.) 
RPC_WALLET_INSUFFICIENT_FUNDS = -6, //!< Not enough funds in wallet or account - RPC_WALLET_INVALID_ACCOUNT_NAME = -11, //!< Invalid account name + RPC_WALLET_INVALID_LABEL_NAME = -11, //!< Invalid label name RPC_WALLET_KEYPOOL_RAN_OUT = -12, //!< Keypool ran out, call keypoolrefill first RPC_WALLET_UNLOCK_NEEDED = -13, //!< Enter the wallet passphrase with walletpassphrase first RPC_WALLET_PASSPHRASE_INCORRECT = -14, //!< The wallet passphrase entered was incorrect @@ -85,6 +85,9 @@ enum RPCErrorCode RPC_WALLET_ALREADY_UNLOCKED = -17, //!< Wallet is already unlocked RPC_WALLET_NOT_FOUND = -18, //!< Invalid wallet specified RPC_WALLET_NOT_SPECIFIED = -19, //!< No wallet specified (error when there are multiple wallets loaded) + + //! Backwards compatible aliases + RPC_WALLET_INVALID_ACCOUNT_NAME = RPC_WALLET_INVALID_LABEL_NAME, }; UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id); diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 20bfd3f355..77040f75fd 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1023,18 +1023,18 @@ UniValue signrawtransaction(const JSONRPCRequest& request) new_request.params.push_back(request.params[1]); new_request.params.push_back(request.params[3]); return signrawtransactionwithkey(new_request); - } - // Otherwise sign with the wallet which does not take a privkeys parameter + } else { #ifdef ENABLE_WALLET - else { + // Otherwise sign with the wallet which does not take a privkeys parameter new_request.params.push_back(request.params[0]); new_request.params.push_back(request.params[1]); new_request.params.push_back(request.params[3]); return signrawtransactionwithwallet(new_request); - } +#else + // If we have made it this far, then wallet is disabled and no private keys were given, so fail here. + throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available."); #endif - // If we have made it this far, then wallet is disabled and no private keys were given, so fail here. 
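The RPC_WALLET_INVALID_ACCOUNT_NAME alias above keeps existing code compiling while the canonical name moves to RPC_WALLET_INVALID_LABEL_NAME. A minimal standalone sketch of the pattern, using an invented DemoErrorCode enum rather than the real RPCErrorCode:

    #include <cassert>

    enum DemoErrorCode  // illustrative enum, not the real RPCErrorCode
    {
        DEMO_INVALID_LABEL_NAME = -11,
        // Backwards compatible alias: a second name for the same value
        DEMO_INVALID_ACCOUNT_NAME = DEMO_INVALID_LABEL_NAME,
    };

    int main()
    {
        static_assert(DEMO_INVALID_ACCOUNT_NAME == DEMO_INVALID_LABEL_NAME, "same value");
        assert(static_cast<int>(DEMO_INVALID_ACCOUNT_NAME) == -11);
        return 0;
    }

Callers that still throw or test against the old enumerator keep working, and both names serialize to the same numeric error code on the wire.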
- throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available."); + } } UniValue sendrawtransaction(const JSONRPCRequest& request) diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 54995ef000..c7c3b1f0d3 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -367,7 +367,11 @@ void JSONRPCRequest::parse(const UniValue& valRequest) if (!valMethod.isStr()) throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string"); strMethod = valMethod.get_str(); - LogPrint(BCLog::RPC, "ThreadRPCServer method=%s\n", SanitizeString(strMethod)); + if (fLogIPs) + LogPrint(BCLog::RPC, "ThreadRPCServer method=%s user=%s peeraddr=%s\n", SanitizeString(strMethod), + this->authUser, this->peerAddr); + else + LogPrint(BCLog::RPC, "ThreadRPCServer method=%s user=%s\n", SanitizeString(strMethod), this->authUser); // Parse params UniValue valParams = find_value(request, "params"); diff --git a/src/rpc/server.h b/src/rpc/server.h index 1ab2b70816..373914885c 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h @@ -45,6 +45,7 @@ public: bool fHelp; std::string URI; std::string authUser; + std::string peerAddr; JSONRPCRequest() : id(NullUniValue), params(NullUniValue), fHelp(false) {} void parse(const UniValue& valRequest); diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index ffaba393c0..07b2292d46 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -219,7 +219,7 @@ bool static CheckPubKeyEncoding(const valtype &vchPubKey, unsigned int flags, co return set_error(serror, SCRIPT_ERR_PUBKEYTYPE); } // Only compressed keys are accepted in segwit - if ((flags & SCRIPT_VERIFY_WITNESS_PUBKEYTYPE) != 0 && sigversion == SIGVERSION_WITNESS_V0 && !IsCompressedPubKey(vchPubKey)) { + if ((flags & SCRIPT_VERIFY_WITNESS_PUBKEYTYPE) != 0 && sigversion == SigVersion::WITNESS_V0 && !IsCompressedPubKey(vchPubKey)) { return set_error(serror, SCRIPT_ERR_WITNESS_PUBKEYTYPE); } return true; @@ -443,7 +443,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& if (stack.size() < 1) return set_error(serror, SCRIPT_ERR_UNBALANCED_CONDITIONAL); valtype& vch = stacktop(-1); - if (sigversion == SIGVERSION_WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { + if (sigversion == SigVersion::WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { if (vch.size() > 1) return set_error(serror, SCRIPT_ERR_MINIMALIF); if (vch.size() == 1 && vch[0] != 1) @@ -890,7 +890,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& CScript scriptCode(pbegincodehash, pend); // Drop the signature in pre-segwit scripts but not segwit scripts - if (sigversion == SIGVERSION_BASE) { + if (sigversion == SigVersion::BASE) { scriptCode.FindAndDelete(CScript(vchSig)); } @@ -954,7 +954,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& for (int k = 0; k < nSigsCount; k++) { valtype& vchSig = stacktop(-isig-k); - if (sigversion == SIGVERSION_BASE) { + if (sigversion == SigVersion::BASE) { scriptCode.FindAndDelete(CScript(vchSig)); } } @@ -1182,7 +1182,7 @@ uint256 SignatureHash(const CScript& scriptCode, const CTransaction& txTo, unsig { assert(nIn < txTo.vin.size()); - if (sigversion == SIGVERSION_WITNESS_V0) { + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; uint256 hashOutputs; @@ -1396,7 +1396,7 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, return set_error(serror, SCRIPT_ERR_PUSH_SIZE); } - if (!EvalScript(stack, scriptPubKey, flags, 
checker, SIGVERSION_WITNESS_V0, serror)) { + if (!EvalScript(stack, scriptPubKey, flags, checker, SigVersion::WITNESS_V0, serror)) { return false; } @@ -1423,12 +1423,12 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C } std::vector<std::vector<unsigned char> > stack, stackCopy; - if (!EvalScript(stack, scriptSig, flags, checker, SIGVERSION_BASE, serror)) + if (!EvalScript(stack, scriptSig, flags, checker, SigVersion::BASE, serror)) // serror is set return false; if (flags & SCRIPT_VERIFY_P2SH) stackCopy = stack; - if (!EvalScript(stack, scriptPubKey, flags, checker, SIGVERSION_BASE, serror)) + if (!EvalScript(stack, scriptPubKey, flags, checker, SigVersion::BASE, serror)) // serror is set return false; if (stack.empty()) @@ -1474,7 +1474,7 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C CScript pubKey2(pubKeySerialized.begin(), pubKeySerialized.end()); popstack(stack); - if (!EvalScript(stack, pubKey2, flags, checker, SIGVERSION_BASE, serror)) + if (!EvalScript(stack, pubKey2, flags, checker, SigVersion::BASE, serror)) // serror is set return false; if (stack.empty()) diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 4dad6b44c5..bb7750d783 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -123,10 +123,10 @@ struct PrecomputedTransactionData explicit PrecomputedTransactionData(const CTransaction& tx); }; -enum SigVersion +enum class SigVersion { - SIGVERSION_BASE = 0, - SIGVERSION_WITNESS_V0 = 1, + BASE = 0, + WITNESS_V0 = 1, }; uint256 SignatureHash(const CScript &scriptCode, const CTransaction& txTo, unsigned int nIn, int nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr); diff --git a/src/script/ismine.cpp b/src/script/ismine.cpp index 35d794b983..05bc5e9bd6 100644 --- a/src/script/ismine.cpp +++ b/src/script/ismine.cpp @@ -61,7 +61,7 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& break; case TX_PUBKEY: keyID = CPubKey(vSolutions[0]).GetID(); - if (sigversion != SIGVERSION_BASE && vSolutions[0].size() != 33) { + if (sigversion != SigVersion::BASE && vSolutions[0].size() != 33) { isInvalid = true; return ISMINE_NO; } @@ -76,14 +76,14 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& // This also applies to the P2WSH case. 
break; } - isminetype ret = ::IsMine(keystore, GetScriptForDestination(CKeyID(uint160(vSolutions[0]))), isInvalid, SIGVERSION_WITNESS_V0); + isminetype ret = ::IsMine(keystore, GetScriptForDestination(CKeyID(uint160(vSolutions[0]))), isInvalid, SigVersion::WITNESS_V0); if (ret == ISMINE_SPENDABLE || ret == ISMINE_WATCH_SOLVABLE || (ret == ISMINE_NO && isInvalid)) return ret; break; } case TX_PUBKEYHASH: keyID = CKeyID(uint160(vSolutions[0])); - if (sigversion != SIGVERSION_BASE) { + if (sigversion != SigVersion::BASE) { CPubKey pubkey; if (keystore.GetPubKey(keyID, pubkey) && !pubkey.IsCompressed()) { isInvalid = true; @@ -114,7 +114,7 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& CScriptID scriptID = CScriptID(hash); CScript subscript; if (keystore.GetCScript(scriptID, subscript)) { - isminetype ret = IsMine(keystore, subscript, isInvalid, SIGVERSION_WITNESS_V0); + isminetype ret = IsMine(keystore, subscript, isInvalid, SigVersion::WITNESS_V0); if (ret == ISMINE_SPENDABLE || ret == ISMINE_WATCH_SOLVABLE || (ret == ISMINE_NO && isInvalid)) return ret; } @@ -129,7 +129,7 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& // them) enable spend-out-from-under-you attacks, especially // in shared-wallet situations. std::vector<valtype> keys(vSolutions.begin()+1, vSolutions.begin()+vSolutions.size()-1); - if (sigversion != SIGVERSION_BASE) { + if (sigversion != SigVersion::BASE) { for (size_t i = 0; i < keys.size(); i++) { if (keys[i].size() != 33) { isInvalid = true; diff --git a/src/script/ismine.h b/src/script/ismine.h index c1338c3a8e..f93a66e35a 100644 --- a/src/script/ismine.h +++ b/src/script/ismine.h @@ -31,11 +31,11 @@ typedef uint8_t isminefilter; /* isInvalid becomes true when the script is found invalid by consensus or policy. This will terminate the recursion * and return ISMINE_NO immediately, as an invalid script should never be considered as "mine". This is needed as * different SIGVERSION may have different network rules. 
Currently the only use of isInvalid is indicate uncompressed - * keys in SIGVERSION_WITNESS_V0 script, but could also be used in similar cases in the future + * keys in SigVersion::WITNESS_V0 script, but could also be used in similar cases in the future */ -isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, bool& isInvalid, SigVersion = SIGVERSION_BASE); -isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, SigVersion = SIGVERSION_BASE); -isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, bool& isInvalid, SigVersion = SIGVERSION_BASE); -isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, SigVersion = SIGVERSION_BASE); +isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, bool& isInvalid, SigVersion = SigVersion::BASE); +isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, SigVersion = SigVersion::BASE); +isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, bool& isInvalid, SigVersion = SigVersion::BASE); +isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, SigVersion = SigVersion::BASE); #endif // BITCOIN_SCRIPT_ISMINE_H diff --git a/src/script/sign.cpp b/src/script/sign.cpp index baa712dc2d..910bb39ce6 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -6,7 +6,6 @@ #include <script/sign.h> #include <key.h> -#include <keystore.h> #include <policy/policy.h> #include <primitives/transaction.h> #include <script/standard.h> @@ -15,16 +14,16 @@ typedef std::vector<unsigned char> valtype; -TransactionSignatureCreator::TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : BaseSignatureCreator(keystoreIn), txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn) {} +TransactionSignatureCreator::TransactionSignatureCreator(const SigningProvider* provider, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : BaseSignatureCreator(provider), txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn) {} bool TransactionSignatureCreator::CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& address, const CScript& scriptCode, SigVersion sigversion) const { CKey key; - if (!keystore->GetKey(address, key)) + if (!m_provider->GetKey(address, key)) return false; // Signing with uncompressed keys is disabled in witness scripts - if (sigversion == SIGVERSION_WITNESS_V0 && !key.IsCompressed()) + if (sigversion == SigVersion::WITNESS_V0 && !key.IsCompressed()) return false; uint256 hash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion); @@ -91,12 +90,12 @@ static bool SignStep(const BaseSignatureCreator& creator, const CScript& scriptP else { CPubKey vch; - creator.KeyStore().GetPubKey(keyID, vch); + creator.Provider().GetPubKey(keyID, vch); ret.push_back(ToByteVector(vch)); } return true; case TX_SCRIPTHASH: - if (creator.KeyStore().GetCScript(uint160(vSolutions[0]), scriptRet)) { + if (creator.Provider().GetCScript(uint160(vSolutions[0]), scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); return true; } @@ -112,7 +111,7 @@ static bool SignStep(const BaseSignatureCreator& creator, const CScript& scriptP case TX_WITNESS_V0_SCRIPTHASH: CRIPEMD160().Write(&vSolutions[0][0], vSolutions[0].size()).Finalize(h160.begin()); - if (creator.KeyStore().GetCScript(h160, 
scriptRet)) { + if (creator.Provider().GetCScript(h160, scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); return true; } @@ -142,7 +141,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu { std::vector<valtype> result; txnouttype whichType; - bool solved = SignStep(creator, fromPubKey, result, whichType, SIGVERSION_BASE); + bool solved = SignStep(creator, fromPubKey, result, whichType, SigVersion::BASE); bool P2SH = false; CScript subscript; sigdata.scriptWitness.stack.clear(); @@ -153,7 +152,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu // the final scriptSig is the signatures from that // and then the serialized subscript: subscript = CScript(result[0].begin(), result[0].end()); - solved = solved && SignStep(creator, subscript, result, whichType, SIGVERSION_BASE) && whichType != TX_SCRIPTHASH; + solved = solved && SignStep(creator, subscript, result, whichType, SigVersion::BASE) && whichType != TX_SCRIPTHASH; P2SH = true; } @@ -162,7 +161,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu CScript witnessscript; witnessscript << OP_DUP << OP_HASH160 << ToByteVector(result[0]) << OP_EQUALVERIFY << OP_CHECKSIG; txnouttype subType; - solved = solved && SignStep(creator, witnessscript, result, subType, SIGVERSION_WITNESS_V0); + solved = solved && SignStep(creator, witnessscript, result, subType, SigVersion::WITNESS_V0); sigdata.scriptWitness.stack = result; result.clear(); } @@ -170,7 +169,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu { CScript witnessscript(result[0].begin(), result[0].end()); txnouttype subType; - solved = solved && SignStep(creator, witnessscript, result, subType, SIGVERSION_WITNESS_V0) && subType != TX_SCRIPTHASH && subType != TX_WITNESS_V0_SCRIPTHASH && subType != TX_WITNESS_V0_KEYHASH; + solved = solved && SignStep(creator, witnessscript, result, subType, SigVersion::WITNESS_V0) && subType != TX_SCRIPTHASH && subType != TX_WITNESS_V0_SCRIPTHASH && subType != TX_WITNESS_V0_KEYHASH; result.push_back(std::vector<unsigned char>(witnessscript.begin(), witnessscript.end())); sigdata.scriptWitness.stack = result; result.clear(); @@ -206,12 +205,12 @@ void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const Signatur UpdateInput(tx.vin[nIn], data); } -bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType) +bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType) { assert(nIn < txTo.vin.size()); CTransaction txToConst(txTo); - TransactionSignatureCreator creator(&keystore, &txToConst, nIn, amount, nHashType); + TransactionSignatureCreator creator(&provider, &txToConst, nIn, amount, nHashType); SignatureData sigdata; bool ret = ProduceSignature(creator, fromPubKey, sigdata); @@ -219,14 +218,14 @@ bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutabl return ret; } -bool SignSignature(const CKeyStore &keystore, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType) +bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType) { assert(nIn < txTo.vin.size()); CTxIn& txin = txTo.vin[nIn]; assert(txin.prevout.n < txFrom.vout.size()); const 
CTxOut& txout = txFrom.vout[txin.prevout.n]; - return SignSignature(keystore, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType); + return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType); } static std::vector<valtype> CombineMultisig(const CScript& scriptPubKey, const BaseSignatureChecker& checker, @@ -294,7 +293,7 @@ struct Stacks Stacks() {} explicit Stacks(const std::vector<valtype>& scriptSigStack_) : script(scriptSigStack_), witness() {} explicit Stacks(const SignatureData& data) : witness(data.scriptWitness.stack) { - EvalScript(script, data.scriptSig, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SIGVERSION_BASE); + EvalScript(script, data.scriptSig, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SigVersion::BASE); } SignatureData Output() const { @@ -370,7 +369,7 @@ static Stacks CombineSignatures(const CScript& scriptPubKey, const BaseSignature sigs2.witness.pop_back(); sigs2.script = sigs2.witness; sigs2.witness.clear(); - Stacks result = CombineSignatures(pubKey2, checker, txType2, vSolutions2, sigs1, sigs2, SIGVERSION_WITNESS_V0); + Stacks result = CombineSignatures(pubKey2, checker, txType2, vSolutions2, sigs1, sigs2, SigVersion::WITNESS_V0); result.witness = result.script; result.script.clear(); result.witness.push_back(valtype(pubKey2.begin(), pubKey2.end())); @@ -388,7 +387,7 @@ SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignature std::vector<std::vector<unsigned char> > vSolutions; Solver(scriptPubKey, txType, vSolutions); - return CombineSignatures(scriptPubKey, checker, txType, vSolutions, Stacks(scriptSig1), Stacks(scriptSig2), SIGVERSION_BASE).Output(); + return CombineSignatures(scriptPubKey, checker, txType, vSolutions, Stacks(scriptSig1), Stacks(scriptSig2), SigVersion::BASE).Output(); } namespace { @@ -427,13 +426,13 @@ bool DummySignatureCreator::CreateSig(std::vector<unsigned char>& vchSig, const return true; } -bool IsSolvable(const CKeyStore& store, const CScript& script) +bool IsSolvable(const SigningProvider& provider, const CScript& script) { // This check is to make sure that the script we created can actually be solved for and signed by us // if we were to have the private keys. This is just to make sure that the script is valid and that, // if found in a transaction, we would still accept and relay that transaction. In particular, // it will reject witness outputs that require signing with an uncompressed public key. - DummySignatureCreator creator(&store); + DummySignatureCreator creator(&provider); SignatureData sigs; // Make sure that STANDARD_SCRIPT_VERIFY_FLAGS includes SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, the most // important property this function is designed to test for. diff --git a/src/script/sign.h b/src/script/sign.h index 2c749521cd..c301f0544f 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -8,21 +8,32 @@ #include <script/interpreter.h> +class CKey; class CKeyID; -class CKeyStore; class CScript; +class CScriptID; class CTransaction; struct CMutableTransaction; +/** An interface to be implemented by keystores that support signing. */ +class SigningProvider +{ +public: + virtual ~SigningProvider() {} + virtual bool GetCScript(const CScriptID &scriptid, CScript& script) const =0; + virtual bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const =0; + virtual bool GetKey(const CKeyID &address, CKey& key) const =0; +}; + /** Virtual base class for signature creators. 
*/ class BaseSignatureCreator { protected: - const CKeyStore* keystore; + const SigningProvider* m_provider; public: - explicit BaseSignatureCreator(const CKeyStore* keystoreIn) : keystore(keystoreIn) {} - const CKeyStore& KeyStore() const { return *keystore; }; + explicit BaseSignatureCreator(const SigningProvider* provider) : m_provider(provider) {} + const SigningProvider& Provider() const { return *m_provider; } virtual ~BaseSignatureCreator() {} virtual const BaseSignatureChecker& Checker() const =0; @@ -39,7 +50,7 @@ class TransactionSignatureCreator : public BaseSignatureCreator { const TransactionSignatureChecker checker; public: - TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL); + TransactionSignatureCreator(const SigningProvider* provider, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL); const BaseSignatureChecker& Checker() const override { return checker; } bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override; }; @@ -48,13 +59,13 @@ class MutableTransactionSignatureCreator : public TransactionSignatureCreator { CTransaction tx; public: - MutableTransactionSignatureCreator(const CKeyStore* keystoreIn, const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : TransactionSignatureCreator(keystoreIn, &tx, nInIn, amountIn, nHashTypeIn), tx(*txToIn) {} + MutableTransactionSignatureCreator(const SigningProvider* provider, const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : TransactionSignatureCreator(provider, &tx, nInIn, amountIn, nHashTypeIn), tx(*txToIn) {} }; /** A signature creator that just produces 72-byte empty signatures. */ class DummySignatureCreator : public BaseSignatureCreator { public: - explicit DummySignatureCreator(const CKeyStore* keystoreIn) : BaseSignatureCreator(keystoreIn) {} + explicit DummySignatureCreator(const SigningProvider* provider) : BaseSignatureCreator(provider) {} const BaseSignatureChecker& Checker() const override; bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override; }; @@ -71,8 +82,8 @@ struct SignatureData { bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& scriptPubKey, SignatureData& sigdata); /** Produce a script signature for a transaction. */ -bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType); -bool SignSignature(const CKeyStore& keystore, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType); +bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType); +bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType); /** Combine two script signatures using a generic signature checker, intelligently, possibly with OP_0 placeholders. 
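The SigningProvider interface introduced above narrows what the signing code needs from a keystore to three lookups. A hedged sketch of a possible implementation against in-tree types; MapSigningProvider and its members are invented for illustration and are not part of this patch:

    #include <script/sign.h>
    #include <script/standard.h>  // CScriptID
    #include <key.h>
    #include <pubkey.h>
    #include <map>

    // A trivial in-memory provider: hands out keys and redeem scripts by hash.
    class MapSigningProvider final : public SigningProvider
    {
        std::map<CScriptID, CScript> m_scripts;
        std::map<CKeyID, CKey> m_keys;

    public:
        void AddKey(const CKey& key) { m_keys[key.GetPubKey().GetID()] = key; }
        void AddCScript(const CScript& script) { m_scripts[CScriptID(script)] = script; }

        bool GetCScript(const CScriptID& scriptid, CScript& script) const override
        {
            auto it = m_scripts.find(scriptid);
            if (it == m_scripts.end()) return false;
            script = it->second;
            return true;
        }
        bool GetPubKey(const CKeyID& address, CPubKey& pubkey) const override
        {
            auto it = m_keys.find(address);
            if (it == m_keys.end()) return false;
            pubkey = it->second.GetPubKey();
            return true;
        }
        bool GetKey(const CKeyID& address, CKey& key) const override
        {
            auto it = m_keys.find(address);
            if (it == m_keys.end()) return false;
            key = it->second;
            return true;
        }
    };

Anything that satisfies these three lookups can now be handed to SignSignature() or IsSolvable() without deriving from the much larger CKeyStore.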
*/ SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignatureChecker& checker, const SignatureData& scriptSig1, const SignatureData& scriptSig2); @@ -84,8 +95,8 @@ void UpdateInput(CTxIn& input, const SignatureData& data); /* Check whether we know how to sign for an output like this, assuming we * have all private keys. While this function does not need private keys, the passed - * keystore is used to look up public keys and redeemscripts by hash. + * provider is used to look up public keys and redeemscripts by hash. * Solvability is unrelated to whether we consider this output to be ours. */ -bool IsSolvable(const CKeyStore& store, const CScript& script); +bool IsSolvable(const SigningProvider& provider, const CScript& script); #endif // BITCOIN_SCRIPT_SIGN_H diff --git a/src/serialize.h b/src/serialize.h index 91da6b0f80..247e915298 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -59,6 +59,12 @@ inline T* NCONST_PTR(const T* val) return const_cast<T*>(val); } +//! Safely convert odd char pointer types to standard ones. +inline char* CharCast(char* c) { return c; } +inline char* CharCast(unsigned char* c) { return (char*)c; } +inline const char* CharCast(const char* c) { return c; } +inline const char* CharCast(const unsigned char* c) { return (const char*)c; } + /* * Lowest-level serialization and conversion. * @note Sizes of these types are verified in the tests @@ -177,6 +183,8 @@ template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_wri template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); } template<typename Stream> inline void Serialize(Stream& s, float a ) { ser_writedata32(s, ser_float_to_uint32(a)); } template<typename Stream> inline void Serialize(Stream& s, double a ) { ser_writedata64(s, ser_double_to_uint64(a)); } +template<typename Stream, int N> inline void Serialize(Stream& s, const char (&a)[N]) { s.write(a, N); } +template<typename Stream, int N> inline void Serialize(Stream& s, const unsigned char (&a)[N]) { s.write(CharCast(a), N); } template<typename Stream> inline void Unserialize(Stream& s, char& a ) { a = ser_readdata8(s); } // TODO Get rid of bare char template<typename Stream> inline void Unserialize(Stream& s, int8_t& a ) { a = ser_readdata8(s); } @@ -189,6 +197,8 @@ template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); } template<typename Stream> inline void Unserialize(Stream& s, float& a ) { a = ser_uint32_to_float(ser_readdata32(s)); } template<typename Stream> inline void Unserialize(Stream& s, double& a ) { a = ser_uint64_to_double(ser_readdata64(s)); } +template<typename Stream, int N> inline void Unserialize(Stream& s, char (&a)[N]) { s.read(a, N); } +template<typename Stream, int N> inline void Unserialize(Stream& s, unsigned char (&a)[N]) { s.read(CharCast(a), N); } template<typename Stream> inline void Serialize(Stream& s, bool a) { char f=a; ser_writedata8(s, f); } template<typename Stream> inline void Unserialize(Stream& s, bool& a) { char f=ser_readdata8(s); a=f; } diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index 1401188a2b..f10fd07c63 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -47,7 +47,9 @@ Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in) { // Start with 
one free chunk that covers the entire arena - chunks_free.emplace(base, size_in); + auto it = size_to_free_chunk.emplace(size_in, base); + chunks_free.emplace(base, it); + chunks_free_end.emplace(base + size_in, it); } Arena::~Arena() @@ -63,26 +65,30 @@ void* Arena::alloc(size_t size) if (size == 0) return nullptr; - // Pick a large enough free-chunk - auto it = std::find_if(chunks_free.begin(), chunks_free.end(), - [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; }); - if (it == chunks_free.end()) + // Pick a large enough free-chunk. Returns an iterator pointing to the first element that is not less than key. + // This allocation strategy is best-fit. According to "Dynamic Storage Allocation: A Survey and Critical Review", + // Wilson et. al. 1995, http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf, best-fit and first-fit + // policies seem to work well in practice. + auto size_ptr_it = size_to_free_chunk.lower_bound(size); + if (size_ptr_it == size_to_free_chunk.end()) return nullptr; // Create the used-chunk, taking its space from the end of the free-chunk - auto alloced = chunks_used.emplace(it->first + it->second - size, size).first; - if (!(it->second -= size)) - chunks_free.erase(it); - return reinterpret_cast<void*>(alloced->first); -} - -/* extend the Iterator if other begins at its end */ -template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) { - if (it->first + it->second == other.first) { - it->second += other.second; - return true; + const size_t size_remaining = size_ptr_it->first - size; + auto alloced = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first; + chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first); + if (size_ptr_it->first == size) { + // whole chunk is used up + chunks_free.erase(size_ptr_it->second); + } else { + // still some memory left in the chunk + auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second); + chunks_free[size_ptr_it->second] = it_remaining; + chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining); } - return false; + size_to_free_chunk.erase(size_ptr_it); + + return reinterpret_cast<void*>(alloced->first); } void Arena::free(void *ptr) @@ -97,16 +103,30 @@ void Arena::free(void *ptr) if (i == chunks_used.end()) { throw std::runtime_error("Arena: invalid or double free"); } - auto freed = *i; + std::pair<char*, size_t> freed = *i; chunks_used.erase(i); - // Add space to free map, coalescing contiguous chunks - auto next = chunks_free.upper_bound(freed.first); - auto prev = (next == chunks_free.begin()) ? 
chunks_free.end() : std::prev(next); - if (prev == chunks_free.end() || !extend(prev, freed)) - prev = chunks_free.emplace_hint(next, freed); - if (next != chunks_free.end() && extend(prev, *next)) + // coalesce freed with previous chunk + auto prev = chunks_free_end.find(freed.first); + if (prev != chunks_free_end.end()) { + freed.first -= prev->second->first; + freed.second += prev->second->first; + size_to_free_chunk.erase(prev->second); + chunks_free_end.erase(prev); + } + + // coalesce freed with chunk after freed + auto next = chunks_free.find(freed.first + freed.second); + if (next != chunks_free.end()) { + freed.second += next->second->first; + size_to_free_chunk.erase(next->second); chunks_free.erase(next); + } + + // Add/set space with coalesced free chunk + auto it = size_to_free_chunk.emplace(freed.second, freed.first); + chunks_free[freed.first] = it; + chunks_free_end[freed.first + freed.second] = it; } Arena::Stats Arena::stats() const @@ -115,7 +135,7 @@ Arena::Stats Arena::stats() const for (const auto& chunk: chunks_used) r.used += chunk.second; for (const auto& chunk: chunks_free) - r.free += chunk.second; + r.free += chunk.second->first; r.total = r.used + r.free; return r; } diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h index fc85e6c73c..ccfae16701 100644 --- a/src/support/lockedpool.h +++ b/src/support/lockedpool.h @@ -10,6 +10,7 @@ #include <map> #include <mutex> #include <memory> +#include <unordered_map> /** * OS-dependent allocation and deallocation of locked/pinned memory pages. @@ -88,11 +89,19 @@ public: */ bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; } private: - /** Map of chunk address to chunk information. This class makes use of the - * sorted order to merge previous and next chunks during deallocation. 
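The Arena::alloc/free rewrite above replaces the linear find_if scan with a size-ordered multimap, so the smallest free chunk that still fits is found with lower_bound. A standalone sketch of just that best-fit bookkeeping (illustration only, not the Bitcoin Core class; it tracks addresses without owning the memory):

```cpp
// Best-fit free-chunk lookup in O(log n), in the spirit of the Arena::alloc
// rewrite above: free chunks are kept in a multimap keyed by size.
#include <cstddef>
#include <map>

struct BestFitFreeList {
    std::multimap<size_t, char*> size_to_free_chunk; // chunk size -> chunk base

    // Return the start of an allocation of `size` bytes carved from the end
    // of the smallest free chunk that fits, or nullptr if none fits.
    char* alloc(size_t size)
    {
        auto it = size_to_free_chunk.lower_bound(size); // first chunk with size >= requested
        if (it == size_to_free_chunk.end()) return nullptr;

        const size_t remaining = it->first - size;
        char* const base = it->second;
        size_to_free_chunk.erase(it);
        if (remaining > 0) {
            // The front part of the chunk stays on the free list.
            size_to_free_chunk.emplace(remaining, base);
        }
        return base + remaining; // allocation taken from the end, as in Arena::alloc
    }
};
```

The real Arena additionally keeps chunks_free and chunks_free_end keyed by chunk begin/end addresses (see the lockedpool.h hunk below), so free() can coalesce with both neighbours via hash-map lookups instead of relying on map ordering.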
- */ - std::map<char*, size_t> chunks_free; - std::map<char*, size_t> chunks_used; + typedef std::multimap<size_t, char*> SizeToChunkSortedMap; + /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */ + SizeToChunkSortedMap size_to_free_chunk; + + typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap; + /** Map from begin of free chunk to its node in size_to_free_chunk */ + ChunkToSizeMap chunks_free; + /** Map from end of free chunk to its node in size_to_free_chunk */ + ChunkToSizeMap chunks_free_end; + + /** Map from begin of used chunk to its size */ + std::unordered_map<char*, size_t> chunks_used; + /** Base address of arena */ char* base; /** End address of arena */ diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp index 55fdd2c071..32b408838c 100644 --- a/src/test/blockchain_tests.cpp +++ b/src/test/blockchain_tests.cpp @@ -54,7 +54,7 @@ void TestDifficulty(uint32_t nbits, double expected_difficulty) RejectDifficultyMismatch(difficulty, expected_difficulty); } -BOOST_FIXTURE_TEST_SUITE(blockchain_difficulty_tests, BasicTestingSetup) +BOOST_FIXTURE_TEST_SUITE(blockchain_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(get_difficulty_for_very_low_target) { diff --git a/src/test/compress_tests.cpp b/src/test/compress_tests.cpp index 3c26013622..127cc154df 100644 --- a/src/test/compress_tests.cpp +++ b/src/test/compress_tests.cpp @@ -25,16 +25,16 @@ BOOST_FIXTURE_TEST_SUITE(compress_tests, BasicTestingSetup) bool static TestEncode(uint64_t in) { - return in == CTxOutCompressor::DecompressAmount(CTxOutCompressor::CompressAmount(in)); + return in == DecompressAmount(CompressAmount(in)); } bool static TestDecode(uint64_t in) { - return in == CTxOutCompressor::CompressAmount(CTxOutCompressor::DecompressAmount(in)); + return in == CompressAmount(DecompressAmount(in)); } bool static TestPair(uint64_t dec, uint64_t enc) { - return CTxOutCompressor::CompressAmount(dec) == enc && - CTxOutCompressor::DecompressAmount(enc) == dec; + return CompressAmount(dec) == enc && + DecompressAmount(enc) == dec; } BOOST_AUTO_TEST_CASE(compress_amounts) diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json index f8a1347c31..abb46fe533 100644 --- a/src/test/data/tx_invalid.json +++ b/src/test/data/tx_invalid.json @@ -321,7 +321,7 @@ ["where the pubkey is obtained through key recovery with sig and the wrong sighash."], ["This is to show that FindAndDelete is applied only to non-segwit scripts"], ["To show that the tests are 'correctly wrong', they should pass by modifying OP_CHECKSIG under interpreter.cpp"], -["by replacing (sigversion == SIGVERSION_BASE) with (sigversion != SIGVERSION_BASE)"], +["by replacing (sigversion == SigVersion::BASE) with (sigversion != SigVersion::BASE)"], ["Non-segwit: wrong sighash (without FindAndDelete) = 1ba1fe3bc90c5d1265460e684ce6774e324f0fabdf67619eda729e64e8b6bc08"], [[["f18783ace138abac5d3a7a5cf08e88fe6912f267ef936452e0c27d090621c169", 7000, "HASH160 0x14 0x0c746489e2d83cdbb5b90b432773342ba809c134 EQUAL", 200000]], 
"010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f1581b0000b64830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012103b12a1ec8428fc74166926318c15e17408fea82dbb157575e16a8c365f546248f4aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e01ffffffff0101000000000000000000000000", "P2SH,WITNESS"], @@ -332,7 +332,7 @@ ["Script is 2 CHECKMULTISIGVERIFY <sig1> <sig2> DROP"], ["52af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175"], ["Signature is 0 <sig1> <sig2> 2 <key1> <key2>"], -["Should pass by replacing (sigversion == SIGVERSION_BASE) with (sigversion != SIGVERSION_BASE) under OP_CHECKMULTISIG"], +["Should pass by replacing (sigversion == SigVersion::BASE) with (sigversion != SigVersion::BASE) under OP_CHECKMULTISIG"], ["Non-segwit: wrong sighash (without FindAndDelete) = 4bc6a53e8e16ef508c19e38bba08831daba85228b0211f323d4cb0999cf2a5e8"], [[["9628667ad48219a169b41b020800162287d2c0f713c04157e95c484a8dcb7592", 7000, "HASH160 0x14 0x5748407f5ca5cdca53ba30b79040260770c9ee1b EQUAL", 200000]], "01000000019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a662896581b0000fd6f01004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c039596015221023fd5dd42b44769c5653cbc5947ff30ab8871f240ad0c0e7432aefe84b5b4ff3421039d52178dbde360b83f19cf348deb04fa8360e1bf5634577be8e50fafc2b0e4ef4c9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175ffffffff0101000000000000000000000000", "P2SH,WITNESS"], diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp index 72d9d3d75c..b593f9633c 100644 --- a/src/test/multisig_tests.cpp +++ b/src/test/multisig_tests.cpp @@ -21,7 +21,7 @@ BOOST_FIXTURE_TEST_SUITE(multisig_tests, BasicTestingSetup) CScript sign_multisig(CScript scriptPubKey, std::vector<CKey> keys, CTransaction transaction, int whichIn) { - uint256 hash = SignatureHash(scriptPubKey, transaction, whichIn, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(scriptPubKey, transaction, whichIn, SIGHASH_ALL, 0, SigVersion::BASE); CScript result; result << OP_0; // CHECKMULTISIG bug workaround diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index e03234060d..6552613c04 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -64,7 +64,7 @@ public: CDataStream AddrmanToStream(CAddrManSerializationMock& _addrman) { CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION); - ssPeersIn << FLATDATA(Params().MessageStart()); + ssPeersIn << Params().MessageStart(); ssPeersIn << _addrman; std::string str = ssPeersIn.str(); std::vector<unsigned char> vchData(str.begin(), str.end()); @@ -110,7 +110,7 @@ BOOST_AUTO_TEST_CASE(caddrdb_read) BOOST_CHECK(addrman1.size() 
== 0); try { unsigned char pchMsgTmp[4]; - ssPeers1 >> FLATDATA(pchMsgTmp); + ssPeers1 >> pchMsgTmp; ssPeers1 >> addrman1; } catch (const std::exception& e) { exceptionThrown = true; @@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE(caddrdb_read_corrupted) BOOST_CHECK(addrman1.size() == 0); try { unsigned char pchMsgTmp[4]; - ssPeers1 >> FLATDATA(pchMsgTmp); + ssPeers1 >> pchMsgTmp; ssPeers1 >> addrman1; } catch (const std::exception& e) { exceptionThrown = true; diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp index 01c3a6cedd..fe6f10d845 100644 --- a/src/test/prevector_tests.cpp +++ b/src/test/prevector_tests.cpp @@ -13,7 +13,7 @@ #include <boost/test/unit_test.hpp> -BOOST_FIXTURE_TEST_SUITE(PrevectorTests, TestingSetup) +BOOST_FIXTURE_TEST_SUITE(prevector_tests, TestingSetup) template<unsigned int N, typename T> class prevector_tester { diff --git a/src/test/random_tests.cpp b/src/test/random_tests.cpp index 1ca5a53d72..623ed239f0 100644 --- a/src/test/random_tests.cpp +++ b/src/test/random_tests.cpp @@ -8,6 +8,9 @@ #include <boost/test/unit_test.hpp> +#include <random> +#include <algorithm> + BOOST_FIXTURE_TEST_SUITE(random_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(osrandom_tests) @@ -57,4 +60,23 @@ BOOST_AUTO_TEST_CASE(fastrandom_randbits) } } +/** Does-it-compile test for compatibility with standard C++11 RNG interface. */ +BOOST_AUTO_TEST_CASE(stdrandom_test) +{ + FastRandomContext ctx; + std::uniform_int_distribution<int> distribution(3, 9); + for (int i = 0; i < 100; ++i) { + int x = distribution(ctx); + BOOST_CHECK(x >= 3); + BOOST_CHECK(x <= 9); + + std::vector<int> test{1,2,3,4,5,6,7,8,9,10}; + std::shuffle(test.begin(), test.end(), ctx); + for (int j = 1; j <= 10; ++j) { + BOOST_CHECK(std::find(test.begin(), test.end(), j) != test.end()); + } + } + +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index c7a497f3a7..46a2d13745 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -267,10 +267,10 @@ struct KeyData } }; -enum WitnessMode { - WITNESS_NONE, - WITNESS_PKH, - WITNESS_SH +enum class WitnessMode { + NONE, + PKH, + SH }; class TestBuilder @@ -308,15 +308,15 @@ private: } public: - TestBuilder(const CScript& script_, const std::string& comment_, int flags_, bool P2SH = false, WitnessMode wm = WITNESS_NONE, int witnessversion = 0, CAmount nValue_ = 0) : script(script_), havePush(false), comment(comment_), flags(flags_), scriptError(SCRIPT_ERR_OK), nValue(nValue_) + TestBuilder(const CScript& script_, const std::string& comment_, int flags_, bool P2SH = false, WitnessMode wm = WitnessMode::NONE, int witnessversion = 0, CAmount nValue_ = 0) : script(script_), havePush(false), comment(comment_), flags(flags_), scriptError(SCRIPT_ERR_OK), nValue(nValue_) { CScript scriptPubKey = script; - if (wm == WITNESS_PKH) { + if (wm == WitnessMode::PKH) { uint160 hash; CHash160().Write(&script[1], script.size() - 1).Finalize(hash.begin()); script = CScript() << OP_DUP << OP_HASH160 << ToByteVector(hash) << OP_EQUALVERIFY << OP_CHECKSIG; scriptPubKey = CScript() << witnessversion << ToByteVector(hash); - } else if (wm == WITNESS_SH) { + } else if (wm == WitnessMode::SH) { witscript = scriptPubKey; uint256 hash; CSHA256().Write(&witscript[0], witscript.size()).Finalize(hash.begin()); @@ -361,7 +361,7 @@ public: return *this; } - TestBuilder& PushSig(const CKey& key, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SIGVERSION_BASE, CAmount amount = 
0) + TestBuilder& PushSig(const CKey& key, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SigVersion::BASE, CAmount amount = 0) { uint256 hash = SignatureHash(script, spendTx, 0, nHashType, amount, sigversion); std::vector<unsigned char> vchSig, r, s; @@ -379,7 +379,7 @@ public: return *this; } - TestBuilder& PushWitSig(const CKey& key, CAmount amount = -1, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SIGVERSION_WITNESS_V0) + TestBuilder& PushWitSig(const CKey& key, CAmount amount = -1, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SigVersion::WITNESS_V0) { if (amount == -1) amount = nValue; @@ -747,57 +747,57 @@ BOOST_AUTO_TEST_CASE(script_build) ).PushSig(keys.key0).PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH, + "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH, + "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2WSH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "Basic P2WSH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2WPKH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "Basic P2WPKH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH + "Basic P2SH(P2WSH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2SH(P2WPKH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH + "Basic P2SH(P2WPKH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH 
).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2WSH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "Basic P2WSH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2WPKH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "Basic P2WPKH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WITNESS_SH + "Basic P2SH(P2WSH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2SH(P2WPKH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WITNESS_PKH + "Basic P2SH(P2WPKH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2WSH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "Basic P2WSH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 0).PushWitSig(keys.key0, 1).PushWitRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2WPKH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH, + "Basic P2WPKH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH, 0, 0).PushWitSig(keys.key0, 1).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "Basic P2SH(P2WSH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 0).PushWitSig(keys.key0, 1).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2SH(P2WPKH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH, + "Basic P2SH(P2WPKH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH, 0, 0).PushWitSig(keys.key0, 1).Push(keys.pubkey0).AsWit().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), "P2WPKH with future witness version", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | - SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM, false, WITNESS_PKH, 1 + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM, false, WitnessMode::PKH, 1 ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM)); { CScript witscript = CScript() << ToByteVector(keys.pubkey0); @@ -810,22 +810,22 @@ BOOST_AUTO_TEST_CASE(script_build) ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_WRONG_LENGTH)); } 
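For reference, the TestBuilder constructor above derives the scriptPubKey for the two witness modes by hashing the inner script; a sketch of that derivation using the same helpers it calls (the function names here are invented for illustration):

```cpp
// Illustration of the WitnessMode handling in the TestBuilder constructor;
// not a helper that exists in the codebase.
#include <crypto/sha256.h>
#include <hash.h>
#include <script/script.h>
#include <uint256.h>

// P2WPKH: the witness program is the HASH160 of the pushed public key; the
// script actually executed is the implicit P2PKH template.
static CScript WitnessV0KeyHashScript(const CScript& key_push_script, int witnessversion = 0)
{
    uint160 hash;
    CHash160().Write(&key_push_script[1], key_push_script.size() - 1).Finalize(hash.begin());
    return CScript() << witnessversion << ToByteVector(hash);
}

// P2WSH: the witness program is the SHA256 of the full witness script, which
// the spender later reveals as the last witness stack element.
static CScript WitnessV0ScriptHashScript(const CScript& witscript, int witnessversion = 0)
{
    uint256 hash;
    CSHA256().Write(&witscript[0], witscript.size()).Finalize(hash.begin());
    return CScript() << witnessversion << ToByteVector(hash);
}
```

When the P2SH flag is also set, the tests wrap this witness program in a regular P2SH output, which is why the P2SH(P2WSH)/P2SH(P2WPKH) cases end with PushRedeem().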
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "P2WSH with empty witness", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "P2WSH with empty witness", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_WITNESS_EMPTY)); { CScript witscript = CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG; tests.push_back(TestBuilder(witscript, - "P2WSH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "P2WSH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).PushWitSig(keys.key0).Push(witscript).DamagePush(0).AsWit().ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH)); } tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "P2WPKH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "P2WPKH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().Push("0").AsWit().ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "P2WPKH with non-empty scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "P2WPKH with non-empty scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().Num(11).ScriptError(SCRIPT_ERR_WITNESS_MALLEATED)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "P2SH(P2WPKH) with superfluous push in scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH + "P2SH(P2WPKH) with superfluous push in scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().Num(11).PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_MALLEATED_P2SH)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, "P2PK with witness", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH @@ -833,95 +833,95 @@ BOOST_AUTO_TEST_CASE(script_build) // Compressed keys should pass SCRIPT_VERIFY_WITNESS_PUBKEYTYPE tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG, - "Basic P2WSH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "Basic P2WSH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C), - "Basic P2WPKH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_PKH, + "Basic P2WPKH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0C).Push(keys.pubkey0C).AsWit()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "Basic P2SH(P2WSH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << 
ToByteVector(keys.pubkey0C), - "Basic P2SH(P2WPKH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_PKH, + "Basic P2SH(P2WPKH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0C).Push(keys.pubkey0C).AsWit().PushRedeem()); // Testing uncompressed key in witness with SCRIPT_VERIFY_WITNESS_PUBKEYTYPE tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_PKH, + "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_PKH, + "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); // P2WSH 1-of-2 multisig with compressed keys tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with compressed keys", 
SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().PushRedeem()); // P2WSH 1-of-2 multisig with first key uncompressed tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem()); 
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); // P2WSH 1-of-2 multisig with second key uncompressed tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem()); 
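The CHECKMULTISIG cases here all start the witness with an empty element (Push(CScript()).AsWit()); that is the segwit counterpart of the OP_0 dummy that the legacy sign_multisig test helper prepends (its SignatureHash call is what the multisig_tests hunk above updates). The hunk only shows the start of that helper; a hedged sketch of the usual pattern, with an invented name, would be:

```cpp
// Illustration only: plausible completion of the sign_multisig pattern shown
// in the multisig_tests hunk above; the exact upstream body is not in this hunk.
#include <key.h>
#include <primitives/transaction.h>
#include <script/interpreter.h> // SignatureHash, SigVersion, SIGHASH_ALL
#include <script/script.h>
#include <uint256.h>

static CScript sign_multisig_sketch(const CScript& scriptPubKey, const std::vector<CKey>& keys,
                                    const CTransaction& transaction, int whichIn)
{
    uint256 hash = SignatureHash(scriptPubKey, transaction, whichIn, SIGHASH_ALL, 0, SigVersion::BASE);

    CScript result;
    result << OP_0; // dummy element consumed by the CHECKMULTISIG off-by-one bug
    for (const CKey& key : keys) {
        std::vector<unsigned char> vchSig;
        if (!key.Sign(hash, vchSig)) return CScript();
        vchSig.push_back((unsigned char)SIGHASH_ALL); // append sighash type byte
        result << vchSig;
    }
    return result;
}
```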
tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); std::set<std::string> tests_set; @@ -1009,21 +1009,21 @@ BOOST_AUTO_TEST_CASE(script_PushData) ScriptError err; std::vector<std::vector<unsigned char> > directStack; - BOOST_CHECK(EvalScript(directStack, CScript(&direct[0], &direct[sizeof(direct)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(directStack, CScript(&direct[0], &direct[sizeof(direct)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); 
std::vector<std::vector<unsigned char> > pushdata1Stack; - BOOST_CHECK(EvalScript(pushdata1Stack, CScript(&pushdata1[0], &pushdata1[sizeof(pushdata1)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(pushdata1Stack, CScript(&pushdata1[0], &pushdata1[sizeof(pushdata1)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK(pushdata1Stack == directStack); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); std::vector<std::vector<unsigned char> > pushdata2Stack; - BOOST_CHECK(EvalScript(pushdata2Stack, CScript(&pushdata2[0], &pushdata2[sizeof(pushdata2)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(pushdata2Stack, CScript(&pushdata2[0], &pushdata2[sizeof(pushdata2)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK(pushdata2Stack == directStack); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); std::vector<std::vector<unsigned char> > pushdata4Stack; - BOOST_CHECK(EvalScript(pushdata4Stack, CScript(&pushdata4[0], &pushdata4[sizeof(pushdata4)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(pushdata4Stack, CScript(&pushdata4[0], &pushdata4[sizeof(pushdata4)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK(pushdata4Stack == directStack); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); } @@ -1031,7 +1031,7 @@ BOOST_AUTO_TEST_CASE(script_PushData) CScript sign_multisig(CScript scriptPubKey, std::vector<CKey> keys, CTransaction transaction) { - uint256 hash = SignatureHash(scriptPubKey, transaction, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(scriptPubKey, transaction, 0, SIGHASH_ALL, 0, SigVersion::BASE); CScript result; // @@ -1227,15 +1227,15 @@ BOOST_AUTO_TEST_CASE(script_combineSigs) // A couple of partially-signed versions: std::vector<unsigned char> sig1; - uint256 hash1 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash1 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(keys[0].Sign(hash1, sig1)); sig1.push_back(SIGHASH_ALL); std::vector<unsigned char> sig2; - uint256 hash2 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_NONE, 0, SIGVERSION_BASE); + uint256 hash2 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_NONE, 0, SigVersion::BASE); BOOST_CHECK(keys[1].Sign(hash2, sig2)); sig2.push_back(SIGHASH_NONE); std::vector<unsigned char> sig3; - uint256 hash3 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_SINGLE, 0, SIGVERSION_BASE); + uint256 hash3 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_SINGLE, 0, SigVersion::BASE); BOOST_CHECK(keys[2].Sign(hash3, sig3)); sig3.push_back(SIGHASH_SINGLE); diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 7a79a77e8b..9b8b7bdc56 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -19,11 +19,15 @@ protected: int intval; bool boolval; std::string stringval; - const char* charstrval; + char charstrval[16]; CTransactionRef txval; public: CSerializeMethodsTestSingle() = default; - CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(MakeTransactionRef(txvalin)){} + CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string 
stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), txval(MakeTransactionRef(txvalin)) + { + memcpy(charstrval, charstrvalin, sizeof(charstrval)); + } + ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> @@ -31,7 +35,7 @@ public: READWRITE(intval); READWRITE(boolval); READWRITE(stringval); - READWRITE(FLATDATA(charstrval)); + READWRITE(charstrval); READWRITE(txval); } @@ -53,7 +57,7 @@ public: template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(intval, boolval, stringval, FLATDATA(charstrval), txval); + READWRITE(intval, boolval, stringval, charstrval, txval); } }; @@ -344,7 +348,7 @@ BOOST_AUTO_TEST_CASE(class_methods) int intval(100); bool boolval(true); std::string stringval("testing"); - const char* charstrval("testing charstr"); + const char charstrval[16] = "testing charstr"; CMutableTransaction txval; CSerializeMethodsTestSingle methodtest1(intval, boolval, stringval, charstrval, txval); CSerializeMethodsTestMany methodtest2(intval, boolval, stringval, charstrval, txval); @@ -360,7 +364,7 @@ BOOST_AUTO_TEST_CASE(class_methods) BOOST_CHECK(methodtest2 == methodtest3); BOOST_CHECK(methodtest3 == methodtest4); - CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, FLATDATA(charstrval), txval); + CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, charstrval, txval); ss2 >> methodtest3; BOOST_CHECK(methodtest3 == methodtest4); } diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index 32cd3a50b0..a2bd8998b1 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -138,7 +138,7 @@ BOOST_AUTO_TEST_CASE(sighash_test) uint256 sh, sho; sho = SignatureHashOld(scriptCode, txTo, nIn, nHashType); - sh = SignatureHash(scriptCode, txTo, nIn, nHashType, 0, SIGVERSION_BASE); + sh = SignatureHash(scriptCode, txTo, nIn, nHashType, 0, SigVersion::BASE); #if defined(PRINT_SIGHASH_JSON) CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << txTo; @@ -204,7 +204,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) continue; } - sh = SignatureHash(scriptCode, *tx, nIn, nHashType, 0, SIGVERSION_BASE); + sh = SignatureHash(scriptCode, *tx, nIn, nHashType, 0, SigVersion::BASE); BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest); } } diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 1108dab584..5d057108b1 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -57,16 +57,16 @@ BOOST_AUTO_TEST_CASE(streams_vector_writer) BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}})); vch.clear(); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes)); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes)); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); vch.clear(); vch.resize(4, 8); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); BOOST_CHECK((vch == 
std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); vch.clear(); } diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp index 95c4825b84..9390a93b99 100644 --- a/src/test/test_bitcoin.cpp +++ b/src/test/test_bitcoin.cpp @@ -145,9 +145,9 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& for (const CMutableTransaction& tx : txns) block.vtx.push_back(MakeTransactionRef(tx)); // IncrementExtraNonce creates a valid coinbase and merkleRoot - unsigned int extraNonce = 0; { LOCK(cs_main); + unsigned int extraNonce = 0; IncrementExtraNonce(&block, chainActive.Tip(), extraNonce); } diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index edfb35d155..b222392ee5 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -407,7 +407,7 @@ static CScript PushAll(const std::vector<valtype>& values) void ReplaceRedeemScript(CScript& script, const CScript& redeemScript) { std::vector<valtype> stack; - EvalScript(stack, script, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SIGVERSION_BASE); + EvalScript(stack, script, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SigVersion::BASE); assert(stack.size() > 0); stack.back() = std::vector<unsigned char>(redeemScript.begin(), redeemScript.end()); script = PushAll(stack); diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 1aa54189b6..7087c26774 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -56,7 +56,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) // Sign: std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(scriptPubKey, spends[i], 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(scriptPubKey, spends[i], 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); spends[i].vin[0].scriptSig << vchSig; @@ -182,7 +182,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) // Sign, with a non-DER signature { std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(p2pk_scriptPubKey, spend_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(p2pk_scriptPubKey, spend_tx, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char) 0); // padding byte makes this non-DER vchSig.push_back((unsigned char)SIGHASH_ALL); @@ -256,7 +256,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) // Sign std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(spend_tx.vout[2].scriptPubKey, invalid_with_cltv_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(spend_tx.vout[2].scriptPubKey, invalid_with_cltv_tx, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 101; @@ -284,7 +284,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) // Sign std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(spend_tx.vout[3].scriptPubKey, invalid_with_csv_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(spend_tx.vout[3].scriptPubKey, invalid_with_csv_tx, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 101; diff 
--git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 84b61bea86..4b44bbadac 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -185,17 +185,11 @@ BOOST_AUTO_TEST_CASE(util_FormatISO8601Time) BOOST_CHECK_EQUAL(FormatISO8601Time(1317425777), "23:36:17Z"); } -class TestArgsManager : public ArgsManager +struct TestArgsManager : public ArgsManager { -public: - std::map<std::string, std::string>& GetMapArgs() - { - return mapArgs; - }; - const std::map<std::string, std::vector<std::string> >& GetMapMultiArgs() - { - return mapMultiArgs; - }; + std::map<std::string, std::string>& GetMapArgs() { return mapArgs; } + const std::map<std::string, std::vector<std::string> >& GetMapMultiArgs() { return mapMultiArgs; } + const std::unordered_set<std::string>& GetNegatedArgs() { return m_negated_args; } }; BOOST_AUTO_TEST_CASE(util_ParseParameters) @@ -223,6 +217,54 @@ BOOST_AUTO_TEST_CASE(util_ParseParameters) BOOST_CHECK(testArgs.GetArgs("-ccc").size() == 2); } +BOOST_AUTO_TEST_CASE(util_GetBoolArg) +{ + TestArgsManager testArgs; + const char *argv_test[] = { + "ignored", "-a", "-nob", "-c=0", "-d=1", "-e=false", "-f=true"}; + testArgs.ParseParameters(7, (char**)argv_test); + + // Each letter should be set. + for (char opt : "abcdef") + BOOST_CHECK(testArgs.IsArgSet({'-', opt}) || !opt); + + // Nothing else should be in the map + BOOST_CHECK(testArgs.GetMapArgs().size() == 6 && + testArgs.GetMapMultiArgs().size() == 6); + + // The -no prefix should get stripped on the way in. + BOOST_CHECK(!testArgs.IsArgSet("-nob")); + + // The -b option is flagged as negated, and nothing else is + BOOST_CHECK(testArgs.IsArgNegated("-b")); + BOOST_CHECK(testArgs.GetNegatedArgs().size() == 1); + BOOST_CHECK(!testArgs.IsArgNegated("-a")); + + // Check expected values. + BOOST_CHECK(testArgs.GetBoolArg("-a", false) == true); + BOOST_CHECK(testArgs.GetBoolArg("-b", true) == false); + BOOST_CHECK(testArgs.GetBoolArg("-c", true) == false); + BOOST_CHECK(testArgs.GetBoolArg("-d", false) == true); + BOOST_CHECK(testArgs.GetBoolArg("-e", true) == false); + BOOST_CHECK(testArgs.GetBoolArg("-f", true) == false); +} + +BOOST_AUTO_TEST_CASE(util_GetBoolArgEdgeCases) +{ + // Test some awful edge cases that hopefully no user will ever exercise. + TestArgsManager testArgs; + const char *argv_test[] = {"ignored", "-nofoo", "-foo", "-nobar=0"}; + testArgs.ParseParameters(4, (char**)argv_test); + + // This was passed twice, second one overrides the negative setting. + BOOST_CHECK(!testArgs.IsArgNegated("-foo")); + BOOST_CHECK(testArgs.GetBoolArg("-foo", false) == true); + + // A double negative is a positive. + BOOST_CHECK(testArgs.IsArgNegated("-bar")); + BOOST_CHECK(testArgs.GetBoolArg("-bar", false) == true); +} + BOOST_AUTO_TEST_CASE(util_GetArg) { TestArgsManager testArgs; @@ -704,9 +746,8 @@ static constexpr char ExitCommand = 'X'; static void TestOtherProcess(fs::path dirname, std::string lockname, int fd) { char ch; - int rv; while (true) { - rv = read(fd, &ch, 1); // Wait for command + int rv = read(fd, &ch, 1); // Wait for command assert(rv == 1); switch(ch) { case LockCommand: @@ -817,4 +858,20 @@ BOOST_AUTO_TEST_CASE(test_LockDirectory) fs::remove_all(dirname); } +BOOST_AUTO_TEST_CASE(test_DirIsWritable) +{ + // Should be able to write to the system tmp dir. + fs::path tmpdirname = fs::temp_directory_path(); + BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), true); + + // Should not be able to write to a non-existent dir. 
+ tmpdirname = fs::temp_directory_path() / fs::unique_path(); + BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), false); + + fs::create_directory(tmpdirname); + // Should be able to write to it now. + BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), true); + fs::remove(tmpdirname); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp index 5d6f781404..92ef58e517 100644 --- a/src/test/versionbits_tests.cpp +++ b/src/test/versionbits_tests.cpp @@ -101,8 +101,8 @@ public: VersionBitsTester& TestDefined() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_DEFINED, strprintf("Test %i for DEFINED", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::DEFINED, strprintf("Test %i for DEFINED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -112,8 +112,8 @@ public: VersionBitsTester& TestStarted() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_STARTED, strprintf("Test %i for STARTED", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::STARTED, strprintf("Test %i for STARTED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -123,8 +123,8 @@ public: VersionBitsTester& TestLockedIn() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_LOCKED_IN, strprintf("Test %i for LOCKED_IN", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::LOCKED_IN, strprintf("Test %i for LOCKED_IN", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -134,8 +134,8 @@ public: VersionBitsTester& TestActive() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? 
nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -145,8 +145,8 @@ public: VersionBitsTester& TestFailed() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_FAILED, strprintf("Test %i for FAILED", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::FAILED, strprintf("Test %i for FAILED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; diff --git a/src/txdb.cpp b/src/txdb.cpp index 91d6c98430..8550a7e889 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -147,7 +147,7 @@ size_t CCoinsViewDB::EstimateSize() const return db.EstimateSize(DB_COIN, (char)(DB_COIN+1)); } -CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) { +CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(gArgs.IsArgSet("-blocksdir") ? GetDataDir() / "blocks" / "index" : GetBlocksDir() / "index", nCacheSize, fMemory, fWipe) { } bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) { diff --git a/src/txmempool.cpp b/src/txmempool.cpp index d1edde284f..cc639288d3 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -339,7 +339,7 @@ CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator) : nCheckFrequency = 0; } -bool CTxMemPool::isSpent(const COutPoint& outpoint) +bool CTxMemPool::isSpent(const COutPoint& outpoint) const { LOCK(cs); return mapNextTx.count(outpoint); diff --git a/src/txmempool.h b/src/txmempool.h index 08a3421015..699f6b554b 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -550,7 +550,7 @@ public: void _clear(); //lock free bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); void queryHashes(std::vector<uint256>& vtxid); - bool isSpent(const COutPoint& outpoint); + bool isSpent(const COutPoint& outpoint) const; unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** diff --git a/src/util.cpp b/src/util.cpp index 69ceefc8cd..490897899b 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -70,8 +70,6 @@ #include <malloc.h> #endif -#include <boost/algorithm/string/case_conv.hpp> // for to_lower() -#include <boost/algorithm/string/predicate.hpp> // for startswith() and endswith() #include <boost/interprocess/sync/file_lock.hpp> #include <boost/program_options/detail/config_file.hpp> #include <boost/thread.hpp> @@ -419,7 +417,36 @@ void ReleaseDirectoryLocks() dir_locks.clear(); } -/** Interpret string as boolean, for argument parsing */ +bool DirIsWritable(const fs::path& directory) +{ + fs::path tmpFile = directory / fs::unique_path(); + + FILE* file = fsbridge::fopen(tmpFile, "a"); + if (!file) return false; + + fclose(file); + remove(tmpFile); + + return true; +} + +/** + * Interpret a string argument as a boolean. 
+ * + * The definition of atoi() requires that non-numeric string values like "foo" return 0. This means that if a user unintentionally supplies a non-integer argument here, the return value is always false. As a result, -foo=false does what the user probably expects, while -foo=true is well defined but does not do what they probably expect. + * + * The return value of atoi() is undefined when given input not representable as an int. On most systems this means string values between "-2147483648" and "2147483647" are well defined (this method will return true). Setting -txindex=2147483648 on most systems, however, is probably undefined. + * + * For a more extensive discussion of this topic (and a wide range of opinions on the Right Way to change this code), see PR12713. + */ static bool InterpretBool(const std::string& strValue) { if (strValue.empty()) @@ -427,13 +454,30 @@ static bool InterpretBool(const std::string& strValue) return (atoi(strValue) != 0); } -/** Turn -noX into -X=0 */ -static void InterpretNegativeSetting(std::string& strKey, std::string& strValue) +/** + * Interpret -nofoo as if the user supplied -foo=0. + * + * This method also tracks when the -no form was supplied, and treats "-foo" as + * a negated option when this happens. This can later be checked using the + * IsArgNegated() method. One use case for this is to have a way to disable + * options that are not normally boolean (e.g. using -nodebuglogfile to request + * that debug log output is not sent to any file at all). + */ +void ArgsManager::InterpretNegatedOption(std::string& key, std::string& val) { - if (strKey.length()>3 && strKey[0]=='-' && strKey[1]=='n' && strKey[2]=='o') - { - strKey = "-" + strKey.substr(3); - strValue = InterpretBool(strValue) ? "0" : "1"; + if (key.substr(0, 3) == "-no") { + bool bool_val = InterpretBool(val); + if (!bool_val) { + // Double negatives like -nofoo=0 are supported (but discouraged) + LogPrintf("Warning: parsed potentially confusing double-negative %s=%s\n", key, val); + } + key.erase(1, 2); + m_negated_args.insert(key); + val = bool_val ? "0" : "1"; + } else { + // In an invocation like "bitcoind -nofoo -foo" we want to unmark -foo + // as negated when we see the second option. + m_negated_args.erase(key); } } @@ -442,34 +486,34 @@ void ArgsManager::ParseParameters(int argc, const char* const argv[]) { LOCK(cs_args); mapArgs.clear(); mapMultiArgs.clear(); - - for (int i = 1; i < argc; i++) - { - std::string str(argv[i]); - std::string strValue; - size_t is_index = str.find('='); - if (is_index != std::string::npos) - { - strValue = str.substr(is_index+1); - str = str.substr(0, is_index); + m_negated_args.clear(); + + for (int i = 1; i < argc; i++) { + std::string key(argv[i]); + std::string val; + size_t is_index = key.find('='); + if (is_index != std::string::npos) { + val = key.substr(is_index + 1); + key.erase(is_index); } #ifdef WIN32 - boost::to_lower(str); - if (boost::algorithm::starts_with(str, "/")) - str = "-" + str.substr(1); + std::transform(key.begin(), key.end(), key.begin(), ::tolower); + if (key[0] == '/') + key[0] = '-'; #endif - if (str[0] != '-') + if (key[0] != '-') break; - // Interpret --foo as -foo. - // If both --foo and -foo are set, the last takes effect.
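To make the atoi() caveats above concrete, here is a self-contained sketch of the interpretation rules; the helper name and the main() harness are illustrative and not part of the patch:

    #include <cassert>
    #include <cstdlib>
    #include <string>

    // Mirrors the patched InterpretBool(): an empty value ("-foo") counts as true,
    // anything else is run through atoi(), so non-numeric text evaluates to false.
    static bool InterpretBoolSketch(const std::string& value)
    {
        if (value.empty()) return true;
        return std::atoi(value.c_str()) != 0;
    }

    int main()
    {
        assert(InterpretBoolSketch(""));        // -foo        -> true
        assert(InterpretBoolSketch("1"));       // -foo=1      -> true
        assert(!InterpretBoolSketch("0"));      // -foo=0      -> false
        assert(!InterpretBoolSketch("false"));  // -foo=false  -> false (atoi("false") == 0)
        assert(!InterpretBoolSketch("true"));   // -foo=true   -> false, the surprising case the comment warns about
        return 0;
    }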
- if (str.length() > 1 && str[1] == '-') - str = str.substr(1); - InterpretNegativeSetting(str, strValue); + // Transform --foo to -foo + if (key.length() > 1 && key[1] == '-') + key.erase(0, 1); - mapArgs[str] = strValue; - mapMultiArgs[str].push_back(strValue); + // Transform -nofoo to -foo=0 + InterpretNegatedOption(key, val); + + mapArgs[key] = val; + mapMultiArgs[key].push_back(val); } } @@ -487,6 +531,12 @@ bool ArgsManager::IsArgSet(const std::string& strArg) const return mapArgs.count(strArg); } +bool ArgsManager::IsArgNegated(const std::string& strArg) const +{ + LOCK(cs_args); + return m_negated_args.find(strArg) != m_negated_args.end(); +} + std::string ArgsManager::GetArg(const std::string& strArg, const std::string& strDefault) const { LOCK(cs_args); @@ -600,10 +650,41 @@ fs::path GetDefaultDataDir() #endif } +static fs::path g_blocks_path_cached; +static fs::path g_blocks_path_cache_net_specific; static fs::path pathCached; static fs::path pathCachedNetSpecific; static CCriticalSection csPathCached; +const fs::path &GetBlocksDir(bool fNetSpecific) +{ + + LOCK(csPathCached); + + fs::path &path = fNetSpecific ? g_blocks_path_cache_net_specific : g_blocks_path_cached; + + // This can be called during exceptions by LogPrintf(), so we cache the + // value so we don't have to do memory allocations after that. + if (!path.empty()) + return path; + + if (gArgs.IsArgSet("-blocksdir")) { + path = fs::system_complete(gArgs.GetArg("-blocksdir", "")); + if (!fs::is_directory(path)) { + path = ""; + return path; + } + } else { + path = GetDataDir(false); + } + if (fNetSpecific) + path /= BaseParams().DataDir(); + + path /= "blocks"; + fs::create_directories(path); + return path; +} + const fs::path &GetDataDir(bool fNetSpecific) { @@ -642,6 +723,8 @@ void ClearDatadirCache() pathCached = fs::path(); pathCachedNetSpecific = fs::path(); + g_blocks_path_cached = fs::path(); + g_blocks_path_cache_net_specific = fs::path(); } fs::path GetConfigFile(const std::string& confPath) @@ -665,7 +748,7 @@ void ArgsManager::ReadConfigFile(const std::string& confPath) // Don't overwrite existing settings so command line settings override bitcoin.conf std::string strKey = std::string("-") + it->string_key; std::string strValue = it->value[0]; - InterpretNegativeSetting(strKey, strValue); + InterpretNegatedOption(strKey, strValue); if (mapArgs.count(strKey) == 0) mapArgs[strKey] = strValue; mapMultiArgs[strKey].push_back(strValue); diff --git a/src/util.h b/src/util.h index aebd2fd590..4c473c9354 100644 --- a/src/util.h +++ b/src/util.h @@ -25,6 +25,7 @@ #include <map> #include <stdint.h> #include <string> +#include <unordered_set> #include <vector> #include <boost/signals2/signal.hpp> @@ -174,6 +175,7 @@ int RaiseFileDescriptorLimit(int nMinFD); void AllocateFileRange(FILE *file, unsigned int offset, unsigned int length); bool RenameOver(fs::path src, fs::path dest); bool LockDirectory(const fs::path& directory, const std::string lockfile_name, bool probe_only=false); +bool DirIsWritable(const fs::path& directory); /** Release all directory locks. This is used for unit testing only, at runtime * the global destructor will take care of the locks. 
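The resolution order implemented by GetBlocksDir() above can be summarised in a few lines. A simplified sketch, assuming std::filesystem in place of fs:: and leaving out the caching, the is_directory() validation, and the BaseParams() lookup:

    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    // Illustrative only: resolve the blocks directory the way the patched
    // GetBlocksDir() does -- honour -blocksdir if given, otherwise fall back to
    // the data directory, then append the network subdirectory and "blocks".
    fs::path ResolveBlocksDir(const std::string& blocksdir_arg,
                              const fs::path& datadir,
                              const std::string& network_subdir)
    {
        fs::path path;
        if (!blocksdir_arg.empty()) {
            path = fs::absolute(blocksdir_arg);   // user override via -blocksdir
        } else {
            path = datadir;                       // default: <datadir>
        }
        if (!network_subdir.empty()) path /= network_subdir;  // e.g. "testnet3", "regtest"
        path /= "blocks";
        fs::create_directories(path);
        return path;
    }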
@@ -182,6 +184,7 @@ void ReleaseDirectoryLocks(); bool TryCreateDirectories(const fs::path& p); fs::path GetDefaultDataDir(); +const fs::path &GetBlocksDir(bool fNetSpecific = true); const fs::path &GetDataDir(bool fNetSpecific = true); void ClearDatadirCache(); fs::path GetConfigFile(const std::string& confPath); @@ -222,6 +225,8 @@ protected: mutable CCriticalSection cs_args; std::map<std::string, std::string> mapArgs; std::map<std::string, std::vector<std::string>> mapMultiArgs; + std::unordered_set<std::string> m_negated_args; + public: void ParseParameters(int argc, const char*const argv[]); void ReadConfigFile(const std::string& confPath); @@ -243,6 +248,15 @@ public: bool IsArgSet(const std::string& strArg) const; /** + * Return true if the argument was originally passed as a negated option, + * i.e. -nofoo. + * + * @param strArg Argument to get (e.g. "-foo") + * @return true if the argument was passed negated + */ + bool IsArgNegated(const std::string& strArg) const; + + /** * Return string argument or default value * * @param strArg Argument to get (e.g. "-foo") @@ -290,6 +304,11 @@ public: // Forces an arg setting. Called by SoftSetArg() if the arg hasn't already // been set. Also called directly in testing. void ForceSetArg(const std::string& strArg, const std::string& strValue); + +private: + + // Munge -nofoo into -foo=0 and track the value as negated. + void InterpretNegatedOption(std::string &key, std::string &val); }; extern ArgsManager gArgs; diff --git a/src/validation.cpp b/src/validation.cpp index 614876ea49..77764ff923 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -227,6 +227,7 @@ CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; CBlockPolicyEstimator feeEstimator; CTxMemPool mempool(&feeEstimator); +std::atomic_bool g_is_mempool_loaded{false}; /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; @@ -280,11 +281,11 @@ std::unique_ptr<CCoinsViewDB> pcoinsdbview; std::unique_ptr<CCoinsViewCache> pcoinsTip; std::unique_ptr<CBlockTreeDB> pblocktree; -enum FlushStateMode { - FLUSH_STATE_NONE, - FLUSH_STATE_IF_NEEDED, - FLUSH_STATE_PERIODIC, - FLUSH_STATE_ALWAYS +enum class FlushStateMode { + NONE, + IF_NEEDED, + PERIODIC, + ALWAYS }; // See definition for documentation @@ -983,7 +984,7 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo } // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits CValidationState stateDummy; - FlushStateToDisk(chainparams, stateDummy, FLUSH_STATE_PERIODIC); + FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC); return res; } @@ -1077,7 +1078,7 @@ static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMes // Write index header unsigned int nSize = GetSerializeSize(fileout, block); - fileout << FLATDATA(messageStart) << nSize; + fileout << messageStart << nSize; // Write block long fileOutPos = ftell(fileout.Get()); @@ -1441,7 +1442,7 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint // Write index header unsigned int nSize = GetSerializeSize(fileout, blockundo); - fileout << FLATDATA(messageStart) << nSize; + fileout << messageStart << nSize; // Write undo data long fileOutPos = ftell(fileout.Get()); @@ -1684,7 +1685,7 @@ int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Para for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) { ThresholdState state = VersionBitsState(pindexPrev, params, 
static_cast<Consensus::DeploymentPos>(i), versionbitscache); - if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) { + if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) { nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i)); } } @@ -1740,7 +1741,7 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens } // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. - if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } @@ -1926,7 +1927,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. int nLockTimeFlags = 0; - if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; } @@ -2096,19 +2097,19 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). - bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); + bool fCacheLarge = mode == FlushStateMode::PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); // The cache is over the limit, we have to write now. - bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; + bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cacheSize > nTotalSpace; // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. - bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; + bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. - bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; + bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; // Combine all conditions that result in a full cache flush. - fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; + fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; // Write blocks and block index to disk. 
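For readers following the FlushStateMode rename through these hunks, the full-flush decision boils down to one predicate. The sketch below is a paraphrase, not a drop-in replacement; the headroom constant stands in for MAX_BLOCK_COINSDB_USAGE from validation.h:

    #include <algorithm>
    #include <cstdint>

    enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };

    // Headroom figure used only for this sketch.
    static const int64_t COINSDB_HEADROOM_BYTES = 10 * 1024 * 1024;

    // Paraphrase of how FlushStateToDisk() combines its triggers into fDoFullFlush:
    //  - ALWAYS    -> flush unconditionally
    //  - PERIODIC  -> flush when the coins cache is within 10% (or the headroom)
    //                 of its budget, or when the periodic flush timer expired
    //  - IF_NEEDED -> flush only when the cache is over budget
    //  - any mode  -> flush when pruning has just freed block files
    bool ShouldFullFlush(FlushStateMode mode, int64_t cache_size, int64_t total_space,
                         bool flush_timer_expired, bool flush_for_prune)
    {
        const bool cache_large = mode == FlushStateMode::PERIODIC &&
            cache_size > std::max((9 * total_space) / 10, total_space - COINSDB_HEADROOM_BYTES);
        const bool cache_critical = mode == FlushStateMode::IF_NEEDED && cache_size > total_space;
        const bool periodic = mode == FlushStateMode::PERIODIC && flush_timer_expired;
        return mode == FlushStateMode::ALWAYS || cache_large || cache_critical || periodic || flush_for_prune;
    }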
if (fDoFullFlush || fPeriodicWrite) { // Depend on nMinDiskSpace to ensure we can write block index - if (!CheckDiskSpace(0)) + if (!CheckDiskSpace(0, true)) return state.Error("out of disk space"); // First make sure all block and undo data is flushed to disk. FlushBlockFile(); @@ -2150,7 +2151,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & nLastFlush = nNow; } } - if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { + if (fDoFullFlush || ((mode == FlushStateMode::ALWAYS || mode == FlushStateMode::PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { // Update best block in wallet (so we can detect restored wallets). GetMainSignals().SetBestChain(chainActive.GetLocator()); nLastSetChain = nNow; @@ -2164,14 +2165,14 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & void FlushStateToDisk() { CValidationState state; const CChainParams& chainparams = Params(); - FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS); + FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS); } void PruneAndFlush() { CValidationState state; fCheckForPruning = true; const CChainParams& chainparams = Params(); - FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); + FlushStateToDisk(chainparams, state, FlushStateMode::NONE); } static void DoWarning(const std::string& strWarning) @@ -2199,9 +2200,9 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) { WarningBitsConditionChecker checker(bit); ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]); - if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) { + if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) { const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit); - if (state == THRESHOLD_ACTIVE) { + if (state == ThresholdState::ACTIVE) { DoWarning(strWarning); } else { warningMessages.push_back(strWarning); @@ -2267,7 +2268,7 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha } LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI); // Write the chain state to disk, if necessary. - if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) + if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) return false; if (disconnectpool) { @@ -2405,7 +2406,7 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal); // Write the chain state to disk, if necessary. 
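The UpdateTip() hunk above scans the warning bits with the renamed enumerators. A compact sketch of that scan, with the condition checker abstracted behind a callback; note that the real code warns immediately for ACTIVE bits and only queues a message for LOCKED_IN ones:

    #include <functional>
    #include <string>
    #include <vector>

    enum class ThresholdState { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

    static const int VERSIONBITS_NUM_BITS = 29;

    // Sketch: collect warnings for version bits that are locked in or active but
    // unknown to this node. 'state_for_bit' stands in for
    // WarningBitsConditionChecker::GetStateFor() evaluated at the current tip.
    std::vector<std::string> UnknownRuleWarnings(const std::function<ThresholdState(int)>& state_for_bit)
    {
        std::vector<std::string> warnings;
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; ++bit) {
            const ThresholdState state = state_for_bit(bit);
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                warnings.push_back("Warning: unknown new rules activated (versionbit " + std::to_string(bit) + ")");
            }
        }
        return warnings;
    }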
- if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) + if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) return false; int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal); @@ -2682,7 +2683,7 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& CheckBlockIndex(chainparams.GetConsensus()); // Write changes periodically to disk, after relay. - if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_PERIODIC)) { + if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) { return false; } @@ -2953,7 +2954,7 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int if (nNewChunks > nOldChunks) { if (fPruneMode) fCheckForPruning = true; - if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { + if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos, true)) { FILE *file = OpenBlockFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); @@ -2986,7 +2987,7 @@ static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, if (nNewChunks > nOldChunks) { if (fPruneMode) fCheckForPruning = true; - if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { + if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos, true)) { FILE *file = OpenUndoFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); @@ -3076,7 +3077,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params) { LOCK(cs_main); - return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE); + return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE); } // Compute at which vout of the block's coinbase transaction the witness @@ -3195,7 +3196,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c // Start enforcing BIP113 (Median Time Past) using versionbits logic. int nLockTimeFlags = 0; - if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST; } @@ -3223,13 +3224,13 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c // Validation for witness commitments. // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the // coinbase (where 0x0000....0000 is used instead). - // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained). + // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained). // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header). 
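For reference, the commitment check that the renamed comments describe reduces to a single hash comparison. A hedged sketch; the double-SHA256 is taken as a caller-supplied callable because CHash256 is not reproduced here, and the 32-byte reserved value is the sole element of the coinbase scriptWitness stack:

    #include <array>
    #include <cstdint>
    #include <cstring>
    #include <functional>
    #include <vector>

    using Bytes = std::vector<uint8_t>;

    // Returns true when SHA256d(witness merkle root || reserved value) matches the
    // 32 bytes embedded at offset 6 of the commitment output's scriptPubKey
    // (i.e. after the 0x6a 0x24 0xaa 0x21 0xa9 0xed prefix).
    bool CheckWitnessCommitment(const std::array<uint8_t, 32>& witness_merkle_root,
                                const std::array<uint8_t, 32>& reserved_value,
                                const Bytes& commitment_script_pubkey,
                                const std::function<std::array<uint8_t, 32>(const Bytes&)>& sha256d)
    {
        if (commitment_script_pubkey.size() < 38) return false;
        Bytes preimage(witness_merkle_root.begin(), witness_merkle_root.end());
        preimage.insert(preimage.end(), reserved_value.begin(), reserved_value.end());
        const std::array<uint8_t, 32> commitment = sha256d(preimage);
        return std::memcmp(commitment.data(), commitment_script_pubkey.data() + 6, 32) == 0;
    }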
// * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are - // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are + // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are // multiple, the last one is used. bool fHaveWitness = false; - if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE) { int commitpos = GetWitnessCommitmentIndex(block); if (commitpos != -1) { bool malleated = false; @@ -3238,7 +3239,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c // already does not permit it, it is impossible to trigger in the // witness tree. if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) { - return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__)); + return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness reserved value size", __func__)); } CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin()); if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) { @@ -3257,7 +3258,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c } } - // After the coinbase witness nonce and commitment are verified, + // After the coinbase witness reserved value and commitment are verified, // we can check if the block weight passes (before we've checked the // coinbase witness, it would be possible for the weight to be too // large by filling up the coinbase witness, which doesn't change @@ -3443,7 +3444,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali } if (fCheckForPruning) - FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); // we just allocated more disk space for block files + FlushStateToDisk(chainparams, state, FlushStateMode::NONE); // we just allocated more disk space for block files CheckBlockIndex(chainparams.GetConsensus()); @@ -3596,7 +3597,7 @@ void PruneBlockFilesManual(int nManualPruneHeight) { CValidationState state; const CChainParams& chainparams = Params(); - FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight); + FlushStateToDisk(chainparams, state, FlushStateMode::NONE, nManualPruneHeight); } /** @@ -3661,9 +3662,9 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte nLastBlockWeCanPrune, count); } -bool CheckDiskSpace(uint64_t nAdditionalBytes) +bool CheckDiskSpace(uint64_t nAdditionalBytes, bool blocks_dir) { - uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available; + uint64_t nFreeBytesAvailable = fs::space(blocks_dir ? 
GetBlocksDir() : GetDataDir()).available; // Check for nMinDiskSpace bytes (currently 50MB) if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) @@ -3706,7 +3707,7 @@ static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) { - return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); + return GetBlocksDir() / strprintf("%s%05u.dat", prefix, pos.nFile); } CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash) @@ -4094,7 +4095,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params) return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight); } // Occasionally flush state to disk. - if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) + if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) return false; } @@ -4160,7 +4161,7 @@ bool RewindBlockIndex(const CChainParams& params) { // and skip it here, we're about to -reindex-chainstate anyway, so // it'll get called a bunch real soon. CValidationState state; - if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { + if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) { return false; } } @@ -4283,7 +4284,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB unsigned char buf[CMessageHeader::MESSAGE_START_SIZE]; blkdat.FindByte(chainparams.MessageStart()[0]); nRewind = blkdat.GetPos()+1; - blkdat >> FLATDATA(buf); + blkdat >> buf; if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) continue; // read size diff --git a/src/validation.h b/src/validation.h index e780f453b2..4031989f00 100644 --- a/src/validation.h +++ b/src/validation.h @@ -158,6 +158,7 @@ extern CScript COINBASE_FLAGS; extern CCriticalSection cs_main; extern CBlockPolicyEstimator feeEstimator; extern CTxMemPool mempool; +extern std::atomic_bool g_is_mempool_loaded; typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap; extern BlockMap& mapBlockIndex; extern uint64_t nLastBlockTx; @@ -254,7 +255,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=nullptr, CBlockHeader *first_invalid=nullptr); /** Check whether enough disk space is available for an incoming block */ -bool CheckDiskSpace(uint64_t nAdditionalBytes = 0); +bool CheckDiskSpace(uint64_t nAdditionalBytes = 0, bool blocks_dir = false); /** Open a block file (blk?????.dat) */ FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly = false); /** Translation to a filesystem path */ @@ -411,7 +412,7 @@ bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& pa /** When there are blocks in the active chain with missing data, rewind the chainstate and remove them from the block index */ bool RewindBlockIndex(const CChainParams& params); -/** Update uncommitted block structures (currently: only the witness nonce). This is safe for submitted blocks. */ +/** Update uncommitted block structures (currently: only the witness reserved value). This is safe for submitted blocks. */ void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams); /** Produce the necessary coinbase commitment for a block (modifies the hash, don't call for mined blocks). 
*/ diff --git a/src/versionbits.cpp b/src/versionbits.cpp index d2ee49db20..e3ec078173 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -29,7 +29,7 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* // Check if this deployment is always active. if (nTimeStart == Consensus::BIP9Deployment::ALWAYS_ACTIVE) { - return THRESHOLD_ACTIVE; + return ThresholdState::ACTIVE; } // A block's state is always the same as that of the first of its period, so it is computed based on a pindexPrev whose height equals a multiple of nPeriod - 1. @@ -42,12 +42,12 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* while (cache.count(pindexPrev) == 0) { if (pindexPrev == nullptr) { // The genesis block is by definition defined. - cache[pindexPrev] = THRESHOLD_DEFINED; + cache[pindexPrev] = ThresholdState::DEFINED; break; } if (pindexPrev->GetMedianTimePast() < nTimeStart) { // Optimization: don't recompute down further, as we know every earlier block will be before the start time - cache[pindexPrev] = THRESHOLD_DEFINED; + cache[pindexPrev] = ThresholdState::DEFINED; break; } vToCompute.push_back(pindexPrev); @@ -65,17 +65,17 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* vToCompute.pop_back(); switch (state) { - case THRESHOLD_DEFINED: { + case ThresholdState::DEFINED: { if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { - stateNext = THRESHOLD_FAILED; + stateNext = ThresholdState::FAILED; } else if (pindexPrev->GetMedianTimePast() >= nTimeStart) { - stateNext = THRESHOLD_STARTED; + stateNext = ThresholdState::STARTED; } break; } - case THRESHOLD_STARTED: { + case ThresholdState::STARTED: { if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { - stateNext = THRESHOLD_FAILED; + stateNext = ThresholdState::FAILED; break; } // We need to count @@ -88,17 +88,17 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* pindexCount = pindexCount->pprev; } if (count >= nThreshold) { - stateNext = THRESHOLD_LOCKED_IN; + stateNext = ThresholdState::LOCKED_IN; } break; } - case THRESHOLD_LOCKED_IN: { + case ThresholdState::LOCKED_IN: { // Always progresses into ACTIVE. - stateNext = THRESHOLD_ACTIVE; + stateNext = ThresholdState::ACTIVE; break; } - case THRESHOLD_FAILED: - case THRESHOLD_ACTIVE: { + case ThresholdState::FAILED: + case ThresholdState::ACTIVE: { // Nothing happens, these are terminal states. break; } @@ -149,7 +149,7 @@ int AbstractThresholdConditionChecker::GetStateSinceHeightFor(const CBlockIndex* const ThresholdState initialState = GetStateFor(pindexPrev, params, cache); // BIP 9 about state DEFINED: "The genesis block is by definition in this state for each deployment." - if (initialState == THRESHOLD_DEFINED) { + if (initialState == ThresholdState::DEFINED) { return 0; } diff --git a/src/versionbits.h b/src/versionbits.h index 44a009deeb..8962a65057 100644 --- a/src/versionbits.h +++ b/src/versionbits.h @@ -17,12 +17,12 @@ static const int32_t VERSIONBITS_TOP_MASK = 0xE0000000UL; /** Total bits available for versionbits */ static const int32_t VERSIONBITS_NUM_BITS = 29; -enum ThresholdState { - THRESHOLD_DEFINED, - THRESHOLD_STARTED, - THRESHOLD_LOCKED_IN, - THRESHOLD_ACTIVE, - THRESHOLD_FAILED, +enum class ThresholdState { + DEFINED, + STARTED, + LOCKED_IN, + ACTIVE, + FAILED, }; // A map that gives the state for blocks whose height is a multiple of Period(). 
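With every call site now spelling out ThresholdState:: explicitly, it may help to see the BIP9 transition logic from GetStateFor() in isolation. A compact sketch, evaluated once per retargeting period, with the signalling count abstracted into a flag:

    #include <cstdint>

    // Scoped version of the versionbits state machine, as in the patched versionbits.h.
    enum class ThresholdState { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

    // One BIP9 transition. 'mtp' is the median time past of the last block of the
    // previous period; 'threshold_met' abstracts the "count >= nThreshold"
    // signalling check performed in GetStateFor().
    ThresholdState NextState(ThresholdState state, int64_t mtp, int64_t start_time,
                             int64_t timeout, bool threshold_met)
    {
        switch (state) {
        case ThresholdState::DEFINED:
            if (mtp >= timeout) return ThresholdState::FAILED;
            if (mtp >= start_time) return ThresholdState::STARTED;
            return ThresholdState::DEFINED;
        case ThresholdState::STARTED:
            if (mtp >= timeout) return ThresholdState::FAILED;
            if (threshold_met) return ThresholdState::LOCKED_IN;
            return ThresholdState::STARTED;
        case ThresholdState::LOCKED_IN:
            return ThresholdState::ACTIVE;  // always progresses into ACTIVE
        case ThresholdState::FAILED:
        case ThresholdState::ACTIVE:
            return state;                   // terminal states
        }
        return state;                       // unreachable; silences -Wreturn-type
    }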
diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h index f3ae7144b4..fdeb4cfee0 100644 --- a/src/wallet/crypter.h +++ b/src/wallet/crypter.h @@ -67,7 +67,7 @@ public: typedef std::vector<unsigned char, secure_allocator<unsigned char> > CKeyingMaterial; -namespace wallet_crypto +namespace crypto_tests { class TestCrypter; } @@ -75,7 +75,7 @@ namespace wallet_crypto /** Encryption/decryption context with key information */ class CCrypter { -friend class wallet_crypto::TestCrypter; // for test access to chKey/chIV +friend class crypto_tests::TestCrypter; // for test access to chKey/chIV private: std::vector<unsigned char, secure_allocator<unsigned char>> vchKey; std::vector<unsigned char, secure_allocator<unsigned char>> vchIV; diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index ebe7b48da0..553cae4d02 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -235,13 +235,13 @@ CDBEnv::VerifyResult CDBEnv::Verify(const std::string& strFile, recoverFunc_type Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); if (result == 0) - return VERIFY_OK; + return VerifyResult::VERIFY_OK; else if (recoverFunc == nullptr) - return RECOVER_FAIL; + return VerifyResult::RECOVER_FAIL; // Try to recover: bool fRecovered = (*recoverFunc)(fs::path(strPath) / strFile, out_backup_filename); - return (fRecovered ? RECOVER_OK : RECOVER_FAIL); + return (fRecovered ? VerifyResult::RECOVER_OK : VerifyResult::RECOVER_FAIL); } bool CDB::Recover(const fs::path& file_path, void *callbackDataIn, bool (*recoverKVcallback)(void* callbackData, CDataStream ssKey, CDataStream ssValue), std::string& newFilename) @@ -347,7 +347,7 @@ bool CDB::VerifyDatabaseFile(const fs::path& file_path, std::string& warningStr, { std::string backup_filename; CDBEnv::VerifyResult r = env->Verify(walletFile, recoverFunc, backup_filename); - if (r == CDBEnv::RECOVER_OK) + if (r == CDBEnv::VerifyResult::RECOVER_OK) { warningStr = strprintf(_("Warning: Wallet file corrupt, data salvaged!" " Original %s saved as %s in %s; if" @@ -355,7 +355,7 @@ bool CDB::VerifyDatabaseFile(const fs::path& file_path, std::string& warningStr, " restore from a backup."), walletFile, backup_filename, walletDir); } - if (r == CDBEnv::RECOVER_FAIL) + if (r == CDBEnv::VerifyResult::RECOVER_FAIL) { errorStr = strprintf(_("%s corrupt, salvage failed"), walletFile); return false; diff --git a/src/wallet/db.h b/src/wallet/db.h index b1ce451534..65bb8cc253 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -53,7 +53,7 @@ public: * This must be called BEFORE strFile is opened. * Returns true if strFile is OK. 
*/ - enum VerifyResult { VERIFY_OK, + enum class VerifyResult { VERIFY_OK, RECOVER_OK, RECOVER_FAIL }; typedef bool (*recoverFunc_type)(const fs::path& file_path, std::string& out_backup_filename); diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 61481e01b6..3d7bb674f0 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -14,7 +14,7 @@ #include <wallet/wallet.h> #include <wallet/walletutil.h> -std::string GetWalletHelpString(bool showDebug) +std::string WalletInit::GetHelpString(bool showDebug) { std::string strUsage = HelpMessageGroup(_("Wallet options:")); strUsage += HelpMessageOpt("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE))); @@ -56,7 +56,7 @@ std::string GetWalletHelpString(bool showDebug) return strUsage; } -bool WalletParameterInteraction() +bool WalletInit::ParameterInteraction() { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { for (const std::string& wallet : gArgs.GetArgs("-wallet")) { @@ -184,7 +184,7 @@ bool WalletParameterInteraction() return true; } -void RegisterWalletRPC(CRPCTable &t) +void WalletInit::RegisterRPC(CRPCTable &t) { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { return; @@ -193,7 +193,7 @@ void RegisterWalletRPC(CRPCTable &t) RegisterWalletRPCCommands(t); } -bool VerifyWallets() +bool WalletInit::Verify() { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { return true; @@ -268,7 +268,7 @@ bool VerifyWallets() return true; } -bool OpenWallets() +bool WalletInit::Open() { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { LogPrintf("Wallet disabled!\n"); @@ -286,25 +286,29 @@ bool OpenWallets() return true; } -void StartWallets(CScheduler& scheduler) { +void WalletInit::Start(CScheduler& scheduler) +{ for (CWalletRef pwallet : vpwallets) { pwallet->postInitProcess(scheduler); } } -void FlushWallets() { +void WalletInit::Flush() +{ for (CWalletRef pwallet : vpwallets) { pwallet->Flush(false); } } -void StopWallets() { +void WalletInit::Stop() +{ for (CWalletRef pwallet : vpwallets) { pwallet->Flush(true); } } -void CloseWallets() { +void WalletInit::Close() +{ for (CWalletRef pwallet : vpwallets) { delete pwallet; } diff --git a/src/wallet/init.h b/src/wallet/init.h index 0b3ee2dda2..f8be90d3e3 100644 --- a/src/wallet/init.h +++ b/src/wallet/init.h @@ -6,38 +6,43 @@ #ifndef BITCOIN_WALLET_INIT_H #define BITCOIN_WALLET_INIT_H +#include <walletinitinterface.h> #include <string> class CRPCTable; class CScheduler; -//! Return the wallets help message. -std::string GetWalletHelpString(bool showDebug); +class WalletInit : public WalletInitInterface { +public: -//! Wallets parameter interaction -bool WalletParameterInteraction(); + //! Return the wallets help message. + std::string GetHelpString(bool showDebug) override; -//! Register wallet RPCs. -void RegisterWalletRPC(CRPCTable &tableRPC); + //! Wallets parameter interaction + bool ParameterInteraction() override; -//! Responsible for reading and validating the -wallet arguments and verifying the wallet database. -// This function will perform salvage on the wallet if requested, as long as only one wallet is -// being loaded (WalletParameterInteraction forbids -salvagewallet, -zapwallettxes or -upgradewallet with multiwallet). -bool VerifyWallets(); + //! Register wallet RPCs. + void RegisterRPC(CRPCTable &tableRPC) override; -//! Load wallet databases. -bool OpenWallets(); + //! 
Responsible for reading and validating the -wallet arguments and verifying the wallet database. + // This function will perform salvage on the wallet if requested, as long as only one wallet is + // being loaded (WalletParameterInteraction forbids -salvagewallet, -zapwallettxes or -upgradewallet with multiwallet). + bool Verify() override; -//! Complete startup of wallets. -void StartWallets(CScheduler& scheduler); + //! Load wallet databases. + bool Open() override; -//! Flush all wallets in preparation for shutdown. -void FlushWallets(); + //! Complete startup of wallets. + void Start(CScheduler& scheduler) override; -//! Stop all wallets. Wallets will be flushed first. -void StopWallets(); + //! Flush all wallets in preparation for shutdown. + void Flush() override; -//! Close all wallets. -void CloseWallets(); + //! Stop all wallets. Wallets will be flushed first. + void Stop() override; + + //! Close all wallets. + void Close() override; +}; #endif // BITCOIN_WALLET_INIT_H diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 1721bc6df6..28b6153ce1 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -406,7 +406,7 @@ UniValue removeprunedfunds(const JSONRPCRequest& request) vHash.push_back(hash); std::vector<uint256> vHashOut; - if (pwallet->ZapSelectTx(vHash, vHashOut) != DB_LOAD_OK) { + if (pwallet->ZapSelectTx(vHash, vHashOut) != DBErrors::LOAD_OK) { throw JSONRPCError(RPC_WALLET_ERROR, "Could not properly delete the transaction."); } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 0dc6de9564..c34b166a41 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -113,9 +113,9 @@ void WalletTxToJSON(const CWalletTx& wtx, UniValue& entry) if (confirms <= 0) { LOCK(mempool.cs); RBFTransactionState rbfState = IsRBFOptIn(*wtx.tx, mempool); - if (rbfState == RBF_TRANSACTIONSTATE_UNKNOWN) + if (rbfState == RBFTransactionState::UNKNOWN) rbfStatus = "unknown"; - else if (rbfState == RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125) + else if (rbfState == RBFTransactionState::REPLACEABLE_BIP125) rbfStatus = "yes"; } entry.pushKV("bip125-replaceable", rbfStatus); @@ -124,12 +124,12 @@ void WalletTxToJSON(const CWalletTx& wtx, UniValue& entry) entry.pushKV(item.first, item.second); } -std::string AccountFromValue(const UniValue& value) +std::string LabelFromValue(const UniValue& value) { - std::string strAccount = value.get_str(); - if (strAccount == "*") - throw JSONRPCError(RPC_WALLET_INVALID_ACCOUNT_NAME, "Invalid account name"); - return strAccount; + std::string label = value.get_str(); + if (label == "*") + throw JSONRPCError(RPC_WALLET_INVALID_LABEL_NAME, "Invalid label name"); + return label; } UniValue getnewaddress(const JSONRPCRequest& request) @@ -141,12 +141,12 @@ UniValue getnewaddress(const JSONRPCRequest& request) if (request.fHelp || request.params.size() > 2) throw std::runtime_error( - "getnewaddress ( \"account\" \"address_type\" )\n" + "getnewaddress ( \"label\" \"address_type\" )\n" "\nReturns a new Bitcoin address for receiving payments.\n" - "If 'account' is specified (DEPRECATED), it is added to the address book \n" - "so payments received with the address will be credited to 'account'.\n" + "If 'label' is specified, it is added to the address book \n" + "so payments received with the address will be associated with 'label'.\n" "\nArguments:\n" - "1. \"account\" (string, optional) DEPRECATED. The account name for the address to be linked to. If not provided, the default account \"\" is used. 
It can also be set to the empty string \"\" to represent the default account. The account does not need to exist, it will be created if there is no account by the given name.\n" + "1. \"label\" (string, optional) The label name for the address to be linked to. If not provided, the default label \"\" is used. It can also be set to the empty string \"\" to represent the default label. The label does not need to exist, it will be created if there is no label by the given name.\n" "2. \"address_type\" (string, optional) The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\". Default is set by -addresstype.\n" "\nResult:\n" "\"address\" (string) The new bitcoin address\n" @@ -157,10 +157,10 @@ UniValue getnewaddress(const JSONRPCRequest& request) LOCK2(cs_main, pwallet->cs_wallet); - // Parse the account first so we don't generate a key if there's an error - std::string strAccount; + // Parse the label first so we don't generate a key if there's an error + std::string label; if (!request.params[0].isNull()) - strAccount = AccountFromValue(request.params[0]); + label = LabelFromValue(request.params[0]); OutputType output_type = pwallet->m_default_address_type; if (!request.params[1].isNull()) { @@ -182,23 +182,23 @@ UniValue getnewaddress(const JSONRPCRequest& request) pwallet->LearnRelatedScripts(newKey, output_type); CTxDestination dest = GetDestinationForKey(newKey, output_type); - pwallet->SetAddressBook(dest, strAccount, "receive"); + pwallet->SetAddressBook(dest, label, "receive"); return EncodeDestination(dest); } -CTxDestination GetAccountDestination(CWallet* const pwallet, std::string strAccount, bool bForceNew=false) +CTxDestination GetLabelDestination(CWallet* const pwallet, const std::string& label, bool bForceNew=false) { CTxDestination dest; - if (!pwallet->GetAccountDestination(dest, strAccount, bForceNew)) { + if (!pwallet->GetLabelDestination(dest, label, bForceNew)) { throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, "Error: Keypool ran out, please call keypoolrefill first"); } return dest; } -UniValue getaccountaddress(const JSONRPCRequest& request) +UniValue getlabeladdress(const JSONRPCRequest& request) { CWallet * const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { @@ -207,27 +207,27 @@ UniValue getaccountaddress(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 1) throw std::runtime_error( - "getaccountaddress \"account\"\n" - "\nDEPRECATED. Returns the current Bitcoin address for receiving payments to this account.\n" + "getlabeladdress \"label\"\n" + "\nReturns the current Bitcoin address for receiving payments to this label.\n" "\nArguments:\n" - "1. \"account\" (string, required) The account name for the address. It can also be set to the empty string \"\" to represent the default account. The account does not need to exist, it will be created and a new address created if there is no account by the given name.\n" + "1. \"label\" (string, required) The label name for the address. It can also be set to the empty string \"\" to represent the default label. 
The label does not need to exist, it will be created and a new address created if there is no label by the given name.\n" "\nResult:\n" - "\"address\" (string) The account bitcoin address\n" + "\"address\" (string) The label bitcoin address\n" "\nExamples:\n" - + HelpExampleCli("getaccountaddress", "") - + HelpExampleCli("getaccountaddress", "\"\"") - + HelpExampleCli("getaccountaddress", "\"myaccount\"") - + HelpExampleRpc("getaccountaddress", "\"myaccount\"") + + HelpExampleCli("getlabeladdress", "") + + HelpExampleCli("getlabeladdress", "\"\"") + + HelpExampleCli("getlabeladdress", "\"mylabel\"") + + HelpExampleRpc("getlabeladdress", "\"mylabel\"") ); LOCK2(cs_main, pwallet->cs_wallet); - // Parse the account first so we don't generate a key if there's an error - std::string strAccount = AccountFromValue(request.params[0]); + // Parse the label first so we don't generate a key if there's an error + std::string label = LabelFromValue(request.params[0]); UniValue ret(UniValue::VSTR); - ret = EncodeDestination(GetAccountDestination(pwallet, strAccount)); + ret = EncodeDestination(GetLabelDestination(pwallet, label)); return ret; } @@ -281,7 +281,7 @@ UniValue getrawchangeaddress(const JSONRPCRequest& request) } -UniValue setaccount(const JSONRPCRequest& request) +UniValue setlabel(const JSONRPCRequest& request) { CWallet * const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { @@ -290,14 +290,14 @@ UniValue setaccount(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) throw std::runtime_error( - "setaccount \"address\" \"account\"\n" - "\nDEPRECATED. Sets the account associated with the given address.\n" + "setlabel \"address\" \"label\"\n" + "\nSets the label associated with the given address.\n" "\nArguments:\n" - "1. \"address\" (string, required) The bitcoin address to be associated with an account.\n" - "2. \"account\" (string, required) The account to assign the address to.\n" + "1. \"address\" (string, required) The bitcoin address to be associated with a label.\n" + "2. \"label\" (string, required) The label to assign the address to.\n" "\nExamples:\n" - + HelpExampleCli("setaccount", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"tabby\"") - + HelpExampleRpc("setaccount", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"tabby\"") + + HelpExampleCli("setlabel", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"tabby\"") + + HelpExampleRpc("setlabel", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"tabby\"") ); LOCK2(cs_main, pwallet->cs_wallet); @@ -307,23 +307,23 @@ UniValue setaccount(const JSONRPCRequest& request) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address"); } - std::string strAccount; + std::string label; if (!request.params[1].isNull()) - strAccount = AccountFromValue(request.params[1]); + label = LabelFromValue(request.params[1]); - // Only add the account if the address is yours. + // Only add the label if the address is yours. 
if (IsMine(*pwallet, dest)) { - // Detect when changing the account of an address that is the 'unused current key' of another account: + // Detect when changing the label of an address that is the 'unused current key' of another label: if (pwallet->mapAddressBook.count(dest)) { - std::string strOldAccount = pwallet->mapAddressBook[dest].name; - if (dest == GetAccountDestination(pwallet, strOldAccount)) { - GetAccountDestination(pwallet, strOldAccount, true); + std::string old_label = pwallet->mapAddressBook[dest].name; + if (dest == GetLabelDestination(pwallet, old_label)) { + GetLabelDestination(pwallet, old_label, true); } } - pwallet->SetAddressBook(dest, strAccount, "receive"); + pwallet->SetAddressBook(dest, label, "receive"); } else - throw JSONRPCError(RPC_MISC_ERROR, "setaccount can only be used with own address"); + throw JSONRPCError(RPC_MISC_ERROR, "setlabel can only be used with own address"); return NullUniValue; } @@ -390,7 +390,7 @@ UniValue getaddressesbyaccount(const JSONRPCRequest& request) LOCK2(cs_main, pwallet->cs_wallet); - std::string strAccount = AccountFromValue(request.params[0]); + std::string strAccount = LabelFromValue(request.params[0]); // Find all addresses that have the given account UniValue ret(UniValue::VARR); @@ -552,7 +552,7 @@ UniValue listaddressgroupings(const JSONRPCRequest& request) " [\n" " \"address\", (string) The bitcoin address\n" " amount, (numeric) The amount in " + CURRENCY_UNIT + "\n" - " \"account\" (string, optional) DEPRECATED. The account\n" + " \"label\" (string, optional) The label\n" " ]\n" " ,...\n" " ]\n" @@ -720,7 +720,7 @@ UniValue getreceivedbyaddress(const JSONRPCRequest& request) } -UniValue getreceivedbyaccount(const JSONRPCRequest& request) +UniValue getreceivedbylabel(const JSONRPCRequest& request) { CWallet * const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { @@ -729,22 +729,22 @@ UniValue getreceivedbyaccount(const JSONRPCRequest& request) if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) throw std::runtime_error( - "getreceivedbyaccount \"account\" ( minconf )\n" - "\nDEPRECATED. Returns the total amount received by addresses with <account> in transactions with at least [minconf] confirmations.\n" + "getreceivedbylabel \"label\" ( minconf )\n" + "\nReturns the total amount received by addresses with <label> in transactions with at least [minconf] confirmations.\n" "\nArguments:\n" - "1. \"account\" (string, required) The selected account, may be the default account using \"\".\n" + "1. \"label\" (string, required) The selected label, may be the default label using \"\".\n" "2. 
minconf (numeric, optional, default=1) Only include transactions confirmed at least this many times.\n" "\nResult:\n" - "amount (numeric) The total amount in " + CURRENCY_UNIT + " received for this account.\n" + "amount (numeric) The total amount in " + CURRENCY_UNIT + " received for this label.\n" "\nExamples:\n" - "\nAmount received by the default account with at least 1 confirmation\n" - + HelpExampleCli("getreceivedbyaccount", "\"\"") + - "\nAmount received at the tabby account including unconfirmed amounts with zero confirmations\n" - + HelpExampleCli("getreceivedbyaccount", "\"tabby\" 0") + + "\nAmount received by the default label with at least 1 confirmation\n" + + HelpExampleCli("getreceivedbylabel", "\"\"") + + "\nAmount received at the tabby label including unconfirmed amounts with zero confirmations\n" + + HelpExampleCli("getreceivedbylabel", "\"tabby\" 0") + "\nThe amount with at least 6 confirmations\n" - + HelpExampleCli("getreceivedbyaccount", "\"tabby\" 6") + + + HelpExampleCli("getreceivedbylabel", "\"tabby\" 6") + "\nAs a json rpc call\n" - + HelpExampleRpc("getreceivedbyaccount", "\"tabby\", 6") + + HelpExampleRpc("getreceivedbylabel", "\"tabby\", 6") ); ObserveSafeMode(); @@ -760,9 +760,9 @@ UniValue getreceivedbyaccount(const JSONRPCRequest& request) if (!request.params[1].isNull()) nMinDepth = request.params[1].get_int(); - // Get the set of pub keys assigned to account - std::string strAccount = AccountFromValue(request.params[0]); - std::set<CTxDestination> setAddress = pwallet->GetAccountAddresses(strAccount); + // Get the set of pub keys assigned to label + std::string label = LabelFromValue(request.params[0]); + std::set<CTxDestination> setAddress = pwallet->GetLabelAddresses(label); // Tally CAmount nAmount = 0; @@ -920,8 +920,8 @@ UniValue movecmd(const JSONRPCRequest& request) ObserveSafeMode(); LOCK2(cs_main, pwallet->cs_wallet); - std::string strFrom = AccountFromValue(request.params[0]); - std::string strTo = AccountFromValue(request.params[1]); + std::string strFrom = LabelFromValue(request.params[0]); + std::string strTo = LabelFromValue(request.params[1]); CAmount nAmount = AmountFromValue(request.params[2]); if (nAmount <= 0) throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount for send"); @@ -984,7 +984,7 @@ UniValue sendfrom(const JSONRPCRequest& request) LOCK2(cs_main, pwallet->cs_wallet); - std::string strAccount = AccountFromValue(request.params[0]); + std::string strAccount = LabelFromValue(request.params[0]); CTxDestination dest = DecodeDestination(request.params[1].get_str()); if (!IsValidDestination(dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address"); @@ -1076,7 +1076,7 @@ UniValue sendmany(const JSONRPCRequest& request) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } - std::string strAccount = AccountFromValue(request.params[0]); + std::string strAccount = LabelFromValue(request.params[0]); UniValue sendTo = request.params[1].get_obj(); int nMinDepth = 1; if (!request.params[2].isNull()) @@ -1145,6 +1145,9 @@ UniValue sendmany(const JSONRPCRequest& request) if (totalAmount > nBalance) throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, "Account has insufficient funds"); + // Shuffle recipient list + std::shuffle(vecSend.begin(), vecSend.end(), FastRandomContext()); + // Send CReserveKey keyChange(pwallet); CAmount nFeeRequired = 0; @@ -1171,12 +1174,12 @@ UniValue addmultisigaddress(const JSONRPCRequest& request) } if (request.fHelp || request.params.size() < 2 
|| request.params.size() > 4) { - std::string msg = "addmultisigaddress nrequired [\"key\",...] ( \"account\" \"address_type\" )\n" + std::string msg = "addmultisigaddress nrequired [\"key\",...] ( \"label\" \"address_type\" )\n" "\nAdd a nrequired-to-sign multisignature address to the wallet. Requires a new wallet backup.\n" "Each key is a Bitcoin address or hex-encoded public key.\n" "This functionality is only intended for use with non-watchonly addresses.\n" "See `importaddress` for watchonly p2sh address support.\n" - "If 'account' is specified (DEPRECATED), assign address to that account.\n" + "If 'label' is specified, assign address to that label.\n" "\nArguments:\n" "1. nrequired (numeric, required) The number of required signatures out of the n keys or addresses.\n" @@ -1185,7 +1188,7 @@ UniValue addmultisigaddress(const JSONRPCRequest& request) " \"address\" (string) bitcoin address or hex-encoded public key\n" " ...,\n" " ]\n" - "3. \"account\" (string, optional) DEPRECATED. An account to assign the addresses to.\n" + "3. \"label\" (string, optional) A label to assign the addresses to.\n" "4. \"address_type\" (string, optional) The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\". Default is set by -addresstype.\n" "\nResult:\n" @@ -1204,9 +1207,9 @@ UniValue addmultisigaddress(const JSONRPCRequest& request) LOCK2(cs_main, pwallet->cs_wallet); - std::string strAccount; + std::string label; if (!request.params[2].isNull()) - strAccount = AccountFromValue(request.params[2]); + label = LabelFromValue(request.params[2]); int required = request.params[0].get_int(); @@ -1233,7 +1236,7 @@ UniValue addmultisigaddress(const JSONRPCRequest& request) CScript inner = CreateMultisigRedeemscript(required, pubkeys); pwallet->AddCScript(inner); CTxDestination dest = pwallet->AddAndGetDestinationForScript(inner, output_type); - pwallet->SetAddressBook(dest, strAccount, "send"); + pwallet->SetAddressBook(dest, label, "send"); UniValue result(UniValue::VOBJ); result.pushKV("address", EncodeDestination(dest)); @@ -1385,14 +1388,14 @@ struct tallyitem } }; -UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByAccounts) +UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool by_label) { // Minimum confirmations int nMinDepth = 1; if (!params[0].isNull()) nMinDepth = params[0].get_int(); - // Whether to include empty accounts + // Whether to include empty labels bool fIncludeEmpty = false; if (!params[1].isNull()) fIncludeEmpty = params[1].get_bool(); @@ -1404,7 +1407,7 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA bool has_filtered_address = false; CTxDestination filtered_address = CNoDestination(); - if (!fByAccounts && params.size() > 3) { + if (!by_label && params.size() > 3) { if (!IsValidDestinationString(params[3].get_str())) { throw JSONRPCError(RPC_WALLET_ERROR, "address_filter parameter was invalid"); } @@ -1449,7 +1452,7 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA // Reply UniValue ret(UniValue::VARR); - std::map<std::string, tallyitem> mapAccountTally; + std::map<std::string, tallyitem> label_tally; // Create mapAddressBook iterator // If we aren't filtering, go from begin() to end() @@ -1466,7 +1469,7 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA for (auto item_it = start; item_it != end; ++item_it) { const CTxDestination& address = item_it->first; - const std::string& strAccount = 
item_it->second.name; + const std::string& label = item_it->second.name; auto it = mapTally.find(address); if (it == mapTally.end() && !fIncludeEmpty) continue; @@ -1481,9 +1484,9 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA fIsWatchonly = (*it).second.fIsWatchonly; } - if (fByAccounts) + if (by_label) { - tallyitem& _item = mapAccountTally[strAccount]; + tallyitem& _item = label_tally[label]; _item.nAmount += nAmount; _item.nConf = std::min(_item.nConf, nConf); _item.fIsWatchonly = fIsWatchonly; @@ -1494,11 +1497,10 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA if(fIsWatchonly) obj.pushKV("involvesWatchonly", true); obj.pushKV("address", EncodeDestination(address)); - obj.pushKV("account", strAccount); + obj.pushKV("account", label); obj.pushKV("amount", ValueFromAmount(nAmount)); obj.pushKV("confirmations", (nConf == std::numeric_limits<int>::max() ? 0 : nConf)); - if (!fByAccounts) - obj.pushKV("label", strAccount); + obj.pushKV("label", label); UniValue transactions(UniValue::VARR); if (it != mapTally.end()) { @@ -1512,9 +1514,9 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA } } - if (fByAccounts) + if (by_label) { - for (const auto& entry : mapAccountTally) + for (const auto& entry : label_tally) { CAmount nAmount = entry.second.nAmount; int nConf = entry.second.nConf; @@ -1524,6 +1526,7 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA obj.pushKV("account", entry.first); obj.pushKV("amount", ValueFromAmount(nAmount)); obj.pushKV("confirmations", (nConf == std::numeric_limits<int>::max() ? 0 : nConf)); + obj.pushKV("label", entry.first); ret.push_back(obj); } } @@ -1552,10 +1555,10 @@ UniValue listreceivedbyaddress(const JSONRPCRequest& request) " {\n" " \"involvesWatchonly\" : true, (bool) Only returned if imported addresses were involved in transaction\n" " \"address\" : \"receivingaddress\", (string) The receiving address\n" - " \"account\" : \"accountname\", (string) DEPRECATED. The account of the receiving address. The default account is \"\".\n" + " \"account\" : \"accountname\", (string) DEPRECATED. Backwards compatible alias for label.\n" " \"amount\" : x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " received by the address\n" " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n" - " \"label\" : \"label\", (string) A comment for the address/transaction, if any\n" + " \"label\" : \"label\", (string) The label of the receiving address. The default label is \"\".\n" " \"txids\": [\n" " n, (numeric) The ids of transactions received with the address \n" " ...\n" @@ -1582,7 +1585,7 @@ UniValue listreceivedbyaddress(const JSONRPCRequest& request) return ListReceived(pwallet, request.params, false); } -UniValue listreceivedbyaccount(const JSONRPCRequest& request) +UniValue listreceivedbylabel(const JSONRPCRequest& request) { CWallet * const pwallet = GetWalletForJSONRPCRequest(request); if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) { @@ -1591,29 +1594,29 @@ UniValue listreceivedbyaccount(const JSONRPCRequest& request) if (request.fHelp || request.params.size() > 3) throw std::runtime_error( - "listreceivedbyaccount ( minconf include_empty include_watchonly)\n" - "\nDEPRECATED. List balances by account.\n" + "listreceivedbylabel ( minconf include_empty include_watchonly)\n" + "\nList received transactions by label.\n" "\nArguments:\n" "1. 
minconf (numeric, optional, default=1) The minimum number of confirmations before payments are included.\n" - "2. include_empty (bool, optional, default=false) Whether to include accounts that haven't received any payments.\n" + "2. include_empty (bool, optional, default=false) Whether to include labels that haven't received any payments.\n" "3. include_watchonly (bool, optional, default=false) Whether to include watch-only addresses (see 'importaddress').\n" "\nResult:\n" "[\n" " {\n" " \"involvesWatchonly\" : true, (bool) Only returned if imported addresses were involved in transaction\n" - " \"account\" : \"accountname\", (string) The account name of the receiving account\n" - " \"amount\" : x.xxx, (numeric) The total amount received by addresses with this account\n" + " \"account\" : \"accountname\", (string) DEPRECATED. Backwards compatible alias for label.\n" + " \"amount\" : x.xxx, (numeric) The total amount received by addresses with this label\n" " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n" - " \"label\" : \"label\" (string) A comment for the address/transaction, if any\n" + " \"label\" : \"label\" (string) The label of the receiving address. The default label is \"\".\n" " }\n" " ,...\n" "]\n" "\nExamples:\n" - + HelpExampleCli("listreceivedbyaccount", "") - + HelpExampleCli("listreceivedbyaccount", "6 true") - + HelpExampleRpc("listreceivedbyaccount", "6, true, true") + + HelpExampleCli("listreceivedbylabel", "") + + HelpExampleCli("listreceivedbylabel", "6 true") + + HelpExampleRpc("listreceivedbylabel", "6, true, true") ); ObserveSafeMode(); @@ -2927,7 +2930,8 @@ UniValue listunspent(const JSONRPCRequest& request) " \"txid\" : \"txid\", (string) the transaction id \n" " \"vout\" : n, (numeric) the vout value\n" " \"address\" : \"address\", (string) the bitcoin address\n" - " \"account\" : \"account\", (string) DEPRECATED. The associated account, or \"\" for the default account\n" + " \"label\" : \"label\", (string) The associated label, or \"\" for the default label\n" + " \"account\" : \"account\", (string) DEPRECATED. 
Backwards compatible alias for label.\n" " \"scriptPubKey\" : \"key\", (string) the script key\n" " \"amount\" : x.xxx, (numeric) the transaction output amount in " + CURRENCY_UNIT + "\n" " \"confirmations\" : n, (numeric) The number of confirmations\n" @@ -3031,6 +3035,7 @@ UniValue listunspent(const JSONRPCRequest& request) entry.pushKV("address", EncodeDestination(address)); if (pwallet->mapAddressBook.count(address)) { + entry.pushKV("label", pwallet->mapAddressBook[address].name); entry.pushKV("account", pwallet->mapAddressBook[address].name); } @@ -3826,21 +3831,23 @@ static const CRPCCommand commands[] = { "hidden", "resendwallettransactions", &resendwallettransactions, {} }, { "wallet", "abandontransaction", &abandontransaction, {"txid"} }, { "wallet", "abortrescan", &abortrescan, {} }, - { "wallet", "addmultisigaddress", &addmultisigaddress, {"nrequired","keys","account","address_type"} }, + { "wallet", "addmultisigaddress", &addmultisigaddress, {"nrequired","keys","label|account","address_type"} }, { "hidden", "addwitnessaddress", &addwitnessaddress, {"address","p2sh"} }, { "wallet", "backupwallet", &backupwallet, {"destination"} }, { "wallet", "bumpfee", &bumpfee, {"txid", "options"} }, { "wallet", "dumpprivkey", &dumpprivkey, {"address"} }, { "wallet", "dumpwallet", &dumpwallet, {"filename"} }, { "wallet", "encryptwallet", &encryptwallet, {"passphrase"} }, - { "wallet", "getaccountaddress", &getaccountaddress, {"account"} }, + { "wallet", "getlabeladdress", &getlabeladdress, {"label"} }, + { "wallet", "getaccountaddress", &getlabeladdress, {"account"} }, { "wallet", "getaccount", &getaccount, {"address"} }, { "wallet", "getaddressesbyaccount", &getaddressesbyaccount, {"account"} }, { "wallet", "getaddressinfo", &getaddressinfo, {"address"} }, { "wallet", "getbalance", &getbalance, {"account","minconf","include_watchonly"} }, - { "wallet", "getnewaddress", &getnewaddress, {"account","address_type"} }, + { "wallet", "getnewaddress", &getnewaddress, {"label|account","address_type"} }, { "wallet", "getrawchangeaddress", &getrawchangeaddress, {"address_type"} }, - { "wallet", "getreceivedbyaccount", &getreceivedbyaccount, {"account","minconf"} }, + { "wallet", "getreceivedbylabel", &getreceivedbylabel, {"label","minconf"} }, + { "wallet", "getreceivedbyaccount", &getreceivedbylabel, {"account","minconf"} }, { "wallet", "getreceivedbyaddress", &getreceivedbyaddress, {"address","minconf"} }, { "wallet", "gettransaction", &gettransaction, {"txid","include_watchonly"} }, { "wallet", "getunconfirmedbalance", &getunconfirmedbalance, {} }, @@ -3855,7 +3862,8 @@ static const CRPCCommand commands[] = { "wallet", "listaccounts", &listaccounts, {"minconf","include_watchonly"} }, { "wallet", "listaddressgroupings", &listaddressgroupings, {} }, { "wallet", "listlockunspent", &listlockunspent, {} }, - { "wallet", "listreceivedbyaccount", &listreceivedbyaccount, {"minconf","include_empty","include_watchonly"} }, + { "wallet", "listreceivedbylabel", &listreceivedbylabel, {"minconf","include_empty","include_watchonly"} }, + { "wallet", "listreceivedbyaccount", &listreceivedbylabel, {"minconf","include_empty","include_watchonly"} }, { "wallet", "listreceivedbyaddress", &listreceivedbyaddress, {"minconf","include_empty","include_watchonly","address_filter"} }, { "wallet", "listsinceblock", &listsinceblock, {"blockhash","target_confirmations","include_watchonly","include_removed"} }, { "wallet", "listtransactions", &listtransactions, {"account","count","skip","include_watchonly"} }, @@ -3866,7 +3874,8 
@@ static const CRPCCommand commands[] = { "wallet", "sendfrom", &sendfrom, {"fromaccount","toaddress","amount","minconf","comment","comment_to"} }, { "wallet", "sendmany", &sendmany, {"fromaccount","amounts","minconf","comment","subtractfeefrom","replaceable","conf_target","estimate_mode"} }, { "wallet", "sendtoaddress", &sendtoaddress, {"address","amount","comment","comment_to","subtractfeefromamount","replaceable","conf_target","estimate_mode"} }, - { "wallet", "setaccount", &setaccount, {"address","account"} }, + { "wallet", "setlabel", &setlabel, {"address","label"} }, + { "wallet", "setaccount", &setlabel, {"address","account"} }, { "wallet", "settxfee", &settxfee, {"amount"} }, { "wallet", "signmessage", &signmessage, {"address","message"} }, { "wallet", "signrawtransactionwithwallet", &signrawtransactionwithwallet, {"hexstring","prevtxs","sighashtype"} }, diff --git a/src/wallet/test/accounting_tests.cpp b/src/wallet/test/accounting_tests.cpp index aae328d81f..cc6e491f53 100644 --- a/src/wallet/test/accounting_tests.cpp +++ b/src/wallet/test/accounting_tests.cpp @@ -18,7 +18,7 @@ GetResults(CWallet& wallet, std::map<CAmount, CAccountingEntry>& results) std::list<CAccountingEntry> aes; results.clear(); - BOOST_CHECK(wallet.ReorderTransactions() == DB_LOAD_OK); + BOOST_CHECK(wallet.ReorderTransactions() == DBErrors::LOAD_OK); wallet.ListAccountCreditDebit("", aes); for (CAccountingEntry& ae : aes) { diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp index f05c81cd4c..184a8a3f1f 100644 --- a/src/wallet/test/coinselector_tests.cpp +++ b/src/wallet/test/coinselector_tests.cpp @@ -4,6 +4,7 @@ #include "wallet/wallet.h" #include "wallet/coinselection.h" +#include "wallet/coincontrol.h" #include "amount.h" #include "primitives/transaction.h" #include "random.h" @@ -13,7 +14,7 @@ #include <boost/test/unit_test.hpp> #include <random> -BOOST_FIXTURE_TEST_SUITE(coin_selection_tests, WalletTestingSetup) +BOOST_FIXTURE_TEST_SUITE(coinselector_tests, WalletTestingSetup) // how many times to run all the tests to have a chance to catch errors that only show up with particular random shuffles #define RUN_TESTS 100 @@ -27,7 +28,7 @@ std::vector<std::unique_ptr<CWalletTx>> wtxn; typedef std::set<CInputCoin> CoinSet; static std::vector<COutput> vCoins; -static const CWallet testWallet("dummy", CWalletDBWrapper::CreateDummy()); +static CWallet testWallet("dummy", CWalletDBWrapper::CreateDummy()); static CAmount balance = 0; CoinEligibilityFilter filter_standard(1, 6, 0); @@ -72,6 +73,7 @@ static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = fa } COutput output(wtx.get(), nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */); vCoins.push_back(output); + testWallet.AddToWallet(*wtx.get()); wtxn.emplace_back(std::move(wtx)); } @@ -222,6 +224,18 @@ BOOST_AUTO_TEST_CASE(bnb_search_test) add_coin(1); vCoins.at(0).nInputBytes = 40; // Make sure that it has a negative effective value. The next check should assert if this somehow got through. 
Otherwise it will fail BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); + + // Make sure that we aren't using BnB when there are preset inputs + empty_wallet(); + add_coin(5 * CENT); + add_coin(3 * CENT); + add_coin(2 * CENT); + CCoinControl coin_control; + coin_control.fAllowOtherInputs = true; + coin_control.Select(COutPoint(vCoins.at(0).tx->GetHash(), vCoins.at(0).i)); + BOOST_CHECK(testWallet.SelectCoins(vCoins, 10 * CENT, setCoinsRet, nValueRet, coin_control, coin_selection_params_bnb, bnb_used)); + BOOST_CHECK(!bnb_used); + BOOST_CHECK(!coin_selection_params_bnb.use_bnb); } BOOST_AUTO_TEST_CASE(knapsack_solver_test) diff --git a/src/wallet/test/crypto_tests.cpp b/src/wallet/test/crypto_tests.cpp index 89b2c4e796..d8c0cdf0f9 100644 --- a/src/wallet/test/crypto_tests.cpp +++ b/src/wallet/test/crypto_tests.cpp @@ -10,7 +10,7 @@ #include <boost/test/unit_test.hpp> -BOOST_FIXTURE_TEST_SUITE(wallet_crypto, BasicTestingSetup) +BOOST_FIXTURE_TEST_SUITE(crypto_tests, BasicTestingSetup) class TestCrypter { diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index d63c40d2ff..c9843599d6 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -727,11 +727,11 @@ DBErrors CWallet::ReorderTransactions() if (pwtx) { if (!walletdb.WriteTx(*pwtx)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } else if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } else { @@ -751,16 +751,16 @@ DBErrors CWallet::ReorderTransactions() if (pwtx) { if (!walletdb.WriteTx(*pwtx)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } else if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } } walletdb.WriteOrderPosNext(nOrderPosNext); - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } int64_t CWallet::IncOrderPosNext(CWalletDB *pwalletdb) @@ -809,12 +809,12 @@ bool CWallet::AccountMove(std::string strFrom, std::string strTo, CAmount nAmoun return true; } -bool CWallet::GetAccountDestination(CTxDestination &dest, std::string strAccount, bool bForceNew) +bool CWallet::GetLabelDestination(CTxDestination &dest, const std::string& label, bool bForceNew) { CWalletDB walletdb(*dbw); CAccount account; - walletdb.ReadAccount(strAccount, account); + walletdb.ReadAccount(label, account); if (!bForceNew) { if (!account.vchPubKey.IsValid()) @@ -840,8 +840,8 @@ bool CWallet::GetAccountDestination(CTxDestination &dest, std::string strAccount LearnRelatedScripts(account.vchPubKey, m_default_address_type); dest = GetDestinationForKey(account.vchPubKey, m_default_address_type); - SetAddressBook(dest, strAccount, "receive"); - walletdb.WriteAccount(strAccount, account); + SetAddressBook(dest, label, "receive"); + walletdb.WriteAccount(label, account); } else { dest = GetDestinationForKey(account.vchPubKey, m_default_address_type); } @@ -2220,7 +2220,7 @@ CAmount CWallet::GetLegacyBalance(const isminefilter& filter, int minDepth, cons for (const CTxOut& out : wtx.tx->vout) { if (outgoing && IsChange(out)) { debit -= out.nValue; - } else if (IsMine(out) & filter && depth >= minDepth && (!account || *account == GetAccountName(out.scriptPubKey))) { + } else if (IsMine(out) & filter && depth >= minDepth && (!account || *account == GetLabelName(out.scriptPubKey))) { balance += out.nValue; } } @@ -2491,7 +2491,7 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibil } 
} -bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CCoinControl& coin_control, const CoinSelectionParams& coin_selection_params, bool& bnb_used) const +bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params, bool& bnb_used) const { std::vector<COutput> vCoins(vAvailableCoins); @@ -2521,6 +2521,7 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm { // For now, don't use BnB if preset inputs are selected. TODO: Enable this later bnb_used = false; + coin_selection_params.use_bnb = false; std::map<uint256, CWalletTx>::const_iterator it = mapWallet.find(outpoint.hash); if (it != mapWallet.end()) @@ -2889,20 +2890,11 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransac nChangePosInOut = -1; } - // Fill vin + // Dummy fill vin for maximum size estimation // - // Note how the sequence number is set to non-maxint so that - // the nLockTime set above actually works. - // - // BIP125 defines opt-in RBF as any nSequence < maxint-1, so - // we use the highest possible value in that range (maxint-2) - // to avoid conflicting with other possible uses of nSequence, - // and in the spirit of "smallest possible change from prior - // behavior." - const uint32_t nSequence = coin_control.signalRbf ? MAX_BIP125_RBF_SEQUENCE : (CTxIn::SEQUENCE_FINAL - 1); - for (const auto& coin : setCoins) - txNew.vin.push_back(CTxIn(coin.outpoint,CScript(), - nSequence)); + for (const auto& coin : setCoins) { + txNew.vin.push_back(CTxIn(coin.outpoint,CScript())); + } nBytes = CalculateMaximumSignedTxSize(txNew, this); if (nBytes < 0) { @@ -2992,11 +2984,29 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransac if (nChangePosInOut == -1) reservekey.ReturnKey(); // Return any reserved key if we don't have change + // Shuffle selected coins and fill in final vin + txNew.vin.clear(); + std::vector<CInputCoin> selected_coins(setCoins.begin(), setCoins.end()); + std::shuffle(selected_coins.begin(), selected_coins.end(), FastRandomContext()); + + // Note how the sequence number is set to non-maxint so that + // the nLockTime set above actually works. + // + // BIP125 defines opt-in RBF as any nSequence < maxint-1, so + // we use the highest possible value in that range (maxint-2) + // to avoid conflicting with other possible uses of nSequence, + // and in the spirit of "smallest possible change from prior + // behavior." + const uint32_t nSequence = coin_control.signalRbf ? 
MAX_BIP125_RBF_SEQUENCE : (CTxIn::SEQUENCE_FINAL - 1); + for (const auto& coin : selected_coins) { + txNew.vin.push_back(CTxIn(coin.outpoint, CScript(), nSequence)); + } + if (sign) { CTransaction txNewConst(txNew); int nIn = 0; - for (const auto& coin : setCoins) + for (const auto& coin : selected_coins) { const CScript& scriptPubKey = coin.txout.scriptPubKey; SignatureData sigdata; @@ -3136,7 +3146,7 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) fFirstRunRet = false; DBErrors nLoadWalletRet = CWalletDB(*dbw,"cr+").LoadWallet(this); - if (nLoadWalletRet == DB_NEED_REWRITE) + if (nLoadWalletRet == DBErrors::NEED_REWRITE) { if (dbw->Rewrite("\x04pool")) { @@ -3152,12 +3162,12 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) // This wallet is in its first run if all of these are empty fFirstRunRet = mapKeys.empty() && mapCryptedKeys.empty() && mapWatchKeys.empty() && setWatchOnly.empty() && mapScripts.empty(); - if (nLoadWalletRet != DB_LOAD_OK) + if (nLoadWalletRet != DBErrors::LOAD_OK) return nLoadWalletRet; uiInterface.LoadWallet(this); - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut) @@ -3167,7 +3177,7 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256 for (uint256 hash : vHashOut) mapWallet.erase(hash); - if (nZapSelectTxRet == DB_NEED_REWRITE) + if (nZapSelectTxRet == DBErrors::NEED_REWRITE) { if (dbw->Rewrite("\x04pool")) { @@ -3180,19 +3190,19 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256 } } - if (nZapSelectTxRet != DB_LOAD_OK) + if (nZapSelectTxRet != DBErrors::LOAD_OK) return nZapSelectTxRet; MarkDirty(); - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx) { DBErrors nZapWalletTxRet = CWalletDB(*dbw,"cr+").ZapWalletTx(vWtx); - if (nZapWalletTxRet == DB_NEED_REWRITE) + if (nZapWalletTxRet == DBErrors::NEED_REWRITE) { if (dbw->Rewrite("\x04pool")) { @@ -3206,10 +3216,10 @@ DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx) } } - if (nZapWalletTxRet != DB_LOAD_OK) + if (nZapWalletTxRet != DBErrors::LOAD_OK) return nZapWalletTxRet; - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } @@ -3251,7 +3261,7 @@ bool CWallet::DelAddressBook(const CTxDestination& address) return CWalletDB(*dbw).EraseName(EncodeDestination(address)); } -const std::string& CWallet::GetAccountName(const CScript& scriptPubKey) const +const std::string& CWallet::GetLabelName(const CScript& scriptPubKey) const { CTxDestination address; if (ExtractDestination(scriptPubKey, address) && !scriptPubKey.IsUnspendable()) { @@ -3261,9 +3271,9 @@ const std::string& CWallet::GetAccountName(const CScript& scriptPubKey) const } } // A scriptPubKey that doesn't have an entry in the address book is - // associated with the default account (""). - const static std::string DEFAULT_ACCOUNT_NAME; - return DEFAULT_ACCOUNT_NAME; + // associated with the default label (""). 
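The wallet hunks above introduce two shuffles: sendmany now shuffles vecSend before the transaction is built, and CreateTransaction first fills vin with dummy inputs for size estimation and later refills it from a shuffled copy of the selected coins. Below is a minimal standalone sketch of that std::shuffle pattern, not wallet code: std::mt19937 stands in for the FastRandomContext generator used in the patch, and the int vector stands in for the CRecipient / CInputCoin containers.

// Standalone sketch of the shuffle pattern used in the hunks above (not Bitcoin Core code).
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

int main()
{
    std::vector<int> entries{1, 2, 3, 4, 5};      // stand-ins for CRecipient / CInputCoin entries
    std::mt19937 rng{std::random_device{}()};     // the patch passes FastRandomContext() here instead

    // Randomizing the order avoids a fixed, fingerprintable input/output ordering.
    std::shuffle(entries.begin(), entries.end(), rng);

    for (int e : entries) std::cout << e << ' ';
    std::cout << '\n';
    return 0;
}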
+ const static std::string DEFAULT_LABEL_NAME; + return DEFAULT_LABEL_NAME; } /** @@ -3619,7 +3629,7 @@ std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() return ret; } -std::set<CTxDestination> CWallet::GetAccountAddresses(const std::string& strAccount) const +std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) const { LOCK(cs_wallet); std::set<CTxDestination> result; @@ -3627,7 +3637,7 @@ std::set<CTxDestination> CWallet::GetAccountAddresses(const std::string& strAcco { const CTxDestination& address = item.first; const std::string& strName = item.second.name; - if (strName == strAccount) + if (strName == label) result.insert(address); } return result; @@ -3921,7 +3931,7 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path& std::unique_ptr<CWallet> tempWallet = MakeUnique<CWallet>(name, CWalletDBWrapper::Create(path)); DBErrors nZapWalletRet = tempWallet->ZapWalletTx(vWtx); - if (nZapWalletRet != DB_LOAD_OK) { + if (nZapWalletRet != DBErrors::LOAD_OK) { InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); return nullptr; } @@ -3933,23 +3943,23 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path& bool fFirstRun = true; CWallet *walletInstance = new CWallet(name, CWalletDBWrapper::Create(path)); DBErrors nLoadWalletRet = walletInstance->LoadWallet(fFirstRun); - if (nLoadWalletRet != DB_LOAD_OK) + if (nLoadWalletRet != DBErrors::LOAD_OK) { - if (nLoadWalletRet == DB_CORRUPT) { + if (nLoadWalletRet == DBErrors::CORRUPT) { InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); return nullptr; } - else if (nLoadWalletRet == DB_NONCRITICAL_ERROR) + else if (nLoadWalletRet == DBErrors::NONCRITICAL_ERROR) { InitWarning(strprintf(_("Error reading %s! All keys read correctly, but transaction data" " or address book entries might be missing or incorrect."), walletFile)); } - else if (nLoadWalletRet == DB_TOO_NEW) { + else if (nLoadWalletRet == DBErrors::TOO_NEW) { InitError(strprintf(_("Error loading %s: Wallet requires newer version of %s"), walletFile, _(PACKAGE_NAME))); return nullptr; } - else if (nLoadWalletRet == DB_NEED_REWRITE) + else if (nLoadWalletRet == DBErrors::NEED_REWRITE) { InitError(strprintf(_("Wallet needed to be rewritten: restart %s to complete"), _(PACKAGE_NAME))); return nullptr; diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 45d9762bde..3ef5bfbb65 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -665,15 +665,6 @@ private: std::mutex mutexScanning; friend class WalletRescanReserver; - - /** - * Select a set of coins such that nValueRet >= nTargetValue and at least - * all coins from coinControl are selected; Never select unconfirmed coins - * if they are not ours - */ - bool SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, - const CCoinControl& coin_control, const CoinSelectionParams& coin_selection_params, bool& bnb_used) const; - CWalletDB *pwalletdbEncryption; //! 
the current wallet version: clients below this version are not able to load the wallet @@ -766,6 +757,14 @@ public: return *dbw; } + /** + * Select a set of coins such that nValueRet >= nTargetValue and at least + * all coins from coinControl are selected; Never select unconfirmed coins + * if they are not ours + */ + bool SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, + const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params, bool& bnb_used) const; + /** Get a name for this wallet for logging/debugging purposes. */ const std::string& GetName() const { return m_name; } @@ -929,7 +928,7 @@ public: int64_t IncOrderPosNext(CWalletDB *pwalletdb = nullptr); DBErrors ReorderTransactions(); bool AccountMove(std::string strFrom, std::string strTo, CAmount nAmount, std::string strComment = ""); - bool GetAccountDestination(CTxDestination &dest, std::string strAccount, bool bForceNew = false); + bool GetLabelDestination(CTxDestination &dest, const std::string& label, bool bForceNew = false); void MarkDirty(); bool AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose=true); @@ -1007,7 +1006,7 @@ public: std::set< std::set<CTxDestination> > GetAddressGroupings(); std::map<CTxDestination, CAmount> GetAddressBalances(); - std::set<CTxDestination> GetAccountAddresses(const std::string& strAccount) const; + std::set<CTxDestination> GetLabelAddresses(const std::string& label) const; isminetype IsMine(const CTxIn& txin) const; /** @@ -1037,7 +1036,7 @@ public: bool DelAddressBook(const CTxDestination& address); - const std::string& GetAccountName(const CScript& scriptPubKey) const; + const std::string& GetLabelName(const CScript& scriptPubKey) const; void Inventory(const uint256 &hash) override { diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 7f5f3b84b2..77cdfe7dd0 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -522,7 +522,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) { CWalletScanState wss; bool fNoncriticalErrors = false; - DBErrors result = DB_LOAD_OK; + DBErrors result = DBErrors::LOAD_OK; LOCK(pwallet->cs_wallet); try { @@ -530,7 +530,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) if (batch.Read((std::string)"minversion", nMinVersion)) { if (nMinVersion > CLIENT_VERSION) - return DB_TOO_NEW; + return DBErrors::TOO_NEW; pwallet->LoadMinVersion(nMinVersion); } @@ -539,7 +539,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) if (!pcursor) { LogPrintf("Error getting wallet database cursor\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } while (true) @@ -553,7 +553,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) else if (ret != 0) { LogPrintf("Error reading next record from wallet database\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } // Try to be tolerant of single corrupt records: @@ -563,7 +563,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) // losing keys is considered a catastrophic error, anything else // we assume the user can live with: if (IsKeyType(strType) || strType == "defaultkey") - result = DB_CORRUPT; + result = DBErrors::CORRUPT; else { // Leave other errors alone, if we try to fix them we might make things worse. @@ -582,15 +582,15 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) throw; } catch (...) 
{ - result = DB_CORRUPT; + result = DBErrors::CORRUPT; } - if (fNoncriticalErrors && result == DB_LOAD_OK) - result = DB_NONCRITICAL_ERROR; + if (fNoncriticalErrors && result == DBErrors::LOAD_OK) + result = DBErrors::NONCRITICAL_ERROR; // Any wallet corruption at all: skip any rewriting or // upgrading, we don't want to make it worse. - if (result != DB_LOAD_OK) + if (result != DBErrors::LOAD_OK) return result; LogPrintf("nFileVersion = %d\n", wss.nFileVersion); @@ -607,7 +607,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) // Rewrite encrypted wallets of versions 0.4.0 and 0.5.0rc: if (wss.fIsEncrypted && (wss.nFileVersion == 40000 || wss.nFileVersion == 50000)) - return DB_NEED_REWRITE; + return DBErrors::NEED_REWRITE; if (wss.nFileVersion < CLIENT_VERSION) // Update WriteVersion(CLIENT_VERSION); @@ -626,14 +626,14 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWalletTx>& vWtx) { - DBErrors result = DB_LOAD_OK; + DBErrors result = DBErrors::LOAD_OK; try { int nMinVersion = 0; if (batch.Read((std::string)"minversion", nMinVersion)) { if (nMinVersion > CLIENT_VERSION) - return DB_TOO_NEW; + return DBErrors::TOO_NEW; } // Get cursor @@ -641,7 +641,7 @@ DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWal if (!pcursor) { LogPrintf("Error getting wallet database cursor\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } while (true) @@ -655,7 +655,7 @@ DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWal else if (ret != 0) { LogPrintf("Error reading next record from wallet database\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } std::string strType; @@ -677,7 +677,7 @@ DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWal throw; } catch (...) 
{ - result = DB_CORRUPT; + result = DBErrors::CORRUPT; } return result; @@ -689,7 +689,7 @@ DBErrors CWalletDB::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<uin std::vector<uint256> vTxHash; std::vector<CWalletTx> vWtx; DBErrors err = FindWalletTx(vTxHash, vWtx); - if (err != DB_LOAD_OK) { + if (err != DBErrors::LOAD_OK) { return err; } @@ -716,9 +716,9 @@ DBErrors CWalletDB::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<uin } if (delerror) { - return DB_CORRUPT; + return DBErrors::CORRUPT; } - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } DBErrors CWalletDB::ZapWalletTx(std::vector<CWalletTx>& vWtx) @@ -726,16 +726,16 @@ DBErrors CWalletDB::ZapWalletTx(std::vector<CWalletTx>& vWtx) // build list of wallet TXs std::vector<uint256> vTxHash; DBErrors err = FindWalletTx(vTxHash, vWtx); - if (err != DB_LOAD_OK) + if (err != DBErrors::LOAD_OK) return err; // erase each wallet TX for (uint256& hash : vTxHash) { if (!EraseTx(hash)) - return DB_CORRUPT; + return DBErrors::CORRUPT; } - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } void MaybeCompactWalletDB() diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 7d754c7284..606b7dace7 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -46,14 +46,14 @@ class uint160; class uint256; /** Error statuses for the wallet database */ -enum DBErrors +enum class DBErrors { - DB_LOAD_OK, - DB_CORRUPT, - DB_NONCRITICAL_ERROR, - DB_TOO_NEW, - DB_LOAD_FAIL, - DB_NEED_REWRITE + LOAD_OK, + CORRUPT, + NONCRITICAL_ERROR, + TOO_NEW, + LOAD_FAIL, + NEED_REWRITE }; /* simple HD chain data model */ diff --git a/src/walletinitinterface.h b/src/walletinitinterface.h new file mode 100644 index 0000000000..47e4e2cce1 --- /dev/null +++ b/src/walletinitinterface.h @@ -0,0 +1,51 @@ +// Copyright (c) 2017 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
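The walletdb.h hunk above turns DBErrors into a scoped enum class and drops the DB_ prefix from its values, which is why every DB_LOAD_OK / DB_CORRUPT / DB_NEED_REWRITE call site in wallet.cpp and walletdb.cpp is rewritten as DBErrors::LOAD_OK and so on. A minimal standalone sketch of what the scoped form enforces follows; LoadWalletStub is a hypothetical stand-in, not a function from the patch.

// Standalone sketch (not Bitcoin Core code) of the scoped-enum behaviour behind the renames above.
#include <iostream>

enum class DBErrors {          // scoped: values live in DBErrors::
    LOAD_OK,
    CORRUPT,
    NONCRITICAL_ERROR,
    TOO_NEW,
    LOAD_FAIL,
    NEED_REWRITE
};

// Hypothetical stand-in for a loader that reports a DBErrors status.
DBErrors LoadWalletStub(bool corrupt)
{
    return corrupt ? DBErrors::CORRUPT : DBErrors::LOAD_OK;
}

int main()
{
    DBErrors result = LoadWalletStub(false);
    // if (result == 0) ...            // would not compile: no implicit conversion to int
    if (result != DBErrors::LOAD_OK) { // call sites must use the qualified name
        std::cout << "wallet load failed\n";
        return 1;
    }
    std::cout << "wallet loaded\n";
    return 0;
}

Scoped enumerators neither leak into the surrounding namespace nor convert implicitly to int, so unqualified or mismatched uses become compile errors instead of silent conversions.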
+ +#ifndef WALLETINITINTERFACE_H +#define WALLETINITINTERFACE_H + +#include <string> + +class CScheduler; +class CRPCTable; + +class WalletInitInterface { +public: + /** Get wallet help string */ + virtual std::string GetHelpString(bool showDebug) = 0; + /** Check wallet parameter interaction */ + virtual bool ParameterInteraction() = 0; + /** Register wallet RPC*/ + virtual void RegisterRPC(CRPCTable &) = 0; + /** Verify wallets */ + virtual bool Verify() = 0; + /** Open wallets*/ + virtual bool Open() = 0; + /** Start wallets*/ + virtual void Start(CScheduler& scheduler) = 0; + /** Flush Wallets*/ + virtual void Flush() = 0; + /** Stop Wallets*/ + virtual void Stop() = 0; + /** Close wallets */ + virtual void Close() = 0; + + virtual ~WalletInitInterface() {} +}; + +class DummyWalletInit : public WalletInitInterface { +public: + + std::string GetHelpString(bool showDebug) override {return std::string{};} + bool ParameterInteraction() override {return true;} + void RegisterRPC(CRPCTable &) override {} + bool Verify() override {return true;} + bool Open() override {return true;} + void Start(CScheduler& scheduler) override {} + void Flush() override {} + void Stop() override {} + void Close() override {} +}; + +#endif // WALLETINITINTERFACE_H diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index fe9bbda14b..181c7f3369 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -2,36 +2,59 @@ # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test block processing. - -This reimplements tests from the bitcoinj/FullBlockTestGenerator used -by the pull-tester. - -We use the testing framework in which we expect a particular answer from -each test. -""" - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.comptool import TestManager, TestInstance, RejectResult -from test_framework.blocktools import * +"""Test block processing.""" +import copy +import struct import time + +from test_framework.blocktools import create_block, create_coinbase, create_transaction, get_legacy_sigopcount_block from test_framework.key import CECKey -from test_framework.script import * -from test_framework.mininode import network_thread_start -import struct +from test_framework.messages import ( + CBlock, + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + MAX_BLOCK_BASE_SIZE, + uint256_from_compact, + uint256_from_str, +) +from test_framework.mininode import P2PDataStore, network_thread_start, network_thread_join +from test_framework.script import ( + CScript, + MAX_SCRIPT_ELEMENT_SIZE, + OP_2DUP, + OP_CHECKMULTISIG, + OP_CHECKMULTISIGVERIFY, + OP_CHECKSIG, + OP_CHECKSIGVERIFY, + OP_ELSE, + OP_ENDIF, + OP_EQUAL, + OP_FALSE, + OP_HASH160, + OP_IF, + OP_INVALIDOPCODE, + OP_RETURN, + OP_TRUE, + SIGHASH_ALL, + SignatureHash, + hash160, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +MAX_BLOCK_SIGOPS = 20000 class PreviousSpendableOutput(): - def __init__(self, tx = CTransaction(), n = -1): + def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending # Use this class for tests that require behavior other than normal "mininode" behavior. # For now, it is used to serialize a bloated varint (b64). 
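The new src/walletinitinterface.h above declares a pure-virtual WalletInitInterface along with a no-op DummyWalletInit. The sketch below shows, in standalone form, how init code can program against such an interface and fall back to the dummy when wallet support is compiled out; the EnableWalletStub class and the ENABLE_WALLET_STUB macro are illustrative names only and do not appear in the patch, and the interface is trimmed to two of its methods.

// Standalone sketch, not Bitcoin Core code: consuming a wallet init interface polymorphically.
#include <iostream>
#include <memory>
#include <string>

class WalletInitInterface {
public:
    virtual std::string GetHelpString(bool showDebug) = 0;
    virtual bool Open() = 0;
    virtual ~WalletInitInterface() {}
};

class DummyWalletInit : public WalletInitInterface {
public:
    std::string GetHelpString(bool showDebug) override { return std::string{}; }
    bool Open() override { return true; } // nothing to open when no wallet is built
};

// Hypothetical wallet-enabled implementation, for illustration only.
class EnableWalletStub : public WalletInitInterface {
public:
    std::string GetHelpString(bool showDebug) override { return "-wallet=<path>\n"; }
    bool Open() override { std::cout << "opening wallets\n"; return true; }
};

int main()
{
#ifdef ENABLE_WALLET_STUB // assumed build flag, chosen for this sketch only
    std::unique_ptr<WalletInitInterface> init = std::make_unique<EnableWalletStub>();
#else
    std::unique_ptr<WalletInitInterface> init = std::make_unique<DummyWalletInit>();
#endif
    std::cout << init->GetHelpString(false);
    return init->Open() ? 0 : 1;
}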
class CBrokenBlock(CBlock): - def __init__(self, header=None): - super(CBrokenBlock, self).__init__(header) - def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() @@ -48,381 +71,272 @@ class CBrokenBlock(CBlock): return r def normal_serialize(self): - r = b"" - r += super(CBrokenBlock, self).serialize() - return r + return super().serialize() -class FullBlockTest(ComparisonTestFramework): - # Can either run this test as 1 node with expected answers, or two and compare them. - # Change the "outcome" variable from each TestInstance object to only do the comparison. +class FullBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True + self.extra_args = [[]] + + def run_test(self): + node = self.nodes[0] # convenience reference to the node + + # reconnect_p2p() expects the network thread to be running + network_thread_start() + + self.reconnect_p2p() + self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"horsebattery") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.tip = None self.blocks = {} - - def add_options(self, parser): - super().add_options(parser) - parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True) - - def run_test(self): - self.test = TestManager(self, self.options.tmpdir) - self.test.add_all_connections(self.nodes) - network_thread_start() - self.test.run() - - def add_transactions_to_block(self, block, tx_list): - [ tx.rehash() for tx in tx_list ] - block.vtx.extend(tx_list) - - # this is a little handier to use than the version in blocktools.py - def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): - tx = create_transaction(spend_tx, n, b"", value, script) - return tx - - # sign a transaction, using the key we know about - # this signs input 0 in tx, which is assumed to be spending output n in spend_tx - def sign_tx(self, tx, spend_tx, n): - scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) - if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend - tx.vin[0].scriptSig = CScript() - return - (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) - tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) - - def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): - tx = self.create_tx(spend_tx, n, value, script) - self.sign_tx(tx, spend_tx, n) - tx.rehash() - return tx - - def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True): - if self.tip == None: - base_block_hash = self.genesis_hash - block_time = int(time.time())+1 - else: - base_block_hash = self.tip.sha256 - block_time = self.tip.nTime + 1 - # First create the coinbase - height = self.block_heights[base_block_hash] + 1 - coinbase = create_coinbase(height, self.coinbase_pubkey) - coinbase.vout[0].nValue += additional_coinbase_value - coinbase.rehash() - if spend == None: - block = create_block(base_block_hash, coinbase, block_time) - else: - coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees - coinbase.rehash() - block = create_block(base_block_hash, coinbase, block_time) - tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi - self.sign_tx(tx, spend.tx, spend.n) - self.add_transactions_to_block(block, [tx]) - block.hashMerkleRoot = block.calc_merkle_root() - if solve: - block.solve() - self.tip 
= block - self.block_heights[block.sha256] = height - assert number not in self.blocks - self.blocks[number] = block - return block - - def get_tests(self): self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 - spendable_outputs = [] - - # save the current tip so it can be spent by a later block - def save_spendable_output(): - spendable_outputs.append(self.tip) - - # get an output that we previously marked as spendable - def get_spendable_output(): - return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) - - # returns a test case that asserts that the current tip was accepted - def accepted(): - return TestInstance([[self.tip, True]]) - - # returns a test case that asserts that the current tip was rejected - def rejected(reject = None): - if reject is None: - return TestInstance([[self.tip, False]]) - else: - return TestInstance([[self.tip, reject]]) - - # move the tip back to a previous block - def tip(number): - self.tip = self.blocks[number] - - # adds transactions to the block and updates state - def update_block(block_number, new_transactions): - block = self.blocks[block_number] - self.add_transactions_to_block(block, new_transactions) - old_sha256 = block.sha256 - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - # Update the internal state just like in next_block - self.tip = block - if block.sha256 != old_sha256: - self.block_heights[block.sha256] = self.block_heights[old_sha256] - del self.block_heights[old_sha256] - self.blocks[block_number] = block - return block - - # shorthand for functions - block = self.next_block - create_tx = self.create_tx - create_and_sign_tx = self.create_and_sign_transaction - - # these must be updated if consensus changes - MAX_BLOCK_SIGOPS = 20000 - + self.spendable_outputs = [] # Create a new block - block(0) - save_spendable_output() - yield accepted() - + b0 = self.next_block(0) + self.save_spendable_output() + self.sync_blocks([b0]) - # Now we need that block to mature so we can spend the coinbase. - test = TestInstance(sync_every_block=False) + # Allow the block to mature + blocks = [] for i in range(99): - block(5000 + i) - test.blocks_and_transactions.append([self.tip, True]) - save_spendable_output() - yield test + blocks.append(self.next_block(5000 + i)) + self.save_spendable_output() + self.sync_blocks(blocks) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(33): - out.append(get_spendable_output()) + out.append(self.get_spendable_output()) # Start by building a couple of blocks on top (which output is spent is # in parentheses): # genesis -> b1 (0) -> b2 (1) - block(1, spend=out[0]) - save_spendable_output() - yield accepted() + b1 = self.next_block(1, spend=out[0]) + self.save_spendable_output() - block(2, spend=out[1]) - yield accepted() - save_spendable_output() + b2 = self.next_block(2, spend=out[1]) + self.save_spendable_output() - # so fork like this: + self.sync_blocks([b1, b2]) + + # Fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes priority. - tip(1) - b3 = block(3, spend=out[1]) + self.log.info("Don't reorg to a chain of the same length") + self.move_tip(1) + b3 = self.next_block(3, spend=out[1]) txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) - yield rejected() - + self.sync_blocks([b3], False) # Now we add another block to make the alternative chain longer. 
# # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) - block(4, spend=out[2]) - yield accepted() - + self.log.info("Reorg to a longer chain") + b4 = self.next_block(4, spend=out[2]) + self.sync_blocks([b4]) # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) - tip(2) - block(5, spend=out[2]) - save_spendable_output() - yield rejected() + self.move_tip(2) + b5 = self.next_block(5, spend=out[2]) + self.save_spendable_output() + self.sync_blocks([b5], False) - block(6, spend=out[3]) - yield accepted() + self.log.info("Reorg back to the original chain") + b6 = self.next_block(6, spend=out[3]) + self.sync_blocks([b6], True) # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) - tip(5) - block(7, spend=out[2]) - yield rejected() + self.log.info("Reject a chain with a double spend, even if it is longer") + self.move_tip(5) + b7 = self.next_block(7, spend=out[2]) + self.sync_blocks([b7], False) - block(8, spend=out[4]) - yield rejected() + b8 = self.next_block(8, spend=out[4]) + self.sync_blocks([b8], False, reconnect=True) # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) - tip(6) - block(9, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) + self.log.info("Reject a block where the miner creates too much coinbase reward") + self.move_tip(6) + b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1) + self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) - tip(5) - block(10, spend=out[3]) - yield rejected() - - block(11, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) + self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer") + self.move_tip(5) + b10 = self.next_block(10, spend=out[3]) + self.sync_blocks([b10], False) + b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1) + self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) - # (b12 added last) # \-> b3 (1) -> b4 (2) - tip(5) - b12 = block(12, spend=out[3]) - save_spendable_output() - b13 = block(13, spend=out[4]) - # Deliver the block header for b12, and the block b13. - # b13 should be accepted but the tip won't advance until b12 is delivered. - yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) - - save_spendable_output() - # b14 is invalid, but the node won't know that until it tries to connect - # Tip still can't advance because b12 is missing - block(14, spend=out[5], additional_coinbase_value=1) - yield rejected() - - yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
+ self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)") + self.move_tip(5) + b12 = self.next_block(12, spend=out[3]) + self.save_spendable_output() + b13 = self.next_block(13, spend=out[4]) + self.save_spendable_output() + b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1) + self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True) + + # New tip should be b13. + assert_equal(node.getbestblockhash(), b13.hash) # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) - - # Test that a block with a lot of checksigs is okay + self.log.info("Accept a block with lots of checksigs") lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1)) - tip(13) - block(15, spend=out[5], script=lots_of_checksigs) - yield accepted() - save_spendable_output() + self.move_tip(13) + b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs) + self.save_spendable_output() + self.sync_blocks([b15], True) - - # Test that a block with too many checksigs is rejected + self.log.info("Reject a block with too many checksigs") too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) - block(16, spend=out[6], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - + b16 = self.next_block(16, spend=out[6], script=too_many_checksigs) + self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) - tip(15) - block(17, spend=txout_b3) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.log.info("Reject a block with a spend from a re-org'ed out tx") + self.move_tip(15) + b17 = self.next_block(17, spend=txout_b3) + self.sync_blocks([b17], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) - tip(13) - block(18, spend=txout_b3) - yield rejected() + self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)") + self.move_tip(13) + b18 = self.next_block(18, spend=txout_b3) + self.sync_blocks([b18], False) - block(19, spend=out[6]) - yield rejected() + b19 = self.next_block(19, spend=out[6]) + self.sync_blocks([b19], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) - tip(15) - block(20, spend=out[7]) - yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) + self.log.info("Reject a block spending an immature coinbase.") + self.move_tip(15) + b20 = self.next_block(20, spend=out[7]) + self.sync_blocks([b20], False, 16, b'bad-txns-premature-spend-of-coinbase') # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) - tip(13) - block(21, spend=out[6]) - yield rejected() + self.log.info("Reject a block spending an immature coinbase 
(on a forked chain)") + self.move_tip(13) + b21 = self.next_block(21, spend=out[6]) + self.sync_blocks([b21], False) - block(22, spend=out[5]) - yield rejected() + b22 = self.next_block(22, spend=out[5]) + self.sync_blocks([b22], False, 16, b'bad-txns-premature-spend-of-coinbase') # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) - tip(15) - b23 = block(23, spend=out[6]) + self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE") + self.move_tip(15) + b23 = self.next_block(23, spend=out[6]) tx = CTransaction() script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) - b23 = update_block(23, [tx]) + b23 = self.update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE) - yield accepted() - save_spendable_output() + self.sync_blocks([b23], True) + self.save_spendable_output() - # Make the next block one byte bigger and check that it fails - tip(15) - b24 = block(24, spend=out[6]) + self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1") + self.move_tip(15) + b24 = self.next_block(24, spend=out[6]) script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69 - script_output = CScript([b'\x00' * (script_length+1)]) + script_output = CScript([b'\x00' * (script_length + 1)]) tx.vout = [CTxOut(0, script_output)] - b24 = update_block(24, [tx]) - assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1) - yield rejected(RejectResult(16, b'bad-blk-length')) + b24 = self.update_block(24, [tx]) + assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1) + self.sync_blocks([b24], False, 16, b'bad-blk-length', reconnect=True) - block(25, spend=out[7]) - yield rejected() + b25 = self.next_block(25, spend=out[7]) + self.sync_blocks([b25], False) # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) - tip(15) - b26 = block(26, spend=out[6]) + self.log.info("Reject a block with coinbase input script size out of range") + self.move_tip(15) + b26 = self.next_block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. 
- b26 = update_block(26, []) - yield rejected(RejectResult(16, b'bad-cb-length')) + b26 = self.update_block(26, []) + self.sync_blocks([b26], False, 16, b'bad-cb-length', reconnect=True) # Extend the b26 chain to make sure bitcoind isn't accepting b26 - block(27, spend=out[7]) - yield rejected(False) + b27 = self.next_block(27, spend=out[7]) + self.sync_blocks([b27], False) # Now try a too-large-coinbase script - tip(15) - b28 = block(28, spend=out[6]) + self.move_tip(15) + b28 = self.next_block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() - b28 = update_block(28, []) - yield rejected(RejectResult(16, b'bad-cb-length')) + b28 = self.update_block(28, []) + self.sync_blocks([b28], False, 16, b'bad-cb-length', reconnect=True) # Extend the b28 chain to make sure bitcoind isn't accepting b28 - block(29, spend=out[7]) - yield rejected(False) + b29 = self.next_block(29, spend=out[7]) + self.sync_blocks([b29], False) # b30 has a max-sized coinbase scriptSig. - tip(23) - b30 = block(30) + self.move_tip(23) + b30 = self.next_block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() - b30 = update_block(30, []) - yield accepted() - save_spendable_output() + b30 = self.update_block(30, []) + self.sync_blocks([b30], True) + self.save_spendable_output() # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY # @@ -433,42 +347,45 @@ class FullBlockTest(ComparisonTestFramework): # # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end. - lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - b31 = block(31, spend=out[8], script=lots_of_multisigs) + self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops") + lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19) + b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs) assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS) - yield accepted() - save_spendable_output() + self.sync_blocks([b31], True) + self.save_spendable_output() # this goes over the limit because the coinbase has one sigop + self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops") too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20)) - b32 = block(32, spend=out[9], script=too_many_multisigs) + b32 = self.next_block(32, spend=out[9], script=too_many_multisigs) assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - + self.sync_blocks([b32], False, 16, b'bad-blk-sigops', reconnect=True) # CHECKMULTISIGVERIFY - tip(31) - lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - block(33, spend=out[9], script=lots_of_multisigs) - yield accepted() - save_spendable_output() - + self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops") + self.move_tip(31) + lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19) + b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs) + self.sync_blocks([b33], True) + self.save_spendable_output() + + self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops") too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20)) - block(34, spend=out[10], script=too_many_multisigs) - yield rejected(RejectResult(16, 
b'bad-blk-sigops')) - + b34 = self.next_block(34, spend=out[10], script=too_many_multisigs) + self.sync_blocks([b34], False, 16, b'bad-blk-sigops', reconnect=True) # CHECKSIGVERIFY - tip(33) + self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops") + self.move_tip(33) lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1)) - b35 = block(35, spend=out[10], script=lots_of_checksigs) - yield accepted() - save_spendable_output() + b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs) + self.sync_blocks([b35], True) + self.save_spendable_output() + self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops") too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS)) - block(36, spend=out[11], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - + b36 = self.next_block(36, spend=out[11], script=too_many_checksigs) + self.sync_blocks([b36], False, 16, b'bad-blk-sigops', reconnect=True) # Check spending of a transaction in a block which failed to connect # @@ -479,17 +396,18 @@ class FullBlockTest(ComparisonTestFramework): # # save 37's spendable output, but then double-spend out11 to invalidate the block - tip(35) - b37 = block(37, spend=out[11]) + self.log.info("Reject a block spending transaction from a block which failed to connect") + self.move_tip(35) + b37 = self.next_block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) - tx = create_and_sign_tx(out[11].tx, out[11].n, 0) - b37 = update_block(37, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + tx = self.create_and_sign_transaction(out[11].tx, out[11].n, 0) + b37 = self.update_block(37, [tx]) + self.sync_blocks([b37], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid - tip(35) - block(38, spend=txout_b37) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.move_tip(35) + b38 = self.next_block(38, spend=txout_b37) + self.sync_blocks([b38], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Check P2SH SigOp counting # @@ -502,45 +420,45 @@ class FullBlockTest(ComparisonTestFramework): # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # - tip(35) - b39 = block(39) + self.log.info("Check P2SH SIGOPS are correctly counted") + self.move_tip(35) + b39 = self.next_block(39) b39_outputs = 0 b39_sigops_per_output = 6 # Build the redeem script, hash it, use hash to create the p2sh script - redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG]) + redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE # This must be signed because it is spending a coinbase spend = out[11] - tx = create_tx(spend.tx, spend.n, 1, p2sh_script) + tx = self.create_tx(spend.tx, spend.n, 1, p2sh_script) tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) self.sign_tx(tx, spend.tx, spend.n) tx.rehash() - b39 = update_block(39, [tx]) + b39 = self.update_block(39, [tx]) b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE tx_new = None tx_last 
= tx - total_size=len(b39.serialize()) + total_size = len(b39.serialize()) while(total_size < MAX_BLOCK_BASE_SIZE): - tx_new = create_tx(tx_last, 1, 1, p2sh_script) + tx_new = self.create_tx(tx_last, 1, 1, p2sh_script) tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() total_size += len(tx_new.serialize()) if total_size >= MAX_BLOCK_BASE_SIZE: break - b39.vtx.append(tx_new) # add tx to block + b39.vtx.append(tx_new) # add tx to block tx_last = tx_new b39_outputs += 1 - b39 = update_block(39, []) - yield accepted() - save_spendable_output() - + b39 = self.update_block(39, []) + self.sync_blocks([b39], True) + self.save_spendable_output() # Test sigops in P2SH redeem scripts # @@ -549,15 +467,16 @@ class FullBlockTest(ComparisonTestFramework): # # b41 does the same, less one, so it has the maximum sigops permitted. # - tip(39) - b40 = block(40, spend=out[12]) + self.log.info("Reject a block with too many P2SH sigops") + self.move_tip(39) + b40 = self.next_block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output assert_equal(numTxes <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) new_txs = [] - for i in range(1, numTxes+1): + for i in range(1, numTxes + 1): tx = CTransaction() tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) tx.vin.append(CTxIn(lastOutpoint, b'')) @@ -579,35 +498,34 @@ class FullBlockTest(ComparisonTestFramework): tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) tx.rehash() new_txs.append(tx) - update_block(40, new_txs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) + self.update_block(40, new_txs) + self.sync_blocks([b40], False, 16, b'bad-blk-sigops', reconnect=True) # same as b40, but one less sigop - tip(39) - block(41, spend=None) - update_block(41, b40.vtx[1:-1]) + self.log.info("Accept a block with the max number of P2SH sigops") + self.move_tip(39) + b41 = self.next_block(41, spend=None) + self.update_block(41, b40.vtx[1:-1]) b41_sigops_to_fill = b40_sigops_to_fill - 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) tx.rehash() - update_block(41, [tx]) - yield accepted() + self.update_block(41, [tx]) + self.sync_blocks([b41], True) # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # - tip(39) - block(42, spend=out[12]) - yield rejected() - save_spendable_output() - - block(43, spend=out[13]) - yield accepted() - save_spendable_output() + self.move_tip(39) + b42 = self.next_block(42, spend=out[12]) + self.save_spendable_output() + b43 = self.next_block(43, spend=out[13]) + self.save_spendable_output() + self.sync_blocks([b42, b43], True) # Test a number of really invalid scenarios # @@ -616,6 +534,7 @@ class FullBlockTest(ComparisonTestFramework): # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
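The hand-built blocks that follow poke at the header fields directly: b44 is solved normally, b47 is deliberately left with too little work ("high-hash"), and b50 tampers with nBits ("bad-diffbits"). As a quick reference for what solve() has to satisfy, here is a minimal sketch of the compact-target decoding involved; it mirrors the framework's uint256_from_compact, and the bit-length figure is specific to the regtest nBits value used in these tests.

# The header's double-SHA256, interpreted as a 256-bit integer, must be at or
# below the target encoded in nBits. Sketch of the compact encoding (sign bit
# ignored, which is fine for the regtest value).
def target_from_compact(n_bits):
    exponent = n_bits >> 24          # size of the target in bytes
    mantissa = n_bits & 0x007fffff   # top three bytes of the target
    return mantissa << (8 * (exponent - 3))

REGTEST_NBITS = 0x207fffff
# The regtest target is nearly the maximum, so solve() finds a nonce quickly.
assert target_from_compact(REGTEST_NBITS).bit_length() == 255
# b50 lowers nBits by one: a slightly smaller target, and more importantly a
# difficulty that does not match what the chain requires for the next block.
assert target_from_compact(REGTEST_NBITS - 1) < target_from_compact(REGTEST_NBITS)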
+ self.log.info("Build block 44 manually") height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() @@ -628,10 +547,10 @@ class FullBlockTest(ComparisonTestFramework): self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 - yield accepted() + self.sync_blocks([b44], True) - # A block with a non-coinbase as the first tx - non_coinbase = create_tx(out[15].tx, out[15].n, 1) + self.log.info("Reject a block with a non-coinbase as the first tx") + non_coinbase = self.create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 @@ -640,104 +559,102 @@ class FullBlockTest(ComparisonTestFramework): b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() - self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1 + self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 - yield rejected(RejectResult(16, b'bad-cb-missing')) + self.sync_blocks([b45], False, 16, b'bad-cb-missing', reconnect=True) - # A block with no txns - tip(44) + self.log.info("Reject a block with no transactions") + self.move_tip(44) b46 = CBlock() - b46.nTime = b44.nTime+1 + b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() - self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1 + self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 - s = ser_uint256(b46.hashMerkleRoot) - yield rejected(RejectResult(16, b'bad-blk-length')) + self.sync_blocks([b46], False, 16, b'bad-blk-length', reconnect=True) - # A block with invalid work - tip(44) - b47 = block(47, solve=False) + self.log.info("Reject a block with invalid work") + self.move_tip(44) + b47 = self.next_block(47, solve=False) target = uint256_from_compact(b47.nBits) - while b47.sha256 < target: #changed > to < + while b47.sha256 < target: b47.nNonce += 1 b47.rehash() - yield rejected(RejectResult(16, b'high-hash')) + self.sync_blocks([b47], False, request_block=False) - # A block with timestamp > 2 hrs in the future - tip(44) - b48 = block(48, solve=False) + self.log.info("Reject a block with a timestamp >2 hours in the future") + self.move_tip(44) + b48 = self.next_block(48, solve=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() - yield rejected(RejectResult(16, b'time-too-new')) + self.sync_blocks([b48], False, request_block=False) - # A block with an invalid merkle hash - tip(44) - b49 = block(49) + self.log.info("Reject a block with invalid merkle hash") + self.move_tip(44) + b49 = self.next_block(49) b49.hashMerkleRoot += 1 b49.solve() - yield rejected(RejectResult(16, b'bad-txnmrklroot')) + self.sync_blocks([b49], False, 16, b'bad-txnmrklroot', reconnect=True) - # A block with an incorrect POW limit - tip(44) - b50 = block(50) + self.log.info("Reject a block with incorrect POW limit") + self.move_tip(44) + b50 = self.next_block(50) b50.nBits = b50.nBits - 1 b50.solve() - yield rejected(RejectResult(16, b'bad-diffbits')) + self.sync_blocks([b50], False, request_block=False, reconnect=True) - # A block with two coinbase txns - tip(44) - b51 = block(51) + self.log.info("Reject a block with two coinbase transactions") + self.move_tip(44) + b51 = self.next_block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) - b51 = update_block(51, [cb2]) - yield 
rejected(RejectResult(16, b'bad-cb-multiple')) + b51 = self.update_block(51, [cb2]) + self.sync_blocks([b51], False, 16, b'bad-cb-multiple', reconnect=True) - # A block w/ duplicate txns + self.log.info("Reject a block with duplicate transactions") # Note: txns have to be in the right position in the merkle tree to trigger this error - tip(44) - b52 = block(52, spend=out[15]) - tx = create_tx(b52.vtx[1], 0, 1) - b52 = update_block(52, [tx, tx]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) + self.move_tip(44) + b52 = self.next_block(52, spend=out[15]) + tx = self.create_tx(b52.vtx[1], 0, 1) + b52 = self.update_block(52, [tx, tx]) + self.sync_blocks([b52], False, 16, b'bad-txns-duplicate', reconnect=True) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # - tip(43) - block(53, spend=out[14]) - yield rejected() # rejected since b44 is at same height - save_spendable_output() + self.move_tip(43) + b53 = self.next_block(53, spend=out[14]) + self.sync_blocks([b53], False) + self.save_spendable_output() - # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast) - b54 = block(54, spend=out[15]) + self.log.info("Reject a block with timestamp before MedianTimePast") + b54 = self.next_block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() - yield rejected(RejectResult(16, b'time-too-old')) + self.sync_blocks([b54], False, request_block=False) # valid timestamp - tip(53) - b55 = block(55, spend=out[15]) + self.move_tip(53) + b55 = self.next_block(55, spend=out[15]) b55.nTime = b35.nTime - update_block(55, []) - yield accepted() - save_spendable_output() + self.update_block(55, []) + self.sync_blocks([b55], True) + self.save_spendable_output() - - # Test CVE-2012-2459 + # Test Merkle tree malleability # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # - # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without + # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # @@ -758,46 +675,48 @@ class FullBlockTest(ComparisonTestFramework): # that the error was caught early, avoiding a DOS vulnerability.) 
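The b56/b56p2 cases below depend on the property described above: the merkle computation in src/consensus/merkle duplicates the last hash of any odd-length level, so appending a copy of a block's final transaction(s) leaves the merkle root, and therefore the block hash, unchanged. A self-contained sketch of that property, using toy transaction hashes rather than the framework's CBlock helpers:

import hashlib

def dsha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def merkle_root(hashes):
    # Same pairing rule as the consensus code: an odd-length level has its
    # last hash paired with itself.
    hashes = list(hashes)
    while len(hashes) > 1:
        if len(hashes) % 2:
            hashes.append(hashes[-1])
        hashes = [dsha256(hashes[i] + hashes[i + 1])
                  for i in range(0, len(hashes), 2)]
    return hashes[0]

a, b, c = (dsha256(bytes([i])) for i in range(3))  # toy "txids"
# Duplicating the last transaction does not change the root (CVE-2012-2459).
assert merkle_root([a, b, c]) == merkle_root([a, b, c, c])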
# b57 - a good block with 2 txs, don't submit until end - tip(55) - b57 = block(57) - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - b57 = update_block(57, [tx, tx1]) + self.move_tip(55) + b57 = self.next_block(57) + tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1) + tx1 = self.create_tx(tx, 0, 1) + b57 = self.update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx - tip(55) + self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)") + self.move_tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 - assert_equal(len(b56.vtx),3) - b56 = update_block(56, [tx1]) + assert_equal(len(b56.vtx), 3) + b56 = self.update_block(56, [tx1]) assert_equal(b56.hash, b57.hash) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) + self.sync_blocks([b56], False, 16, b'bad-txns-duplicate', reconnect=True) # b57p2 - a good block with 6 tx'es, don't submit until end - tip(55) - b57p2 = block("57p2") - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - tx2 = create_tx(tx1, 0, 1) - tx3 = create_tx(tx2, 0, 1) - tx4 = create_tx(tx3, 0, 1) - b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) + self.move_tip(55) + b57p2 = self.next_block("57p2") + tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1) + tx1 = self.create_tx(tx, 0, 1) + tx2 = self.create_tx(tx1, 0, 1) + tx3 = self.create_tx(tx2, 0, 1) + tx4 = self.create_tx(tx3, 0, 1) + b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's - tip(55) + self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)") + self.move_tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(b56p2.hash, b57p2.hash) - assert_equal(len(b56p2.vtx),6) - b56p2 = update_block("b56p2", [tx3, tx4]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) + assert_equal(len(b56p2.vtx), 6) + b56p2 = self.update_block("b56p2", [tx3, tx4]) + self.sync_blocks([b56p2], False, 16, b'bad-txns-duplicate', reconnect=True) - tip("57p2") - yield accepted() + self.move_tip("57p2") + self.sync_blocks([b57p2], True) - tip(57) - yield rejected() #rejected because 57p2 seen first - save_spendable_output() + self.move_tip(57) + self.sync_blocks([b57], False) # The tip is not updated because 57p2 seen first + self.save_spendable_output() # Test a few invalid tx types # @@ -806,28 +725,30 @@ class FullBlockTest(ComparisonTestFramework): # # tx with prevout.n out of range - tip(57) - b58 = block(58, spend=out[17]) + self.log.info("Reject a block with a transaction with prevout.n out of range") + self.move_tip(57) + b58 = self.next_block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].tx.vout) < 42) tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) tx.calc_sha256() - b58 = update_block(58, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + b58 = self.update_block(58, [tx]) + self.sync_blocks([b58], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) - # tx with output value > input value out of range - tip(57) - b59 = block(59) - tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN) - b59 = update_block(59, [tx]) - yield rejected(RejectResult(16, b'bad-txns-in-belowout')) + # tx with output value > input value + self.log.info("Reject a block with a transaction with outputs > inputs") + self.move_tip(57) 
+ b59 = self.next_block(59) + tx = self.create_and_sign_transaction(out[17].tx, out[17].n, 51 * COIN) + b59 = self.update_block(59, [tx]) + self.sync_blocks([b59], False, 16, b'bad-txns-in-belowout', reconnect=True) # reset to good chain - tip(57) - b60 = block(60, spend=out[17]) - yield accepted() - save_spendable_output() + self.move_tip(57) + b60 = self.next_block(60, spend=out[17]) + self.sync_blocks([b60], True) + self.save_spendable_output() # Test BIP30 # @@ -838,46 +759,46 @@ class FullBlockTest(ComparisonTestFramework): # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. # - tip(60) - b61 = block(61, spend=out[18]) - b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases + self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)") + self.move_tip(60) + b61 = self.next_block(61, spend=out[18]) + b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases b61.vtx[0].rehash() - b61 = update_block(61, []) + b61 = self.update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) - yield rejected(RejectResult(16, b'bad-txns-BIP30')) - + self.sync_blocks([b61], False, 16, b'bad-txns-BIP30', reconnect=True) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # - tip(60) - b62 = block(62) + self.log.info("Reject a block with a transaction with a nonfinal locktime") + self.move_tip(60) + b62 = self.next_block(62) tx = CTransaction() - tx.nLockTime = 0xffffffff #this locktime is non-final + tx.nLockTime = 0xffffffff # this locktime is non-final assert(out[18].n < len(out[18].tx.vout)) - tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence + tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert(tx.vin[0].nSequence < 0xffffffff) tx.calc_sha256() - b62 = update_block(62, [tx]) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - + b62 = self.update_block(62, [tx]) + self.sync_blocks([b62], False, 16, b'bad-txns-nonfinal') # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # - tip(60) - b63 = block(63) + self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime") + self.move_tip(60) + b63 = self.next_block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() - b63 = update_block(63, []) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - + b63 = self.update_block(63, []) + self.sync_blocks([b63], False, 16, b'bad-txns-nonfinal') # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint, @@ -893,8 +814,9 @@ class FullBlockTest(ComparisonTestFramework): # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # - tip(60) - regular_block = block("64a", spend=out[18]) + self.log.info("Accept a valid block even if a bloated version of the block has previously been sent") + self.move_tip(60) + regular_block = self.next_block("64a", spend=out[18]) 
# make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) @@ -908,45 +830,51 @@ class FullBlockTest(ComparisonTestFramework): script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) - b64a = update_block("64a", [tx]) + b64a = self.update_block("64a", [tx]) assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8) - yield TestInstance([[self.tip, None]]) + self.sync_blocks([b64a], False, 1, b'error parsing message') - # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore - self.test.block_store.erase(b64a.sha256) + # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently + # resend the header message, it won't send us the getdata message again. Just + # disconnect and reconnect and then call sync_blocks. + # TODO: improve this test to be less dependent on P2P DOS behaviour. + node.disconnect_p2ps() + self.reconnect_p2p() - tip(60) + self.move_tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE) self.blocks[64] = b64 - update_block(64, []) - yield accepted() - save_spendable_output() + b64 = self.update_block(64, []) + self.sync_blocks([b64], True) + self.save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # - tip(64) - block(65) - tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 0) - update_block(65, [tx1, tx2]) - yield accepted() - save_spendable_output() + self.log.info("Accept a block with a transaction spending an output created in the same block") + self.move_tip(64) + b65 = self.next_block(65) + tx1 = self.create_and_sign_transaction(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) + tx2 = self.create_and_sign_transaction(tx1, 0, 0) + b65 = self.update_block(65, [tx1, tx2]) + self.sync_blocks([b65], True) + self.save_spendable_output() # Attempt to spend an output created later in the same block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b66 (20) - tip(65) - block(66) - tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 1) - update_block(66, [tx2, tx1]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.log.info("Reject a block with a transaction spending an output created later in the same block") + self.move_tip(65) + b66 = self.next_block(66) + tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) + tx2 = self.create_and_sign_transaction(tx1, 0, 1) + b66 = self.update_block(66, [tx2, tx1]) + self.sync_blocks([b66], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Attempt to double-spend a transaction created in a block # @@ -954,13 +882,14 @@ class FullBlockTest(ComparisonTestFramework): # \-> b67 (20) # # - tip(65) - block(67) - tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 1) - tx3 = create_and_sign_tx(tx1, 0, 2) - update_block(67, [tx1, tx2, tx3]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.log.info("Reject a block with a transaction double spending a transaction creted in the same block") + self.move_tip(65) + b67 = self.next_block(67) 
+ tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) + tx2 = self.create_and_sign_transaction(tx1, 0, 1) + tx3 = self.create_and_sign_transaction(tx1, 0, 2) + b67 = self.update_block(67, [tx1, tx2, tx3]) + self.sync_blocks([b67], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # More tests of block subsidy # @@ -974,34 +903,36 @@ class FullBlockTest(ComparisonTestFramework): # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # - tip(65) - block(68, additional_coinbase_value=10) - tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9) - update_block(68, [tx]) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - tip(65) - b69 = block(69, additional_coinbase_value=10) - tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10) - update_block(69, [tx]) - yield accepted() - save_spendable_output() + self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction") + self.move_tip(65) + b68 = self.next_block(68, additional_coinbase_value=10) + tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9) + b68 = self.update_block(68, [tx]) + self.sync_blocks([b68], False, 16, b'bad-cb-amount', reconnect=True) + + self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction") + self.move_tip(65) + b69 = self.next_block(69, additional_coinbase_value=10) + tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10) + self.update_block(69, [tx]) + self.sync_blocks([b69], True) + self.save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # - tip(69) - block(70, spend=out[21]) + self.log.info("Reject a block containing a transaction spending from a non-existent input") + self.move_tip(69) + b70 = self.next_block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) - update_block(70, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - + b70 = self.update_block(70, [tx]) + self.sync_blocks([b70], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # @@ -1009,13 +940,13 @@ class FullBlockTest(ComparisonTestFramework): # \-> b71 (21) # # b72 is a good block. - # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71. - # - tip(69) - b72 = block(72) - tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) - tx2 = create_and_sign_tx(tx1, 0, 1) - b72 = update_block(72, [tx1, tx2]) # now tip is 72 + # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72. 
+ self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability") + self.move_tip(69) + b72 = self.next_block(72) + tx1 = self.create_and_sign_transaction(out[21].tx, out[21].n, 2) + tx2 = self.create_and_sign_transaction(tx1, 0, 1) + b72 = self.update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) b71.vtx.append(tx2) # add duplicate tx2 self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69 @@ -1025,12 +956,12 @@ class FullBlockTest(ComparisonTestFramework): assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) - tip(71) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - tip(72) - yield accepted() - save_spendable_output() + self.move_tip(71) + self.sync_blocks([b71], False, 16, b'bad-txns-duplicate', reconnect=True) + self.move_tip(72) + self.sync_blocks([b72], True) + self.save_spendable_output() # Test some invalid scripts and MAX_BLOCK_SIGOPS # @@ -1048,23 +979,23 @@ class FullBlockTest(ComparisonTestFramework): # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format) # bytearray[20,004-20,525]: unread data (script_element) # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit) - # - tip(72) - b73 = block(73) + self.log.info("Reject a block containing too many sigops after a large script element") + self.move_tip(72) + b73 = self.next_block(73) size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4 + a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4 element_size = MAX_SCRIPT_ELEMENT_SIZE + 1 a[MAX_BLOCK_SIGOPS] = element_size % 256 - a[MAX_BLOCK_SIGOPS+1] = element_size // 256 - a[MAX_BLOCK_SIGOPS+2] = 0 - a[MAX_BLOCK_SIGOPS+3] = 0 + a[MAX_BLOCK_SIGOPS + 1] = element_size // 256 + a[MAX_BLOCK_SIGOPS + 2] = 0 + a[MAX_BLOCK_SIGOPS + 3] = 0 - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b73 = update_block(73, [tx]) - assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1) - yield rejected(RejectResult(16, b'bad-blk-sigops')) + tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a)) + b73 = self.update_block(73, [tx]) + assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1) + self.sync_blocks([b73], False, 16, b'bad-blk-sigops', reconnect=True) # b74/75 - if we push an invalid script element, all prevous sigops are counted, # but sigops after the element are not counted. 
@@ -1076,45 +1007,44 @@ class FullBlockTest(ComparisonTestFramework): # # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element - # - # - tip(72) - b74 = block(74) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 + self.log.info("Check sigops are counted correctly after an invalid script element") + self.move_tip(72) + b74 = self.next_block(74) + size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS] = 0x4e - a[MAX_BLOCK_SIGOPS+1] = 0xfe - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - a[MAX_BLOCK_SIGOPS+4] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b74 = update_block(74, [tx]) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - tip(72) - b75 = block(75) + a[MAX_BLOCK_SIGOPS + 1] = 0xfe + a[MAX_BLOCK_SIGOPS + 2] = 0xff + a[MAX_BLOCK_SIGOPS + 3] = 0xff + a[MAX_BLOCK_SIGOPS + 4] = 0xff + tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a)) + b74 = self.update_block(74, [tx]) + self.sync_blocks([b74], False, 16, b'bad-blk-sigops', reconnect=True) + + self.move_tip(72) + b75 = self.next_block(75) size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e + a[MAX_BLOCK_SIGOPS - 1] = 0x4e a[MAX_BLOCK_SIGOPS] = 0xff - a[MAX_BLOCK_SIGOPS+1] = 0xff - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b75 = update_block(75, [tx]) - yield accepted() - save_spendable_output() + a[MAX_BLOCK_SIGOPS + 1] = 0xff + a[MAX_BLOCK_SIGOPS + 2] = 0xff + a[MAX_BLOCK_SIGOPS + 3] = 0xff + tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a)) + b75 = self.update_block(75, [tx]) + self.sync_blocks([b75], True) + self.save_spendable_output() # Check that if we push an element filled with CHECKSIGs, they are not counted - tip(75) - b76 = block(76) + self.move_tip(75) + b76 = self.next_block(76) size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs - tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) - b76 = update_block(76, [tx]) - yield accepted() - save_spendable_output() + a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs + tx = self.create_and_sign_transaction(out[23].tx, 0, 1, CScript(a)) + b76 = self.update_block(76, [tx]) + self.sync_blocks([b76], True) + self.save_spendable_output() # Test transaction resurrection # @@ -1133,39 +1063,39 @@ class FullBlockTest(ComparisonTestFramework): # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. 
(Perhaps to spend to a P2SH OP_TRUE script) - # - tip(76) - block(77) - tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN) - update_block(77, [tx77]) - yield accepted() - save_spendable_output() - - block(78) - tx78 = create_tx(tx77, 0, 9*COIN) - update_block(78, [tx78]) - yield accepted() - - block(79) - tx79 = create_tx(tx78, 0, 8*COIN) - update_block(79, [tx79]) - yield accepted() + self.log.info("Test transaction resurrection during a re-org") + self.move_tip(76) + b77 = self.next_block(77) + tx77 = self.create_and_sign_transaction(out[24].tx, out[24].n, 10 * COIN) + b77 = self.update_block(77, [tx77]) + self.sync_blocks([b77], True) + self.save_spendable_output() + + b78 = self.next_block(78) + tx78 = self.create_tx(tx77, 0, 9 * COIN) + b78 = self.update_block(78, [tx78]) + self.sync_blocks([b78], True) + + b79 = self.next_block(79) + tx79 = self.create_tx(tx78, 0, 8 * COIN) + b79 = self.update_block(79, [tx79]) + self.sync_blocks([b79], True) # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) - tip(77) - block(80, spend=out[25]) - yield rejected() - save_spendable_output() + self.move_tip(77) + b80 = self.next_block(80, spend=out[25]) + self.sync_blocks([b80], False, request_block=False) + self.save_spendable_output() - block(81, spend=out[26]) - yield rejected() # other chain is same length - save_spendable_output() + b81 = self.next_block(81, spend=out[26]) + self.sync_blocks([b81], False, request_block=False) # other chain is same length + self.save_spendable_output() - block(82, spend=out[27]) - yield accepted() # now this chain is longer, triggers re-org - save_spendable_output() + b82 = self.next_block(82, spend=out[27]) + self.sync_blocks([b82], True) # now this chain is longer, triggers re-org + self.save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's mempool mempool = self.nodes[0].getrawmempool() @@ -1173,33 +1103,32 @@ class FullBlockTest(ComparisonTestFramework): assert(tx78.hash in mempool) assert(tx79.hash in mempool) - # Test invalid opcodes in dead execution paths. 
# # -> b81 (26) -> b82 (27) -> b83 (28) # - block(83) + self.log.info("Accept a block with invalid opcodes in dead execution paths") + b83 = self.next_block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) - tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) + tx1 = self.create_and_sign_transaction(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) - tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) + tx2 = self.create_and_sign_transaction(tx1, 0, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() - update_block(83, [tx1, tx2]) - yield accepted() - save_spendable_output() - + b83 = self.update_block(83, [tx1, tx2]) + self.sync_blocks([b83], True) + self.save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # - # - block(84) - tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) + self.log.info("Test re-orging blocks with OP_RETURN in them") + b84 = self.next_block(84) + tx1 = self.create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) @@ -1207,87 +1136,186 @@ class FullBlockTest(ComparisonTestFramework): tx1.calc_sha256() self.sign_tx(tx1, out[29].tx, out[29].n) tx1.rehash() - tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) + tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) + tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) + tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) + tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN])) - update_block(84, [tx1,tx2,tx3,tx4,tx5]) - yield accepted() - save_spendable_output() + b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5]) + self.sync_blocks([b84], True) + self.save_spendable_output() - tip(83) - block(85, spend=out[29]) - yield rejected() + self.move_tip(83) + b85 = self.next_block(85, spend=out[29]) + self.sync_blocks([b85], False) # other chain is same length - block(86, spend=out[30]) - yield accepted() + b86 = self.next_block(86, spend=out[30]) + self.sync_blocks([b86], True) - tip(84) - block(87, spend=out[30]) - yield rejected() - save_spendable_output() + self.move_tip(84) + b87 = self.next_block(87, spend=out[30]) + self.sync_blocks([b87], False) # other chain is same length + self.save_spendable_output() - block(88, spend=out[31]) - yield accepted() - save_spendable_output() + b88 = self.next_block(88, spend=out[31]) + self.sync_blocks([b88], True) + self.save_spendable_output() # trying to spend the OP_RETURN output is rejected - block("89a", spend=out[32]) - tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) - update_block("89a", [tx]) - yield rejected() - - - # Test re-org of a week's worth of blocks (1088 blocks) - # This test takes a minute or two and can be accomplished in memory - # - if self.options.runbarelyexpensive: - tip(88) - LARGE_REORG_SIZE = 1088 - test1 = TestInstance(sync_every_block=False) - spend=out[32] - for i in range(89, LARGE_REORG_SIZE + 89): - b = block(i, spend) - tx = 
CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) - b = update_block(i, [tx]) - assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) - test1.blocks_and_transactions.append([self.tip, True]) - save_spendable_output() - spend = get_spendable_output() - - yield test1 - chain1_tip = i - - # now create alt chain of same length - tip(88) - test2 = TestInstance(sync_every_block=False) - for i in range(89, LARGE_REORG_SIZE + 89): - block("alt"+str(i)) - test2.blocks_and_transactions.append([self.tip, False]) - yield test2 - - # extend alt chain to trigger re-org - block("alt" + str(chain1_tip + 1)) - yield accepted() - - # ... and re-org back to the first chain - tip(chain1_tip) - block(chain1_tip + 1) - yield rejected() - block(chain1_tip + 2) - yield accepted() - - chain1_tip += 2 + b89a = self.next_block("89a", spend=out[32]) + tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE])) + b89a = self.update_block("89a", [tx]) + self.sync_blocks([b89a], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) + + self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)") + + self.move_tip(88) + LARGE_REORG_SIZE = 1088 + blocks = [] + spend = out[32] + for i in range(89, LARGE_REORG_SIZE + 89): + b = self.next_block(i, spend) + tx = CTransaction() + script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 + script_output = CScript([b'\x00' * script_length]) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) + b = self.update_block(i, [tx]) + assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) + blocks.append(b) + self.save_spendable_output() + spend = self.get_spendable_output() + + self.sync_blocks(blocks, True, timeout=180) + chain1_tip = i + + # now create alt chain of same length + self.move_tip(88) + blocks2 = [] + for i in range(89, LARGE_REORG_SIZE + 89): + blocks2.append(self.next_block("alt" + str(i))) + self.sync_blocks(blocks2, False, request_block=False) + + # extend alt chain to trigger re-org + block = self.next_block("alt" + str(chain1_tip + 1)) + self.sync_blocks([block], True, timeout=180) + + # ... 
and re-org back to the first chain + self.move_tip(chain1_tip) + block = self.next_block(chain1_tip + 1) + self.sync_blocks([block], False, request_block=False) + block = self.next_block(chain1_tip + 2) + self.sync_blocks([block], True, timeout=180) + + # Helper methods + ################ + + def add_transactions_to_block(self, block, tx_list): + [tx.rehash() for tx in tx_list] + block.vtx.extend(tx_list) + + # this is a little handier to use than the version in blocktools.py + def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): + return create_transaction(spend_tx, n, b"", value, script) + + # sign a transaction, using the key we know about + # this signs input 0 in tx, which is assumed to be spending output n in spend_tx + def sign_tx(self, tx, spend_tx, n): + scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend + tx.vin[0].scriptSig = CScript() + return + (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) + tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) + + def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = self.create_tx(spend_tx, n, value, script) + self.sign_tx(tx, spend_tx, n) + tx.rehash() + return tx + + def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True): + if self.tip is None: + base_block_hash = self.genesis_hash + block_time = int(time.time()) + 1 + else: + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + # First create the coinbase + height = self.block_heights[base_block_hash] + 1 + coinbase = create_coinbase(height, self.coinbase_pubkey) + coinbase.vout[0].nValue += additional_coinbase_value + coinbase.rehash() + if spend is None: + block = create_block(base_block_hash, coinbase, block_time) + else: + coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees + coinbase.rehash() + block = create_block(base_block_hash, coinbase, block_time) + tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi + self.sign_tx(tx, spend.tx, spend.n) + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + if solve: + block.solve() + self.tip = block + self.block_heights[block.sha256] = height + assert number not in self.blocks + self.blocks[number] = block + return block + + # save the current tip so it can be spent by a later block + def save_spendable_output(self): + self.log.debug("saving spendable output %s" % self.tip.vtx[0]) + self.spendable_outputs.append(self.tip) + + # get an output that we previously marked as spendable + def get_spendable_output(self): + self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0]) + return PreviousSpendableOutput(self.spendable_outputs.pop(0).vtx[0], 0) + + # move the tip back to a previous block + def move_tip(self, number): + self.tip = self.blocks[number] + + # adds transactions to the block and updates state + def update_block(self, block_number, new_transactions): + block = self.blocks[block_number] + self.add_transactions_to_block(block, new_transactions) + old_sha256 = block.sha256 + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + # Update the internal state just like in next_block + self.tip = block + if block.sha256 != old_sha256: + self.block_heights[block.sha256] = self.block_heights[old_sha256] + del self.block_heights[old_sha256] 
+ self.blocks[block_number] = block + return block + + def reconnect_p2p(self): + """Add a P2P connection to the node. + + The node gets disconnected several times in this test. This helper + method reconnects the p2p and restarts the network thread.""" + + network_thread_join() + self.nodes[0].disconnect_p2ps() + self.nodes[0].add_p2p_connection(P2PDataStore()) + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True, reconnect=False, timeout=60): + """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. + Call with success = False if the tip shouldn't advance to the most recent block.""" + self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block, timeout=timeout) + if reconnect: + self.reconnect_p2p() if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/feature_blocksdir.py b/test/functional/feature_blocksdir.py new file mode 100755 index 0000000000..a77014a524 --- /dev/null +++ b/test/functional/feature_blocksdir.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the blocksdir option. +""" + +import os +import re +import shutil + +from test_framework.test_framework import BitcoinTestFramework, initialize_datadir + + +class BlocksdirTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + self.stop_node(0) + shutil.rmtree(self.nodes[0].datadir) + initialize_datadir(self.options.tmpdir, 0) + self.log.info("Starting with non exiting blocksdir ...") + blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir') + self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], re.escape('Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))) + os.mkdir(blocksdir_path) + self.log.info("Starting with exiting blocksdir ...") + self.start_node(0, ["-blocksdir=" + blocksdir_path]) + self.log.info("mining blocks..") + self.nodes[0].generate(10) + assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat")) + assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index")) + + +if __name__ == '__main__': + BlocksdirTest().main() diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index 8b5e5681e4..37d60aad61 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -42,98 +42,131 @@ bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {re bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112tx_special - test negative argument to OP_CSV """ - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import ToHex, CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.comptool import TestInstance, TestManager -from test_framework.script import * +from decimal import Decimal +from itertools import product from io import BytesIO import time 
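The locktime flags defined just below follow BIP68's nSequence layout: bit 31 disables relative lock-time entirely, bit 22 switches the units from blocks to 512-second intervals, the low 16 bits carry the value, and bits 25 and 18 fall outside the defined fields (the test sets them precisely to confirm they are ignored); BIP68 semantics also apply only to version-2 transactions, which is why v1 and v2 variants are built. A small decoder sketch under those standard BIP68 assumptions:

# How a BIP68 nSequence value is read, matching the SEQ_* flags below.
SEQUENCE_LOCKTIME_DISABLE_FLAG = 1 << 31  # relative lock-time not enforced
SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22     # value is in 512-second units, not blocks
SEQUENCE_LOCKTIME_MASK = 0x0000ffff       # low 16 bits hold the value
SEQUENCE_LOCKTIME_GRANULARITY = 9         # 2**9 = 512 seconds per time unit

def describe_sequence(n_sequence):
    if n_sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG:
        return "relative lock-time disabled"
    value = n_sequence & SEQUENCE_LOCKTIME_MASK
    if n_sequence & SEQUENCE_LOCKTIME_TYPE_FLAG:
        return "time-based: %d seconds" % (value << SEQUENCE_LOCKTIME_GRANULARITY)
    return "height-based: %d blocks" % value

assert describe_sequence(10) == "height-based: 10 blocks"
assert describe_sequence(10 | (1 << 22)) == "time-based: 5120 seconds"
assert describe_sequence(10 | (1 << 31)) == "relative lock-time disabled"
assert describe_sequence(10 | (1 << 25)) == describe_sequence(10)  # bit 25 ignored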
-base_relative_locktime = 10 -seq_disable_flag = 1<<31 -seq_random_high_bit = 1<<25 -seq_type_flag = 1<<22 -seq_random_low_bit = 1<<18 - -# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field -# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1 -relative_locktimes = [] -for b31 in range(2): - b25times = [] - for b25 in range(2): - b22times = [] - for b22 in range(2): - b18times = [] - for b18 in range(2): - rlt = base_relative_locktime - if (b31): - rlt = rlt | seq_disable_flag - if (b25): - rlt = rlt | seq_random_high_bit - if (b22): - rlt = rlt | seq_type_flag - if (b18): - rlt = rlt | seq_random_low_bit - b18times.append(rlt) - b22times.append(b18times) - b25times.append(b22times) - relative_locktimes.append(b25times) - -def all_rlt_txs(txarray): +from test_framework.blocktools import create_coinbase, create_block +from test_framework.messages import ToHex, CTransaction +from test_framework.mininode import network_thread_start, P2PDataStore +from test_framework.script import ( + CScript, + OP_CHECKSEQUENCEVERIFY, + OP_DROP, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + get_bip9_status, + hex_str_to_bytes, +) + +BASE_RELATIVE_LOCKTIME = 10 +SEQ_DISABLE_FLAG = 1 << 31 +SEQ_RANDOM_HIGH_BIT = 1 << 25 +SEQ_TYPE_FLAG = 1 << 22 +SEQ_RANDOM_LOW_BIT = 1 << 18 + +def relative_locktime(sdf, srhb, stf, srlb): + """Returns a locktime with certain bits set.""" + + locktime = BASE_RELATIVE_LOCKTIME + if sdf: + locktime |= SEQ_DISABLE_FLAG + if srhb: + locktime |= SEQ_RANDOM_HIGH_BIT + if stf: + locktime |= SEQ_TYPE_FLAG + if srlb: + locktime |= SEQ_RANDOM_LOW_BIT + return locktime + +def all_rlt_txs(txs): + return [tx['tx'] for tx in txs] + +def create_transaction(node, txid, to_address, amount): + inputs = [{"txid": txid, "vout": 0}] + outputs = {to_address: amount} + rawtx = node.createrawtransaction(inputs, outputs) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(rawtx)) + tx.deserialize(f) + return tx + +def sign_transaction(node, unsignedtx): + rawtx = ToHex(unsignedtx) + signresult = node.signrawtransactionwithwallet(rawtx) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(signresult['hex'])) + tx.deserialize(f) + return tx + +def create_bip112special(node, input, txversion, address): + tx = create_transaction(node, input, address, Decimal("49.98")) + tx.nVersion = txversion + signtx = sign_transaction(node, tx) + signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + return signtx + +def send_generic_input_tx(node, coinbases, address): + amount = Decimal("49.99") + return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount)))) + +def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0): + """Returns a list of bip68 transactions with different bits set.""" txs = [] - for b31 in range(2): - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - txs.append(txarray[b31][b25][b22][b18]) + assert(len(bip68inputs) >= 16) + for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): + locktime = relative_locktime(sdf, srhb, stf, srlb) + tx = create_transaction(node, bip68inputs[i], address, Decimal("49.98")) + tx.nVersion = txversion + tx.vin[0].nSequence = locktime + locktime_delta + tx = sign_transaction(node, tx) + 
tx.rehash() + txs.append({'tx': tx, 'sdf': sdf, 'stf': stf}) + return txs -class BIP68_112_113Test(ComparisonTestFramework): +def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0): + """Returns a list of bip68 transactions with different bits set.""" + txs = [] + assert(len(bip112inputs) >= 16) + for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): + locktime = relative_locktime(sdf, srhb, stf, srlb) + tx = create_transaction(node, bip112inputs[i], address, Decimal("49.98")) + if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed + tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta + else: # vary nSequence instead, OP_CSV is fixed + tx.vin[0].nSequence = locktime + locktime_delta + tx.nVersion = txversion + signtx = sign_transaction(node, tx) + if (varyOP_CSV): + signtx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + else: + signtx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + tx.rehash() + txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf}) + return txs + +class BIP68_112_113Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']] - def run_test(self): - test = TestManager(self, self.options.tmpdir) - test.add_all_connections(self.nodes) - network_thread_start() - test.run() - - def send_generic_input_tx(self, node, coinbases): - amount = Decimal("49.99") - return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) - - def create_transaction(self, node, txid, to_address, amount): - inputs = [{ "txid" : txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(rawtx)) - tx.deserialize(f) - return tx - - def sign_transaction(self, node, unsignedtx): - rawtx = ToHex(unsignedtx) - signresult = node.signrawtransactionwithwallet(rawtx) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - def generate_blocks(self, number, version, test_blocks = []): + def generate_blocks(self, number, version, test_blocks=None): + if test_blocks is None: + test_blocks = [] for i in range(number): block = self.create_test_block([], version) - test_blocks.append([block, True]) + test_blocks.append(block) self.last_block_time += 600 self.tip = block.sha256 self.tipheight += 1 return test_blocks - def create_test_block(self, txs, version = 536870912): + def create_test_block(self, txs, version=536870912): block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600) block.nVersion = version block.vtx.extend(txs) @@ -142,184 +175,148 @@ class BIP68_112_113Test(ComparisonTestFramework): block.solve() return block - def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): - txs = [] - assert(len(bip68inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - tx.nVersion = txversion - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta 
- b18txs.append(self.sign_transaction(self.nodes[0], tx)) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def create_bip112special(self, input, txversion): - tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - return signtx - - def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): - txs = [] - assert(len(bip112inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed - tx.vin[0].nSequence = base_relative_locktime + locktime_delta - else: # vary nSequence instead, OP_CSV is fixed - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - if (varyOP_CSV): - signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - else: - signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - b18txs.append(signtx) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def get_tests(self): - long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future - self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time - self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs - self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time - self.tipheight = 82 # height of the next block to build + def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True): + """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. 
+ + Call with success = False if the tip shouldn't advance to the most recent block.""" + self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block) + + def run_test(self): + self.nodes[0].add_p2p_connection(P2PDataStore()) + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + self.log.info("Generate blocks in the past for coinbase outputs.") + long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future + self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time + self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs + self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time + self.tipheight = 82 # height of the next block to build self.last_block_time = long_past_time - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) + self.tip = int(self.nodes[0].getbestblockhash(), 16) self.nodeaddress = self.nodes[0].getnewaddress() + self.log.info("Test that the csv softfork is DEFINED") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') test_blocks = self.generate_blocks(61, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 1 - # Advanced from DEFINED to STARTED, height = 143 + self.sync_blocks(test_blocks) + + self.log.info("Advance from DEFINED to STARTED, height = 143") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') - # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 2 - # Failed to advance past STARTED, height = 287 + self.log.info("Fail to achieve LOCKED_IN") + # 100 out of 144 signal bit 0. 
Use a variety of bits to simulate multiple parallel softforks + + test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) + self.sync_blocks(test_blocks) + + self.log.info("Failed to advance past STARTED, height = 287") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') + self.log.info("Generate blocks to achieve LOCK-IN") # 108 out of 144 signal bit 0 to achieve lock-in # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 3 - # Advanced from STARTED to LOCKED_IN, height = 431 + test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) + self.sync_blocks(test_blocks) + + self.log.info("Advanced from STARTED to LOCKED_IN, height = 431") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') - # 140 more version 4 blocks + # Generate 140 more version 4 blocks test_blocks = self.generate_blocks(140, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 4 + self.sync_blocks(test_blocks) - ### Inputs at height = 572 + # Inputs at height = 572 + # # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] for i in range(16): - bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) + # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112basicinputs = [] for j in range(2): inputs = [] for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112basicinputs.append(inputs) + # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112diverseinputs = [] for j in range(2): inputs = [] for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112diverseinputs.append(inputs) + # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress) + # 1 normal 
input - bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress) self.nodes[0].setmocktime(self.last_block_time + 600) - inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 + inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 self.nodes[0].setmocktime(0) - self.tip = int("0x" + inputblockhash, 0) + self.tip = int(inputblockhash, 16) self.tipheight += 1 self.last_block_time += 600 - assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1) + assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 5 - # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575) + self.sync_blocks(test_blocks) + + self.log.info("Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to put in appropriate block time - bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 - bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes - bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) - bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) + bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress) + bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1) - bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2) + bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress) + bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1) - bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1) + bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1) + bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1) - bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2) + bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress) + 
bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1) - bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1) + bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1) + bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1) # -1 OP_CSV OP_DROP input - bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) - bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) + bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress) + bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress) + + self.log.info("TESTING") + self.log.info("Pre-Soft Fork Tests. All txs should pass.") + self.log.info("Test version 1 txs") - ### TESTING ### - ################################## - ### Before Soft Forks Activate ### - ################################## - # All txs should pass - ### Version 1 txs ### success_txs = [] # add BIP113 tx and -1 CSV tx - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) # add BIP 68 txs @@ -330,14 +327,15 @@ class BIP68_112_113Test(ComparisonTestFramework): # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - ### Version 2 txs ### + self.log.info("Test version 2 txs") + success_txs = [] # add BIP113 tx and -1 CSV tx - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) # add BIP 68 txs @@ -348,187 +346,149 @@ class BIP68_112_113Test(ComparisonTestFramework): # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block test_blocks = self.generate_blocks(1, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 8 + self.sync_blocks(test_blocks) assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') + self.log.info("Post-Soft Fork 
Tests.") - ################################# - ### After Soft Forks Activate ### - ################################# - ### BIP 113 ### + self.log.info("BIP 113 tests") # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 + self.sync_blocks([self.create_test_block([bip113tx])], success=False) # BIP 113 tests should now pass if the locktime is < MTP - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 + self.sync_blocks([self.create_test_block([bip113tx])]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 13 + self.sync_blocks(test_blocks) + + self.log.info("BIP 68 tests") + self.log.info("Test version 1 txs - all should still pass") - ### BIP 68 ### - ### Version 1 txs ### - # All still pass success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - ### Version 2 txs ### - bip68success_txs = [] + self.log.info("Test version 2 txs") + # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 + bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] + self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512 - bip68timetxs = [] - for b25 in range(2): - for b18 in range(2): - bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) + bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and 
tx['stf']]
         for tx in bip68timetxs:
-            yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
-        bip68heighttxs = []
-        for b25 in range(2):
-            for b18 in range(2):
-                bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
+            self.sync_blocks([self.create_test_block([tx])], success=False)
+
+        bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
         for tx in bip68heighttxs:
-            yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
+            self.sync_blocks([self.create_test_block([tx])], success=False)
         # Advance one block to 581
         test_blocks = self.generate_blocks(1, 1234)
-        yield TestInstance(test_blocks, sync_every_block=False) # 24
+        self.sync_blocks(test_blocks)
         # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
         bip68success_txs.extend(bip68timetxs)
-        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
+        self.sync_blocks([self.create_test_block(bip68success_txs)])
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         for tx in bip68heighttxs:
-            yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
+            self.sync_blocks([self.create_test_block([tx])], success=False)
         # Advance one block to 582
         test_blocks = self.generate_blocks(1, 1234)
-        yield TestInstance(test_blocks, sync_every_block=False) # 30
+        self.sync_blocks(test_blocks)
         # All BIP 68 txs should pass
         bip68success_txs.extend(bip68heighttxs)
-        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
+        self.sync_blocks([self.create_test_block(bip68success_txs)])
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+        self.log.info("BIP 112 tests")
+        self.log.info("Test version 1 txs")
-        ### BIP 112 ###
-        ### Version 1 txs ###
         # -1 OP_CSV tx should fail
-        yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
+        self.sync_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
         # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
-        success_txs = []
-        for b25 in range(2):
-            for b22 in range(2):
-                for b18 in range(2):
-                    success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
-                    success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
-        yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
+
+        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
+        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
+        self.sync_blocks([self.create_test_block(success_txs)])
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
-        fail_txs = []
-        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
-        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
-        for b25 in range(2):
-            for b22 in range(2):
-                for b18 in range(2):
-                    fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
-                    fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
-
+        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
+        fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
+        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
+        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
         for tx in fail_txs:
-            yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
+            self.sync_blocks([self.create_test_block([tx])], success=False)
+
+        self.log.info("Test version 2 txs")
-        ### Version 2 txs ###
         # -1 OP_CSV tx should
fail - yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82 + self.sync_blocks([self.create_test_block([bip112tx_special_v2])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met) - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV - success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 + success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']] + success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']] - yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## - # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 + # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## + # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check + fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2) + fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 + self.sync_blocks([self.create_test_block([tx])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail - fail_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence + fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 + self.sync_blocks([self.create_test_block([tx])], success=False) # If sequencelock types mismatch, tx should fail - fail_txs = [] - for b25 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence - fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV + fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']] + fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 + self.sync_blocks([self.create_test_block([tx])], success=False) # Remaining txs should pass, just test masking works properly - success_txs = [] - for b25 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence - success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV - yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 + success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']] + success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']] + 
self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Additional test, of checking that comparison of two time types works properly time_txs = [] - for b25 in range(2): - for b18 in range(2): - tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] - tx.vin[0].nSequence = base_relative_locktime | seq_type_flag - signtx = self.sign_transaction(self.nodes[0], tx) - time_txs.append(signtx) - yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]: + tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG + signtx = sign_transaction(self.nodes[0], tx) + time_txs.append(signtx) - ### Missing aspects of test - ## Testing empty stack fails + self.sync_blocks([self.create_test_block(time_txs)]) + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + # TODO: Test empty stack fails if __name__ == '__main__': BIP68_112_113Test().main() diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py index f434b6682b..32a6bd5d59 100755 --- a/test/functional/feature_fee_estimation.py +++ b/test/functional/feature_fee_estimation.py @@ -133,12 +133,12 @@ class EstimateFeeTest(BitcoinTestFramework): which we will use to generate our transactions. """ self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"], - ["-blockmaxsize=17000", "-maxorphantx=1000"], - ["-blockmaxsize=8000", "-maxorphantx=1000"]]) + ["-blockmaxweight=68000", "-maxorphantx=1000"], + ["-blockmaxweight=32000", "-maxorphantx=1000"]]) # Use node0 to mine blocks for input splitting # Node1 mines small blocks but that are bigger than the expected transaction rate. - # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes, - # (17k is room enough for 110 or so transactions) + # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight, + # (68k weight is room enough for 120 or so transactions) # Node2 is a stingy miner, that # produces too small blocks (room for only 55 or so transactions) diff --git a/test/functional/feature_help.py b/test/functional/feature_help.py new file mode 100755 index 0000000000..1e62d7a409 --- /dev/null +++ b/test/functional/feature_help.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Verify that starting bitcoin with -h works as expected.""" +import subprocess + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class HelpTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def setup_network(self): + self.add_nodes(self.num_nodes) + # Don't start the node + + def run_test(self): + self.log.info("Start bitcoin with -h for help text") + self.nodes[0].start(extra_args=['-h'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) + # Node should exit immediately and output help to stdout. 
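As an aside, the "-h" check added above relies only on the standard subprocess module. A minimal, self-contained sketch of the same pattern, exercised against the Python interpreter's own --help flag purely as a hypothetical stand-in for bitcoind, might look like this:

# Illustrative sketch only: run a command with its help flag, expect a zero
# exit code and help text on stdout. The target binary is a stand-in here.
import subprocess
import sys

proc = subprocess.run([sys.executable, '--help'],
                      stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=10)
assert proc.returncode == 0                 # help request should exit cleanly
assert b'usage' in proc.stdout.lower()      # and print usage/help text to stdout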
+ ret_code = self.nodes[0].process.wait(timeout=1) + assert_equal(ret_code, 0) + output = self.nodes[0].process.stdout.read() + assert b'Options' in output + self.log.info("Help text received: {} (...)".format(output[0:60])) + self.nodes[0].running = False + + self.log.info("Start bitcoin with -version for version information") + self.nodes[0].start(extra_args=['-version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) + # Node should exit immediately and output version to stdout. + ret_code = self.nodes[0].process.wait(timeout=1) + assert_equal(ret_code, 0) + output = self.nodes[0].process.stdout.read() + assert b'version' in output + self.log.info("Version text received: {} (...)".format(output[0:60])) + self.nodes[0].running = False + +if __name__ == '__main__': + HelpTest().main() diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index abe4cb7c8e..ce6ec76c61 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -34,7 +34,7 @@ class MaxUploadTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 - self.extra_args = [["-maxuploadtarget=800", "-blockmaxsize=999000"]] + self.extra_args = [["-maxuploadtarget=800"]] # Cache for utxos, as the listunspent may take a long time later in the test self.utxo_cache = [] @@ -144,7 +144,7 @@ class MaxUploadTest(BitcoinTestFramework): #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 self.log.info("Restarting nodes with -whitelist=127.0.0.1") self.stop_node(0) - self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) + self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"]) # Reconnect to self.nodes[0] self.nodes[0].add_p2p_connection(TestP2PConn()) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index ad305a6e80..3adde8dd73 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -31,14 +31,14 @@ class PruneTest(BitcoinTestFramework): # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. 
- self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ] + self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550"], - ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], - ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], + ["-maxreceivebuffer=20000"], + ["-maxreceivebuffer=20000"], ["-prune=550"]] def setup_network(self): @@ -124,7 +124,7 @@ class PruneTest(BitcoinTestFramework): # Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) self.stop_node(1) - self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"]) + self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5", "-disablesafemode"]) height = self.nodes[1].getblockcount() self.log.info("Current block height: %d" % height) @@ -147,7 +147,7 @@ class PruneTest(BitcoinTestFramework): # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) - self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"]) + self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5", "-disablesafemode"]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index c6cb4c54cd..6f585f6825 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -85,7 +85,7 @@ class RESTTest (BitcoinTestFramework): ####################################### # GETUTXOS: query an unspent outpoint # ####################################### - json_request = '/checkmempool/'+txid+'-'+str(n) + json_request = '/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) @@ -100,7 +100,7 @@ class RESTTest (BitcoinTestFramework): ################################################# # GETUTXOS: now query an already spent outpoint # ################################################# - json_request = '/checkmempool/'+vintx+'-0' + json_request = '/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) @@ -117,7 +117,7 @@ class RESTTest (BitcoinTestFramework): ################################################## # GETUTXOS: now check both with the same request # ################################################## - json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0' + json_request = '/'+txid+'-'+str(n)+'/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) @@ -151,23 +151,48 @@ class 
RESTTest (BitcoinTestFramework): txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) - vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) + # get the spent output to later check for utxo (should be spent by then) + spent = '{}-{}'.format(json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: if vout['value'] == 0.1: n = vout['n'] + spending = '{}-{}'.format(txid, n) - json_request = '/'+txid+'-'+str(n) + json_request = '/'+spending json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) - assert_equal(len(json_obj['utxos']), 0) #there should be an outpoint because it has just added to the mempool + assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because it has just added to the mempool - json_request = '/checkmempool/'+txid+'-'+str(n) + json_request = '/checkmempool/'+spending json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just added to the mempool + json_request = '/'+spent + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because its spending tx is not confirmed + + json_request = '/checkmempool/'+spent + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because it has just spent (by mempool tx) + + self.nodes[0].generate(1) + self.sync_all() + + json_request = '/'+spending + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it was mined + + json_request = '/checkmempool/'+spending + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it was mined + #do some invalid requests json_request = '{"checkmempool' response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 8b226c2e9d..5546bf6b29 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -425,7 +425,7 @@ class SegWitTest(BitcoinTestFramework): assert(self.nodes[0].getbestblockhash() == block.hash) - # Now make sure that malleating the witness nonce doesn't + # Now make sure that malleating the witness reserved value doesn't # result in a block permanently marked bad. 
block = self.build_next_block() add_witness_commitment(block) @@ -436,7 +436,7 @@ class SegWitTest(BitcoinTestFramework): block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ] test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - # Changing the witness nonce doesn't change the block hash + # Changing the witness reserved value doesn't change the block hash block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ] test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) @@ -833,7 +833,7 @@ class SegWitTest(BitcoinTestFramework): self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2) self.log.error("Error: duplicate tx getdata!") assert(False) - except AssertionError as e: + except AssertionError: pass # Delivering this transaction with witness should fail (no matter who diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index d8348432aa..72b5f4748f 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -10,6 +10,7 @@ Tests correspond to code in rpc/net.cpp. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + assert_greater_than_or_equal, assert_raises_rpc_error, connect_nodes_bi, p2p_port, @@ -33,26 +34,34 @@ class NetTest(BitcoinTestFramework): assert_equal(self.nodes[0].getconnectioncount(), 2) def _test_getnettotals(self): - # check that getnettotals totalbytesrecv and totalbytessent - # are consistent with getpeerinfo + # getnettotals totalbytesrecv and totalbytessent should be + # consistent with getpeerinfo. Since the RPC calls are not atomic, + # and messages might have been recvd or sent between RPC calls, call + # getnettotals before and after and verify that the returned values + # from getpeerinfo are bounded by those values. 
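As an aside, the bound-checking approach described in the comment above is a general pattern for values obtained by separate, non-atomic reads of a source that only grows: bracket the second read between two snapshots of the first and assert bounds rather than strict equality. A minimal, self-contained sketch, with every name below a hypothetical stand-in for the RPC calls:

# Illustrative sketch only: bound a racy read between two snapshots of a
# monotonically increasing counter instead of asserting equality.
import itertools

_clock = itertools.count()      # stands in for a byte counter that only grows

def get_total():
    return next(_clock)         # e.g. a getnettotals()-style aggregate

def get_breakdown_sum():
    return next(_clock)         # e.g. the sum of per-peer byte counts

total_before = get_total()
breakdown = get_breakdown_sum()
total_after = get_total()

assert total_before <= breakdown <= total_after

Strict equality between the two reads would be racy; bracketing the breakdown between the before and after snapshots is not.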
+ net_totals_before = self.nodes[0].getnettotals() peer_info = self.nodes[0].getpeerinfo() + net_totals_after = self.nodes[0].getnettotals() assert_equal(len(peer_info), 2) - net_totals = self.nodes[0].getnettotals() - assert_equal(sum([peer['bytesrecv'] for peer in peer_info]), - net_totals['totalbytesrecv']) - assert_equal(sum([peer['bytessent'] for peer in peer_info]), - net_totals['totalbytessent']) + peers_recv = sum([peer['bytesrecv'] for peer in peer_info]) + peers_sent = sum([peer['bytessent'] for peer in peer_info]) + + assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv']) + assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv) + assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent']) + assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent) + # test getnettotals and getpeerinfo by doing a ping # the bytes sent/received should change # note ping and pong are 32 bytes each self.nodes[0].ping() - wait_until(lambda: (net_totals['totalbytessent'] + 32*2) == self.nodes[0].getnettotals()['totalbytessent'], timeout=1) - wait_until(lambda: (net_totals['totalbytesrecv'] + 32*2) == self.nodes[0].getnettotals()['totalbytesrecv'], timeout=1) + wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1) + wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1) peer_info_after_ping = self.nodes[0].getpeerinfo() for before, after in zip(peer_info, peer_info_after_ping): - assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong']) - assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping']) + assert_greater_than_or_equal(after['bytesrecv_per_msg']['pong'], before['bytesrecv_per_msg']['pong'] + 32) + assert_greater_than_or_equal(after['bytessent_per_msg']['ping'], before['bytessent_per_msg']['ping'] + 32) def _test_getnetworkinginfo(self): assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) @@ -78,8 +87,7 @@ class NetTest(BitcoinTestFramework): assert_equal(len(added_nodes), 1) assert_equal(added_nodes[0]['addednode'], ip_port) # check that a non-existent node returns an error - assert_raises_rpc_error(-24, "Node has not been added", - self.nodes[0].getaddednodeinfo, '1.1.1.1') + assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1') def _test_getpeerinfo(self): peer_info = [x.getpeerinfo() for x in self.nodes] diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index bd3a3b3fab..900090bb66 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -151,7 +151,7 @@ class AuthServiceProxy(): req_start_time = time.time() try: http_response = self.__conn.getresponse() - except socket.timeout as e: + except socket.timeout: raise JSONRPCException({ 'code': -344, 'message': '%r RPC took longer than %f seconds. 
Consider ' diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py index 6067a407cc..6073285a6c 100644 --- a/test/functional/test_framework/blockstore.py +++ b/test/functional/test_framework/blockstore.py @@ -87,7 +87,7 @@ class BlockStore(): block.calc_sha256() try: self.blockDB[repr(block.sha256)] = bytes(block.serialize()) - except TypeError as e: + except TypeError: logger.exception("Unexpected error") self.currentBlock = block.sha256 self.headers_map[block.sha256] = CBlockHeader(block) @@ -147,7 +147,7 @@ class TxStore(): tx.calc_sha256() try: self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) - except TypeError as e: + except TypeError: logger.exception("Unexpected error") def get_transactions(self, inv): diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 583d07deec..291ac3ee46 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -30,6 +30,11 @@ JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 + +class FailedToStartError(Exception): + """Raised when a node fails to start correctly.""" + + class TestNode(): """A class for representing a bitcoind node under test. @@ -102,7 +107,8 @@ class TestNode(): # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): - assert self.process.poll() is None, "bitcoind exited with status %i during initialization" % self.process.returncode + if self.process.poll() is not None: + raise FailedToStartError('bitcoind exited with status {} during initialization'.format(self.process.returncode)) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() @@ -179,9 +185,9 @@ class TestNode(): self.start(extra_args, stderr=log_stderr, *args, **kwargs) self.wait_for_rpc_connection() self.stop_node() - self.wait_util_stopped() - except Exception as e: - assert 'bitcoind exited' in str(e) # node must have shutdown + self.wait_until_stopped() + except FailedToStartError as e: + self.log.debug('bitcoind failed to start: %s', e) self.running = False self.process = None # Check stderr for expected message diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 041e2b86e8..a24a2ec4f5 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -8,6 +8,7 @@ from base64 import b64encode from binascii import hexlify, unhexlify from decimal import Decimal, ROUND_DOWN import hashlib +import inspect import json import logging import os @@ -204,9 +205,9 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N if attempts == float('inf') and timeout == float('inf'): timeout = 60 attempt = 0 - timeout += time.time() + time_end = time.time() + timeout - while attempt < attempts and time.time() < timeout: + while attempt < attempts and time.time() < time_end: if lock: with lock: if predicate(): @@ -218,8 +219,12 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N time.sleep(0.05) # Print the cause of the timeout - assert_greater_than(attempts, attempt) - assert_greater_than(timeout, time.time()) + predicate_source = inspect.getsourcelines(predicate) + logger.error("wait_until() failed. 
Predicate: {}".format(predicate_source)) + if attempt >= attempts: + raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts)) + elif time.time() >= time_end: + raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) raise RuntimeError('Unreachable') # RPC/P2P connection constants and functions diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 09f7f50de0..a2e92dce3b 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -67,7 +67,7 @@ BASE_SCRIPTS= [ 'feature_segwit.py', # vv Tests less than 2m vv 'wallet_basic.py', - 'wallet_accounts.py', + 'wallet_labels.py', 'p2p_segwit.py', 'wallet_dump.py', 'rpc_listtransactions.py', @@ -136,7 +136,9 @@ BASE_SCRIPTS= [ 'p2p_unrequested_blocks.py', 'feature_logging.py', 'p2p_node_network_limited.py', + 'feature_blocksdir.py', 'feature_config_args.py', + 'feature_help.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] @@ -366,7 +368,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove def print_results(test_results, max_len_name, runtime): results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] - test_results.sort(key=lambda result: result.name.lower()) + test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 @@ -377,7 +379,11 @@ def print_results(test_results, max_len_name, runtime): results += str(test_result) status = TICK + "Passed" if all_passed else CROSS + "Failed" + if not all_passed: + results += RED[1] results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] + if not all_passed: + results += RED[0] results += "Runtime: %s s\n" % (runtime) print(results) @@ -454,6 +460,14 @@ class TestResult(): self.time = time self.padding = 0 + def sort_key(self): + if self.status == "Passed": + return 0, self.name.lower() + elif self.status == "Failed": + return 2, self.name.lower() + elif self.status == "Skipped": + return 1, self.name.lower() + def __repr__(self): if self.status == "Passed": color = BLUE diff --git a/test/functional/wallet_accounts.py b/test/functional/wallet_accounts.py deleted file mode 100755 index ecd1cfc82b..0000000000 --- a/test/functional/wallet_accounts.py +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test account RPCs. 
- -RPCs tested are: - - getaccountaddress - - getaddressesbyaccount - - listaddressgroupings - - setaccount - - sendfrom (with account arguments) - - move (with account arguments) -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class WalletAccountsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [[]] - - def run_test(self): - node = self.nodes[0] - # Check that there's no UTXO on any of the nodes - assert_equal(len(node.listunspent()), 0) - - # Note each time we call generate, all generated coins go into - # the same address, so we call twice to get two addresses w/50 each - node.generate(1) - node.generate(101) - assert_equal(node.getbalance(), 100) - - # there should be 2 address groups - # each with 1 address with a balance of 50 Bitcoins - address_groups = node.listaddressgroupings() - assert_equal(len(address_groups), 2) - # the addresses aren't linked now, but will be after we send to the - # common address - linked_addresses = set() - for address_group in address_groups: - assert_equal(len(address_group), 1) - assert_equal(len(address_group[0]), 2) - assert_equal(address_group[0][1], 50) - linked_addresses.add(address_group[0][0]) - - # send 50 from each address to a third address not in this wallet - # There's some fee that will come back to us when the miner reward - # matures. - common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr" - txid = node.sendmany( - fromaccount="", - amounts={common_address: 100}, - subtractfeefrom=[common_address], - minconf=1, - ) - tx_details = node.gettransaction(txid) - fee = -tx_details['details'][0]['fee'] - # there should be 1 address group, with the previously - # unlinked addresses now linked (they both have 0 balance) - address_groups = node.listaddressgroupings() - assert_equal(len(address_groups), 1) - assert_equal(len(address_groups[0]), 2) - assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses) - assert_equal([a[1] for a in address_groups[0]], [0, 0]) - - node.generate(1) - - # we want to reset so that the "" account has what's expected. - # otherwise we're off by exactly the fee amount as that's mined - # and matures in the next 100 blocks - node.sendfrom("", common_address, fee) - amount_to_send = 1.0 - - # Create accounts and make sure subsequent account API calls - # recognize the account/address associations. - accounts = [Account(name) for name in ("a", "b", "c", "d", "e")] - for account in accounts: - account.add_receive_address(node.getaccountaddress(account.name)) - account.verify(node) - - # Send a transaction to each account, and make sure this forces - # getaccountaddress to generate a new receiving address. - for account in accounts: - node.sendtoaddress(account.receive_address, amount_to_send) - account.add_receive_address(node.getaccountaddress(account.name)) - account.verify(node) - - # Check the amounts received. - node.generate(1) - for account in accounts: - assert_equal( - node.getreceivedbyaddress(account.addresses[0]), amount_to_send) - assert_equal(node.getreceivedbyaccount(account.name), amount_to_send) - - # Check that sendfrom account reduces listaccounts balances. 
- for i, account in enumerate(accounts): - to_account = accounts[(i+1) % len(accounts)] - node.sendfrom(account.name, to_account.receive_address, amount_to_send) - node.generate(1) - for account in accounts: - account.add_receive_address(node.getaccountaddress(account.name)) - account.verify(node) - assert_equal(node.getreceivedbyaccount(account.name), 2) - node.move(account.name, "", node.getbalance(account.name)) - account.verify(node) - node.generate(101) - expected_account_balances = {"": 5200} - for account in accounts: - expected_account_balances[account.name] = 0 - assert_equal(node.listaccounts(), expected_account_balances) - assert_equal(node.getbalance(""), 5200) - - # Check that setaccount can assign an account to a new unused address. - for account in accounts: - address = node.getaccountaddress("") - node.setaccount(address, account.name) - account.add_address(address) - account.verify(node) - assert(address not in node.getaddressesbyaccount("")) - - # Check that addmultisigaddress can assign accounts. - for account in accounts: - addresses = [] - for x in range(10): - addresses.append(node.getnewaddress()) - multisig_address = node.addmultisigaddress(5, addresses, account.name)['address'] - account.add_address(multisig_address) - account.verify(node) - node.sendfrom("", multisig_address, 50) - node.generate(101) - for account in accounts: - assert_equal(node.getbalance(account.name), 50) - - # Check that setaccount can change the account of an address from a - # different account. - change_account(node, accounts[0].addresses[0], accounts[0], accounts[1]) - - # Check that setaccount can change the account of an address which - # is the receiving address of a different account. - change_account(node, accounts[0].receive_address, accounts[0], accounts[1]) - - # Check that setaccount can set the account of an address already - # in the account. This is a no-op. - change_account(node, accounts[2].addresses[0], accounts[2], accounts[2]) - - # Check that setaccount can set the account of an address which is - # already the receiving address of the account. It would probably make - # sense for this to be a no-op, but right now it resets the receiving - # address, causing getaccountaddress to return a brand new address. - change_account(node, accounts[2].receive_address, accounts[2], accounts[2]) - -class Account: - def __init__(self, name): - # Account name - self.name = name - # Current receiving address associated with this account. 
-        self.receive_address = None
-        # List of all addresses assigned with this account
-        self.addresses = []
-
-    def add_address(self, address):
-        assert_equal(address not in self.addresses, True)
-        self.addresses.append(address)
-
-    def add_receive_address(self, address):
-        self.add_address(address)
-        self.receive_address = address
-
-    def verify(self, node):
-        if self.receive_address is not None:
-            assert self.receive_address in self.addresses
-            assert_equal(node.getaccountaddress(self.name), self.receive_address)
-
-        for address in self.addresses:
-            assert_equal(node.getaccount(address), self.name)
-
-        assert_equal(
-            set(node.getaddressesbyaccount(self.name)), set(self.addresses))
-
-
-def change_account(node, address, old_account, new_account):
-    assert_equal(address in old_account.addresses, True)
-    node.setaccount(address, new_account.name)
-
-    old_account.addresses.remove(address)
-    new_account.add_address(address)
-
-    # Calling setaccount on an address which was previously the receiving
-    # address of a different account should reset the receiving address of
-    # the old account, causing getaccountaddress to return a brand new
-    # address.
-    if address == old_account.receive_address:
-        new_address = node.getaccountaddress(old_account.name)
-        assert_equal(new_address not in old_account.addresses, True)
-        assert_equal(new_address not in new_account.addresses, True)
-        old_account.add_receive_address(new_address)
-
-    old_account.verify(node)
-    new_account.verify(node)
-
-
-if __name__ == '__main__':
-    WalletAccountsTest().main()
diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py
index bfd4638481..b66e9b5d91 100755
--- a/test/functional/wallet_import_rescan.py
+++ b/test/functional/wallet_import_rescan.py
@@ -78,7 +78,7 @@ class Variant(collections.namedtuple("Variant", "call data rescan prune")):
 
         if txid is not None:
             tx, = [tx for tx in txs if tx["txid"] == txid]
-            assert_equal(tx["account"], self.label)
+            assert_equal(tx["label"], self.label)
             assert_equal(tx["address"], self.address["address"])
             assert_equal(tx["amount"], amount)
             assert_equal(tx["category"], "receive")
diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py
index 56ebc2622a..91acdb01d0 100755
--- a/test/functional/wallet_importmulti.py
+++ b/test/functional/wallet_importmulti.py
@@ -230,7 +230,7 @@ class ImportMultiTest (BitcoinTestFramework):
         sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
         multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
         timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
 
@@ -257,7 +257,7 @@ class ImportMultiTest (BitcoinTestFramework):
         sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
         multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
         timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
 
@@ -284,7 +284,7 @@ class ImportMultiTest (BitcoinTestFramework):
         sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
         multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
         timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
 
@@ -311,7 +311,7 @@ class ImportMultiTest (BitcoinTestFramework):
         sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
         multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
         self.nodes[1].generate(100)
-        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+        self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
         self.nodes[1].generate(1)
         timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
 
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
new file mode 100755
index 0000000000..b2695e681f
--- /dev/null
+++ b/test/functional/wallet_labels.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test label RPCs.
+
+RPCs tested are:
+    - getlabeladdress
+    - getaddressesbyaccount
+    - listaddressgroupings
+    - setlabel
+    - sendfrom (with account arguments)
+    - move (with account arguments)
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+class WalletLabelsTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+        self.extra_args = [[]]
+
+    def run_test(self):
+        node = self.nodes[0]
+        # Check that there's no UTXO on any of the nodes
+        assert_equal(len(node.listunspent()), 0)
+
+        # Note each time we call generate, all generated coins go into
+        # the same address, so we call twice to get two addresses w/50 each
+        node.generate(1)
+        node.generate(101)
+        assert_equal(node.getbalance(), 100)
+
+        # there should be 2 address groups
+        # each with 1 address with a balance of 50 Bitcoins
+        address_groups = node.listaddressgroupings()
+        assert_equal(len(address_groups), 2)
+        # the addresses aren't linked now, but will be after we send to the
+        # common address
+        linked_addresses = set()
+        for address_group in address_groups:
+            assert_equal(len(address_group), 1)
+            assert_equal(len(address_group[0]), 2)
+            assert_equal(address_group[0][1], 50)
+            linked_addresses.add(address_group[0][0])
+
+        # send 50 from each address to a third address not in this wallet
+        # There's some fee that will come back to us when the miner reward
+        # matures.
+        common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
+        txid = node.sendmany(
+            fromaccount="",
+            amounts={common_address: 100},
+            subtractfeefrom=[common_address],
+            minconf=1,
+        )
+        tx_details = node.gettransaction(txid)
+        fee = -tx_details['details'][0]['fee']
+        # there should be 1 address group, with the previously
+        # unlinked addresses now linked (they both have 0 balance)
+        address_groups = node.listaddressgroupings()
+        assert_equal(len(address_groups), 1)
+        assert_equal(len(address_groups[0]), 2)
+        assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
+        assert_equal([a[1] for a in address_groups[0]], [0, 0])
+
+        node.generate(1)
+
+        # we want to reset so that the "" label has what's expected.
+        # otherwise we're off by exactly the fee amount as that's mined
+        # and matures in the next 100 blocks
+        node.sendfrom("", common_address, fee)
+        amount_to_send = 1.0
+
+        # Create labels and make sure subsequent label API calls
+        # recognize the label/address associations.
+        labels = [Label(name) for name in ("a", "b", "c", "d", "e")]
+        for label in labels:
+            label.add_receive_address(node.getlabeladdress(label.name))
+            label.verify(node)
+
+        # Send a transaction to each label, and make sure this forces
+        # getlabeladdress to generate a new receiving address.
+        for label in labels:
+            node.sendtoaddress(label.receive_address, amount_to_send)
+            label.add_receive_address(node.getlabeladdress(label.name))
+            label.verify(node)
+
+        # Check the amounts received.
+        node.generate(1)
+        for label in labels:
+            assert_equal(
+                node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
+            assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
+
+        # Check that sendfrom label reduces listaccounts balances.
+        for i, label in enumerate(labels):
+            to_label = labels[(i+1) % len(labels)]
+            node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+        node.generate(1)
+        for label in labels:
+            label.add_receive_address(node.getlabeladdress(label.name))
+            label.verify(node)
+            assert_equal(node.getreceivedbylabel(label.name), 2)
+            node.move(label.name, "", node.getbalance(label.name))
+            label.verify(node)
+        node.generate(101)
+        expected_account_balances = {"": 5200}
+        for label in labels:
+            expected_account_balances[label.name] = 0
+        assert_equal(node.listaccounts(), expected_account_balances)
+        assert_equal(node.getbalance(""), 5200)
+
+        # Check that setlabel can assign a label to a new unused address.
+        for label in labels:
+            address = node.getlabeladdress("")
+            node.setlabel(address, label.name)
+            label.add_address(address)
+            label.verify(node)
+            assert(address not in node.getaddressesbyaccount(""))
+
+        # Check that addmultisigaddress can assign labels.
+        for label in labels:
+            addresses = []
+            for x in range(10):
+                addresses.append(node.getnewaddress())
+            multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
+            label.add_address(multisig_address)
+            label.verify(node)
+            node.sendfrom("", multisig_address, 50)
+        node.generate(101)
+        for label in labels:
+            assert_equal(node.getbalance(label.name), 50)
+
+        # Check that setlabel can change the label of an address from a
+        # different label.
+        change_label(node, labels[0].addresses[0], labels[0], labels[1])
+
+        # Check that setlabel can change the label of an address which
+        # is the receiving address of a different label.
+        change_label(node, labels[0].receive_address, labels[0], labels[1])
+
+        # Check that setlabel can set the label of an address already
+        # in the label. This is a no-op.
+        change_label(node, labels[2].addresses[0], labels[2], labels[2])
+
+        # Check that setlabel can set the label of an address which is
+        # already the receiving address of the label. It would probably make
+        # sense for this to be a no-op, but right now it resets the receiving
+        # address, causing getlabeladdress to return a brand new address.
+        change_label(node, labels[2].receive_address, labels[2], labels[2])
+
+class Label:
+    def __init__(self, name):
+        # Label name
+        self.name = name
+        # Current receiving address associated with this label.
+        self.receive_address = None
+        # List of all addresses assigned with this label
+        self.addresses = []
+
+    def add_address(self, address):
+        assert_equal(address not in self.addresses, True)
+        self.addresses.append(address)
+
+    def add_receive_address(self, address):
+        self.add_address(address)
+        self.receive_address = address
+
+    def verify(self, node):
+        if self.receive_address is not None:
+            assert self.receive_address in self.addresses
+            assert_equal(node.getlabeladdress(self.name), self.receive_address)
+
+        for address in self.addresses:
+            assert_equal(node.getaccount(address), self.name)
+
+        assert_equal(
+            set(node.getaddressesbyaccount(self.name)), set(self.addresses))
+
+
+def change_label(node, address, old_label, new_label):
+    assert_equal(address in old_label.addresses, True)
+    node.setlabel(address, new_label.name)
+
+    old_label.addresses.remove(address)
+    new_label.add_address(address)
+
+    # Calling setlabel on an address which was previously the receiving
+    # address of a different label should reset the receiving address of
+    # the old label, causing getlabeladdress to return a brand new
+    # address.
+    if address == old_label.receive_address:
+        new_address = node.getlabeladdress(old_label.name)
+        assert_equal(new_address not in old_label.addresses, True)
+        assert_equal(new_address not in new_label.addresses, True)
+        old_label.add_receive_address(new_address)
+
+    old_label.verify(node)
+    new_label.verify(node)
+
+
+if __name__ == '__main__':
+    WalletLabelsTest().main()
diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py
index 01c9899c71..a4754852ed 100755
--- a/test/functional/wallet_listreceivedby.py
+++ b/test/functional/wallet_listreceivedby.py
@@ -36,11 +36,11 @@ class ReceivedByTest(BitcoinTestFramework):
         self.sync_all()
         assert_array_result(self.nodes[1].listreceivedbyaddress(),
                             {"address": addr},
-                            {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
+                            {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
         # With min confidence < 10
         assert_array_result(self.nodes[1].listreceivedbyaddress(5),
                             {"address": addr},
-                            {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
+                            {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
         # With min confidence > 10, should not find Tx
         assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
 
@@ -48,11 +48,11 @@ class ReceivedByTest(BitcoinTestFramework):
         empty_addr = self.nodes[1].getnewaddress()
         assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
                             {"address": empty_addr},
-                            {"address": empty_addr, "account": "", "amount": 0, "confirmations": 0, "txids": []})
+                            {"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []})
 
         #Test Address filtering
         #Only on addr
-        expected = {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]}
+        expected = {"address":addr, "label":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]}
         res = self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True, address_filter=addr)
         assert_array_result(res, {"address":addr}, expected)
         assert_equal(len(res), 1)
@@ -66,12 +66,12 @@ class ReceivedByTest(BitcoinTestFramework):
         self.nodes[0].generate(1)
         self.sync_all()
         #Same test as above should still pass
-        expected = {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":11, "txids":[txid,]}
+        expected = {"address":addr, "label":"", "amount":Decimal("0.1"), "confirmations":11, "txids":[txid,]}
         res = self.nodes[1].listreceivedbyaddress(0, True, True, addr)
         assert_array_result(res, {"address":addr}, expected)
         assert_equal(len(res), 1)
         #Same test as above but with other_addr should still pass
-        expected = {"address":other_addr, "account":"", "amount":Decimal("0.1"), "confirmations":1, "txids":[txid2,]}
+        expected = {"address":other_addr, "label":"", "amount":Decimal("0.1"), "confirmations":1, "txids":[txid2,]}
         res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
         assert_array_result(res, {"address":other_addr}, expected)
         assert_equal(len(res), 1)
@@ -108,46 +108,46 @@ class ReceivedByTest(BitcoinTestFramework):
         # Trying to getreceivedby for an address the wallet doesn't own should return an error
         assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)
 
-        self.log.info("listreceivedbyaccount + getreceivedbyaccount Test")
+        self.log.info("listreceivedbylabel + getreceivedbylabel Test")
 
         # set pre-state
         addrArr = self.nodes[1].getnewaddress()
-        account = self.nodes[1].getaccount(addrArr)
-        received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount() if r["account"] == account][0]
-        balance_by_account = self.nodes[1].getreceivedbyaccount(account)
+        label = self.nodes[1].getaccount(addrArr)
+        received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
+        balance_by_label = self.nodes[1].getreceivedbylabel(label)
         txid = self.nodes[0].sendtoaddress(addr, 0.1)
         self.sync_all()
 
-        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
-        assert_array_result(self.nodes[1].listreceivedbyaccount(),
-                            {"account": account},
-                            received_by_account_json)
+        # listreceivedbylabel should return received_by_label_json because of 0 confirmations
+        assert_array_result(self.nodes[1].listreceivedbylabel(),
+                            {"label": label},
+                            received_by_label_json)
 
         # getreceivedbyaddress should return same balance because of 0 confirmations
-        balance = self.nodes[1].getreceivedbyaccount(account)
-        assert_equal(balance, balance_by_account)
+        balance = self.nodes[1].getreceivedbylabel(label)
+        assert_equal(balance, balance_by_label)
 
         self.nodes[1].generate(10)
         self.sync_all()
-        # listreceivedbyaccount should return updated account balance
-        assert_array_result(self.nodes[1].listreceivedbyaccount(),
-                            {"account": account},
-                            {"account": received_by_account_json["account"], "amount": (received_by_account_json["amount"] + Decimal("0.1"))})
+        # listreceivedbylabel should return updated received list
+        assert_array_result(self.nodes[1].listreceivedbylabel(),
+                            {"label": label},
+                            {"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))})
 
-        # getreceivedbyaddress should return updates balance
-        balance = self.nodes[1].getreceivedbyaccount(account)
-        assert_equal(balance, balance_by_account + Decimal("0.1"))
+        # getreceivedbylabel should return updated receive total
+        balance = self.nodes[1].getreceivedbylabel(label)
+        assert_equal(balance, balance_by_label + Decimal("0.1"))
 
-        # Create a new account named "mynewaccount" that has a 0 balance
-        self.nodes[1].getaccountaddress("mynewaccount")
-        received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount(0, True) if r["account"] == "mynewaccount"][0]
+        # Create a new label named "mynewlabel" that has a 0 balance
+        self.nodes[1].getlabeladdress("mynewlabel")
+        received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel(0, True) if r["label"] == "mynewlabel"][0]
 
-        # Test includeempty of listreceivedbyaccount
-        assert_equal(received_by_account_json["amount"], Decimal("0.0"))
+        # Test includeempty of listreceivedbylabel
+        assert_equal(received_by_label_json["amount"], Decimal("0.0"))
 
-        # Test getreceivedbyaccount for 0 amount accounts
-        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
+        # Test getreceivedbylabel for 0 amount labels
+        balance = self.nodes[1].getreceivedbylabel("mynewlabel")
         assert_equal(balance, Decimal("0.0"))
 
 
 if __name__ == '__main__':