82 files changed, 1600 insertions, 1293 deletions
diff --git a/contrib/devtools/check-doc.py b/contrib/devtools/check-doc.py index f164ea9322..4e87cdae82 100755 --- a/contrib/devtools/check-doc.py +++ b/contrib/devtools/check-doc.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -16,29 +16,29 @@ import sys FOLDER_GREP = 'src' FOLDER_TEST = 'src/test/' -CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP -CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST) -CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR) +CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP) +CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST) +CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR) REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"') REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")') # list unsupported, deprecated and duplicate args as they need no documentation SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd']) def main(): - used = check_output(CMD_GREP_ARGS, shell=True) - docd = check_output(CMD_GREP_DOCS, shell=True) + used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True) + docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True) args_used = set(re.findall(REGEX_ARG,used)) args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL) args_need_doc = args_used.difference(args_docd) args_unknown = args_docd.difference(args_used) - print "Args used : %s" % len(args_used) - print "Args documented : %s" % len(args_docd) - print "Args undocumented: %s" % len(args_need_doc) - print args_need_doc - print "Args unknown : %s" % len(args_unknown) - print args_unknown + print("Args used : {}".format(len(args_used))) + print("Args documented : {}".format(len(args_docd))) + print("Args undocumented: {}".format(len(args_need_doc))) + print(args_need_doc) + print("Args unknown : {}".format(len(args_unknown))) + print(args_unknown) sys.exit(len(args_need_doc)) diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py index 7ea49b65e1..5402870fba 100755 --- a/contrib/devtools/clang-format-diff.py +++ b/contrib/devtools/clang-format-diff.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # #===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# # @@ -69,10 +69,9 @@ Example usage for git/svn users: import argparse import difflib +import io import re -import string import subprocess -import StringIO import sys @@ -133,9 +132,9 @@ def main(): ['-lines', str(start_line) + ':' + str(end_line)]) # Reformat files containing changes in place. 
- for filename, lines in lines_by_file.iteritems(): + for filename, lines in lines_by_file.items(): if args.i and args.verbose: - print 'Formatting', filename + print('Formatting {}'.format(filename)) command = [binary, filename] if args.i: command.append('-i') @@ -143,8 +142,11 @@ def main(): command.append('-sort-includes') command.extend(lines) command.extend(['-style=file', '-fallback-style=none']) - p = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=None, stdin=subprocess.PIPE) + p = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=None, + stdin=subprocess.PIPE, + universal_newlines=True) stdout, stderr = p.communicate() if p.returncode != 0: sys.exit(p.returncode) @@ -152,11 +154,11 @@ def main(): if not args.i: with open(filename) as f: code = f.readlines() - formatted_code = StringIO.StringIO(stdout).readlines() + formatted_code = io.StringIO(stdout).readlines() diff = difflib.unified_diff(code, formatted_code, filename, filename, '(before formatting)', '(after formatting)') - diff_string = string.join(diff, '') + diff_string = ''.join(diff) if len(diff_string) > 0: sys.stdout.write(diff_string) diff --git a/contrib/devtools/optimize-pngs.py b/contrib/devtools/optimize-pngs.py index 5cb3bb6f75..565b199125 100755 --- a/contrib/devtools/optimize-pngs.py +++ b/contrib/devtools/optimize-pngs.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -10,7 +10,7 @@ import os import sys import subprocess import hashlib -from PIL import Image +from PIL import Image # pip3 install Pillow def file_hash(filename): '''Return hash of raw file contents''' @@ -27,7 +27,7 @@ def content_hash(filename): pngcrush = 'pngcrush' git = 'git' folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"] -basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n') +basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel'], universal_newlines=True).rstrip('\n') totalSaveBytes = 0 noHashChange = True @@ -37,42 +37,40 @@ for folder in folders: for file in os.listdir(absFolder): extension = os.path.splitext(file)[1] if extension.lower() == '.png': - print("optimizing "+file+"..."), + print("optimizing {}...".format(file), end =' ') file_path = os.path.join(absFolder, file) fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)} fileMetaMap['contentHashPre'] = content_hash(file_path) - pngCrushOutput = "" try: - pngCrushOutput = subprocess.check_output( - [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path], - stderr=subprocess.STDOUT).rstrip('\n') + subprocess.call([pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path], + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except: - print "pngcrush is not installed, aborting..." 
+ print("pngcrush is not installed, aborting...") sys.exit(0) #verify - if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT): - print "PNG file "+file+" is corrupted after crushing, check out pngcursh version" + if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT, universal_newlines=True): + print("PNG file "+file+" is corrupted after crushing, check out pngcursh version") sys.exit(1) fileMetaMap['sha256New'] = file_hash(file_path) fileMetaMap['contentHashPost'] = content_hash(file_path) if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']: - print "Image contents of PNG file "+file+" before and after crushing don't match" + print("Image contents of PNG file {} before and after crushing don't match".format(file)) sys.exit(1) fileMetaMap['psize'] = os.path.getsize(file_path) outputArray.append(fileMetaMap) - print("done\n"), + print("done") -print "summary:\n+++++++++++++++++" +print("summary:\n+++++++++++++++++") for fileDict in outputArray: oldHash = fileDict['sha256Old'] newHash = fileDict['sha256New'] totalSaveBytes += fileDict['osize'] - fileDict['psize'] noHashChange = noHashChange and (oldHash == newHash) - print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n" + print(fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n") -print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes" +print("completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes") diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index 1613f704df..0f2099953f 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -8,7 +8,6 @@ Exit status will be 0 if successful, and the program will be silent. Otherwise the exit status will be 1 and it will log which executables failed which checks. Needs `readelf` (for ELF) and `objdump` (for PE). ''' -from __future__ import division,print_function,unicode_literals import subprocess import sys import os @@ -21,38 +20,38 @@ def check_ELF_PIE(executable): ''' Check for position independent executable (PIE), allowing for address space randomization. 
''' - p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') ok = False - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): line = line.split() - if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN': + if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN': ok = True return ok def get_ELF_program_headers(executable): '''Return type and flags for ELF program headers''' - p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') in_headers = False count = 0 headers = [] - for line in stdout.split(b'\n'): - if line.startswith(b'Program Headers:'): + for line in stdout.splitlines(): + if line.startswith('Program Headers:'): in_headers = True - if line == b'': + if line == '': in_headers = False if in_headers: if count == 1: # header line - ofs_typ = line.find(b'Type') - ofs_offset = line.find(b'Offset') - ofs_flags = line.find(b'Flg') - ofs_align = line.find(b'Align') + ofs_typ = line.find('Type') + ofs_offset = line.find('Offset') + ofs_flags = line.find('Flg') + ofs_align = line.find('Align') if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1: raise ValueError('Cannot parse elfread -lW output') elif count > 1: @@ -69,9 +68,9 @@ def check_ELF_NX(executable): have_wx = False have_gnu_stack = False for (typ, flags) in get_ELF_program_headers(executable): - if typ == b'GNU_STACK': + if typ == 'GNU_STACK': have_gnu_stack = True - if b'W' in flags and b'E' in flags: # section is both writable and executable + if 'W' in flags and 'E' in flags: # section is both writable and executable have_wx = True return have_gnu_stack and not have_wx @@ -88,17 +87,17 @@ def check_ELF_RELRO(executable): # However, the dynamic linker need to write to this area so these are RW. # Glibc itself takes care of mprotecting this area R after relocations are finished. 
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347 - if typ == b'GNU_RELRO': + if typ == 'GNU_RELRO': have_gnu_relro = True have_bindnow = False - p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): tokens = line.split() - if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]): + if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]): have_bindnow = True return have_gnu_relro and have_bindnow @@ -106,13 +105,13 @@ def check_ELF_Canary(executable): ''' Check for use of stack canary ''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') ok = False - for line in stdout.split(b'\n'): - if b'__stack_chk_fail' in line: + for line in stdout.splitlines(): + if '__stack_chk_fail' in line: ok = True return ok @@ -122,13 +121,13 @@ def get_PE_dll_characteristics(executable): Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386' and bits is the DllCharacteristics value. ''' - p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') arch = '' bits = 0 - for line in stdout.split('\n'): + for line in stdout.splitlines(): tokens = line.split() if len(tokens)>=2 and tokens[0] == 'architecture:': arch = tokens[1].rstrip(',') diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 98daa1bd76..3a67319eaa 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2014 Wladimir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -11,7 +11,6 @@ Example usage: find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py ''' -from __future__ import division, print_function, unicode_literals import subprocess import re import sys @@ -47,28 +46,28 @@ MAX_VERSIONS = { # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used' +'_edata', '_end', '_init', '__bss_start', '_fini', '_IO_stdin_used' } READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') # Allowed NEEDED libraries ALLOWED_LIBRARIES = { # bitcoind and bitcoin-qt -b'libgcc_s.so.1', # GCC base support -b'libc.so.6', # C library -b'libpthread.so.0', # threading -b'libanl.so.1', # DNS resolve -b'libm.so.6', # math library -b'librt.so.1', # real-time (clock) -b'ld-linux-x86-64.so.2', # 64-bit dynamic linker -b'ld-linux.so.2', # 32-bit dynamic linker +'libgcc_s.so.1', # GCC base support +'libc.so.6', # C library +'libpthread.so.0', # threading +'libanl.so.1', # DNS resolve +'libm.so.6', # math library +'librt.so.1', # real-time (clock) +'ld-linux-x86-64.so.2', # 64-bit dynamic linker +'ld-linux.so.2', # 32-bit dynamic linker # bitcoin-qt only -b'libX11-xcb.so.1', # part of X11 -b'libX11.so.6', # part of X11 -b'libxcb.so.1', # part of X11 -b'libfontconfig.so.1', # font support -b'libfreetype.so.6', # font parsing -b'libdl.so.2' # programming interface to dynamic linker +'libX11-xcb.so.1', # part of X11 +'libX11.so.6', # part of X11 +'libxcb.so.1', # part of X11 +'libfontconfig.so.1', # font support +'libfreetype.so.6', # font parsing +'libdl.so.2' # programming interface to dynamic linker } class CPPFilt(object): @@ -78,10 +77,10 @@ class CPPFilt(object): Use a pipe to the 'c++filt' command. ''' def __init__(self): - self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) def __call__(self, mangled): - self.proc.stdin.write(mangled + b'\n') + self.proc.stdin.write(mangled + '\n') self.proc.stdin.flush() return self.proc.stdout.readline().rstrip() @@ -95,43 +94,43 @@ def read_symbols(executable, imports=True): Parse an ELF executable and return a list of (symbol,version) tuples for dynamic, imported symbols. 
''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip())) syms = [] - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): line = line.split() - if len(line)>7 and re.match(b'[0-9]+:$', line[0]): - (sym, _, version) = line[7].partition(b'@') - is_import = line[6] == b'UND' - if version.startswith(b'@'): + if len(line)>7 and re.match('[0-9]+:$', line[0]): + (sym, _, version) = line[7].partition('@') + is_import = line[6] == 'UND' + if version.startswith('@'): version = version[1:] if is_import == imports: syms.append((sym, version)) return syms def check_version(max_versions, version): - if b'_' in version: - (lib, _, ver) = version.rpartition(b'_') + if '_' in version: + (lib, _, ver) = version.rpartition('_') else: lib = version ver = '0' - ver = tuple([int(x) for x in ver.split(b'.')]) + ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False return ver <= max_versions[lib] def read_libraries(filename): - p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') libraries = [] - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): tokens = line.split() - if len(tokens)>2 and tokens[1] == b'(NEEDED)': - match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:])) + if len(tokens)>2 and tokens[1] == '(NEEDED)': + match = re.match('^Shared library: \[(.*)\]$', ' '.join(tokens[2:])) if match: libraries.append(match.group(1)) else: @@ -145,18 +144,18 @@ if __name__ == '__main__': # Check imported symbols for sym,version in read_symbols(filename, True): if version and not check_version(MAX_VERSIONS, version): - print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8'))) + print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version)) retval = 1 # Check exported symbols for sym,version in read_symbols(filename, False): if sym in IGNORE_EXPORTS: continue - print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8'))) + print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym))) retval = 1 # Check dependency libraries for library_name in read_libraries(filename): if library_name not in ALLOWED_LIBRARIES: - print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8'))) + print('%s: NEEDED library %s is not allowed' % (filename, library_name)) retval = 1 sys.exit(retval) diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py index e1924749d2..b36e6968bf 100755 --- a/contrib/devtools/update-translations.py +++ b/contrib/devtools/update-translations.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2014 Wladimir J. 
van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -15,7 +15,6 @@ It will do the following automatically: TODO: - auto-add new translations to the build system according to the translation process ''' -from __future__ import division, print_function import subprocess import re import sys diff --git a/contrib/macdeploy/custom_dsstore.py b/contrib/macdeploy/custom_dsstore.py index e6ecabace1..b29fc71765 100755 --- a/contrib/macdeploy/custom_dsstore.py +++ b/contrib/macdeploy/custom_dsstore.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2013-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -from __future__ import division,print_function,unicode_literals import biplist from ds_store import DSStore from mac_alias import Alias diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 23a568ad13..17ce6c44f9 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -1,5 +1,4 @@ -#!/usr/bin/env python -from __future__ import division, print_function, unicode_literals +#!/usr/bin/env python3 # # Copyright (C) 2011 Patrick "p2k" Schneider <me@p2k-network.org> # @@ -203,7 +202,7 @@ def getFrameworks(binaryPath, verbose): if verbose >= 3: print("Inspecting with otool: " + binaryPath) otoolbin=os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) o_stdout, o_stderr = otool.communicate() if otool.returncode != 0: if verbose >= 1: @@ -211,7 +210,7 @@ def getFrameworks(binaryPath, verbose): sys.stderr.flush() raise RuntimeError("otool failed with return code %d" % otool.returncode) - otoolLines = o_stdout.decode().split("\n") + otoolLines = o_stdout.split("\n") otoolLines.pop(0) # First line is the inspected binary if ".framework" in binaryPath or binaryPath.endswith(".dylib"): otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency. 
@@ -714,22 +713,6 @@ elif config.sign: if config.dmg is not None: - #Patch in check_output for Python 2.6 - if "check_output" not in dir( subprocess ): - def f(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise CalledProcessError(retcode, cmd) - return output - subprocess.check_output = f - def runHDIUtil(verb, image_basename, **kwargs): hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"] if "capture_stdout" in kwargs: @@ -747,7 +730,7 @@ if config.dmg is not None: if not value is True: hdiutil_args.append(str(value)) - return run(hdiutil_args) + return run(hdiutil_args, universal_newlines=True) if verbose >= 2: if fancy is None: @@ -789,7 +772,7 @@ if config.dmg is not None: except subprocess.CalledProcessError as e: sys.exit(e.returncode) - m = re.search("/Volumes/(.+$)", output.decode()) + m = re.search("/Volumes/(.+$)", output) disk_root = m.group(0) disk_name = m.group(1) diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py index 816d40b49c..0dbb79a707 100644 --- a/contrib/testgen/base58.py +++ b/contrib/testgen/base58.py @@ -28,7 +28,9 @@ def b58encode(v): """ long_value = 0 for (i, c) in enumerate(v[::-1]): - long_value += (256**i) * ord(c) + if isinstance(c, str): + c = ord(c) + long_value += (256**i) * c result = '' while long_value >= __b58base: @@ -41,7 +43,7 @@ def b58encode(v): # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: - if c == '\0': nPad += 1 + if c == 0: nPad += 1 else: break return (__b58chars[0]*nPad) + result @@ -50,8 +52,10 @@ def b58decode(v, length = None): """ decode v into a string of len bytes """ long_value = 0 - for (i, c) in enumerate(v[::-1]): - long_value += __b58chars.find(c) * (__b58base**i) + for i, c in enumerate(v[::-1]): + pos = __b58chars.find(c) + assert pos != -1 + long_value += pos * (__b58base**i) result = bytes() while long_value >= 256: @@ -62,10 +66,12 @@ def b58decode(v, length = None): nPad = 0 for c in v: - if c == __b58chars[0]: nPad += 1 - else: break + if c == __b58chars[0]: + nPad += 1 + continue + break - result = chr(0)*nPad + result + result = bytes(nPad) + result if length is not None and len(result) != length: return None diff --git a/contrib/testgen/gen_base58_test_vectors.py b/contrib/testgen/gen_base58_test_vectors.py index 8e6a5d5819..4351605786 100755 --- a/contrib/testgen/gen_base58_test_vectors.py +++ b/contrib/testgen/gen_base58_test_vectors.py @@ -1,11 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Generate valid and invalid base58 address and private key test vectors. 
-Usage: +Usage: gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json ''' @@ -46,8 +46,8 @@ def is_valid(v): if result is None: return False for template in templates: - prefix = str(bytearray(template[0])) - suffix = str(bytearray(template[2])) + prefix = bytearray(template[0]) + suffix = bytearray(template[2]) if result.startswith(prefix) and result.endswith(suffix): if (len(result) - len(prefix) - len(suffix)) == template[1]: return True @@ -57,20 +57,23 @@ def gen_valid_vectors(): '''Generate valid test vectors''' while True: for template in templates: - prefix = str(bytearray(template[0])) - payload = os.urandom(template[1]) - suffix = str(bytearray(template[2])) + prefix = bytearray(template[0]) + payload = bytearray(os.urandom(template[1])) + suffix = bytearray(template[2]) rv = b58encode_chk(prefix + payload + suffix) assert is_valid(rv) - metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None]) - yield (rv, b2a_hex(payload), metadata) + metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None} + hexrepr = b2a_hex(payload) + if isinstance(hexrepr, bytes): + hexrepr = hexrepr.decode('utf8') + yield (rv, hexrepr, metadata) def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix): '''Generate possibly invalid vector''' if corrupt_prefix: prefix = os.urandom(1) else: - prefix = str(bytearray(template[0])) + prefix = bytearray(template[0]) if randomize_payload_size: payload = os.urandom(max(int(random.expovariate(0.5)), 50)) @@ -80,7 +83,7 @@ def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt if corrupt_suffix: suffix = os.urandom(len(template[2])) else: - suffix = str(bytearray(template[2])) + suffix = bytearray(template[2]) return b58encode_chk(prefix + payload + suffix) diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 3be599623a..0de1892200 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -1,6 +1,43 @@ Developer Notes =============== +<!-- markdown-toc start --> +**Table of Contents** + +- [Developer Notes](#developer-notes) + - [Coding Style](#coding-style) + - [Doxygen comments](#doxygen-comments) + - [Development tips and tricks](#development-tips-and-tricks) + - [Compiling for debugging](#compiling-for-debugging) + - [Compiling for gprof profiling](#compiling-for-gprof-profiling) + - [debug.log](#debuglog) + - [Testnet and Regtest modes](#testnet-and-regtest-modes) + - [DEBUG_LOCKORDER](#debug_lockorder) + - [Valgrind suppressions file](#valgrind-suppressions-file) + - [Compiling for test coverage](#compiling-for-test-coverage) + - [Locking/mutex usage notes](#lockingmutex-usage-notes) + - [Threads](#threads) + - [Ignoring IDE/editor files](#ignoring-ideeditor-files) +- [Development guidelines](#development-guidelines) + - [General Bitcoin Core](#general-bitcoin-core) + - [Wallet](#wallet) + - [General C++](#general-c) + - [C++ data structures](#c-data-structures) + - [Strings and formatting](#strings-and-formatting) + - [Variable names](#variable-names) + - [Threads and synchronization](#threads-and-synchronization) + - [Source code organization](#source-code-organization) + - [GUI](#gui) + - [Subtrees](#subtrees) + - [Git and GitHub tips](#git-and-github-tips) + - [Scripted diffs](#scripted-diffs) + - [RPC interface guidelines](#rpc-interface-guidelines) + +<!-- markdown-toc end --> + +Coding Style +--------------- + 
Various coding styles have been used during the history of the codebase, and the result is not very consistent. However, we're now trying to converge to a single style, which is specified below. When writing patches, favor the new @@ -39,6 +76,7 @@ code. - `++i` is preferred over `i++`. - `nullptr` is preferred over `NULL` or `(void*)0`. - `static_assert` is preferred over `assert` where possible. Generally; compile-time checking is preferred over run-time checking. + - `enum class` is preferred over `enum` where possible. Scoped enumerations avoid two potential pitfalls/problems with traditional C++ enumerations: implicit conversions to int, and name clashes due to enumerators being exported to the surrounding scope. Block style example: ```c++ @@ -137,43 +175,44 @@ Documentation can be generated with `make docs` and cleaned up with `make clean- Development tips and tricks --------------------------- -**compiling for debugging** +### Compiling for debugging -Run configure with the --enable-debug option, then make. Or run configure with -CXXFLAGS="-g -ggdb -O0" or whatever debug flags you need. +Run configure with `--enable-debug` to add additional compiler flags that +produce better debugging builds. -**compiling for gprof profiling** +### Compiling for gprof profiling -Run configure with the --enable-gprof option, then make. +Run configure with the `--enable-gprof` option, then make. -**debug.log** +### debug.log If the code is behaving strangely, take a look in the debug.log file in the data directory; error and debugging messages are written there. -The -debug=... command-line option controls debugging; running with just -debug or -debug=1 will turn +The `-debug=...` command-line option controls debugging; running with just `-debug` or `-debug=1` will turn on all categories (and give you a very large debug.log file). -The Qt code routes qDebug() output to debug.log under category "qt": run with -debug=qt +The Qt code routes `qDebug()` output to debug.log under category "qt": run with `-debug=qt` to see it. -**testnet and regtest modes** +### Testnet and Regtest modes -Run with the -testnet option to run with "play bitcoins" on the test network, if you +Run with the `-testnet` option to run with "play bitcoins" on the test network, if you are testing multi-machine code that needs to operate across the internet. -If you are testing something that can run on one machine, run with the -regtest option. -In regression test mode, blocks can be created on-demand; see test/functional/ for tests -that run in -regtest mode. +If you are testing something that can run on one machine, run with the `-regtest` option. +In regression test mode, blocks can be created on-demand; see [test/functional/](/test/functional) for tests +that run in `-regtest` mode. -**DEBUG_LOCKORDER** +### DEBUG_LOCKORDER -Bitcoin Core is a multithreaded application, and deadlocks or other multithreading bugs -can be very difficult to track down. Compiling with -DDEBUG_LOCKORDER (configure -CXXFLAGS="-DDEBUG_LOCKORDER -g") inserts run-time checks to keep track of which locks -are held, and adds warnings to the debug.log file if inconsistencies are detected. +Bitcoin Core is a multi-threaded application, and deadlocks or other +multi-threading bugs can be very difficult to track down. The `--enable-debug` +configure option adds `-DDEBUG_LOCKORDER` to the compiler flags. This inserts +run-time checks to keep track of which locks are held, and adds warnings to the +debug.log file if inconsistencies are detected. 
-**Valgrind suppressions file** +### Valgrind suppressions file Valgrind is a programming tool for memory debugging, memory leak detection, and profiling. The repo contains a Valgrind suppressions file @@ -188,7 +227,7 @@ $ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \ $ valgrind -v --leak-check=full src/bitcoind -printtoconsole ``` -**compiling for test coverage** +### Compiling for test coverage LCOV can be used to generate a test coverage report based upon `make check` execution. LCOV must be installed on your system (e.g. the `lcov` package @@ -259,18 +298,18 @@ Locking/mutex usage notes ------------------------- The code is multi-threaded, and uses mutexes and the -LOCK/TRY_LOCK macros to protect data structures. +`LOCK` and `TRY_LOCK` macros to protect data structures. -Deadlocks due to inconsistent lock ordering (thread 1 locks cs_main -and then cs_wallet, while thread 2 locks them in the opposite order: -result, deadlock as each waits for the other to release its lock) are -a problem. Compile with -DDEBUG_LOCKORDER to get lock order -inconsistencies reported in the debug.log file. +Deadlocks due to inconsistent lock ordering (thread 1 locks `cs_main` and then +`cs_wallet`, while thread 2 locks them in the opposite order: result, deadlock +as each waits for the other to release its lock) are a problem. Compile with +`-DDEBUG_LOCKORDER` (or use `--enable-debug`) to get lock order inconsistencies +reported in the debug.log file. Re-architecting the core code so there are better-defined interfaces between the various components is a goal, with any necessary locking -done by the components (e.g. see the self-contained CKeyStore class -and its cs_KeyStore lock for example). +done by the components (e.g. see the self-contained `CBasicKeyStore` class +and its `cs_KeyStore` lock for example). Threads ------- @@ -593,7 +632,10 @@ its upstream repository. Current subtrees include: - src/leveldb - - Upstream at https://github.com/google/leveldb ; Maintained by Google, but open important PRs to Core to avoid delay + - Upstream at https://github.com/google/leveldb ; Maintained by Google, but + open important PRs to Core to avoid delay. + - **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when + merging upstream changes to the leveldb subtree. - src/libsecp256k1 - Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintaned by Core contributors. @@ -604,6 +646,52 @@ Current subtrees include: - src/univalue - Upstream at https://github.com/jgarzik/univalue ; report important PRs to Core to avoid delay. +Upgrading LevelDB +--------------------- + +Extra care must be taken when upgrading LevelDB. This section explains issues +you must be aware of. + +### File Descriptor Counts + +In most configurations we use the default LevelDB value for `max_open_files`, +which is 1000 at the time of this writing. If LevelDB actually uses this many +file descriptors it will cause problems with Bitcoin's `select()` loop, because +it may cause new sockets to be created where the fd value is >= 1024. For this +reason, on 64-bit Unix systems we rely on an internal LevelDB optimization that +uses `mmap()` + `close()` to open table files without actually retaining +references to the table file descriptors. If you are upgrading LevelDB, you must +sanity check the changes to make sure that this assumption remains valid. + +In addition to reviewing the upstream changes in `env_posix.cc`, you can use `lsof` to +check this. 
For example, on Linux this command will show open `.ldb` file counts:
+
+```bash
+$ lsof -p $(pidof bitcoind) |\
+ awk 'BEGIN { fd=0; mem=0; } /ldb$/ { if ($4 == "mem") mem++; else fd++ } END { printf "mem = %s, fd = %s\n", mem, fd}'
+mem = 119, fd = 0
+```
+
+The `mem` value shows how many files are mmap'ed, and the `fd` value shows how
+many file descriptors these files are using. You should check that `fd` is a
+small number (usually 0 on 64-bit hosts).
+
+See the notes in the `SetMaxOpenFiles()` function in `dbwrapper.cc` for more
+details.
+
+### Consensus Compatibility
+
+It is possible for LevelDB changes to inadvertently change consensus
+compatibility between nodes. This happened in Bitcoin 0.8 (when LevelDB was
+first introduced). When upgrading LevelDB you should review the upstream changes
+to check for issues affecting consensus compatibility.
+
+For example, if LevelDB had a bug that accidentally prevented a key from being
+returned in an edge case, and that bug was fixed upstream, the bug "fix" would
+be an incompatible consensus change. In this situation the correct behavior
+would be to revert the upstream fix before applying the updates to Bitcoin's
+copy of LevelDB. In general you should be wary of any upstream changes affecting
+what data is returned from LevelDB queries.

Git and GitHub tips
---------------------
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 740afd5137..48ee364c18 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -117,6 +117,11 @@ deprecated in V0.15.1, and has now been removed. Miners should use the
`-blockmaxweight` option if they want to limit the weight of their blocks'
weights.

+Python Support
+--------------
+
+Support for Python 2 has been discontinued for all test files and tools.
+
Credits
=======
diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py
index fbf3ea3436..e8f0820ca8 100755
--- a/share/qt/extract_strings_qt.py
+++ b/share/qt/extract_strings_qt.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (c) 2012-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -6,7 +6,6 @@
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
-from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import operator
import os
diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py
index 7baad85582..d6580281d4 100755
--- a/share/rpcauth/rpcauth.py
+++ b/share/rpcauth/rpcauth.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
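(Editorial note, not part of the changeset.) The Python script changes up to this point repeat one conversion pattern: the shebang moves to `python3`, `print` becomes a function using `str.format()`, `dict.items()` replaces `iteritems()`, `io.StringIO` replaces `StringIO.StringIO`, and subprocess calls pass `universal_newlines=True` so their output comes back as `str` rather than `bytes`. A minimal sketch of the combined idiom follows; the `git rev-parse` call and the count values are illustrative only, not taken from these scripts.

```python
#!/usr/bin/env python3
# Illustrative sketch of the Python 3 idioms adopted by the scripts above;
# not part of the diff itself.
import subprocess

# universal_newlines=True makes check_output return str instead of bytes,
# so no manual .decode('utf-8') is needed before string handling.
toplevel = subprocess.check_output(
    ['git', 'rev-parse', '--show-toplevel'],
    universal_newlines=True).rstrip('\n')

counts = {'args used': 3, 'args documented': 2}  # hypothetical values
for name, value in counts.items():  # Python 3: items() instead of iteritems()
    print('{:<16}: {}'.format(name, value))
print('repository root : {}'.format(toplevel))
```

On Python 3.7 and later, `text=True` is an alias for `universal_newlines=True`.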
diff --git a/src/Makefile.am b/src/Makefile.am index 3ff8958960..72e5cdb95d 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -162,6 +162,7 @@ BITCOIN_CORE_H = \ validation.h \ validationinterface.h \ versionbits.h \ + walletinitinterface.h \ wallet/coincontrol.h \ wallet/crypter.h \ wallet/db.h \ diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp index 29dedeef0b..705fa368a5 100644 --- a/src/bench/verify_script.cpp +++ b/src/bench/verify_script.cpp @@ -75,7 +75,7 @@ static void VerifyScriptBench(benchmark::State& state) CMutableTransaction txSpend = BuildSpendingTransaction(scriptSig, txCredit); CScriptWitness& witness = txSpend.vin[0].scriptWitness; witness.stack.emplace_back(); - key.Sign(SignatureHash(witScriptPubkey, txSpend, 0, SIGHASH_ALL, txCredit.vout[0].nValue, SIGVERSION_WITNESS_V0), witness.stack.back(), 0); + key.Sign(SignatureHash(witScriptPubkey, txSpend, 0, SIGHASH_ALL, txCredit.vout[0].nValue, SigVersion::WITNESS_V0), witness.stack.back(), 0); witness.stack.back().push_back(static_cast<unsigned char>(SIGHASH_ALL)); witness.stack.push_back(ToByteVector(pubkey)); diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index d3eb60725f..df7fad89c2 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -18,6 +18,10 @@ #include <httpserver.h> #include <httprpc.h> #include <utilstrencodings.h> +#if ENABLE_WALLET +#include <wallet/init.h> +#endif +#include <walletinitinterface.h> #include <boost/thread.hpp> @@ -59,6 +63,12 @@ bool AppInit(int argc, char* argv[]) { bool fRet = false; +#if ENABLE_WALLET + g_wallet_init_interface.reset(new WalletInit); +#else + g_wallet_init_interface.reset(new DummyWalletInit); +#endif + // // Parameters // @@ -79,7 +89,7 @@ bool AppInit(int argc, char* argv[]) strUsage += "\n" + _("Usage:") + "\n" + " bitcoind [options] " + strprintf(_("Start %s Daemon"), _(PACKAGE_NAME)) + "\n"; - strUsage += "\n" + HelpMessage(HMM_BITCOIND); + strUsage += "\n" + HelpMessage(HelpMessageMode::BITCOIND); } fprintf(stdout, "%s", strUsage.c_str()); diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index fb0d4215a2..752f985bc0 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -71,6 +71,31 @@ public: } }; +static void SetMaxOpenFiles(leveldb::Options *options) { + // On most platforms the default setting of max_open_files (which is 1000) + // is optimal. On Windows using a large file count is OK because the handles + // do not interfere with select() loops. On 64-bit Unix hosts this value is + // also OK, because up to that amount LevelDB will use an mmap + // implementation that does not use extra file descriptors (the fds are + // closed after being mmaped). + // + // Increasing the value beyond the default is dangerous because LevelDB will + // fall back to a non-mmap implementation when the file count is too large. + // On 32-bit Unix host we should decrease the value because the handles use + // up real fds, and we want to avoid fd exhaustion issues. + // + // See PR #12495 for further discussion. 
+ + int default_open_files = options->max_open_files; +#ifndef WIN32 + if (sizeof(void*) < 8) { + options->max_open_files = 64; + } +#endif + LogPrint(BCLog::LEVELDB, "LevelDB using max_open_files=%d (default=%d)\n", + options->max_open_files, default_open_files); +} + static leveldb::Options GetOptions(size_t nCacheSize) { leveldb::Options options; @@ -78,13 +103,13 @@ static leveldb::Options GetOptions(size_t nCacheSize) options.write_buffer_size = nCacheSize / 4; // up to two write buffers may be held in memory simultaneously options.filter_policy = leveldb::NewBloomFilterPolicy(10); options.compression = leveldb::kNoCompression; - options.max_open_files = 64; options.info_log = new CBitcoinLevelDBLogger(); if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) { // LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error // on corruption in later versions. options.paranoid_checks = true; } + SetMaxOpenFiles(&options); return options; } @@ -159,12 +184,12 @@ bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync) const bool log_memory = LogAcceptCategory(BCLog::LEVELDB); double mem_before = 0; if (log_memory) { - mem_before = DynamicMemoryUsage() / 1024 / 1024; + mem_before = DynamicMemoryUsage() / 1024.0 / 1024; } leveldb::Status status = pdb->Write(fSync ? syncoptions : writeoptions, &batch.batch); dbwrapper_private::HandleError(status); if (log_memory) { - double mem_after = DynamicMemoryUsage() / 1024 / 1024; + double mem_after = DynamicMemoryUsage() / 1024.0 / 1024; LogPrint(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n", m_name, mem_before, mem_after); } diff --git a/src/httprpc.cpp b/src/httprpc.cpp index ed0a030701..0a70619ba6 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -158,8 +158,9 @@ static bool HTTPReq_JSONRPC(HTTPRequest* req, const std::string &) } JSONRPCRequest jreq; + jreq.peerAddr = req->GetPeer().ToString(); if (!RPCAuthorized(authHeader.second, jreq.authUser)) { - LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", req->GetPeer().ToString()); + LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr); /* Deter brute-forcing If this results in a DoS the user really diff --git a/src/init.cpp b/src/init.cpp index e5443eef0e..64b513d63a 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -43,10 +43,8 @@ #include <util.h> #include <utilmoneystr.h> #include <validationinterface.h> -#ifdef ENABLE_WALLET -#include <wallet/init.h> -#endif #include <warnings.h> +#include <walletinitinterface.h> #include <stdint.h> #include <stdio.h> #include <memory> @@ -74,6 +72,7 @@ static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false; std::unique_ptr<CConnman> g_connman; std::unique_ptr<PeerLogicValidation> peerLogic; +std::unique_ptr<WalletInitInterface> g_wallet_init_interface; #if ENABLE_ZMQ static CZMQNotificationInterface* pzmqNotificationInterface = nullptr; @@ -189,9 +188,7 @@ void Shutdown() StopREST(); StopRPC(); StopHTTPServer(); -#ifdef ENABLE_WALLET - FlushWallets(); -#endif + g_wallet_init_interface->Flush(); StopMapPort(); // Because these depend on each-other, we make sure that neither can be @@ -249,9 +246,7 @@ void Shutdown() pcoinsdbview.reset(); pblocktree.reset(); } -#ifdef ENABLE_WALLET - StopWallets(); -#endif + g_wallet_init_interface->Stop(); #if ENABLE_ZMQ if (pzmqNotificationInterface) { @@ -271,9 +266,8 @@ void Shutdown() UnregisterAllValidationInterfaces(); 
GetMainSignals().UnregisterBackgroundSignalScheduler(); GetMainSignals().UnregisterWithMempoolSignals(mempool); -#ifdef ENABLE_WALLET - CloseWallets(); -#endif + g_wallet_init_interface->Close(); + g_wallet_init_interface.reset(); globalVerifyHandle.reset(); ECC_Stop(); LogPrintf("%s: done\n", __func__); @@ -333,12 +327,13 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-version", _("Print version and exit")); strUsage += HelpMessageOpt("-alertnotify=<cmd>", _("Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)")); strUsage +=HelpMessageOpt("-assumevalid=<hex>", strprintf(_("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)"), defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex())); + strUsage += HelpMessageOpt("-blocksdir=<dir>", _("Specify blocks directory (default: <datadir>/blocks)")); strUsage += HelpMessageOpt("-blocknotify=<cmd>", _("Execute command when the best block changes (%s in cmd is replaced by block hash)")); strUsage += HelpMessageOpt("-blockreconstructionextratxn=<n>", strprintf(_("Extra transactions to keep in memory for compact block reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN)); if (showDebug) strUsage += HelpMessageOpt("-blocksonly", strprintf(_("Whether to operate in a blocks only mode (default: %u)"), DEFAULT_BLOCKSONLY)); strUsage += HelpMessageOpt("-conf=<file>", strprintf(_("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)"), BITCOIN_CONF_FILENAME)); - if (mode == HMM_BITCOIND) + if (mode == HelpMessageMode::BITCOIND) { #if HAVE_DECL_DAEMON strUsage += HelpMessageOpt("-daemon", _("Run in the background as a daemon and accept commands")); @@ -415,9 +410,7 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-whitelist=<IP address or network>", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway")); -#ifdef ENABLE_WALLET - strUsage += GetWalletHelpString(showDebug); -#endif + strUsage += g_wallet_init_interface->GetHelpString(showDebug); #if ENABLE_ZMQ strUsage += HelpMessageGroup(_("ZeroMQ notification options:")); @@ -432,7 +425,7 @@ std::string HelpMessage(HelpMessageMode mode) { strUsage += HelpMessageOpt("-checkblocks=<n>", strprintf(_("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=<n>", strprintf(_("How thorough the block verification of -checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); - strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. Also sets -checkmempool (default: %u)", defaultChainParams->DefaultConsistencyChecks())); + strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. 
(default: %u)", defaultChainParams->DefaultConsistencyChecks())); strUsage += HelpMessageOpt("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u)", defaultChainParams->DefaultConsistencyChecks())); strUsage += HelpMessageOpt("-checkpoints", strprintf("Disable expensive verification for known chain history (default: %u)", DEFAULT_CHECKPOINTS_ENABLED)); strUsage += HelpMessageOpt("-disablesafemode", strprintf("Disable safemode, override a real safe mode event (default: %u)", DEFAULT_DISABLE_SAFEMODE)); @@ -594,7 +587,7 @@ void CleanupBlockRevFiles() // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n"); - fs::path blocksdir = GetDataDir() / "blocks"; + fs::path blocksdir = GetBlocksDir(); for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { if (fs::is_regular_file(*it) && it->path().filename().string().length() == 12 && @@ -897,6 +890,10 @@ bool AppInitParameterInteraction() // also see: InitParameterInteraction() + if (!fs::is_directory(GetBlocksDir(false))) { + return InitError(strprintf(_("Specified blocks directory \"%s\" does not exist.\n"), gArgs.GetArg("-blocksdir", "").c_str())); + } + // if using block pruning, then disallow txindex if (gArgs.GetArg("-prune", 0)) { if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) @@ -1082,10 +1079,7 @@ bool AppInitParameterInteraction() return InitError(strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp); -#ifdef ENABLE_WALLET - if (!WalletParameterInteraction()) - return false; -#endif + if (!g_wallet_init_interface->ParameterInteraction()) return false; fIsBareMultisigStd = gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); fAcceptDatacarrier = gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); @@ -1251,9 +1245,7 @@ bool AppInitMain() * available in the GUI RPC console even if external calls are disabled. */ RegisterAllCoreRPCCommands(tableRPC); -#ifdef ENABLE_WALLET - RegisterWalletRPC(tableRPC); -#endif + g_wallet_init_interface->RegisterRPC(tableRPC); /* Start the RPC server already. 
It will be started in "warmup" mode * and not really process calls already (but it will signify connections @@ -1270,10 +1262,8 @@ bool AppInitMain() int64_t nStart; // ********************************************************* Step 5: verify wallet database integrity -#ifdef ENABLE_WALLET - if (!VerifyWallets()) - return false; -#endif + if (!g_wallet_init_interface->Verify()) return false; + // ********************************************************* Step 6: network initialization // Note that we absolutely cannot open any actual connections // until the very end ("start node") as the UTXO/block state @@ -1591,12 +1581,7 @@ bool AppInitMain() fFeeEstimatesInitialized = true; // ********************************************************* Step 8: load wallet -#ifdef ENABLE_WALLET - if (!OpenWallets()) - return false; -#else - LogPrintf("No wallet support compiled in!\n"); -#endif + if (!g_wallet_init_interface->Open()) return false; // ********************************************************* Step 9: data directory maintenance @@ -1622,7 +1607,7 @@ bool AppInitMain() // ********************************************************* Step 10: import blocks - if (!CheckDiskSpace()) + if (!CheckDiskSpace() && !CheckDiskSpace(0, true)) return false; // Either install a handler to notify us when genesis activates, or set fHaveGenesis directly. @@ -1742,9 +1727,7 @@ bool AppInitMain() SetRPCWarmupFinished(); uiInterface.InitMessage(_("Done loading")); -#ifdef ENABLE_WALLET - StartWallets(scheduler); -#endif + g_wallet_init_interface->Start(scheduler); return true; } diff --git a/src/init.h b/src/init.h index 33f97a55a5..c93a210154 100644 --- a/src/init.h +++ b/src/init.h @@ -6,11 +6,15 @@ #ifndef BITCOIN_INIT_H #define BITCOIN_INIT_H +#include <memory> #include <string> class CScheduler; class CWallet; +class WalletInitInterface; +extern std::unique_ptr<WalletInitInterface> g_wallet_init_interface; + namespace boost { class thread_group; @@ -57,9 +61,9 @@ bool AppInitLockDataDirectory(); bool AppInitMain(); /** The help message mode determines what help message to show */ -enum HelpMessageMode { - HMM_BITCOIND, - HMM_BITCOIN_QT +enum class HelpMessageMode { + BITCOIND, + BITCOIN_QT }; /** Help for options shared between UI and daemon (for -help) */ diff --git a/src/keystore.cpp b/src/keystore.cpp index fab1b81c9a..dfdfa5ea9f 100644 --- a/src/keystore.cpp +++ b/src/keystore.cpp @@ -7,10 +7,6 @@ #include <util.h> -bool CKeyStore::AddKey(const CKey &key) { - return AddKeyPubKey(key, key.GetPubKey()); -} - void CBasicKeyStore::ImplicitlyLearnRelatedKeyScripts(const CPubKey& pubkey) { AssertLockHeld(cs_KeyStore); diff --git a/src/keystore.h b/src/keystore.h index ffd3238fd6..fa912cb195 100644 --- a/src/keystore.h +++ b/src/keystore.h @@ -9,35 +9,27 @@ #include <key.h> #include <pubkey.h> #include <script/script.h> +#include <script/sign.h> #include <script/standard.h> #include <sync.h> #include <boost/signals2/signal.hpp> /** A virtual base class for key stores */ -class CKeyStore +class CKeyStore : public SigningProvider { -protected: - mutable CCriticalSection cs_KeyStore; - public: - virtual ~CKeyStore() {} - //! Add a key to the store. virtual bool AddKeyPubKey(const CKey &key, const CPubKey &pubkey) =0; - virtual bool AddKey(const CKey &key); //! Check whether a key corresponding to a given address is present in the store. 
virtual bool HaveKey(const CKeyID &address) const =0; - virtual bool GetKey(const CKeyID &address, CKey& keyOut) const =0; virtual std::set<CKeyID> GetKeys() const =0; - virtual bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const =0; //! Support for BIP 0013 : see https://github.com/bitcoin/bips/blob/master/bip-0013.mediawiki virtual bool AddCScript(const CScript& redeemScript) =0; virtual bool HaveCScript(const CScriptID &hash) const =0; virtual std::set<CScriptID> GetCScripts() const =0; - virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const =0; //! Support for Watch-only addresses virtual bool AddWatchOnly(const CScript &dest) =0; @@ -55,6 +47,8 @@ typedef std::set<CScript> WatchOnlySet; class CBasicKeyStore : public CKeyStore { protected: + mutable CCriticalSection cs_KeyStore; + KeyMap mapKeys; WatchKeyMap mapWatchKeys; ScriptMap mapScripts; @@ -64,6 +58,7 @@ protected: public: bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override; + bool AddKey(const CKey &key) { return AddKeyPubKey(key, key.GetPubKey()); } bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override; bool HaveKey(const CKeyID &address) const override; std::set<CKeyID> GetKeys() const override; diff --git a/src/policy/fees.h b/src/policy/fees.h index 5f69e989c1..7f80fc92c2 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -68,7 +68,7 @@ class TxConfirmStats; /* Identifier for each of the 3 different TxConfirmStats which will track * history over different time horizons. */ -enum FeeEstimateHorizon { +enum class FeeEstimateHorizon { SHORT_HALFLIFE = 0, MED_HALFLIFE = 1, LONG_HALFLIFE = 2 diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index db1ad4f26d..c3f65fb2ab 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -179,7 +179,7 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { std::vector<std::vector<unsigned char> > stack; // convert the scriptSig into a stack, so we can inspect the redeemScript - if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SIGVERSION_BASE)) + if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SigVersion::BASE)) return false; if (stack.empty()) return false; @@ -215,7 +215,7 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) // If the scriptPubKey is P2SH, we try to extract the redeemScript casually by converting the scriptSig // into a stack. We do not check IsPushOnly nor compare the hash as these will be done later anyway. // If the check fails at this stage, we know that this txid must be a bad one. - if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SIGVERSION_BASE)) + if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SigVersion::BASE)) return false; if (stack.empty()) return false; diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp index c5a1741608..81b2a7fadc 100644 --- a/src/policy/rbf.cpp +++ b/src/policy/rbf.cpp @@ -22,13 +22,13 @@ RBFTransactionState IsRBFOptIn(const CTransaction &tx, CTxMemPool &pool) // First check the transaction itself. if (SignalsOptInRBF(tx)) { - return RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125; + return RBFTransactionState::REPLACEABLE_BIP125; } // If this transaction is not in our mempool, then we can't be sure // we will know about all its inputs. 
if (!pool.exists(tx.GetHash())) { - return RBF_TRANSACTIONSTATE_UNKNOWN; + return RBFTransactionState::UNKNOWN; } // If all the inputs have nSequence >= maxint-1, it still might be @@ -40,8 +40,8 @@ RBFTransactionState IsRBFOptIn(const CTransaction &tx, CTxMemPool &pool) for (CTxMemPool::txiter it : setAncestors) { if (SignalsOptInRBF(it->GetTx())) { - return RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125; + return RBFTransactionState::REPLACEABLE_BIP125; } } - return RBF_TRANSACTIONSTATE_FINAL; + return RBFTransactionState::FINAL; } diff --git a/src/policy/rbf.h b/src/policy/rbf.h index 72f51b0f03..b10532addf 100644 --- a/src/policy/rbf.h +++ b/src/policy/rbf.h @@ -9,10 +9,10 @@ static const uint32_t MAX_BIP125_RBF_SEQUENCE = 0xfffffffd; -enum RBFTransactionState { - RBF_TRANSACTIONSTATE_UNKNOWN, - RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125, - RBF_TRANSACTIONSTATE_FINAL +enum class RBFTransactionState { + UNKNOWN, + REPLACEABLE_BIP125, + FINAL }; // Check whether the sequence numbers on this transaction are signaling diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index df1be35c8d..f853c04617 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -34,8 +34,10 @@ #include <warnings.h> #ifdef ENABLE_WALLET +#include <wallet/init.h> #include <wallet/wallet.h> #endif +#include <walletinitinterface.h> #include <stdint.h> @@ -623,7 +625,7 @@ int main(int argc, char *argv[]) if (!Intro::pickDataDirectory()) return EXIT_SUCCESS; - /// 6. Determine availability of data directory and parse bitcoin.conf + /// 6. Determine availability of data and blocks directory and parse bitcoin.conf /// - Do not call GetDataDir(true) before this step finishes if (!fs::is_directory(GetDataDir(false))) { @@ -677,6 +679,11 @@ int main(int argc, char *argv[]) // Start up the payment server early, too, so impatient users that click on // bitcoin: links repeatedly have their payment requests routed to this process: app.createPaymentServer(); + + // Hook up the wallet init interface + g_wallet_init_interface.reset(new WalletInit); +#else + g_wallet_init_interface.reset(new DummyWalletInit); #endif /// 9. 
Main GUI initialization @@ -696,7 +703,7 @@ int main(int argc, char *argv[]) // Allow parameter interaction before we create the options model app.parameterSetup(); // Load GUI settings from QSettings - app.createOptionsModel(gArgs.IsArgSet("-resetguisettings")); + app.createOptionsModel(gArgs.GetBoolArg("-resetguisettings", false)); // Subscribe to global signals from core uiInterface.InitMessage.connect(InitMessage); diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index 4707fcceb9..e4207fce99 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -799,7 +799,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer // Acquire current block source enum BlockSource blockSource = clientModel->getBlockSource(); switch (blockSource) { - case BLOCK_SOURCE_NETWORK: + case BlockSource::NETWORK: if (header) { updateHeadersSyncProgressLabel(); return; @@ -807,17 +807,17 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer progressBarLabel->setText(tr("Synchronizing with network...")); updateHeadersSyncProgressLabel(); break; - case BLOCK_SOURCE_DISK: + case BlockSource::DISK: if (header) { progressBarLabel->setText(tr("Indexing blocks on disk...")); } else { progressBarLabel->setText(tr("Processing blocks on disk...")); } break; - case BLOCK_SOURCE_REINDEX: + case BlockSource::REINDEX: progressBarLabel->setText(tr("Reindexing blocks on disk...")); break; - case BLOCK_SOURCE_NONE: + case BlockSource::NONE: if (header) { return; } diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index eaf2896bc3..40661d9ec3 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -177,13 +177,13 @@ bool ClientModel::inInitialBlockDownload() const enum BlockSource ClientModel::getBlockSource() const { if (fReindex) - return BLOCK_SOURCE_REINDEX; + return BlockSource::REINDEX; else if (fImporting) - return BLOCK_SOURCE_DISK; + return BlockSource::DISK; else if (getNumConnections() > 0) - return BLOCK_SOURCE_NETWORK; + return BlockSource::NETWORK; - return BLOCK_SOURCE_NONE; + return BlockSource::NONE; } void ClientModel::setNetworkActive(bool active) diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h index 99ec2365a9..1118bc31b3 100644 --- a/src/qt/clientmodel.h +++ b/src/qt/clientmodel.h @@ -20,11 +20,11 @@ QT_BEGIN_NAMESPACE class QTimer; QT_END_NAMESPACE -enum BlockSource { - BLOCK_SOURCE_NONE, - BLOCK_SOURCE_REINDEX, - BLOCK_SOURCE_DISK, - BLOCK_SOURCE_NETWORK +enum class BlockSource { + NONE, + REINDEX, + DISK, + NETWORK }; enum NumConnections { diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 16ef27a36a..7b653a99da 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -137,15 +137,6 @@ void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent) widget->setCheckValidator(new BitcoinAddressCheckValidator(parent)); } -void setupAmountWidget(QLineEdit *widget, QWidget *parent) -{ - QDoubleValidator *amountValidator = new QDoubleValidator(parent); - amountValidator->setDecimals(8); - amountValidator->setBottom(0.0); - widget->setValidator(amountValidator); - widget->setAlignment(Qt::AlignRight|Qt::AlignVCenter); -} - bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out) { // return if URI is not valid or is no bitcoin: URI diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index 4b856986da..bbbeaf2c43 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -40,9 +40,8 @@ namespace GUIUtil // Return a monospace font QFont fixedPitchFont(); - // Set up widgets for address and 
amounts + // Set up widget for address void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent); - void setupAmountWidget(QLineEdit *widget, QWidget *parent); // Parse "bitcoin:" URI into recipient object, return true on successful parsing bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out); diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index 65e7775f2b..c19e6aae78 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -77,7 +77,7 @@ HelpMessageDialog::HelpMessageDialog(QWidget *parent, bool about) : cursor.insertText(header); cursor.insertBlock(); - std::string strUsage = HelpMessage(HMM_BITCOIN_QT); + std::string strUsage = HelpMessage(HelpMessageMode::BITCOIN_QT); const bool showDebug = gArgs.GetBoolArg("-help-debug", false); strUsage += HelpMessageGroup(tr("UI Options:").toStdString()); if (showDebug) { diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 555a21e8c5..795302be58 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -748,11 +748,7 @@ int WalletModel::getDefaultConfirmTarget() const QString WalletModel::getWalletName() const { LOCK(wallet->cs_wallet); - QString walletName = QString::fromStdString(wallet->GetName()); - if (walletName.endsWith(".dat")) { - walletName.truncate(walletName.size() - 4); - } - return walletName; + return QString::fromStdString(wallet->GetName()); } bool WalletModel::isMultiwallet() diff --git a/src/rest.cpp b/src/rest.cpp index f47b40343b..5871b554a6 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -24,21 +24,21 @@ static const size_t MAX_GETUTXOS_OUTPOINTS = 15; //allow a max of 15 outpoints to be queried at once -enum RetFormat { - RF_UNDEF, - RF_BINARY, - RF_HEX, - RF_JSON, +enum class RetFormat { + UNDEF, + BINARY, + HEX, + JSON, }; static const struct { enum RetFormat rf; const char* name; } rf_names[] = { - {RF_UNDEF, ""}, - {RF_BINARY, "bin"}, - {RF_HEX, "hex"}, - {RF_JSON, "json"}, + {RetFormat::UNDEF, ""}, + {RetFormat::BINARY, "bin"}, + {RetFormat::HEX, "hex"}, + {RetFormat::JSON, "json"}, }; struct CCoin { @@ -162,20 +162,20 @@ static bool rest_headers(HTTPRequest* req, } switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { std::string binaryHeader = ssHeader.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryHeader); return true; } - case RF_HEX: { + case RetFormat::HEX: { std::string strHex = HexStr(ssHeader.begin(), ssHeader.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue jsonHeaders(UniValue::VARR); { LOCK(cs_main); @@ -227,21 +227,21 @@ static bool rest_block(HTTPRequest* req, ssBlock << block; switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { std::string binaryBlock = ssBlock.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryBlock); return true; } - case RF_HEX: { + case RetFormat::HEX: { std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue objBlock; { LOCK(cs_main); @@ -280,7 +280,7 @@ static bool rest_chaininfo(HTTPRequest* req, const std::string& strURIPart) const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { - case RF_JSON: { + case RetFormat::JSON: { JSONRPCRequest jsonRequest; jsonRequest.params = UniValue(UniValue::VARR); 
UniValue chainInfoObject = getblockchaininfo(jsonRequest); @@ -303,7 +303,7 @@ static bool rest_mempool_info(HTTPRequest* req, const std::string& strURIPart) const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { - case RF_JSON: { + case RetFormat::JSON: { UniValue mempoolInfoObject = mempoolInfoToJSON(); std::string strJSON = mempoolInfoObject.write() + "\n"; @@ -325,7 +325,7 @@ static bool rest_mempool_contents(HTTPRequest* req, const std::string& strURIPar const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { - case RF_JSON: { + case RetFormat::JSON: { UniValue mempoolObject = mempoolToJSON(true); std::string strJSON = mempoolObject.write() + "\n"; @@ -359,21 +359,21 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart) ssTx << tx; switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { std::string binaryTx = ssTx.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryTx); return true; } - case RF_HEX: { + case RetFormat::HEX: { std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, hashBlock, objTx); std::string strJSON = objTx.write() + "\n"; @@ -440,13 +440,13 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) } switch (rf) { - case RF_HEX: { + case RetFormat::HEX: { // convert hex to bin, continue then with bin part std::vector<unsigned char> strRequestV = ParseHex(strRequestMutable); strRequestMutable.assign(strRequestV.begin(), strRequestV.end()); } - case RF_BINARY: { + case RetFormat::BINARY: { try { //deserialize only if user sent a request if (strRequestMutable.size() > 0) @@ -466,7 +466,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) break; } - case RF_JSON: { + case RetFormat::JSON: { if (!fInputParsed) return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); break; @@ -487,33 +487,35 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) std::vector<bool> hits; bitmap.resize((vOutPoints.size() + 7) / 8); { - LOCK2(cs_main, mempool.cs); - - CCoinsView viewDummy; - CCoinsViewCache view(&viewDummy); - - CCoinsViewCache& viewChain = *pcoinsTip; - CCoinsViewMemPool viewMempool(&viewChain, mempool); - - if (fCheckMemPool) - view.SetBackend(viewMempool); // switch cache backend to db+mempool in case user likes to query mempool - - for (size_t i = 0; i < vOutPoints.size(); i++) { - bool hit = false; - Coin coin; - if (view.GetCoin(vOutPoints[i], coin) && !mempool.isSpent(vOutPoints[i])) { - hit = true; - outs.emplace_back(std::move(coin)); + auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool& mempool) { + for (const COutPoint& vOutPoint : vOutPoints) { + Coin coin; + bool hit = !mempool.isSpent(vOutPoint) && view.GetCoin(vOutPoint, coin); + hits.push_back(hit); + if (hit) outs.emplace_back(std::move(coin)); } + }; + + if (fCheckMemPool) { + // use db+mempool as cache backend in case user likes to query mempool + LOCK2(cs_main, mempool.cs); + CCoinsViewCache& viewChain = *pcoinsTip; + CCoinsViewMemPool viewMempool(&viewChain, mempool); + process_utxos(viewMempool, mempool); + } else { + LOCK(cs_main); // no need to lock mempool! 
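// process_utxos captures vOutPoints, outs and hits by reference and performs
// the per-outpoint lookups for both branches. Here, in the
// fCheckMemPool == false branch, it is given the chainstate view directly
// plus a default-constructed (empty) CTxMemPool, so mempool.isSpent() is
// trivially false and only confirmed unspent outputs are reported; the branch
// above instead layers CCoinsViewMemPool over the chainstate and passes the
// real mempool.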
+ process_utxos(*pcoinsTip, CTxMemPool()); + } - hits.push_back(hit); + for (size_t i = 0; i < hits.size(); ++i) { + const bool hit = hits[i]; bitmapStringRepresentation.append(hit ? "1" : "0"); // form a binary string representation (human-readable for json output) bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } switch (rf) { - case RF_BINARY: { + case RetFormat::BINARY: { // serialize data // use exact same output as mentioned in Bip64 CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); @@ -525,7 +527,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) return true; } - case RF_HEX: { + case RetFormat::HEX: { CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string strHex = HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) + "\n"; @@ -535,7 +537,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart) return true; } - case RF_JSON: { + case RetFormat::JSON: { UniValue objGetUTXOResponse(UniValue::VOBJ); // pack in some essentials diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 169caddc59..e15dad2f0b 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1120,20 +1120,20 @@ static UniValue BIP9SoftForkDesc(const Consensus::Params& consensusParams, Conse UniValue rv(UniValue::VOBJ); const ThresholdState thresholdState = VersionBitsTipState(consensusParams, id); switch (thresholdState) { - case THRESHOLD_DEFINED: rv.pushKV("status", "defined"); break; - case THRESHOLD_STARTED: rv.pushKV("status", "started"); break; - case THRESHOLD_LOCKED_IN: rv.pushKV("status", "locked_in"); break; - case THRESHOLD_ACTIVE: rv.pushKV("status", "active"); break; - case THRESHOLD_FAILED: rv.pushKV("status", "failed"); break; + case ThresholdState::DEFINED: rv.pushKV("status", "defined"); break; + case ThresholdState::STARTED: rv.pushKV("status", "started"); break; + case ThresholdState::LOCKED_IN: rv.pushKV("status", "locked_in"); break; + case ThresholdState::ACTIVE: rv.pushKV("status", "active"); break; + case ThresholdState::FAILED: rv.pushKV("status", "failed"); break; } - if (THRESHOLD_STARTED == thresholdState) + if (ThresholdState::STARTED == thresholdState) { rv.pushKV("bit", consensusParams.vDeployments[id].bit); } rv.pushKV("startTime", consensusParams.vDeployments[id].nStartTime); rv.pushKV("timeout", consensusParams.vDeployments[id].nTimeout); rv.pushKV("since", VersionBitsTipStateSinceHeight(consensusParams, id)); - if (THRESHOLD_STARTED == thresholdState) + if (ThresholdState::STARTED == thresholdState) { UniValue statsUV(UniValue::VOBJ); BIP9Stats statsStruct = VersionBitsTipStatistics(consensusParams, id); diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 0537628763..06882c0dfd 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -532,7 +532,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request) pblock->nNonce = 0; // NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration - const bool fPreSegWit = (THRESHOLD_ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache)); + const bool fPreSegWit = (ThresholdState::ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache)); UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); @@ -593,15 +593,15 @@ UniValue getblocktemplate(const 
JSONRPCRequest& request) Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); switch (state) { - case THRESHOLD_DEFINED: - case THRESHOLD_FAILED: + case ThresholdState::DEFINED: + case ThresholdState::FAILED: // Not exposed to GBT at all break; - case THRESHOLD_LOCKED_IN: + case ThresholdState::LOCKED_IN: // Ensure bit is set in block version pblock->nVersion |= VersionBitsMask(consensusParams, pos); // FALL THROUGH to get vbavailable set... - case THRESHOLD_STARTED: + case ThresholdState::STARTED: { const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.pushKV(gbt_vb_name(pos), consensusParams.vDeployments[pos].bit); @@ -613,7 +613,7 @@ UniValue getblocktemplate(const JSONRPCRequest& request) } break; } - case THRESHOLD_ACTIVE: + case ThresholdState::ACTIVE: { // Add to rules only const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 20bfd3f355..77040f75fd 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1023,18 +1023,18 @@ UniValue signrawtransaction(const JSONRPCRequest& request) new_request.params.push_back(request.params[1]); new_request.params.push_back(request.params[3]); return signrawtransactionwithkey(new_request); - } - // Otherwise sign with the wallet which does not take a privkeys parameter + } else { #ifdef ENABLE_WALLET - else { + // Otherwise sign with the wallet which does not take a privkeys parameter new_request.params.push_back(request.params[0]); new_request.params.push_back(request.params[1]); new_request.params.push_back(request.params[3]); return signrawtransactionwithwallet(new_request); - } +#else + // If we have made it this far, then wallet is disabled and no private keys were given, so fail here. + throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available."); #endif - // If we have made it this far, then wallet is disabled and no private keys were given, so fail here. 
- throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available."); + } } UniValue sendrawtransaction(const JSONRPCRequest& request) diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 54995ef000..c7c3b1f0d3 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -367,7 +367,11 @@ void JSONRPCRequest::parse(const UniValue& valRequest) if (!valMethod.isStr()) throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string"); strMethod = valMethod.get_str(); - LogPrint(BCLog::RPC, "ThreadRPCServer method=%s\n", SanitizeString(strMethod)); + if (fLogIPs) + LogPrint(BCLog::RPC, "ThreadRPCServer method=%s user=%s peeraddr=%s\n", SanitizeString(strMethod), + this->authUser, this->peerAddr); + else + LogPrint(BCLog::RPC, "ThreadRPCServer method=%s user=%s\n", SanitizeString(strMethod), this->authUser); // Parse params UniValue valParams = find_value(request, "params"); diff --git a/src/rpc/server.h b/src/rpc/server.h index d25268a8ab..7fc300f554 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h @@ -45,6 +45,7 @@ public: bool fHelp; std::string URI; std::string authUser; + std::string peerAddr; JSONRPCRequest() : id(NullUniValue), params(NullUniValue), fHelp(false) {} void parse(const UniValue& valRequest); diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index ffaba393c0..07b2292d46 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -219,7 +219,7 @@ bool static CheckPubKeyEncoding(const valtype &vchPubKey, unsigned int flags, co return set_error(serror, SCRIPT_ERR_PUBKEYTYPE); } // Only compressed keys are accepted in segwit - if ((flags & SCRIPT_VERIFY_WITNESS_PUBKEYTYPE) != 0 && sigversion == SIGVERSION_WITNESS_V0 && !IsCompressedPubKey(vchPubKey)) { + if ((flags & SCRIPT_VERIFY_WITNESS_PUBKEYTYPE) != 0 && sigversion == SigVersion::WITNESS_V0 && !IsCompressedPubKey(vchPubKey)) { return set_error(serror, SCRIPT_ERR_WITNESS_PUBKEYTYPE); } return true; @@ -443,7 +443,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& if (stack.size() < 1) return set_error(serror, SCRIPT_ERR_UNBALANCED_CONDITIONAL); valtype& vch = stacktop(-1); - if (sigversion == SIGVERSION_WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { + if (sigversion == SigVersion::WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { if (vch.size() > 1) return set_error(serror, SCRIPT_ERR_MINIMALIF); if (vch.size() == 1 && vch[0] != 1) @@ -890,7 +890,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& CScript scriptCode(pbegincodehash, pend); // Drop the signature in pre-segwit scripts but not segwit scripts - if (sigversion == SIGVERSION_BASE) { + if (sigversion == SigVersion::BASE) { scriptCode.FindAndDelete(CScript(vchSig)); } @@ -954,7 +954,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& for (int k = 0; k < nSigsCount; k++) { valtype& vchSig = stacktop(-isig-k); - if (sigversion == SIGVERSION_BASE) { + if (sigversion == SigVersion::BASE) { scriptCode.FindAndDelete(CScript(vchSig)); } } @@ -1182,7 +1182,7 @@ uint256 SignatureHash(const CScript& scriptCode, const CTransaction& txTo, unsig { assert(nIn < txTo.vin.size()); - if (sigversion == SIGVERSION_WITNESS_V0) { + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; uint256 hashOutputs; @@ -1396,7 +1396,7 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, return set_error(serror, SCRIPT_ERR_PUSH_SIZE); } - if (!EvalScript(stack, scriptPubKey, flags, 
checker, SIGVERSION_WITNESS_V0, serror)) { + if (!EvalScript(stack, scriptPubKey, flags, checker, SigVersion::WITNESS_V0, serror)) { return false; } @@ -1423,12 +1423,12 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C } std::vector<std::vector<unsigned char> > stack, stackCopy; - if (!EvalScript(stack, scriptSig, flags, checker, SIGVERSION_BASE, serror)) + if (!EvalScript(stack, scriptSig, flags, checker, SigVersion::BASE, serror)) // serror is set return false; if (flags & SCRIPT_VERIFY_P2SH) stackCopy = stack; - if (!EvalScript(stack, scriptPubKey, flags, checker, SIGVERSION_BASE, serror)) + if (!EvalScript(stack, scriptPubKey, flags, checker, SigVersion::BASE, serror)) // serror is set return false; if (stack.empty()) @@ -1474,7 +1474,7 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C CScript pubKey2(pubKeySerialized.begin(), pubKeySerialized.end()); popstack(stack); - if (!EvalScript(stack, pubKey2, flags, checker, SIGVERSION_BASE, serror)) + if (!EvalScript(stack, pubKey2, flags, checker, SigVersion::BASE, serror)) // serror is set return false; if (stack.empty()) diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 4dad6b44c5..bb7750d783 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -123,10 +123,10 @@ struct PrecomputedTransactionData explicit PrecomputedTransactionData(const CTransaction& tx); }; -enum SigVersion +enum class SigVersion { - SIGVERSION_BASE = 0, - SIGVERSION_WITNESS_V0 = 1, + BASE = 0, + WITNESS_V0 = 1, }; uint256 SignatureHash(const CScript &scriptCode, const CTransaction& txTo, unsigned int nIn, int nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr); diff --git a/src/script/ismine.cpp b/src/script/ismine.cpp index 35d794b983..05bc5e9bd6 100644 --- a/src/script/ismine.cpp +++ b/src/script/ismine.cpp @@ -61,7 +61,7 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& break; case TX_PUBKEY: keyID = CPubKey(vSolutions[0]).GetID(); - if (sigversion != SIGVERSION_BASE && vSolutions[0].size() != 33) { + if (sigversion != SigVersion::BASE && vSolutions[0].size() != 33) { isInvalid = true; return ISMINE_NO; } @@ -76,14 +76,14 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& // This also applies to the P2WSH case. 
break; } - isminetype ret = ::IsMine(keystore, GetScriptForDestination(CKeyID(uint160(vSolutions[0]))), isInvalid, SIGVERSION_WITNESS_V0); + isminetype ret = ::IsMine(keystore, GetScriptForDestination(CKeyID(uint160(vSolutions[0]))), isInvalid, SigVersion::WITNESS_V0); if (ret == ISMINE_SPENDABLE || ret == ISMINE_WATCH_SOLVABLE || (ret == ISMINE_NO && isInvalid)) return ret; break; } case TX_PUBKEYHASH: keyID = CKeyID(uint160(vSolutions[0])); - if (sigversion != SIGVERSION_BASE) { + if (sigversion != SigVersion::BASE) { CPubKey pubkey; if (keystore.GetPubKey(keyID, pubkey) && !pubkey.IsCompressed()) { isInvalid = true; @@ -114,7 +114,7 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& CScriptID scriptID = CScriptID(hash); CScript subscript; if (keystore.GetCScript(scriptID, subscript)) { - isminetype ret = IsMine(keystore, subscript, isInvalid, SIGVERSION_WITNESS_V0); + isminetype ret = IsMine(keystore, subscript, isInvalid, SigVersion::WITNESS_V0); if (ret == ISMINE_SPENDABLE || ret == ISMINE_WATCH_SOLVABLE || (ret == ISMINE_NO && isInvalid)) return ret; } @@ -129,7 +129,7 @@ isminetype IsMine(const CKeyStore &keystore, const CScript& scriptPubKey, bool& // them) enable spend-out-from-under-you attacks, especially // in shared-wallet situations. std::vector<valtype> keys(vSolutions.begin()+1, vSolutions.begin()+vSolutions.size()-1); - if (sigversion != SIGVERSION_BASE) { + if (sigversion != SigVersion::BASE) { for (size_t i = 0; i < keys.size(); i++) { if (keys[i].size() != 33) { isInvalid = true; diff --git a/src/script/ismine.h b/src/script/ismine.h index c1338c3a8e..f93a66e35a 100644 --- a/src/script/ismine.h +++ b/src/script/ismine.h @@ -31,11 +31,11 @@ typedef uint8_t isminefilter; /* isInvalid becomes true when the script is found invalid by consensus or policy. This will terminate the recursion * and return ISMINE_NO immediately, as an invalid script should never be considered as "mine". This is needed as * different SIGVERSION may have different network rules. 
Currently the only use of isInvalid is indicate uncompressed - * keys in SIGVERSION_WITNESS_V0 script, but could also be used in similar cases in the future + * keys in SigVersion::WITNESS_V0 script, but could also be used in similar cases in the future */ -isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, bool& isInvalid, SigVersion = SIGVERSION_BASE); -isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, SigVersion = SIGVERSION_BASE); -isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, bool& isInvalid, SigVersion = SIGVERSION_BASE); -isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, SigVersion = SIGVERSION_BASE); +isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, bool& isInvalid, SigVersion = SigVersion::BASE); +isminetype IsMine(const CKeyStore& keystore, const CScript& scriptPubKey, SigVersion = SigVersion::BASE); +isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, bool& isInvalid, SigVersion = SigVersion::BASE); +isminetype IsMine(const CKeyStore& keystore, const CTxDestination& dest, SigVersion = SigVersion::BASE); #endif // BITCOIN_SCRIPT_ISMINE_H diff --git a/src/script/sign.cpp b/src/script/sign.cpp index baa712dc2d..910bb39ce6 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -6,7 +6,6 @@ #include <script/sign.h> #include <key.h> -#include <keystore.h> #include <policy/policy.h> #include <primitives/transaction.h> #include <script/standard.h> @@ -15,16 +14,16 @@ typedef std::vector<unsigned char> valtype; -TransactionSignatureCreator::TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : BaseSignatureCreator(keystoreIn), txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn) {} +TransactionSignatureCreator::TransactionSignatureCreator(const SigningProvider* provider, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : BaseSignatureCreator(provider), txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn) {} bool TransactionSignatureCreator::CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& address, const CScript& scriptCode, SigVersion sigversion) const { CKey key; - if (!keystore->GetKey(address, key)) + if (!m_provider->GetKey(address, key)) return false; // Signing with uncompressed keys is disabled in witness scripts - if (sigversion == SIGVERSION_WITNESS_V0 && !key.IsCompressed()) + if (sigversion == SigVersion::WITNESS_V0 && !key.IsCompressed()) return false; uint256 hash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion); @@ -91,12 +90,12 @@ static bool SignStep(const BaseSignatureCreator& creator, const CScript& scriptP else { CPubKey vch; - creator.KeyStore().GetPubKey(keyID, vch); + creator.Provider().GetPubKey(keyID, vch); ret.push_back(ToByteVector(vch)); } return true; case TX_SCRIPTHASH: - if (creator.KeyStore().GetCScript(uint160(vSolutions[0]), scriptRet)) { + if (creator.Provider().GetCScript(uint160(vSolutions[0]), scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); return true; } @@ -112,7 +111,7 @@ static bool SignStep(const BaseSignatureCreator& creator, const CScript& scriptP case TX_WITNESS_V0_SCRIPTHASH: CRIPEMD160().Write(&vSolutions[0][0], vSolutions[0].size()).Finalize(h160.begin()); - if (creator.KeyStore().GetCScript(h160, 
scriptRet)) { + if (creator.Provider().GetCScript(h160, scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); return true; } @@ -142,7 +141,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu { std::vector<valtype> result; txnouttype whichType; - bool solved = SignStep(creator, fromPubKey, result, whichType, SIGVERSION_BASE); + bool solved = SignStep(creator, fromPubKey, result, whichType, SigVersion::BASE); bool P2SH = false; CScript subscript; sigdata.scriptWitness.stack.clear(); @@ -153,7 +152,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu // the final scriptSig is the signatures from that // and then the serialized subscript: subscript = CScript(result[0].begin(), result[0].end()); - solved = solved && SignStep(creator, subscript, result, whichType, SIGVERSION_BASE) && whichType != TX_SCRIPTHASH; + solved = solved && SignStep(creator, subscript, result, whichType, SigVersion::BASE) && whichType != TX_SCRIPTHASH; P2SH = true; } @@ -162,7 +161,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu CScript witnessscript; witnessscript << OP_DUP << OP_HASH160 << ToByteVector(result[0]) << OP_EQUALVERIFY << OP_CHECKSIG; txnouttype subType; - solved = solved && SignStep(creator, witnessscript, result, subType, SIGVERSION_WITNESS_V0); + solved = solved && SignStep(creator, witnessscript, result, subType, SigVersion::WITNESS_V0); sigdata.scriptWitness.stack = result; result.clear(); } @@ -170,7 +169,7 @@ bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& fromPu { CScript witnessscript(result[0].begin(), result[0].end()); txnouttype subType; - solved = solved && SignStep(creator, witnessscript, result, subType, SIGVERSION_WITNESS_V0) && subType != TX_SCRIPTHASH && subType != TX_WITNESS_V0_SCRIPTHASH && subType != TX_WITNESS_V0_KEYHASH; + solved = solved && SignStep(creator, witnessscript, result, subType, SigVersion::WITNESS_V0) && subType != TX_SCRIPTHASH && subType != TX_WITNESS_V0_SCRIPTHASH && subType != TX_WITNESS_V0_KEYHASH; result.push_back(std::vector<unsigned char>(witnessscript.begin(), witnessscript.end())); sigdata.scriptWitness.stack = result; result.clear(); @@ -206,12 +205,12 @@ void UpdateTransaction(CMutableTransaction& tx, unsigned int nIn, const Signatur UpdateInput(tx.vin[nIn], data); } -bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType) +bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType) { assert(nIn < txTo.vin.size()); CTransaction txToConst(txTo); - TransactionSignatureCreator creator(&keystore, &txToConst, nIn, amount, nHashType); + TransactionSignatureCreator creator(&provider, &txToConst, nIn, amount, nHashType); SignatureData sigdata; bool ret = ProduceSignature(creator, fromPubKey, sigdata); @@ -219,14 +218,14 @@ bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutabl return ret; } -bool SignSignature(const CKeyStore &keystore, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType) +bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType) { assert(nIn < txTo.vin.size()); CTxIn& txin = txTo.vin[nIn]; assert(txin.prevout.n < txFrom.vout.size()); const 
CTxOut& txout = txFrom.vout[txin.prevout.n]; - return SignSignature(keystore, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType); + return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType); } static std::vector<valtype> CombineMultisig(const CScript& scriptPubKey, const BaseSignatureChecker& checker, @@ -294,7 +293,7 @@ struct Stacks Stacks() {} explicit Stacks(const std::vector<valtype>& scriptSigStack_) : script(scriptSigStack_), witness() {} explicit Stacks(const SignatureData& data) : witness(data.scriptWitness.stack) { - EvalScript(script, data.scriptSig, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SIGVERSION_BASE); + EvalScript(script, data.scriptSig, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SigVersion::BASE); } SignatureData Output() const { @@ -370,7 +369,7 @@ static Stacks CombineSignatures(const CScript& scriptPubKey, const BaseSignature sigs2.witness.pop_back(); sigs2.script = sigs2.witness; sigs2.witness.clear(); - Stacks result = CombineSignatures(pubKey2, checker, txType2, vSolutions2, sigs1, sigs2, SIGVERSION_WITNESS_V0); + Stacks result = CombineSignatures(pubKey2, checker, txType2, vSolutions2, sigs1, sigs2, SigVersion::WITNESS_V0); result.witness = result.script; result.script.clear(); result.witness.push_back(valtype(pubKey2.begin(), pubKey2.end())); @@ -388,7 +387,7 @@ SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignature std::vector<std::vector<unsigned char> > vSolutions; Solver(scriptPubKey, txType, vSolutions); - return CombineSignatures(scriptPubKey, checker, txType, vSolutions, Stacks(scriptSig1), Stacks(scriptSig2), SIGVERSION_BASE).Output(); + return CombineSignatures(scriptPubKey, checker, txType, vSolutions, Stacks(scriptSig1), Stacks(scriptSig2), SigVersion::BASE).Output(); } namespace { @@ -427,13 +426,13 @@ bool DummySignatureCreator::CreateSig(std::vector<unsigned char>& vchSig, const return true; } -bool IsSolvable(const CKeyStore& store, const CScript& script) +bool IsSolvable(const SigningProvider& provider, const CScript& script) { // This check is to make sure that the script we created can actually be solved for and signed by us // if we were to have the private keys. This is just to make sure that the script is valid and that, // if found in a transaction, we would still accept and relay that transaction. In particular, // it will reject witness outputs that require signing with an uncompressed public key. - DummySignatureCreator creator(&store); + DummySignatureCreator creator(&provider); SignatureData sigs; // Make sure that STANDARD_SCRIPT_VERIFY_FLAGS includes SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, the most // important property this function is designed to test for. diff --git a/src/script/sign.h b/src/script/sign.h index 2c749521cd..c301f0544f 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -8,21 +8,32 @@ #include <script/interpreter.h> +class CKey; class CKeyID; -class CKeyStore; class CScript; +class CScriptID; class CTransaction; struct CMutableTransaction; +/** An interface to be implemented by keystores that support signing. */ +class SigningProvider +{ +public: + virtual ~SigningProvider() {} + virtual bool GetCScript(const CScriptID &scriptid, CScript& script) const =0; + virtual bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const =0; + virtual bool GetKey(const CKeyID &address, CKey& key) const =0; +}; + /** Virtual base class for signature creators. 
*/ class BaseSignatureCreator { protected: - const CKeyStore* keystore; + const SigningProvider* m_provider; public: - explicit BaseSignatureCreator(const CKeyStore* keystoreIn) : keystore(keystoreIn) {} - const CKeyStore& KeyStore() const { return *keystore; }; + explicit BaseSignatureCreator(const SigningProvider* provider) : m_provider(provider) {} + const SigningProvider& Provider() const { return *m_provider; } virtual ~BaseSignatureCreator() {} virtual const BaseSignatureChecker& Checker() const =0; @@ -39,7 +50,7 @@ class TransactionSignatureCreator : public BaseSignatureCreator { const TransactionSignatureChecker checker; public: - TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL); + TransactionSignatureCreator(const SigningProvider* provider, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL); const BaseSignatureChecker& Checker() const override { return checker; } bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override; }; @@ -48,13 +59,13 @@ class MutableTransactionSignatureCreator : public TransactionSignatureCreator { CTransaction tx; public: - MutableTransactionSignatureCreator(const CKeyStore* keystoreIn, const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : TransactionSignatureCreator(keystoreIn, &tx, nInIn, amountIn, nHashTypeIn), tx(*txToIn) {} + MutableTransactionSignatureCreator(const SigningProvider* provider, const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : TransactionSignatureCreator(provider, &tx, nInIn, amountIn, nHashTypeIn), tx(*txToIn) {} }; /** A signature creator that just produces 72-byte empty signatures. */ class DummySignatureCreator : public BaseSignatureCreator { public: - explicit DummySignatureCreator(const CKeyStore* keystoreIn) : BaseSignatureCreator(keystoreIn) {} + explicit DummySignatureCreator(const SigningProvider* provider) : BaseSignatureCreator(provider) {} const BaseSignatureChecker& Checker() const override; bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override; }; @@ -71,8 +82,8 @@ struct SignatureData { bool ProduceSignature(const BaseSignatureCreator& creator, const CScript& scriptPubKey, SignatureData& sigdata); /** Produce a script signature for a transaction. */ -bool SignSignature(const CKeyStore &keystore, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType); -bool SignSignature(const CKeyStore& keystore, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType); +bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType); +bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType); /** Combine two script signatures using a generic signature checker, intelligently, possibly with OP_0 placeholders. 
*/ SignatureData CombineSignatures(const CScript& scriptPubKey, const BaseSignatureChecker& checker, const SignatureData& scriptSig1, const SignatureData& scriptSig2); @@ -84,8 +95,8 @@ void UpdateInput(CTxIn& input, const SignatureData& data); /* Check whether we know how to sign for an output like this, assuming we * have all private keys. While this function does not need private keys, the passed - * keystore is used to look up public keys and redeemscripts by hash. + * provider is used to look up public keys and redeemscripts by hash. * Solvability is unrelated to whether we consider this output to be ours. */ -bool IsSolvable(const CKeyStore& store, const CScript& script); +bool IsSolvable(const SigningProvider& provider, const CScript& script); #endif // BITCOIN_SCRIPT_SIGN_H diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json index f8a1347c31..abb46fe533 100644 --- a/src/test/data/tx_invalid.json +++ b/src/test/data/tx_invalid.json @@ -321,7 +321,7 @@ ["where the pubkey is obtained through key recovery with sig and the wrong sighash."], ["This is to show that FindAndDelete is applied only to non-segwit scripts"], ["To show that the tests are 'correctly wrong', they should pass by modifying OP_CHECKSIG under interpreter.cpp"], -["by replacing (sigversion == SIGVERSION_BASE) with (sigversion != SIGVERSION_BASE)"], +["by replacing (sigversion == SigVersion::BASE) with (sigversion != SigVersion::BASE)"], ["Non-segwit: wrong sighash (without FindAndDelete) = 1ba1fe3bc90c5d1265460e684ce6774e324f0fabdf67619eda729e64e8b6bc08"], [[["f18783ace138abac5d3a7a5cf08e88fe6912f267ef936452e0c27d090621c169", 7000, "HASH160 0x14 0x0c746489e2d83cdbb5b90b432773342ba809c134 EQUAL", 200000]], "010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f1581b0000b64830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012103b12a1ec8428fc74166926318c15e17408fea82dbb157575e16a8c365f546248f4aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e01ffffffff0101000000000000000000000000", "P2SH,WITNESS"], @@ -332,7 +332,7 @@ ["Script is 2 CHECKMULTISIGVERIFY <sig1> <sig2> DROP"], ["52af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175"], ["Signature is 0 <sig1> <sig2> 2 <key1> <key2>"], -["Should pass by replacing (sigversion == SIGVERSION_BASE) with (sigversion != SIGVERSION_BASE) under OP_CHECKMULTISIG"], +["Should pass by replacing (sigversion == SigVersion::BASE) with (sigversion != SigVersion::BASE) under OP_CHECKMULTISIG"], ["Non-segwit: wrong sighash (without FindAndDelete) = 4bc6a53e8e16ef508c19e38bba08831daba85228b0211f323d4cb0999cf2a5e8"], [[["9628667ad48219a169b41b020800162287d2c0f713c04157e95c484a8dcb7592", 7000, "HASH160 0x14 0x5748407f5ca5cdca53ba30b79040260770c9ee1b EQUAL", 200000]], 
"01000000019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a662896581b0000fd6f01004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c039596015221023fd5dd42b44769c5653cbc5947ff30ab8871f240ad0c0e7432aefe84b5b4ff3421039d52178dbde360b83f19cf348deb04fa8360e1bf5634577be8e50fafc2b0e4ef4c9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175ffffffff0101000000000000000000000000", "P2SH,WITNESS"], diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp index 72d9d3d75c..b593f9633c 100644 --- a/src/test/multisig_tests.cpp +++ b/src/test/multisig_tests.cpp @@ -21,7 +21,7 @@ BOOST_FIXTURE_TEST_SUITE(multisig_tests, BasicTestingSetup) CScript sign_multisig(CScript scriptPubKey, std::vector<CKey> keys, CTransaction transaction, int whichIn) { - uint256 hash = SignatureHash(scriptPubKey, transaction, whichIn, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(scriptPubKey, transaction, whichIn, SIGHASH_ALL, 0, SigVersion::BASE); CScript result; result << OP_0; // CHECKMULTISIG bug workaround diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index c7a497f3a7..46a2d13745 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -267,10 +267,10 @@ struct KeyData } }; -enum WitnessMode { - WITNESS_NONE, - WITNESS_PKH, - WITNESS_SH +enum class WitnessMode { + NONE, + PKH, + SH }; class TestBuilder @@ -308,15 +308,15 @@ private: } public: - TestBuilder(const CScript& script_, const std::string& comment_, int flags_, bool P2SH = false, WitnessMode wm = WITNESS_NONE, int witnessversion = 0, CAmount nValue_ = 0) : script(script_), havePush(false), comment(comment_), flags(flags_), scriptError(SCRIPT_ERR_OK), nValue(nValue_) + TestBuilder(const CScript& script_, const std::string& comment_, int flags_, bool P2SH = false, WitnessMode wm = WitnessMode::NONE, int witnessversion = 0, CAmount nValue_ = 0) : script(script_), havePush(false), comment(comment_), flags(flags_), scriptError(SCRIPT_ERR_OK), nValue(nValue_) { CScript scriptPubKey = script; - if (wm == WITNESS_PKH) { + if (wm == WitnessMode::PKH) { uint160 hash; CHash160().Write(&script[1], script.size() - 1).Finalize(hash.begin()); script = CScript() << OP_DUP << OP_HASH160 << ToByteVector(hash) << OP_EQUALVERIFY << OP_CHECKSIG; scriptPubKey = CScript() << witnessversion << ToByteVector(hash); - } else if (wm == WITNESS_SH) { + } else if (wm == WitnessMode::SH) { witscript = scriptPubKey; uint256 hash; CSHA256().Write(&witscript[0], witscript.size()).Finalize(hash.begin()); @@ -361,7 +361,7 @@ public: return *this; } - TestBuilder& PushSig(const CKey& key, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SIGVERSION_BASE, CAmount amount = 0) + TestBuilder& PushSig(const CKey& key, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SigVersion::BASE, CAmount amount = 0) { uint256 hash = SignatureHash(script, spendTx, 0, nHashType, amount, sigversion); std::vector<unsigned char> vchSig, r, s; @@ -379,7 +379,7 @@ public: 
return *this; } - TestBuilder& PushWitSig(const CKey& key, CAmount amount = -1, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SIGVERSION_WITNESS_V0) + TestBuilder& PushWitSig(const CKey& key, CAmount amount = -1, int nHashType = SIGHASH_ALL, unsigned int lenR = 32, unsigned int lenS = 32, SigVersion sigversion = SigVersion::WITNESS_V0) { if (amount == -1) amount = nValue; @@ -747,57 +747,57 @@ BOOST_AUTO_TEST_CASE(script_build) ).PushSig(keys.key0).PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH, + "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH, + "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2WSH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "Basic P2WSH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2WPKH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "Basic P2WPKH with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH + "Basic P2SH(P2WSH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2SH(P2WPKH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH + "Basic P2SH(P2WPKH) with the wrong key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2WSH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "Basic P2WSH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem()); 
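// In the calls above and below, the fifth TestBuilder argument is the scoped
// WitnessMode (NONE/PKH/SH) selecting how the script under test is wrapped:
// PKH hashes the key into a P2WPKH-style program, SH hashes the whole script
// into a P2WSH-style program, and NONE (the default) leaves it bare; the
// remaining arguments are the witness version and the spent amount. Unlike
// the old WITNESS_* values, WitnessMode::SH neither converts implicitly to
// int nor injects its enumerators into the enclosing scope, so a stray
// integer comparison now fails to compile instead of silently passing.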
tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2WPKH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "Basic P2WPKH with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WITNESS_SH + "Basic P2SH(P2WSH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WitnessMode::SH ).PushWitSig(keys.key0).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "Basic P2SH(P2WPKH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WITNESS_PKH + "Basic P2SH(P2WPKH) with the wrong key but no WITNESS", SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2WSH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "Basic P2WSH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 0).PushWitSig(keys.key0, 1).PushWitRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2WPKH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH, + "Basic P2WPKH with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH, 0, 0).PushWitSig(keys.key0, 1).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "Basic P2SH(P2WSH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 0).PushWitSig(keys.key0, 1).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2SH(P2WPKH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH, + "Basic P2SH(P2WPKH) with wrong value", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH, 0, 0).PushWitSig(keys.key0, 1).Push(keys.pubkey0).AsWit().PushRedeem().ScriptError(SCRIPT_ERR_EVAL_FALSE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), "P2WPKH with future witness version", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | - SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM, false, WITNESS_PKH, 1 + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM, false, WitnessMode::PKH, 1 ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM)); { CScript witscript = CScript() << ToByteVector(keys.pubkey0); @@ -810,22 +810,22 @@ BOOST_AUTO_TEST_CASE(script_build) ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_WRONG_LENGTH)); } tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "P2WSH with empty witness", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "P2WSH with empty witness", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_WITNESS_EMPTY)); { CScript witscript = CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG; 
tests.push_back(TestBuilder(witscript, - "P2WSH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH + "P2WSH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH ).PushWitSig(keys.key0).Push(witscript).DamagePush(0).AsWit().ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH)); } tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "P2WPKH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "P2WPKH with witness program mismatch", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().Push("0").AsWit().ScriptError(SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "P2WPKH with non-empty scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_PKH + "P2WPKH with non-empty scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().Num(11).ScriptError(SCRIPT_ERR_WITNESS_MALLEATED)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1), - "P2SH(P2WPKH) with superfluous push in scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_PKH + "P2SH(P2WPKH) with superfluous push in scriptSig", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::PKH ).PushWitSig(keys.key0).Push(keys.pubkey1).AsWit().Num(11).PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_MALLEATED_P2SH)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, "P2PK with witness", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH @@ -833,95 +833,95 @@ BOOST_AUTO_TEST_CASE(script_build) // Compressed keys should pass SCRIPT_VERIFY_WITNESS_PUBKEYTYPE tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG, - "Basic P2WSH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "Basic P2WSH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C), - "Basic P2WPKH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_PKH, + "Basic P2WPKH with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0C).Push(keys.pubkey0C).AsWit()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG, - "Basic P2SH(P2WSH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "Basic P2SH(P2WSH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C), - "Basic P2SH(P2WPKH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_PKH, + "Basic P2SH(P2WPKH) with compressed key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0C).Push(keys.pubkey0C).AsWit().PushRedeem()); // Testing uncompressed key in witness 
with SCRIPT_VERIFY_WITNESS_PUBKEYTYPE tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "Basic P2WSH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_PKH, + "Basic P2WPKH", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG, - "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "Basic P2SH(P2WSH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).PushWitSig(keys.key0).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0), - "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_PKH, + "Basic P2SH(P2WPKH)", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::PKH, 0, 1).PushWitSig(keys.key0).Push(keys.pubkey0).AsWit().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); // P2WSH 1-of-2 multisig with compressed keys tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | 
SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with compressed keys", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().PushRedeem()); // P2WSH 1-of-2 multisig with first key uncompressed tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | 
SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey0) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with first key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1C).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); // P2WSH 1-of-2 multisig with second key uncompressed tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG second key uncompressed and signing with the first key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with 
second key uncompressed and signing with the first key should pass as the uncompressed key is not used", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key0C).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem().PushRedeem()); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WITNESS_SH, + "P2WSH CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, false, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); tests.push_back(TestBuilder(CScript() << OP_1 << ToByteVector(keys.pubkey1) << ToByteVector(keys.pubkey0C) << OP_2 << OP_CHECKMULTISIG, - "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WITNESS_SH, + "P2SH(P2WSH) CHECKMULTISIG with second key uncompressed and signing with the second key", SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, true, WitnessMode::SH, 0, 1).Push(CScript()).AsWit().PushWitSig(keys.key1).PushWitRedeem().PushRedeem().ScriptError(SCRIPT_ERR_WITNESS_PUBKEYTYPE)); std::set<std::string> tests_set; @@ -1009,21 +1009,21 @@ BOOST_AUTO_TEST_CASE(script_PushData) ScriptError err; std::vector<std::vector<unsigned char> > directStack; - BOOST_CHECK(EvalScript(directStack, CScript(&direct[0], &direct[sizeof(direct)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(directStack, CScript(&direct[0], &direct[sizeof(direct)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); std::vector<std::vector<unsigned char> > pushdata1Stack; - BOOST_CHECK(EvalScript(pushdata1Stack, CScript(&pushdata1[0], &pushdata1[sizeof(pushdata1)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(pushdata1Stack, CScript(&pushdata1[0], &pushdata1[sizeof(pushdata1)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK(pushdata1Stack 
== directStack); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); std::vector<std::vector<unsigned char> > pushdata2Stack; - BOOST_CHECK(EvalScript(pushdata2Stack, CScript(&pushdata2[0], &pushdata2[sizeof(pushdata2)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(pushdata2Stack, CScript(&pushdata2[0], &pushdata2[sizeof(pushdata2)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK(pushdata2Stack == directStack); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); std::vector<std::vector<unsigned char> > pushdata4Stack; - BOOST_CHECK(EvalScript(pushdata4Stack, CScript(&pushdata4[0], &pushdata4[sizeof(pushdata4)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SIGVERSION_BASE, &err)); + BOOST_CHECK(EvalScript(pushdata4Stack, CScript(&pushdata4[0], &pushdata4[sizeof(pushdata4)]), SCRIPT_VERIFY_P2SH, BaseSignatureChecker(), SigVersion::BASE, &err)); BOOST_CHECK(pushdata4Stack == directStack); BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err)); } @@ -1031,7 +1031,7 @@ BOOST_AUTO_TEST_CASE(script_PushData) CScript sign_multisig(CScript scriptPubKey, std::vector<CKey> keys, CTransaction transaction) { - uint256 hash = SignatureHash(scriptPubKey, transaction, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(scriptPubKey, transaction, 0, SIGHASH_ALL, 0, SigVersion::BASE); CScript result; // @@ -1227,15 +1227,15 @@ BOOST_AUTO_TEST_CASE(script_combineSigs) // A couple of partially-signed versions: std::vector<unsigned char> sig1; - uint256 hash1 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash1 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(keys[0].Sign(hash1, sig1)); sig1.push_back(SIGHASH_ALL); std::vector<unsigned char> sig2; - uint256 hash2 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_NONE, 0, SIGVERSION_BASE); + uint256 hash2 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_NONE, 0, SigVersion::BASE); BOOST_CHECK(keys[1].Sign(hash2, sig2)); sig2.push_back(SIGHASH_NONE); std::vector<unsigned char> sig3; - uint256 hash3 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_SINGLE, 0, SIGVERSION_BASE); + uint256 hash3 = SignatureHash(scriptPubKey, txTo, 0, SIGHASH_SINGLE, 0, SigVersion::BASE); BOOST_CHECK(keys[2].Sign(hash3, sig3)); sig3.push_back(SIGHASH_SINGLE); diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index 32cd3a50b0..a2bd8998b1 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -138,7 +138,7 @@ BOOST_AUTO_TEST_CASE(sighash_test) uint256 sh, sho; sho = SignatureHashOld(scriptCode, txTo, nIn, nHashType); - sh = SignatureHash(scriptCode, txTo, nIn, nHashType, 0, SIGVERSION_BASE); + sh = SignatureHash(scriptCode, txTo, nIn, nHashType, 0, SigVersion::BASE); #if defined(PRINT_SIGHASH_JSON) CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << txTo; @@ -204,7 +204,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) continue; } - sh = SignatureHash(scriptCode, *tx, nIn, nHashType, 0, SIGVERSION_BASE); + sh = SignatureHash(scriptCode, *tx, nIn, nHashType, 0, SigVersion::BASE); BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest); } } diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index edfb35d155..b222392ee5 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -407,7 +407,7 @@ static CScript PushAll(const std::vector<valtype>& values) void ReplaceRedeemScript(CScript& script, const 
CScript& redeemScript) { std::vector<valtype> stack; - EvalScript(stack, script, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SIGVERSION_BASE); + EvalScript(stack, script, SCRIPT_VERIFY_STRICTENC, BaseSignatureChecker(), SigVersion::BASE); assert(stack.size() > 0); stack.back() = std::vector<unsigned char>(redeemScript.begin(), redeemScript.end()); script = PushAll(stack); diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 1aa54189b6..7087c26774 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -56,7 +56,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) // Sign: std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(scriptPubKey, spends[i], 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(scriptPubKey, spends[i], 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); spends[i].vin[0].scriptSig << vchSig; @@ -182,7 +182,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) // Sign, with a non-DER signature { std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(p2pk_scriptPubKey, spend_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(p2pk_scriptPubKey, spend_tx, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char) 0); // padding byte makes this non-DER vchSig.push_back((unsigned char)SIGHASH_ALL); @@ -256,7 +256,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) // Sign std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(spend_tx.vout[2].scriptPubKey, invalid_with_cltv_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(spend_tx.vout[2].scriptPubKey, invalid_with_cltv_tx, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 101; @@ -284,7 +284,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) // Sign std::vector<unsigned char> vchSig; - uint256 hash = SignatureHash(spend_tx.vout[3].scriptPubKey, invalid_with_csv_tx, 0, SIGHASH_ALL, 0, SIGVERSION_BASE); + uint256 hash = SignatureHash(spend_tx.vout[3].scriptPubKey, invalid_with_csv_tx, 0, SIGHASH_ALL, 0, SigVersion::BASE); BOOST_CHECK(coinbaseKey.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 101; diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp index 5d6f781404..92ef58e517 100644 --- a/src/test/versionbits_tests.cpp +++ b/src/test/versionbits_tests.cpp @@ -101,8 +101,8 @@ public: VersionBitsTester& TestDefined() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_DEFINED, strprintf("Test %i for DEFINED", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::DEFINED, strprintf("Test %i for DEFINED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? 
nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -112,8 +112,8 @@ public: VersionBitsTester& TestStarted() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_STARTED, strprintf("Test %i for STARTED", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::STARTED, strprintf("Test %i for STARTED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -123,8 +123,8 @@ public: VersionBitsTester& TestLockedIn() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_LOCKED_IN, strprintf("Test %i for LOCKED_IN", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::LOCKED_IN, strprintf("Test %i for LOCKED_IN", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -134,8 +134,8 @@ public: VersionBitsTester& TestActive() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; @@ -145,8 +145,8 @@ public: VersionBitsTester& TestFailed() { for (int i = 0; i < CHECKERS; i++) { if (InsecureRandBits(i) == 0) { - BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_FAILED, strprintf("Test %i for FAILED", num)); - BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); + BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == ThresholdState::FAILED, strprintf("Test %i for FAILED", num)); + BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? 
nullptr : vpblock.back()) == ThresholdState::ACTIVE, strprintf("Test %i for ACTIVE (always active)", num)); } } num++; diff --git a/src/txdb.cpp b/src/txdb.cpp index 91d6c98430..8550a7e889 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -147,7 +147,7 @@ size_t CCoinsViewDB::EstimateSize() const return db.EstimateSize(DB_COIN, (char)(DB_COIN+1)); } -CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) { +CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(gArgs.IsArgSet("-blocksdir") ? GetDataDir() / "blocks" / "index" : GetBlocksDir() / "index", nCacheSize, fMemory, fWipe) { } bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) { diff --git a/src/txmempool.cpp b/src/txmempool.cpp index d1edde284f..cc639288d3 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -339,7 +339,7 @@ CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator) : nCheckFrequency = 0; } -bool CTxMemPool::isSpent(const COutPoint& outpoint) +bool CTxMemPool::isSpent(const COutPoint& outpoint) const { LOCK(cs); return mapNextTx.count(outpoint); diff --git a/src/txmempool.h b/src/txmempool.h index 08a3421015..699f6b554b 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -550,7 +550,7 @@ public: void _clear(); //lock free bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); void queryHashes(std::vector<uint256>& vtxid); - bool isSpent(const COutPoint& outpoint); + bool isSpent(const COutPoint& outpoint) const; unsigned int GetTransactionsUpdated() const; void AddTransactionsUpdated(unsigned int n); /** diff --git a/src/util.cpp b/src/util.cpp index 494d5c4eaf..dbf9065113 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -613,10 +613,41 @@ fs::path GetDefaultDataDir() #endif } +static fs::path g_blocks_path_cached; +static fs::path g_blocks_path_cache_net_specific; static fs::path pathCached; static fs::path pathCachedNetSpecific; static CCriticalSection csPathCached; +const fs::path &GetBlocksDir(bool fNetSpecific) +{ + + LOCK(csPathCached); + + fs::path &path = fNetSpecific ? g_blocks_path_cache_net_specific : g_blocks_path_cached; + + // This can be called during exceptions by LogPrintf(), so we cache the + // value so we don't have to do memory allocations after that. 
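For context on the new GetBlocksDir() introduced above: it mirrors the existing GetDataDir() pattern of resolving the path once under a lock and caching it, so later calls (including ones made while LogPrintf handles an exception) never allocate. The sketch below is a minimal standalone restatement of that pattern only; it uses std::filesystem and std::mutex in place of the project's fs wrapper and CCriticalSection, BlocksDirOption() is an invented stand-in for the gArgs lookup, and "datadir" stands in for GetDataDir(false), so none of these names are from the codebase.

    #include <filesystem>
    #include <mutex>
    #include <string>

    namespace fs = std::filesystem;

    // Invented stand-in for gArgs.GetArg("-blocksdir", ""); empty means "not set".
    static std::string BlocksDirOption() { return std::string(); }

    const fs::path& CachedBlocksDir()
    {
        static std::mutex m;
        static fs::path cached; // resolved once, reused afterwards

        std::lock_guard<std::mutex> lock(m);
        if (!cached.empty()) return cached; // fast path: no allocation after the first call

        const std::string opt = BlocksDirOption();
        if (!opt.empty()) {
            cached = fs::absolute(opt); // user-supplied -blocksdir
            if (!fs::is_directory(cached)) {
                cached.clear();         // invalid override: report an empty path, do not cache
                return cached;
            }
        } else {
            cached = fs::path("datadir"); // placeholder for GetDataDir(false)
        }
        cached /= "blocks";
        fs::create_directories(cached);
        return cached;
    }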
+ if (!path.empty()) + return path; + + if (gArgs.IsArgSet("-blocksdir")) { + path = fs::system_complete(gArgs.GetArg("-blocksdir", "")); + if (!fs::is_directory(path)) { + path = ""; + return path; + } + } else { + path = GetDataDir(false); + } + if (fNetSpecific) + path /= BaseParams().DataDir(); + + path /= "blocks"; + fs::create_directories(path); + return path; +} + const fs::path &GetDataDir(bool fNetSpecific) { @@ -655,6 +686,8 @@ void ClearDatadirCache() pathCached = fs::path(); pathCachedNetSpecific = fs::path(); + g_blocks_path_cached = fs::path(); + g_blocks_path_cache_net_specific = fs::path(); } fs::path GetConfigFile(const std::string& confPath) diff --git a/src/util.h b/src/util.h index 04ff44f218..592041c0cf 100644 --- a/src/util.h +++ b/src/util.h @@ -183,6 +183,7 @@ void ReleaseDirectoryLocks(); bool TryCreateDirectories(const fs::path& p); fs::path GetDefaultDataDir(); +const fs::path &GetBlocksDir(bool fNetSpecific = true); const fs::path &GetDataDir(bool fNetSpecific = true); void ClearDatadirCache(); fs::path GetConfigFile(const std::string& confPath); diff --git a/src/validation.cpp b/src/validation.cpp index 614876ea49..839b248143 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -280,11 +280,11 @@ std::unique_ptr<CCoinsViewDB> pcoinsdbview; std::unique_ptr<CCoinsViewCache> pcoinsTip; std::unique_ptr<CBlockTreeDB> pblocktree; -enum FlushStateMode { - FLUSH_STATE_NONE, - FLUSH_STATE_IF_NEEDED, - FLUSH_STATE_PERIODIC, - FLUSH_STATE_ALWAYS +enum class FlushStateMode { + NONE, + IF_NEEDED, + PERIODIC, + ALWAYS }; // See definition for documentation @@ -983,7 +983,7 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo } // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits CValidationState stateDummy; - FlushStateToDisk(chainparams, stateDummy, FLUSH_STATE_PERIODIC); + FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC); return res; } @@ -1684,7 +1684,7 @@ int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Para for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) { ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache); - if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) { + if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) { nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i)); } } @@ -1740,7 +1740,7 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens } // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. - if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } @@ -1926,7 +1926,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. 
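The enum class FlushStateMode conversion just above is the same mechanical change applied throughout this patch to WitnessMode, SigVersion, ThresholdState, DBErrors and VerifyResult: scoped enumerators must be qualified at every call site and no longer convert implicitly to int, which is what produces the large number of one-line edits in the test and validation files. A toy illustration of the behavioural difference follows; the names here are invented for the example and are not taken from the codebase.

    enum OldMode { OLD_NONE, OLD_ALWAYS };   // unscoped: enumerators leak into this scope, convert to int
    enum class NewMode { NONE, ALWAYS };     // scoped: must be qualified, no implicit conversion

    int demo()
    {
        OldMode a = OLD_ALWAYS;
        int as_int = a;                      // fine: unscoped enums convert implicitly
        NewMode b = NewMode::ALWAYS;
        // int bad = b;                      // would not compile
        // if (b == 1) {}                    // would not compile either
        return as_int + static_cast<int>(b); // an explicit cast is required for scoped enums
    }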
int nLockTimeFlags = 0; - if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; } @@ -2096,19 +2096,19 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). - bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); + bool fCacheLarge = mode == FlushStateMode::PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); // The cache is over the limit, we have to write now. - bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; + bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cacheSize > nTotalSpace; // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. - bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; + bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. - bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; + bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; // Combine all conditions that result in a full cache flush. - fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; + fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; // Write blocks and block index to disk. if (fDoFullFlush || fPeriodicWrite) { // Depend on nMinDiskSpace to ensure we can write block index - if (!CheckDiskSpace(0)) + if (!CheckDiskSpace(0, true)) return state.Error("out of disk space"); // First make sure all block and undo data is flushed to disk. FlushBlockFile(); @@ -2150,7 +2150,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & nLastFlush = nNow; } } - if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { + if (fDoFullFlush || ((mode == FlushStateMode::ALWAYS || mode == FlushStateMode::PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { // Update best block in wallet (so we can detect restored wallets). 
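The FlushStateToDisk conditions rewritten above reduce to a handful of independent triggers plus the unconditional ALWAYS mode. The sketch below restates only the full-flush decision; it omits the separate fPeriodicWrite (block-index-only) path, drops the MAX_BLOCK_COINSDB_USAGE bound on the "cache large" test, and uses made-up interval constants where the real code derives them from DATABASE_WRITE_INTERVAL and DATABASE_FLUSH_INTERVAL, so treat the numbers as placeholders.

    #include <cstdint>

    enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };

    struct FlushInputs {
        int64_t cache_bytes;      // pcoinsTip->DynamicMemoryUsage()
        int64_t total_space;      // coins cache budget plus unused mempool headroom
        int64_t now_us;           // GetTimeMicros()
        int64_t last_flush_us;    // time of the last full coins flush
        bool    flush_for_prune;  // pruning requested a flush
    };

    // Placeholder interval; the real code uses DATABASE_FLUSH_INTERVAL.
    static const int64_t FLUSH_INTERVAL_US = 24LL * 60 * 60 * 1000000;

    bool DoFullFlush(FlushStateMode mode, const FlushInputs& in)
    {
        const bool cache_large    = mode == FlushStateMode::PERIODIC &&
                                    in.cache_bytes > (9 * in.total_space) / 10;
        const bool cache_critical = mode == FlushStateMode::IF_NEEDED &&
                                    in.cache_bytes > in.total_space;
        const bool periodic_flush = mode == FlushStateMode::PERIODIC &&
                                    in.now_us > in.last_flush_us + FLUSH_INTERVAL_US;
        return mode == FlushStateMode::ALWAYS || cache_large || cache_critical ||
               periodic_flush || in.flush_for_prune;
    }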
GetMainSignals().SetBestChain(chainActive.GetLocator()); nLastSetChain = nNow; @@ -2164,14 +2164,14 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & void FlushStateToDisk() { CValidationState state; const CChainParams& chainparams = Params(); - FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS); + FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS); } void PruneAndFlush() { CValidationState state; fCheckForPruning = true; const CChainParams& chainparams = Params(); - FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); + FlushStateToDisk(chainparams, state, FlushStateMode::NONE); } static void DoWarning(const std::string& strWarning) @@ -2199,9 +2199,9 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) { WarningBitsConditionChecker checker(bit); ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]); - if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) { + if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) { const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit); - if (state == THRESHOLD_ACTIVE) { + if (state == ThresholdState::ACTIVE) { DoWarning(strWarning); } else { warningMessages.push_back(strWarning); @@ -2267,7 +2267,7 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha } LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI); // Write the chain state to disk, if necessary. - if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) + if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) return false; if (disconnectpool) { @@ -2405,7 +2405,7 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal); // Write the chain state to disk, if necessary. - if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) + if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED)) return false; int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal); @@ -2682,7 +2682,7 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& CheckBlockIndex(chainparams.GetConsensus()); // Write changes periodically to disk, after relay. 
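The UpdateTip hunk a little earlier in this file keeps the BIP9 warning behaviour while switching to the scoped enum: a bit whose state is ACTIVE triggers an immediate warning, while LOCKED_IN is only queued for the periodic summary line. A self-contained restatement of that loop is below; StateForBit() is an invented stand-in for WarningBitsConditionChecker::GetStateFor(), and the warning text is formatted inline rather than through strprintf and _().

    #include <string>
    #include <vector>

    enum class ThresholdState { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

    // Invented stand-in for WarningBitsConditionChecker::GetStateFor().
    static ThresholdState StateForBit(int /*bit*/) { return ThresholdState::DEFINED; }

    void CheckUnknownRules(int num_bits, std::vector<std::string>& queued_warnings)
    {
        for (int bit = 0; bit < num_bits; ++bit) {
            const ThresholdState state = StateForBit(bit);
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                const std::string msg =
                    "Warning: unknown new rules activated (versionbit " + std::to_string(bit) + ")";
                if (state == ThresholdState::ACTIVE) {
                    // the real code calls DoWarning(msg) here, alerting immediately
                } else {
                    queued_warnings.push_back(msg); // summarized later in UpdateTip's log line
                }
            }
        }
    }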
- if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_PERIODIC)) { + if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) { return false; } @@ -2953,7 +2953,7 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int if (nNewChunks > nOldChunks) { if (fPruneMode) fCheckForPruning = true; - if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { + if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos, true)) { FILE *file = OpenBlockFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); @@ -2986,7 +2986,7 @@ static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, if (nNewChunks > nOldChunks) { if (fPruneMode) fCheckForPruning = true; - if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { + if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos, true)) { FILE *file = OpenUndoFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); @@ -3076,7 +3076,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params) { LOCK(cs_main); - return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE); + return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE); } // Compute at which vout of the block's coinbase transaction the witness @@ -3195,7 +3195,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c // Start enforcing BIP113 (Median Time Past) using versionbits logic. int nLockTimeFlags = 0; - if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST; } @@ -3223,13 +3223,13 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c // Validation for witness commitments. // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the // coinbase (where 0x0000....0000 is used instead). - // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained). + // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained). // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header). // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are - // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are + // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are // multiple, the last one is used. 
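The comment block rewritten above ("witness nonce" becoming "witness reserved value") describes the commitment structure itself: SHA256d over the 32-byte witness merkle root concatenated with the 32-byte reserved value taken from the coinbase witness stack, stored after the 4-byte 0xaa21a9ed header in a 36-byte push. The sketch below illustrates only that comparison; the double-SHA256 is passed in as a callback rather than using the project's CHash256 class, and the helper names are invented for the example.

    #include <array>
    #include <cstdint>
    #include <cstring>
    #include <functional>
    #include <vector>

    using Bytes32 = std::array<uint8_t, 32>;
    // Caller supplies the double-SHA256 implementation (e.g. the project's CHash256).
    using Hash256Fn = std::function<Bytes32(const std::vector<uint8_t>&)>;

    bool CommitmentMatches(const Bytes32& witness_root,
                           const Bytes32& reserved_value,
                           const std::vector<uint8_t>& commitment_script,
                           const Hash256Fn& double_sha256)
    {
        // scriptPubKey layout of the commitment output (38 bytes total):
        //   OP_RETURN (0x6a), push 36 (0x24), 0xaa 0x21 0xa9 0xed, 32-byte commitment hash;
        // the hash therefore starts at offset 6, which is the scriptPubKey[6] seen above.
        if (commitment_script.size() < 38) return false;
        std::vector<uint8_t> preimage(witness_root.begin(), witness_root.end());
        preimage.insert(preimage.end(), reserved_value.begin(), reserved_value.end());
        const Bytes32 expected = double_sha256(preimage);
        return std::memcmp(expected.data(), commitment_script.data() + 6, 32) == 0;
    }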
bool fHaveWitness = false; - if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE) { + if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE) { int commitpos = GetWitnessCommitmentIndex(block); if (commitpos != -1) { bool malleated = false; @@ -3238,7 +3238,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c // already does not permit it, it is impossible to trigger in the // witness tree. if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) { - return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness nonce size", __func__)); + return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness reserved value size", __func__)); } CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin()); if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) { @@ -3257,7 +3257,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c } } - // After the coinbase witness nonce and commitment are verified, + // After the coinbase witness reserved value and commitment are verified, // we can check if the block weight passes (before we've checked the // coinbase witness, it would be possible for the weight to be too // large by filling up the coinbase witness, which doesn't change @@ -3443,7 +3443,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali } if (fCheckForPruning) - FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE); // we just allocated more disk space for block files + FlushStateToDisk(chainparams, state, FlushStateMode::NONE); // we just allocated more disk space for block files CheckBlockIndex(chainparams.GetConsensus()); @@ -3596,7 +3596,7 @@ void PruneBlockFilesManual(int nManualPruneHeight) { CValidationState state; const CChainParams& chainparams = Params(); - FlushStateToDisk(chainparams, state, FLUSH_STATE_NONE, nManualPruneHeight); + FlushStateToDisk(chainparams, state, FlushStateMode::NONE, nManualPruneHeight); } /** @@ -3661,9 +3661,9 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte nLastBlockWeCanPrune, count); } -bool CheckDiskSpace(uint64_t nAdditionalBytes) +bool CheckDiskSpace(uint64_t nAdditionalBytes, bool blocks_dir) { - uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available; + uint64_t nFreeBytesAvailable = fs::space(blocks_dir ? GetBlocksDir() : GetDataDir()).available; // Check for nMinDiskSpace bytes (currently 50MB) if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) @@ -3706,7 +3706,7 @@ static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) { - return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); + return GetBlocksDir() / strprintf("%s%05u.dat", prefix, pos.nFile); } CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash) @@ -4094,7 +4094,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params) return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight); } // Occasionally flush state to disk. 
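The CheckDiskSpace changes in the surrounding hunks add a blocks_dir flag so that block and undo file preallocation is measured against the (possibly separate) -blocksdir volume rather than the data directory. Below is a reduced sketch of the resulting shape; it uses std::filesystem::space and a made-up free-space floor in place of nMinDiskSpace, and takes both paths as parameters instead of calling GetDataDir()/GetBlocksDir(). Callers that preallocate blk*.dat or rev*.dat now pass true, as in the FindBlockPos and FindUndoPos hunks, while the generic data-directory checks keep the default false.

    #include <cstdint>
    #include <filesystem>

    namespace fs = std::filesystem;

    // Placeholder for nMinDiskSpace (the real constant is on the order of 50 MB).
    static const uint64_t MIN_DISK_SPACE = 50u * 1024 * 1024;

    // blocks_dir selects which volume to check, mirroring CheckDiskSpace(nAdditionalBytes, blocks_dir).
    bool HasDiskSpace(const fs::path& data_dir, const fs::path& blocks_path,
                      uint64_t additional_bytes, bool blocks_dir)
    {
        const fs::path& where = blocks_dir ? blocks_path : data_dir;
        const uint64_t available = fs::space(where).available;
        return available >= MIN_DISK_SPACE + additional_bytes;
    }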
- if (!FlushStateToDisk(params, state, FLUSH_STATE_PERIODIC)) + if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) return false; } @@ -4160,7 +4160,7 @@ bool RewindBlockIndex(const CChainParams& params) { // and skip it here, we're about to -reindex-chainstate anyway, so // it'll get called a bunch real soon. CValidationState state; - if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { + if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) { return false; } } diff --git a/src/validation.h b/src/validation.h index e780f453b2..0a78075316 100644 --- a/src/validation.h +++ b/src/validation.h @@ -254,7 +254,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex=nullptr, CBlockHeader *first_invalid=nullptr); /** Check whether enough disk space is available for an incoming block */ -bool CheckDiskSpace(uint64_t nAdditionalBytes = 0); +bool CheckDiskSpace(uint64_t nAdditionalBytes = 0, bool blocks_dir = false); /** Open a block file (blk?????.dat) */ FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly = false); /** Translation to a filesystem path */ @@ -411,7 +411,7 @@ bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& pa /** When there are blocks in the active chain with missing data, rewind the chainstate and remove them from the block index */ bool RewindBlockIndex(const CChainParams& params); -/** Update uncommitted block structures (currently: only the witness nonce). This is safe for submitted blocks. */ +/** Update uncommitted block structures (currently: only the witness reserved value). This is safe for submitted blocks. */ void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams); /** Produce the necessary coinbase commitment for a block (modifies the hash, don't call for mined blocks). */ diff --git a/src/versionbits.cpp b/src/versionbits.cpp index d2ee49db20..e3ec078173 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -29,7 +29,7 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* // Check if this deployment is always active. if (nTimeStart == Consensus::BIP9Deployment::ALWAYS_ACTIVE) { - return THRESHOLD_ACTIVE; + return ThresholdState::ACTIVE; } // A block's state is always the same as that of the first of its period, so it is computed based on a pindexPrev whose height equals a multiple of nPeriod - 1. @@ -42,12 +42,12 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* while (cache.count(pindexPrev) == 0) { if (pindexPrev == nullptr) { // The genesis block is by definition defined. 
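The versionbits.cpp hunks above are a pure rename, but they touch every arm of the BIP9 state machine, so the transition logic is worth restating in one place. This is a schematic of a single period-boundary step with the signal count already tallied; the timestamps and threshold are plain parameters here rather than the Consensus::BIP9Deployment fields the real checker reads, and the caching walk over pindexPrev is omitted.

    #include <cstdint>

    enum class ThresholdState { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

    // One BIP9 transition, evaluated at a period boundary.
    ThresholdState NextState(ThresholdState state, int64_t median_time_past,
                             int64_t start_time, int64_t timeout,
                             int signalling_blocks, int threshold)
    {
        switch (state) {
        case ThresholdState::DEFINED:
            if (median_time_past >= timeout) return ThresholdState::FAILED;
            if (median_time_past >= start_time) return ThresholdState::STARTED;
            return ThresholdState::DEFINED;
        case ThresholdState::STARTED:
            if (median_time_past >= timeout) return ThresholdState::FAILED;
            if (signalling_blocks >= threshold) return ThresholdState::LOCKED_IN;
            return ThresholdState::STARTED;
        case ThresholdState::LOCKED_IN:
            return ThresholdState::ACTIVE;   // always progresses into ACTIVE
        case ThresholdState::FAILED:
        case ThresholdState::ACTIVE:
            return state;                    // terminal states
        }
        return state;                        // unreachable; keeps compilers quiet
    }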
- cache[pindexPrev] = THRESHOLD_DEFINED; + cache[pindexPrev] = ThresholdState::DEFINED; break; } if (pindexPrev->GetMedianTimePast() < nTimeStart) { // Optimization: don't recompute down further, as we know every earlier block will be before the start time - cache[pindexPrev] = THRESHOLD_DEFINED; + cache[pindexPrev] = ThresholdState::DEFINED; break; } vToCompute.push_back(pindexPrev); @@ -65,17 +65,17 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* vToCompute.pop_back(); switch (state) { - case THRESHOLD_DEFINED: { + case ThresholdState::DEFINED: { if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { - stateNext = THRESHOLD_FAILED; + stateNext = ThresholdState::FAILED; } else if (pindexPrev->GetMedianTimePast() >= nTimeStart) { - stateNext = THRESHOLD_STARTED; + stateNext = ThresholdState::STARTED; } break; } - case THRESHOLD_STARTED: { + case ThresholdState::STARTED: { if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { - stateNext = THRESHOLD_FAILED; + stateNext = ThresholdState::FAILED; break; } // We need to count @@ -88,17 +88,17 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* pindexCount = pindexCount->pprev; } if (count >= nThreshold) { - stateNext = THRESHOLD_LOCKED_IN; + stateNext = ThresholdState::LOCKED_IN; } break; } - case THRESHOLD_LOCKED_IN: { + case ThresholdState::LOCKED_IN: { // Always progresses into ACTIVE. - stateNext = THRESHOLD_ACTIVE; + stateNext = ThresholdState::ACTIVE; break; } - case THRESHOLD_FAILED: - case THRESHOLD_ACTIVE: { + case ThresholdState::FAILED: + case ThresholdState::ACTIVE: { // Nothing happens, these are terminal states. break; } @@ -149,7 +149,7 @@ int AbstractThresholdConditionChecker::GetStateSinceHeightFor(const CBlockIndex* const ThresholdState initialState = GetStateFor(pindexPrev, params, cache); // BIP 9 about state DEFINED: "The genesis block is by definition in this state for each deployment." - if (initialState == THRESHOLD_DEFINED) { + if (initialState == ThresholdState::DEFINED) { return 0; } diff --git a/src/versionbits.h b/src/versionbits.h index 1600dc8c93..65cf308c79 100644 --- a/src/versionbits.h +++ b/src/versionbits.h @@ -17,12 +17,12 @@ static const int32_t VERSIONBITS_TOP_MASK = 0xE0000000UL; /** Total bits available for versionbits */ static const int32_t VERSIONBITS_NUM_BITS = 29; -enum ThresholdState { - THRESHOLD_DEFINED, - THRESHOLD_STARTED, - THRESHOLD_LOCKED_IN, - THRESHOLD_ACTIVE, - THRESHOLD_FAILED, +enum class ThresholdState { + DEFINED, + STARTED, + LOCKED_IN, + ACTIVE, + FAILED, }; // A map that gives the state for blocks whose height is a multiple of Period(). diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index ebe7b48da0..553cae4d02 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -235,13 +235,13 @@ CDBEnv::VerifyResult CDBEnv::Verify(const std::string& strFile, recoverFunc_type Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); if (result == 0) - return VERIFY_OK; + return VerifyResult::VERIFY_OK; else if (recoverFunc == nullptr) - return RECOVER_FAIL; + return VerifyResult::RECOVER_FAIL; // Try to recover: bool fRecovered = (*recoverFunc)(fs::path(strPath) / strFile, out_backup_filename); - return (fRecovered ? RECOVER_OK : RECOVER_FAIL); + return (fRecovered ? 
VerifyResult::RECOVER_OK : VerifyResult::RECOVER_FAIL); } bool CDB::Recover(const fs::path& file_path, void *callbackDataIn, bool (*recoverKVcallback)(void* callbackData, CDataStream ssKey, CDataStream ssValue), std::string& newFilename) @@ -347,7 +347,7 @@ bool CDB::VerifyDatabaseFile(const fs::path& file_path, std::string& warningStr, { std::string backup_filename; CDBEnv::VerifyResult r = env->Verify(walletFile, recoverFunc, backup_filename); - if (r == CDBEnv::RECOVER_OK) + if (r == CDBEnv::VerifyResult::RECOVER_OK) { warningStr = strprintf(_("Warning: Wallet file corrupt, data salvaged!" " Original %s saved as %s in %s; if" @@ -355,7 +355,7 @@ bool CDB::VerifyDatabaseFile(const fs::path& file_path, std::string& warningStr, " restore from a backup."), walletFile, backup_filename, walletDir); } - if (r == CDBEnv::RECOVER_FAIL) + if (r == CDBEnv::VerifyResult::RECOVER_FAIL) { errorStr = strprintf(_("%s corrupt, salvage failed"), walletFile); return false; diff --git a/src/wallet/db.h b/src/wallet/db.h index b1ce451534..65bb8cc253 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -53,7 +53,7 @@ public: * This must be called BEFORE strFile is opened. * Returns true if strFile is OK. */ - enum VerifyResult { VERIFY_OK, + enum class VerifyResult { VERIFY_OK, RECOVER_OK, RECOVER_FAIL }; typedef bool (*recoverFunc_type)(const fs::path& file_path, std::string& out_backup_filename); diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 61481e01b6..3d7bb674f0 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -14,7 +14,7 @@ #include <wallet/wallet.h> #include <wallet/walletutil.h> -std::string GetWalletHelpString(bool showDebug) +std::string WalletInit::GetHelpString(bool showDebug) { std::string strUsage = HelpMessageGroup(_("Wallet options:")); strUsage += HelpMessageOpt("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE))); @@ -56,7 +56,7 @@ std::string GetWalletHelpString(bool showDebug) return strUsage; } -bool WalletParameterInteraction() +bool WalletInit::ParameterInteraction() { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { for (const std::string& wallet : gArgs.GetArgs("-wallet")) { @@ -184,7 +184,7 @@ bool WalletParameterInteraction() return true; } -void RegisterWalletRPC(CRPCTable &t) +void WalletInit::RegisterRPC(CRPCTable &t) { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { return; @@ -193,7 +193,7 @@ void RegisterWalletRPC(CRPCTable &t) RegisterWalletRPCCommands(t); } -bool VerifyWallets() +bool WalletInit::Verify() { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { return true; @@ -268,7 +268,7 @@ bool VerifyWallets() return true; } -bool OpenWallets() +bool WalletInit::Open() { if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { LogPrintf("Wallet disabled!\n"); @@ -286,25 +286,29 @@ bool OpenWallets() return true; } -void StartWallets(CScheduler& scheduler) { +void WalletInit::Start(CScheduler& scheduler) +{ for (CWalletRef pwallet : vpwallets) { pwallet->postInitProcess(scheduler); } } -void FlushWallets() { +void WalletInit::Flush() +{ for (CWalletRef pwallet : vpwallets) { pwallet->Flush(false); } } -void StopWallets() { +void WalletInit::Stop() +{ for (CWalletRef pwallet : vpwallets) { pwallet->Flush(true); } } -void CloseWallets() { +void WalletInit::Close() +{ for (CWalletRef pwallet : vpwallets) { delete pwallet; } diff --git a/src/wallet/init.h b/src/wallet/init.h index 
0b3ee2dda2..f8be90d3e3 100644 --- a/src/wallet/init.h +++ b/src/wallet/init.h @@ -6,38 +6,43 @@ #ifndef BITCOIN_WALLET_INIT_H #define BITCOIN_WALLET_INIT_H +#include <walletinitinterface.h> #include <string> class CRPCTable; class CScheduler; -//! Return the wallets help message. -std::string GetWalletHelpString(bool showDebug); +class WalletInit : public WalletInitInterface { +public: -//! Wallets parameter interaction -bool WalletParameterInteraction(); + //! Return the wallets help message. + std::string GetHelpString(bool showDebug) override; -//! Register wallet RPCs. -void RegisterWalletRPC(CRPCTable &tableRPC); + //! Wallets parameter interaction + bool ParameterInteraction() override; -//! Responsible for reading and validating the -wallet arguments and verifying the wallet database. -// This function will perform salvage on the wallet if requested, as long as only one wallet is -// being loaded (WalletParameterInteraction forbids -salvagewallet, -zapwallettxes or -upgradewallet with multiwallet). -bool VerifyWallets(); + //! Register wallet RPCs. + void RegisterRPC(CRPCTable &tableRPC) override; -//! Load wallet databases. -bool OpenWallets(); + //! Responsible for reading and validating the -wallet arguments and verifying the wallet database. + // This function will perform salvage on the wallet if requested, as long as only one wallet is + // being loaded (WalletParameterInteraction forbids -salvagewallet, -zapwallettxes or -upgradewallet with multiwallet). + bool Verify() override; -//! Complete startup of wallets. -void StartWallets(CScheduler& scheduler); + //! Load wallet databases. + bool Open() override; -//! Flush all wallets in preparation for shutdown. -void FlushWallets(); + //! Complete startup of wallets. + void Start(CScheduler& scheduler) override; -//! Stop all wallets. Wallets will be flushed first. -void StopWallets(); + //! Flush all wallets in preparation for shutdown. + void Flush() override; -//! Close all wallets. -void CloseWallets(); + //! Stop all wallets. Wallets will be flushed first. + void Stop() override; + + //! Close all wallets. 
+ void Close() override; +}; #endif // BITCOIN_WALLET_INIT_H diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 1721bc6df6..28b6153ce1 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -406,7 +406,7 @@ UniValue removeprunedfunds(const JSONRPCRequest& request) vHash.push_back(hash); std::vector<uint256> vHashOut; - if (pwallet->ZapSelectTx(vHash, vHashOut) != DB_LOAD_OK) { + if (pwallet->ZapSelectTx(vHash, vHashOut) != DBErrors::LOAD_OK) { throw JSONRPCError(RPC_WALLET_ERROR, "Could not properly delete the transaction."); } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 365dedfceb..c34b166a41 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -113,9 +113,9 @@ void WalletTxToJSON(const CWalletTx& wtx, UniValue& entry) if (confirms <= 0) { LOCK(mempool.cs); RBFTransactionState rbfState = IsRBFOptIn(*wtx.tx, mempool); - if (rbfState == RBF_TRANSACTIONSTATE_UNKNOWN) + if (rbfState == RBFTransactionState::UNKNOWN) rbfStatus = "unknown"; - else if (rbfState == RBF_TRANSACTIONSTATE_REPLACEABLE_BIP125) + else if (rbfState == RBFTransactionState::REPLACEABLE_BIP125) rbfStatus = "yes"; } entry.pushKV("bip125-replaceable", rbfStatus); diff --git a/src/wallet/test/accounting_tests.cpp b/src/wallet/test/accounting_tests.cpp index aae328d81f..cc6e491f53 100644 --- a/src/wallet/test/accounting_tests.cpp +++ b/src/wallet/test/accounting_tests.cpp @@ -18,7 +18,7 @@ GetResults(CWallet& wallet, std::map<CAmount, CAccountingEntry>& results) std::list<CAccountingEntry> aes; results.clear(); - BOOST_CHECK(wallet.ReorderTransactions() == DB_LOAD_OK); + BOOST_CHECK(wallet.ReorderTransactions() == DBErrors::LOAD_OK); wallet.ListAccountCreditDebit("", aes); for (CAccountingEntry& ae : aes) { diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index a80875e7ed..c9843599d6 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -727,11 +727,11 @@ DBErrors CWallet::ReorderTransactions() if (pwtx) { if (!walletdb.WriteTx(*pwtx)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } else if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } else { @@ -751,16 +751,16 @@ DBErrors CWallet::ReorderTransactions() if (pwtx) { if (!walletdb.WriteTx(*pwtx)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } else if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo, *pacentry)) - return DB_LOAD_FAIL; + return DBErrors::LOAD_FAIL; } } walletdb.WriteOrderPosNext(nOrderPosNext); - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } int64_t CWallet::IncOrderPosNext(CWalletDB *pwalletdb) @@ -3146,7 +3146,7 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) fFirstRunRet = false; DBErrors nLoadWalletRet = CWalletDB(*dbw,"cr+").LoadWallet(this); - if (nLoadWalletRet == DB_NEED_REWRITE) + if (nLoadWalletRet == DBErrors::NEED_REWRITE) { if (dbw->Rewrite("\x04pool")) { @@ -3162,12 +3162,12 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) // This wallet is in its first run if all of these are empty fFirstRunRet = mapKeys.empty() && mapCryptedKeys.empty() && mapWatchKeys.empty() && setWatchOnly.empty() && mapScripts.empty(); - if (nLoadWalletRet != DB_LOAD_OK) + if (nLoadWalletRet != DBErrors::LOAD_OK) return nLoadWalletRet; uiInterface.LoadWallet(this); - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut) @@ -3177,7 +3177,7 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& 
vHashIn, std::vector<uint256 for (uint256 hash : vHashOut) mapWallet.erase(hash); - if (nZapSelectTxRet == DB_NEED_REWRITE) + if (nZapSelectTxRet == DBErrors::NEED_REWRITE) { if (dbw->Rewrite("\x04pool")) { @@ -3190,19 +3190,19 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256 } } - if (nZapSelectTxRet != DB_LOAD_OK) + if (nZapSelectTxRet != DBErrors::LOAD_OK) return nZapSelectTxRet; MarkDirty(); - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx) { DBErrors nZapWalletTxRet = CWalletDB(*dbw,"cr+").ZapWalletTx(vWtx); - if (nZapWalletTxRet == DB_NEED_REWRITE) + if (nZapWalletTxRet == DBErrors::NEED_REWRITE) { if (dbw->Rewrite("\x04pool")) { @@ -3216,10 +3216,10 @@ DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx) } } - if (nZapWalletTxRet != DB_LOAD_OK) + if (nZapWalletTxRet != DBErrors::LOAD_OK) return nZapWalletTxRet; - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } @@ -3931,7 +3931,7 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path& std::unique_ptr<CWallet> tempWallet = MakeUnique<CWallet>(name, CWalletDBWrapper::Create(path)); DBErrors nZapWalletRet = tempWallet->ZapWalletTx(vWtx); - if (nZapWalletRet != DB_LOAD_OK) { + if (nZapWalletRet != DBErrors::LOAD_OK) { InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); return nullptr; } @@ -3943,23 +3943,23 @@ CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path& bool fFirstRun = true; CWallet *walletInstance = new CWallet(name, CWalletDBWrapper::Create(path)); DBErrors nLoadWalletRet = walletInstance->LoadWallet(fFirstRun); - if (nLoadWalletRet != DB_LOAD_OK) + if (nLoadWalletRet != DBErrors::LOAD_OK) { - if (nLoadWalletRet == DB_CORRUPT) { + if (nLoadWalletRet == DBErrors::CORRUPT) { InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile)); return nullptr; } - else if (nLoadWalletRet == DB_NONCRITICAL_ERROR) + else if (nLoadWalletRet == DBErrors::NONCRITICAL_ERROR) { InitWarning(strprintf(_("Error reading %s! 
All keys read correctly, but transaction data" " or address book entries might be missing or incorrect."), walletFile)); } - else if (nLoadWalletRet == DB_TOO_NEW) { + else if (nLoadWalletRet == DBErrors::TOO_NEW) { InitError(strprintf(_("Error loading %s: Wallet requires newer version of %s"), walletFile, _(PACKAGE_NAME))); return nullptr; } - else if (nLoadWalletRet == DB_NEED_REWRITE) + else if (nLoadWalletRet == DBErrors::NEED_REWRITE) { InitError(strprintf(_("Wallet needed to be rewritten: restart %s to complete"), _(PACKAGE_NAME))); return nullptr; diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 7f5f3b84b2..77cdfe7dd0 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -522,7 +522,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) { CWalletScanState wss; bool fNoncriticalErrors = false; - DBErrors result = DB_LOAD_OK; + DBErrors result = DBErrors::LOAD_OK; LOCK(pwallet->cs_wallet); try { @@ -530,7 +530,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) if (batch.Read((std::string)"minversion", nMinVersion)) { if (nMinVersion > CLIENT_VERSION) - return DB_TOO_NEW; + return DBErrors::TOO_NEW; pwallet->LoadMinVersion(nMinVersion); } @@ -539,7 +539,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) if (!pcursor) { LogPrintf("Error getting wallet database cursor\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } while (true) @@ -553,7 +553,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) else if (ret != 0) { LogPrintf("Error reading next record from wallet database\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } // Try to be tolerant of single corrupt records: @@ -563,7 +563,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) // losing keys is considered a catastrophic error, anything else // we assume the user can live with: if (IsKeyType(strType) || strType == "defaultkey") - result = DB_CORRUPT; + result = DBErrors::CORRUPT; else { // Leave other errors alone, if we try to fix them we might make things worse. @@ -582,15 +582,15 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) throw; } catch (...) { - result = DB_CORRUPT; + result = DBErrors::CORRUPT; } - if (fNoncriticalErrors && result == DB_LOAD_OK) - result = DB_NONCRITICAL_ERROR; + if (fNoncriticalErrors && result == DBErrors::LOAD_OK) + result = DBErrors::NONCRITICAL_ERROR; // Any wallet corruption at all: skip any rewriting or // upgrading, we don't want to make it worse. 
- if (result != DB_LOAD_OK) + if (result != DBErrors::LOAD_OK) return result; LogPrintf("nFileVersion = %d\n", wss.nFileVersion); @@ -607,7 +607,7 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) // Rewrite encrypted wallets of versions 0.4.0 and 0.5.0rc: if (wss.fIsEncrypted && (wss.nFileVersion == 40000 || wss.nFileVersion == 50000)) - return DB_NEED_REWRITE; + return DBErrors::NEED_REWRITE; if (wss.nFileVersion < CLIENT_VERSION) // Update WriteVersion(CLIENT_VERSION); @@ -626,14 +626,14 @@ DBErrors CWalletDB::LoadWallet(CWallet* pwallet) DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWalletTx>& vWtx) { - DBErrors result = DB_LOAD_OK; + DBErrors result = DBErrors::LOAD_OK; try { int nMinVersion = 0; if (batch.Read((std::string)"minversion", nMinVersion)) { if (nMinVersion > CLIENT_VERSION) - return DB_TOO_NEW; + return DBErrors::TOO_NEW; } // Get cursor @@ -641,7 +641,7 @@ DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWal if (!pcursor) { LogPrintf("Error getting wallet database cursor\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } while (true) @@ -655,7 +655,7 @@ DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWal else if (ret != 0) { LogPrintf("Error reading next record from wallet database\n"); - return DB_CORRUPT; + return DBErrors::CORRUPT; } std::string strType; @@ -677,7 +677,7 @@ DBErrors CWalletDB::FindWalletTx(std::vector<uint256>& vTxHash, std::vector<CWal throw; } catch (...) { - result = DB_CORRUPT; + result = DBErrors::CORRUPT; } return result; @@ -689,7 +689,7 @@ DBErrors CWalletDB::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<uin std::vector<uint256> vTxHash; std::vector<CWalletTx> vWtx; DBErrors err = FindWalletTx(vTxHash, vWtx); - if (err != DB_LOAD_OK) { + if (err != DBErrors::LOAD_OK) { return err; } @@ -716,9 +716,9 @@ DBErrors CWalletDB::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<uin } if (delerror) { - return DB_CORRUPT; + return DBErrors::CORRUPT; } - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } DBErrors CWalletDB::ZapWalletTx(std::vector<CWalletTx>& vWtx) @@ -726,16 +726,16 @@ DBErrors CWalletDB::ZapWalletTx(std::vector<CWalletTx>& vWtx) // build list of wallet TXs std::vector<uint256> vTxHash; DBErrors err = FindWalletTx(vTxHash, vWtx); - if (err != DB_LOAD_OK) + if (err != DBErrors::LOAD_OK) return err; // erase each wallet TX for (uint256& hash : vTxHash) { if (!EraseTx(hash)) - return DB_CORRUPT; + return DBErrors::CORRUPT; } - return DB_LOAD_OK; + return DBErrors::LOAD_OK; } void MaybeCompactWalletDB() diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 7d754c7284..606b7dace7 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -46,14 +46,14 @@ class uint160; class uint256; /** Error statuses for the wallet database */ -enum DBErrors +enum class DBErrors { - DB_LOAD_OK, - DB_CORRUPT, - DB_NONCRITICAL_ERROR, - DB_TOO_NEW, - DB_LOAD_FAIL, - DB_NEED_REWRITE + LOAD_OK, + CORRUPT, + NONCRITICAL_ERROR, + TOO_NEW, + LOAD_FAIL, + NEED_REWRITE }; /* simple HD chain data model */ diff --git a/src/walletinitinterface.h b/src/walletinitinterface.h new file mode 100644 index 0000000000..47e4e2cce1 --- /dev/null +++ b/src/walletinitinterface.h @@ -0,0 +1,51 @@ +// Copyright (c) 2017 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef WALLETINITINTERFACE_H +#define WALLETINITINTERFACE_H + +#include <string> + +class CScheduler; +class CRPCTable; + +class WalletInitInterface { +public: + /** Get wallet help string */ + virtual std::string GetHelpString(bool showDebug) = 0; + /** Check wallet parameter interaction */ + virtual bool ParameterInteraction() = 0; + /** Register wallet RPC*/ + virtual void RegisterRPC(CRPCTable &) = 0; + /** Verify wallets */ + virtual bool Verify() = 0; + /** Open wallets*/ + virtual bool Open() = 0; + /** Start wallets*/ + virtual void Start(CScheduler& scheduler) = 0; + /** Flush Wallets*/ + virtual void Flush() = 0; + /** Stop Wallets*/ + virtual void Stop() = 0; + /** Close wallets */ + virtual void Close() = 0; + + virtual ~WalletInitInterface() {} +}; + +class DummyWalletInit : public WalletInitInterface { +public: + + std::string GetHelpString(bool showDebug) override {return std::string{};} + bool ParameterInteraction() override {return true;} + void RegisterRPC(CRPCTable &) override {} + bool Verify() override {return true;} + bool Open() override {return true;} + void Start(CScheduler& scheduler) override {} + void Flush() override {} + void Stop() override {} + void Close() override {} +}; + +#endif // WALLETINITINTERFACE_H diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index fe9bbda14b..181c7f3369 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -2,36 +2,59 @@ # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test block processing. - -This reimplements tests from the bitcoinj/FullBlockTestGenerator used -by the pull-tester. - -We use the testing framework in which we expect a particular answer from -each test. -""" - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.comptool import TestManager, TestInstance, RejectResult -from test_framework.blocktools import * +"""Test block processing.""" +import copy +import struct import time + +from test_framework.blocktools import create_block, create_coinbase, create_transaction, get_legacy_sigopcount_block from test_framework.key import CECKey -from test_framework.script import * -from test_framework.mininode import network_thread_start -import struct +from test_framework.messages import ( + CBlock, + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + MAX_BLOCK_BASE_SIZE, + uint256_from_compact, + uint256_from_str, +) +from test_framework.mininode import P2PDataStore, network_thread_start, network_thread_join +from test_framework.script import ( + CScript, + MAX_SCRIPT_ELEMENT_SIZE, + OP_2DUP, + OP_CHECKMULTISIG, + OP_CHECKMULTISIGVERIFY, + OP_CHECKSIG, + OP_CHECKSIGVERIFY, + OP_ELSE, + OP_ENDIF, + OP_EQUAL, + OP_FALSE, + OP_HASH160, + OP_IF, + OP_INVALIDOPCODE, + OP_RETURN, + OP_TRUE, + SIGHASH_ALL, + SignatureHash, + hash160, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +MAX_BLOCK_SIGOPS = 20000 class PreviousSpendableOutput(): - def __init__(self, tx = CTransaction(), n = -1): + def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending # Use this class for tests that require behavior other than normal "mininode" behavior. # For now, it is used to serialize a bloated varint (b64). 
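# [Editor's note] Illustrative sketch, not part of this patch: a compact-size ("varint") count
# is canonical only in its shortest encoding. A bloated encoding forces the 0xff marker plus an
# 8-byte count even for a tiny transaction list, which (assuming CBrokenBlock's serializer uses
# that form) is why the bloated block later in this test ends up exactly 8 bytes over
# MAX_BLOCK_BASE_SIZE. Only the standard-library struct module is used here.
import struct

def bloated_compact_size(n):
    # Non-canonical: 0xff marker followed by an 8-byte little-endian count.
    return struct.pack("<BQ", 255, n)

def canonical_compact_size(n):
    assert n < 253  # the canonical form for small counts is a single byte
    return struct.pack("<B", n)

# The bloated form of a 2-transaction count is 8 bytes larger than the canonical form.
assert len(bloated_compact_size(2)) - len(canonical_compact_size(2)) == 8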
class CBrokenBlock(CBlock): - def __init__(self, header=None): - super(CBrokenBlock, self).__init__(header) - def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() @@ -48,381 +71,272 @@ class CBrokenBlock(CBlock): return r def normal_serialize(self): - r = b"" - r += super(CBrokenBlock, self).serialize() - return r + return super().serialize() -class FullBlockTest(ComparisonTestFramework): - # Can either run this test as 1 node with expected answers, or two and compare them. - # Change the "outcome" variable from each TestInstance object to only do the comparison. +class FullBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True + self.extra_args = [[]] + + def run_test(self): + node = self.nodes[0] # convenience reference to the node + + # reconnect_p2p() expects the network thread to be running + network_thread_start() + + self.reconnect_p2p() + self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"horsebattery") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.tip = None self.blocks = {} - - def add_options(self, parser): - super().add_options(parser) - parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True) - - def run_test(self): - self.test = TestManager(self, self.options.tmpdir) - self.test.add_all_connections(self.nodes) - network_thread_start() - self.test.run() - - def add_transactions_to_block(self, block, tx_list): - [ tx.rehash() for tx in tx_list ] - block.vtx.extend(tx_list) - - # this is a little handier to use than the version in blocktools.py - def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): - tx = create_transaction(spend_tx, n, b"", value, script) - return tx - - # sign a transaction, using the key we know about - # this signs input 0 in tx, which is assumed to be spending output n in spend_tx - def sign_tx(self, tx, spend_tx, n): - scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) - if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend - tx.vin[0].scriptSig = CScript() - return - (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) - tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) - - def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): - tx = self.create_tx(spend_tx, n, value, script) - self.sign_tx(tx, spend_tx, n) - tx.rehash() - return tx - - def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True): - if self.tip == None: - base_block_hash = self.genesis_hash - block_time = int(time.time())+1 - else: - base_block_hash = self.tip.sha256 - block_time = self.tip.nTime + 1 - # First create the coinbase - height = self.block_heights[base_block_hash] + 1 - coinbase = create_coinbase(height, self.coinbase_pubkey) - coinbase.vout[0].nValue += additional_coinbase_value - coinbase.rehash() - if spend == None: - block = create_block(base_block_hash, coinbase, block_time) - else: - coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees - coinbase.rehash() - block = create_block(base_block_hash, coinbase, block_time) - tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi - self.sign_tx(tx, spend.tx, spend.n) - self.add_transactions_to_block(block, [tx]) - block.hashMerkleRoot = block.calc_merkle_root() - if solve: - block.solve() - self.tip 
= block - self.block_heights[block.sha256] = height - assert number not in self.blocks - self.blocks[number] = block - return block - - def get_tests(self): self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 - spendable_outputs = [] - - # save the current tip so it can be spent by a later block - def save_spendable_output(): - spendable_outputs.append(self.tip) - - # get an output that we previously marked as spendable - def get_spendable_output(): - return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) - - # returns a test case that asserts that the current tip was accepted - def accepted(): - return TestInstance([[self.tip, True]]) - - # returns a test case that asserts that the current tip was rejected - def rejected(reject = None): - if reject is None: - return TestInstance([[self.tip, False]]) - else: - return TestInstance([[self.tip, reject]]) - - # move the tip back to a previous block - def tip(number): - self.tip = self.blocks[number] - - # adds transactions to the block and updates state - def update_block(block_number, new_transactions): - block = self.blocks[block_number] - self.add_transactions_to_block(block, new_transactions) - old_sha256 = block.sha256 - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - # Update the internal state just like in next_block - self.tip = block - if block.sha256 != old_sha256: - self.block_heights[block.sha256] = self.block_heights[old_sha256] - del self.block_heights[old_sha256] - self.blocks[block_number] = block - return block - - # shorthand for functions - block = self.next_block - create_tx = self.create_tx - create_and_sign_tx = self.create_and_sign_transaction - - # these must be updated if consensus changes - MAX_BLOCK_SIGOPS = 20000 - + self.spendable_outputs = [] # Create a new block - block(0) - save_spendable_output() - yield accepted() - + b0 = self.next_block(0) + self.save_spendable_output() + self.sync_blocks([b0]) - # Now we need that block to mature so we can spend the coinbase. - test = TestInstance(sync_every_block=False) + # Allow the block to mature + blocks = [] for i in range(99): - block(5000 + i) - test.blocks_and_transactions.append([self.tip, True]) - save_spendable_output() - yield test + blocks.append(self.next_block(5000 + i)) + self.save_spendable_output() + self.sync_blocks(blocks) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(33): - out.append(get_spendable_output()) + out.append(self.get_spendable_output()) # Start by building a couple of blocks on top (which output is spent is # in parentheses): # genesis -> b1 (0) -> b2 (1) - block(1, spend=out[0]) - save_spendable_output() - yield accepted() + b1 = self.next_block(1, spend=out[0]) + self.save_spendable_output() - block(2, spend=out[1]) - yield accepted() - save_spendable_output() + b2 = self.next_block(2, spend=out[1]) + self.save_spendable_output() - # so fork like this: + self.sync_blocks([b1, b2]) + + # Fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes priority. - tip(1) - b3 = block(3, spend=out[1]) + self.log.info("Don't reorg to a chain of the same length") + self.move_tip(1) + b3 = self.next_block(3, spend=out[1]) txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) - yield rejected() - + self.sync_blocks([b3], False) # Now we add another block to make the alternative chain longer. 
# # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) - block(4, spend=out[2]) - yield accepted() - + self.log.info("Reorg to a longer chain") + b4 = self.next_block(4, spend=out[2]) + self.sync_blocks([b4]) # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) - tip(2) - block(5, spend=out[2]) - save_spendable_output() - yield rejected() + self.move_tip(2) + b5 = self.next_block(5, spend=out[2]) + self.save_spendable_output() + self.sync_blocks([b5], False) - block(6, spend=out[3]) - yield accepted() + self.log.info("Reorg back to the original chain") + b6 = self.next_block(6, spend=out[3]) + self.sync_blocks([b6], True) # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) - tip(5) - block(7, spend=out[2]) - yield rejected() + self.log.info("Reject a chain with a double spend, even if it is longer") + self.move_tip(5) + b7 = self.next_block(7, spend=out[2]) + self.sync_blocks([b7], False) - block(8, spend=out[4]) - yield rejected() + b8 = self.next_block(8, spend=out[4]) + self.sync_blocks([b8], False, reconnect=True) # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) - tip(6) - block(9, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) + self.log.info("Reject a block where the miner creates too much coinbase reward") + self.move_tip(6) + b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1) + self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) - tip(5) - block(10, spend=out[3]) - yield rejected() - - block(11, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) + self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer") + self.move_tip(5) + b10 = self.next_block(10, spend=out[3]) + self.sync_blocks([b10], False) + b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1) + self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) - # (b12 added last) # \-> b3 (1) -> b4 (2) - tip(5) - b12 = block(12, spend=out[3]) - save_spendable_output() - b13 = block(13, spend=out[4]) - # Deliver the block header for b12, and the block b13. - # b13 should be accepted but the tip won't advance until b12 is delivered. - yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) - - save_spendable_output() - # b14 is invalid, but the node won't know that until it tries to connect - # Tip still can't advance because b12 is missing - block(14, spend=out[5], additional_coinbase_value=1) - yield rejected() - - yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
+ self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)") + self.move_tip(5) + b12 = self.next_block(12, spend=out[3]) + self.save_spendable_output() + b13 = self.next_block(13, spend=out[4]) + self.save_spendable_output() + b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1) + self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True) + + # New tip should be b13. + assert_equal(node.getbestblockhash(), b13.hash) # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) - - # Test that a block with a lot of checksigs is okay + self.log.info("Accept a block with lots of checksigs") lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1)) - tip(13) - block(15, spend=out[5], script=lots_of_checksigs) - yield accepted() - save_spendable_output() + self.move_tip(13) + b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs) + self.save_spendable_output() + self.sync_blocks([b15], True) - - # Test that a block with too many checksigs is rejected + self.log.info("Reject a block with too many checksigs") too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) - block(16, spend=out[6], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - + b16 = self.next_block(16, spend=out[6], script=too_many_checksigs) + self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) - tip(15) - block(17, spend=txout_b3) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.log.info("Reject a block with a spend from a re-org'ed out tx") + self.move_tip(15) + b17 = self.next_block(17, spend=txout_b3) + self.sync_blocks([b17], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) - tip(13) - block(18, spend=txout_b3) - yield rejected() + self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)") + self.move_tip(13) + b18 = self.next_block(18, spend=txout_b3) + self.sync_blocks([b18], False) - block(19, spend=out[6]) - yield rejected() + b19 = self.next_block(19, spend=out[6]) + self.sync_blocks([b19], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) - tip(15) - block(20, spend=out[7]) - yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) + self.log.info("Reject a block spending an immature coinbase.") + self.move_tip(15) + b20 = self.next_block(20, spend=out[7]) + self.sync_blocks([b20], False, 16, b'bad-txns-premature-spend-of-coinbase') # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) - tip(13) - block(21, spend=out[6]) - yield rejected() + self.log.info("Reject a block spending an immature coinbase 
(on a forked chain)") + self.move_tip(13) + b21 = self.next_block(21, spend=out[6]) + self.sync_blocks([b21], False) - block(22, spend=out[5]) - yield rejected() + b22 = self.next_block(22, spend=out[5]) + self.sync_blocks([b22], False, 16, b'bad-txns-premature-spend-of-coinbase') # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) - tip(15) - b23 = block(23, spend=out[6]) + self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE") + self.move_tip(15) + b23 = self.next_block(23, spend=out[6]) tx = CTransaction() script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) - b23 = update_block(23, [tx]) + b23 = self.update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE) - yield accepted() - save_spendable_output() + self.sync_blocks([b23], True) + self.save_spendable_output() - # Make the next block one byte bigger and check that it fails - tip(15) - b24 = block(24, spend=out[6]) + self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1") + self.move_tip(15) + b24 = self.next_block(24, spend=out[6]) script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69 - script_output = CScript([b'\x00' * (script_length+1)]) + script_output = CScript([b'\x00' * (script_length + 1)]) tx.vout = [CTxOut(0, script_output)] - b24 = update_block(24, [tx]) - assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1) - yield rejected(RejectResult(16, b'bad-blk-length')) + b24 = self.update_block(24, [tx]) + assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1) + self.sync_blocks([b24], False, 16, b'bad-blk-length', reconnect=True) - block(25, spend=out[7]) - yield rejected() + b25 = self.next_block(25, spend=out[7]) + self.sync_blocks([b25], False) # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) - tip(15) - b26 = block(26, spend=out[6]) + self.log.info("Reject a block with coinbase input script size out of range") + self.move_tip(15) + b26 = self.next_block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. 
- b26 = update_block(26, []) - yield rejected(RejectResult(16, b'bad-cb-length')) + b26 = self.update_block(26, []) + self.sync_blocks([b26], False, 16, b'bad-cb-length', reconnect=True) # Extend the b26 chain to make sure bitcoind isn't accepting b26 - block(27, spend=out[7]) - yield rejected(False) + b27 = self.next_block(27, spend=out[7]) + self.sync_blocks([b27], False) # Now try a too-large-coinbase script - tip(15) - b28 = block(28, spend=out[6]) + self.move_tip(15) + b28 = self.next_block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() - b28 = update_block(28, []) - yield rejected(RejectResult(16, b'bad-cb-length')) + b28 = self.update_block(28, []) + self.sync_blocks([b28], False, 16, b'bad-cb-length', reconnect=True) # Extend the b28 chain to make sure bitcoind isn't accepting b28 - block(29, spend=out[7]) - yield rejected(False) + b29 = self.next_block(29, spend=out[7]) + self.sync_blocks([b29], False) # b30 has a max-sized coinbase scriptSig. - tip(23) - b30 = block(30) + self.move_tip(23) + b30 = self.next_block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() - b30 = update_block(30, []) - yield accepted() - save_spendable_output() + b30 = self.update_block(30, []) + self.sync_blocks([b30], True) + self.save_spendable_output() # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY # @@ -433,42 +347,45 @@ class FullBlockTest(ComparisonTestFramework): # # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end. - lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - b31 = block(31, spend=out[8], script=lots_of_multisigs) + self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops") + lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19) + b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs) assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS) - yield accepted() - save_spendable_output() + self.sync_blocks([b31], True) + self.save_spendable_output() # this goes over the limit because the coinbase has one sigop + self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops") too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20)) - b32 = block(32, spend=out[9], script=too_many_multisigs) + b32 = self.next_block(32, spend=out[9], script=too_many_multisigs) assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - + self.sync_blocks([b32], False, 16, b'bad-blk-sigops', reconnect=True) # CHECKMULTISIGVERIFY - tip(31) - lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - block(33, spend=out[9], script=lots_of_multisigs) - yield accepted() - save_spendable_output() - + self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops") + self.move_tip(31) + lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19) + b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs) + self.sync_blocks([b33], True) + self.save_spendable_output() + + self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops") too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20)) - block(34, spend=out[10], script=too_many_multisigs) - yield rejected(RejectResult(16, 
b'bad-blk-sigops')) - + b34 = self.next_block(34, spend=out[10], script=too_many_multisigs) + self.sync_blocks([b34], False, 16, b'bad-blk-sigops', reconnect=True) # CHECKSIGVERIFY - tip(33) + self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops") + self.move_tip(33) lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1)) - b35 = block(35, spend=out[10], script=lots_of_checksigs) - yield accepted() - save_spendable_output() + b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs) + self.sync_blocks([b35], True) + self.save_spendable_output() + self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops") too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS)) - block(36, spend=out[11], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - + b36 = self.next_block(36, spend=out[11], script=too_many_checksigs) + self.sync_blocks([b36], False, 16, b'bad-blk-sigops', reconnect=True) # Check spending of a transaction in a block which failed to connect # @@ -479,17 +396,18 @@ class FullBlockTest(ComparisonTestFramework): # # save 37's spendable output, but then double-spend out11 to invalidate the block - tip(35) - b37 = block(37, spend=out[11]) + self.log.info("Reject a block spending transaction from a block which failed to connect") + self.move_tip(35) + b37 = self.next_block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) - tx = create_and_sign_tx(out[11].tx, out[11].n, 0) - b37 = update_block(37, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + tx = self.create_and_sign_transaction(out[11].tx, out[11].n, 0) + b37 = self.update_block(37, [tx]) + self.sync_blocks([b37], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid - tip(35) - block(38, spend=txout_b37) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.move_tip(35) + b38 = self.next_block(38, spend=txout_b37) + self.sync_blocks([b38], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Check P2SH SigOp counting # @@ -502,45 +420,45 @@ class FullBlockTest(ComparisonTestFramework): # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # - tip(35) - b39 = block(39) + self.log.info("Check P2SH SIGOPS are correctly counted") + self.move_tip(35) + b39 = self.next_block(39) b39_outputs = 0 b39_sigops_per_output = 6 # Build the redeem script, hash it, use hash to create the p2sh script - redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG]) + redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE # This must be signed because it is spending a coinbase spend = out[11] - tx = create_tx(spend.tx, spend.n, 1, p2sh_script) + tx = self.create_tx(spend.tx, spend.n, 1, p2sh_script) tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) self.sign_tx(tx, spend.tx, spend.n) tx.rehash() - b39 = update_block(39, [tx]) + b39 = self.update_block(39, [tx]) b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE tx_new = None tx_last 
= tx - total_size=len(b39.serialize()) + total_size = len(b39.serialize()) while(total_size < MAX_BLOCK_BASE_SIZE): - tx_new = create_tx(tx_last, 1, 1, p2sh_script) + tx_new = self.create_tx(tx_last, 1, 1, p2sh_script) tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() total_size += len(tx_new.serialize()) if total_size >= MAX_BLOCK_BASE_SIZE: break - b39.vtx.append(tx_new) # add tx to block + b39.vtx.append(tx_new) # add tx to block tx_last = tx_new b39_outputs += 1 - b39 = update_block(39, []) - yield accepted() - save_spendable_output() - + b39 = self.update_block(39, []) + self.sync_blocks([b39], True) + self.save_spendable_output() # Test sigops in P2SH redeem scripts # @@ -549,15 +467,16 @@ class FullBlockTest(ComparisonTestFramework): # # b41 does the same, less one, so it has the maximum sigops permitted. # - tip(39) - b40 = block(40, spend=out[12]) + self.log.info("Reject a block with too many P2SH sigops") + self.move_tip(39) + b40 = self.next_block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output assert_equal(numTxes <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) new_txs = [] - for i in range(1, numTxes+1): + for i in range(1, numTxes + 1): tx = CTransaction() tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) tx.vin.append(CTxIn(lastOutpoint, b'')) @@ -579,35 +498,34 @@ class FullBlockTest(ComparisonTestFramework): tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) tx.rehash() new_txs.append(tx) - update_block(40, new_txs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) + self.update_block(40, new_txs) + self.sync_blocks([b40], False, 16, b'bad-blk-sigops', reconnect=True) # same as b40, but one less sigop - tip(39) - block(41, spend=None) - update_block(41, b40.vtx[1:-1]) + self.log.info("Accept a block with the max number of P2SH sigops") + self.move_tip(39) + b41 = self.next_block(41, spend=None) + self.update_block(41, b40.vtx[1:-1]) b41_sigops_to_fill = b40_sigops_to_fill - 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) tx.rehash() - update_block(41, [tx]) - yield accepted() + self.update_block(41, [tx]) + self.sync_blocks([b41], True) # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # - tip(39) - block(42, spend=out[12]) - yield rejected() - save_spendable_output() - - block(43, spend=out[13]) - yield accepted() - save_spendable_output() + self.move_tip(39) + b42 = self.next_block(42, spend=out[12]) + self.save_spendable_output() + b43 = self.next_block(43, spend=out[13]) + self.save_spendable_output() + self.sync_blocks([b42, b43], True) # Test a number of really invalid scenarios # @@ -616,6 +534,7 @@ class FullBlockTest(ComparisonTestFramework): # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
+ self.log.info("Build block 44 manually") height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() @@ -628,10 +547,10 @@ class FullBlockTest(ComparisonTestFramework): self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 - yield accepted() + self.sync_blocks([b44], True) - # A block with a non-coinbase as the first tx - non_coinbase = create_tx(out[15].tx, out[15].n, 1) + self.log.info("Reject a block with a non-coinbase as the first tx") + non_coinbase = self.create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 @@ -640,104 +559,102 @@ class FullBlockTest(ComparisonTestFramework): b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() - self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1 + self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 - yield rejected(RejectResult(16, b'bad-cb-missing')) + self.sync_blocks([b45], False, 16, b'bad-cb-missing', reconnect=True) - # A block with no txns - tip(44) + self.log.info("Reject a block with no transactions") + self.move_tip(44) b46 = CBlock() - b46.nTime = b44.nTime+1 + b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() - self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1 + self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 - s = ser_uint256(b46.hashMerkleRoot) - yield rejected(RejectResult(16, b'bad-blk-length')) + self.sync_blocks([b46], False, 16, b'bad-blk-length', reconnect=True) - # A block with invalid work - tip(44) - b47 = block(47, solve=False) + self.log.info("Reject a block with invalid work") + self.move_tip(44) + b47 = self.next_block(47, solve=False) target = uint256_from_compact(b47.nBits) - while b47.sha256 < target: #changed > to < + while b47.sha256 < target: b47.nNonce += 1 b47.rehash() - yield rejected(RejectResult(16, b'high-hash')) + self.sync_blocks([b47], False, request_block=False) - # A block with timestamp > 2 hrs in the future - tip(44) - b48 = block(48, solve=False) + self.log.info("Reject a block with a timestamp >2 hours in the future") + self.move_tip(44) + b48 = self.next_block(48, solve=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() - yield rejected(RejectResult(16, b'time-too-new')) + self.sync_blocks([b48], False, request_block=False) - # A block with an invalid merkle hash - tip(44) - b49 = block(49) + self.log.info("Reject a block with invalid merkle hash") + self.move_tip(44) + b49 = self.next_block(49) b49.hashMerkleRoot += 1 b49.solve() - yield rejected(RejectResult(16, b'bad-txnmrklroot')) + self.sync_blocks([b49], False, 16, b'bad-txnmrklroot', reconnect=True) - # A block with an incorrect POW limit - tip(44) - b50 = block(50) + self.log.info("Reject a block with incorrect POW limit") + self.move_tip(44) + b50 = self.next_block(50) b50.nBits = b50.nBits - 1 b50.solve() - yield rejected(RejectResult(16, b'bad-diffbits')) + self.sync_blocks([b50], False, request_block=False, reconnect=True) - # A block with two coinbase txns - tip(44) - b51 = block(51) + self.log.info("Reject a block with two coinbase transactions") + self.move_tip(44) + b51 = self.next_block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) - b51 = update_block(51, [cb2]) - yield 
rejected(RejectResult(16, b'bad-cb-multiple')) + b51 = self.update_block(51, [cb2]) + self.sync_blocks([b51], False, 16, b'bad-cb-multiple', reconnect=True) - # A block w/ duplicate txns + self.log.info("Reject a block with duplicate transactions") # Note: txns have to be in the right position in the merkle tree to trigger this error - tip(44) - b52 = block(52, spend=out[15]) - tx = create_tx(b52.vtx[1], 0, 1) - b52 = update_block(52, [tx, tx]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) + self.move_tip(44) + b52 = self.next_block(52, spend=out[15]) + tx = self.create_tx(b52.vtx[1], 0, 1) + b52 = self.update_block(52, [tx, tx]) + self.sync_blocks([b52], False, 16, b'bad-txns-duplicate', reconnect=True) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # - tip(43) - block(53, spend=out[14]) - yield rejected() # rejected since b44 is at same height - save_spendable_output() + self.move_tip(43) + b53 = self.next_block(53, spend=out[14]) + self.sync_blocks([b53], False) + self.save_spendable_output() - # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast) - b54 = block(54, spend=out[15]) + self.log.info("Reject a block with timestamp before MedianTimePast") + b54 = self.next_block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() - yield rejected(RejectResult(16, b'time-too-old')) + self.sync_blocks([b54], False, request_block=False) # valid timestamp - tip(53) - b55 = block(55, spend=out[15]) + self.move_tip(53) + b55 = self.next_block(55, spend=out[15]) b55.nTime = b35.nTime - update_block(55, []) - yield accepted() - save_spendable_output() + self.update_block(55, []) + self.sync_blocks([b55], True) + self.save_spendable_output() - - # Test CVE-2012-2459 + # Test Merkle tree malleability # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # - # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without + # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # @@ -758,46 +675,48 @@ class FullBlockTest(ComparisonTestFramework): # that the error was caught early, avoiding a DOS vulnerability.) 
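# [Editor's note] Illustrative sketch, not part of this patch: the malleability exists because
# Bitcoin's merkle construction duplicates the last hash of any odd-length level, so appending a
# copy of the final transaction leaves the merkle root (and hence the block header hash)
# unchanged. A standalone check using only the standard library:
from hashlib import sha256

def dsha256(data):
    return sha256(sha256(data).digest()).digest()

def merkle_root(hashes):
    hashes = list(hashes)
    while len(hashes) > 1:
        if len(hashes) % 2:
            hashes.append(hashes[-1])  # odd-length level: duplicate the last hash
        hashes = [dsha256(hashes[i] + hashes[i + 1]) for i in range(0, len(hashes), 2)]
    return hashes[0]

a, b, c = (dsha256(bytes([i])) for i in range(3))
assert merkle_root([a, b, c]) == merkle_root([a, b, c, c])  # same root, different tx list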
# b57 - a good block with 2 txs, don't submit until end - tip(55) - b57 = block(57) - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - b57 = update_block(57, [tx, tx1]) + self.move_tip(55) + b57 = self.next_block(57) + tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1) + tx1 = self.create_tx(tx, 0, 1) + b57 = self.update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx - tip(55) + self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)") + self.move_tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 - assert_equal(len(b56.vtx),3) - b56 = update_block(56, [tx1]) + assert_equal(len(b56.vtx), 3) + b56 = self.update_block(56, [tx1]) assert_equal(b56.hash, b57.hash) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) + self.sync_blocks([b56], False, 16, b'bad-txns-duplicate', reconnect=True) # b57p2 - a good block with 6 tx'es, don't submit until end - tip(55) - b57p2 = block("57p2") - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - tx2 = create_tx(tx1, 0, 1) - tx3 = create_tx(tx2, 0, 1) - tx4 = create_tx(tx3, 0, 1) - b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) + self.move_tip(55) + b57p2 = self.next_block("57p2") + tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1) + tx1 = self.create_tx(tx, 0, 1) + tx2 = self.create_tx(tx1, 0, 1) + tx3 = self.create_tx(tx2, 0, 1) + tx4 = self.create_tx(tx3, 0, 1) + b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's - tip(55) + self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)") + self.move_tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(b56p2.hash, b57p2.hash) - assert_equal(len(b56p2.vtx),6) - b56p2 = update_block("b56p2", [tx3, tx4]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) + assert_equal(len(b56p2.vtx), 6) + b56p2 = self.update_block("b56p2", [tx3, tx4]) + self.sync_blocks([b56p2], False, 16, b'bad-txns-duplicate', reconnect=True) - tip("57p2") - yield accepted() + self.move_tip("57p2") + self.sync_blocks([b57p2], True) - tip(57) - yield rejected() #rejected because 57p2 seen first - save_spendable_output() + self.move_tip(57) + self.sync_blocks([b57], False) # The tip is not updated because 57p2 seen first + self.save_spendable_output() # Test a few invalid tx types # @@ -806,28 +725,30 @@ class FullBlockTest(ComparisonTestFramework): # # tx with prevout.n out of range - tip(57) - b58 = block(58, spend=out[17]) + self.log.info("Reject a block with a transaction with prevout.n out of range") + self.move_tip(57) + b58 = self.next_block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].tx.vout) < 42) tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) tx.calc_sha256() - b58 = update_block(58, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + b58 = self.update_block(58, [tx]) + self.sync_blocks([b58], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) - # tx with output value > input value out of range - tip(57) - b59 = block(59) - tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN) - b59 = update_block(59, [tx]) - yield rejected(RejectResult(16, b'bad-txns-in-belowout')) + # tx with output value > input value + self.log.info("Reject a block with a transaction with outputs > inputs") + self.move_tip(57) 
+ b59 = self.next_block(59) + tx = self.create_and_sign_transaction(out[17].tx, out[17].n, 51 * COIN) + b59 = self.update_block(59, [tx]) + self.sync_blocks([b59], False, 16, b'bad-txns-in-belowout', reconnect=True) # reset to good chain - tip(57) - b60 = block(60, spend=out[17]) - yield accepted() - save_spendable_output() + self.move_tip(57) + b60 = self.next_block(60, spend=out[17]) + self.sync_blocks([b60], True) + self.save_spendable_output() # Test BIP30 # @@ -838,46 +759,46 @@ class FullBlockTest(ComparisonTestFramework): # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. # - tip(60) - b61 = block(61, spend=out[18]) - b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases + self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)") + self.move_tip(60) + b61 = self.next_block(61, spend=out[18]) + b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases b61.vtx[0].rehash() - b61 = update_block(61, []) + b61 = self.update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) - yield rejected(RejectResult(16, b'bad-txns-BIP30')) - + self.sync_blocks([b61], False, 16, b'bad-txns-BIP30', reconnect=True) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # - tip(60) - b62 = block(62) + self.log.info("Reject a block with a transaction with a nonfinal locktime") + self.move_tip(60) + b62 = self.next_block(62) tx = CTransaction() - tx.nLockTime = 0xffffffff #this locktime is non-final + tx.nLockTime = 0xffffffff # this locktime is non-final assert(out[18].n < len(out[18].tx.vout)) - tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence + tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert(tx.vin[0].nSequence < 0xffffffff) tx.calc_sha256() - b62 = update_block(62, [tx]) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - + b62 = self.update_block(62, [tx]) + self.sync_blocks([b62], False, 16, b'bad-txns-nonfinal') # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # - tip(60) - b63 = block(63) + self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime") + self.move_tip(60) + b63 = self.next_block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() - b63 = update_block(63, []) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - + b63 = self.update_block(63, []) + self.sync_blocks([b63], False, 16, b'bad-txns-nonfinal') # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint, @@ -893,8 +814,9 @@ class FullBlockTest(ComparisonTestFramework): # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # - tip(60) - regular_block = block("64a", spend=out[18]) + self.log.info("Accept a valid block even if a bloated version of the block has previously been sent") + self.move_tip(60) + regular_block = self.next_block("64a", spend=out[18]) 
# make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) @@ -908,45 +830,51 @@ class FullBlockTest(ComparisonTestFramework): script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) - b64a = update_block("64a", [tx]) + b64a = self.update_block("64a", [tx]) assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8) - yield TestInstance([[self.tip, None]]) + self.sync_blocks([b64a], False, 1, b'error parsing message') - # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore - self.test.block_store.erase(b64a.sha256) + # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently + # resend the header message, it won't send us the getdata message again. Just + # disconnect and reconnect and then call sync_blocks. + # TODO: improve this test to be less dependent on P2P DOS behaviour. + node.disconnect_p2ps() + self.reconnect_p2p() - tip(60) + self.move_tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE) self.blocks[64] = b64 - update_block(64, []) - yield accepted() - save_spendable_output() + b64 = self.update_block(64, []) + self.sync_blocks([b64], True) + self.save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # - tip(64) - block(65) - tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 0) - update_block(65, [tx1, tx2]) - yield accepted() - save_spendable_output() + self.log.info("Accept a block with a transaction spending an output created in the same block") + self.move_tip(64) + b65 = self.next_block(65) + tx1 = self.create_and_sign_transaction(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) + tx2 = self.create_and_sign_transaction(tx1, 0, 0) + b65 = self.update_block(65, [tx1, tx2]) + self.sync_blocks([b65], True) + self.save_spendable_output() # Attempt to spend an output created later in the same block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b66 (20) - tip(65) - block(66) - tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 1) - update_block(66, [tx2, tx1]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.log.info("Reject a block with a transaction spending an output created later in the same block") + self.move_tip(65) + b66 = self.next_block(66) + tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) + tx2 = self.create_and_sign_transaction(tx1, 0, 1) + b66 = self.update_block(66, [tx2, tx1]) + self.sync_blocks([b66], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Attempt to double-spend a transaction created in a block # @@ -954,13 +882,14 @@ class FullBlockTest(ComparisonTestFramework): # \-> b67 (20) # # - tip(65) - block(67) - tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 1) - tx3 = create_and_sign_tx(tx1, 0, 2) - update_block(67, [tx1, tx2, tx3]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + self.log.info("Reject a block with a transaction double spending a transaction created in the same block") + self.move_tip(65) + b67 = self.next_block(67) 
+ tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) + tx2 = self.create_and_sign_transaction(tx1, 0, 1) + tx3 = self.create_and_sign_transaction(tx1, 0, 2) + b67 = self.update_block(67, [tx1, tx2, tx3]) + self.sync_blocks([b67], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # More tests of block subsidy # @@ -974,34 +903,36 @@ class FullBlockTest(ComparisonTestFramework): # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # - tip(65) - block(68, additional_coinbase_value=10) - tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9) - update_block(68, [tx]) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - tip(65) - b69 = block(69, additional_coinbase_value=10) - tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10) - update_block(69, [tx]) - yield accepted() - save_spendable_output() + self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction") + self.move_tip(65) + b68 = self.next_block(68, additional_coinbase_value=10) + tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9) + b68 = self.update_block(68, [tx]) + self.sync_blocks([b68], False, 16, b'bad-cb-amount', reconnect=True) + + self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction") + self.move_tip(65) + b69 = self.next_block(69, additional_coinbase_value=10) + tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10) + self.update_block(69, [tx]) + self.sync_blocks([b69], True) + self.save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # - tip(69) - block(70, spend=out[21]) + self.log.info("Reject a block containing a transaction spending from a non-existent input") + self.move_tip(69) + b70 = self.next_block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) - update_block(70, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - + b70 = self.update_block(70, [tx]) + self.sync_blocks([b70], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # @@ -1009,13 +940,13 @@ class FullBlockTest(ComparisonTestFramework): # \-> b71 (21) # # b72 is a good block. - # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71. - # - tip(69) - b72 = block(72) - tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) - tx2 = create_and_sign_tx(tx1, 0, 1) - b72 = update_block(72, [tx1, tx2]) # now tip is 72 + # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72. 
+        self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability)")
+        self.move_tip(69)
+        b72 = self.next_block(72)
+        tx1 = self.create_and_sign_transaction(out[21].tx, out[21].n, 2)
+        tx2 = self.create_and_sign_transaction(tx1, 0, 1)
+        b72 = self.update_block(72, [tx1, tx2])  # now tip is 72
         b71 = copy.deepcopy(b72)
         b71.vtx.append(tx2)  # add duplicate tx2
         self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1  # b71 builds off b69
@@ -1025,12 +956,12 @@ class FullBlockTest(ComparisonTestFramework):
         assert_equal(len(b72.vtx), 3)
         assert_equal(b72.sha256, b71.sha256)

-        tip(71)
-        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
-        tip(72)
-        yield accepted()
-        save_spendable_output()
+        self.move_tip(71)
+        self.sync_blocks([b71], False, 16, b'bad-txns-duplicate', reconnect=True)
+        self.move_tip(72)
+        self.sync_blocks([b72], True)
+        self.save_spendable_output()

         # Test some invalid scripts and MAX_BLOCK_SIGOPS
         #
@@ -1048,23 +979,23 @@ class FullBlockTest(ComparisonTestFramework):
         #                   bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
         #                   bytearray[20,004-20,525]: unread data (script_element)
         #                   bytearray[20,526]       : OP_CHECKSIG (this puts us over the limit)
-        #
-        tip(72)
-        b73 = block(73)
+        self.log.info("Reject a block containing too many sigops after a large script element")
+        self.move_tip(72)
+        b73 = self.next_block(73)
         size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
         a = bytearray([OP_CHECKSIG] * size)
-        a[MAX_BLOCK_SIGOPS - 1] = int("4e",16)  # OP_PUSHDATA4
+        a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16)  # OP_PUSHDATA4
         element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
         a[MAX_BLOCK_SIGOPS] = element_size % 256
-        a[MAX_BLOCK_SIGOPS+1] = element_size // 256
-        a[MAX_BLOCK_SIGOPS+2] = 0
-        a[MAX_BLOCK_SIGOPS+3] = 0
+        a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
+        a[MAX_BLOCK_SIGOPS + 2] = 0
+        a[MAX_BLOCK_SIGOPS + 3] = 0

-        tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
-        b73 = update_block(73, [tx])
-        assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
-        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+        tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
+        b73 = self.update_block(73, [tx])
+        assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
+        self.sync_blocks([b73], False, 16, b'bad-blk-sigops', reconnect=True)

         # b74/75 - if we push an invalid script element, all previous sigops are counted,
         # but sigops after the element are not counted.
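# Editor's sketch (not part of the patch): the length encoding used in b73 above.
# element_size is MAX_SCRIPT_ELEMENT_SIZE + 1 = 521, and the four bytes written
# after the OP_PUSHDATA4 opcode (0x4e) are its little-endian encoding, 09 02 00 00;
# b74 below instead writes the bogus length fe ff ff ff to make the push invalid.
import struct

MAX_SCRIPT_ELEMENT_SIZE = 520
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
assert struct.pack('<I', element_size) == bytes([element_size % 256, element_size // 256, 0, 0])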
@@ -1076,45 +1007,44 @@ class FullBlockTest(ComparisonTestFramework): # # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element - # - # - tip(72) - b74 = block(74) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 + self.log.info("Check sigops are counted correctly after an invalid script element") + self.move_tip(72) + b74 = self.next_block(74) + size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS] = 0x4e - a[MAX_BLOCK_SIGOPS+1] = 0xfe - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - a[MAX_BLOCK_SIGOPS+4] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b74 = update_block(74, [tx]) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - tip(72) - b75 = block(75) + a[MAX_BLOCK_SIGOPS + 1] = 0xfe + a[MAX_BLOCK_SIGOPS + 2] = 0xff + a[MAX_BLOCK_SIGOPS + 3] = 0xff + a[MAX_BLOCK_SIGOPS + 4] = 0xff + tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a)) + b74 = self.update_block(74, [tx]) + self.sync_blocks([b74], False, 16, b'bad-blk-sigops', reconnect=True) + + self.move_tip(72) + b75 = self.next_block(75) size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e + a[MAX_BLOCK_SIGOPS - 1] = 0x4e a[MAX_BLOCK_SIGOPS] = 0xff - a[MAX_BLOCK_SIGOPS+1] = 0xff - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b75 = update_block(75, [tx]) - yield accepted() - save_spendable_output() + a[MAX_BLOCK_SIGOPS + 1] = 0xff + a[MAX_BLOCK_SIGOPS + 2] = 0xff + a[MAX_BLOCK_SIGOPS + 3] = 0xff + tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a)) + b75 = self.update_block(75, [tx]) + self.sync_blocks([b75], True) + self.save_spendable_output() # Check that if we push an element filled with CHECKSIGs, they are not counted - tip(75) - b76 = block(76) + self.move_tip(75) + b76 = self.next_block(76) size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs - tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) - b76 = update_block(76, [tx]) - yield accepted() - save_spendable_output() + a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs + tx = self.create_and_sign_transaction(out[23].tx, 0, 1, CScript(a)) + b76 = self.update_block(76, [tx]) + self.sync_blocks([b76], True) + self.save_spendable_output() # Test transaction resurrection # @@ -1133,39 +1063,39 @@ class FullBlockTest(ComparisonTestFramework): # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. 
(Perhaps to spend to a P2SH OP_TRUE script) - # - tip(76) - block(77) - tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN) - update_block(77, [tx77]) - yield accepted() - save_spendable_output() - - block(78) - tx78 = create_tx(tx77, 0, 9*COIN) - update_block(78, [tx78]) - yield accepted() - - block(79) - tx79 = create_tx(tx78, 0, 8*COIN) - update_block(79, [tx79]) - yield accepted() + self.log.info("Test transaction resurrection during a re-org") + self.move_tip(76) + b77 = self.next_block(77) + tx77 = self.create_and_sign_transaction(out[24].tx, out[24].n, 10 * COIN) + b77 = self.update_block(77, [tx77]) + self.sync_blocks([b77], True) + self.save_spendable_output() + + b78 = self.next_block(78) + tx78 = self.create_tx(tx77, 0, 9 * COIN) + b78 = self.update_block(78, [tx78]) + self.sync_blocks([b78], True) + + b79 = self.next_block(79) + tx79 = self.create_tx(tx78, 0, 8 * COIN) + b79 = self.update_block(79, [tx79]) + self.sync_blocks([b79], True) # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) - tip(77) - block(80, spend=out[25]) - yield rejected() - save_spendable_output() + self.move_tip(77) + b80 = self.next_block(80, spend=out[25]) + self.sync_blocks([b80], False, request_block=False) + self.save_spendable_output() - block(81, spend=out[26]) - yield rejected() # other chain is same length - save_spendable_output() + b81 = self.next_block(81, spend=out[26]) + self.sync_blocks([b81], False, request_block=False) # other chain is same length + self.save_spendable_output() - block(82, spend=out[27]) - yield accepted() # now this chain is longer, triggers re-org - save_spendable_output() + b82 = self.next_block(82, spend=out[27]) + self.sync_blocks([b82], True) # now this chain is longer, triggers re-org + self.save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's mempool mempool = self.nodes[0].getrawmempool() @@ -1173,33 +1103,32 @@ class FullBlockTest(ComparisonTestFramework): assert(tx78.hash in mempool) assert(tx79.hash in mempool) - # Test invalid opcodes in dead execution paths. 
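# Editor's note (not part of the patch): after the re-org onto b80-b82 above, only
# tx78 and tx79 are expected back in the mempool; tx77 remains confirmed because
# b77 is part of both the old and the new chain.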
# # -> b81 (26) -> b82 (27) -> b83 (28) # - block(83) + self.log.info("Accept a block with invalid opcodes in dead execution paths") + b83 = self.next_block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) - tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) + tx1 = self.create_and_sign_transaction(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) - tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) + tx2 = self.create_and_sign_transaction(tx1, 0, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() - update_block(83, [tx1, tx2]) - yield accepted() - save_spendable_output() - + b83 = self.update_block(83, [tx1, tx2]) + self.sync_blocks([b83], True) + self.save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # - # - block(84) - tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) + self.log.info("Test re-orging blocks with OP_RETURN in them") + b84 = self.next_block(84) + tx1 = self.create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) @@ -1207,87 +1136,186 @@ class FullBlockTest(ComparisonTestFramework): tx1.calc_sha256() self.sign_tx(tx1, out[29].tx, out[29].n) tx1.rehash() - tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) + tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) + tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) + tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) + tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN])) - update_block(84, [tx1,tx2,tx3,tx4,tx5]) - yield accepted() - save_spendable_output() + b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5]) + self.sync_blocks([b84], True) + self.save_spendable_output() - tip(83) - block(85, spend=out[29]) - yield rejected() + self.move_tip(83) + b85 = self.next_block(85, spend=out[29]) + self.sync_blocks([b85], False) # other chain is same length - block(86, spend=out[30]) - yield accepted() + b86 = self.next_block(86, spend=out[30]) + self.sync_blocks([b86], True) - tip(84) - block(87, spend=out[30]) - yield rejected() - save_spendable_output() + self.move_tip(84) + b87 = self.next_block(87, spend=out[30]) + self.sync_blocks([b87], False) # other chain is same length + self.save_spendable_output() - block(88, spend=out[31]) - yield accepted() - save_spendable_output() + b88 = self.next_block(88, spend=out[31]) + self.sync_blocks([b88], True) + self.save_spendable_output() # trying to spend the OP_RETURN output is rejected - block("89a", spend=out[32]) - tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) - update_block("89a", [tx]) - yield rejected() - - - # Test re-org of a week's worth of blocks (1088 blocks) - # This test takes a minute or two and can be accomplished in memory - # - if self.options.runbarelyexpensive: - tip(88) - LARGE_REORG_SIZE = 1088 - test1 = TestInstance(sync_every_block=False) - spend=out[32] - for i in range(89, LARGE_REORG_SIZE + 89): - b = block(i, spend) - tx = 
CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) - b = update_block(i, [tx]) - assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) - test1.blocks_and_transactions.append([self.tip, True]) - save_spendable_output() - spend = get_spendable_output() - - yield test1 - chain1_tip = i - - # now create alt chain of same length - tip(88) - test2 = TestInstance(sync_every_block=False) - for i in range(89, LARGE_REORG_SIZE + 89): - block("alt"+str(i)) - test2.blocks_and_transactions.append([self.tip, False]) - yield test2 - - # extend alt chain to trigger re-org - block("alt" + str(chain1_tip + 1)) - yield accepted() - - # ... and re-org back to the first chain - tip(chain1_tip) - block(chain1_tip + 1) - yield rejected() - block(chain1_tip + 2) - yield accepted() - - chain1_tip += 2 + b89a = self.next_block("89a", spend=out[32]) + tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE])) + b89a = self.update_block("89a", [tx]) + self.sync_blocks([b89a], False, 16, b'bad-txns-inputs-missingorspent', reconnect=True) + + self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)") + + self.move_tip(88) + LARGE_REORG_SIZE = 1088 + blocks = [] + spend = out[32] + for i in range(89, LARGE_REORG_SIZE + 89): + b = self.next_block(i, spend) + tx = CTransaction() + script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 + script_output = CScript([b'\x00' * script_length]) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) + b = self.update_block(i, [tx]) + assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) + blocks.append(b) + self.save_spendable_output() + spend = self.get_spendable_output() + + self.sync_blocks(blocks, True, timeout=180) + chain1_tip = i + + # now create alt chain of same length + self.move_tip(88) + blocks2 = [] + for i in range(89, LARGE_REORG_SIZE + 89): + blocks2.append(self.next_block("alt" + str(i))) + self.sync_blocks(blocks2, False, request_block=False) + + # extend alt chain to trigger re-org + block = self.next_block("alt" + str(chain1_tip + 1)) + self.sync_blocks([block], True, timeout=180) + + # ... 
and re-org back to the first chain + self.move_tip(chain1_tip) + block = self.next_block(chain1_tip + 1) + self.sync_blocks([block], False, request_block=False) + block = self.next_block(chain1_tip + 2) + self.sync_blocks([block], True, timeout=180) + + # Helper methods + ################ + + def add_transactions_to_block(self, block, tx_list): + [tx.rehash() for tx in tx_list] + block.vtx.extend(tx_list) + + # this is a little handier to use than the version in blocktools.py + def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): + return create_transaction(spend_tx, n, b"", value, script) + + # sign a transaction, using the key we know about + # this signs input 0 in tx, which is assumed to be spending output n in spend_tx + def sign_tx(self, tx, spend_tx, n): + scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) + if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend + tx.vin[0].scriptSig = CScript() + return + (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) + tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) + + def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): + tx = self.create_tx(spend_tx, n, value, script) + self.sign_tx(tx, spend_tx, n) + tx.rehash() + return tx + + def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True): + if self.tip is None: + base_block_hash = self.genesis_hash + block_time = int(time.time()) + 1 + else: + base_block_hash = self.tip.sha256 + block_time = self.tip.nTime + 1 + # First create the coinbase + height = self.block_heights[base_block_hash] + 1 + coinbase = create_coinbase(height, self.coinbase_pubkey) + coinbase.vout[0].nValue += additional_coinbase_value + coinbase.rehash() + if spend is None: + block = create_block(base_block_hash, coinbase, block_time) + else: + coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees + coinbase.rehash() + block = create_block(base_block_hash, coinbase, block_time) + tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi + self.sign_tx(tx, spend.tx, spend.n) + self.add_transactions_to_block(block, [tx]) + block.hashMerkleRoot = block.calc_merkle_root() + if solve: + block.solve() + self.tip = block + self.block_heights[block.sha256] = height + assert number not in self.blocks + self.blocks[number] = block + return block + + # save the current tip so it can be spent by a later block + def save_spendable_output(self): + self.log.debug("saving spendable output %s" % self.tip.vtx[0]) + self.spendable_outputs.append(self.tip) + + # get an output that we previously marked as spendable + def get_spendable_output(self): + self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0]) + return PreviousSpendableOutput(self.spendable_outputs.pop(0).vtx[0], 0) + + # move the tip back to a previous block + def move_tip(self, number): + self.tip = self.blocks[number] + + # adds transactions to the block and updates state + def update_block(self, block_number, new_transactions): + block = self.blocks[block_number] + self.add_transactions_to_block(block, new_transactions) + old_sha256 = block.sha256 + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + # Update the internal state just like in next_block + self.tip = block + if block.sha256 != old_sha256: + self.block_heights[block.sha256] = self.block_heights[old_sha256] + del self.block_heights[old_sha256] 
+                self.blocks[block_number] = block
+        return block
+
+    def reconnect_p2p(self):
+        """Add a P2P connection to the node.
+
+        The node gets disconnected several times in this test. This helper
+        method reconnects the p2p and restarts the network thread."""
+
+        network_thread_join()
+        self.nodes[0].disconnect_p2ps()
+        self.nodes[0].add_p2p_connection(P2PDataStore())
+        network_thread_start()
+        self.nodes[0].p2p.wait_for_verack()
+
+    def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True, reconnect=False, timeout=60):
+        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
+        Call with success = False if the tip shouldn't advance to the most recent block."""
+        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block, timeout=timeout)
+        if reconnect:
+            self.reconnect_p2p()

 if __name__ == '__main__':
     FullBlockTest().main()
diff --git a/test/functional/feature_blocksdir.py b/test/functional/feature_blocksdir.py
new file mode 100755
index 0000000000..a77014a524
--- /dev/null
+++ b/test/functional/feature_blocksdir.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the blocksdir option.
+"""
+
+import os
+import re
+import shutil
+
+from test_framework.test_framework import BitcoinTestFramework, initialize_datadir
+
+
+class BlocksdirTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+
+    def run_test(self):
+        self.stop_node(0)
+        shutil.rmtree(self.nodes[0].datadir)
+        initialize_datadir(self.options.tmpdir, 0)
+        self.log.info("Starting with nonexistent blocksdir ...")
+        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
+        self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], re.escape('Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path)))
+        os.mkdir(blocksdir_path)
+        self.log.info("Starting with existing blocksdir ...")
+        self.start_node(0, ["-blocksdir=" + blocksdir_path])
+        self.log.info("Mining blocks...")
+        self.nodes[0].generate(10)
+        assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat"))
+        assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index"))
+
+
+if __name__ == '__main__':
+    BlocksdirTest().main()
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 8e97df2361..32a6bd5d59 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -133,12 +133,12 @@ class EstimateFeeTest(BitcoinTestFramework):
         which we will use to generate our transactions.
         """
         self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
-                                      ["-maxorphantx=1000"],
-                                      ["-maxorphantx=1000"]])
+                                      ["-blockmaxweight=68000", "-maxorphantx=1000"],
+                                      ["-blockmaxweight=32000", "-maxorphantx=1000"]])

         # Use node0 to mine blocks for input splitting
         # Node1 mines small blocks but that are bigger than the expected transaction rate.
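# Editor's note (not part of the patch): rough arithmetic behind these limits.
# CreateNewBlock starts counting at 4,000 weight units and one virtual byte costs
# four weight units, so -blockmaxweight=68000 leaves roughly (68000 - 4000) / 4 =
# 16,000 vbytes of room and -blockmaxweight=32000 about 7,000 vbytes.
assert (68000 - 4000) // 4 == 16000
assert (32000 - 4000) // 4 == 7000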
- # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes, - # (17k is room enough for 110 or so transactions) + # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight, + # (68k weight is room enough for 120 or so transactions) # Node2 is a stingy miner, that # produces too small blocks (room for only 55 or so transactions) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 227ae6cb36..3adde8dd73 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -124,7 +124,7 @@ class PruneTest(BitcoinTestFramework): # Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) self.stop_node(1) - self.start_node(1, extra_args=["-maxreceivebuffer=20000","-checkblocks=5", "-disablesafemode"]) + self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5", "-disablesafemode"]) height = self.nodes[1].getblockcount() self.log.info("Current block height: %d" % height) @@ -147,7 +147,7 @@ class PruneTest(BitcoinTestFramework): # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) - self.start_node(1, extra_args=["-maxreceivebuffer=20000","-checkblocks=5", "-disablesafemode"]) + self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5", "-disablesafemode"]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index c6cb4c54cd..6f585f6825 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -85,7 +85,7 @@ class RESTTest (BitcoinTestFramework): ####################################### # GETUTXOS: query an unspent outpoint # ####################################### - json_request = '/checkmempool/'+txid+'-'+str(n) + json_request = '/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) @@ -100,7 +100,7 @@ class RESTTest (BitcoinTestFramework): ################################################# # GETUTXOS: now query an already spent outpoint # ################################################# - json_request = '/checkmempool/'+vintx+'-0' + json_request = '/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) @@ -117,7 +117,7 @@ class RESTTest (BitcoinTestFramework): ################################################## # GETUTXOS: now check both with the same request # ################################################## - json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0' + json_request = '/'+txid+'-'+str(n)+'/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) @@ -151,23 +151,48 @@ class RESTTest (BitcoinTestFramework): txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) - vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) + # get the spent output to later check 
for utxo (should be spent by then) + spent = '{}-{}'.format(json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: if vout['value'] == 0.1: n = vout['n'] + spending = '{}-{}'.format(txid, n) - json_request = '/'+txid+'-'+str(n) + json_request = '/'+spending json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) - assert_equal(len(json_obj['utxos']), 0) #there should be an outpoint because it has just added to the mempool + assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because it has just added to the mempool - json_request = '/checkmempool/'+txid+'-'+str(n) + json_request = '/checkmempool/'+spending json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just added to the mempool + json_request = '/'+spent + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because its spending tx is not confirmed + + json_request = '/checkmempool/'+spent + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because it has just spent (by mempool tx) + + self.nodes[0].generate(1) + self.sync_all() + + json_request = '/'+spending + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it was mined + + json_request = '/checkmempool/'+spending + json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it was mined + #do some invalid requests json_request = '{"checkmempool' response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 8b226c2e9d..e6af35fc3d 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -425,7 +425,7 @@ class SegWitTest(BitcoinTestFramework): assert(self.nodes[0].getbestblockhash() == block.hash) - # Now make sure that malleating the witness nonce doesn't + # Now make sure that malleating the witness reserved value doesn't # result in a block permanently marked bad. 
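# Editor's note (not part of the patch, per BIP141): the witness reserved value is
# the 32-byte witness of the coinbase input. It feeds the witness commitment,
#   commitment = SHA256d(witness merkle root || witness reserved value)
# but it is not covered by the coinbase txid, so malleating it leaves the block
# hash unchanged while breaking the commitment check, which is the case the test
# below exercises.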
block = self.build_next_block() add_witness_commitment(block) @@ -436,7 +436,7 @@ class SegWitTest(BitcoinTestFramework): block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ] test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - # Changing the witness nonce doesn't change the block hash + # Changing the witness reserved value doesn't change the block hash block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ] test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 041e2b86e8..a24a2ec4f5 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -8,6 +8,7 @@ from base64 import b64encode from binascii import hexlify, unhexlify from decimal import Decimal, ROUND_DOWN import hashlib +import inspect import json import logging import os @@ -204,9 +205,9 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N if attempts == float('inf') and timeout == float('inf'): timeout = 60 attempt = 0 - timeout += time.time() + time_end = time.time() + timeout - while attempt < attempts and time.time() < timeout: + while attempt < attempts and time.time() < time_end: if lock: with lock: if predicate(): @@ -218,8 +219,12 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N time.sleep(0.05) # Print the cause of the timeout - assert_greater_than(attempts, attempt) - assert_greater_than(timeout, time.time()) + predicate_source = inspect.getsourcelines(predicate) + logger.error("wait_until() failed. Predicate: {}".format(predicate_source)) + if attempt >= attempts: + raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts)) + elif time.time() >= time_end: + raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) raise RuntimeError('Unreachable') # RPC/P2P connection constants and functions diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index a2eaf99146..39f1180a45 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -136,6 +136,7 @@ BASE_SCRIPTS= [ 'p2p_unrequested_blocks.py', 'feature_logging.py', 'p2p_node_network_limited.py', + 'feature_blocksdir.py', 'feature_config_args.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time @@ -366,7 +367,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove def print_results(test_results, max_len_name, runtime): results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] - test_results.sort(key=lambda result: result.name.lower()) + test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 @@ -377,7 +378,11 @@ def print_results(test_results, max_len_name, runtime): results += str(test_result) status = TICK + "Passed" if all_passed else CROSS + "Failed" + if not all_passed: + results += RED[1] results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] + if not all_passed: + results += RED[0] results += "Runtime: %s s\n" % (runtime) print(results) @@ -454,6 +459,14 @@ class TestResult(): self.time = time self.padding = 0 + def sort_key(self): + if self.status == "Passed": + return 0, self.name.lower() + elif 
self.status == "Failed": + return 2, self.name.lower() + elif self.status == "Skipped": + return 1, self.name.lower() + def __repr__(self): if self.status == "Passed": color = BLUE |