34 files changed, 1140 insertions, 515 deletions
diff --git a/.travis.yml b/.travis.yml index 6e3fc5144c..69397c26bf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,8 +8,6 @@ cache: - depends/built - depends/sdk-sources - $HOME/.ccache -git: - depth: 1 env: global: - MAKEJOBS=-j3 diff --git a/configure.ac b/configure.ac index 4f57750a1a..c422914a26 100644 --- a/configure.ac +++ b/configure.ac @@ -219,6 +219,12 @@ AC_ARG_ENABLE([debug], [enable_debug=$enableval], [enable_debug=no]) +# Enable different -fsanitize options +AC_ARG_WITH([sanitizers], + [AS_HELP_STRING([--with-sanitizers], + [comma separated list of extra sanitizers to build with (default is none enabled)])], + [use_sanitizers=$withval]) + # Enable gprof profiling AC_ARG_ENABLE([gprof], [AS_HELP_STRING([--enable-gprof], @@ -247,6 +253,26 @@ if test "x$enable_debug" = xyes; then fi fi +if test x$use_sanitizers != x; then + # First check if the compiler accepts flags. If an incompatible pair like + # -fsanitize=address,thread is used here, this check will fail. This will also + # fail if a bad argument is passed, e.g. -fsanitize=undfeined + AX_CHECK_COMPILE_FLAG( + [[-fsanitize=$use_sanitizers]], + [[SANITIZER_CXXFLAGS=-fsanitize=$use_sanitizers]], + [AC_MSG_ERROR([compiler did not accept requested flags])]) + + # Some compilers (e.g. GCC) require additional libraries like libasan, + # libtsan, libubsan, etc. Make sure linking still works with the sanitize + # flag. This is a separate check so we can give a better error message when + # the sanitize flags are supported by the compiler but the actual sanitizer + # libs are missing. + AX_CHECK_LINK_FLAG( + [[-fsanitize=$use_sanitizers]], + [[SANITIZER_LDFLAGS=-fsanitize=$use_sanitizers]], + [AC_MSG_ERROR([linker did not accept requested flags, you are missing required libraries])]) +fi + ERROR_CXXFLAGS= if test "x$enable_werror" = "xyes"; then if test "x$CXXFLAG_WERROR" = "x"; then @@ -1258,6 +1284,8 @@ AC_SUBST(HARDENED_CPPFLAGS) AC_SUBST(HARDENED_LDFLAGS) AC_SUBST(PIC_FLAGS) AC_SUBST(PIE_FLAGS) +AC_SUBST(SANITIZER_CXXFLAGS) +AC_SUBST(SANITIZER_LDFLAGS) AC_SUBST(SSE42_CXXFLAGS) AC_SUBST(LIBTOOL_APP_LDFLAGS) AC_SUBST(USE_UPNP) diff --git a/contrib/devtools/check-doc.py b/contrib/devtools/check-doc.py index 4e87cdae82..0c2e1a24be 100755 --- a/contrib/devtools/check-doc.py +++ b/contrib/devtools/check-doc.py @@ -16,31 +16,33 @@ import sys FOLDER_GREP = 'src' FOLDER_TEST = 'src/test/' +REGEX_ARG = '(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"' +REGEX_DOC = 'HelpMessageOpt\("(-[^"=]+?)(?:=|")' CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP) -CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST) -CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR) -REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"') -REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")') +CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST) +CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR) # list unsupported, deprecated and duplicate args as they need no documentation SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd']) + def main(): - used = 
check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True) - docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True) - - args_used = set(re.findall(REGEX_ARG,used)) - args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL) - args_need_doc = args_used.difference(args_docd) - args_unknown = args_docd.difference(args_used) - - print("Args used : {}".format(len(args_used))) - print("Args documented : {}".format(len(args_docd))) - print("Args undocumented: {}".format(len(args_need_doc))) - print(args_need_doc) - print("Args unknown : {}".format(len(args_unknown))) - print(args_unknown) - - sys.exit(len(args_need_doc)) + used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True) + docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True) + + args_used = set(re.findall(re.compile(REGEX_ARG), used)) + args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL) + args_need_doc = args_used.difference(args_docd) + args_unknown = args_docd.difference(args_used) + + print("Args used : {}".format(len(args_used))) + print("Args documented : {}".format(len(args_docd))) + print("Args undocumented: {}".format(len(args_need_doc))) + print(args_need_doc) + print("Args unknown : {}".format(len(args_unknown))) + print(args_unknown) + + sys.exit(len(args_need_doc)) + if __name__ == "__main__": main() diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index b8f54192c9..0f2099953f 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -20,38 +20,38 @@ def check_ELF_PIE(executable): ''' Check for position independent executable (PIE), allowing for address space randomization. ''' - p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') ok = False - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): line = line.split() - if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN': + if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN': ok = True return ok def get_ELF_program_headers(executable): '''Return type and flags for ELF program headers''' - p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') in_headers = False count = 0 headers = [] - for line in stdout.split(b'\n'): - if line.startswith(b'Program Headers:'): + for line in stdout.splitlines(): + if line.startswith('Program Headers:'): in_headers = True - if line == b'': + if line == '': in_headers = False if in_headers: if count == 1: # header line - ofs_typ = line.find(b'Type') - ofs_offset = line.find(b'Offset') - ofs_flags = line.find(b'Flg') - ofs_align = line.find(b'Align') + ofs_typ = line.find('Type') + ofs_offset = line.find('Offset') + ofs_flags = line.find('Flg') + ofs_align = line.find('Align') if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1: raise ValueError('Cannot parse elfread -lW output') elif count > 1: 
@@ -68,9 +68,9 @@ def check_ELF_NX(executable): have_wx = False have_gnu_stack = False for (typ, flags) in get_ELF_program_headers(executable): - if typ == b'GNU_STACK': + if typ == 'GNU_STACK': have_gnu_stack = True - if b'W' in flags and b'E' in flags: # section is both writable and executable + if 'W' in flags and 'E' in flags: # section is both writable and executable have_wx = True return have_gnu_stack and not have_wx @@ -87,17 +87,17 @@ def check_ELF_RELRO(executable): # However, the dynamic linker need to write to this area so these are RW. # Glibc itself takes care of mprotecting this area R after relocations are finished. # See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347 - if typ == b'GNU_RELRO': + if typ == 'GNU_RELRO': have_gnu_relro = True have_bindnow = False - p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): tokens = line.split() - if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]): + if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]): have_bindnow = True return have_gnu_relro and have_bindnow @@ -105,13 +105,13 @@ def check_ELF_Canary(executable): ''' Check for use of stack canary ''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') ok = False - for line in stdout.split(b'\n'): - if b'__stack_chk_fail' in line: + for line in stdout.splitlines(): + if '__stack_chk_fail' in line: ok = True return ok @@ -121,13 +121,13 @@ def get_PE_dll_characteristics(executable): Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386' and bits is the DllCharacteristics value. 
''' - p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') arch = '' bits = 0 - for line in stdout.split('\n'): + for line in stdout.splitlines(): tokens = line.split() if len(tokens)>=2 and tokens[0] == 'architecture:': arch = tokens[1].rstrip(',') diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 2b77857766..3a67319eaa 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -46,28 +46,28 @@ MAX_VERSIONS = { # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used' +'_edata', '_end', '_init', '__bss_start', '_fini', '_IO_stdin_used' } READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') # Allowed NEEDED libraries ALLOWED_LIBRARIES = { # bitcoind and bitcoin-qt -b'libgcc_s.so.1', # GCC base support -b'libc.so.6', # C library -b'libpthread.so.0', # threading -b'libanl.so.1', # DNS resolve -b'libm.so.6', # math library -b'librt.so.1', # real-time (clock) -b'ld-linux-x86-64.so.2', # 64-bit dynamic linker -b'ld-linux.so.2', # 32-bit dynamic linker +'libgcc_s.so.1', # GCC base support +'libc.so.6', # C library +'libpthread.so.0', # threading +'libanl.so.1', # DNS resolve +'libm.so.6', # math library +'librt.so.1', # real-time (clock) +'ld-linux-x86-64.so.2', # 64-bit dynamic linker +'ld-linux.so.2', # 32-bit dynamic linker # bitcoin-qt only -b'libX11-xcb.so.1', # part of X11 -b'libX11.so.6', # part of X11 -b'libxcb.so.1', # part of X11 -b'libfontconfig.so.1', # font support -b'libfreetype.so.6', # font parsing -b'libdl.so.2' # programming interface to dynamic linker +'libX11-xcb.so.1', # part of X11 +'libX11.so.6', # part of X11 +'libxcb.so.1', # part of X11 +'libfontconfig.so.1', # font support +'libfreetype.so.6', # font parsing +'libdl.so.2' # programming interface to dynamic linker } class CPPFilt(object): @@ -77,10 +77,10 @@ class CPPFilt(object): Use a pipe to the 'c++filt' command. ''' def __init__(self): - self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) def __call__(self, mangled): - self.proc.stdin.write(mangled + b'\n') + self.proc.stdin.write(mangled + '\n') self.proc.stdin.flush() return self.proc.stdout.readline().rstrip() @@ -94,43 +94,43 @@ def read_symbols(executable, imports=True): Parse an ELF executable and return a list of (symbol,version) tuples for dynamic, imported symbols. 
''' - p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip())) syms = [] - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): line = line.split() - if len(line)>7 and re.match(b'[0-9]+:$', line[0]): - (sym, _, version) = line[7].partition(b'@') - is_import = line[6] == b'UND' - if version.startswith(b'@'): + if len(line)>7 and re.match('[0-9]+:$', line[0]): + (sym, _, version) = line[7].partition('@') + is_import = line[6] == 'UND' + if version.startswith('@'): version = version[1:] if is_import == imports: syms.append((sym, version)) return syms def check_version(max_versions, version): - if b'_' in version: - (lib, _, ver) = version.rpartition(b'_') + if '_' in version: + (lib, _, ver) = version.rpartition('_') else: lib = version ver = '0' - ver = tuple([int(x) for x in ver.split(b'.')]) + ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False return ver <= max_versions[lib] def read_libraries(filename): - p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') libraries = [] - for line in stdout.split(b'\n'): + for line in stdout.splitlines(): tokens = line.split() - if len(tokens)>2 and tokens[1] == b'(NEEDED)': - match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:])) + if len(tokens)>2 and tokens[1] == '(NEEDED)': + match = re.match('^Shared library: \[(.*)\]$', ' '.join(tokens[2:])) if match: libraries.append(match.group(1)) else: @@ -144,18 +144,18 @@ if __name__ == '__main__': # Check imported symbols for sym,version in read_symbols(filename, True): if version and not check_version(MAX_VERSIONS, version): - print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8'))) + print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version)) retval = 1 # Check exported symbols for sym,version in read_symbols(filename, False): if sym in IGNORE_EXPORTS: continue - print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8'))) + print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym))) retval = 1 # Check dependency libraries for library_name in read_libraries(filename): if library_name not in ALLOWED_LIBRARIES: - print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8'))) + print('%s: NEEDED library %s is not allowed' % (filename, library_name)) retval = 1 sys.exit(retval) diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md index e9e3f65da2..fa492fdd27 100644 --- a/contrib/verify-commits/README.md +++ b/contrib/verify-commits/README.md @@ -24,3 +24,24 @@ keys: Note that the above isn't a good UI/UX yet, and needs significant improvements to make it more convenient and reduce the chance of errors; pull-reqs improving this process would be much appreciated. 
+ +Configuration files +------------------- + +* `trusted-git-root`: This file should contain a single git commit hash which is the first unsigned git commit (hence it is the "root of trust"). +* `trusted-sha512-root-commit`: This file should contain a single git commit hash which is the first commit without a SHA512 root commitment. +* `trusted-keys`: This file should contain a \n-delimited list of all PGP fingerprints of authorized commit signers (primary, not subkeys). +* `allow-revsig-commits`: This file should contain a \n-delimited list of git commit hashes. See next section for more info. + +Key expiry/revocation +--------------------- + +When a key (or subkey) which has signed old commits expires or is revoked, +verify-commits will start failing to verify all commits which were signed by +said key. In order to avoid bumping the root-of-trust `trusted-git-root` +file, individual commits which were signed by such a key can be added to the +`allow-revsig-commits` file. That way, the PGP signatures are still verified +but no new commits can be signed by any expired/revoked key. To easily build a +list of commits which need to be added, verify-commits.sh can be edited to test +each commit with BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG set to both 1 and 0, and +those which need it set to 1 printed. diff --git a/contrib/verify-commits/allow-revsig-commits b/contrib/verify-commits/allow-revsig-commits index f0088cdca4..3abf82e529 100644 --- a/contrib/verify-commits/allow-revsig-commits +++ b/contrib/verify-commits/allow-revsig-commits @@ -102,3 +102,403 @@ bafd075c5e6a1088ef0f1aa0b0b224e026a3d3e0 c8d2473e6cb042e7275a10c49d3f6a4a91bf0166 386f4385ab04b0b2c3d47bddc0dc0f2de7354964 9f33dba05c01ecc5c56eb1284ab7d64d42f55171 +7466a26cab5d66665991433947964a638f5b957e +b43aba89e356ff95b706e80d4802f60fc46a569a +02b7e8319aef2a870264ad4fa2e3bb18664dcc36 +f686002a8eba820a40ac2f34a6e8f57b2b5cc54c +2b1c50b9352ab1dc40b0f877db23c1fa4048fae3 +2405ce1df043f778b8efb9205009500cbc17313a +4ad3b3c72c73d61e0a0cab541dca20acf651320d +4ba3d4f4393d81148422d24d222fe7ed00130194 +8ee5c7b747171e335793c74cd9d2f7491da58164 +872c921c0a208b04bd0713758e52fcab5b7c1684 +00d1680498c5550e7db1f359202d3433a092fafd +585db41e9ab7a6fb262c8bad7f427cdbdc497188 +18462960c0f13bd07d8f52b61e7d7bc17e991eea +0630974647dacaf25e7fcb7f9cbb785bb078ede6 +0f58d7f3d62f012f2584f5e781fc73de4763dd9e +3d16f581538b0974853e820508e8b3093269d2fd +66e91420ab233cf1dac64504e0dc129019bf8c0d +d8d9162f5bad39b2720dd2b2da237c6159e4755f +29fad97c320c892ab6a480c81e2078ec22ab354b +791c3ea61b4e49fd46a1a71b84ca99ddf69d2ff7 +a312e201ba56742499a5480b5f2115f01505c217 +ce56fdd2e8cdf94fd0ab76d71adbfa755e23ce7d +480f42630cbd598c04fa59ee0e406f56904ecffb +6012f1caf744ac9b53383d7d10a8f1b70ca2c0e1 +ded6a2afa549f693dcabb430ce0862f8631360c8 +07090c5339436f856e79a8036d1c85deeb453803 +0e265916d1c6a63e4a3821dab9db597b5ec64b46 +e4ffcacc2187d3419c8ea12b82fb06d82d8751d2 +e117cfe45eee9169409e74a44ef4a866be25bc35 +dcfe218626b05204e9fbc95ba5d95ca0eb72ec9b +23481fa50301201ef5a60675ef899aa6ce94ca03 +27c59dc502f29cf1d76290556c21e366145e3b2e +4a62ddd01873d18dbca96c81d756be1020249b45 +a233fb4f1d037e68ff70eef3a9f5b7bf1d631918 +b2089c51cc4af2f7e1c0ec75be9449ee222b1d69 +c997f8808256521397f1c003bb1e9896fee6eaa0 +5dc00f68c49c46a380a98d06233f90528b8e2557 +fe53d5f3636aed064823bc220d828c7ff08d1d52 +935eb8de039dec65669a96a1c3b86f4b03a1b86c +0277173b1defb63216d40a8d8805ae6d5d563c26 +2a30e67d20f76bbcd9a7d445f616f005316e0a1a +d32528e733f2711b34dbc41fbb2bb0f153bf7e9a +4cad91663df381d0dff8526f3b4aa74569dfb626 
+1b06ed136f17b526360617a70026aed5ded5746c +895fbd768f0c89cea3f78acac58b233d4e3a145e +f0295becbf3ef1fb78095306408789253fe0c114 +8d573198638e52e2dbd9abc609861430f9d2bcc3 +9d9c4185fadaf243bb97c226e2fef16b65299699 +eebe4580bc8d6484d79ecb24dd87412221cf2ea7 +9cf6393a4f82b9c81d3b4b468a17a89db10531a2 +598a9c4e4dcd03c6d80fba005de729a6a3aeba7e +6970b30c6f1d2be7947295fe18f2390649b17a4b +f359afcc410432ed5d30001acda0c66741ee8935 +126000ba9e7ff16271be2f4eef3df99ade8d624f +b5e4b9b5100ec15217d43edb5f4149439f4b20a5 +b987ca4ee495a7fff82f0ac14ef0753bfb7586e2 +b03013396cb2f4bf25746388b3982a2c3616e16b +9a97f39afaa890caa7987c6bc001b9a66e3e74e8 +cad504bf4c302f7a72e0a0e191f3fdbafda7340f +45cf8a03cb57b8639a8d47323bde46ba22d9eeaf +b7450cdbd89a1c862f4d4d8bf093f8a0b5448f9c +0910cbe4ef31eb95fd76c7c2f820419fe64a3150 +92a810d04b906722c9efe60e3997243c71ff3d4c +45173fa6fca9537abb0a0554f731d14b9f89c456 +fd4ca17360e6fc0c9bb76bf6b5b07c9102c12728 +ddff3447f29b62d79a33f728791f42fa9436216e +36a5a4404836da323c755523fbd27563a8e84f94 +c991b304dee368f506cfee27ddaa333f1f82c518 +d38d1a3e75aa97ffa8755ddd431754a6d0942964 +a332a7d5a15214015f9553fdb2bcf80a1a4b8dc0 +604e08c83cf58ca7e7cda2ab284c1ace7bb12977 +18a1bbad98bd4321f15e7921d9aec91661499d90 +8049241e226c16bd07b029c0cb4b62ac40f0c923 +797441ee995aac59f55d59a93ecb55e8ecbe7dbc +62fdf9b07087b80d2142799bdd2324f61483359d +f60b4ad57912b78a96af08046a503f7905610a8c +13e31dd6548d64a5992f439e74bb424bf88aca04 +fbce66a982679b5409a295be5c99a2eef429cabf +9f2c2dba21855b8cb9b193b1819be73fa4a23a99 +a89221873a3ee2451c73b41bbe2d99d36f439d31 +3d6ad407770e13958e157bf026cae0bfb9254899 +901ba3e3819405306414628306746552b0aa1d28 +7a43fbb959c38e025e558e472ad57de357539894 +0d89fa0877930c6c8a539a656c1009ad8ab6755b +54aedc013744c86b11157423fa3cffc9a51eef02 +f0c1f8abb0182da557d07372b938f3a0a4bb906f +4ed818060ecf4a38a02c8cb48f6cbc78d2ee7708 +3bdf242fc68a8d767932c6214455d4d413effbc9 +5e468994fbb349e8eefc996954a31a67a34aaa15 +41aa9c4a801a01eca1fad22a7095372d23dace60 +2adbddb03840ad71e843c6c4a207a13e871cd1d4 +13e352dc53dec0127c5f94a60055d0ca829420dc +95e14dc81dd30ee0d396ad08dca9a6980d16eee1 +61fb80660f73e5aa5b69302ecc7ac33da206ba5a +05a761932edd05cf94ffe938908baf058f38632a +ee92243e66f2df03b3a759a8ffb75dc06f0cea0d +22cdf93c062eeaa0f8f9d6220f01b67240073dfb +76b33491596736ca804e3a29bd8398d7a1516ab7 +6e4e98ee8ce2da3cca2e2fd210e9e8dbc9b1c936 +c838283ecdfb9490425bb071b7c22e542de46c7c +5e3f5e4f25b65b583d3bfefac9e1148035781089 +f7388e93d3dd91a90239aedac4ec58404f103a2e +0a2f46b0158b6fc7244a585913b0925c0acf707f +dd561667cb7ccbbfed3134b05a565971ef6f5873 +6f01dcf63873a5e42798635ab4026c9a5f9fa213 +70fec9e36bcd1a3d93df019be084aaf89cecd7d7 +f9b74ef3fc74fd7d2aa94560820341f03cda8e12 +998c3046fab2b52bc9f141cfb588a18c05506a86 +89cc4f905e30b913ca20e4192d538cc5cbe2c38d +87d90efd69b64f769116956a5db89e536e9e3714 +5aeaa9ccd1568a77e075dbe2bd2435bd60c87c91 +bfb270acfa30713dc8c968bb9ee40cf5a2360359 +1b8c88451b0554502435d3883c528ad0aad1b09b +57ee73990f1ce29916adfd99f93eae1ccea1a43b +808c84f89d0edcef9ddaab0b849a382719f6ec9e +14b860bf64020451ced823b859da8cb912278ab9 +c63364610f4a041df1c1bd81d01b1f6856160749 +92eadc395071876d77f3babddc056b4325bdbabc +e93fff1463ae906fc986bf98c3b118c82f171546 +9ccafb1d7bdd172a9b963444072a844da379c4f7 +b4a509a3f817121c3df98ddfd96b2769e18a3e5a +dbc4ae03963014ab4b7957d62ba59dbd8f938c33 +8ddf60db7ad636b6a31b590251c671ded635fa1d +f199b8a33d9443a258a1f49a1a29674cd9ee9a20 +e542728cde676f218c552d841d0af29b92f9800b +763231051596b8e3455b839911ad6a3a1f1c3c74 +ff4cd6075b12fb32b9a906deea3ed033e3f9560a 
+9c3c9cdae3e20b5bdea91a0631edac5116bbc89f +93d20a734d2ee873832bed8ca5c05cf8e539c53c +ef8340d25f7c5dd5682bdecea97ce84cfce1493c +69c7ecef405d168f658a9cc7996da84c17f61e66 +4ce2f3d0d33346e9f0e96851689ee6550b2a72e3 +44e1fd926cfb0df0fbd8c41de8cd65ed8d5d6e18 +d6d2c8503c4039b682196d83a67dc28359c10c5c +ae233c4ec3d14a97c6195059f52873cdba2b4755 +0f399a9ff227896265cafab9b2e9fab6cdb9b5b9 +f4ed44ab4a8f9a87ba678d5fd1449fbf636103dc +7fcd61b2613c211bb042a82a889655178be6a212 +42973f834445d7735738bdba8847812ba3c34d95 +8df48b36ed3201d938b9974ecbee455d7dc2fb84 +96ac26e56627f0c24213fcd3a1cce9fc95f1f661 +cce94c518a46b7b0006f984bbe4d69e8749182d2 +801dd40666d1e6009920ad3ff755c7bb993b2a62 +ce829855cfca103dde55661fa1524e66b139d063 +b148803b181e30213e8a7f3bd89c8239e9dcb866 +c377feaad87f8109f85da6caf62602b30c20effc +b37cab65c63e051ebc5b491da9bd687581df94df +16e41844e7d6c5876d2caaeef6010656950c6ec5 +ee50c9e48786dea0d9df2e45805c25565c100fe3 +11dacc6154c42bc6fe3ba94c1823f8a46e4fe81a +791a0e6ddade27d1b69f4861a6640de60b9553cf +638e6c59da4fad987c437592174b188510193b2e +52f8877525d5238f3440e73710507be889d14127 +2a56baf395bf11835d784c4f8634f4525deed6a1 +bc561b4b7d6a3f71649d37d5eb9047c29efa2b13 +31809d6f8514c4a8d5677e947e3f1ebb0db210b9 +a31e9ad4f027955d43c04a05517244647e250161 +777519bd96f68c18150a0f5942f8f97a91937f5e +4eb1f39d421024d9666cec61deaf96715ffae4c6 +50fae68d416b4b8ec4ca192923dfd5ae9ea42773 +ce665863b137ac4a7470cf006a92aa7694faca71 +81f8c0378b2ab5ea0d7b65635cb529bd3c69127c +108222b9c323a05cc9339368f10ddd0859f62b43 +28f788e47e58f2b462351d6989348a4e1a241b2b +d81dccf191a48a6b59c3747d7b4ccbe3535dde40 +a90e6d2bffc422ddcdb771c53aac0bceb970a2c4 +91e49c51f1aecc9e1d75457f4920d52a4b0a133c +60dd9cc470584960431de425e2a9ffbed0e8034a +ede386c2193fc31351e193b3a8cf30030d6be62c +a084767b40c0d3ba8fa8f8d60f1e8d99a9dc3457 +3f726c99f819f97f2ab21b94d34c6b3129cd883a +77fc469fc78cdd87c29f398d46ac58dbb9ef62c0 +4ae6d0fbef60ccbecf8f23bb482e201b3678f7a3 +8858b6ddd3bce9daa08da6e05de3ca863a399c15 +22e301a3d56dc9e6878380ee92c7d19ca43119d2 +c484ec6c9b85ca4e331e395c564ae232fd0681dd +a46a671e253528e450bd57645c400bf761da07ab +655970d9c60ae6850daf452457e14e21047c0e1b +b6a48914c50631914192aa11b19205436a9c664d +7db65c363a0cc6ca7cdb04de9a973ab70013baad +6366941275344dac7e2130b0c972e90117d37ed0 +4fb2586661471a1572c2df2a5a091011d45eb7c4 +d7be7b39fa1021ec4518186afe145ee948e12a94 +85aec87b11ec41295558175c63f1f5a849460fdf +aeb31756276034dd506fdf97c8aaade0e7e584f5 +ac016e17d20253129a0287cee7e1d06b7ef15966 +bf74d377fb8e20140da6eac1407414928384bcea +2c811e08db651a4aed6ea0f7c1972d60de6de8ab +e5d26e47c7a482c072a7fe47bb84c56854734184 +96a63a3e0cefe920819bd42add0041837b1214a1 +e526ca6284b9e13be1b912b80dd73a34e739b539 +ecd21357f16106e541e9c2854ead2a906659b938 +4b5a7ce0c301ad971f383eb60f61bf9b4026efda +929fd7276c0f0c30b9416f61a6f5f35d763d81e4 +fa8a0639f7b0ce04030b72b4d5be4f0aa36fc5cb +f1f1605c22a6283bbfd757055fcf2b584a857709 +0c173a15ca1bf20999f74987988985508c9de463 +df0793f324e33066cc746c0cb1d053d35733d626 +2b0179d8a9b75397937126b36114df0dddeab40c +bf0a08be281dc42241e7f264c2a20515eb4781bb +3895e25a77363ae8b49358fb793f50fa8b271e2d +1fc783fc08bc078239537535f174ab8a489772c0 +1d4805ce04645f3203b0cfd3d66ea710e7433eb4 +d3b58704d1d325875fc605580c1c02b825c1bbcc +ed88e3194c4bc43aeafef929da7b419d03dea1ad +dd07f47b79628668e29cc0143b21e790100ee445 +65cc7aacfbfc7b747926375280a1d839e88d576b +080ec5209172ac9605f1434559dbb3c1e012b10a +416af3edf5b5ab265acf95568f2bc9eabd3d96de +e0a7801223fd573863939e76cb633f1dcc2d22c4 +4bc853b50fd9127687eb9e4f3b679dd261a4fa96 
+c68a9a69278aa194fed96bd9733d32af3690a11e +c38f540298f0e188df5ed68fd56c623b9ac8331b +643fa0b22d70e459d7f7ec3d728ae4811dc5158f +e053e05c130549f43953f1d70e724dc9ce3e1b85 +75e898c094eea533d1dfaf141c6afccc3072c49f +2805d606bc46bf5589093a1b92d3542c13ce50c2 +32751807c9c06011eb689cba56b401a6302699c0 +30853e16d332816752dafcfca92147c7ffef5b54 +bea5b00cfe95cd37832305c0f93c339a22a7d79d +c871f323b418fac27bf834843ca26985010df53f +329fc1dce7a1c372c8b10c2f2f8732b2c60daff0 +1aefc94dd78d6e0c9209cb09fc16f53dedf42108 +8e5725666b519b61fcdc3141da5c6a57c1959909 +a4ca0b042365061020627a8c045cddacea3312ec +8bd16ee12fc8ef6723e0572c29b979c15b92b4f4 +87abe20fc118721cc5efdbd94a8462468cd1da2b +4b766fcdd4ca16399075d1e081a321b3b05ce516 +f6241b3e420e19f3f0507cbbc872fe9218916a02 +7ee523604851af62c0a47c07ee023a8710ef32f7 +776ba233e939fe41a74c6b2632b93a0679a32c71 +6a796b2b53fe542e0f340f250f4f20d69efed8d0 +23d78c4dd01bc74ba35db3e3df95280f6f1b2e22 +f4b15e2de97c4f8cdbb40bef4c9d0ab2807974d9 +fff72de5bf8ac7b70208e655f237b80e70e18851 +170bc2c381f86a523de2fc8b71d62ade66303c0d +314ebdfcb38d4b4c977579f787d5e1a20d068c94 +e9274839bf316b1972d80d28e45759f898edbf86 +75171f099e82e3527d7c3469b15891bd92227ec2 +3c5e6c94caf40395e031fbde44a0cca46fdd76ec +dc8fc0c73bebbc1c48ac5540026030c9cc00ec23 +492d22f92919d8d9d59568318c26c1e2ac4890cc +80c3a734298e824f9321c4efdd446086a3baad89 +47535d7c3ec79c5978cdcc03a5351ddbbb22538d +1b25b6df0f08f7474228c5b6ed13b58682e1e440 +c530c15180631cea95e9c292cf7fabde9dca9db3 +2723bcdce3248417e98e6c43207bef74d34076c1 +ed22eb4a62bd8d5369aaec87d4cbdc03c9f16368 +9111df9673beb6d6616d491a5478f09b5f14d040 +d86bb075bf6d1e78c1e4f3dd38b0ea828ef5ecfe +50a1cc0f0aef1514b917a5a3f4476967170b429d +6ce733747e160ca699711f2c47e686284ca9aa07 +b44adf92342ad4f9c343ba29c081a91687932936 +88799ea1b1c08f4bc1a487c9e3c2effd5e1650ae +080d7c700fc3291560d79fc590e05b8e2bad984f +12af74b289f8cdc6caf850dc6c802f9936b1e8b3 +8e4f7e72410df3ba430082c7cf385f26fd75b033 +8ac80412867118172dc4172494304e19969e9489 +f2734c2828f69d9cfd535e5eab0592a7674b2b61 +0b9fb682890b8fe10cec54072b809a5efe57d33d +5b029aaedb5fcf7cadd249607dd28eb3f233ab8c +79af9fbd8c3c0e54702a9c92b171f134bd4466c8 +c412fd805ddf3282dc2e1f28e30f51ffcb1f1da2 +111849345bb5140f86b48e730ceab4bff45fa2e9 +a0b1e57b20a17177ed5a9a54e4a8aab597a546b4 +ca209230c8e73745cf8cfc79f500c9c46e103306 +a230b0588788dbe1ac84622aea169c577b381241 +dfef6b6af08097f0676a2323085558fbbd3c48c6 +3192e5278abca7c1f3b4a2a7f77a0ce941c73985 +7c7ddd9ead99a8b5033a1a5d4698032c9e2b3a92 +10b930dde8f14e9cb661810e97a33bbf144fc55c +9225de2cf652fe2bf6e50636824cdb641546f57d +598ef9c44b3ea2cc142c175f077b493f39f5ba22 +c49355c7170a64bdd7864cc3ba9a64916b67fe7c +857d1e171e051b254a617f27b39f6a551054cee2 +21833f9456f6ad5bc06321ad6d9590f42ce0195c +8910b4717e5bb946ee6988f7fe9fd461f53a5935 +5703dff0939f05c7457cebd6fc61d88ab13afe41 +8bfa13b15b84cb372950fb7b25a1080173060b6a +ac23a7c1f19b3d8c326ffe75c8e13edf285f90fe +19be26afe3d04783a92d032b55bf3fb1e2ae63cc +f7ec7cfd38b543ba81ac7bed5b77f9a19739460b +36afd4db4442c45d4078b1a7ad16a1872b5bee0d +88c2ae3ed2bb5d367dd408c9255cd8f1e7a36c7d +a13a417cdcfdfd1f1b3bf997bb6ffe6e69b096b9 +d6064a89ac97dc0d2ce9da3982e1a4e25afaeda8 +7146d96de3e15a80cafbab2af48ff6f65d8e41bb +5628c70f2a44567695e5331fe2293c5b7f35b629 +7ff4a538a8682cdf02a4bcd6f15499c841001b73 +aa5fa642b0e7ce2ea55e2298886f212f11a8894e +8efd1c820b9a782d8608d54d924658536178295c +50a226563cd8d7c0a5e8448e87fede0eb72a8354 +b860915f8b0dae98e57a254d11575ea41f5c5a79 +d304fef3746039183f51b3ac8f4774dcf3a64f59 +53ab12d9318d5d195ccc77028b0e3ae66dc6e1fd 
+668de70be039a4f1ffcf20aeae2a22ee71fc55a8 +0fea960ca917b73aff853fe88476174c8a313863 +f89502306dcf6393a2c7b0efbb0fa728fc582137 +ff58b1c3bdff5e5f687f10f9e40ce495ca49674e +0b96abc35f1a9d46a27eeddd7df418d107c29c57 +b0b57a17306a7e963a4fe463f84e2b150a00a859 +4105cb6fd964ad13099ca83b1fdf3d35f3961f74 +23281a4dc3afc42a001346caec4dbb8193f0bb53 +8daf103fa138f9a184448ebf1c2e03b9dbd96f21 +02e5308c1b9f3771bbe49bc5036215fa2bd66aa9 +a65ced1a66575c652baf5084644b8647f531be8c +2456a835f0bc7796d9ff71f64837fa6790e2b7cc +9ec1330b455c1ab2eb6b89f8a2ab885677d4ae8a +0b738075bd43fbd4410e30a51e0498cbfd2b7513 +98c80e374b84e5a9c2d5c36889a0b1ebed5b814b +25720fc394e27a951bcad26095fb5a711bfacb8f +4cfd57d2e38207d78722ce8c9274ba8dd700d1cc +0fc1c31a878e93d938c67db3f958e82e3c39659f +df1ab5b4d67b46b5e9e840b1fbe0ff02520831f9 +5bc3b6cede8dabdf3f4f27ddb03723cbb7cde51a +c2ea1e6561caba3abffce361abc800822b9e0efe +caa2f106d704ec3ade63498031dd58d34510bc76 +dce853ef76ef90c46d84294225088d595467d08c +dbc8a8c86ae50059fddb2d6834fa5f0c9bbf9b71 +0f921e6a0492c4e9f037a9ed91f474885032d68c +041331e1da23e4136fd046ed870cdcc177464176 +e6ba5068f107ac234576e77cedbd748b665369c2 +76fcd9d5034143a5b041766552670d19f926097d +72bf1b3d0962304850a3ef5fe375db4bff1d0a39 +919db037f1f5cc73cdcaef92dd9cb0e7f5c8dec3 +c36229b0b2e9d4554053f5c9fc451ac29a493b1f +9e4bb312e6958d2baa309ba670e5eed1523c6f47 +d7ba4a233bd5a6f8fadee681c68a995e23fe36d7 +98514988a3d3e8b7dbf0463884a5c38f5ed5562d +5412c08c3cf13577566064edd04da021c37b7cbe +31bcc667863f368157efa1143a78623a5db8f0d1 +7bd1aa566fb4a4fe194f209085649f2c722b0cff +c4522e71c7e1d8ecfd70112e9375b9d00d6733a8 +e22f409f18881b63a8e747036584a71217f40e6e +97ec6e5c9098a1240655cfcab05b6cd5eedb6cd1 +bc121b0eb19713ec72002b5be03ba5ac35903a17 +c98f6b3d93a2cc1b49a6db425ea2b661089d0f9e +0de7fd36de57a68e543b4c1f184fba192c398c73 +e662d281b837c25b2b70525aa8fe8af894339823 +44adf683ad232db8ce0cb89b3e236a1f5944cfb0 +cb2ed300a89ebf9f0654da869ced665ed8b2abe7 +0a6d48d9ed60b0b02177059ab116f8f46d2cbed3 +b42291334651fff46dbfe5947a726f65cb9d7dfe +e5364991daecb73aca3bb5ac37f2619d7a89211b +4a2b170c075ce703cbdc82519a48016a9ee3f99c +924de0bd75a7f75df65d7d15f9d1587a2e794abf +1253f8692fc3a11be9430685cd405236a68df6c3 +2b799ae9e1e0a540f9a5971ddf27d83254668279 +c9bdf9a75f9fde8cd011e4aa94be4ed4347078a3 +3d69ecb4edeb80003a1a41442e320898a30dbd9c +f08222e882b18c1f279308636e03beceece2dbf1 +23e03f8d26d7bd03273a5dcbdcfe3905dfb49ffb +03dd707dc027fbf6f24120213f8eb66571600374 +d0754799698de2c032abcb8198ee5d5401063213 +072116fceb2294b97d1c40f79305f2e3ff71812b +e66cc1d58e16bf1650dd6479fed64ecaca8c6098 +f137753a2dcd8229f89d1d1ac28039364e5850b4 +61d191fbf953700ba8aeadc9c8cf4c195efbd10c +76f3c02fb01a6df98fbd8c16ac21d159d4649d37 +6013c73b3312e11b447ed387426749014716f820 +6faffb8a83db3f209a303a4464dbdd597faad5a4 +cc9e8aca5f950c78dcfeff63c441ba993c1fe12f +8ca69a2a88a77eb06149fa049ab1a7e6de38b321 +2f71490d21796594ca6f55e375558944de9db5a0 +08cc5fd666456cb476467473ed1880c90c92dedb +e31a43c725ebe641d7c219c3886eee18eebf0bb8 +52b5a8785de760a204b2b0aab19dfaf79c2c3ff0 +483e8e4f4875a1a621ec9e9df2880d3037d95ed7 +1e5799c52535a3fc20e885916f1e7ed33ecc7f46 +a82e5d8220bbc8b5d786bed99b0876f530b9b7cc +7fe6c5c993706e8395cdaf7977bee793c06f48f3 +2a0836f6d5e7c1d7e97bedb0e0ea33dcaf981f77 +ddc308068d69c6c9aa629ee3c4ce75e1d1cf08b5 +ec139a5621a9c9f03e1988391a3c7c6c5d849776 +c01a6c48b982d625fd9f4f69005878781d3d56fa +95a983d56dbda457e3bf8766d59bac74c7aa5699 +760741a00833876976389ed7a6b73f36ee5b4c13 +6e5e5abba6f8bbbe61c22795df440dfafcfdc378 +cf2cecb18779ce83de9adebf382dff1c19b12840 
+af9b7a9f2f73b1a2f9728106774dd13e8d1cdd8d +115735d547fdeade822f547eb3e8c8f9961a9b07 +c2c69edf37b5c02aafa01d0407dadbf5ef8751b5 +a072d1a83787e786d074a4b5871b0b961781f7c6 +ed2cd59e258f756b2eaed7909a60956ade6ef7ee +ae5575ba41c8a782805afb1c08730343cfc22397 +6ff2c8d29f6b5a5c2ce63f0a16f3bb0dbd049451 +a80de15113166354cdf208e3d8b6e25f4511a591 +06bd4f637f15e769f088d9051a5af94bbb0217a3 +6700cc993cc07fb0f5b8b577ff8c4afcf0b18274 +37f9a1f627c0995d89b62923e75cd092600894f9 +8844ef15ded02d5ed86fb95aaf251235fcef2396 +1b87e5b5b184a0a6c683eda23b36393822b57f03 +e2bf830bb6c1bfa038c943dd6f5d92a406bd723f +423ca302a3ee87000530da3c105f269b8fabece7 +4e14afe42fdd468d5de11df8cc13defdcb8e83f8 +3e90fe6534206412ea22beaa445cf20d28fbe718 +88b77c7da0a672c89e24df37ea6e9085b4e2a05c +0ad104190465d8d65c2344bbe10dcf3df025d86c +5c7df7022bcd360e6af00b9458b1a3fd54e1cc9a +59ad56851a342d2c62f6b38bf15002b23ab439e1 diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root index e560b98d02..c60f8ab695 100644 --- a/contrib/verify-commits/trusted-git-root +++ b/contrib/verify-commits/trusted-git-root @@ -1 +1 @@ -11049f4fe62606d1b0380a9ef800ac130f0fbadf +82bcf405f6db1d55b684a1f63a4aabad376cdad7 diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 0a4ad32a2b..0de1892200 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -243,6 +243,57 @@ make cov # A coverage report will now be accessible at `./test_bitcoin.coverage/index.html`. ``` +**Sanitizers** + +Bitcoin can be compiled with various "sanitizers" enabled, which add +instrumentation for issues regarding things like memory safety, thread race +conditions, or undefined behavior. This is controlled with the +`--with-sanitizers` configure flag, which should be a comma separated list of +sanitizers to enable. The sanitizer list should correspond to supported +`-fsanitize=` options in your compiler. These sanitizers have runtime overhead, +so they are most useful when testing changes or producing debugging builds. + +Some examples: + +```bash +# Enable both the address sanitizer and the undefined behavior sanitizer +./configure --with-sanitizers=address,undefined + +# Enable the thread sanitizer +./configure --with-sanitizers=thread +``` + +If you are compiling with GCC you will typically need to install corresponding +"san" libraries to actually compile with these flags, e.g. libasan for the +address sanitizer, libtsan for the thread sanitizer, and libubsan for the +undefined sanitizer. If you are missing required libraries, the configure script +will fail with a linker error when testing the sanitizer flags. + +The test suite should pass cleanly with the `thread` and `undefined` sanitizers, +but there are a number of known problems when using the `address` sanitizer. The +address sanitizer is known to fail in +[sha256_sse4::Transform](/src/crypto/sha256_sse4.cpp) which makes it unusable +unless you also use `--disable-asm` when running configure. We would like to fix +sanitizer issues, so please send pull requests if you can fix any errors found +by the address sanitizer (or any other sanitizer). + +Not all sanitizer options can be enabled at the same time, e.g. trying to build +with `--with-sanitizers=address,thread` will fail in the configure script as +these sanitizers are mutually incompatible. Refer to your compiler manual to +learn more about these options and which sanitizers are supported by your +compiler. 
+ +Additional resources: + + * [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html) + * [LeakSanitizer](https://clang.llvm.org/docs/LeakSanitizer.html) + * [MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html) + * [ThreadSanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html) + * [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html) + * [GCC Instrumentation Options](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html) + * [Google Sanitizers Wiki](https://github.com/google/sanitizers/wiki) + * [Issue #12691: Enable -fsanitize flags in Travis](https://github.com/bitcoin/bitcoin/issues/12691) + Locking/mutex usage notes ------------------------- diff --git a/src/Makefile.am b/src/Makefile.am index 7d0358619f..72e5cdb95d 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -4,8 +4,8 @@ DIST_SUBDIRS = secp256k1 univalue -AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) -AM_CXXFLAGS = $(HARDENED_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS) +AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) $(SANITIZER_LDFLAGS) +AM_CXXFLAGS = $(HARDENED_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS) $(SANITIZER_CXXFLAGS) AM_CPPFLAGS = $(HARDENED_CPPFLAGS) EXTRA_LIBRARIES = diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 675f3c28af..e4620e63c6 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -22,8 +22,8 @@ bool SerializeDB(Stream& stream, const Data& data) // Write and commit header, data try { CHashWriter hasher(SER_DISK, CLIENT_VERSION); - stream << FLATDATA(Params().MessageStart()) << data; - hasher << FLATDATA(Params().MessageStart()) << data; + stream << Params().MessageStart() << data; + hasher << Params().MessageStart() << data; stream << hasher.GetHash(); } catch (const std::exception& e) { return error("%s: Serialize or I/O error - %s", __func__, e.what()); @@ -66,7 +66,7 @@ bool DeserializeDB(Stream& stream, Data& data, bool fCheckSum = true) CHashVerifier<Stream> verifier(&stream); // de-serialize file header (network specific magic number) and .. unsigned char pchMsgTmp[4]; - verifier >> FLATDATA(pchMsgTmp); + verifier >> pchMsgTmp; // ... verify the network matches ours if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp))) return error("%s: Invalid network magic number", __func__); diff --git a/src/compressor.cpp b/src/compressor.cpp index 86de2900e9..da639a01af 100644 --- a/src/compressor.cpp +++ b/src/compressor.cpp @@ -9,7 +9,15 @@ #include <pubkey.h> #include <script/standard.h> -bool CScriptCompressor::IsToKeyID(CKeyID &hash) const +/* + * These check for scripts for which a special case with a shorter encoding is defined. + * They are implemented separately from the CScript test, as these test for exact byte + * sequence correspondences, and are more strict. For example, IsToPubKey also verifies + * whether the public key is valid (as invalid ones cannot be represented in compressed + * form). 
+ */ + +static bool IsToKeyID(const CScript& script, CKeyID &hash) { if (script.size() == 25 && script[0] == OP_DUP && script[1] == OP_HASH160 && script[2] == 20 && script[23] == OP_EQUALVERIFY @@ -20,7 +28,7 @@ bool CScriptCompressor::IsToKeyID(CKeyID &hash) const return false; } -bool CScriptCompressor::IsToScriptID(CScriptID &hash) const +static bool IsToScriptID(const CScript& script, CScriptID &hash) { if (script.size() == 23 && script[0] == OP_HASH160 && script[1] == 20 && script[22] == OP_EQUAL) { @@ -30,7 +38,7 @@ bool CScriptCompressor::IsToScriptID(CScriptID &hash) const return false; } -bool CScriptCompressor::IsToPubKey(CPubKey &pubkey) const +static bool IsToPubKey(const CScript& script, CPubKey &pubkey) { if (script.size() == 35 && script[0] == 33 && script[34] == OP_CHECKSIG && (script[1] == 0x02 || script[1] == 0x03)) { @@ -45,24 +53,24 @@ bool CScriptCompressor::IsToPubKey(CPubKey &pubkey) const return false; } -bool CScriptCompressor::Compress(std::vector<unsigned char> &out) const +bool CompressScript(const CScript& script, std::vector<unsigned char> &out) { CKeyID keyID; - if (IsToKeyID(keyID)) { + if (IsToKeyID(script, keyID)) { out.resize(21); out[0] = 0x00; memcpy(&out[1], &keyID, 20); return true; } CScriptID scriptID; - if (IsToScriptID(scriptID)) { + if (IsToScriptID(script, scriptID)) { out.resize(21); out[0] = 0x01; memcpy(&out[1], &scriptID, 20); return true; } CPubKey pubkey; - if (IsToPubKey(pubkey)) { + if (IsToPubKey(script, pubkey)) { out.resize(33); memcpy(&out[1], &pubkey[1], 32); if (pubkey[0] == 0x02 || pubkey[0] == 0x03) { @@ -76,7 +84,7 @@ bool CScriptCompressor::Compress(std::vector<unsigned char> &out) const return false; } -unsigned int CScriptCompressor::GetSpecialSize(unsigned int nSize) const +unsigned int GetSpecialScriptSize(unsigned int nSize) { if (nSize == 0 || nSize == 1) return 20; @@ -85,7 +93,7 @@ unsigned int CScriptCompressor::GetSpecialSize(unsigned int nSize) const return 0; } -bool CScriptCompressor::Decompress(unsigned int nSize, const std::vector<unsigned char> &in) +bool DecompressScript(CScript& script, unsigned int nSize, const std::vector<unsigned char> &in) { switch(nSize) { case 0x00: @@ -139,7 +147,7 @@ bool CScriptCompressor::Decompress(unsigned int nSize, const std::vector<unsigne // * if e==9, we only know the resulting number is not zero, so output 1 + 10*(n - 1) + 9 // (this is decodable, as d is in [1-9] and e is in [0-9]) -uint64_t CTxOutCompressor::CompressAmount(uint64_t n) +uint64_t CompressAmount(uint64_t n) { if (n == 0) return 0; @@ -158,7 +166,7 @@ uint64_t CTxOutCompressor::CompressAmount(uint64_t n) } } -uint64_t CTxOutCompressor::DecompressAmount(uint64_t x) +uint64_t DecompressAmount(uint64_t x) { // x = 0 OR x = 1+10*(9*n + d - 1) + e OR x = 1+10*(n - 1) + 9 if (x == 0) diff --git a/src/compressor.h b/src/compressor.h index 6fcecd27e9..561c8e66d0 100644 --- a/src/compressor.h +++ b/src/compressor.h @@ -14,6 +14,13 @@ class CKeyID; class CPubKey; class CScriptID; +bool CompressScript(const CScript& script, std::vector<unsigned char> &out); +unsigned int GetSpecialScriptSize(unsigned int nSize); +bool DecompressScript(CScript& script, unsigned int nSize, const std::vector<unsigned char> &out); + +uint64_t CompressAmount(uint64_t nAmount); +uint64_t DecompressAmount(uint64_t nAmount); + /** Compact serializer for scripts. * * It detects common cases and encodes them much more efficiently. 
@@ -37,28 +44,13 @@ private: static const unsigned int nSpecialScripts = 6; CScript &script; -protected: - /** - * These check for scripts for which a special case with a shorter encoding is defined. - * They are implemented separately from the CScript test, as these test for exact byte - * sequence correspondences, and are more strict. For example, IsToPubKey also verifies - * whether the public key is valid (as invalid ones cannot be represented in compressed - * form). - */ - bool IsToKeyID(CKeyID &hash) const; - bool IsToScriptID(CScriptID &hash) const; - bool IsToPubKey(CPubKey &pubkey) const; - - bool Compress(std::vector<unsigned char> &out) const; - unsigned int GetSpecialSize(unsigned int nSize) const; - bool Decompress(unsigned int nSize, const std::vector<unsigned char> &out); public: explicit CScriptCompressor(CScript &scriptIn) : script(scriptIn) { } template<typename Stream> void Serialize(Stream &s) const { std::vector<unsigned char> compr; - if (Compress(compr)) { + if (CompressScript(script, compr)) { s << CFlatData(compr); return; } @@ -72,9 +64,9 @@ public: unsigned int nSize = 0; s >> VARINT(nSize); if (nSize < nSpecialScripts) { - std::vector<unsigned char> vch(GetSpecialSize(nSize), 0x00); + std::vector<unsigned char> vch(GetSpecialScriptSize(nSize), 0x00); s >> CFlatData(vch); - Decompress(nSize, vch); + DecompressScript(script, nSize, vch); return; } nSize -= nSpecialScripts; @@ -96,9 +88,6 @@ private: CTxOut &txout; public: - static uint64_t CompressAmount(uint64_t nAmount); - static uint64_t DecompressAmount(uint64_t nAmount); - explicit CTxOutCompressor(CTxOut &txoutIn) : txout(txoutIn) { } ADD_SERIALIZE_METHODS; diff --git a/src/init.cpp b/src/init.cpp index 64b513d63a..f6f522da66 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -115,7 +115,6 @@ static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat"; // std::atomic<bool> fRequestShutdown(false); -std::atomic<bool> fDumpMempoolLater(false); void StartShutdown() { @@ -205,7 +204,7 @@ void Shutdown() threadGroup.interrupt_all(); threadGroup.join_all(); - if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { + if (g_is_mempool_loaded && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { DumpMempool(); } @@ -421,8 +420,7 @@ std::string HelpMessage(HelpMessageMode mode) #endif strUsage += HelpMessageGroup(_("Debugging/Testing options:")); - if (showDebug) - { + if (showDebug) { strUsage += HelpMessageOpt("-checkblocks=<n>", strprintf(_("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=<n>", strprintf(_("How thorough the block verification of -checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. 
(default: %u)", defaultChainParams->DefaultConsistencyChecks())); @@ -432,7 +430,6 @@ std::string HelpMessage(HelpMessageMode mode) strUsage += HelpMessageOpt("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used"); strUsage += HelpMessageOpt("-testsafemode", strprintf("Force safe mode (default: %u)", DEFAULT_TESTSAFEMODE)); strUsage += HelpMessageOpt("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages"); - strUsage += HelpMessageOpt("-fuzzmessagestest=<n>", "Randomly fuzz 1 of every <n> network messages"); strUsage += HelpMessageOpt("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT)); strUsage += HelpMessageOpt("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT)); @@ -684,8 +681,8 @@ void ThreadImport(std::vector<fs::path> vImportFiles) } // End scope of CImportingNow if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { LoadMempool(); - fDumpMempoolLater = !fRequestShutdown; } + g_is_mempool_loaded = !fRequestShutdown; } /** Sanity checks diff --git a/src/netaddress.h b/src/netaddress.h index 93bbb66491..ad6b55eb58 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -93,7 +93,7 @@ class CNetAddr template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(FLATDATA(ip)); + READWRITE(ip); } friend class CSubNet; @@ -131,8 +131,8 @@ class CSubNet template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { READWRITE(network); - READWRITE(FLATDATA(netmask)); - READWRITE(FLATDATA(valid)); + READWRITE(netmask); + READWRITE(valid); } }; @@ -166,7 +166,7 @@ class CService : public CNetAddr template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(FLATDATA(ip)); + READWRITE(ip); unsigned short portN = htons(port); READWRITE(FLATDATA(portN)); if (ser_action.ForRead()) diff --git a/src/protocol.h b/src/protocol.h index e518d11944..a07c5ea862 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -48,10 +48,10 @@ public: template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(FLATDATA(pchMessageStart)); - READWRITE(FLATDATA(pchCommand)); + READWRITE(pchMessageStart); + READWRITE(pchCommand); READWRITE(nMessageSize); - READWRITE(FLATDATA(pchChecksum)); + READWRITE(pchChecksum); } char pchMessageStart[MESSAGE_START_SIZE]; diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp index 0dd7d46960..f045053c3b 100644 --- a/src/qt/recentrequeststablemodel.cpp +++ b/src/qt/recentrequeststablemodel.cpp @@ -139,10 +139,9 @@ bool RecentRequestsTableModel::removeRows(int row, int count, const QModelIndex if(count > 0 && row >= 0 && (row+count) <= list.size()) { - const RecentRequestEntry *rec; for (int i = 0; i < count; ++i) { - rec = &list[row+i]; + const RecentRequestEntry* rec = &list[row+i]; if (!walletModel->saveReceiveRequest(rec->recipient.address.toStdString(), rec->id, "")) return false; } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index e15dad2f0b..31cbec4c86 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1607,13 +1607,17 @@ UniValue savemempool(const JSONRPCRequest& request) if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "savemempool\n" - "\nDumps the mempool to 
disk.\n" + "\nDumps the mempool to disk. It will fail until the previous dump is fully loaded.\n" "\nExamples:\n" + HelpExampleCli("savemempool", "") + HelpExampleRpc("savemempool", "") ); } + if (!g_is_mempool_loaded) { + throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet"); + } + if (!DumpMempool()) { throw JSONRPCError(RPC_MISC_ERROR, "Unable to dump mempool to disk"); } diff --git a/src/serialize.h b/src/serialize.h index 91da6b0f80..247e915298 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -59,6 +59,12 @@ inline T* NCONST_PTR(const T* val) return const_cast<T*>(val); } +//! Safely convert odd char pointer types to standard ones. +inline char* CharCast(char* c) { return c; } +inline char* CharCast(unsigned char* c) { return (char*)c; } +inline const char* CharCast(const char* c) { return c; } +inline const char* CharCast(const unsigned char* c) { return (const char*)c; } + /* * Lowest-level serialization and conversion. * @note Sizes of these types are verified in the tests @@ -177,6 +183,8 @@ template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_wri template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); } template<typename Stream> inline void Serialize(Stream& s, float a ) { ser_writedata32(s, ser_float_to_uint32(a)); } template<typename Stream> inline void Serialize(Stream& s, double a ) { ser_writedata64(s, ser_double_to_uint64(a)); } +template<typename Stream, int N> inline void Serialize(Stream& s, const char (&a)[N]) { s.write(a, N); } +template<typename Stream, int N> inline void Serialize(Stream& s, const unsigned char (&a)[N]) { s.write(CharCast(a), N); } template<typename Stream> inline void Unserialize(Stream& s, char& a ) { a = ser_readdata8(s); } // TODO Get rid of bare char template<typename Stream> inline void Unserialize(Stream& s, int8_t& a ) { a = ser_readdata8(s); } @@ -189,6 +197,8 @@ template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); } template<typename Stream> inline void Unserialize(Stream& s, float& a ) { a = ser_uint32_to_float(ser_readdata32(s)); } template<typename Stream> inline void Unserialize(Stream& s, double& a ) { a = ser_uint64_to_double(ser_readdata64(s)); } +template<typename Stream, int N> inline void Unserialize(Stream& s, char (&a)[N]) { s.read(a, N); } +template<typename Stream, int N> inline void Unserialize(Stream& s, unsigned char (&a)[N]) { s.read(CharCast(a), N); } template<typename Stream> inline void Serialize(Stream& s, bool a) { char f=a; ser_writedata8(s, f); } template<typename Stream> inline void Unserialize(Stream& s, bool& a) { char f=ser_readdata8(s); a=f; } diff --git a/src/test/compress_tests.cpp b/src/test/compress_tests.cpp index 3c26013622..127cc154df 100644 --- a/src/test/compress_tests.cpp +++ b/src/test/compress_tests.cpp @@ -25,16 +25,16 @@ BOOST_FIXTURE_TEST_SUITE(compress_tests, BasicTestingSetup) bool static TestEncode(uint64_t in) { - return in == CTxOutCompressor::DecompressAmount(CTxOutCompressor::CompressAmount(in)); + return in == DecompressAmount(CompressAmount(in)); } bool static TestDecode(uint64_t in) { - return in == CTxOutCompressor::CompressAmount(CTxOutCompressor::DecompressAmount(in)); + return in == CompressAmount(DecompressAmount(in)); } bool static TestPair(uint64_t dec, uint64_t enc) { - return CTxOutCompressor::CompressAmount(dec) == enc && - CTxOutCompressor::DecompressAmount(enc) == dec; 
+ return CompressAmount(dec) == enc && + DecompressAmount(enc) == dec; } BOOST_AUTO_TEST_CASE(compress_amounts) diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index e03234060d..6552613c04 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -64,7 +64,7 @@ public: CDataStream AddrmanToStream(CAddrManSerializationMock& _addrman) { CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION); - ssPeersIn << FLATDATA(Params().MessageStart()); + ssPeersIn << Params().MessageStart(); ssPeersIn << _addrman; std::string str = ssPeersIn.str(); std::vector<unsigned char> vchData(str.begin(), str.end()); @@ -110,7 +110,7 @@ BOOST_AUTO_TEST_CASE(caddrdb_read) BOOST_CHECK(addrman1.size() == 0); try { unsigned char pchMsgTmp[4]; - ssPeers1 >> FLATDATA(pchMsgTmp); + ssPeers1 >> pchMsgTmp; ssPeers1 >> addrman1; } catch (const std::exception& e) { exceptionThrown = true; @@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE(caddrdb_read_corrupted) BOOST_CHECK(addrman1.size() == 0); try { unsigned char pchMsgTmp[4]; - ssPeers1 >> FLATDATA(pchMsgTmp); + ssPeers1 >> pchMsgTmp; ssPeers1 >> addrman1; } catch (const std::exception& e) { exceptionThrown = true; diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 7a79a77e8b..9b8b7bdc56 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -19,11 +19,15 @@ protected: int intval; bool boolval; std::string stringval; - const char* charstrval; + char charstrval[16]; CTransactionRef txval; public: CSerializeMethodsTestSingle() = default; - CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), charstrval(charstrvalin), txval(MakeTransactionRef(txvalin)){} + CSerializeMethodsTestSingle(int intvalin, bool boolvalin, std::string stringvalin, const char* charstrvalin, CTransaction txvalin) : intval(intvalin), boolval(boolvalin), stringval(std::move(stringvalin)), txval(MakeTransactionRef(txvalin)) + { + memcpy(charstrval, charstrvalin, sizeof(charstrval)); + } + ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> @@ -31,7 +35,7 @@ public: READWRITE(intval); READWRITE(boolval); READWRITE(stringval); - READWRITE(FLATDATA(charstrval)); + READWRITE(charstrval); READWRITE(txval); } @@ -53,7 +57,7 @@ public: template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(intval, boolval, stringval, FLATDATA(charstrval), txval); + READWRITE(intval, boolval, stringval, charstrval, txval); } }; @@ -344,7 +348,7 @@ BOOST_AUTO_TEST_CASE(class_methods) int intval(100); bool boolval(true); std::string stringval("testing"); - const char* charstrval("testing charstr"); + const char charstrval[16] = "testing charstr"; CMutableTransaction txval; CSerializeMethodsTestSingle methodtest1(intval, boolval, stringval, charstrval, txval); CSerializeMethodsTestMany methodtest2(intval, boolval, stringval, charstrval, txval); @@ -360,7 +364,7 @@ BOOST_AUTO_TEST_CASE(class_methods) BOOST_CHECK(methodtest2 == methodtest3); BOOST_CHECK(methodtest3 == methodtest4); - CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, FLATDATA(charstrval), txval); + CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, charstrval, txval); ss2 >> methodtest3; BOOST_CHECK(methodtest3 == methodtest4); } diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 1108dab584..5d057108b1 100644 
--- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -57,16 +57,16 @@ BOOST_AUTO_TEST_CASE(streams_vector_writer) BOOST_CHECK((vch == std::vector<unsigned char>{{0, 0, 0, 0, 1, 2}})); vch.clear(); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes)); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, FLATDATA(bytes)); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); BOOST_CHECK((vch == std::vector<unsigned char>{{3, 4, 5, 6}})); vch.clear(); vch.resize(4, 8); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, FLATDATA(bytes), b); + CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); BOOST_CHECK((vch == std::vector<unsigned char>{{8, 8, 1, 3, 4, 5, 6, 2}})); vch.clear(); } diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp index 95c4825b84..9390a93b99 100644 --- a/src/test/test_bitcoin.cpp +++ b/src/test/test_bitcoin.cpp @@ -145,9 +145,9 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& for (const CMutableTransaction& tx : txns) block.vtx.push_back(MakeTransactionRef(tx)); // IncrementExtraNonce creates a valid coinbase and merkleRoot - unsigned int extraNonce = 0; { LOCK(cs_main); + unsigned int extraNonce = 0; IncrementExtraNonce(&block, chainActive.Tip(), extraNonce); } diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index b6f3cbe2b7..4b44bbadac 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -185,17 +185,11 @@ BOOST_AUTO_TEST_CASE(util_FormatISO8601Time) BOOST_CHECK_EQUAL(FormatISO8601Time(1317425777), "23:36:17Z"); } -class TestArgsManager : public ArgsManager +struct TestArgsManager : public ArgsManager { -public: - std::map<std::string, std::string>& GetMapArgs() - { - return mapArgs; - }; - const std::map<std::string, std::vector<std::string> >& GetMapMultiArgs() - { - return mapMultiArgs; - }; + std::map<std::string, std::string>& GetMapArgs() { return mapArgs; } + const std::map<std::string, std::vector<std::string> >& GetMapMultiArgs() { return mapMultiArgs; } + const std::unordered_set<std::string>& GetNegatedArgs() { return m_negated_args; } }; BOOST_AUTO_TEST_CASE(util_ParseParameters) @@ -223,6 +217,54 @@ BOOST_AUTO_TEST_CASE(util_ParseParameters) BOOST_CHECK(testArgs.GetArgs("-ccc").size() == 2); } +BOOST_AUTO_TEST_CASE(util_GetBoolArg) +{ + TestArgsManager testArgs; + const char *argv_test[] = { + "ignored", "-a", "-nob", "-c=0", "-d=1", "-e=false", "-f=true"}; + testArgs.ParseParameters(7, (char**)argv_test); + + // Each letter should be set. + for (char opt : "abcdef") + BOOST_CHECK(testArgs.IsArgSet({'-', opt}) || !opt); + + // Nothing else should be in the map + BOOST_CHECK(testArgs.GetMapArgs().size() == 6 && + testArgs.GetMapMultiArgs().size() == 6); + + // The -no prefix should get stripped on the way in. + BOOST_CHECK(!testArgs.IsArgSet("-nob")); + + // The -b option is flagged as negated, and nothing else is + BOOST_CHECK(testArgs.IsArgNegated("-b")); + BOOST_CHECK(testArgs.GetNegatedArgs().size() == 1); + BOOST_CHECK(!testArgs.IsArgNegated("-a")); + + // Check expected values. 
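+    // Note that -e=false and -f=true both end up false in the checks below:
+    // InterpretBool() relies on atoi(), so any non-numeric value (including
+    // "true") parses as 0. See the InterpretBool() comment in util.cpp.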
+ BOOST_CHECK(testArgs.GetBoolArg("-a", false) == true); + BOOST_CHECK(testArgs.GetBoolArg("-b", true) == false); + BOOST_CHECK(testArgs.GetBoolArg("-c", true) == false); + BOOST_CHECK(testArgs.GetBoolArg("-d", false) == true); + BOOST_CHECK(testArgs.GetBoolArg("-e", true) == false); + BOOST_CHECK(testArgs.GetBoolArg("-f", true) == false); +} + +BOOST_AUTO_TEST_CASE(util_GetBoolArgEdgeCases) +{ + // Test some awful edge cases that hopefully no user will ever exercise. + TestArgsManager testArgs; + const char *argv_test[] = {"ignored", "-nofoo", "-foo", "-nobar=0"}; + testArgs.ParseParameters(4, (char**)argv_test); + + // This was passed twice, second one overrides the negative setting. + BOOST_CHECK(!testArgs.IsArgNegated("-foo")); + BOOST_CHECK(testArgs.GetBoolArg("-foo", false) == true); + + // A double negative is a positive. + BOOST_CHECK(testArgs.IsArgNegated("-bar")); + BOOST_CHECK(testArgs.GetBoolArg("-bar", false) == true); +} + BOOST_AUTO_TEST_CASE(util_GetArg) { TestArgsManager testArgs; @@ -704,9 +746,8 @@ static constexpr char ExitCommand = 'X'; static void TestOtherProcess(fs::path dirname, std::string lockname, int fd) { char ch; - int rv; while (true) { - rv = read(fd, &ch, 1); // Wait for command + int rv = read(fd, &ch, 1); // Wait for command assert(rv == 1); switch(ch) { case LockCommand: diff --git a/src/util.cpp b/src/util.cpp index dbf9065113..490897899b 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -70,8 +70,6 @@ #include <malloc.h> #endif -#include <boost/algorithm/string/case_conv.hpp> // for to_lower() -#include <boost/algorithm/string/predicate.hpp> // for startswith() and endswith() #include <boost/interprocess/sync/file_lock.hpp> #include <boost/program_options/detail/config_file.hpp> #include <boost/thread.hpp> @@ -432,7 +430,23 @@ bool DirIsWritable(const fs::path& directory) return true; } -/** Interpret string as boolean, for argument parsing */ +/** + * Interpret a string argument as a boolean. + * + * The definition of atoi() requires that non-numeric string values like "foo", + * return 0. This means that if a user unintentionally supplies a non-integer + * argument here, the return value is always false. This means that -foo=false + * does what the user probably expects, but -foo=true is well defined but does + * not do what they probably expected. + * + * The return value of atoi() is undefined when given input not representable as + * an int. On most systems this means string value between "-2147483648" and + * "2147483647" are well defined (this method will return true). Setting + * -txindex=2147483648 on most systems, however, is probably undefined. + * + * For a more extensive discussion of this topic (and a wide range of opinions + * on the Right Way to change this code), see PR12713. + */ static bool InterpretBool(const std::string& strValue) { if (strValue.empty()) @@ -440,13 +454,30 @@ static bool InterpretBool(const std::string& strValue) return (atoi(strValue) != 0); } -/** Turn -noX into -X=0 */ -static void InterpretNegativeSetting(std::string& strKey, std::string& strValue) +/** + * Interpret -nofoo as if the user supplied -foo=0. + * + * This method also tracks when the -no form was supplied, and treats "-foo" as + * a negated option when this happens. This can be later checked using the + * IsArgNegated() method. One use case for this is to have a way to disable + * options that are not normally boolean (e.g. using -nodebuglogfile to request + * that debug log output is not sent to any file at all). 
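+ *
+ * For example, matching the behaviour exercised in util_tests.cpp:
+ *   -nofoo        becomes -foo=0, and -foo is marked as negated
+ *   -nofoo=0      becomes -foo=1 (a double negative), -foo still marked as negated
+ *   -nofoo -foo   leaves -foo set; the later option clears the negation mark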
+ */ +void ArgsManager::InterpretNegatedOption(std::string& key, std::string& val) { - if (strKey.length()>3 && strKey[0]=='-' && strKey[1]=='n' && strKey[2]=='o') - { - strKey = "-" + strKey.substr(3); - strValue = InterpretBool(strValue) ? "0" : "1"; + if (key.substr(0, 3) == "-no") { + bool bool_val = InterpretBool(val); + if (!bool_val ) { + // Double negatives like -nofoo=0 are supported (but discouraged) + LogPrintf("Warning: parsed potentially confusing double-negative %s=%s\n", key, val); + } + key.erase(1, 2); + m_negated_args.insert(key); + val = bool_val ? "0" : "1"; + } else { + // In an invocation like "bitcoind -nofoo -foo" we want to unmark -foo + // as negated when we see the second option. + m_negated_args.erase(key); } } @@ -455,34 +486,34 @@ void ArgsManager::ParseParameters(int argc, const char* const argv[]) LOCK(cs_args); mapArgs.clear(); mapMultiArgs.clear(); - - for (int i = 1; i < argc; i++) - { - std::string str(argv[i]); - std::string strValue; - size_t is_index = str.find('='); - if (is_index != std::string::npos) - { - strValue = str.substr(is_index+1); - str = str.substr(0, is_index); + m_negated_args.clear(); + + for (int i = 1; i < argc; i++) { + std::string key(argv[i]); + std::string val; + size_t is_index = key.find('='); + if (is_index != std::string::npos) { + val = key.substr(is_index + 1); + key.erase(is_index); } #ifdef WIN32 - boost::to_lower(str); - if (boost::algorithm::starts_with(str, "/")) - str = "-" + str.substr(1); + std::transform(key.begin(), key.end(), key.begin(), ::tolower); + if (key[0] == '/') + key[0] = '-'; #endif - if (str[0] != '-') + if (key[0] != '-') break; - // Interpret --foo as -foo. - // If both --foo and -foo are set, the last takes effect. - if (str.length() > 1 && str[1] == '-') - str = str.substr(1); - InterpretNegativeSetting(str, strValue); + // Transform --foo to -foo + if (key.length() > 1 && key[1] == '-') + key.erase(0, 1); + + // Transform -nofoo to -foo=0 + InterpretNegatedOption(key, val); - mapArgs[str] = strValue; - mapMultiArgs[str].push_back(strValue); + mapArgs[key] = val; + mapMultiArgs[key].push_back(val); } } @@ -500,6 +531,12 @@ bool ArgsManager::IsArgSet(const std::string& strArg) const return mapArgs.count(strArg); } +bool ArgsManager::IsArgNegated(const std::string& strArg) const +{ + LOCK(cs_args); + return m_negated_args.find(strArg) != m_negated_args.end(); +} + std::string ArgsManager::GetArg(const std::string& strArg, const std::string& strDefault) const { LOCK(cs_args); @@ -711,7 +748,7 @@ void ArgsManager::ReadConfigFile(const std::string& confPath) // Don't overwrite existing settings so command line settings override bitcoin.conf std::string strKey = std::string("-") + it->string_key; std::string strValue = it->value[0]; - InterpretNegativeSetting(strKey, strValue); + InterpretNegatedOption(strKey, strValue); if (mapArgs.count(strKey) == 0) mapArgs[strKey] = strValue; mapMultiArgs[strKey].push_back(strValue); diff --git a/src/util.h b/src/util.h index 592041c0cf..4c473c9354 100644 --- a/src/util.h +++ b/src/util.h @@ -25,6 +25,7 @@ #include <map> #include <stdint.h> #include <string> +#include <unordered_set> #include <vector> #include <boost/signals2/signal.hpp> @@ -224,6 +225,8 @@ protected: mutable CCriticalSection cs_args; std::map<std::string, std::string> mapArgs; std::map<std::string, std::vector<std::string>> mapMultiArgs; + std::unordered_set<std::string> m_negated_args; + public: void ParseParameters(int argc, const char*const argv[]); void ReadConfigFile(const std::string& 
confPath); @@ -245,6 +248,15 @@ public: bool IsArgSet(const std::string& strArg) const; /** + * Return true if the argument was originally passed as a negated option, + * i.e. -nofoo. + * + * @param strArg Argument to get (e.g. "-foo") + * @return true if the argument was passed negated + */ + bool IsArgNegated(const std::string& strArg) const; + + /** * Return string argument or default value * * @param strArg Argument to get (e.g. "-foo") @@ -292,6 +304,11 @@ public: // Forces an arg setting. Called by SoftSetArg() if the arg hasn't already // been set. Also called directly in testing. void ForceSetArg(const std::string& strArg, const std::string& strValue); + +private: + + // Munge -nofoo into -foo=0 and track the value as negated. + void InterpretNegatedOption(std::string &key, std::string &val); }; extern ArgsManager gArgs; diff --git a/src/validation.cpp b/src/validation.cpp index 839b248143..77764ff923 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -227,6 +227,7 @@ CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; CBlockPolicyEstimator feeEstimator; CTxMemPool mempool(&feeEstimator); +std::atomic_bool g_is_mempool_loaded{false}; /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; @@ -1077,7 +1078,7 @@ static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMes // Write index header unsigned int nSize = GetSerializeSize(fileout, block); - fileout << FLATDATA(messageStart) << nSize; + fileout << messageStart << nSize; // Write block long fileOutPos = ftell(fileout.Get()); @@ -1441,7 +1442,7 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint // Write index header unsigned int nSize = GetSerializeSize(fileout, blockundo); - fileout << FLATDATA(messageStart) << nSize; + fileout << messageStart << nSize; // Write undo data long fileOutPos = ftell(fileout.Get()); @@ -4283,7 +4284,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB unsigned char buf[CMessageHeader::MESSAGE_START_SIZE]; blkdat.FindByte(chainparams.MessageStart()[0]); nRewind = blkdat.GetPos()+1; - blkdat >> FLATDATA(buf); + blkdat >> buf; if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) continue; // read size diff --git a/src/validation.h b/src/validation.h index 0a78075316..4031989f00 100644 --- a/src/validation.h +++ b/src/validation.h @@ -158,6 +158,7 @@ extern CScript COINBASE_FLAGS; extern CCriticalSection cs_main; extern CBlockPolicyEstimator feeEstimator; extern CTxMemPool mempool; +extern std::atomic_bool g_is_mempool_loaded; typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap; extern BlockMap& mapBlockIndex; extern uint64_t nLastBlockTx; diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index 8b5e5681e4..37d60aad61 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -42,98 +42,131 @@ bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {re bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112tx_special - test negative argument to OP_CSV """ - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import ToHex, CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.comptool import TestInstance, 
TestManager -from test_framework.script import * +from decimal import Decimal +from itertools import product from io import BytesIO import time -base_relative_locktime = 10 -seq_disable_flag = 1<<31 -seq_random_high_bit = 1<<25 -seq_type_flag = 1<<22 -seq_random_low_bit = 1<<18 - -# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field -# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1 -relative_locktimes = [] -for b31 in range(2): - b25times = [] - for b25 in range(2): - b22times = [] - for b22 in range(2): - b18times = [] - for b18 in range(2): - rlt = base_relative_locktime - if (b31): - rlt = rlt | seq_disable_flag - if (b25): - rlt = rlt | seq_random_high_bit - if (b22): - rlt = rlt | seq_type_flag - if (b18): - rlt = rlt | seq_random_low_bit - b18times.append(rlt) - b22times.append(b18times) - b25times.append(b22times) - relative_locktimes.append(b25times) - -def all_rlt_txs(txarray): +from test_framework.blocktools import create_coinbase, create_block +from test_framework.messages import ToHex, CTransaction +from test_framework.mininode import network_thread_start, P2PDataStore +from test_framework.script import ( + CScript, + OP_CHECKSEQUENCEVERIFY, + OP_DROP, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + get_bip9_status, + hex_str_to_bytes, +) + +BASE_RELATIVE_LOCKTIME = 10 +SEQ_DISABLE_FLAG = 1 << 31 +SEQ_RANDOM_HIGH_BIT = 1 << 25 +SEQ_TYPE_FLAG = 1 << 22 +SEQ_RANDOM_LOW_BIT = 1 << 18 + +def relative_locktime(sdf, srhb, stf, srlb): + """Returns a locktime with certain bits set.""" + + locktime = BASE_RELATIVE_LOCKTIME + if sdf: + locktime |= SEQ_DISABLE_FLAG + if srhb: + locktime |= SEQ_RANDOM_HIGH_BIT + if stf: + locktime |= SEQ_TYPE_FLAG + if srlb: + locktime |= SEQ_RANDOM_LOW_BIT + return locktime + +def all_rlt_txs(txs): + return [tx['tx'] for tx in txs] + +def create_transaction(node, txid, to_address, amount): + inputs = [{"txid": txid, "vout": 0}] + outputs = {to_address: amount} + rawtx = node.createrawtransaction(inputs, outputs) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(rawtx)) + tx.deserialize(f) + return tx + +def sign_transaction(node, unsignedtx): + rawtx = ToHex(unsignedtx) + signresult = node.signrawtransactionwithwallet(rawtx) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(signresult['hex'])) + tx.deserialize(f) + return tx + +def create_bip112special(node, input, txversion, address): + tx = create_transaction(node, input, address, Decimal("49.98")) + tx.nVersion = txversion + signtx = sign_transaction(node, tx) + signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + return signtx + +def send_generic_input_tx(node, coinbases, address): + amount = Decimal("49.99") + return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount)))) + +def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0): + """Returns a list of bip68 transactions with different bits set.""" txs = [] - for b31 in range(2): - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - txs.append(txarray[b31][b25][b22][b18]) + assert(len(bip68inputs) >= 16) + for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): + locktime = relative_locktime(sdf, srhb, stf, srlb) + tx = create_transaction(node, 
bip68inputs[i], address, Decimal("49.98")) + tx.nVersion = txversion + tx.vin[0].nSequence = locktime + locktime_delta + tx = sign_transaction(node, tx) + tx.rehash() + txs.append({'tx': tx, 'sdf': sdf, 'stf': stf}) + return txs -class BIP68_112_113Test(ComparisonTestFramework): +def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0): + """Returns a list of bip68 transactions with different bits set.""" + txs = [] + assert(len(bip112inputs) >= 16) + for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): + locktime = relative_locktime(sdf, srhb, stf, srlb) + tx = create_transaction(node, bip112inputs[i], address, Decimal("49.98")) + if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed + tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta + else: # vary nSequence instead, OP_CSV is fixed + tx.vin[0].nSequence = locktime + locktime_delta + tx.nVersion = txversion + signtx = sign_transaction(node, tx) + if (varyOP_CSV): + signtx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + else: + signtx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + tx.rehash() + txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf}) + return txs + +class BIP68_112_113Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']] - def run_test(self): - test = TestManager(self, self.options.tmpdir) - test.add_all_connections(self.nodes) - network_thread_start() - test.run() - - def send_generic_input_tx(self, node, coinbases): - amount = Decimal("49.99") - return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) - - def create_transaction(self, node, txid, to_address, amount): - inputs = [{ "txid" : txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(rawtx)) - tx.deserialize(f) - return tx - - def sign_transaction(self, node, unsignedtx): - rawtx = ToHex(unsignedtx) - signresult = node.signrawtransactionwithwallet(rawtx) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - def generate_blocks(self, number, version, test_blocks = []): + def generate_blocks(self, number, version, test_blocks=None): + if test_blocks is None: + test_blocks = [] for i in range(number): block = self.create_test_block([], version) - test_blocks.append([block, True]) + test_blocks.append(block) self.last_block_time += 600 self.tip = block.sha256 self.tipheight += 1 return test_blocks - def create_test_block(self, txs, version = 536870912): + def create_test_block(self, txs, version=536870912): block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600) block.nVersion = version block.vtx.extend(txs) @@ -142,184 +175,148 @@ class BIP68_112_113Test(ComparisonTestFramework): block.solve() return block - def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): - txs = [] - assert(len(bip68inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], 
bip68inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - tx.nVersion = txversion - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - b18txs.append(self.sign_transaction(self.nodes[0], tx)) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def create_bip112special(self, input, txversion): - tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - return signtx - - def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): - txs = [] - assert(len(bip112inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed - tx.vin[0].nSequence = base_relative_locktime + locktime_delta - else: # vary nSequence instead, OP_CSV is fixed - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - if (varyOP_CSV): - signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - else: - signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - b18txs.append(signtx) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def get_tests(self): - long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future - self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time - self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs - self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time - self.tipheight = 82 # height of the next block to build + def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, request_block=True): + """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. 
+ + Call with success = False if the tip shouldn't advance to the most recent block.""" + self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_code=reject_code, reject_reason=reject_reason, request_block=request_block) + + def run_test(self): + self.nodes[0].add_p2p_connection(P2PDataStore()) + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + self.log.info("Generate blocks in the past for coinbase outputs.") + long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future + self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time + self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs + self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time + self.tipheight = 82 # height of the next block to build self.last_block_time = long_past_time - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) + self.tip = int(self.nodes[0].getbestblockhash(), 16) self.nodeaddress = self.nodes[0].getnewaddress() + self.log.info("Test that the csv softfork is DEFINED") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') test_blocks = self.generate_blocks(61, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 1 - # Advanced from DEFINED to STARTED, height = 143 + self.sync_blocks(test_blocks) + + self.log.info("Advance from DEFINED to STARTED, height = 143") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') - # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 2 - # Failed to advance past STARTED, height = 287 + self.log.info("Fail to achieve LOCKED_IN") + # 100 out of 144 signal bit 0. 
Use a variety of bits to simulate multiple parallel softforks + + test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) + self.sync_blocks(test_blocks) + + self.log.info("Failed to advance past STARTED, height = 287") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') + self.log.info("Generate blocks to achieve LOCK-IN") # 108 out of 144 signal bit 0 to achieve lock-in # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 3 - # Advanced from STARTED to LOCKED_IN, height = 431 + test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) + self.sync_blocks(test_blocks) + + self.log.info("Advanced from STARTED to LOCKED_IN, height = 431") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') - # 140 more version 4 blocks + # Generate 140 more version 4 blocks test_blocks = self.generate_blocks(140, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 4 + self.sync_blocks(test_blocks) - ### Inputs at height = 572 + # Inputs at height = 572 + # # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] for i in range(16): - bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) + # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112basicinputs = [] for j in range(2): inputs = [] for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112basicinputs.append(inputs) + # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112diverseinputs = [] for j in range(2): inputs = [] for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112diverseinputs.append(inputs) + # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress) + # 1 normal 
input - bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress) self.nodes[0].setmocktime(self.last_block_time + 600) - inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 + inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 self.nodes[0].setmocktime(0) - self.tip = int("0x" + inputblockhash, 0) + self.tip = int(inputblockhash, 16) self.tipheight += 1 self.last_block_time += 600 - assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1) + assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 5 - # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575) + self.sync_blocks(test_blocks) + + self.log.info("Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)") assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to put in appropriate block time - bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 - bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes - bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) - bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) + bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress) + bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1) - bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2) + bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress) + bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1) - bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1) + bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1) + bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1) - bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2) + bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress) + 
bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1) - bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1) + bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1) + bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1) # -1 OP_CSV OP_DROP input - bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) - bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) + bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress) + bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress) + + self.log.info("TESTING") + self.log.info("Pre-Soft Fork Tests. All txs should pass.") + self.log.info("Test version 1 txs") - ### TESTING ### - ################################## - ### Before Soft Forks Activate ### - ################################## - # All txs should pass - ### Version 1 txs ### success_txs = [] # add BIP113 tx and -1 CSV tx - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) # add BIP 68 txs @@ -330,14 +327,15 @@ class BIP68_112_113Test(ComparisonTestFramework): # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - ### Version 2 txs ### + self.log.info("Test version 2 txs") + success_txs = [] # add BIP113 tx and -1 CSV tx - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) # add BIP 68 txs @@ -348,187 +346,149 @@ class BIP68_112_113Test(ComparisonTestFramework): # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block test_blocks = self.generate_blocks(1, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 8 + self.sync_blocks(test_blocks) assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') + self.log.info("Post-Soft Fork 
Tests.") - ################################# - ### After Soft Forks Activate ### - ################################# - ### BIP 113 ### + self.log.info("BIP 113 tests") # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 + self.sync_blocks([self.create_test_block([bip113tx])], success=False) # BIP 113 tests should now pass if the locktime is < MTP - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 + self.sync_blocks([self.create_test_block([bip113tx])]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 13 + self.sync_blocks(test_blocks) + + self.log.info("BIP 68 tests") + self.log.info("Test version 1 txs - all should still pass") - ### BIP 68 ### - ### Version 1 txs ### - # All still pass success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - ### Version 2 txs ### - bip68success_txs = [] + self.log.info("Test version 2 txs") + # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 + bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] + self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512 - bip68timetxs = [] - for b25 in range(2): - for b18 in range(2): - bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) + bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and 
tx['stf']] for tx in bip68timetxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19 - bip68heighttxs = [] - for b25 in range(2): - for b18 in range(2): - bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) + self.sync_blocks([self.create_test_block([tx])], success=False) + + bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']] for tx in bip68heighttxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23 + self.sync_blocks([self.create_test_block([tx])], success=False) # Advance one block to 581 test_blocks = self.generate_blocks(1, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 24 + self.sync_blocks(test_blocks) # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 bip68success_txs.extend(bip68timetxs) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25 + self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) for tx in bip68heighttxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29 + self.sync_blocks([self.create_test_block([tx])], success=False) # Advance one block to 582 test_blocks = self.generate_blocks(1, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 30 + self.sync_blocks(test_blocks) # All BIP 68 txs should pass bip68success_txs.extend(bip68heighttxs) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31 + self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + self.log.info("BIP 112 tests") + self.log.info("Test version 1 txs") - ### BIP 112 ### - ### Version 1 txs ### # -1 OP_CSV tx should fail - yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32 + self.sync_blocks([self.create_test_block([bip112tx_special_v1])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) - success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 33 + + success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']] + success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']] + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) - fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) - + fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1) + fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1) + fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']] + fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81 + self.sync_blocks([self.create_test_block([tx])], success=False) + + self.log.info("Test version 2 txs") - ### Version 2 txs ### # -1 OP_CSV tx should 
fail - yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82 + self.sync_blocks([self.create_test_block([bip112tx_special_v2])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met) - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV - success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 + success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']] + success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']] - yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 + self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## - # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 + # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## + # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check + fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2) + fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 + self.sync_blocks([self.create_test_block([tx])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail - fail_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence + fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 + self.sync_blocks([self.create_test_block([tx])], success=False) # If sequencelock types mismatch, tx should fail - fail_txs = [] - for b25 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence - fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV + fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']] + fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']] for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 + self.sync_blocks([self.create_test_block([tx])], success=False) # Remaining txs should pass, just test masking works properly - success_txs = [] - for b25 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence - success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV - yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 + success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']] + success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']] + 
self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Additional test, of checking that comparison of two time types works properly time_txs = [] - for b25 in range(2): - for b18 in range(2): - tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] - tx.vin[0].nSequence = base_relative_locktime | seq_type_flag - signtx = self.sign_transaction(self.nodes[0], tx) - time_txs.append(signtx) - yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]: + tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG + signtx = sign_transaction(self.nodes[0], tx) + time_txs.append(signtx) - ### Missing aspects of test - ## Testing empty stack fails + self.sync_blocks([self.create_test_block(time_txs)]) + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + # TODO: Test empty stack fails if __name__ == '__main__': BIP68_112_113Test().main() diff --git a/test/functional/feature_help.py b/test/functional/feature_help.py new file mode 100755 index 0000000000..1e62d7a409 --- /dev/null +++ b/test/functional/feature_help.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Verify that starting bitcoin with -h works as expected.""" +import subprocess + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class HelpTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def setup_network(self): + self.add_nodes(self.num_nodes) + # Don't start the node + + def run_test(self): + self.log.info("Start bitcoin with -h for help text") + self.nodes[0].start(extra_args=['-h'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) + # Node should exit immediately and output help to stdout. + ret_code = self.nodes[0].process.wait(timeout=1) + assert_equal(ret_code, 0) + output = self.nodes[0].process.stdout.read() + assert b'Options' in output + self.log.info("Help text received: {} (...)".format(output[0:60])) + self.nodes[0].running = False + + self.log.info("Start bitcoin with -version for version information") + self.nodes[0].start(extra_args=['-version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE) + # Node should exit immediately and output version to stdout. + ret_code = self.nodes[0].process.wait(timeout=1) + assert_equal(ret_code, 0) + output = self.nodes[0].process.stdout.read() + assert b'version' in output + self.log.info("Version text received: {} (...)".format(output[0:60])) + self.nodes[0].running = False + +if __name__ == '__main__': + HelpTest().main() diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index d8348432aa..72b5f4748f 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -10,6 +10,7 @@ Tests correspond to code in rpc/net.cpp. 
from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + assert_greater_than_or_equal, assert_raises_rpc_error, connect_nodes_bi, p2p_port, @@ -33,26 +34,34 @@ class NetTest(BitcoinTestFramework): assert_equal(self.nodes[0].getconnectioncount(), 2) def _test_getnettotals(self): - # check that getnettotals totalbytesrecv and totalbytessent - # are consistent with getpeerinfo + # getnettotals totalbytesrecv and totalbytessent should be + # consistent with getpeerinfo. Since the RPC calls are not atomic, + # and messages might have been recvd or sent between RPC calls, call + # getnettotals before and after and verify that the returned values + # from getpeerinfo are bounded by those values. + net_totals_before = self.nodes[0].getnettotals() peer_info = self.nodes[0].getpeerinfo() + net_totals_after = self.nodes[0].getnettotals() assert_equal(len(peer_info), 2) - net_totals = self.nodes[0].getnettotals() - assert_equal(sum([peer['bytesrecv'] for peer in peer_info]), - net_totals['totalbytesrecv']) - assert_equal(sum([peer['bytessent'] for peer in peer_info]), - net_totals['totalbytessent']) + peers_recv = sum([peer['bytesrecv'] for peer in peer_info]) + peers_sent = sum([peer['bytessent'] for peer in peer_info]) + + assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv']) + assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv) + assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent']) + assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent) + # test getnettotals and getpeerinfo by doing a ping # the bytes sent/received should change # note ping and pong are 32 bytes each self.nodes[0].ping() - wait_until(lambda: (net_totals['totalbytessent'] + 32*2) == self.nodes[0].getnettotals()['totalbytessent'], timeout=1) - wait_until(lambda: (net_totals['totalbytesrecv'] + 32*2) == self.nodes[0].getnettotals()['totalbytesrecv'], timeout=1) + wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1) + wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1) peer_info_after_ping = self.nodes[0].getpeerinfo() for before, after in zip(peer_info, peer_info_after_ping): - assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong']) - assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping']) + assert_greater_than_or_equal(after['bytesrecv_per_msg']['pong'], before['bytesrecv_per_msg']['pong'] + 32) + assert_greater_than_or_equal(after['bytessent_per_msg']['ping'], before['bytessent_per_msg']['ping'] + 32) def _test_getnetworkinginfo(self): assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) @@ -78,8 +87,7 @@ class NetTest(BitcoinTestFramework): assert_equal(len(added_nodes), 1) assert_equal(added_nodes[0]['addednode'], ip_port) # check that a non-existent node returns an error - assert_raises_rpc_error(-24, "Node has not been added", - self.nodes[0].getaddednodeinfo, '1.1.1.1') + assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1') def _test_getpeerinfo(self): peer_info = [x.getpeerinfo() for x in self.nodes] diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 583d07deec..291ac3ee46 100755 --- a/test/functional/test_framework/test_node.py +++ 
b/test/functional/test_framework/test_node.py @@ -30,6 +30,11 @@ JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 + +class FailedToStartError(Exception): + """Raised when a node fails to start correctly.""" + + class TestNode(): """A class for representing a bitcoind node under test. @@ -102,7 +107,8 @@ class TestNode(): # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): - assert self.process.poll() is None, "bitcoind exited with status %i during initialization" % self.process.returncode + if self.process.poll() is not None: + raise FailedToStartError('bitcoind exited with status {} during initialization'.format(self.process.returncode)) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() @@ -179,9 +185,9 @@ class TestNode(): self.start(extra_args, stderr=log_stderr, *args, **kwargs) self.wait_for_rpc_connection() self.stop_node() - self.wait_util_stopped() - except Exception as e: - assert 'bitcoind exited' in str(e) # node must have shutdown + self.wait_until_stopped() + except FailedToStartError as e: + self.log.debug('bitcoind failed to start: %s', e) self.running = False self.process = None # Check stderr for expected message diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 39f1180a45..a2e92dce3b 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -138,6 +138,7 @@ BASE_SCRIPTS= [ 'p2p_node_network_limited.py', 'feature_blocksdir.py', 'feature_config_args.py', + 'feature_help.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] |
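A minimal sketch (not itself part of this patch) of what the serialize.h array overloads above allow at call sites: fixed-size character arrays now stream directly, so the FLATDATA() wrapper used by the removed lines in net_tests.cpp and validation.cpp is no longer needed for them. Assumes CDataStream, SER_DISK and CLIENT_VERSION as used in the tests above.

    // Sketch only: round-trip a fixed-size byte array through a CDataStream.
    #include <clientversion.h>
    #include <streams.h>

    void ArrayRoundTripSketch()
    {
        unsigned char buf[4] = {0x01, 0x02, 0x03, 0x04};
        CDataStream ss(SER_DISK, CLIENT_VERSION);
        ss << buf;              // Serialize(Stream&, const unsigned char (&)[N]) writes the N raw bytes
        unsigned char out[4];
        ss >> out;              // the matching Unserialize overload reads them back
    }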