Diffstat (limited to 'contrib')
-rw-r--r--   contrib/builder-keys/keys.txt                   1
-rw-r--r--   contrib/guix/manifest.scm                       7
-rw-r--r--   contrib/guix/patches/gcc-broken-longjmp.patch  68
-rwxr-xr-x   contrib/seeds/makeseeds.py                     90
-rw-r--r--   contrib/seeds/suspicious_hosts.txt             16
-rw-r--r--   contrib/testgen/README.md                       2
-rwxr-xr-x   contrib/testgen/gen_key_io_test_vectors.py     22
7 files changed, 137 insertions, 69 deletions
diff --git a/contrib/builder-keys/keys.txt b/contrib/builder-keys/keys.txt index 913e7d32f6..c70069b440 100644 --- a/contrib/builder-keys/keys.txt +++ b/contrib/builder-keys/keys.txt @@ -51,5 +51,6 @@ ED9BDF7AD6A55E232E84524257FF9BDBCC301009 Sjors Provoost (sjors) 9EDAFF80E080659604F4A76B2EBB056FD847F8A7 Stephan Oeste (Emzy) 6DEEF79B050C4072509B743F8C275BC595448867 Tomas Kanocz (KanoczTomas) AEC1884398647C47413C1C3FB1179EB7347DC10D Warren Togami (wtogami) +74E2DEF5D77260B98BC19438099BAD163C70FBFA Will Clark (will8clark) 79D00BAC68B56D422F945A8F8E3A8F3247DBCBBF Willy Ko (willyko) 71A3B16735405025D447E8F274810B012346C9A6 Wladimir J. van der Laan (laanwj) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index fcec592c2c..708d2e698d 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -162,9 +162,10 @@ desirable for building Bitcoin Core release binaries." (define (make-gcc-with-pthreads gcc) (package-with-extra-configure-variable gcc "--enable-threads" "posix")) -(define (make-mingw-w64-cross-gcc-vmov-alignment cross-gcc) +(define (make-mingw-w64-cross-gcc cross-gcc) (package-with-extra-patches cross-gcc - (search-our-patches "vmov-alignment.patch"))) + (search-our-patches "vmov-alignment.patch" + "gcc-broken-longjmp.patch"))) (define (make-mingw-pthreads-cross-toolchain target) "Create a cross-compilation toolchain package for TARGET" @@ -172,7 +173,7 @@ desirable for building Bitcoin Core release binaries." (pthreads-xlibc mingw-w64-x86_64-winpthreads) (pthreads-xgcc (make-gcc-with-pthreads (cross-gcc target - #:xgcc (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc-vmov-alignment base-gcc)) + #:xgcc (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc base-gcc)) #:xbinutils xbinutils #:libc pthreads-xlibc)))) ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and diff --git a/contrib/guix/patches/gcc-broken-longjmp.patch b/contrib/guix/patches/gcc-broken-longjmp.patch new file mode 100644 index 0000000000..1cfc0918b0 --- /dev/null +++ b/contrib/guix/patches/gcc-broken-longjmp.patch @@ -0,0 +1,68 @@ +commit eb5698897c52702498938592d7f76e67d126451f +Author: Eric Botcazou <ebotcazou@adacore.com> +Date: Wed May 5 22:48:51 2021 +0200 + + Fix PR target/100402 + + This is a regression for 64-bit Windows present from mainline down to the 9 + branch and introduced by the fix for PR target/99234. Again SEH, but with + a twist related to the way MinGW implements setjmp/longjmp, which turns out + to be piggybacked on SEH with recent versions of MinGW, i.e. the longjmp + performs a bona-fide unwinding of the stack, because it calls RtlUnwindEx + with the second argument initially passed to setjmp, which is the result of + __builtin_frame_address (0) in the MinGW header file: + + define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) + + This means that we directly expose the frame pointer to the SEH machinery + here (unlike with regular exception handling where we use an intermediate + CFA) and thus that we cannot do whatever we want with it. The old code + would leave it unaligned, i.e. not multiple of 16, whereas the new code + aligns it, but this breaks for some reason; at least it appears that a + .seh_setframe directive with 0 as second argument always works, so the + fix aligns it this way. + + gcc/ + PR target/100402 + * config/i386/i386.c (ix86_compute_frame_layout): For a SEH target, + always return the establisher frame for __builtin_frame_address (0). + gcc/testsuite/ + * gcc.c-torture/execute/20210505-1.c: New test. 
+ +diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c +index 2f838840e96..06ad1b2274e 100644 +--- a/gcc/config/i386/i386.c ++++ b/gcc/config/i386/i386.c +@@ -6356,12 +6356,29 @@ ix86_compute_frame_layout (void) + area, see the SEH code in config/i386/winnt.c for the rationale. */ + frame->hard_frame_pointer_offset = frame->sse_reg_save_offset; + +- /* If we can leave the frame pointer where it is, do so. Also, return ++ /* If we can leave the frame pointer where it is, do so; however return + the establisher frame for __builtin_frame_address (0) or else if the +- frame overflows the SEH maximum frame size. */ ++ frame overflows the SEH maximum frame size. ++ ++ Note that the value returned by __builtin_frame_address (0) is quite ++ constrained, because setjmp is piggybacked on the SEH machinery with ++ recent versions of MinGW: ++ ++ # elif defined(__SEH__) ++ # if defined(__aarch64__) || defined(_ARM64_) ++ # define setjmp(BUF) _setjmp((BUF), __builtin_sponentry()) ++ # elif (__MINGW_GCC_VERSION < 40702) ++ # define setjmp(BUF) _setjmp((BUF), mingw_getsp()) ++ # else ++ # define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) ++ # endif ++ ++ and the second argument passed to _setjmp, if not null, is forwarded ++ to the TargetFrame parameter of RtlUnwindEx by longjmp (after it has ++ built an ExceptionRecord on the fly describing the setjmp buffer). */ + const HOST_WIDE_INT diff + = frame->stack_pointer_offset - frame->hard_frame_pointer_offset; +- if (diff <= 255) ++ if (diff <= 255 && !crtl->accesses_prior_frames) + { + /* The resulting diff will be a multiple of 16 lower than 255, + i.e. at most 240 as required by the unwind data structure. */ diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py index 2b377f6c01..78eb04a836 100755 --- a/contrib/seeds/makeseeds.py +++ b/contrib/seeds/makeseeds.py @@ -10,18 +10,16 @@ import re import sys import dns.resolver import collections +from typing import List, Dict, Union NSEEDS=512 -MAX_SEEDS_PER_ASN=2 - -MIN_BLOCKS = 337600 - -# These are hosts that have been observed to be behaving strangely (e.g. -# aggressively connecting to every node). -with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f: - SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()} +MAX_SEEDS_PER_ASN = { + 'ipv4': 2, + 'ipv6': 10, +} +MIN_BLOCKS = 730000 PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$") @@ -40,9 +38,13 @@ PATTERN_AGENT = re.compile( r"23.99" r")") -def parseline(line): +def parseline(line: str) -> Union[dict, None]: + """ Parses a line from `seeds_main.txt` into a dictionary of details for that line. + or `None`, if the line could not be parsed. + """ sline = line.split() if len(sline) < 11: + # line too short to be valid, skip it. return None m = PATTERN_IPV4.match(sline[0]) sortkey = None @@ -107,25 +109,26 @@ def parseline(line): 'sortkey': sortkey, } -def dedup(ips): - '''deduplicate by address,port''' +def dedup(ips: List[Dict]) -> List[Dict]: + """ Remove duplicates from `ips` where multiple ips share address and port. 
""" d = {} for ip in ips: d[ip['ip'],ip['port']] = ip return list(d.values()) -def filtermultiport(ips): - '''Filter out hosts with more nodes per IP''' +def filtermultiport(ips: List[Dict]) -> List[Dict]: + """ Filter out hosts with more nodes per IP""" hist = collections.defaultdict(list) for ip in ips: hist[ip['sortkey']].append(ip) return [value[0] for (key,value) in list(hist.items()) if len(value)==1] -def lookup_asn(net, ip): - ''' - Look up the asn for an IP (4 or 6) address by querying cymru.com, or None - if it could not be found. - ''' +def lookup_asn(net: str, ip: str) -> Union[int, None]: + """ Look up the asn for an `ip` address by querying cymru.com + on network `net` (e.g. ipv4 or ipv6). + + Returns in integer ASN or None if it could not be found. + """ try: if net == 'ipv4': ipaddr = ip @@ -147,20 +150,33 @@ def lookup_asn(net, ip): return None # Based on Greg Maxwell's seed_filter.py -def filterbyasn(ips, max_per_asn, max_per_net): +def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: + """ Prunes `ips` by + (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and + (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. + """ # Sift out ips by type ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']] ips_onion = [ip for ip in ips if ip['net'] == 'onion'] # Filter IPv46 by ASN, and limit to max_per_net per network result = [] - net_count = collections.defaultdict(int) - asn_count = collections.defaultdict(int) - for ip in ips_ipv46: + net_count: Dict[str, int] = collections.defaultdict(int) + asn_count: Dict[int, int] = collections.defaultdict(int) + + for i, ip in enumerate(ips_ipv46): + if i % 10 == 0: + # give progress update + print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True) + if net_count[ip['net']] == max_per_net: + # do not add this ip as we already too many + # ips from this network continue asn = lookup_asn(ip['net'], ip['ip']) - if asn is None or asn_count[asn] == max_per_asn: + if asn is None or asn_count[asn] == max_per_asn[ip['net']]: + # do not add this ip as we already have too many + # ips from this ASN on this network continue asn_count[asn] += 1 net_count[ip['net']] += 1 @@ -170,35 +186,33 @@ def filterbyasn(ips, max_per_asn, max_per_net): result.extend(ips_onion[0:max_per_net]) return result -def ip_stats(ips): - hist = collections.defaultdict(int) +def ip_stats(ips: List[Dict]) -> str: + """ Format and return pretty string from `ips`. """ + hist: Dict[str, int] = collections.defaultdict(int) for ip in ips: if ip is not None: hist[ip['net']] += 1 - return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion']) + return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}" def main(): lines = sys.stdin.readlines() ips = [parseline(line) for line in lines] print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr) - print('%s Initial' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Initial', file=sys.stderr) # Skip entries with invalid address. ips = [ip for ip in ips if ip is not None] - print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Skip entries with invalid address', file=sys.stderr) # Skip duplicates (in case multiple seeds files were concatenated) ips = dedup(ips) - print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr) - # Skip entries from suspicious hosts. 
- ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS] - print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr) # Enforce minimal number of blocks. ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] - print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Enforce minimal number of blocks', file=sys.stderr) # Require service bit 1. ips = [ip for ip in ips if (ip['service'] & 1) == 1] - print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr) # Require at least 50% 30-day uptime for clearnet, 10% for onion. req_uptime = { 'ipv4': 50, @@ -206,18 +220,18 @@ def main(): 'onion': 10, } ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]] - print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr) # Require a known and recent user agent. ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])] - print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Require a known and recent user agent', file=sys.stderr) # Sort by availability (and use last success as tie breaker) ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) # Filter out hosts with multiple bitcoin ports, these are likely abusive ips = filtermultiport(ips) - print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', file=sys.stderr) # Look up ASNs and limit results, both per ASN and globally. ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) - print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr) + print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr) # Sort the results by IP address (for deterministic output). ips.sort(key=lambda x: (x['net'], x['sortkey'])) for ip in ips: diff --git a/contrib/seeds/suspicious_hosts.txt b/contrib/seeds/suspicious_hosts.txt deleted file mode 100644 index 13385cc816..0000000000 --- a/contrib/seeds/suspicious_hosts.txt +++ /dev/null @@ -1,16 +0,0 @@ -130.211.129.106 -148.251.238.178 -176.9.46.6 -178.63.107.226 -54.173.72.127 -54.174.10.182 -54.183.64.54 -54.194.231.211 -54.66.214.167 -54.66.220.137 -54.67.33.14 -54.77.251.214 -54.94.195.96 -54.94.200.247 -83.81.130.26 -88.198.17.7
\ No newline at end of file diff --git a/contrib/testgen/README.md b/contrib/testgen/README.md index 66276ec9dd..2f0288df16 100644 --- a/contrib/testgen/README.md +++ b/contrib/testgen/README.md @@ -2,7 +2,7 @@ Utilities to generate test vectors for the data-driven Bitcoin tests. -Usage: +To use inside a scripted-diff (or just execute directly): ./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json ./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json diff --git a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py index 4aa7dc200b..7bfb1d76a8 100755 --- a/contrib/testgen/gen_key_io_test_vectors.py +++ b/contrib/testgen/gen_key_io_test_vectors.py @@ -4,10 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Generate valid and invalid base58/bech32(m) address and private key test vectors. - -Usage: - ./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json - ./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json ''' from itertools import islice @@ -131,7 +127,7 @@ def is_valid_bech32(v): def gen_valid_base58_vector(template): '''Generate valid base58 vector''' prefix = bytearray(template[0]) - payload = bytearray(os.urandom(template[1])) + payload = rand_bytes(size=template[1]) suffix = bytearray(template[2]) dst_prefix = bytearray(template[4]) dst_suffix = bytearray(template[5]) @@ -143,7 +139,7 @@ def gen_valid_bech32_vector(template): '''Generate valid bech32 vector''' hrp = template[0] witver = template[1] - witprog = bytearray(os.urandom(template[2])) + witprog = rand_bytes(size=template[2]) encoding = template[4] dst_prefix = bytearray(template[5]) rv = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5)) @@ -173,17 +169,17 @@ def gen_invalid_base58_vector(template): corrupt_suffix = randbool(0.2) if corrupt_prefix: - prefix = os.urandom(1) + prefix = rand_bytes(size=1) else: prefix = bytearray(template[0]) if randomize_payload_size: - payload = os.urandom(max(int(random.expovariate(0.5)), 50)) + payload = rand_bytes(size=max(int(random.expovariate(0.5)), 50)) else: - payload = os.urandom(template[1]) + payload = rand_bytes(size=template[1]) if corrupt_suffix: - suffix = os.urandom(len(template[2])) + suffix = rand_bytes(size=len(template[2])) else: suffix = bytearray(template[2]) @@ -204,7 +200,7 @@ def gen_invalid_bech32_vector(template): to_upper = randbool(0.1) hrp = template[0] witver = template[1] - witprog = bytearray(os.urandom(template[2])) + witprog = rand_bytes(size=template[2]) encoding = template[3] if no_data: @@ -234,6 +230,9 @@ def randbool(p = 0.5): '''Return True with P(p)''' return random.random() < p +def rand_bytes(*, size): + return bytearray(random.getrandbits(8) for _ in range(size)) + def gen_invalid_vectors(): '''Generate invalid test vectors''' # start with some manual edge-cases @@ -250,6 +249,7 @@ def gen_invalid_vectors(): if __name__ == '__main__': import json iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors} + random.seed(42) try: uiter = iters[sys.argv[1]] except IndexError: |
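In makeseeds.py above, the single MAX_SEEDS_PER_ASN constant becomes a per-network limit (2 seeds per ASN for IPv4, 10 for IPv6), and filterbyasn() now looks the cap up via max_per_asn[ip['net']]. The sketch below restates that capping loop in isolation; the stub ASN lookup and the sample addresses are illustrative stand-ins, not part of the patch.

    import collections
    from typing import Dict, List

    MAX_SEEDS_PER_ASN = {'ipv4': 2, 'ipv6': 10}
    NSEEDS = 512

    def lookup_asn_stub(net: str, ip: str) -> int:
        # Stand-in for the real cymru.com lookup used by makeseeds.py.
        fake_asns = {'203.0.113.1': 64496, '203.0.113.2': 64496, '203.0.113.3': 64496}
        return fake_asns.get(ip, 64511)

    def cap_by_asn(ips: List[Dict], max_per_asn: Dict[str, int], max_per_net: int) -> List[Dict]:
        result = []
        net_count: Dict[str, int] = collections.defaultdict(int)
        asn_count: Dict[int, int] = collections.defaultdict(int)
        for ip in ips:
            if net_count[ip['net']] == max_per_net:
                continue  # this network already has enough seeds
            asn = lookup_asn_stub(ip['net'], ip['ip'])
            if asn_count[asn] == max_per_asn[ip['net']]:
                continue  # this ASN already hit its per-network quota
            asn_count[asn] += 1
            net_count[ip['net']] += 1
            result.append(ip)
        return result

    sample = [{'ip': f'203.0.113.{i}', 'net': 'ipv4'} for i in range(1, 5)]
    print(cap_by_asn(sample, MAX_SEEDS_PER_ASN, NSEEDS))
    # Only two of the three 203.0.113.{1,2,3} entries survive: ASN 64496 is capped at 2 for ipv4.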
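lookup_asn() in makeseeds.py gains type hints and a docstring but still works by querying Team Cymru's IP-to-ASN DNS zone (its body is mostly elided from the hunk above). The following is a rough sketch of that kind of lookup for IPv4 only, written against dnspython's dns.resolver.resolve(); the query-name layout and the TXT reply format ("<asn> | <prefix> | <cc> | <registry> | <date>") follow Team Cymru's public interface and should be read as assumptions, not as a copy of the script.

    import dns.resolver  # dnspython, the same dependency makeseeds.py already imports
    from typing import Optional

    def lookup_asn_ipv4(ip: str) -> Optional[int]:
        # e.g. 203.0.113.5 -> 5.113.0.203.origin.asn.cymru.com (assumed zone layout)
        qname = '.'.join(reversed(ip.split('.'))) + '.origin.asn.cymru.com'
        try:
            answer = dns.resolver.resolve(qname, 'TXT')
            # Assumed reply format: "64496 | 203.0.113.0/24 | XX | rir | 2020-01-01"
            txt = next(iter(answer)).strings[0].decode('utf-8')
            return int(txt.split('|')[0].split()[0])
        except Exception:
            return None  # mirror the script's behaviour: unknown ASN -> None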
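In gen_key_io_test_vectors.py, os.urandom() is swapped for a rand_bytes() helper driven by random.getrandbits(), and main() seeds the PRNG with random.seed(42). That makes the generated vectors deterministic, which is what lets the testgen README suggest regenerating them from inside a scripted-diff. A minimal sketch of the property being relied on (the assertion and sizes are illustrative only):

    import random

    def rand_bytes(*, size):
        # Same shape as the patch's helper: bytes come from the seeded Mersenne
        # Twister rather than os.urandom(), so output repeats run to run.
        return bytearray(random.getrandbits(8) for _ in range(size))

    random.seed(42)
    first = bytes(rand_bytes(size=8))
    random.seed(42)
    second = bytes(rand_bytes(size=8))
    assert first == second  # identical for the same seed, unlike os.urandom()
    print(first.hex())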