aboutsummaryrefslogtreecommitdiff
path: root/contrib
diff options
context:
space:
mode:
Diffstat (limited to 'contrib')
-rw-r--r--contrib/debian/copyright2
-rwxr-xr-xcontrib/devtools/circular-dependencies.py9
-rwxr-xr-xcontrib/devtools/gen-manpages.sh7
-rw-r--r--contrib/devtools/pixie.py323
-rwxr-xr-xcontrib/devtools/security-check.py173
-rwxr-xr-xcontrib/devtools/symbol-check.py98
-rwxr-xr-xcontrib/filter-lcov.py2
-rwxr-xr-xcontrib/gitian-build.py2
-rw-r--r--contrib/gitian-descriptors/gitian-osx-signer.yml23
-rw-r--r--contrib/gitian-descriptors/gitian-osx.yml13
-rw-r--r--contrib/gitian-keys/keys.txt5
-rw-r--r--contrib/guix/README.md76
-rwxr-xr-xcontrib/guix/guix-build.sh159
-rw-r--r--contrib/guix/libexec/build.sh26
-rw-r--r--contrib/init/README.md4
-rw-r--r--contrib/init/bitcoind.service6
-rw-r--r--contrib/macdeploy/README.md14
-rwxr-xr-xcontrib/macdeploy/detached-sig-apply.sh36
-rwxr-xr-xcontrib/macdeploy/detached-sig-create.sh35
-rwxr-xr-xcontrib/macdeploy/extract-osx-sdk.sh34
-rw-r--r--contrib/signet/README.md61
-rwxr-xr-xcontrib/signet/miner639
-rw-r--r--contrib/testgen/base58.py2
-rwxr-xr-xcontrib/testgen/gen_key_io_test_vectors.py2
-rwxr-xr-xcontrib/zmq/zmq_sub.py2
25 files changed, 1392 insertions, 361 deletions
diff --git a/contrib/debian/copyright b/contrib/debian/copyright
index 581fe712e9..a18c5bccc5 100644
--- a/contrib/debian/copyright
+++ b/contrib/debian/copyright
@@ -5,7 +5,7 @@ Upstream-Contact: Satoshi Nakamoto <satoshin@gmx.com>
Source: https://github.com/bitcoin/bitcoin
Files: *
-Copyright: 2009-2020, Bitcoin Core Developers
+Copyright: 2009-2021, Bitcoin Core Developers
License: Expat
Comment: The Bitcoin Core Developers encompasses the current developers listed on bitcoin.org,
as well as the numerous contributors to the project.
diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py
index bc5f09a3e2..b1d9f2b7db 100755
--- a/contrib/devtools/circular-dependencies.py
+++ b/contrib/devtools/circular-dependencies.py
@@ -1,10 +1,11 @@
#!/usr/bin/env python3
-# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import re
+from typing import Dict, List, Set
MAPPING = {
'core_read.cpp': 'core_io.cpp',
@@ -32,7 +33,7 @@ def module_name(path):
return None
files = dict()
-deps = dict()
+deps: Dict[str, Set[str]] = dict()
RE = re.compile("^#include <(.*)>")
@@ -59,12 +60,12 @@ for arg in sorted(files.keys()):
deps[module].add(included_module)
# Loop to find the shortest (remaining) circular dependency
-have_cycle = False
+have_cycle: bool = False
while True:
shortest_cycle = None
for module in sorted(deps.keys()):
# Build the transitive closure of dependencies of module
- closure = dict()
+ closure: Dict[str, List[str]] = dict()
for dep in deps[module]:
closure[dep] = []
while True:
diff --git a/contrib/devtools/gen-manpages.sh b/contrib/devtools/gen-manpages.sh
index 3fdcda4fd4..b7bf76ce77 100755
--- a/contrib/devtools/gen-manpages.sh
+++ b/contrib/devtools/gen-manpages.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Copyright (c) 2016-2019 The Bitcoin Core developers
+# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -14,13 +14,14 @@ BITCOIND=${BITCOIND:-$BINDIR/bitcoind}
BITCOINCLI=${BITCOINCLI:-$BINDIR/bitcoin-cli}
BITCOINTX=${BITCOINTX:-$BINDIR/bitcoin-tx}
WALLET_TOOL=${WALLET_TOOL:-$BINDIR/bitcoin-wallet}
+BITCOINUTIL=${BITCOINUTIL:-$BINDIR/bitcoin-util}
BITCOINQT=${BITCOINQT:-$BINDIR/qt/bitcoin-qt}
[ ! -x $BITCOIND ] && echo "$BITCOIND not found or not executable." && exit 1
# Don't allow man pages to be generated for binaries built from a dirty tree
DIRTY=""
-for cmd in $BITCOIND $BITCOINCLI $BITCOINTX $WALLET_TOOL $BITCOINQT; do
+for cmd in $BITCOIND $BITCOINCLI $BITCOINTX $WALLET_TOOL $BITCOINUTIL $BITCOINQT; do
VERSION_OUTPUT=$($cmd --version)
if [[ $VERSION_OUTPUT == *"dirty"* ]]; then
DIRTY="${DIRTY}${cmd}\n"
@@ -43,7 +44,7 @@ read -r -a BTCVER <<< "$($BITCOINCLI --version | head -n1 | awk -F'[ -]' '{ prin
echo "[COPYRIGHT]" > footer.h2m
$BITCOIND --version | sed -n '1!p' >> footer.h2m
-for cmd in $BITCOIND $BITCOINCLI $BITCOINTX $WALLET_TOOL $BITCOINQT; do
+for cmd in $BITCOIND $BITCOINCLI $BITCOINTX $WALLET_TOOL $BITCOINUTIL $BITCOINQT; do
cmdname="${cmd##*/}"
help2man -N --version-string=${BTCVER[0]} --include=footer.h2m -o ${MANDIR}/${cmdname}.1 ${cmd}
sed -i "s/\\\-${BTCVER[1]}//g" ${MANDIR}/${cmdname}.1
diff --git a/contrib/devtools/pixie.py b/contrib/devtools/pixie.py
new file mode 100644
index 0000000000..8cf06a799a
--- /dev/null
+++ b/contrib/devtools/pixie.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 Wladimir J. van der Laan
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+'''
+Compact, self-contained ELF implementation for bitcoin-core security checks.
+'''
+import struct
+import types
+from typing import Dict, List, Optional, Union, Tuple
+
+# you can find all these values in elf.h
+EI_NIDENT = 16
+
+# Byte indices in e_ident
+EI_CLASS = 4 # ELFCLASSxx
+EI_DATA = 5 # ELFDATAxxxx
+
+ELFCLASS32 = 1 # 32-bit
+ELFCLASS64 = 2 # 64-bit
+
+ELFDATA2LSB = 1 # little endian
+ELFDATA2MSB = 2 # big endian
+
+# relevant values for e_machine
+EM_386 = 3
+EM_PPC64 = 21
+EM_ARM = 40
+EM_AARCH64 = 183
+EM_X86_64 = 62
+EM_RISCV = 243
+
+# relevant values for e_type
+ET_DYN = 3
+
+# relevant values for sh_type
+SHT_PROGBITS = 1
+SHT_STRTAB = 3
+SHT_DYNAMIC = 6
+SHT_DYNSYM = 11
+SHT_GNU_verneed = 0x6ffffffe
+SHT_GNU_versym = 0x6fffffff
+
+# relevant values for p_type
+PT_LOAD = 1
+PT_GNU_STACK = 0x6474e551
+PT_GNU_RELRO = 0x6474e552
+
+# relevant values for p_flags
+PF_X = (1 << 0)
+PF_W = (1 << 1)
+PF_R = (1 << 2)
+
+# relevant values for d_tag
+DT_NEEDED = 1
+DT_FLAGS = 30
+
+# relevant values of `d_un.d_val' in the DT_FLAGS entry
+DF_BIND_NOW = 0x00000008
+
+# relevant d_tags with string payload
+STRING_TAGS = {DT_NEEDED}
+
+# relevant values for ST_BIND subfield of st_info (symbol binding)
+STB_LOCAL = 0
+
+class ELFRecord(types.SimpleNamespace):
+ '''Unified parsing for ELF records.'''
+ def __init__(self, data: bytes, offset: int, eh: 'ELFHeader', total_size: Optional[int]) -> None:
+ hdr_struct = self.STRUCT[eh.ei_class][0][eh.ei_data]
+ if total_size is not None and hdr_struct.size > total_size:
+ raise ValueError(f'{self.__class__.__name__} header size too small ({total_size} < {hdr_struct.size})')
+ for field, value in zip(self.STRUCT[eh.ei_class][1], hdr_struct.unpack(data[offset:offset + hdr_struct.size])):
+ setattr(self, field, value)
+
+def BiStruct(chars: str) -> Dict[int, struct.Struct]:
+ '''Compile a struct parser for both endians.'''
+ return {
+ ELFDATA2LSB: struct.Struct('<' + chars),
+ ELFDATA2MSB: struct.Struct('>' + chars),
+ }
+
+class ELFHeader(ELFRecord):
+ FIELDS = ['e_type', 'e_machine', 'e_version', 'e_entry', 'e_phoff', 'e_shoff', 'e_flags', 'e_ehsize', 'e_phentsize', 'e_phnum', 'e_shentsize', 'e_shnum', 'e_shstrndx']
+ STRUCT = {
+ ELFCLASS32: (BiStruct('HHIIIIIHHHHHH'), FIELDS),
+ ELFCLASS64: (BiStruct('HHIQQQIHHHHHH'), FIELDS),
+ }
+
+ def __init__(self, data: bytes, offset: int) -> None:
+ self.e_ident = data[offset:offset + EI_NIDENT]
+ if self.e_ident[0:4] != b'\x7fELF':
+ raise ValueError('invalid ELF magic')
+ self.ei_class = self.e_ident[EI_CLASS]
+ self.ei_data = self.e_ident[EI_DATA]
+
+ super().__init__(data, offset + EI_NIDENT, self, None)
+
+ def __repr__(self) -> str:
+ return f'Header(e_ident={self.e_ident!r}, e_type={self.e_type}, e_machine={self.e_machine}, e_version={self.e_version}, e_entry={self.e_entry}, e_phoff={self.e_phoff}, e_shoff={self.e_shoff}, e_flags={self.e_flags}, e_ehsize={self.e_ehsize}, e_phentsize={self.e_phentsize}, e_phnum={self.e_phnum}, e_shentsize={self.e_shentsize}, e_shnum={self.e_shnum}, e_shstrndx={self.e_shstrndx})'
+
+class Section(ELFRecord):
+ name: Optional[bytes] = None
+ FIELDS = ['sh_name', 'sh_type', 'sh_flags', 'sh_addr', 'sh_offset', 'sh_size', 'sh_link', 'sh_info', 'sh_addralign', 'sh_entsize']
+ STRUCT = {
+ ELFCLASS32: (BiStruct('IIIIIIIIII'), FIELDS),
+ ELFCLASS64: (BiStruct('IIQQQQIIQQ'), FIELDS),
+ }
+
+ def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
+ super().__init__(data, offset, eh, eh.e_shentsize)
+ self._data = data
+
+ def __repr__(self) -> str:
+ return f'Section(sh_name={self.sh_name}({self.name!r}), sh_type=0x{self.sh_type:x}, sh_flags={self.sh_flags}, sh_addr=0x{self.sh_addr:x}, sh_offset=0x{self.sh_offset:x}, sh_size={self.sh_size}, sh_link={self.sh_link}, sh_info={self.sh_info}, sh_addralign={self.sh_addralign}, sh_entsize={self.sh_entsize})'
+
+ def contents(self) -> bytes:
+ '''Return section contents.'''
+ return self._data[self.sh_offset:self.sh_offset + self.sh_size]
+
+class ProgramHeader(ELFRecord):
+ STRUCT = {
+ # different ELF classes have the same fields, but in a different order to optimize space versus alignment
+ ELFCLASS32: (BiStruct('IIIIIIII'), ['p_type', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_flags', 'p_align']),
+ ELFCLASS64: (BiStruct('IIQQQQQQ'), ['p_type', 'p_flags', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_align']),
+ }
+
+ def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
+ super().__init__(data, offset, eh, eh.e_phentsize)
+
+ def __repr__(self) -> str:
+ return f'ProgramHeader(p_type={self.p_type}, p_offset={self.p_offset}, p_vaddr={self.p_vaddr}, p_paddr={self.p_paddr}, p_filesz={self.p_filesz}, p_memsz={self.p_memsz}, p_flags={self.p_flags}, p_align={self.p_align})'
+
+class Symbol(ELFRecord):
+ STRUCT = {
+ # different ELF classes have the same fields, but in a different order to optimize space versus alignment
+ ELFCLASS32: (BiStruct('IIIBBH'), ['st_name', 'st_value', 'st_size', 'st_info', 'st_other', 'st_shndx']),
+ ELFCLASS64: (BiStruct('IBBHQQ'), ['st_name', 'st_info', 'st_other', 'st_shndx', 'st_value', 'st_size']),
+ }
+
+ def __init__(self, data: bytes, offset: int, eh: ELFHeader, symtab: Section, strings: bytes, version: Optional[bytes]) -> None:
+ super().__init__(data, offset, eh, symtab.sh_entsize)
+ self.name = _lookup_string(strings, self.st_name)
+ self.version = version
+
+ def __repr__(self) -> str:
+ return f'Symbol(st_name={self.st_name}({self.name!r}), st_value={self.st_value}, st_size={self.st_size}, st_info={self.st_info}, st_other={self.st_other}, st_shndx={self.st_shndx}, version={self.version!r})'
+
+ @property
+ def is_import(self) -> bool:
+ '''Returns whether the symbol is an imported symbol.'''
+ return self.st_bind != STB_LOCAL and self.st_shndx == 0
+
+ @property
+ def is_export(self) -> bool:
+ '''Returns whether the symbol is an exported symbol.'''
+ return self.st_bind != STB_LOCAL and self.st_shndx != 0
+
+ @property
+ def st_bind(self) -> int:
+ '''Returns STB_*.'''
+ return self.st_info >> 4
+
+class Verneed(ELFRecord):
+ DEF = (BiStruct('HHIII'), ['vn_version', 'vn_cnt', 'vn_file', 'vn_aux', 'vn_next'])
+ STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
+
+ def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
+ super().__init__(data, offset, eh, None)
+
+ def __repr__(self) -> str:
+ return f'Verneed(vn_version={self.vn_version}, vn_cnt={self.vn_cnt}, vn_file={self.vn_file}, vn_aux={self.vn_aux}, vn_next={self.vn_next})'
+
+class Vernaux(ELFRecord):
+ DEF = (BiStruct('IHHII'), ['vna_hash', 'vna_flags', 'vna_other', 'vna_name', 'vna_next'])
+ STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
+
+ def __init__(self, data: bytes, offset: int, eh: ELFHeader, strings: bytes) -> None:
+ super().__init__(data, offset, eh, None)
+ self.name = _lookup_string(strings, self.vna_name)
+
+ def __repr__(self) -> str:
+ return f'Vernaux(vna_hash={self.vna_hash}, vna_flags={self.vna_flags}, vna_other={self.vna_other}, vna_name={self.vna_name}({self.name!r}), vna_next={self.vna_next})'
+
+class DynTag(ELFRecord):
+ STRUCT = {
+ ELFCLASS32: (BiStruct('II'), ['d_tag', 'd_val']),
+ ELFCLASS64: (BiStruct('QQ'), ['d_tag', 'd_val']),
+ }
+
+ def __init__(self, data: bytes, offset: int, eh: ELFHeader, section: Section) -> None:
+ super().__init__(data, offset, eh, section.sh_entsize)
+
+ def __repr__(self) -> str:
+ return f'DynTag(d_tag={self.d_tag}, d_val={self.d_val})'
+
+def _lookup_string(data: bytes, index: int) -> bytes:
+ '''Look up string by offset in ELF string table.'''
+ endx = data.find(b'\x00', index)
+ assert endx != -1
+ return data[index:endx]
+
+VERSYM_S = BiStruct('H') # .gnu_version section has a single 16-bit integer per symbol in the linked section
+def _parse_symbol_table(section: Section, strings: bytes, eh: ELFHeader, versym: bytes, verneed: Dict[int, bytes]) -> List[Symbol]:
+ '''Parse symbol table, return a list of symbols.'''
+ data = section.contents()
+ symbols = []
+ versym_iter = (verneed.get(v[0]) for v in VERSYM_S[eh.ei_data].iter_unpack(versym))
+ for ofs, version in zip(range(0, len(data), section.sh_entsize), versym_iter):
+ symbols.append(Symbol(data, ofs, eh, section, strings, version))
+ return symbols
+
+def _parse_verneed(section: Section, strings: bytes, eh: ELFHeader) -> Dict[int, bytes]:
+ '''Parse .gnu.version_r section, return a dictionary of {versym: 'GLIBC_...'}.'''
+ data = section.contents()
+ ofs = 0
+ result = {}
+ while True:
+ verneed = Verneed(data, ofs, eh)
+ aofs = verneed.vn_aux
+ while True:
+ vernaux = Vernaux(data, aofs, eh, strings)
+ result[vernaux.vna_other] = vernaux.name
+ if not vernaux.vna_next:
+ break
+ aofs += vernaux.vna_next
+
+ if not verneed.vn_next:
+ break
+ ofs += verneed.vn_next
+
+ return result
+
+def _parse_dyn_tags(section: Section, strings: bytes, eh: ELFHeader) -> List[Tuple[int, Union[bytes, int]]]:
+ '''Parse dynamic tags. Return array of tuples.'''
+ data = section.contents()
+ ofs = 0
+ result = []
+ for ofs in range(0, len(data), section.sh_entsize):
+ tag = DynTag(data, ofs, eh, section)
+ val = _lookup_string(strings, tag.d_val) if tag.d_tag in STRING_TAGS else tag.d_val
+ result.append((tag.d_tag, val))
+
+ return result
+
+class ELFFile:
+ sections: List[Section]
+ program_headers: List[ProgramHeader]
+ dyn_symbols: List[Symbol]
+ dyn_tags: List[Tuple[int, Union[bytes, int]]]
+
+ def __init__(self, data: bytes) -> None:
+ self.data = data
+ self.hdr = ELFHeader(self.data, 0)
+ self._load_sections()
+ self._load_program_headers()
+ self._load_dyn_symbols()
+ self._load_dyn_tags()
+ self._section_to_segment_mapping()
+
+ def _load_sections(self) -> None:
+ self.sections = []
+ for idx in range(self.hdr.e_shnum):
+ offset = self.hdr.e_shoff + idx * self.hdr.e_shentsize
+ self.sections.append(Section(self.data, offset, self.hdr))
+
+ shstr = self.sections[self.hdr.e_shstrndx].contents()
+ for section in self.sections:
+ section.name = _lookup_string(shstr, section.sh_name)
+
+ def _load_program_headers(self) -> None:
+ self.program_headers = []
+ for idx in range(self.hdr.e_phnum):
+ offset = self.hdr.e_phoff + idx * self.hdr.e_phentsize
+ self.program_headers.append(ProgramHeader(self.data, offset, self.hdr))
+
+ def _load_dyn_symbols(self) -> None:
+ # first, load 'verneed' section
+ verneed = None
+ for section in self.sections:
+ if section.sh_type == SHT_GNU_verneed:
+ strtab = self.sections[section.sh_link].contents() # associated string table
+ assert verneed is None # only one section of this kind please
+ verneed = _parse_verneed(section, strtab, self.hdr)
+ assert verneed is not None
+
+ # then, correlate GNU versym sections with dynamic symbol sections
+ versym = {}
+ for section in self.sections:
+ if section.sh_type == SHT_GNU_versym:
+ versym[section.sh_link] = section
+
+ # finally, load dynsym sections
+ self.dyn_symbols = []
+ for idx, section in enumerate(self.sections):
+ if section.sh_type == SHT_DYNSYM: # find dynamic symbol tables
+ strtab_data = self.sections[section.sh_link].contents() # associated string table
+ versym_data = versym[idx].contents() # associated symbol version table
+ self.dyn_symbols += _parse_symbol_table(section, strtab_data, self.hdr, versym_data, verneed)
+
+ def _load_dyn_tags(self) -> None:
+ self.dyn_tags = []
+ for idx, section in enumerate(self.sections):
+ if section.sh_type == SHT_DYNAMIC: # find dynamic tag tables
+ strtab = self.sections[section.sh_link].contents() # associated string table
+ self.dyn_tags += _parse_dyn_tags(section, strtab, self.hdr)
+
+ def _section_to_segment_mapping(self) -> None:
+ for ph in self.program_headers:
+ ph.sections = []
+ for section in self.sections:
+ if ph.p_vaddr <= section.sh_addr < (ph.p_vaddr + ph.p_memsz):
+ ph.sections.append(section)
+
+ def query_dyn_tags(self, tag_in: int) -> List[Union[int, bytes]]:
+ '''Return the values of all dyn tags with the specified tag.'''
+ return [val for (tag, val) in self.dyn_tags if tag == tag_in]
+
+
+def load(filename: str) -> ELFFile:
+ with open(filename, 'rb') as f:
+ data = f.read()
+ return ELFFile(data)
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 02615edb54..7b09c42fde 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -6,15 +6,15 @@
Perform basic security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
-Needs `readelf` (for ELF), `objdump` (for PE) and `otool` (for MACHO).
+Needs `objdump` (for PE) and `otool` (for MACHO).
'''
import subprocess
import sys
import os
-
from typing import List, Optional
-READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
+import pixie
+
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
@@ -26,75 +26,20 @@ def check_ELF_PIE(executable) -> bool:
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
- stdout = run_command([READELF_CMD, '-h', '-W', executable])
-
- ok = False
- for line in stdout.splitlines():
- tokens = line.split()
- if len(line)>=2 and tokens[0] == 'Type:' and tokens[1] == 'DYN':
- ok = True
- return ok
-
-def get_ELF_program_headers(executable):
- '''Return type and flags for ELF program headers'''
- stdout = run_command([READELF_CMD, '-l', '-W', executable])
-
- in_headers = False
- headers = []
- for line in stdout.splitlines():
- if line.startswith('Program Headers:'):
- in_headers = True
- count = 0
- if line == '':
- in_headers = False
- if in_headers:
- if count == 1: # header line
- header = [x.strip() for x in line.split()]
- ofs_typ = header.index('Type')
- ofs_flags = header.index('Flg')
- # assert readelf output is what we expect
- if ofs_typ == -1 or ofs_flags == -1:
- raise ValueError('Cannot parse elfread -lW output')
- elif count > 1:
- splitline = [x.strip() for x in line.split()]
- typ = splitline[ofs_typ]
- if not typ.startswith('[R'): # skip [Requesting ...]
- splitline = [x.strip() for x in line.split()]
- flags = splitline[ofs_flags]
- # check for 'R', ' E'
- if splitline[ofs_flags + 1] == 'E':
- flags += ' E'
- headers.append((typ, flags, []))
- count += 1
-
- if line.startswith(' Section to Segment mapping:'):
- in_mapping = True
- count = 0
- if line == '':
- in_mapping = False
- if in_mapping:
- if count == 1: # header line
- ofs_segment = line.find('Segment')
- ofs_sections = line.find('Sections...')
- if ofs_segment == -1 or ofs_sections == -1:
- raise ValueError('Cannot parse elfread -lW output')
- elif count > 1:
- segment = int(line[ofs_segment:ofs_sections].strip())
- sections = line[ofs_sections:].strip().split()
- headers[segment][2].extend(sections)
- count += 1
- return headers
+ elf = pixie.load(executable)
+ return elf.hdr.e_type == pixie.ET_DYN
def check_ELF_NX(executable) -> bool:
'''
Check that no sections are writable and executable (including the stack)
'''
+ elf = pixie.load(executable)
have_wx = False
have_gnu_stack = False
- for (typ, flags, _) in get_ELF_program_headers(executable):
- if typ == 'GNU_STACK':
+ for ph in elf.program_headers:
+ if ph.p_type == pixie.PT_GNU_STACK:
have_gnu_stack = True
- if 'W' in flags and 'E' in flags: # section is both writable and executable
+ if (ph.p_flags & pixie.PF_W) != 0 and (ph.p_flags & pixie.PF_X) != 0: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
@@ -104,35 +49,34 @@ def check_ELF_RELRO(executable) -> bool:
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
+ elf = pixie.load(executable)
have_gnu_relro = False
- for (typ, flags, _) in get_ELF_program_headers(executable):
- # Note: not checking flags == 'R': here as linkers set the permission differently
+ for ph in elf.program_headers:
+ # Note: not checking p_flags == PF_R: here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program
# header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also https://marc.info/?l=binutils&m=1498883354122353
- if typ == 'GNU_RELRO':
+ if ph.p_type == pixie.PT_GNU_RELRO:
have_gnu_relro = True
have_bindnow = False
- stdout = run_command([READELF_CMD, '-d', '-W', executable])
-
- for line in stdout.splitlines():
- tokens = line.split()
- if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]):
+ for flags in elf.query_dyn_tags(pixie.DT_FLAGS):
+ assert isinstance(flags, int)
+ if flags & pixie.DF_BIND_NOW:
have_bindnow = True
+
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable) -> bool:
'''
Check for use of stack canary
'''
- stdout = run_command([READELF_CMD, '--dyn-syms', '-W', executable])
-
+ elf = pixie.load(executable)
ok = False
- for line in stdout.splitlines():
- if '__stack_chk_fail' in line:
+ for symbol in elf.dyn_symbols:
+ if symbol.name == b'__stack_chk_fail':
ok = True
return ok
@@ -142,48 +86,55 @@ def check_ELF_separate_code(executable):
based on their permissions. This checks for missing -Wl,-z,separate-code
and potentially other problems.
'''
+ elf = pixie.load(executable)
+ R = pixie.PF_R
+ W = pixie.PF_W
+ E = pixie.PF_X
EXPECTED_FLAGS = {
# Read + execute
- '.init': 'R E',
- '.plt': 'R E',
- '.plt.got': 'R E',
- '.plt.sec': 'R E',
- '.text': 'R E',
- '.fini': 'R E',
+ b'.init': R | E,
+ b'.plt': R | E,
+ b'.plt.got': R | E,
+ b'.plt.sec': R | E,
+ b'.text': R | E,
+ b'.fini': R | E,
# Read-only data
- '.interp': 'R',
- '.note.gnu.property': 'R',
- '.note.gnu.build-id': 'R',
- '.note.ABI-tag': 'R',
- '.gnu.hash': 'R',
- '.dynsym': 'R',
- '.dynstr': 'R',
- '.gnu.version': 'R',
- '.gnu.version_r': 'R',
- '.rela.dyn': 'R',
- '.rela.plt': 'R',
- '.rodata': 'R',
- '.eh_frame_hdr': 'R',
- '.eh_frame': 'R',
- '.qtmetadata': 'R',
- '.gcc_except_table': 'R',
- '.stapsdt.base': 'R',
+ b'.interp': R,
+ b'.note.gnu.property': R,
+ b'.note.gnu.build-id': R,
+ b'.note.ABI-tag': R,
+ b'.gnu.hash': R,
+ b'.dynsym': R,
+ b'.dynstr': R,
+ b'.gnu.version': R,
+ b'.gnu.version_r': R,
+ b'.rela.dyn': R,
+ b'.rela.plt': R,
+ b'.rodata': R,
+ b'.eh_frame_hdr': R,
+ b'.eh_frame': R,
+ b'.qtmetadata': R,
+ b'.gcc_except_table': R,
+ b'.stapsdt.base': R,
# Writable data
- '.init_array': 'RW',
- '.fini_array': 'RW',
- '.dynamic': 'RW',
- '.got': 'RW',
- '.data': 'RW',
- '.bss': 'RW',
+ b'.init_array': R | W,
+ b'.fini_array': R | W,
+ b'.dynamic': R | W,
+ b'.got': R | W,
+ b'.data': R | W,
+ b'.bss': R | W,
}
+ if elf.hdr.e_machine == pixie.EM_PPC64:
+ # .plt is RW on ppc64 even with separate-code
+ EXPECTED_FLAGS[b'.plt'] = R | W
# For all LOAD program headers get mapping to the list of sections,
# and for each section, remember the flags of the associated program header.
flags_per_section = {}
- for (typ, flags, sections) in get_ELF_program_headers(executable):
- if typ == 'LOAD':
- for section in sections:
- assert(section not in flags_per_section)
- flags_per_section[section] = flags
+ for ph in elf.program_headers:
+ if ph.p_type == pixie.PT_LOAD:
+ for section in ph.sections:
+ assert(section.name not in flags_per_section)
+ flags_per_section[section.name] = ph.p_flags
# Spot-check ELF LOAD program header flags per section
# If these sections exist, check them against the expected R/W/E flags
for (section, flags) in flags_per_section.items():
@@ -236,7 +187,7 @@ def check_PE_NX(executable) -> bool:
def get_MACHO_executable_flags(executable) -> List[str]:
stdout = run_command([OTOOL_CMD, '-vh', executable])
- flags = []
+ flags: List[str] = []
for line in stdout.splitlines():
tokens = line.split()
# filter first two header lines
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 6949cb7ced..b30ed62521 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -11,10 +11,11 @@ Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
-import re
import sys
import os
-from typing import List, Optional, Tuple
+from typing import List, Optional
+
+import pixie
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
@@ -50,7 +51,6 @@ IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
-READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
@@ -68,6 +68,8 @@ ELF_ALLOWED_LIBRARIES = {
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
+'ld64.so.1', # POWER64 ABIv1 dynamic linker
+'ld64.so.2', # POWER64 ABIv2 dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# bitcoin-qt only
'libxcb.so.1', # part of X11
@@ -76,11 +78,12 @@ ELF_ALLOWED_LIBRARIES = {
'libdl.so.2' # programming interface to dynamic linker
}
ARCH_MIN_GLIBC_VER = {
-'80386': (2,1),
-'X86-64': (2,2,5),
-'ARM': (2,4),
-'AArch64':(2,17),
-'RISC-V': (2,27)
+pixie.EM_386: (2,1),
+pixie.EM_X86_64: (2,2,5),
+pixie.EM_ARM: (2,4),
+pixie.EM_AARCH64:(2,17),
+pixie.EM_PPC64: (2,17),
+pixie.EM_RISCV: (2,27)
}
MACHO_ALLOWED_LIBRARIES = {
@@ -140,29 +143,6 @@ class CPPFilt(object):
self.proc.stdout.close()
self.proc.wait()
-def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]:
- '''
- Parse an ELF executable and return a list of (symbol,version, arch) tuples
- for dynamic, imported symbols.
- '''
- p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
- (stdout, stderr) = p.communicate()
- if p.returncode:
- raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip()))
- syms = []
- for line in stdout.splitlines():
- line = line.split()
- if 'Machine:' in line:
- arch = line[-1]
- if len(line)>7 and re.match('[0-9]+:$', line[0]):
- (sym, _, version) = line[7].partition('@')
- is_import = line[6] == 'UND'
- if version.startswith('@'):
- version = version[1:]
- if is_import == imports:
- syms.append((sym, version, arch))
- return syms
-
def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
@@ -174,46 +154,42 @@ def check_version(max_versions, version, arch) -> bool:
return False
return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch]
-def elf_read_libraries(filename) -> List[str]:
- p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
- (stdout, stderr) = p.communicate()
- if p.returncode:
- raise IOError('Error opening file')
- libraries = []
- for line in stdout.splitlines():
- tokens = line.split()
- if len(tokens)>2 and tokens[1] == '(NEEDED)':
- match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
- if match:
- libraries.append(match.group(1))
- else:
- raise ValueError('Unparseable (NEEDED) specification')
- return libraries
-
def check_imported_symbols(filename) -> bool:
+ elf = pixie.load(filename)
cppfilt = CPPFilt()
- ok = True
- for sym, version, arch in read_symbols(filename, True):
- if version and not check_version(MAX_VERSIONS, version, arch):
+ ok: bool = True
+
+ for symbol in elf.dyn_symbols:
+ if not symbol.is_import:
+ continue
+ sym = symbol.name.decode()
+ version = symbol.version.decode() if symbol.version is not None else None
+ if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
ok = False
return ok
def check_exported_symbols(filename) -> bool:
+ elf = pixie.load(filename)
cppfilt = CPPFilt()
- ok = True
- for sym,version,arch in read_symbols(filename, False):
- if arch == 'RISC-V' or sym in IGNORE_EXPORTS:
+ ok: bool = True
+ for symbol in elf.dyn_symbols:
+ if not symbol.is_export:
+ continue
+ sym = symbol.name.decode()
+ if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS:
continue
print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
ok = False
return ok
def check_ELF_libraries(filename) -> bool:
- ok = True
- for library_name in elf_read_libraries(filename):
- if library_name not in ELF_ALLOWED_LIBRARIES:
- print('{}: NEEDED library {} is not allowed'.format(filename, library_name))
+ ok: bool = True
+ elf = pixie.load(filename)
+ for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
+ assert(isinstance(library_name, bytes))
+ if library_name.decode() not in ELF_ALLOWED_LIBRARIES:
+ print('{}: NEEDED library {} is not allowed'.format(filename, library_name.decode()))
ok = False
return ok
@@ -231,7 +207,7 @@ def macho_read_libraries(filename) -> List[str]:
return libraries
def check_MACHO_libraries(filename) -> bool:
- ok = True
+ ok: bool = True
for dylib in macho_read_libraries(filename):
if dylib not in MACHO_ALLOWED_LIBRARIES:
print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
@@ -251,7 +227,7 @@ def pe_read_libraries(filename) -> List[str]:
return libraries
def check_PE_libraries(filename) -> bool:
- ok = True
+ ok: bool = True
for dylib in pe_read_libraries(filename):
if dylib not in PE_ALLOWED_LIBRARIES:
print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
@@ -284,7 +260,7 @@ def identify_executable(executable) -> Optional[str]:
return None
if __name__ == '__main__':
- retval = 0
+ retval: int = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
@@ -293,7 +269,7 @@ if __name__ == '__main__':
retval = 1
continue
- failed = []
+ failed: List[str] = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
diff --git a/contrib/filter-lcov.py b/contrib/filter-lcov.py
index e005cb96da..db780ad53b 100755
--- a/contrib/filter-lcov.py
+++ b/contrib/filter-lcov.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2017-2019 The Bitcoin Core developers
+# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py
index d498c9e2c8..f105968515 100755
--- a/contrib/gitian-build.py
+++ b/contrib/gitian-build.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml
index a4f3219c22..214ca9823d 100644
--- a/contrib/gitian-descriptors/gitian-osx-signer.yml
+++ b/contrib/gitian-descriptors/gitian-osx-signer.yml
@@ -7,9 +7,14 @@ architectures:
- "amd64"
packages:
- "faketime"
+- "xorriso"
+- "python3-pip"
remotes:
- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git"
"dir": "signature"
+- "url": "https://github.com/achow101/signapple.git"
+ "dir": "signapple"
+ "commit": "c7e73aa27a7615ac9506559173f787e2906b25eb"
files:
- "bitcoin-osx-unsigned.tar.gz"
script: |
@@ -18,7 +23,7 @@ script: |
WRAP_DIR=$HOME/wrapped
mkdir -p ${WRAP_DIR}
export PATH="$PWD":$PATH
- FAKETIME_PROGS="dmg genisoimage"
+ FAKETIME_PROGS="dmg xorrisofs"
# Create global faketime wrappers
for prog in ${FAKETIME_PROGS}; do
@@ -30,11 +35,19 @@ script: |
chmod +x ${WRAP_DIR}/${prog}
done
- UNSIGNED=bitcoin-osx-unsigned.tar.gz
+ # Install signapple
+ cd signapple
+ python3 -m pip install -U pip setuptools
+ python3 -m pip install .
+ export PATH="$HOME/.local/bin":$PATH
+ cd ..
+
+ UNSIGNED_TARBALL=bitcoin-osx-unsigned.tar.gz
+ UNSIGNED_APP=dist/Bitcoin-Qt.app
SIGNED=bitcoin-osx-signed.dmg
- tar -xf ${UNSIGNED}
+ tar -xf ${UNSIGNED_TARBALL}
OSX_VOLNAME="$(cat osx_volname)"
- ./detached-sig-apply.sh ${UNSIGNED} signature/osx
- ${WRAP_DIR}/genisoimage -no-cache-inodes -D -l -probe -V "${OSX_VOLNAME}" -no-pad -r -dir-mode 0755 -apple -o uncompressed.dmg signed-app
+ ./detached-sig-apply.sh ${UNSIGNED_APP} signature/osx/dist
+ ${WRAP_DIR}/xorrisofs -D -l -V "${OSX_VOLNAME}" -no-pad -r -dir-mode 0755 -o uncompressed.dmg signed-app
${WRAP_DIR}/dmg dmg uncompressed.dmg ${OUTDIR}/${SIGNED}
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
index 4119e88003..86f976f568 100644
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
@@ -28,6 +28,7 @@ packages:
- "python3-dev"
- "python3-setuptools"
- "fonts-tuffy"
+- "xorriso"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
@@ -38,9 +39,9 @@ script: |
WRAP_DIR=$HOME/wrapped
HOSTS="x86_64-apple-darwin18"
- CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests GENISOIMAGE=$WRAP_DIR/genisoimage"
+ CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg"
FAKETIME_HOST_PROGS=""
- FAKETIME_PROGS="ar ranlib date dmg genisoimage"
+ FAKETIME_PROGS="ar ranlib date dmg xorrisofs"
export QT_RCC_TEST=1
export QT_RCC_SOURCE_DATE_OVERRIDE=1
@@ -132,21 +133,17 @@ script: |
make osx_volname
make deploydir
- OSX_VOLNAME="$(cat osx_volname)"
mkdir -p unsigned-app-${i}
cp osx_volname unsigned-app-${i}/
cp contrib/macdeploy/detached-sig-apply.sh unsigned-app-${i}
cp contrib/macdeploy/detached-sig-create.sh unsigned-app-${i}
- cp ${BASEPREFIX}/${i}/native/bin/dmg ${BASEPREFIX}/${i}/native/bin/genisoimage unsigned-app-${i}
- cp ${BASEPREFIX}/${i}/native/bin/${i}-codesign_allocate unsigned-app-${i}/codesign_allocate
- cp ${BASEPREFIX}/${i}/native/bin/${i}-pagestuff unsigned-app-${i}/pagestuff
+ cp ${BASEPREFIX}/${i}/native/bin/dmg unsigned-app-${i}
mv dist unsigned-app-${i}
pushd unsigned-app-${i}
find . | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-osx-unsigned.tar.gz
popd
- make deploy
- ${WRAP_DIR}/dmg dmg "${OSX_VOLNAME}.dmg" ${OUTDIR}/${DISTNAME}-osx-unsigned.dmg
+ make deploy OSX_DMG="${OUTDIR}/${DISTNAME}-osx-unsigned.dmg"
cd installed
find . -name "lib*.la" -delete
diff --git a/contrib/gitian-keys/keys.txt b/contrib/gitian-keys/keys.txt
index 0a2c1302c8..7e28d27454 100644
--- a/contrib/gitian-keys/keys.txt
+++ b/contrib/gitian-keys/keys.txt
@@ -2,6 +2,8 @@
617C90010B3BD370B0AC7D424BB42E31C79111B8 Akira Takizawa
E944AE667CF960B1004BC32FCA662BE18B877A60 Andreas Schildbach
152812300785C96444D3334D17565732E08E5E41 Andrew Chow
+590B7292695AFFA5B672CBB2E13FC145CD3F4304 Antoine Poinsot (darosior)
+0AD83877C1F0CD1EE9BD660AD7CC770B81FD22A8 Ben Carman (benthecarman)
912FD3228387123DC97E0E57D5566241A0295FA9 BtcDrak
C519EBCF3B926298946783EFF6430754120EC2F4 Christian Decker (cdecker)
F20F56EF6A067F70E8A5C99FFF95FAA971697405 centaur
@@ -22,12 +24,15 @@ B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B Marco Falke
07DF3E57A548CCFB7530709189BBB8663E2E65CE Matt Corallo (BlueMatt)
CA03882CB1FC067B5D3ACFE4D300116E1C875A3D MeshCollider
E777299FC265DD04793070EB944D35F9AC3DB76A Michael Ford
+AD5764F4ADCE1B99BDFD179E12335A271D4D62EC Michael Tidwell (miketwenty1)
9692B91BBF0E8D34DFD33B1882C5C009628ECF0C Michagogo
77E72E69DA7EE0A148C06B21B34821D4944DE5F7 Nils Schneider
+F4FC70F07310028424EFC20A8E4256593F177720 Oliver Gugger
D62A803E27E7F43486035ADBBCD04D8E9CCCAC2A Paul Rabahy
37EC7D7B0A217CDB4B4E007E7FAB114267E4FA04 Peter Todd
D762373D24904A3E42F33B08B9A408E71DAAC974 Pieter Wuille (Location: Leuven, Belgium)
133EAC179436F14A5CF1B794860FEB804E669320 Pieter Wuille
+6A8F9C266528E25AEB1D7731C2371D91CB716EA7 Sebastian Falbesoner (theStack)
A8FC55F3B04BA3146F3492E79303B33A305224CB Sebastian Kung (TheCharlatan)
ED9BDF7AD6A55E232E84524257FF9BDBCC301009 Sjors Provoost
9EDAFF80E080659604F4A76B2EBB056FD847F8A7 Stephan Oeste (Emzy)
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index dffcf99607..dbe1ea837b 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -40,25 +40,27 @@ Otherwise, follow the [Guix installation guide][guix/bin-install].
Guix allows us to achieve better binary security by using our CPU time to build
everything from scratch. However, it doesn't sacrifice user choice in pursuit of
-this: users can decide whether or not to bootstrap and to use substitutes.
+this: users can decide whether or not to bootstrap and to use substitutes
+(pre-built packages).
After installation, you may want to consider [adding substitute
-servers](#speeding-up-builds-with-substitute-servers) to speed up your build if
-that fits your security model (say, if you're just testing that this works).
-This is skippable if you're using the [Dockerfile][fanquake/guix-docker].
+servers](#speeding-up-builds-with-substitute-servers) from which to download
+pre-built packages to speed up your build if that fits your security model (say,
+if you're just testing that this works). Substitute servers are set up by
+default if you're using the [Dockerfile][fanquake/guix-docker].
-If you prefer not to use any substitutes, make sure to set
-`ADDITIONAL_GUIX_ENVIRONMENT_FLAGS` like the following snippet. The first build
-will take a while, but the resulting packages will be cached for future builds.
+If you prefer not to use any substitutes, make sure to supply `--no-substitutes`
+like in the following snippet. The first build will take a while, but the
+resulting packages will be cached for future builds.
```sh
-export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--no-substitutes'
+export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes'
```
Likewise, to perform a bootstrapped build (takes even longer):
```sh
-export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap --no-substitutes'
+export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes' ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap'
```
### Using a version of Guix with `guix time-machine` capabilities
@@ -82,17 +84,6 @@ export PATH="${HOME}/.config/guix/current/bin${PATH:+:}$PATH"
## Usage
-### As a Development Environment
-
-For a Bitcoin Core depends development environment, simply invoke
-
-```sh
-guix environment --manifest=contrib/guix/manifest.scm
-```
-
-And you'll land back in your shell with all the build dependencies required for
-a `depends` build injected into your environment.
-
### As a Tool for Deterministic Builds
From the top of a clean Bitcoin Core repository:
@@ -113,10 +104,8 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
* _**HOSTS**_
Override the space-separated list of platform triples for which to perform a
- bootstrappable build. _(defaults to "x86\_64-linux-gnu
- arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu")_
-
- > Windows and OS X platform triplet support are WIP.
+ bootstrappable build. _(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf
+ aarch64-linux-gnu riscv64-linux-gnu x86_64-w64-mingw32")_
* _**SOURCES_PATH**_
@@ -147,13 +136,29 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
string) is interpreted the same way as not setting `V` at all, and that `V=0`
has the same effect as `V=1`.
-* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
+* _**SUBSTITUTE_URLS**_
- Additional flags to be passed to `guix environment`. For a fully-bootstrapped
+ A whitespace-delimited list of URLs from which to download pre-built packages.
+ A URL is only used if its signing key is authorized (refer to the [substitute
+ servers section](#speeding-up-builds-with-substitute-servers) for more
+ details).
+
+* _**ADDITIONAL_GUIX_COMMON_FLAGS**_
+
+ Additional flags to be passed to all `guix` commands. For a fully-bootstrapped
build, set this to `--bootstrap --no-substitutes` (refer to the [security
model section](#choosing-your-security-model) for more details). Note that a
fully-bootstrapped build will take quite a long time on the first run.
+* _**ADDITIONAL_GUIX_TIMEMACHINE_FLAGS**_
+
+ Additional flags to be passed to `guix time-machine`.
+
+* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
+
+ Additional flags to be passed to the invocation of `guix environment` inside
+ `guix time-machine`.
+
## Tips and Tricks
### Speeding up builds with substitute servers
@@ -161,14 +166,15 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
_This whole section is automatically done in the convenience
[Dockerfiles][fanquake/guix-docker]_
-For those who are used to life in the fast _(and trustful)_ lane, you can use
-[substitute servers][guix/substitutes] to enable binary downloads of packages.
+For those who are used to life in the fast _(and trustful)_ lane, you can
+specify [substitute servers][guix/substitutes] from which to download pre-built
+packages.
> For those who only want to use substitutes from the official Guix build farm
> and have authorized the build farm's signing key during Guix's installation,
> you don't need to do anything.
-#### Authorize the signing keys
+#### Step 1: Authorize the signing keys
For the official Guix build farm at https://ci.guix.gnu.org, run as root:
@@ -182,7 +188,7 @@ For dongcarl's substitute server at https://guix.carldong.io, run as root:
wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize
```
-#### Use the substitute servers
+#### Step 2: Specify the substitute servers
The official Guix build farm at https://ci.guix.gnu.org is automatically used
unless the `--no-substitutes` flag is supplied.
@@ -196,7 +202,7 @@ To use dongcarl's substitute server for Bitcoin Core builds after having
[authorized his signing key](#authorize-the-signing-keys):
```
-export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--substitute-urls="https://guix.carldong.io https://ci.guix.gnu.org"'
+export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org'
```
## FAQ
@@ -212,9 +218,9 @@ As mentioned at the bottom of [this manual page][guix/bin-install]:
### When will Guix be packaged in debian?
-Vagrant Cascadian has been making good progress on this
-[here][debian/guix-package]. We have all the pieces needed to put up an APT
-repository and will likely put one up soon.
+Thanks to Vagrant Cascadian's diligent work, Guix is now [in debian
+experimental][debian/guix-experimental]! Hopefully it will make its way into a
+release soon.
[b17e]: http://bootstrappable.org/
[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
@@ -226,5 +232,5 @@ repository and will likely put one up soon.
[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html
[guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html
-[debian/guix-package]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=850644
+[debian/guix-experimental]: https://packages.debian.org/experimental/guix
[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix
diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh
index 11d2c8b867..54cc5793f6 100755
--- a/contrib/guix/guix-build.sh
+++ b/contrib/guix/guix-build.sh
@@ -2,6 +2,116 @@
export LC_ALL=C
set -e -o pipefail
+###################
+## Sanity Checks ##
+###################
+
+################
+# Check 1: Make sure that we can invoke required tools
+################
+for cmd in git make guix cat mkdir; do
+ if ! command -v "$cmd" > /dev/null 2>&1; then
+ echo "ERR: This script requires that '$cmd' is installed and available in your \$PATH"
+ exit 1
+ fi
+done
+
+################
+# Check 2: Make sure GUIX_BUILD_OPTIONS is empty
+################
+#
+# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that
+# can perform builds. This seems like what we want instead of
+# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually
+# _appended_ to normal command-line options. Meaning that they will take
+# precedence over the command-specific ADDITIONAL_GUIX_<CMD>_FLAGS.
+#
+# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's
+# existence here and direct users of this script to use our (more flexible)
+# custom environment variables.
+if [ -n "$GUIX_BUILD_OPTIONS" ]; then
+cat << EOF
+Error: Environment variable GUIX_BUILD_OPTIONS is not empty:
+ '$GUIX_BUILD_OPTIONS'
+
+Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset
+GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options
+across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a
+specific guix command.
+
+See contrib/guix/README.md for more details.
+EOF
+exit 1
+fi
+
+################
+# Check 3: Make sure that we're not in a dirty worktree
+################
+if ! git diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then
+cat << EOF
+ERR: The current git worktree is dirty, which may lead to broken builds.
+
+ Aborting...
+
+Hint: To make your git worktree clean, You may want to:
+ 1. Commit your changes,
+ 2. Stash your changes, or
+ 3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on
+ using a dirty worktree
+EOF
+exit 1
+else
+ GIT_COMMIT=$(git rev-parse --short=12 HEAD)
+fi
+
+################
+# Check 4: Make sure that build directories do no exist
+################
+
+# Default to building for all supported HOSTs (overridable by environment)
+export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu
+ x86_64-w64-mingw32}"
+
+DISTSRC_BASE="${DISTSRC_BASE:-${PWD}}"
+
+# Usage: distsrc_for_host HOST
+#
+# HOST: The current platform triple we're building for
+#
+distsrc_for_host() {
+ echo "${DISTSRC_BASE}/distsrc-${GIT_COMMIT}-${1}"
+}
+
+# Accumulate a list of build directories that already exist...
+hosts_distsrc_exists=""
+for host in $HOSTS; do
+ if [ -e "$(distsrc_for_host "$host")" ]; then
+ hosts_distsrc_exists+=" ${host}"
+ fi
+done
+
+if [ -n "$hosts_distsrc_exists" ]; then
+# ...so that we can print them out nicely in an error message
+cat << EOF
+ERR: Build directories for this commit already exist for the following platform
+ triples you're attempting to build, probably because of previous builds.
+ Please remove, or otherwise deal with them prior to starting another build.
+
+ Aborting...
+
+EOF
+for host in $hosts_distsrc_exists; do
+ echo " ${host} '$(distsrc_for_host "$host")'"
+done
+exit 1
+else
+ mkdir -p "$DISTSRC_BASE"
+fi
+
+#########
+# Setup #
+#########
+
# Determine the maximum number of jobs to run simultaneously (overridable by
# environment)
MAX_JOBS="${MAX_JOBS:-$(nproc)}"
@@ -16,11 +126,23 @@ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}"
# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility
# across time.
time-machine() {
+ # shellcheck disable=SC2086
guix time-machine --url=https://github.com/dongcarl/guix.git \
--commit=b066c25026f21fb57677aa34692a5034338e7ee3 \
+ --max-jobs="$MAX_JOBS" \
+ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
+ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \
-- "$@"
}
+# Make sure an output directory exists for our builds
+OUTDIR="${OUTDIR:-${PWD}/output}"
+[ -e "$OUTDIR" ] || mkdir -p "$OUTDIR"
+
+#########
+# Build #
+#########
+
# Function to be called when building for host ${1} and the user interrupts the
# build
int_trap() {
@@ -38,9 +160,9 @@ and untracked files and directories will be wiped, allowing you to start anew.
EOF
}
-# Deterministically build Bitcoin Core for HOSTs (overridable by environment)
+# Deterministically build Bitcoin Core
# shellcheck disable=SC2153
-for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu x86_64-w64-mingw32}; do
+for host in $HOSTS; do
# Display proper warning when the user interrupts the build
trap 'int_trap ${host}' INT
@@ -50,6 +172,19 @@ for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv
# for the particular $HOST we're building for
export HOST="$host"
+ # shellcheck disable=SC2030
+cat << EOF
+INFO: Building commit ${GIT_COMMIT:?not set} for platform triple ${HOST:?not set}:
+ ...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set}
+ ...running at most ${MAX_JOBS:?not set} jobs
+ ...from worktree directory: '${PWD}'
+ ...bind-mounted in container to: '/bitcoin'
+ ...in build directory: '$(distsrc_for_host "$HOST")'
+ ...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")'
+ ...outputting in: '${OUTDIR:?not set}'
+ ...bind-mounted in container to: '/outdir'
+EOF
+
# Run the build script 'contrib/guix/libexec/build.sh' in the build
# container specified by 'contrib/guix/manifest.scm'.
#
@@ -99,20 +234,36 @@ for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv
# make the downloaded depends sources available to it. The sources
# should have been downloaded prior to this invocation.
#
- # shellcheck disable=SC2086
+ # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"}
+ #
+ # fetch substitute from SUBSTITUTE_URLS if they are
+ # authorized
+ #
+ # Depending on the user's security model, it may be desirable to use
+ # substitutes (pre-built packages) from servers that the user trusts.
+ # Please read the README.md in the same directory as this file for
+ # more information.
+ #
+ # shellcheck disable=SC2086,SC2031
time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \
--container \
--pure \
--no-cwd \
--share="$PWD"=/bitcoin \
+ --share="$DISTSRC_BASE"=/distsrc-base \
+ --share="$OUTDIR"=/outdir \
--expose="$(git rev-parse --git-common-dir)" \
${SOURCES_PATH:+--share="$SOURCES_PATH"} \
- ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
+ --max-jobs="$MAX_JOBS" \
+ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
+ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
-- env HOST="$host" \
MAX_JOBS="$MAX_JOBS" \
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
${V:+V=1} \
${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
+ DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \
+ OUTDIR=/outdir \
bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh"
)
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index d658c4f6a6..b00c42ce01 100644
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -11,9 +11,15 @@ if [ -n "$V" ]; then
export VERBOSE="$V"
fi
-# Check that environment variables assumed to be set by the environment are set
-echo "Building for platform triple ${HOST:?not set} with reference timestamp ${SOURCE_DATE_EPOCH:?not set}..."
-echo "At most ${MAX_JOBS:?not set} jobs will run at once..."
+# Check that required environment variables are set
+cat << EOF
+Required environment variables as seen inside the container:
+ HOST: ${HOST:?not set}
+ SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set}
+ MAX_JOBS: ${MAX_JOBS:?not set}
+ DISTSRC: ${DISTSRC:?not set}
+ OUTDIR: ${OUTDIR:?not set}
+EOF
#####################
# Environment Setup #
@@ -23,19 +29,6 @@ echo "At most ${MAX_JOBS:?not set} jobs will run at once..."
# $HOSTs after successfully building.
BASEPREFIX="${PWD}/depends"
-# Setup an output directory for our build
-OUTDIR="${OUTDIR:-${PWD}/output}"
-[ -e "$OUTDIR" ] || mkdir -p "$OUTDIR"
-
-# Setup the directory where our Bitcoin Core build for HOST will occur
-DISTSRC="${DISTSRC:-${PWD}/distsrc-${HOST}}"
-if [ -e "$DISTSRC" ]; then
- echo "DISTSRC directory '${DISTSRC}' exists, probably because of previous builds... Aborting..."
- exit 1
-else
- mkdir -p "$DISTSRC"
-fi
-
# Given a package name and an output name, return the path of that output in our
# current guix environment
store_path() {
@@ -189,6 +182,7 @@ esac
# Make $HOST-specific native binaries from depends available in $PATH
export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}"
+mkdir -p "$DISTSRC"
(
cd "$DISTSRC"
diff --git a/contrib/init/README.md b/contrib/init/README.md
index 306a37f75a..affc7c2e75 100644
--- a/contrib/init/README.md
+++ b/contrib/init/README.md
@@ -1,6 +1,6 @@
Sample configuration files for:
```
-SystemD: bitcoind.service
+systemd: bitcoind.service
Upstart: bitcoind.conf
OpenRC: bitcoind.openrc
bitcoind.openrcconf
@@ -9,4 +9,4 @@ macOS: org.bitcoin.bitcoind.plist
```
have been made available to assist packagers in creating node packages here.
-See doc/init.md for more information.
+See [doc/init.md](../../doc/init.md) for more information.
diff --git a/contrib/init/bitcoind.service b/contrib/init/bitcoind.service
index 8b308644b1..5999928aa4 100644
--- a/contrib/init/bitcoind.service
+++ b/contrib/init/bitcoind.service
@@ -11,7 +11,11 @@
[Unit]
Description=Bitcoin daemon
-After=network.target
+Documentation=https://github.com/bitcoin/bitcoin/blob/master/doc/init.md
+
+# https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/
+After=network-online.target
+Wants=network-online.target
[Service]
ExecStart=/usr/bin/bitcoind -daemon \
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index 6c3db2620b..2d9a4a2153 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -92,19 +92,15 @@ created using these tools. The build process has been designed to avoid includin
SDK's files in Gitian's outputs. All interim tarballs are fully deterministic and may be freely
redistributed.
-`genisoimage` is used to create the initial DMG. It is not deterministic as-is, so it has been
-patched. A system `genisoimage` will work fine, but it will not be deterministic because
-the file-order will change between invocations. The patch can be seen here: [cdrkit-deterministic.patch](https://github.com/bitcoin/bitcoin/blob/master/depends/patches/native_cdrkit/cdrkit-deterministic.patch).
-No effort was made to fix this cleanly, so it likely leaks memory badly, however it's only used for
-a single invocation, so that's no real concern.
+[`xorrisofs`](https://www.gnu.org/software/xorriso/) is used to create the DMG.
-`genisoimage` cannot compress DMGs, so afterwards, the DMG tool from the
-`libdmg-hfsplus` project is used to compress it. There are several bugs in this tool and its
-maintainer has seemingly abandoned the project.
+`xorrisofs` cannot compress DMGs, so afterwards, the DMG tool from the
+`libdmg-hfsplus` project is used to compress it. There are several bugs in this
+tool and its maintainer has seemingly abandoned the project.
The DMG tool has the ability to create DMGs from scratch as well, but this functionality is
broken. Only the compression feature is currently used. Ideally, the creation could be fixed
-and `genisoimage` would no longer be necessary.
+and `xorrisofs` would no longer be necessary.
Background images and other features can be added to DMG files by inserting a
`.DS_Store` during creation.
diff --git a/contrib/macdeploy/detached-sig-apply.sh b/contrib/macdeploy/detached-sig-apply.sh
index 5c5a85d3fe..d481413cc3 100755
--- a/contrib/macdeploy/detached-sig-apply.sh
+++ b/contrib/macdeploy/detached-sig-apply.sh
@@ -8,10 +8,9 @@ set -e
UNSIGNED="$1"
SIGNATURE="$2"
-ARCH=x86_64
ROOTDIR=dist
-TEMPDIR=signed.temp
OUTDIR=signed-app
+SIGNAPPLE=signapple
if [ -z "$UNSIGNED" ]; then
echo "usage: $0 <unsigned app> <signature>"
@@ -23,35 +22,6 @@ if [ -z "$SIGNATURE" ]; then
exit 1
fi
-rm -rf ${TEMPDIR} && mkdir -p ${TEMPDIR}
-tar -C ${TEMPDIR} -xf ${UNSIGNED}
-cp -rf "${SIGNATURE}"/* ${TEMPDIR}
-
-if [ -z "${PAGESTUFF}" ]; then
- PAGESTUFF=${TEMPDIR}/pagestuff
-fi
-
-if [ -z "${CODESIGN_ALLOCATE}" ]; then
- CODESIGN_ALLOCATE=${TEMPDIR}/codesign_allocate
-fi
-
-find ${TEMPDIR} -name "*.sign" | while read i; do
- SIZE=$(stat -c %s "${i}")
- TARGET_FILE="$(echo "${i}" | sed 's/\.sign$//')"
-
- echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}"
- ${CODESIGN_ALLOCATE} -i "${TARGET_FILE}" -a ${ARCH} ${SIZE} -o "${i}.tmp"
-
- OFFSET=$(${PAGESTUFF} "${i}.tmp" -p | tail -2 | grep offset | sed 's/[^0-9]*//g')
- if [ -z ${QUIET} ]; then
- echo "Attaching signature at offset ${OFFSET}"
- fi
-
- dd if="$i" of="${i}.tmp" bs=1 seek=${OFFSET} count=${SIZE} 2>/dev/null
- mv "${i}.tmp" "${TARGET_FILE}"
- rm "${i}"
- echo "Success."
-done
-mv ${TEMPDIR}/${ROOTDIR} ${OUTDIR}
-rm -rf ${TEMPDIR}
+${SIGNAPPLE} apply ${UNSIGNED} ${SIGNATURE}
+mv ${ROOTDIR} ${OUTDIR}
echo "Signed: ${OUTDIR}"
diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh
index 31a97f0a24..4f246cbb3f 100755
--- a/contrib/macdeploy/detached-sig-create.sh
+++ b/contrib/macdeploy/detached-sig-create.sh
@@ -8,44 +8,21 @@ set -e
ROOTDIR=dist
BUNDLE="${ROOTDIR}/Bitcoin-Qt.app"
-CODESIGN=codesign
+SIGNAPPLE=signapple
TEMPDIR=sign.temp
-TEMPLIST=${TEMPDIR}/signatures.txt
OUT=signature-osx.tar.gz
-OUTROOT=osx
+OUTROOT=osx/dist
if [ -z "$1" ]; then
- echo "usage: $0 <codesign args>"
- echo "example: $0 -s MyIdentity"
+ echo "usage: $0 <signapple args>"
+ echo "example: $0 <path to key>"
exit 1
fi
-rm -rf ${TEMPDIR} ${TEMPLIST}
+rm -rf ${TEMPDIR}
mkdir -p ${TEMPDIR}
-${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}"
-
-grep -v CodeResources < "${TEMPLIST}" | while read i; do
- TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
- SIZE=$(pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g')
- OFFSET=$(pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g')
- SIGNFILE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}.sign"
- DIRNAME="$(dirname "${SIGNFILE}")"
- mkdir -p "${DIRNAME}"
- echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}"
- dd if="$i" of="${SIGNFILE}" bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null
-done
-
-grep CodeResources < "${TEMPLIST}" | while read i; do
- TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
- RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}"
- DIRNAME="$(dirname "${RESOURCE}")"
- mkdir -p "${DIRNAME}"
- echo "Adding resource for: \"${TARGETFILE}\""
- cp "${i}" "${RESOURCE}"
-done
-
-rm ${TEMPLIST}
+${SIGNAPPLE} sign -f --detach "${TEMPDIR}/${OUTROOT}" "$@" "${BUNDLE}"
tar -C "${TEMPDIR}" -czf "${OUT}" .
rm -rf "${TEMPDIR}"
diff --git a/contrib/macdeploy/extract-osx-sdk.sh b/contrib/macdeploy/extract-osx-sdk.sh
deleted file mode 100755
index 3c7bdf4217..0000000000
--- a/contrib/macdeploy/extract-osx-sdk.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) 2016-2020 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-export LC_ALL=C
-set -e
-
-INPUTFILE="Xcode_7.3.1.dmg"
-HFSFILENAME="5.hfs"
-SDKDIR="Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk"
-
-7z x "${INPUTFILE}" "${HFSFILENAME}"
-SDKNAME="$(basename "${SDKDIR}")"
-SDKDIRINODE=$(ifind -n "${SDKDIR}" "${HFSFILENAME}")
-fls "${HFSFILENAME}" -rpF ${SDKDIRINODE} |
- while read type inode filename; do
- inode="${inode::-1}"
- if [ "${filename:0:14}" = "usr/share/man/" ]; then
- continue
- fi
- filename="${SDKNAME}/$filename"
- echo "Extracting $filename ..."
- mkdir -p "$(dirname "$filename")"
- if [ "$type" = "l/l" ]; then
- ln -s "$(icat "${HFSFILENAME}" $inode)" "$filename"
- else
- icat "${HFSFILENAME}" $inode >"$filename"
- fi
-done
-echo "Building ${SDKNAME}.tar.gz ..."
-MTIME="$(istat "${HFSFILENAME}" "${SDKDIRINODE}" | perl -nle 'm/Content Modified:\s+(.*?)\s\(/ && print $1')"
-find "${SDKNAME}" | sort | tar --no-recursion --mtime="${MTIME}" --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > "${SDKNAME}.tar.gz"
-echo 'All done!'
diff --git a/contrib/signet/README.md b/contrib/signet/README.md
index c4aa5ae2f7..71dc2f9638 100644
--- a/contrib/signet/README.md
+++ b/contrib/signet/README.md
@@ -17,3 +17,64 @@ Syntax: `getcoins.py [-h|--help] [-c|--cmd=<bitcoin-cli path>] [-f|--faucet=<fau
If using the default network, invoking the script with no arguments should be sufficient under normal
circumstances, but if multiple people are behind the same IP address, the faucet will by default only
accept one claim per day. See `--password` above.
+
+miner
+=====
+
+To mine the first block in your custom chain, you can run:
+
+ cd src/
+ CLI="./bitcoin-cli -conf=mysignet.conf"
+    MINER="../contrib/signet/miner"
+ GRIND="./bitcoin-util grind"
+ ADDR=$($CLI -signet getnewaddress)
+ $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --set-block-time=-1
+
+This will mine a block with the current timestamp. If you want to backdate the chain, you can give a different timestamp to --set-block-time.
+
+You will then need to pick a difficulty target. Since signet chains are primarily protected by a signature rather than proof of work, there is no need to spend as much energy as possible mining; however, you may wish to choose to spend more time than the absolute minimum. The calibrate subcommand can be used to pick a target, eg:
+
+ $MINER calibrate --grind-cmd="$GRIND"
+ nbits=1e00f403 for 25s average mining time
+
+It defaults to estimating an nbits value resulting in 25s average time to find a block, but the --seconds parameter can be used to pick a different target, or the --nbits parameter can be used to estimate how long it will take for a given difficulty.
+
+Using the --ongoing parameter will then cause the signet miner to create blocks indefinitely. It will pick the time between blocks so that difficulty is adjusted to match the provided --nbits value.
+
+ $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=1e00f403 --ongoing
+
+Other options
+-------------
+
+The --debug and --quiet options are available to control how noisy the signet miner's output is. Note that the --debug, --quiet and --cli parameters must all appear before the subcommand (generate, calibrate, etc) if used.
+
+Instead of specifying --ongoing, you can specify --max-blocks=N to mine N blocks and stop.
+
+Instead of using a single address, a ranged descriptor may be provided instead (via the --descriptor parameter), with the reward for the block at height H being sent to the H'th address generated from the descriptor.
+
+Instead of calculating a specific nbits value, --min-nbits can be specified instead, in which case the minimum signet difficulty will be targeted.
+
+By default, the signet miner mines blocks at fixed intervals with minimal variation. If you want blocks to appear more randomly, as they do in mainnet, specify the --poisson option.
+
+Using the --multiminer parameter allows mining to be distributed amongst multiple miners. For example, if you have 3 miners and want to share blocks between them, specify --multiminer=1/3 on one, --multiminer=2/3 on another, and --multiminer=3/3 on the last one. If you want one to do 10% of blocks and two others to do 45% each, --multiminer=1-10/100 on the first, and --multiminer=11-55 and --multiminer=56-100 on the others. Note that which miner mines which block is determined by the previous block hash, so occasional runs of one miner doing many blocks in a row is to be expected.
+
+When --multiminer is used, if a miner is down and does not mine a block within five minutes of when it is due, the other miners will automatically act as redundant backups ensuring the chain does not halt. The --backup-delay parameter can be used to change how long a given miner waits, allowing one to be the primary backup (after five minutes) and another to be the secondary backup (after six minutes, eg).
+
+The --standby-delay parameter can be used to make a backup miner that only mines if a block doesn't arrive on time. This can be combined with --multiminer if desired. Setting --standby-delay also prevents the first block from being mined immediately.
+
+Advanced usage
+--------------
+
+The process generate follows internally is to get a block template, convert that into a PSBT, sign the PSBT, move the signature from the signed PSBT into the block template's coinbase, grind proof of work for the block, and then submit the block to the network.
+
+These steps can instead be done explicitly:
+
+ $CLI -signet getblocktemplate '{"rules": ["signet","segwit"]}' |
+ $MINER --cli="$CLI" genpsbt --address="$ADDR" |
+ $CLI -signet -stdin walletprocesspsbt |
+ jq -r .psbt |
+ $MINER --cli="$CLI" solvepsbt --grind-cmd="$GRIND" |
+ $CLI -signet -stdin submitblock
+
+This is intended to allow you to replace part of the pipeline for further experimentation, if desired.
+
diff --git a/contrib/signet/miner b/contrib/signet/miner
new file mode 100755
index 0000000000..a3fba49d0e
--- /dev/null
+++ b/contrib/signet/miner
@@ -0,0 +1,639 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+import argparse
+import base64
+import json
+import logging
+import math
+import os.path
+import re
+import struct
+import sys
+import time
+import subprocess
+
+from binascii import unhexlify
+from io import BytesIO
+
+PATH_BASE_CONTRIB_SIGNET = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+PATH_BASE_TEST_FUNCTIONAL = os.path.abspath(os.path.join(PATH_BASE_CONTRIB_SIGNET, "..", "..", "test", "functional"))
+sys.path.insert(0, PATH_BASE_TEST_FUNCTIONAL)
+
+from test_framework.blocktools import WITNESS_COMMITMENT_HEADER, script_BIP34_coinbase_height # noqa: E402
+from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, ToHex, deser_string, hash256, ser_compact_size, ser_string, ser_uint256, uint256_from_str # noqa: E402
+from test_framework.script import CScriptOp # noqa: E402
+
+logging.basicConfig(
+ format='%(asctime)s %(levelname)s %(message)s',
+ level=logging.INFO,
+ datefmt='%Y-%m-%d %H:%M:%S')
+
+SIGNET_HEADER = b"\xec\xc7\xda\xa2"
+PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed
+RE_MULTIMINER = re.compile(r"^(\d+)(-(\d+))?/(\d+)$")
+
+# #### some helpers that could go into test_framework
+
+# like FromHex, but without the hex part
+def FromBinary(cls, stream):
+ """deserialize a binary stream (or bytes object) into an object"""
+ # handle bytes object by turning it into a stream
+ was_bytes = isinstance(stream, bytes)
+ if was_bytes:
+ stream = BytesIO(stream)
+ obj = cls()
+ obj.deserialize(stream)
+ if was_bytes:
+ assert len(stream.read()) == 0
+ return obj
+
+class PSBTMap:
+ """Class for serializing and deserializing PSBT maps"""
+
+ def __init__(self, map=None):
+ self.map = map if map is not None else {}
+
+ def deserialize(self, f):
+ m = {}
+ while True:
+ k = deser_string(f)
+ if len(k) == 0:
+ break
+ v = deser_string(f)
+ if len(k) == 1:
+ k = k[0]
+ assert k not in m
+ m[k] = v
+ self.map = m
+
+ def serialize(self):
+ m = b""
+ for k,v in self.map.items():
+ if isinstance(k, int) and 0 <= k and k <= 255:
+ k = bytes([k])
+ m += ser_compact_size(len(k)) + k
+ m += ser_compact_size(len(v)) + v
+ m += b"\x00"
+ return m
+
+class PSBT:
+ """Class for serializing and deserializing PSBTs"""
+
+ def __init__(self):
+ self.g = PSBTMap()
+ self.i = []
+ self.o = []
+ self.tx = None
+
+ def deserialize(self, f):
+ assert f.read(5) == b"psbt\xff"
+ self.g = FromBinary(PSBTMap, f)
+ assert 0 in self.g.map
+ self.tx = FromBinary(CTransaction, self.g.map[0])
+ self.i = [FromBinary(PSBTMap, f) for _ in self.tx.vin]
+ self.o = [FromBinary(PSBTMap, f) for _ in self.tx.vout]
+ return self
+
+ def serialize(self):
+ assert isinstance(self.g, PSBTMap)
+ assert isinstance(self.i, list) and all(isinstance(x, PSBTMap) for x in self.i)
+ assert isinstance(self.o, list) and all(isinstance(x, PSBTMap) for x in self.o)
+ assert 0 in self.g.map
+ tx = FromBinary(CTransaction, self.g.map[0])
+ assert len(tx.vin) == len(self.i)
+ assert len(tx.vout) == len(self.o)
+
+ psbt = [x.serialize() for x in [self.g] + self.i + self.o]
+ return b"psbt\xff" + b"".join(psbt)
+
+ def to_base64(self):
+ return base64.b64encode(self.serialize()).decode("utf8")
+
+ @classmethod
+ def from_base64(cls, b64psbt):
+ return FromBinary(cls, base64.b64decode(b64psbt))
+
+# #####
+
+def create_coinbase(height, value, spk):
+ cb = CTransaction()
+ cb.vin = [CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff)]
+ cb.vout = [CTxOut(value, spk)]
+ return cb
+
+def get_witness_script(witness_root, witness_nonce):
+ commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
+ return b"\x6a" + CScriptOp.encode_op_pushdata(WITNESS_COMMITMENT_HEADER + ser_uint256(commitment))
+
+def signet_txs(block, challenge):
+ # assumes signet solution has not been added yet so does not need
+ # to be removed
+
+ txs = block.vtx[:]
+ txs[0] = CTransaction(txs[0])
+ txs[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER)
+ hashes = []
+ for tx in txs:
+ tx.rehash()
+ hashes.append(ser_uint256(tx.sha256))
+ mroot = block.get_merkle_root(hashes)
+
+ sd = b""
+ sd += struct.pack("<i", block.nVersion)
+ sd += ser_uint256(block.hashPrevBlock)
+ sd += ser_uint256(mroot)
+ sd += struct.pack("<I", block.nTime)
+
+ to_spend = CTransaction()
+ to_spend.nVersion = 0
+ to_spend.nLockTime = 0
+ to_spend.vin = [CTxIn(COutPoint(0, 0xFFFFFFFF), b"\x00" + CScriptOp.encode_op_pushdata(sd), 0)]
+ to_spend.vout = [CTxOut(0, challenge)]
+ to_spend.rehash()
+
+ spend = CTransaction()
+ spend.nVersion = 0
+ spend.nLockTime = 0
+ spend.vin = [CTxIn(COutPoint(to_spend.sha256, 0), b"", 0)]
+ spend.vout = [CTxOut(0, b"\x6a")]
+
+ return spend, to_spend
+
+def do_createpsbt(block, signme, spendme):
+ psbt = PSBT()
+ psbt.g = PSBTMap( {0: signme.serialize(),
+ PSBT_SIGNET_BLOCK: block.serialize()
+ } )
+ psbt.i = [ PSBTMap( {0: spendme.serialize(),
+ 3: bytes([1,0,0,0])})
+ ]
+ psbt.o = [ PSBTMap() ]
+ return psbt.to_base64()
+
+def do_decode_psbt(b64psbt):
+ psbt = PSBT.from_base64(b64psbt)
+
+ assert len(psbt.tx.vin) == 1
+ assert len(psbt.tx.vout) == 1
+ assert PSBT_SIGNET_BLOCK in psbt.g.map
+
+ scriptSig = psbt.i[0].map.get(7, b"")
+ scriptWitness = psbt.i[0].map.get(8, b"\x00")
+
+ return FromBinary(CBlock, psbt.g.map[PSBT_SIGNET_BLOCK]), ser_string(scriptSig) + scriptWitness
+
+def finish_block(block, signet_solution, grind_cmd):
+ block.vtx[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER + signet_solution)
+ block.vtx[0].rehash()
+ block.hashMerkleRoot = block.calc_merkle_root()
+ if grind_cmd is None:
+ block.solve()
+ else:
+ headhex = CBlockHeader.serialize(block).hex()
+ cmd = grind_cmd.split(" ") + [headhex]
+ newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip()
+ newhead = FromHex(CBlockHeader(), newheadhex.decode('utf8'))
+ block.nNonce = newhead.nNonce
+ block.rehash()
+ return block
+
+def generate_psbt(tmpl, reward_spk, *, blocktime=None):
+ signet_spk = tmpl["signet_challenge"]
+ signet_spk_bin = unhexlify(signet_spk)
+
+ cbtx = create_coinbase(height=tmpl["height"], value=tmpl["coinbasevalue"], spk=reward_spk)
+ cbtx.vin[0].nSequence = 2**32-2
+ cbtx.rehash()
+
+ block = CBlock()
+ block.nVersion = tmpl["version"]
+ block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
+ block.nTime = tmpl["curtime"] if blocktime is None else blocktime
+ if block.nTime < tmpl["mintime"]:
+ block.nTime = tmpl["mintime"]
+ block.nBits = int(tmpl["bits"], 16)
+ block.nNonce = 0
+ block.vtx = [cbtx] + [FromHex(CTransaction(), t["data"]) for t in tmpl["transactions"]]
+
+ witnonce = 0
+ witroot = block.calc_witness_merkle_root()
+ cbwit = CTxInWitness()
+ cbwit.scriptWitness.stack = [ser_uint256(witnonce)]
+ block.vtx[0].wit.vtxinwit = [cbwit]
+ block.vtx[0].vout.append(CTxOut(0, get_witness_script(witroot, witnonce)))
+
+ signme, spendme = signet_txs(block, signet_spk_bin)
+
+ return do_createpsbt(block, signme, spendme)
+
+def get_reward_address(args, height):
+ if args.address is not None:
+ return args.address
+
+ if '*' not in args.descriptor:
+ addr = json.loads(args.bcli("deriveaddresses", args.descriptor))[0]
+ args.address = addr
+ return addr
+
+ remove = [k for k in args.derived_addresses.keys() if k+20 <= height]
+ for k in remove:
+ del args.derived_addresses[k]
+
+ addr = args.derived_addresses.get(height, None)
+ if addr is None:
+ addrs = json.loads(args.bcli("deriveaddresses", args.descriptor, "[%d,%d]" % (height, height+20)))
+ addr = addrs[0]
+ for k, a in enumerate(addrs):
+ args.derived_addresses[height+k] = a
+
+ return addr
+
+def get_reward_addr_spk(args, height):
+ assert args.address is not None or args.descriptor is not None
+
+ if hasattr(args, "reward_spk"):
+ return args.address, args.reward_spk
+
+ reward_addr = get_reward_address(args, height)
+ reward_spk = unhexlify(json.loads(args.bcli("getaddressinfo", reward_addr))["scriptPubKey"])
+ if args.address is not None:
+ # will always be the same, so cache
+ args.reward_spk = reward_spk
+
+ return reward_addr, reward_spk
+
+def do_genpsbt(args):
+ tmpl = json.load(sys.stdin)
+ _, reward_spk = get_reward_addr_spk(args, tmpl["height"])
+ psbt = generate_psbt(tmpl, reward_spk)
+ print(psbt)
+
+def do_solvepsbt(args):
+ block, signet_solution = do_decode_psbt(sys.stdin.read())
+ block = finish_block(block, signet_solution, args.grind_cmd)
+ print(ToHex(block))
+
+def nbits_to_target(nbits):
+ shift = (nbits >> 24) & 0xff
+ return (nbits & 0x00ffffff) * 2**(8*(shift - 3))
+
+def target_to_nbits(target):
+ tstr = "{0:x}".format(target)
+ if len(tstr) < 6:
+ tstr = ("000000"+tstr)[-6:]
+ if len(tstr) % 2 != 0:
+ tstr = "0" + tstr
+ if int(tstr[0],16) >= 0x8:
+ # avoid "negative"
+ tstr = "00" + tstr
+ fix = int(tstr[:6], 16)
+ sz = len(tstr)//2
+ if tstr[6:] != "0"*(sz*2-6):
+ fix += 1
+
+ return int("%02x%06x" % (sz,fix), 16)
+
+def seconds_to_hms(s):
+ if s == 0:
+ return "0s"
+ neg = (s < 0)
+ if neg:
+ s = -s
+ out = ""
+ if s % 60 > 0:
+ out = "%ds" % (s % 60)
+ s //= 60
+ if s % 60 > 0:
+ out = "%dm%s" % (s % 60, out)
+ s //= 60
+ if s > 0:
+ out = "%dh%s" % (s, out)
+ if neg:
+ out = "-" + out
+ return out
+
+def next_block_delta(last_nbits, last_hash, ultimate_target, do_poisson):
+ # strategy:
+ # 1) work out how far off our desired target we are
+ # 2) cap it to a factor of 4 since that's the best we can do in a single retarget period
+ # 3) use that to work out the desired average interval in this retarget period
+ # 4) if doing poisson, use the last hash to pick a uniformly random number in [0,1), and work out a random multiplier to vary the average by
+ # 5) cap the resulting interval between 1 second and 1 hour to avoid extremes
+
+ INTERVAL = 600.0*2016/2015 # 10 minutes, adjusted for the off-by-one bug
+
+ current_target = nbits_to_target(last_nbits)
+ retarget_factor = ultimate_target / current_target
+ retarget_factor = max(0.25, min(retarget_factor, 4.0))
+
+ avg_interval = INTERVAL * retarget_factor
+
+ if do_poisson:
+ det_rand = int(last_hash[-8:], 16) * 2**-32
+ this_interval_variance = -math.log1p(-det_rand)
+ else:
+ this_interval_variance = 1
+
+ this_interval = avg_interval * this_interval_variance
+ this_interval = max(1, min(this_interval, 3600))
+
+ return this_interval
+
+def next_block_is_mine(last_hash, my_blocks):
+ det_rand = int(last_hash[-16:-8], 16)
+ return my_blocks[0] <= (det_rand % my_blocks[2]) < my_blocks[1]
+
+def do_generate(args):
+ if args.max_blocks is not None:
+ if args.ongoing:
+ logging.error("Cannot specify both --ongoing and --max-blocks")
+ return 1
+ if args.max_blocks < 1:
+ logging.error("N must be a positive integer")
+ return 1
+ max_blocks = args.max_blocks
+ elif args.ongoing:
+ max_blocks = None
+ else:
+ max_blocks = 1
+
+ if args.set_block_time is not None and max_blocks != 1:
+ logging.error("Cannot specify --ongoing or --max-blocks > 1 when using --set-block-time")
+ return 1
+ if args.set_block_time is not None and args.set_block_time < 0:
+ args.set_block_time = time.time()
+ logging.info("Treating negative block time as current time (%d)" % (args.set_block_time))
+
+ if args.min_nbits:
+ if args.nbits is not None:
+ logging.error("Cannot specify --nbits and --min-nbits")
+ return 1
+ args.nbits = "1e0377ae"
+ logging.info("Using nbits=%s" % (args.nbits))
+
+ if args.set_block_time is None:
+ if args.nbits is None or len(args.nbits) != 8:
+ logging.error("Must specify --nbits (use calibrate command to determine value)")
+ return 1
+
+ if args.multiminer is None:
+ my_blocks = (0,1,1)
+ else:
+ if not args.ongoing:
+ logging.error("Cannot specify --multiminer without --ongoing")
+ return 1
+ m = RE_MULTIMINER.match(args.multiminer)
+ if m is None:
+ logging.error("--multiminer argument must be k/m or j-k/m")
+ return 1
+ start,_,stop,total = m.groups()
+ if stop is None:
+ stop = start
+ start, stop, total = map(int, (start, stop, total))
+ if stop < start or start <= 0 or total < stop or total == 0:
+ logging.error("Inconsistent values for --multiminer")
+ return 1
+ my_blocks = (start-1, stop, total)
+
+ ultimate_target = nbits_to_target(int(args.nbits,16))
+
+ mined_blocks = 0
+ bestheader = {"hash": None}
+ lastheader = None
+ while max_blocks is None or mined_blocks < max_blocks:
+
+ # current status?
+ bci = json.loads(args.bcli("getblockchaininfo"))
+
+ if bestheader["hash"] != bci["bestblockhash"]:
+ bestheader = json.loads(args.bcli("getblockheader", bci["bestblockhash"]))
+
+ if lastheader is None:
+ lastheader = bestheader["hash"]
+ elif bestheader["hash"] != lastheader:
+ next_delta = next_block_delta(int(bestheader["bits"], 16), bestheader["hash"], ultimate_target, args.poisson)
+ next_delta += bestheader["time"] - time.time()
+ next_is_mine = next_block_is_mine(bestheader["hash"], my_blocks)
+ logging.info("Received new block at height %d; next in %s (%s)", bestheader["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup"))
+ lastheader = bestheader["hash"]
+
+ # when is the next block due to be mined?
+ now = time.time()
+ if args.set_block_time is not None:
+ logging.debug("Setting start time to %d", args.set_block_time)
+ mine_time = args.set_block_time
+ action_time = now
+ is_mine = True
+ elif bestheader["height"] == 0:
+ logging.error("When mining first block in a new signet, must specify --set-block-time")
+ return 1
+ else:
+
+ time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson)
+ mine_time = bestheader["time"] + time_delta
+
+ is_mine = next_block_is_mine(bci["bestblockhash"], my_blocks)
+
+ action_time = mine_time
+ if not is_mine:
+ action_time += args.backup_delay
+
+ if args.standby_delay > 0:
+ action_time += args.standby_delay
+ elif mined_blocks == 0:
+ # for non-standby, always mine immediately on startup,
+ # even if the next block shouldn't be ours
+ action_time = now
+
+ # don't want fractional times so round down
+ mine_time = int(mine_time)
+ action_time = int(action_time)
+
+ # can't mine a block 2h in the future; 1h55m for some safety
+ action_time = max(action_time, mine_time - 6900)
+
+ # ready to go? otherwise sleep and check for new block
+ if now < action_time:
+ sleep_for = min(action_time - now, 60)
+ if mine_time < now:
+ # someone else might have mined the block,
+ # so check frequently, so we don't end up late
+ # mining the next block if it's ours
+ sleep_for = min(20, sleep_for)
+ minestr = "mine" if is_mine else "backup"
+ logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(mine_time - now), minestr))
+ time.sleep(sleep_for)
+ continue
+
+ # gbt
+ tmpl = json.loads(args.bcli("getblocktemplate", '{"rules":["signet","segwit"]}'))
+ if tmpl["previousblockhash"] != bci["bestblockhash"]:
+ logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bci["bestblockhash"])
+ time.sleep(1)
+ continue
+
+ logging.debug("GBT template: %s", tmpl)
+
+ if tmpl["mintime"] > mine_time:
+ logging.info("Updating block time from %d to %d", mine_time, tmpl["mintime"])
+ mine_time = tmpl["mintime"]
+ if mine_time > now:
+ logging.error("GBT mintime is in the future: %d is %d seconds later than %d", mine_time, (mine_time-now), now)
+ return 1
+
+ # address for reward
+ reward_addr, reward_spk = get_reward_addr_spk(args, tmpl["height"])
+
+ # mine block
+ logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(mine_time-bestheader["time"]), mine_time, is_mine)
+ mined_blocks += 1
+ psbt = generate_psbt(tmpl, reward_spk, blocktime=mine_time)
+ psbt_signed = json.loads(args.bcli("-stdin", "walletprocesspsbt", input=psbt.encode('utf8')))
+ if not psbt_signed.get("complete",False):
+ logging.debug("Generated PSBT: %s" % (psbt,))
+ sys.stderr.write("PSBT signing failed")
+ return 1
+ block, signet_solution = do_decode_psbt(psbt_signed["psbt"])
+ block = finish_block(block, signet_solution, args.grind_cmd)
+
+ # submit block
+ r = args.bcli("-stdin", "submitblock", input=ToHex(block).encode('utf8'))
+
+ # report
+ bstr = "block" if is_mine else "backup block"
+
+ next_delta = next_block_delta(block.nBits, block.hash, ultimate_target, args.poisson)
+ next_delta += block.nTime - time.time()
+ next_is_mine = next_block_is_mine(block.hash, my_blocks)
+
+ logging.debug("Block hash %s payout to %s", block.hash, reward_addr)
+ logging.info("Mined %s at height %d; next in %s (%s)", bstr, tmpl["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup"))
+ if r != "":
+ logging.warning("submitblock returned %s for height %d hash %s", r, tmpl["height"], block.hash)
+ lastheader = block.hash
+
+def do_calibrate(args):
+ if args.nbits is not None and args.seconds is not None:
+ sys.stderr.write("Can only specify one of --nbits or --seconds\n")
+ return 1
+ if args.nbits is not None and len(args.nbits) != 8:
+ sys.stderr.write("Must specify 8 hex digits for --nbits")
+ return 1
+
+ TRIALS = 600 # gets variance down pretty low
+ TRIAL_BITS = 0x1e3ea75f # takes about 5m to do 600 trials
+ #TRIAL_BITS = 0x1e7ea75f # XXX
+
+ header = CBlockHeader()
+ header.nBits = TRIAL_BITS
+ targ = nbits_to_target(header.nBits)
+
+ start = time.time()
+ count = 0
+ #CHECKS=[]
+ for i in range(TRIALS):
+ header.nTime = i
+ header.nNonce = 0
+ headhex = header.serialize().hex()
+ cmd = args.grind_cmd.split(" ") + [headhex]
+ newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip()
+ #newhead = FromHex(CBlockHeader(), newheadhex.decode('utf8'))
+ #count += newhead.nNonce
+ #if (i+1) % 100 == 0:
+ # CHECKS.append((i+1, count, time.time()-start))
+
+ #print("checks =", [c*1.0 / (b*targ*2**-256) for _,b,c in CHECKS])
+
+ avg = (time.time() - start) * 1.0 / TRIALS
+ #exp_count = 2**256 / targ * TRIALS
+ #print("avg =", avg, "count =", count, "exp_count =", exp_count)
+
+ if args.nbits is not None:
+ want_targ = nbits_to_target(int(args.nbits,16))
+ want_time = avg*targ/want_targ
+ else:
+ want_time = args.seconds if args.seconds is not None else 25
+ want_targ = int(targ*(avg/want_time))
+
+ print("nbits=%08x for %ds average mining time" % (target_to_nbits(want_targ), want_time))
+ return 0
+
+def bitcoin_cli(basecmd, args, **kwargs):
+ cmd = basecmd + ["-signet"] + args
+ logging.debug("Calling bitcoin-cli: %r", cmd)
+ out = subprocess.run(cmd, stdout=subprocess.PIPE, **kwargs, check=True).stdout
+ if isinstance(out, bytes):
+ out = out.decode('utf8')
+ return out.strip()
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--cli", default="bitcoin-cli", type=str, help="bitcoin-cli command")
+ parser.add_argument("--debug", action="store_true", help="Print debugging info")
+ parser.add_argument("--quiet", action="store_true", help="Only print warnings/errors")
+
+ cmds = parser.add_subparsers(help="sub-commands")
+ genpsbt = cmds.add_parser("genpsbt", help="Generate a block PSBT for signing")
+ genpsbt.set_defaults(fn=do_genpsbt)
+
+ solvepsbt = cmds.add_parser("solvepsbt", help="Solve a signed block PSBT")
+ solvepsbt.set_defaults(fn=do_solvepsbt)
+
+ generate = cmds.add_parser("generate", help="Mine blocks")
+ generate.set_defaults(fn=do_generate)
+ generate.add_argument("--ongoing", action="store_true", help="Keep mining blocks")
+ generate.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)")
+ generate.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp)")
+ generate.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)")
+ generate.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)")
+ generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times")
+ #generate.add_argument("--signcmd", default=None, type=str, help="Alternative signing command")
+ generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)")
+ generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)")
+ generate.add_argument("--standby-delay", default=0, type=int, help="Seconds to delay before mining blocks (default=0)")
+
+ calibrate = cmds.add_parser("calibrate", help="Calibrate difficulty")
+ calibrate.set_defaults(fn=do_calibrate)
+ calibrate.add_argument("--nbits", type=str, default=None)
+ calibrate.add_argument("--seconds", type=int, default=None)
+
+ for sp in [genpsbt, generate]:
+ sp.add_argument("--address", default=None, type=str, help="Address for block reward payment")
+ sp.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment")
+
+ for sp in [solvepsbt, generate, calibrate]:
+ sp.add_argument("--grind-cmd", default=None, type=str, help="Command to grind a block header for proof-of-work")
+
+ args = parser.parse_args(sys.argv[1:])
+
+ args.bcli = lambda *a, input=b"", **kwargs: bitcoin_cli(args.cli.split(" "), list(a), input=input, **kwargs)
+
+ if hasattr(args, "address") and hasattr(args, "descriptor"):
+ if args.address is None and args.descriptor is None:
+ sys.stderr.write("Must specify --address or --descriptor\n")
+ return 1
+ elif args.address is not None and args.descriptor is not None:
+ sys.stderr.write("Only specify one of --address or --descriptor\n")
+ return 1
+ args.derived_addresses = {}
+
+ if args.debug:
+ logging.getLogger().setLevel(logging.DEBUG)
+ elif args.quiet:
+ logging.getLogger().setLevel(logging.WARNING)
+ else:
+ logging.getLogger().setLevel(logging.INFO)
+
+ if hasattr(args, "fn"):
+ return args.fn(args)
+ else:
+ logging.error("Must specify command")
+ return 1
+
+if __name__ == "__main__":
+ main()
+
+
diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py
index c7ebac50d4..87341ccf96 100644
--- a/contrib/testgen/base58.py
+++ b/contrib/testgen/base58.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2012-2018 The Bitcoin Core developers
+# Copyright (c) 2012-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
diff --git a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py
index 49320d92e6..8a3918da6b 100755
--- a/contrib/testgen/gen_key_io_test_vectors.py
+++ b/contrib/testgen/gen_key_io_test_vectors.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2012-2018 The Bitcoin Core developers
+# Copyright (c) 2012-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py
index 8b8503331d..9cb887e2dc 100755
--- a/contrib/zmq/zmq_sub.py
+++ b/contrib/zmq/zmq_sub.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2014-2018 The Bitcoin Core developers
+# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.