211 files changed, 2525 insertions, 2528 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index c4aae3a50a..ccf7077546 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -324,7 +324,7 @@ task: << : *BASE_TEMPLATE android_sdk_cache: folder: "depends/SDKs/android" - fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.1.7779620" + fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.2.8568313" depends_sources_cache: folder: "depends/sources" fingerprint_script: git rev-list -1 HEAD ./depends diff --git a/.gitignore b/.gitignore index 6c888bfdc4..6ca9d39a16 100644 --- a/.gitignore +++ b/.gitignore @@ -44,8 +44,6 @@ src/obj share/setup.nsi share/qt/Info.plist -src/univalue/gen - src/qt/*.moc src/qt/moc_*.cpp src/qt/forms/ui_*.h diff --git a/Makefile.am b/Makefile.am index 05e89f12b7..011faa62c7 100644 --- a/Makefile.am +++ b/Makefile.am @@ -174,7 +174,6 @@ LCOV_FILTER_PATTERN = \ -p "src/leveldb/" \ -p "src/crc32c/" \ -p "src/bench/" \ - -p "src/univalue" \ -p "src/crypto/ctaes" \ -p "src/minisketch" \ -p "src/secp256k1" \ diff --git a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj index a64ae881f2..9f9dc9d5fa 100644 --- a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj +++ b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj @@ -53,6 +53,7 @@ <ClCompile Include="..\..\src\qt\transactiondesc.cpp" /> <ClCompile Include="..\..\src\qt\transactiondescdialog.cpp" /> <ClCompile Include="..\..\src\qt\transactionfilterproxy.cpp" /> + <ClCompile Include="..\..\src\qt\transactionoverviewwidget.cpp" /> <ClCompile Include="..\..\src\qt\transactionrecord.cpp" /> <ClCompile Include="..\..\src\qt\transactiontablemodel.cpp" /> <ClCompile Include="..\..\src\qt\transactionview.cpp" /> diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh index f174b4d074..1f14dd079f 100755 --- a/ci/lint/06_script.sh +++ b/ci/lint/06_script.sh @@ -18,11 +18,10 @@ export COMMIT_RANGE test/lint/git-subtree-check.sh src/crypto/ctaes test/lint/git-subtree-check.sh src/secp256k1 test/lint/git-subtree-check.sh src/minisketch -test/lint/git-subtree-check.sh src/univalue test/lint/git-subtree-check.sh src/leveldb test/lint/git-subtree-check.sh src/crc32c test/lint/check-doc.py -test/lint/lint-all.py +test/lint/all-lint.py if [ "$CIRRUS_REPO_FULL_NAME" = "bitcoin/bitcoin" ] && [ "$CIRRUS_PR" = "" ] ; then # Sanity check only the last few commits to get notified of missing sigs, diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh index 522a5497fa..6732db36ad 100755 --- a/ci/test/00_setup_env_android.sh +++ b/ci/test/00_setup_env_android.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export HOST=aarch64-linux-android -export PACKAGES="clang llvm unzip openjdk-8-jdk gradle" +export PACKAGES="unzip openjdk-8-jdk gradle" export CONTAINER_NAME=ci_android export DOCKER_NAME_TAG="ubuntu:focal" @@ -16,8 +16,8 @@ export RUN_FUNCTIONAL_TESTS=false export ANDROID_API_LEVEL=28 export ANDROID_BUILD_TOOLS_VERSION=28.0.3 -export ANDROID_NDK_VERSION=23.1.7779620 -export ANDROID_TOOLS_URL=https://dl.google.com/android/repository/commandlinetools-linux-6609375_latest.zip +export ANDROID_NDK_VERSION=23.2.8568313 +export ANDROID_TOOLS_URL=https://dl.google.com/android/repository/commandlinetools-linux-8512546_latest.zip export ANDROID_HOME="${DEPENDS_DIR}/SDKs/android" export ANDROID_NDK_HOME="${ANDROID_HOME}/ndk/${ANDROID_NDK_VERSION}" export DEP_OPTS="ANDROID_SDK=${ANDROID_HOME} ANDROID_NDK=${ANDROID_NDK_HOME} ANDROID_API_LEVEL=${ANDROID_API_LEVEL} 
ANDROID_TOOLCHAIN_BIN=${ANDROID_NDK_HOME}/toolchains/llvm/prebuilt/linux-x86_64/bin/" diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh index fc2f76797c..f3da6b4f31 100755 --- a/ci/test/05_before_script.sh +++ b/ci/test/05_before_script.sh @@ -31,9 +31,9 @@ if [ -n "$ANDROID_HOME" ] && [ ! -d "$ANDROID_HOME" ]; then if [ ! -f "$ANDROID_TOOLS_PATH" ]; then CI_EXEC curl --location --fail "${ANDROID_TOOLS_URL}" -o "$ANDROID_TOOLS_PATH" fi - CI_EXEC mkdir -p "${ANDROID_HOME}/cmdline-tools" - CI_EXEC unzip -o "$ANDROID_TOOLS_PATH" -d "${ANDROID_HOME}/cmdline-tools" - CI_EXEC "yes | ${ANDROID_HOME}/cmdline-tools/tools/bin/sdkmanager --install \"build-tools;${ANDROID_BUILD_TOOLS_VERSION}\" \"platform-tools\" \"platforms;android-${ANDROID_API_LEVEL}\" \"ndk;${ANDROID_NDK_VERSION}\"" + CI_EXEC mkdir -p "$ANDROID_HOME" + CI_EXEC unzip -o "$ANDROID_TOOLS_PATH" -d "$ANDROID_HOME" + CI_EXEC "yes | ${ANDROID_HOME}/cmdline-tools/bin/sdkmanager --sdk_root=\"${ANDROID_HOME}\" --install \"build-tools;${ANDROID_BUILD_TOOLS_VERSION}\" \"platform-tools\" \"platforms;android-${ANDROID_API_LEVEL}\" \"ndk;${ANDROID_NDK_VERSION}\"" fi if [ -z "$NO_DEPENDS" ]; then diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh index 6a6bde05a1..218f5eeb63 100755 --- a/ci/test/06_script_a.sh +++ b/ci/test/06_script_a.sh @@ -6,18 +6,20 @@ export LC_ALL=C.UTF-8 +BITCOIN_CONFIG_ALL="--enable-suppress-external-warnings --disable-dependency-tracking --prefix=$DEPENDS_DIR/$HOST" +if [ -z "$NO_WERROR" ]; then + BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-werror" +fi + if [ -n "$ANDROID_TOOLS_URL" ]; then CI_EXEC make distclean || true CI_EXEC ./autogen.sh - CI_EXEC ./configure "$BITCOIN_CONFIG" --prefix="${DEPENDS_DIR}/aarch64-linux-android" || ( (CI_EXEC cat config.log) && false) + CI_EXEC ./configure "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false) CI_EXEC "make $MAKEJOBS && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk" exit 0 fi -BITCOIN_CONFIG_ALL="--enable-external-signer --enable-suppress-external-warnings --disable-dependency-tracking --prefix=$DEPENDS_DIR/$HOST --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib" -if [ -z "$NO_WERROR" ]; then - BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-werror" -fi +BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-external-signer --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib" CI_EXEC "ccache --zero-stats --max-size=$CCACHE_SIZE" if [ -n "$CONFIG_SHELL" ]; then diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh index e64af2ad5d..bdb68e0f6f 100755 --- a/ci/test/06_script_b.sh +++ b/ci/test/06_script_b.sh @@ -41,6 +41,9 @@ if [ "${RUN_TIDY}" = "true" ]; then CI_EXEC "python3 ${DIR_IWYU}/include-what-you-use/iwyu_tool.py"\ " src/compat"\ " src/init"\ + " src/policy/feerate.cpp"\ + " src/policy/packages.cpp"\ + " src/policy/settings.cpp"\ " src/rpc/fees.cpp"\ " src/rpc/signmessage.cpp"\ " -p . 
${MAKEJOBS} -- -Xiwyu --cxx17ns -Xiwyu --mapping_file=${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp" diff --git a/configure.ac b/configure.ac index e09ef14156..a4a23474c8 100644 --- a/configure.ac +++ b/configure.ac @@ -998,6 +998,8 @@ AC_CHECK_DECLS([setsid]) AC_CHECK_DECLS([pipe2]) +AC_CHECK_FUNCS([timingsafe_bcmp]) + AC_CHECK_DECLS([le16toh, le32toh, le64toh, htole16, htole32, htole64, be16toh, be32toh, be64toh, htobe16, htobe32, htobe64],,, [#if HAVE_ENDIAN_H #include <endian.h> @@ -1454,6 +1456,10 @@ if test "$use_boost" = "yes"; then dnl we don't use multi_index serialization BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION" + if test "$enable_debug" = "yes" || test "$enable_fuzz" = "yes"; then + BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE" + fi + if test "$suppress_external_warnings" != "no"; then BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) fi @@ -1548,6 +1554,7 @@ fi dnl libevent check +use_libevent=no if test "$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench" != "nonononono"; then PKG_CHECK_MODULES([EVENT], [libevent >= 2.1.8], [use_libevent=yes], [AC_MSG_ERROR([libevent version 2.1.8 or greater not found.])]) if test "$TARGET_OS" != "windows"; then diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py index e20eb4b0d2..680de1f1b3 100755 --- a/contrib/devtools/copyright_header.py +++ b/contrib/devtools/copyright_header.py @@ -35,7 +35,6 @@ EXCLUDE_DIRS = [ "src/leveldb/", "src/minisketch", "src/secp256k1/", - "src/univalue/", "src/crc32c/", ] diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index a419e392ee..23d29af3f1 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -50,9 +50,8 @@ MAX_VERSIONS = { # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', -'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', -'environ', '_environ', '__environ', +'environ', '_environ', '__environ', '_fini', '_init', 'stdin', +'stdout', 'stderr', } # Expected linker-loader names can be found here: diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index cdf0020d4d..44c769f463 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -250,7 +250,7 @@ case "$HOST" in esac case "$HOST" in - powerpc64-linux-*|riscv64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;; + powerpc64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;; esac # Make $HOST-specific native binaries from depends available in $PATH diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py index 9988478f1b..eefd22a60e 100755 --- a/contrib/message-capture/message-capture-parser.py +++ b/contrib/message-capture/message-capture-parser.py @@ -79,7 +79,8 @@ def to_jsonable(obj: Any) -> Any: val = getattr(obj, slot, None) if slot in HASH_INTS and isinstance(val, int): ret[slot] = ser_uint256(val).hex() - elif slot in HASH_INT_VECTORS and isinstance(val[0], int): + elif slot in HASH_INT_VECTORS: + assert all(isinstance(a, int) for a in val) ret[slot] = [ser_uint256(a).hex() for a in val] else: ret[slot] = to_jsonable(val) diff --git a/contrib/seeds/.gitignore b/contrib/seeds/.gitignore index e4a39d6093..d9a2451f70 100644 --- a/contrib/seeds/.gitignore +++ b/contrib/seeds/.gitignore @@ 
-1 +1,2 @@ seeds_main.txt +asmap-filled.dat diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md index c53446bfb0..b2ea7522ac 100644 --- a/contrib/seeds/README.md +++ b/contrib/seeds/README.md @@ -8,21 +8,11 @@ and remove old versions as necessary (at a minimum when GetDesirableServiceFlags changes its default return value, as those are the services which seeds are added to addrman with). -The seeds compiled into the release are created from sipa's DNS seed data, like this: +The seeds compiled into the release are created from sipa's DNS seed and AS map +data. Run the following commands from the `/contrib/seeds` directory: curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt - python3 makeseeds.py < seeds_main.txt > nodes_main.txt + curl https://bitcoin.sipa.be/asmap-filled.dat > asmap-filled.dat + python3 makeseeds.py -a asmap-filled.dat < seeds_main.txt > nodes_main.txt cat nodes_main_manual.txt >> nodes_main.txt python3 generate-seeds.py . > ../../src/chainparamsseeds.h - -## Dependencies - -Ubuntu, Debian: - - sudo apt-get install python3-dnspython - -and/or for other operating systems: - - pip install dnspython - -See https://dnspython.readthedocs.io/en/latest/installation.html for more information. diff --git a/contrib/seeds/asmap.py b/contrib/seeds/asmap.py new file mode 100644 index 0000000000..7a605d0b9e --- /dev/null +++ b/contrib/seeds/asmap.py @@ -0,0 +1,815 @@ +# Copyright (c) 2022 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file LICENSE or http://www.opensource.org/licenses/mit-license.php. + +""" +This module provides the ASNEntry and ASMap classes. +""" + +import copy +import ipaddress +import random +import unittest +from enum import Enum +from functools import total_ordering +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union, overload + +def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> List[bool]: + """ + Convert an IPv4 or IPv6 network to a prefix represented as a list of bits. + + IPv4 ranges are remapped to their IPv4-mapped IPv6 range (::ffff:0:0/96). + """ + num_bits = net.prefixlen + netrange = int.from_bytes(net.network_address.packed, 'big') + + # Map an IPv4 prefix into IPv6 space. + if isinstance(net, ipaddress.IPv4Network): + num_bits += 96 + netrange += 0xffff00000000 + + # Strip unused bottom bits. + assert (netrange & ((1 << (128 - num_bits)) - 1)) == 0 + return [((netrange >> (127 - i)) & 1) != 0 for i in range(num_bits)] + +def prefix_to_net(prefix: List[bool]) -> Union[ipaddress.IPv4Network,ipaddress.IPv6Network]: + """The reverse operation of net_to_prefix.""" + # Convert to number + netrange = sum(b << (127 - i) for i, b in enumerate(prefix)) + num_bits = len(prefix) + assert num_bits <= 128 + + # Return IPv4 range if in ::ffff:0:0/96 + if num_bits >= 96 and (netrange >> 32) == 0xffff: + return ipaddress.IPv4Network((netrange & 0xffffffff, num_bits - 96), True) + + # Return IPv6 range otherwise. + return ipaddress.IPv6Network((netrange, num_bits), True) + +# Shortcut for (prefix, ASN) entries. +ASNEntry = Tuple[List[bool], int] + +# Shortcut for (prefix, old ASN, new ASN) entries. +ASNDiff = Tuple[List[bool], int, int] + +class _VarLenCoder: + """ + A class representing a custom variable-length binary encoder/decoder for + integers. Each object represents a different coder, with different parameters + minval and clsbits. + + The encoding is easiest to describe using an example. Let's say minval=100 and + clsbits=[4,2,2,3]. 
In that case: + - x in [100..115]: encoded as [0] + [4-bit BE encoding of (x-100)]. + - x in [116..119]: encoded as [1,0] + [2-bit BE encoding of (x-116)]. + - x in [120..123]: encoded as [1,1,0] + [2-bit BE encoding of (x-120)]. + - x in [124..131]: encoded as [1,1,1] + [3-bit BE encoding of (x-124)]. + + In general, every number is encoded as: + - First, k "1"-bits, where k is the class the number falls in (there is one class + per element of clsbits). + - Then, a "0"-bit, unless k is the highest class, in which case there is nothing. + - Lastly, clsbits[k] bits encoding in big endian the position in its class that + number falls into. + - Every class k consists of 2^clsbits[k] consecutive integers. k=0 starts at minval, + other classes start one past the last element of the class before it. + """ + + def __init__(self, minval: int, clsbits: List[int]): + """Construct a new _VarLenCoder.""" + self._minval = minval + self._clsbits = clsbits + self._maxval = minval + sum(1 << b for b in clsbits) - 1 + + def can_encode(self, val: int) -> bool: + """Check whether value val is in the range this coder supports.""" + return self._minval <= val <= self._maxval + + def encode(self, val: int, ret: List[int]) -> None: + """Append encoding of val onto integer list ret.""" + + assert self._minval <= val <= self._maxval + val -= self._minval + bits = 0 + for k, bits in enumerate(self._clsbits): + if val >> bits: + # If the value will not fit in class k, subtract its range from v, + # emit a "1" bit and continue with the next class. + val -= 1 << bits + ret.append(1) + else: + if k + 1 < len(self._clsbits): + # Unless we're in the last class, emit a "0" bit. + ret.append(0) + break + # And then encode v (now the position within the class) in big endian. + ret.extend((val >> (bits - 1 - b)) & 1 for b in range(bits)) + + def encode_size(self, val: int) -> int: + """Compute how many bits are needed to encode val.""" + assert self._minval <= val <= self._maxval + val -= self._minval + ret = 0 + bits = 0 + for k, bits in enumerate(self._clsbits): + if val >> bits: + val -= 1 << bits + ret += 1 + else: + ret += k + 1 < len(self._clsbits) + break + return ret + bits + + def decode(self, stream, bitpos) -> Tuple[int,int]: + """Decode a number starting at bitpos in stream, returning value and new bitpos.""" + val = self._minval + bits = 0 + for k, bits in enumerate(self._clsbits): + bit = 0 + if k + 1 < len(self._clsbits): + bit = stream[bitpos] + bitpos += 1 + if not bit: + break + val += 1 << bits + for i in range(bits): + bit = stream[bitpos] + bitpos += 1 + val += bit << (bits - 1 - i) + return val, bitpos + +# Variable-length encoders used in the binary asmap format. +_CODER_INS = _VarLenCoder(0, [0, 0, 1]) +_CODER_ASN = _VarLenCoder(1, list(range(15, 25))) +_CODER_MATCH = _VarLenCoder(2, list(range(1, 9))) +_CODER_JUMP = _VarLenCoder(17, list(range(5, 31))) + +class _Instruction(Enum): + """One instruction in the binary asmap format.""" + # A return instruction, encoded as [0], returns a constant ASN. It is followed by + # an integer using the ASN encoding. + RETURN = 0 + # A jump instruction, encoded as [1,0] inspects the next unused bit in the input + # and either continues execution (if 0), or skips a specified number of bits (if 1). + # It is followed by an integer, and then two subprograms. The integer uses jump encoding + # and corresponds to the length of the first subprogram (so it can be skipped). 
+ JUMP = 1 + # A match instruction, encoded as [1,1,0] inspects 1 or more of the next unused bits + # in the input with its argument. If they all match, execution continues. If they do + # not, failure is returned. If a default instruction has been executed before, instead + # of failure the default instruction's argument is returned. It is followed by an + # integer in match encoding, and a subprogram. That value is at least 2 bits and at + # most 9 bits. An n-bit value signifies matching (n-1) bits in the input with the lower + # (n-1) bits in the match value. + MATCH = 2 + # A default instruction, encoded as [1,1,1] sets the default variable to its argument, + # and continues execution. It is followed by an integer in ASN encoding, and a subprogram. + DEFAULT = 3 + # Not an actual instruction, but a way to encode the empty program that fails. In the + # encoder, it is used more generally to represent the failure case inside MATCH instructions, + # which may (if used inside the context of a DEFAULT instruction) actually correspond to + # a succesful return. In this usage, they're always converted to an actual MATCH or RETURN + # before the top level is reached (see make_default below). + END = 4 + +class _BinNode: + """A class representing a (node of) the parsed binary asmap format.""" + + @overload + def __init__(self, ins: _Instruction): ... + @overload + def __init__(self, ins: _Instruction, arg1: int): ... + @overload + def __init__(self, ins: _Instruction, arg1: "_BinNode", arg2: "_BinNode"): ... + @overload + def __init__(self, ins: _Instruction, arg1: int, arg2: "_BinNode"): ... + + def __init__(self, ins: _Instruction, arg1=None, arg2=None): + """ + Construct a new asmap node. Possibilities are: + - _BinNode(_Instruction.RETURN, asn) + - _BinNode(_Instruction.JUMP, node_0, node_1) + - _BinNode(_Instruction.MATCH, val, node) + - _BinNode(_Instruction.DEFAULT, asn, node) + - _BinNode(_Instruction.END) + """ + self.ins = ins + self.arg1 = arg1 + self.arg2 = arg2 + if ins == _Instruction.RETURN: + assert isinstance(arg1, int) + assert arg2 is None + self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1) + elif ins == _Instruction.JUMP: + assert isinstance(arg1, _BinNode) + assert isinstance(arg2, _BinNode) + self.size = (_CODER_INS.encode_size(ins.value) + _CODER_JUMP.encode_size(arg1.size) + + arg1.size + arg2.size) + elif ins == _Instruction.DEFAULT: + assert isinstance(arg1, int) + assert isinstance(arg2, _BinNode) + self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1) + arg2.size + elif ins == _Instruction.MATCH: + assert isinstance(arg1, int) + assert isinstance(arg2, _BinNode) + self.size = (_CODER_INS.encode_size(ins.value) + _CODER_MATCH.encode_size(arg1) + + arg2.size) + elif ins == _Instruction.END: + assert arg1 is None + assert arg2 is None + self.size = 0 + else: + assert False + + @staticmethod + def make_end() -> "_BinNode": + """Constructor for a _BinNode with just an END instruction.""" + return _BinNode(_Instruction.END) + + @staticmethod + def make_leaf(val: int) -> "_BinNode": + """Constructor for a _BinNode of just a RETURN instruction.""" + assert val is not None and val > 0 + return _BinNode(_Instruction.RETURN, val) + + @staticmethod + def make_branch(node0: "_BinNode", node1: "_BinNode") -> "_BinNode": + """ + Construct a _BinNode corresponding to running either the node0 or node1 subprogram, + based on the next input bit. 
It exploits shortcuts that are possible in the encoding, + and uses either a JUMP, MATCH, or END instruction. + """ + if node0.ins == _Instruction.END and node1.ins == _Instruction.END: + return node0 + if node0.ins == _Instruction.END: + if node1.ins == _Instruction.MATCH and node1.arg1 <= 0xFF: + return _BinNode(node1.ins, node1.arg1 + (1 << node1.arg1.bit_length()), node1.arg2) + return _BinNode(_Instruction.MATCH, 3, node1) + if node1.ins == _Instruction.END: + if node0.ins == _Instruction.MATCH and node0.arg1 <= 0xFF: + return _BinNode(node0.ins, node0.arg1 + (1 << (node0.arg1.bit_length() - 1)), + node0.arg2) + return _BinNode(_Instruction.MATCH, 2, node0) + return _BinNode(_Instruction.JUMP, node0, node1) + + @staticmethod + def make_default(val: int, sub: "_BinNode") -> "_BinNode": + """ + Construct a _BinNode that corresponds to the specified subprogram, with the specified + default value. It exploits shortcuts that are possible in the encoding, and will use + either a DEFAULT or a RETURN instruction.""" + assert val is not None and val > 0 + if sub.ins == _Instruction.END: + return _BinNode(_Instruction.RETURN, val) + if sub.ins in (_Instruction.RETURN, _Instruction.DEFAULT): + return sub + return _BinNode(_Instruction.DEFAULT, val, sub) + +@total_ordering +class ASMap: + """ + A class whose objects represent a mapping from subnets to ASNs. + + Internally the mapping is stored as a binary trie, but can be converted + from/to a list of ASNEntry objects, and from/to the binary asmap file format. + + In the trie representation, nodes are represented as bare lists for efficiency + and ease of manipulation: + - [0] means an unassigned subnet (no ASN mapping for it is present) + - [int] means a subnet mapped entirely to the specified ASN. + - [node,node] means a subnet whose lower half and upper half have different + - mappings, represented by new trie nodes. + """ + + def update(self, prefix: List[bool], asn: int) -> None: + """Update this ASMap object to map prefix to the specified asn.""" + assert asn == 0 or _CODER_ASN.can_encode(asn) + + def recurse(node: List, offset: int) -> None: + if offset == len(prefix): + # Reached the end of prefix; overwrite this node. + node.clear() + node.append(asn) + return + if len(node) == 1: + # Need to descend into a leaf node; split it up. + oldasn = node[0] + node.clear() + node.append([oldasn]) + node.append([oldasn]) + # Descend into the node. + recurse(node[prefix[offset]], offset + 1) + # If the result is two identical leaf children, merge them. + if len(node[0]) == 1 and len(node[1]) == 1 and node[0] == node[1]: + oldasn = node[0][0] + node.clear() + node.append(oldasn) + recurse(self._trie, 0) + + def update_multi(self, entries: List[Tuple[List[bool], int]]) -> None: + """Apply multiple update operations, where longer prefixes take precedence.""" + entries.sort(key=lambda entry: len(entry[0])) + for prefix, asn in entries: + self.update(prefix, asn) + + def _set_trie(self, trie) -> None: + """Set trie directly. 
Internal use only.""" + def recurse(node: List) -> None: + if len(node) < 2: + return + recurse(node[0]) + recurse(node[1]) + if len(node[0]) == 2: + return + if node[0] == node[1]: + if len(node[0]) == 0: + node.clear() + else: + asn = node[0][0] + node.clear() + node.append(asn) + recurse(trie) + self._trie = trie + + def __init__(self, entries: Optional[Iterable[ASNEntry]] = None) -> None: + """Construct an ASMap object from an optional list of entries.""" + self._trie = [0] + if entries is not None: + def entry_key(entry): + """Sort function that places shorter prefixes first.""" + prefix, asn = entry + return len(prefix), prefix, asn + for prefix, asn in sorted(entries, key=entry_key): + self.update(prefix, asn) + + def lookup(self, prefix: List[bool]) -> Optional[int]: + """Look up a prefix. Returns ASN, or 0 if unassigned, or None if indeterminate.""" + node = self._trie + for bit in prefix: + if len(node) == 1: + break + node = node[bit] + if len(node) == 1: + return node[0] + return None + + def _to_entries_flat(self, fill: bool = False) -> List[ASNEntry]: + """Convert an ASMap object to a list of non-overlapping (prefix, asn) objects.""" + prefix : List[bool] = [] + + def recurse(node: List) -> List[ASNEntry]: + ret = [] + if len(node) == 1: + if node[0] > 0: + ret = [(list(prefix), node[0])] + elif len(node) == 2: + prefix.append(False) + ret = recurse(node[0]) + prefix[-1] = True + ret += recurse(node[1]) + prefix.pop() + if fill and len(ret) > 1: + asns = set(x[1] for x in ret) + if len(asns) == 1: + ret = [(list(prefix), list(asns)[0])] + return ret + return recurse(self._trie) + + def _to_entries_minimal(self, fill: bool = False) -> List[ASNEntry]: + """Convert a trie to a minimal list of ASNEntry objects, exploiting overlap.""" + prefix : List[bool] = [] + + def recurse(node: List) -> (Tuple[Dict[Optional[int], List[ASNEntry]], bool]): + if len(node) == 1 and node[0] == 0: + return {None if fill else 0: []}, True + if len(node) == 1: + return {node[0]: [], None: [(list(prefix), node[0])]}, False + ret: Dict[Optional[int], List[ASNEntry]] = {} + prefix.append(False) + left, lhole = recurse(node[0]) + prefix[-1] = True + right, rhole = recurse(node[1]) + prefix.pop() + hole = not fill and (lhole or rhole) + def candidate(ctx: Optional[int], res0: Optional[List[ASNEntry]], + res1: Optional[List[ASNEntry]]): + if res0 is not None and res1 is not None: + if ctx not in ret or len(res0) + len(res1) < len(ret[ctx]): + ret[ctx] = res0 + res1 + for ctx in set(left) | set(right): + candidate(ctx, left.get(ctx), right.get(ctx)) + candidate(ctx, left.get(None), right.get(ctx)) + candidate(ctx, left.get(ctx), right.get(None)) + if not hole: + for ctx in list(ret): + if ctx is not None: + candidate(None, [(list(prefix), ctx)], ret[ctx]) + if None in ret: + ret = {ctx:entries for ctx, entries in ret.items() + if ctx is None or len(entries) < len(ret[None])} + if hole: + ret = {ctx:entries for ctx, entries in ret.items() if ctx is None or ctx == 0} + return ret, hole + res, _ = recurse(self._trie) + return res[0] if 0 in res else res[None] + + def __str__(self) -> str: + """Convert this ASMap object to a string containing Python code constructing it.""" + return f"ASMap({self._trie})" + + def to_entries(self, overlapping: bool = True, fill: bool = False) -> List[ASNEntry]: + """ + Convert the mappings in this ASMap object to a list of ASNEntry objects. + + Arguments: + overlapping: Permit the subnets in the resulting ASNEntry to overlap. + Setting this can result in a shorter list. 
+ fill: Permit the resulting ASNEntry objects to cover subnets that + are unassigned in this ASMap object. Setting this can + result in a shorter list. + """ + if overlapping: + return self._to_entries_minimal(fill) + return self._to_entries_flat(fill) + + @staticmethod + def from_random(num_leaves: int = 10, max_asn: int = 6, + unassigned_prob: float = 0.5) -> "ASMap": + """ + Construct a random ASMap object, with specified: + - Number of leaves in its trie (at least 1) + - Maximum ASN value (at least 1) + - Probability for leaf nodes to be unassigned + + The number of leaves in the resulting object may be less than what is + requested. This method is mostly intended for testing. + """ + assert num_leaves >= 1 + assert max_asn >= 1 or unassigned_prob == 1 + assert _CODER_ASN.can_encode(max_asn) + assert 0.0 <= unassigned_prob <= 1.0 + trie: List = [] + leaves = [trie] + ret = ASMap() + for i in range(1, num_leaves): + idx = random.randrange(i) + leaf = leaves[idx] + lastleaf = leaves.pop() + if idx + 1 < i: + leaves[idx] = lastleaf + leaf.append([]) + leaf.append([]) + leaves.append(leaf[0]) + leaves.append(leaf[1]) + for leaf in leaves: + if random.random() >= unassigned_prob: + leaf.append(random.randrange(1, max_asn + 1)) + else: + leaf.append(0) + #pylint: disable=protected-access + ret._set_trie(trie) + return ret + + def _to_binnode(self, fill: bool = False) -> _BinNode: + """Convert a trie to a _BinNode object.""" + def recurse(node: List) -> Tuple[Dict[Optional[int], _BinNode], bool]: + if len(node) == 1 and node[0] == 0: + return {(None if fill else 0): _BinNode.make_end()}, True + if len(node) == 1: + return {None: _BinNode.make_leaf(node[0]), node[0]: _BinNode.make_end()}, False + ret: Dict[Optional[int], _BinNode] = {} + left, lhole = recurse(node[0]) + right, rhole = recurse(node[1]) + hole = (lhole or rhole) and not fill + + def candidate(ctx: Optional[int], arg1, arg2, func: Callable): + if arg1 is not None and arg2 is not None: + cand = func(arg1, arg2) + if ctx not in ret or cand.size < ret[ctx].size: + ret[ctx] = cand + + for ctx in set(left) | set(right): + candidate(ctx, left.get(ctx), right.get(ctx), _BinNode.make_branch) + candidate(ctx, left.get(None), right.get(ctx), _BinNode.make_branch) + candidate(ctx, left.get(ctx), right.get(None), _BinNode.make_branch) + if not hole: + for ctx in set(ret) - set([None]): + candidate(None, ctx, ret[ctx], _BinNode.make_default) + if None in ret: + ret = {ctx:enc for ctx, enc in ret.items() + if ctx is None or enc.size < ret[None].size} + if hole: + ret = {ctx:enc for ctx, enc in ret.items() if ctx is None or ctx == 0} + return ret, hole + res, _ = recurse(self._trie) + return res[0] if 0 in res else res[None] + + @staticmethod + def _from_binnode(binnode: _BinNode) -> "ASMap": + """Construct an ASMap object from a _BinNode. 
Internal use only.""" + def recurse(node: _BinNode, default: int) -> List: + if node.ins == _Instruction.RETURN: + return [node.arg1] + if node.ins == _Instruction.JUMP: + return [recurse(node.arg1, default), recurse(node.arg2, default)] + if node.ins == _Instruction.MATCH: + val = node.arg1 + sub = recurse(node.arg2, default) + while val >= 2: + bit = val & 1 + val >>= 1 + if bit: + sub = [[default], sub] + else: + sub = [sub, [default]] + return sub + assert node.ins == _Instruction.DEFAULT + return recurse(node.arg2, node.arg1) + ret = ASMap() + if binnode.ins != _Instruction.END: + #pylint: disable=protected-access + ret._set_trie(recurse(binnode, 0)) + return ret + + def to_binary(self, fill: bool = False) -> bytes: + """ + Convert this ASMap object to binary. + + Argument: + fill: permit the resulting binary encoder to contain mappers for + unassigned subnets in this ASMap object. Doing so may + reduce the size of the encoding. + Returns: + A bytes object with the encoding of this ASMap object. + """ + bits: List[int] = [] + + def recurse(node: _BinNode) -> None: + _CODER_INS.encode(node.ins.value, bits) + if node.ins == _Instruction.RETURN: + _CODER_ASN.encode(node.arg1, bits) + elif node.ins == _Instruction.JUMP: + _CODER_JUMP.encode(node.arg1.size, bits) + recurse(node.arg1) + recurse(node.arg2) + elif node.ins == _Instruction.DEFAULT: + _CODER_ASN.encode(node.arg1, bits) + recurse(node.arg2) + else: + assert node.ins == _Instruction.MATCH + _CODER_MATCH.encode(node.arg1, bits) + recurse(node.arg2) + + binnode = self._to_binnode(fill) + if binnode.ins != _Instruction.END: + recurse(binnode) + + val = 0 + nbits = 0 + ret = [] + for bit in bits: + val += (bit << nbits) + nbits += 1 + if nbits == 8: + ret.append(val) + val = 0 + nbits = 0 + if nbits: + ret.append(val) + return bytes(ret) + + @staticmethod + def from_binary(bindata: bytes) -> Optional["ASMap"]: + """Decode an ASMap object from the provided binary encoding.""" + + bits: List[int] = [] + for byte in bindata: + bits.extend((byte >> i) & 1 for i in range(8)) + + def recurse(bitpos: int) -> Tuple[_BinNode, int]: + insval, bitpos = _CODER_INS.decode(bits, bitpos) + ins = _Instruction(insval) + if ins == _Instruction.RETURN: + asn, bitpos = _CODER_ASN.decode(bits, bitpos) + return _BinNode(ins, asn), bitpos + if ins == _Instruction.JUMP: + jump, bitpos = _CODER_JUMP.decode(bits, bitpos) + left, bitpos1 = recurse(bitpos) + if bitpos1 != bitpos + jump: + raise ValueError("Inconsistent jump") + right, bitpos = recurse(bitpos1) + return _BinNode(ins, left, right), bitpos + if ins == _Instruction.MATCH: + match, bitpos = _CODER_MATCH.decode(bits, bitpos) + sub, bitpos = recurse(bitpos) + return _BinNode(ins, match, sub), bitpos + assert ins == _Instruction.DEFAULT + asn, bitpos = _CODER_ASN.decode(bits, bitpos) + sub, bitpos = recurse(bitpos) + return _BinNode(ins, asn, sub), bitpos + + if len(bits) == 0: + binnode = _BinNode(_Instruction.END) + else: + try: + binnode, bitpos = recurse(0) + except (ValueError, IndexError): + return None + if bitpos < len(bits) - 7: + return None + if not all(bit == 0 for bit in bits[bitpos:]): + return None + + return ASMap._from_binnode(binnode) + + def __lt__(self, other: "ASMap") -> bool: + return self._trie < other._trie + + def __eq__(self, other: object) -> bool: + if isinstance(other, ASMap): + return self._trie == other._trie + return False + + def extends(self, req: "ASMap") -> bool: + """Determine whether this matches req for all subranges where req is assigned.""" + def recurse(actual: 
List, require: List) -> bool: + if len(require) == 1 and require[0] == 0: + return True + if len(require) == 1: + if len(actual) == 1: + return bool(require[0] == actual[0]) + return recurse(actual[0], require) and recurse(actual[1], require) + if len(actual) == 2: + return recurse(actual[0], require[0]) and recurse(actual[1], require[1]) + return recurse(actual, require[0]) and recurse(actual, require[1]) + assert isinstance(req, ASMap) + #pylint: disable=protected-access + return recurse(self._trie, req._trie) + + def diff(self, other: "ASMap") -> List[ASNDiff]: + """Compute the diff from self to other.""" + prefix: List[bool] = [] + ret: List[ASNDiff] = [] + + def recurse(old_node: List, new_node: List): + if len(old_node) == 1 and len(new_node) == 1: + if old_node[0] != new_node[0]: + ret.append((list(prefix), old_node[0], new_node[0])) + else: + old_left: List = old_node if len(old_node) == 1 else old_node[0] + old_right: List = old_node if len(old_node) == 1 else old_node[1] + new_left: List = new_node if len(new_node) == 1 else new_node[0] + new_right: List = new_node if len(new_node) == 1 else new_node[1] + prefix.append(False) + recurse(old_left, new_left) + prefix[-1] = True + recurse(old_right, new_right) + prefix.pop() + assert isinstance(other, ASMap) + #pylint: disable=protected-access + recurse(self._trie, other._trie) + return ret + + def __copy__(self) -> "ASMap": + """Construct a copy of this ASMap object. Its state will not be shared.""" + ret = ASMap() + #pylint: disable=protected-access + ret._set_trie(copy.deepcopy(self._trie)) + return ret + + def __deepcopy__(self, _) -> "ASMap": + # ASMap objects do not allow sharing of the _trie member, so we don't need the memoization. + return self.__copy__() + + +class TestASMap(unittest.TestCase): + """Unit tests for this module.""" + + def test_ipv6_prefix_roundtrips(self) -> None: + """Test that random IPv6 network ranges roundtrip through prefix encoding.""" + for _ in range(20): + net_bits = random.getrandbits(128) + for prefix_len in range(0, 129): + masked_bits = (net_bits >> (128 - prefix_len)) << (128 - prefix_len) + net = ipaddress.IPv6Network((masked_bits.to_bytes(16, 'big'), prefix_len)) + prefix = net_to_prefix(net) + self.assertTrue(len(prefix) <= 128) + net2 = prefix_to_net(prefix) + self.assertEqual(net, net2) + + def test_ipv4_prefix_roundtrips(self) -> None: + """Test that random IPv4 network ranges roundtrip through prefix encoding.""" + for _ in range(100): + net_bits = random.getrandbits(32) + for prefix_len in range(0, 33): + masked_bits = (net_bits >> (32 - prefix_len)) << (32 - prefix_len) + net = ipaddress.IPv4Network((masked_bits.to_bytes(4, 'big'), prefix_len)) + prefix = net_to_prefix(net) + self.assertTrue(32 <= len(prefix) <= 128) + net2 = prefix_to_net(prefix) + self.assertEqual(net, net2) + + def test_asmap_roundtrips(self) -> None: + """Test case that verifies random ASMap objects roundtrip to/from entries/binary.""" + # Iterate over the number of leaves the random test ASMap objects have. + for leaves in range(1, 20): + # Iterate over the number of bits in the AS numbers used. + for asnbits in range(0, 24): + # Iterate over the probability that leaves are unassigned. + for pct in range(101): + # Construct a random ASMap object according to the above parameters. + asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits), + unassigned_prob=0.01 * pct) + # Run tests for to_entries and construction from those entries, both + # for overlapping and non-overlapping ones. 
+ for overlapping in [False, True]: + entries = asmap.to_entries(overlapping=overlapping, fill=False) + random.shuffle(entries) + asmap2 = ASMap(entries) + assert asmap2 is not None + self.assertEqual(asmap2, asmap) + entries = asmap.to_entries(overlapping=overlapping, fill=True) + random.shuffle(entries) + asmap2 = ASMap(entries) + assert asmap2 is not None + self.assertTrue(asmap2.extends(asmap)) + + # Run tests for to_binary and construction from binary. + enc = asmap.to_binary(fill=False) + asmap3 = ASMap.from_binary(enc) + assert asmap3 is not None + self.assertEqual(asmap3, asmap) + enc = asmap.to_binary(fill=True) + asmap3 = ASMap.from_binary(enc) + assert asmap3 is not None + self.assertTrue(asmap3.extends(asmap)) + + def test_patching(self) -> None: + """Test behavior of update, lookup, extends, and diff.""" + #pylint: disable=too-many-locals,too-many-nested-blocks + # Iterate over the number of leaves the random test ASMap objects have. + for leaves in range(1, 20): + # Iterate over the number of bits in the AS numbers used. + for asnbits in range(0, 10): + # Iterate over the probability that leaves are unassigned. + for pct in range(0, 101): + # Construct a random ASMap object according to the above parameters. + asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits), + unassigned_prob=0.01 * pct) + # Make a copy of that asmap object to which patches will be applied. + # It starts off being equal to asmap. + patched = copy.copy(asmap) + # Keep a list of patches performed. + patches: List[ASNEntry] = [] + # Initially there cannot be any difference. + self.assertEqual(asmap.diff(patched), []) + # Make 5 patches, each building on top of the previous ones. + for _ in range(0, 5): + # Construct a random path and new ASN to assign it to, apply it to patched, + # and remember it in patches. + pathlen = random.randrange(5) + path = [random.getrandbits(1) != 0 for _ in range(pathlen)] + newasn = random.randrange(1 + (1 << asnbits)) + patched.update(path, newasn) + patches = [(path, newasn)] + patches + + # Compute the diff, and whether asmap extends patched, and the other way + # around. + diff = asmap.diff(patched) + self.assertEqual(asmap == patched, len(diff) == 0) + extends = asmap.extends(patched) + back_extends = patched.extends(asmap) + # Determine whether those extends results are consistent with the diff + # result. + self.assertEqual(extends, all(d[2] == 0 for d in diff)) + self.assertEqual(back_extends, all(d[1] == 0 for d in diff)) + # For every diff found: + for path, old_asn, new_asn in diff: + # Verify asmap and patched actually differ there. + self.assertTrue(old_asn != new_asn) + self.assertEqual(asmap.lookup(path), old_asn) + self.assertEqual(patched.lookup(path), new_asn) + for _ in range(2): + # Extend the path far enough that it's smaller than any mapped + # range, and check the lookup holds there too. + spec_path = list(path) + while len(spec_path) < 32: + spec_path.append(random.getrandbits(1) != 0) + self.assertEqual(asmap.lookup(spec_path), old_asn) + self.assertEqual(patched.lookup(spec_path), new_asn) + # Search through the list of performed patches to find the last one + # applying to the extended path (note that patches is in reverse + # order, so the first match should work). + found = False + for patch_path, patch_asn in patches: + if spec_path[:len(patch_path)] == patch_path: + # When found, it must match whatever the result was patched + # to. + self.assertEqual(new_asn, patch_asn) + found = True + break + # And such a patch must exist. 
+ self.assertTrue(found) + +if __name__ == '__main__': + unittest.main() diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py index 78eb04a836..37c6f5fd7c 100755 --- a/contrib/seeds/makeseeds.py +++ b/contrib/seeds/makeseeds.py @@ -6,12 +6,15 @@ # Generate seeds.txt from Pieter's DNS seeder # +import argparse +import ipaddress import re import sys -import dns.resolver import collections from typing import List, Dict, Union +from asmap import ASMap, net_to_prefix + NSEEDS=512 MAX_SEEDS_PER_ASN = { @@ -45,7 +48,7 @@ def parseline(line: str) -> Union[dict, None]: sline = line.split() if len(sline) < 11: # line too short to be valid, skip it. - return None + return None m = PATTERN_IPV4.match(sline[0]) sortkey = None ip = None @@ -123,34 +126,8 @@ def filtermultiport(ips: List[Dict]) -> List[Dict]: hist[ip['sortkey']].append(ip) return [value[0] for (key,value) in list(hist.items()) if len(value)==1] -def lookup_asn(net: str, ip: str) -> Union[int, None]: - """ Look up the asn for an `ip` address by querying cymru.com - on network `net` (e.g. ipv4 or ipv6). - - Returns in integer ASN or None if it could not be found. - """ - try: - if net == 'ipv4': - ipaddr = ip - prefix = '.origin' - else: # http://www.team-cymru.com/IP-ASN-mapping.html - res = str() # 2001:4860:b002:23::68 - for nb in ip.split(':')[:4]: # pick the first 4 nibbles - for c in nb.zfill(4): # right padded with '0' - res += c + '.' # 2001 4860 b002 0023 - ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3 - prefix = '.origin6' - - asn = int([x.to_text() for x in dns.resolver.resolve('.'.join( - reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com', - 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) - return asn - except Exception as e: - sys.stderr.write(f'ERR: Could not resolve ASN for "{ip}": {e}\n') - return None - # Based on Greg Maxwell's seed_filter.py -def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: +def filterbyasn(asmap: ASMap, ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: """ Prunes `ips` by (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. 
@@ -165,21 +142,18 @@ def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Di asn_count: Dict[int, int] = collections.defaultdict(int) for i, ip in enumerate(ips_ipv46): - if i % 10 == 0: - # give progress update - print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True) - if net_count[ip['net']] == max_per_net: # do not add this ip as we already too many # ips from this network continue - asn = lookup_asn(ip['net'], ip['ip']) - if asn is None or asn_count[asn] == max_per_asn[ip['net']]: + asn = asmap.lookup(net_to_prefix(ipaddress.ip_network(ip['ip']))) + if not asn or asn_count[ip['net'], asn] == max_per_asn[ip['net']]: # do not add this ip as we already have too many # ips from this ASN on this network continue - asn_count[asn] += 1 + asn_count[ip['net'], asn] += 1 net_count[ip['net']] += 1 + ip['asn'] = asn result.append(ip) # Add back Onions (up to max_per_net) @@ -195,9 +169,23 @@ def ip_stats(ips: List[Dict]) -> str: return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}" +def parse_args(): + argparser = argparse.ArgumentParser(description='Generate a list of bitcoin node seed ip addresses.') + argparser.add_argument("-a","--asmap", help='the location of the asmap asn database file (required)', required=True) + return argparser.parse_args() + def main(): + args = parse_args() + + print(f'Loading asmap database "{args.asmap}"…', end='', file=sys.stderr, flush=True) + with open(args.asmap, 'rb') as f: + asmap = ASMap.from_binary(f.read()) + print('Done.', file=sys.stderr) + + print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True) lines = sys.stdin.readlines() ips = [parseline(line) for line in lines] + print('Done.', file=sys.stderr) print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr) print(f'{ip_stats(ips):s} Initial', file=sys.stderr) @@ -230,15 +218,18 @@ def main(): ips = filtermultiport(ips) print(f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', file=sys.stderr) # Look up ASNs and limit results, both per ASN and globally. - ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) + ips = filterbyasn(asmap, ips, MAX_SEEDS_PER_ASN, NSEEDS) print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr) # Sort the results by IP address (for deterministic output). ips.sort(key=lambda x: (x['net'], x['sortkey'])) for ip in ips: if ip['net'] == 'ipv6': - print('[%s]:%i' % (ip['ip'], ip['port'])) + print(f"[{ip['ip']}]:{ip['port']}", end="") else: - print('%s:%i' % (ip['ip'], ip['port'])) + print(f"{ip['ip']}:{ip['port']}", end="") + if 'asn' in ip: + print(f" # AS{ip['asn']}", end="") + print() if __name__ == '__main__': main() diff --git a/depends/Makefile b/depends/Makefile index 20f5f6b2c6..5a1c472a15 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -42,8 +42,12 @@ NO_UPNP ?= NO_USDT ?= NO_NATPMP ?= MULTIPROCESS ?= +LTO ?= FALLBACK_DOWNLOAD_PATH ?= https://bitcoincore.org/depends-sources +C_STANDARD ?= c11 +CXX_STANDARD ?= c++17 + BUILD = $(shell ./config.guess) HOST ?= $(BUILD) PATCHES_PATH = $(BASEDIR)/patches @@ -140,8 +144,8 @@ include packages/packages.mk # 2. 
Before including packages/*.mk (excluding packages/packages.mk), since # they rely on the build_id variables # -build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') -$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages) @@ -242,6 +246,7 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_ -e 's|@no_usdt@|$(NO_USDT)|' \ -e 's|@no_natpmp@|$(NO_NATPMP)|' \ -e 's|@multiprocess@|$(MULTIPROCESS)|' \ + -e 's|@lto@|$(LTO)|' \ -e 's|@debug@|$(DEBUG)|' \ $< > $@ touch $@ diff --git a/depends/README.md b/depends/README.md index da2a74e0e7..66e1ddc4eb 100644 --- a/depends/README.md +++ b/depends/README.md @@ -96,6 +96,8 @@ The following can be set when running make: `make FOO=bar` - `BASE_CACHE`: Built packages will be placed here - `SDK_PATH`: Path where SDKs can be found (used by macOS) - `FALLBACK_DOWNLOAD_PATH`: If a source file can't be fetched, try here before giving up +- `C_STANDARD`: Set the C standard version used. Defaults to `c11`. +- `CXX_STANDARD`: Set the C++ standard version used. Defaults to `c++17`. - `NO_QT`: Don't download/build/cache Qt and its dependencies - `NO_QR`: Don't download/build/cache packages needed for enabling qrencode - `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ @@ -117,6 +119,7 @@ The following can be set when running make: `make FOO=bar` - `LOG`: Use file-based logging for individual packages. During a package build its log file resides in the `depends` directory, and the log file is printed out automatically in case of build error. After successful build log files are moved along with package archives +- `LTO`: Use LTO when building packages. If some packages are not built, for example `make NO_WALLET=1`, the appropriate options will be passed to bitcoin's configure. In this case, `--disable-wallet`. diff --git a/depends/config.site.in b/depends/config.site.in index 03dabeea0a..189330c42d 100644 --- a/depends/config.site.in +++ b/depends/config.site.in @@ -78,6 +78,10 @@ if test "@host_os@" = darwin; then BREW=no fi +if test -z "$enable_lto" && test -n "@lto@"; then + enable_lto=yes +fi + PKG_CONFIG="$(which pkg-config) --static" # These two need to remain exported because pkg-config does not see them diff --git a/depends/gen_id b/depends/gen_id index ac69ca7ee1..a0cd586461 100755 --- a/depends/gen_id +++ b/depends/gen_id @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Usage: env [ CC=... ] [ CXX=... ] [ AR=... ] [ RANLIB=... ] [ STRIP=... ] \ -# [ DEBUG=... ] ./build-id [ID_SALT]... +# [ DEBUG=... ] [ LTO=... 
] ./build-id [ID_SALT]... # # Prints to stdout a SHA256 hash representing the current toolset, used by # depends/Makefile as a build id for caching purposes (detecting when the @@ -63,6 +63,10 @@ env | grep '^STRIP_' echo "END STRIP" + echo "BEGIN LTO" + echo "LTO=${LTO}" + echo "END LTO" + echo "END ALL" ) | if [ -n "$DEBUG" ] && command -v tee > /dev/null 2>&1; then # When debugging and `tee` is available, output the preimage to stderr diff --git a/depends/hosts/android.mk b/depends/hosts/android.mk index fcc1c4f5c3..b53966dcf8 100644 --- a/depends/hosts/android.mk +++ b/depends/hosts/android.mk @@ -5,6 +5,15 @@ else android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang++ android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang endif + +android_CFLAGS=-std=$(C_STANDARD) +android_CXXFLAGS=-std=$(CXX_STANDARD) + +ifneq ($(LTO),) +android_CFLAGS += -flto +android_LDFLAGS += -flto +endif + android_AR=$(ANDROID_TOOLCHAIN_BIN)/llvm-ar android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/llvm-ranlib diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk index bf9b7625f2..8507e28532 100644 --- a/depends/hosts/darwin.mk +++ b/depends/hosts/darwin.mk @@ -109,8 +109,14 @@ darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ -Xclang -internal-externc-isystem$(clang_resource_dir)/include \ -Xclang -internal-externc-isystem$(OSX_SDK)/usr/include -darwin_CFLAGS=-pipe -darwin_CXXFLAGS=$(darwin_CFLAGS) +darwin_CFLAGS=-pipe -std=$(C_STANDARD) +darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +darwin_CFLAGS += -flto +darwin_CXXFLAGS += -flto +darwin_LDFLAGS += -flto +endif darwin_release_CFLAGS=-O2 darwin_release_CXXFLAGS=$(darwin_release_CFLAGS) diff --git a/depends/hosts/freebsd.mk b/depends/hosts/freebsd.mk index 0a62347b57..5351d0b900 100644 --- a/depends/hosts/freebsd.mk +++ b/depends/hosts/freebsd.mk @@ -1,5 +1,11 @@ -freebsd_CFLAGS=-pipe -freebsd_CXXFLAGS=$(freebsd_CFLAGS) +freebsd_CFLAGS=-pipe -std=$(C_STANDARD) +freebsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +freebsd_CFLAGS += -flto +freebsd_CXXFLAGS += -flto +freebsd_LDFLAGS += -flto +endif freebsd_release_CFLAGS=-O2 freebsd_release_CXXFLAGS=$(freebsd_release_CFLAGS) diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk index 07da752492..b101043439 100644 --- a/depends/hosts/linux.mk +++ b/depends/hosts/linux.mk @@ -1,5 +1,11 @@ -linux_CFLAGS=-pipe -linux_CXXFLAGS=$(linux_CFLAGS) +linux_CFLAGS=-pipe -std=$(C_STANDARD) +linux_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +linux_CFLAGS += -flto +linux_CXXFLAGS += -flto +linux_LDFLAGS += -flto +endif linux_release_CFLAGS=-O2 linux_release_CXXFLAGS=$(linux_release_CFLAGS) diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk index 48020d71af..b98f9ab7ac 100644 --- a/depends/hosts/mingw32.mk +++ b/depends/hosts/mingw32.mk @@ -2,8 +2,14 @@ ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-g++-posix"),) mingw32_CXX := $(host)-g++-posix endif -mingw32_CFLAGS=-pipe -mingw32_CXXFLAGS=$(mingw32_CFLAGS) +mingw32_CFLAGS=-pipe -std=$(C_STANDARD) +mingw32_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +mingw32_CFLAGS += -flto +mingw32_CXXFLAGS += -flto +mingw32_LDFLAGS += -flto +endif mingw32_release_CFLAGS=-O2 mingw32_release_CXXFLAGS=$(mingw32_release_CFLAGS) diff --git a/depends/hosts/netbsd.mk b/depends/hosts/netbsd.mk index b3e4545a64..8342dcc6ed 100644 --- a/depends/hosts/netbsd.mk +++ b/depends/hosts/netbsd.mk @@ -1,4 +1,12 @@ -netbsd_CFLAGS=-pipe +netbsd_CFLAGS=-pipe -std=$(C_STANDARD) 
+netbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +netbsd_CFLAGS += -flto +netbsd_CXXFLAGS += -flto +netbsd_LDFLAGS += -flto +endif + netbsd_CXXFLAGS=$(netbsd_CFLAGS) netbsd_release_CFLAGS=-O2 diff --git a/depends/hosts/openbsd.mk b/depends/hosts/openbsd.mk index 5988f24bff..d330e94d2e 100644 --- a/depends/hosts/openbsd.mk +++ b/depends/hosts/openbsd.mk @@ -1,5 +1,11 @@ -openbsd_CFLAGS=-pipe -openbsd_CXXFLAGS=$(openbsd_CFLAGS) +openbsd_CFLAGS=-pipe -std=$(C_STANDARD) +openbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) + +ifneq ($(LTO),) +openbsd_CFLAGS += -flto +openbsd_CXXFLAGS += -flto +openbsd_LDFLAGS += -flto +endif openbsd_release_CFLAGS=-O2 openbsd_release_CXXFLAGS=$(openbsd_release_CFLAGS) diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk index dc536fd399..80c7ce8429 100644 --- a/depends/packages/bdb.mk +++ b/depends/packages/bdb.mk @@ -15,7 +15,6 @@ $(package)_config_opts_netbsd=--with-pic $(package)_config_opts_openbsd=--with-pic $(package)_config_opts_android=--with-pic $(package)_cflags+=-Wno-error=implicit-function-declaration -$(package)_cxxflags+=-std=c++17 $(package)_cppflags_mingw32=-DUNICODE -D_UNICODE endef diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk index c74ae15b31..267ed11253 100644 --- a/depends/packages/zeromq.mk +++ b/depends/packages/zeromq.mk @@ -16,7 +16,6 @@ define $(package)_set_vars $(package)_config_opts_netbsd=--with-pic $(package)_config_opts_openbsd=--with-pic $(package)_config_opts_android=--with-pic - $(package)_cxxflags+=-std=c++17 endef define $(package)_preprocess_cmds diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf index 543407f853..cb94bf07b4 100644 --- a/depends/patches/qt/mac-qmake.conf +++ b/depends/patches/qt/mac-qmake.conf @@ -15,7 +15,7 @@ QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION} QMAKE_MAC_SDK.macosx.PlatformPath = /phony !host_build: QMAKE_CFLAGS += -target $${MAC_TARGET} !host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS -!host_build: QMAKE_CXXFLAGS += $$QMAKE_CFLAGS +!host_build: QMAKE_CXXFLAGS += -target $${MAC_TARGET} !host_build: QMAKE_LFLAGS += -target $${MAC_TARGET} QMAKE_AR = $${CROSS_COMPILE}ar cq QMAKE_RANLIB=$${CROSS_COMPILE}ranlib diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md index 6b2d404427..afbb5c8e75 100644 --- a/doc/build-openbsd.md +++ b/doc/build-openbsd.md @@ -34,7 +34,7 @@ It is not necessary to build wallet functionality to run either `bitcoind` or `b `sqlite3` is required to support [descriptor wallets](descriptors.md). ``` bash -pkg_add install sqlite3 +pkg_add sqlite3 ``` ###### Legacy Wallet Support diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 4843e61ee8..23f975df34 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -831,11 +831,6 @@ int GetInt(Tabs tab) Strings and formatting ------------------------ -- Be careful of `LogPrint` versus `LogPrintf`. `LogPrint` takes a `category` argument, `LogPrintf` does not. - - - *Rationale*: Confusion of these can result in runtime exceptions due to - formatting mismatch, and it is easy to get wrong because of subtly similar naming. - - Use `std::string`, avoid C string manipulation functions. - *Rationale*: C++ string handling is marginally safer, less scope for @@ -926,14 +921,19 @@ Threads and synchronization - Prefer `Mutex` type to `RecursiveMutex` one. 
- Consistently use [Clang Thread Safety Analysis](https://clang.llvm.org/docs/ThreadSafetyAnalysis.html) annotations to - get compile-time warnings about potential race conditions in code. Combine annotations in function declarations with - run-time asserts in function definitions: + get compile-time warnings about potential race conditions or deadlocks in code. - In functions that are declared separately from where they are defined, the thread safety annotations should be added exclusively to the function declaration. Annotations on the definition could lead to false positives (lack of compile failure) at call sites between the two. + - Prefer locks that are in a class rather than global, and that are + internal to a class (private or protected) rather than public. + + - Combine annotations in function declarations with run-time asserts in + function definitions: + ```C++ // txmempool.h class CTxMemPool @@ -957,21 +957,37 @@ void CTxMemPool::UpdateTransactionsFromBlock(...) ```C++ // validation.h -class ChainstateManager +class CChainState { +protected: + ... + Mutex m_chainstate_mutex; + ... public: ... - bool ProcessNewBlock(...) LOCKS_EXCLUDED(::cs_main); + bool ActivateBestChain( + BlockValidationState& state, + std::shared_ptr<const CBlock> pblock = nullptr) + EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex) + LOCKS_EXCLUDED(::cs_main); + ... + bool PreciousBlock(BlockValidationState& state, CBlockIndex* pindex) + EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex) + LOCKS_EXCLUDED(::cs_main); ... } // validation.cpp -bool ChainstateManager::ProcessNewBlock(...) +bool CChainState::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex) { + AssertLockNotHeld(m_chainstate_mutex); AssertLockNotHeld(::cs_main); - ... - LOCK(::cs_main); - ... + { + LOCK(cs_main); + ... + } + + return ActivateBestChain(state, std::shared_ptr<const CBlock>()); } ``` @@ -1142,10 +1158,6 @@ Current subtrees include: - src/crypto/ctaes - Upstream at https://github.com/bitcoin-core/ctaes ; maintained by Core contributors. -- src/univalue - - Subtree at https://github.com/bitcoin-core/univalue-subtree ; maintained by Core contributors. - - Deviates from upstream https://github.com/jgarzik/univalue. - - src/minisketch - Upstream at https://github.com/sipa/minisketch ; maintained by Core contributors. diff --git a/doc/policy/mempool-replacements.md b/doc/policy/mempool-replacements.md index 3e844f8d7b..18f08daf88 100644 --- a/doc/policy/mempool-replacements.md +++ b/doc/policy/mempool-replacements.md @@ -51,6 +51,13 @@ other consensus and policy rules, each of the following conditions are met: significant portions of the node's mempool using replacements with multiple directly conflicting transactions, each with large descendant sets. +6. The replacement transaction's feerate is greater than the feerates of all directly conflicting + transactions. + + *Rationale*: This rule was originally intended to ensure that the replacement transaction is + preferable for block-inclusion, compared to what would be removed from the mempool. This rule + predates ancestor feerate-based transaction selection. + This set of rules is similar but distinct from BIP125. 
## History diff --git a/doc/productivity.md b/doc/productivity.md index a01c6f545d..e9b7bc270c 100644 --- a/doc/productivity.md +++ b/doc/productivity.md @@ -9,6 +9,7 @@ Table of Contents * [Disable features with `./configure`](#disable-features-with-configure) * [Make use of your threads with `make -j`](#make-use-of-your-threads-with-make--j) * [Only build what you need](#only-build-what-you-need) + * [Compile on multiple machines](#compile-on-multiple-machines) * [Multiple working directories with `git worktrees`](#multiple-working-directories-with-git-worktrees) * [Interactive "dummy rebases" for fixups and execs with `git merge-base`](#interactive-dummy-rebases-for-fixups-and-execs-with-git-merge-base) * [Writing code](#writing-code) @@ -81,6 +82,10 @@ make -C src bitcoin_bench (You can and should combine this with `-j`, as above, for a parallel build.) +### Compile on multiple machines + +If you have more than one computer at your disposal, you can use [distcc](https://www.distcc.org) to speed up compilation. This is easiest when all computers run the same operating system and compiler version. + ### Multiple working directories with `git worktrees` If you work with multiple branches or multiple copies of the repository, you should try `git worktrees`. diff --git a/doc/release-notes-15936.md b/doc/release-notes-15936.md new file mode 100644 index 0000000000..90c0413b9a --- /dev/null +++ b/doc/release-notes-15936.md @@ -0,0 +1,15 @@ +GUI changes +----------- + +Configuration changes made in the bitcoin GUI (such as the pruning setting, +proxy settings, UPNP preferences) are now saved to `<datadir>/settings.json` +file rather than to the Qt settings backend (windows registry or unix desktop +config files), so these settings will now apply to bitcoind, instead of being +ignored. + +Also, the interaction between GUI settings and `bitcoin.conf` settings is +simplified. Settings from `bitcoin.conf` are now displayed normally in the GUI +settings dialog, instead of in a separate warning message ("Options set in this +dialog are overridden by the configuration file: -setting=value"). And these +settings can now be edited because `settings.json` values take precedence over +`bitcoin.conf` values. 
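As a rough illustration of why GUI-written options now carry over to `bitcoind`: once a setting is stored as JSON in `<datadir>/settings.json`, any component that parses that file sees it. The sketch below reads a made-up fragment with UniValue (the JSON library already used in this tree); the fragment and the `prune` key are assumptions for the example, and the real read/write path goes through `src/util/settings.cpp`.

```C++
// Illustrative sketch only (not part of this diff): both bitcoind and the GUI
// can see a setting once it is stored as JSON in <datadir>/settings.json.
// The JSON fragment and the "prune" key are assumptions for the example.
#include <univalue.h>

#include <cstdio>
#include <string>

int main()
{
    const std::string settings_text{R"({"prune": "550", "upnp": true})"};

    UniValue settings;
    if (!settings.read(settings_text)) {
        std::fprintf(stderr, "settings.json is corrupt or invalid\n");
        return 1;
    }

    if (settings.exists("prune")) {
        // The same key is read at startup by bitcoind, so the GUI choice is not ignored.
        std::printf("prune=%s\n", settings["prune"].getValStr().c_str());
    }
    return 0;
}
```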
diff --git a/src/Makefile.am b/src/Makefile.am index 721bf7104c..fa716af619 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -197,7 +197,7 @@ BITCOIN_CORE_H = \ node/minisketchwrapper.h \ node/psbt.h \ node/transaction.h \ - node/ui_interface.h \ + node/interface_ui.h \ node/utxo_snapshot.h \ noui.h \ outputtype.h \ @@ -374,7 +374,7 @@ libbitcoin_node_a_SOURCES = \ node/minisketchwrapper.cpp \ node/psbt.cpp \ node/transaction.cpp \ - node/ui_interface.cpp \ + node/interface_ui.cpp \ noui.cpp \ policy/fees.cpp \ policy/packages.cpp \ @@ -876,7 +876,7 @@ libbitcoinkernel_la_SOURCES = \ logging.cpp \ node/blockstorage.cpp \ node/chainstate.cpp \ - node/ui_interface.cpp \ + node/interface_ui.cpp \ policy/feerate.cpp \ policy/fees.cpp \ policy/packages.cpp \ diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 72037b3db2..b4acc47aa1 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -270,6 +270,7 @@ BITCOIN_QT_WALLET_CPP = \ qt/transactiondesc.cpp \ qt/transactiondescdialog.cpp \ qt/transactionfilterproxy.cpp \ + qt/transactionoverviewwidget.cpp \ qt/transactionrecord.cpp \ qt/transactiontablemodel.cpp \ qt/transactionview.cpp \ diff --git a/src/addrman.cpp b/src/addrman.cpp index ed823eeb05..204bb544c5 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -66,14 +66,16 @@ int AddrInfo::GetBucketPosition(const uint256& nKey, bool fNew, int nBucket) con bool AddrInfo::IsTerrible(int64_t nNow) const { - if (nLastTry && nLastTry >= nNow - 60) // never remove things tried in the last minute + if (nNow - nLastTry <= 60) { // never remove things tried in the last minute return false; + } if (nTime > nNow + 10 * 60) // came in a flying DeLorean return true; - if (nTime == 0 || nNow - nTime > ADDRMAN_HORIZON_DAYS * 24 * 60 * 60) // not seen in recent history + if (nNow - nTime > ADDRMAN_HORIZON_DAYS * 24 * 60 * 60) { // not seen in recent history return true; + } if (nLastSuccess == 0 && nAttempts >= ADDRMAN_RETRIES) // tried N times and never a success return true; @@ -557,15 +559,17 @@ bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, int64_ // periodically update nTime bool fCurrentlyOnline = (GetAdjustedTime() - addr.nTime < 24 * 60 * 60); int64_t nUpdateInterval = (fCurrentlyOnline ? 
60 * 60 : 24 * 60 * 60); - if (addr.nTime && (!pinfo->nTime || pinfo->nTime < addr.nTime - nUpdateInterval - nTimePenalty)) + if (pinfo->nTime < addr.nTime - nUpdateInterval - nTimePenalty) { pinfo->nTime = std::max((int64_t)0, addr.nTime - nTimePenalty); + } // add services pinfo->nServices = ServiceFlags(pinfo->nServices | addr.nServices); // do not update if no new information is present - if (!addr.nTime || (pinfo->nTime && addr.nTime <= pinfo->nTime)) + if (addr.nTime <= pinfo->nTime) { return false; + } // do not update if the entry was already in the "tried" table if (pinfo->fInTried) diff --git a/src/banman.cpp b/src/banman.cpp index 2a6e0e010f..508383d9f2 100644 --- a/src/banman.cpp +++ b/src/banman.cpp @@ -6,7 +6,7 @@ #include <banman.h> #include <netaddress.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <sync.h> #include <util/system.h> #include <util/time.h> diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp index e80b9e1ac2..60d991fab9 100644 --- a/src/bench/mempool_eviction.cpp +++ b/src/bench/mempool_eviction.cpp @@ -108,7 +108,7 @@ static void MempoolEviction(benchmark::Bench& bench) tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL; tx7.vout[1].nValue = 10 * COIN; - CTxMemPool pool; + CTxMemPool& pool = *Assert(testing_setup->m_node.mempool); LOCK2(cs_main, pool.cs); // Create transaction references outside the "hot loop" const CTransactionRef tx1_r{MakeTransactionRef(tx1)}; diff --git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp index 6e322ba6aa..0e6fdae3d7 100644 --- a/src/bench/rpc_mempool.cpp +++ b/src/bench/rpc_mempool.cpp @@ -3,7 +3,9 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> +#include <chainparamsbase.h> #include <rpc/mempool.h> +#include <test/util/setup_common.h> #include <txmempool.h> #include <univalue.h> @@ -17,7 +19,8 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& poo static void RpcMempool(benchmark::Bench& bench) { - CTxMemPool pool; + const auto testing_setup = MakeNoLogFileContext<const ChainTestingSetup>(CBaseChainParams::MAIN); + CTxMemPool& pool = *Assert(testing_setup->m_node.mempool); LOCK2(cs_main, pool.cs); for (int i = 0; i < 1000; ++i) { diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index b9e5a81f8d..0db2b75384 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -9,6 +9,7 @@ #include <chainparamsbase.h> #include <clientversion.h> +#include <compat.h> #include <compat/stdin.h> #include <policy/feerate.h> #include <rpc/client.h> @@ -1212,19 +1213,11 @@ static int CommandLineRPC(int argc, char *argv[]) return nRet; } -#ifdef WIN32 -// Export main() and ensure working ASLR on Windows. -// Exporting a symbol will prevent the linker from stripping -// the .reloc section from the binary, which is a requirement -// for ASLR. This is a temporary workaround until a fixed -// version of binutils is used for releases. 
-__declspec(dllexport) int main(int argc, char* argv[]) +MAIN_FUNCTION { +#ifdef WIN32 util::WinCmdLineArgs winArgs; std::tie(argc, argv) = winArgs.get(); -#else -int main(int argc, char* argv[]) -{ #endif SetupEnvironment(); if (!SetupNetworking()) { diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index bcefb3f18e..e0d5c6e5dc 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -8,6 +8,7 @@ #include <clientversion.h> #include <coins.h> +#include <compat.h> #include <consensus/amount.h> #include <consensus/consensus.h> #include <core_io.h> @@ -854,7 +855,7 @@ static int CommandLineRawTx(int argc, char* argv[]) return nRet; } -int main(int argc, char* argv[]) +MAIN_FUNCTION { SetupEnvironment(); diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp index a2f3fca26c..1739804edb 100644 --- a/src/bitcoin-util.cpp +++ b/src/bitcoin-util.cpp @@ -11,10 +11,12 @@ #include <chainparams.h> #include <chainparamsbase.h> #include <clientversion.h> +#include <compat.h> #include <core_io.h> #include <streams.h> #include <util/system.h> #include <util/translation.h> +#include <version.h> #include <atomic> #include <cstdio> @@ -141,16 +143,7 @@ static int Grind(const std::vector<std::string>& args, std::string& strPrint) return EXIT_SUCCESS; } -#ifdef WIN32 -// Export main() and ensure working ASLR on Windows. -// Exporting a symbol will prevent the linker from stripping -// the .reloc section from the binary, which is a requirement -// for ASLR. This is a temporary workaround until a fixed -// version of binutils is used for releases. -__declspec(dllexport) int main(int argc, char* argv[]) -#else -int main(int argc, char* argv[]) -#endif +MAIN_FUNCTION { ArgsManager& args = gArgs; SetupEnvironment(); diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp index 2f3dd45267..7bec3292a1 100644 --- a/src/bitcoin-wallet.cpp +++ b/src/bitcoin-wallet.cpp @@ -9,6 +9,7 @@ #include <chainparams.h> #include <chainparamsbase.h> #include <clientversion.h> +#include <compat.h> #include <interfaces/init.h> #include <key.h> #include <logging.h> @@ -88,7 +89,7 @@ static bool WalletAppInit(ArgsManager& args, int argc, char* argv[]) return true; } -int main(int argc, char* argv[]) +MAIN_FUNCTION { ArgsManager& args = gArgs; #ifdef WIN32 diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index 92e73d7c2a..be894e192e 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -14,7 +14,7 @@ #include <interfaces/chain.h> #include <interfaces/init.h> #include <node/context.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <noui.h> #include <shutdown.h> #include <util/check.h> @@ -256,7 +256,7 @@ static bool AppInit(NodeContext& node, int argc, char* argv[]) return fRet; } -int main(int argc, char* argv[]) +MAIN_FUNCTION { #ifdef WIN32 util::WinCmdLineArgs winArgs; diff --git a/src/compat.h b/src/compat.h index 3ec4ab53fd..f41c501c84 100644 --- a/src/compat.h +++ b/src/compat.h @@ -86,6 +86,17 @@ typedef void* sockopt_arg_type; typedef char* sockopt_arg_type; #endif +#ifdef WIN32 +// Export main() and ensure working ASLR when using mingw-w64. +// Exporting a symbol will prevent the linker from stripping +// the .reloc section from the binary, which is a requirement +// for ASLR. While release builds are not affected, anyone +// building with a binutils < 2.36 is subject to this ld bug. 
+#define MAIN_FUNCTION __declspec(dllexport) int main(int argc, char* argv[]) +#else +#define MAIN_FUNCTION int main(int argc, char* argv[]) +#endif + // Note these both should work with the current usage of poll, but best to be safe // WIN32 poll is broken https://daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken/ // __APPLE__ poll is broke https://github.com/bitcoin/bitcoin/pull/14336#issuecomment-437384408 diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp index 4f3e6f7fa3..f736b2d867 100644 --- a/src/crypto/chacha_poly_aead.cpp +++ b/src/crypto/chacha_poly_aead.cpp @@ -2,6 +2,10 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#if defined(HAVE_CONFIG_H) +#include <config/bitcoin-config.h> +#endif + #include <crypto/chacha_poly_aead.h> #include <crypto/poly1305.h> diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 44937dc523..b8f69b038c 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -11,7 +11,7 @@ #include <chainparamsbase.h> #include <compat.h> #include <netbase.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <rpc/protocol.h> // For HTTP status codes #include <shutdown.h> #include <sync.h> @@ -400,7 +400,7 @@ bool InitHTTPServer() LogPrint(BCLog::HTTP, "Initialized HTTP server\n"); int workQueueDepth = std::max((long)gArgs.GetIntArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L); - LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth); + LogPrintfCategory(BCLog::HTTP, "creating work queue of depth %d\n", workQueueDepth); g_work_queue = std::make_unique<WorkQueue<HTTPClosure>>(workQueueDepth); // transfer ownership to eventBase/HTTP via .release() @@ -424,7 +424,7 @@ void StartHTTPServer() { LogPrint(BCLog::HTTP, "Starting HTTP server\n"); int rpcThreads = std::max((long)gArgs.GetIntArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L); - LogPrintf("HTTP: starting %d worker threads\n", rpcThreads); + LogPrintfCategory(BCLog::HTTP, "starting %d worker threads\n", rpcThreads); g_thread_http = std::thread(ThreadHTTP, eventBase); for (int i = 0; i < rpcThreads; i++) { diff --git a/src/i2p.cpp b/src/i2p.cpp index e08b5461fe..caff8c1e69 100644 --- a/src/i2p.cpp +++ b/src/i2p.cpp @@ -150,8 +150,8 @@ bool Session::Accept(Connection& conn) throw std::runtime_error("wait on socket failed"); } - if ((occurred & Sock::RECV) == 0) { - // Timeout, no incoming connections within MAX_WAIT_FOR_IO. + if (occurred == 0) { + // Timeout, no incoming connections or errors within MAX_WAIT_FOR_IO. continue; } @@ -242,7 +242,7 @@ std::string Session::Reply::Get(const std::string& key) const template <typename... Args> void Session::Log(const std::string& fmt, const Args&... 
args) const { - LogPrint(BCLog::I2P, "I2P: %s\n", tfm::format(fmt, args...)); + LogPrint(BCLog::I2P, "%s\n", tfm::format(fmt, args...)); } Session::Reply Session::SendRequestAndGetReply(const Sock& sock, @@ -376,8 +376,8 @@ void Session::CreateIfNotCreatedAlready() m_session_id = session_id; m_control_sock = std::move(sock); - LogPrintf("I2P: SAM session created: session id=%s, my address=%s\n", m_session_id, - m_my_addr.ToString()); + LogPrintfCategory(BCLog::I2P, "SAM session created: session id=%s, my address=%s\n", + m_session_id, m_my_addr.ToString()); } std::unique_ptr<Sock> Session::StreamAccept() diff --git a/src/index/base.cpp b/src/index/base.cpp index 9f0c1dea24..323547900d 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -5,7 +5,7 @@ #include <chainparams.h> #include <index/base.h> #include <node/blockstorage.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <shutdown.h> #include <tinyformat.h> #include <util/syscall_sandbox.h> diff --git a/src/init.cpp b/src/init.cpp index d0fd6074b1..f20c55dcb1 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -40,7 +40,7 @@ #include <node/chainstate.h> #include <node/context.h> #include <node/miner.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <policy/feerate.h> #include <policy/fees.h> #include <policy/policy.h> @@ -599,7 +599,7 @@ void SetupServerArgs(ArgsManager& argsman) } static bool fHaveGenesis = false; -static Mutex g_genesis_wait_mutex; +static GlobalMutex g_genesis_wait_mutex; static std::condition_variable g_genesis_wait_cv; static void BlockNotifyGenesisWait(const CBlockIndex* pBlockIndex) @@ -1506,8 +1506,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) uiInterface.InitMessage(_("Verifying blocks…").translated); auto check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS); if (chainman.m_blockman.m_have_pruned && check_blocks > MIN_BLOCKS_TO_KEEP) { - LogPrintf("Prune: pruned datadir may not have more than %d blocks; only checking available blocks\n", - MIN_BLOCKS_TO_KEEP); + LogPrintfCategory(BCLog::PRUNE, "pruned datadir may not have more than %d blocks; only checking available blocks\n", + MIN_BLOCKS_TO_KEEP); } maybe_verify_error = VerifyLoadedChainstate(chainman, fReset, diff --git a/src/init/common.cpp b/src/init/common.cpp index d4e45454d2..a0cdf44f47 100644 --- a/src/init/common.cpp +++ b/src/init/common.cpp @@ -9,7 +9,7 @@ #include <clientversion.h> #include <fs.h> #include <logging.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <tinyformat.h> #include <util/system.h> #include <util/time.h> diff --git a/src/kernel/context.h b/src/kernel/context.h index 0a08511564..9746ef994b 100644 --- a/src/kernel/context.h +++ b/src/kernel/context.h @@ -12,7 +12,7 @@ class ECCVerifyHandle; namespace kernel { //! Context struct holding the kernel library's logically global state, and //! passed to external libbitcoin_kernel functions which need access to this -//! state. The kernel libary API is a work in progress, so state organization +//! state. The kernel library API is a work in progress, so state organization //! and member list will evolve over time. //! //! State stored directly in this struct should be simple. 
More complex state diff --git a/src/logging.h b/src/logging.h index 8a896b6b33..50869ad89a 100644 --- a/src/logging.h +++ b/src/logging.h @@ -199,13 +199,18 @@ static inline void LogPrintf_(const std::string& logging_function, const std::st } } - #define LogPrintLevel_(category, level, ...) LogPrintf_(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__) +// Log unconditionally. #define LogPrintf(...) LogPrintLevel_(BCLog::LogFlags::NONE, BCLog::Level::None, __VA_ARGS__) +// Log unconditionally, prefixing the output with the passed category name. +#define LogPrintfCategory(category, ...) LogPrintLevel_(category, BCLog::Level::None, __VA_ARGS__) + // Use a macro instead of a function for conditional logging to prevent // evaluating arguments when logging for the category is not enabled. + +// Log conditionally, prefixing the output with the passed category name. #define LogPrint(category, ...) \ do { \ if (LogAcceptCategory((category), BCLog::Level::Debug)) { \ @@ -213,6 +218,7 @@ static inline void LogPrintf_(const std::string& logging_function, const std::st } \ } while (0) +// Log conditionally, prefixing the output with the passed category name and severity level. #define LogPrintLevel(category, level, ...) \ do { \ if (LogAcceptCategory((category), (level))) { \ diff --git a/src/net.cpp b/src/net.cpp index 317366df90..d42f130af7 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -21,7 +21,7 @@ #include <net_permissions.h> #include <netaddress.h> #include <netbase.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <protocol.h> #include <random.h> #include <scheduler.h> @@ -114,7 +114,7 @@ static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256 // bool fDiscover = true; bool fListen = true; -Mutex g_maplocalhost_mutex; +GlobalMutex g_maplocalhost_mutex; std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex); static bool vfLimited[NET_MAX] GUARDED_BY(g_maplocalhost_mutex) = {}; std::string strSubVersion; @@ -1404,13 +1404,12 @@ bool CConnman::InactivityCheck(const CNode& node) const return false; } -bool CConnman::GenerateSelectSet(const std::vector<CNode*>& nodes, - std::set<SOCKET>& recv_set, - std::set<SOCKET>& send_set, - std::set<SOCKET>& error_set) +Sock::EventsPerSock CConnman::GenerateWaitSockets(Span<CNode* const> nodes) { + Sock::EventsPerSock events_per_sock; + for (const ListenSocket& hListenSocket : vhListenSocket) { - recv_set.insert(hListenSocket.sock->Get()); + events_per_sock.emplace(hListenSocket.sock, Sock::Events{Sock::RECV}); } for (CNode* pnode : nodes) { @@ -1437,172 +1436,49 @@ bool CConnman::GenerateSelectSet(const std::vector<CNode*>& nodes, continue; } - error_set.insert(pnode->m_sock->Get()); + Sock::Event requested{0}; if (select_send) { - send_set.insert(pnode->m_sock->Get()); - continue; - } - if (select_recv) { - recv_set.insert(pnode->m_sock->Get()); - } - } - - return !recv_set.empty() || !send_set.empty() || !error_set.empty(); -} - -#ifdef USE_POLL -void CConnman::SocketEvents(const std::vector<CNode*>& nodes, - std::set<SOCKET>& recv_set, - std::set<SOCKET>& send_set, - std::set<SOCKET>& error_set) -{ - std::set<SOCKET> recv_select_set, send_select_set, error_select_set; - if (!GenerateSelectSet(nodes, recv_select_set, send_select_set, error_select_set)) { - interruptNet.sleep_for(std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)); - return; - } - - std::unordered_map<SOCKET, struct pollfd> pollfds; - for (SOCKET socket_id : recv_select_set) { - pollfds[socket_id].fd = 
socket_id; - pollfds[socket_id].events |= POLLIN; - } - - for (SOCKET socket_id : send_select_set) { - pollfds[socket_id].fd = socket_id; - pollfds[socket_id].events |= POLLOUT; - } - - for (SOCKET socket_id : error_select_set) { - pollfds[socket_id].fd = socket_id; - // These flags are ignored, but we set them for clarity - pollfds[socket_id].events |= POLLERR|POLLHUP; - } - - std::vector<struct pollfd> vpollfds; - vpollfds.reserve(pollfds.size()); - for (auto it : pollfds) { - vpollfds.push_back(std::move(it.second)); - } - - if (poll(vpollfds.data(), vpollfds.size(), SELECT_TIMEOUT_MILLISECONDS) < 0) return; - - if (interruptNet) return; - - for (struct pollfd pollfd_entry : vpollfds) { - if (pollfd_entry.revents & POLLIN) recv_set.insert(pollfd_entry.fd); - if (pollfd_entry.revents & POLLOUT) send_set.insert(pollfd_entry.fd); - if (pollfd_entry.revents & (POLLERR|POLLHUP)) error_set.insert(pollfd_entry.fd); - } -} -#else -void CConnman::SocketEvents(const std::vector<CNode*>& nodes, - std::set<SOCKET>& recv_set, - std::set<SOCKET>& send_set, - std::set<SOCKET>& error_set) -{ - std::set<SOCKET> recv_select_set, send_select_set, error_select_set; - if (!GenerateSelectSet(nodes, recv_select_set, send_select_set, error_select_set)) { - interruptNet.sleep_for(std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)); - return; - } - - // - // Find which sockets have data to receive - // - struct timeval timeout; - timeout.tv_sec = 0; - timeout.tv_usec = SELECT_TIMEOUT_MILLISECONDS * 1000; // frequency to poll pnode->vSend - - fd_set fdsetRecv; - fd_set fdsetSend; - fd_set fdsetError; - FD_ZERO(&fdsetRecv); - FD_ZERO(&fdsetSend); - FD_ZERO(&fdsetError); - SOCKET hSocketMax = 0; - - for (SOCKET hSocket : recv_select_set) { - FD_SET(hSocket, &fdsetRecv); - hSocketMax = std::max(hSocketMax, hSocket); - } - - for (SOCKET hSocket : send_select_set) { - FD_SET(hSocket, &fdsetSend); - hSocketMax = std::max(hSocketMax, hSocket); - } - - for (SOCKET hSocket : error_select_set) { - FD_SET(hSocket, &fdsetError); - hSocketMax = std::max(hSocketMax, hSocket); - } - - int nSelect = select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout); - - if (interruptNet) - return; - - if (nSelect == SOCKET_ERROR) - { - int nErr = WSAGetLastError(); - LogPrintf("socket select error %s\n", NetworkErrorString(nErr)); - for (unsigned int i = 0; i <= hSocketMax; i++) - FD_SET(i, &fdsetRecv); - FD_ZERO(&fdsetSend); - FD_ZERO(&fdsetError); - if (!interruptNet.sleep_for(std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS))) - return; - } - - for (SOCKET hSocket : recv_select_set) { - if (FD_ISSET(hSocket, &fdsetRecv)) { - recv_set.insert(hSocket); + requested = Sock::SEND; + } else if (select_recv) { + requested = Sock::RECV; } - } - for (SOCKET hSocket : send_select_set) { - if (FD_ISSET(hSocket, &fdsetSend)) { - send_set.insert(hSocket); - } + events_per_sock.emplace(pnode->m_sock, Sock::Events{requested}); } - for (SOCKET hSocket : error_select_set) { - if (FD_ISSET(hSocket, &fdsetError)) { - error_set.insert(hSocket); - } - } + return events_per_sock; } -#endif void CConnman::SocketHandler() { AssertLockNotHeld(m_total_bytes_sent_mutex); - std::set<SOCKET> recv_set; - std::set<SOCKET> send_set; - std::set<SOCKET> error_set; + Sock::EventsPerSock events_per_sock; { const NodesSnapshot snap{*this, /*shuffle=*/false}; + const auto timeout = std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS); + // Check for the readiness of the already connected sockets and the // listening sockets in one call ("readiness" 
as in poll(2) or // select(2)). If none are ready, wait for a short while and return // empty sets. - SocketEvents(snap.Nodes(), recv_set, send_set, error_set); + events_per_sock = GenerateWaitSockets(snap.Nodes()); + if (events_per_sock.empty() || !events_per_sock.begin()->first->WaitMany(timeout, events_per_sock)) { + interruptNet.sleep_for(timeout); + } // Service (send/receive) each of the already connected nodes. - SocketHandlerConnected(snap.Nodes(), recv_set, send_set, error_set); + SocketHandlerConnected(snap.Nodes(), events_per_sock); } // Accept new connections from listening sockets. - SocketHandlerListening(recv_set); + SocketHandlerListening(events_per_sock); } void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes, - const std::set<SOCKET>& recv_set, - const std::set<SOCKET>& send_set, - const std::set<SOCKET>& error_set) + const Sock::EventsPerSock& events_per_sock) { AssertLockNotHeld(m_total_bytes_sent_mutex); @@ -1621,9 +1497,12 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes, if (!pnode->m_sock) { continue; } - recvSet = recv_set.count(pnode->m_sock->Get()) > 0; - sendSet = send_set.count(pnode->m_sock->Get()) > 0; - errorSet = error_set.count(pnode->m_sock->Get()) > 0; + const auto it = events_per_sock.find(pnode->m_sock); + if (it != events_per_sock.end()) { + recvSet = it->second.occurred & Sock::RECV; + sendSet = it->second.occurred & Sock::SEND; + errorSet = it->second.occurred & Sock::ERR; + } } if (recvSet || errorSet) { @@ -1693,13 +1572,14 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes, } } -void CConnman::SocketHandlerListening(const std::set<SOCKET>& recv_set) +void CConnman::SocketHandlerListening(const Sock::EventsPerSock& events_per_sock) { for (const ListenSocket& listen_socket : vhListenSocket) { if (interruptNet) { return; } - if (recv_set.count(listen_socket.sock->Get()) > 0) { + const auto it = events_per_sock.find(listen_socket.sock); + if (it != events_per_sock.end() && it->second.occurred & Sock::RECV) { AcceptConnection(listen_socket); } } @@ -1873,12 +1753,12 @@ bool CConnman::GetTryNewOutboundPeer() const void CConnman::SetTryNewOutboundPeer(bool flag) { m_try_another_outbound_peer = flag; - LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n", flag ? "true" : "false"); + LogPrint(BCLog::NET, "setting try another outbound peer=%s\n", flag ? "true" : "false"); } void CConnman::StartExtraBlockRelayPeers() { - LogPrint(BCLog::NET, "net: enabling extra block-relay-only peers\n"); + LogPrint(BCLog::NET, "enabling extra block-relay-only peers\n"); m_start_extra_block_relay_peers = true; } @@ -2814,8 +2694,11 @@ std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addres { auto local_socket_bytes = requestor.addrBind.GetAddrBytes(); uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE) - .Write(requestor.addr.GetNetwork()) + .Write(requestor.ConnectedThroughNetwork()) .Write(local_socket_bytes.data(), local_socket_bytes.size()) + // For outbound connections, the port of the bound address is randomly + // assigned by the OS and would therefore not be useful for seeding. + .Write(requestor.IsInboundConn() ? 
requestor.addrBind.GetPort() : 0) .Finalize(); const auto current_time = GetTime<std::chrono::microseconds>(); auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{}); @@ -248,7 +248,7 @@ struct LocalServiceInfo { uint16_t nPort; }; -extern Mutex g_maplocalhost_mutex; +extern GlobalMutex g_maplocalhost_mutex; extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex); extern const std::string NET_MESSAGE_TYPE_OTHER; @@ -980,28 +980,9 @@ private: /** * Generate a collection of sockets to check for IO readiness. * @param[in] nodes Select from these nodes' sockets. - * @param[out] recv_set Sockets to check for read readiness. - * @param[out] send_set Sockets to check for write readiness. - * @param[out] error_set Sockets to check for errors. - * @return true if at least one socket is to be checked (the returned set is not empty) + * @return sockets to check for readiness */ - bool GenerateSelectSet(const std::vector<CNode*>& nodes, - std::set<SOCKET>& recv_set, - std::set<SOCKET>& send_set, - std::set<SOCKET>& error_set); - - /** - * Check which sockets are ready for IO. - * @param[in] nodes Select from these nodes' sockets. - * @param[out] recv_set Sockets which are ready for read. - * @param[out] send_set Sockets which are ready for write. - * @param[out] error_set Sockets which have errors. - * This calls `GenerateSelectSet()` to gather a list of sockets to check. - */ - void SocketEvents(const std::vector<CNode*>& nodes, - std::set<SOCKET>& recv_set, - std::set<SOCKET>& send_set, - std::set<SOCKET>& error_set); + Sock::EventsPerSock GenerateWaitSockets(Span<CNode* const> nodes); /** * Check connected and listening sockets for IO readiness and process them accordingly. @@ -1010,23 +991,18 @@ private: /** * Do the read/write for connected sockets that are ready for IO. - * @param[in] nodes Nodes to process. The socket of each node is checked against - * `recv_set`, `send_set` and `error_set`. - * @param[in] recv_set Sockets that are ready for read. - * @param[in] send_set Sockets that are ready for send. - * @param[in] error_set Sockets that have an exceptional condition (error). + * @param[in] nodes Nodes to process. The socket of each node is checked against `what`. + * @param[in] events_per_sock Sockets that are ready for IO. */ void SocketHandlerConnected(const std::vector<CNode*>& nodes, - const std::set<SOCKET>& recv_set, - const std::set<SOCKET>& send_set, - const std::set<SOCKET>& error_set) + const Sock::EventsPerSock& events_per_sock) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc); /** * Accept incoming connections, one from each read-ready listening socket. - * @param[in] recv_set Sockets that are ready for read. + * @param[in] events_per_sock Sockets that are ready for IO. 
*/ - void SocketHandlerListening(const std::set<SOCKET>& recv_set); + void SocketHandlerListening(const Sock::EventsPerSock& events_per_sock); void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc); void ThreadDNSAddressSeed() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 70b81b3eb2..751a03f01c 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -21,6 +21,7 @@ #include <node/blockstorage.h> #include <policy/fees.h> #include <policy/policy.h> +#include <policy/settings.h> #include <primitives/block.h> #include <primitives/transaction.h> #include <random.h> @@ -291,7 +292,7 @@ struct Peer { return m_tx_relay.get(); }; - TxRelay* GetTxRelay() + TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) { return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get()); }; @@ -602,9 +603,11 @@ private: /** Next time to check for stale tip */ std::chrono::seconds m_stale_tip_check_time{0s}; - /** Whether this node is running in blocks only mode */ + /** Whether this node is running in -blocksonly mode */ const bool m_ignore_incoming_txs; + bool RejectIncomingTxs(const CNode& peer) const; + /** Whether we've completed initial sync yet, for determining when to turn * on extra block-relay-only peers. */ bool m_initial_sync_finished{false}; @@ -1008,7 +1011,7 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) { AssertLockHeld(cs_main); - // Never request high-bandwidth mode from peers if we're blocks-only. Our + // When in -blocksonly mode, never request high-bandwidth mode from peers. Our // mempool will not contain the transactions necessary to reconstruct the // compact block. if (m_ignore_incoming_txs) return; @@ -3083,14 +3086,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - // Reject tx INVs when the -blocksonly setting is enabled, or this is a - // block-relay-only peer - bool reject_tx_invs{m_ignore_incoming_txs || pfrom.IsBlockOnlyConn()}; - - // Allow peers with relay permission to send data other than blocks in blocks only mode - if (pfrom.HasPermission(NetPermissionFlags::Relay)) { - reject_tx_invs = false; - } + const bool reject_tx_invs{RejectIncomingTxs(pfrom)}; LOCK(cs_main); @@ -3371,10 +3367,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (msg_type == NetMsgType::TX) { - // Stop processing the transaction early if - // 1) We are in blocks only mode and peer has no relay permission; OR - // 2) This peer is a block-relay-only peer - if ((m_ignore_incoming_txs && !pfrom.HasPermission(NetPermissionFlags::Relay)) || pfrom.IsBlockOnlyConn()) { + if (RejectIncomingTxs(pfrom)) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -4642,7 +4635,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi namespace { class CompareInvMempoolOrder { - CTxMemPool *mp; + CTxMemPool* mp; bool m_wtxid_relay; public: explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid) @@ -4658,6 +4651,15 @@ public: return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay); } }; +} // namespace + +bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const +{ + // block-relay-only peers may never send txs to us + if (peer.IsBlockOnlyConn()) return true; + // In -blocksonly mode, peers need the 'relay' permission to send txs to us + if 
(m_ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true; + return false; } bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) diff --git a/src/netbase.cpp b/src/netbase.cpp index 8ff3b7a68c..030f462ed9 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -30,7 +30,7 @@ #endif // Settings -static Mutex g_proxyinfo_mutex; +static GlobalMutex g_proxyinfo_mutex; static Proxy proxyInfo[NET_MAX] GUARDED_BY(g_proxyinfo_mutex); static Proxy nameProxy GUARDED_BY(g_proxyinfo_mutex); int nConnectTimeout = DEFAULT_CONNECT_TIMEOUT; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 9112ce3bf3..cadafcaa8d 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -226,7 +226,7 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr } } - LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", + LogPrint(BCLog::PRUNE, "target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", nPruneTarget/1024/1024, nCurrentUsage/1024/1024, ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024, nLastBlockWeCanPrune, count); diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 2e52716649..e017f3f427 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -49,7 +49,7 @@ extern std::atomic_bool fReindex; /** Pruning-related variables and constants */ /** True if we're running in -prune mode. */ extern bool fPruneMode; -/** Number of MiB of block files that we're trying to stay below. */ +/** Number of bytes of block files that we're trying to stay below. */ extern uint64_t nPruneTarget; // Because validation code takes pointers to the map's CBlockIndex objects, if diff --git a/src/node/ui_interface.cpp b/src/node/interface_ui.cpp index a3a6ede39a..370cde84f8 100644 --- a/src/node/ui_interface.cpp +++ b/src/node/interface_ui.cpp @@ -2,7 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <util/translation.h> diff --git a/src/node/ui_interface.h b/src/node/interface_ui.h index d02238b549..37c0f6392b 100644 --- a/src/node/ui_interface.h +++ b/src/node/interface_ui.h @@ -3,8 +3,8 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-#ifndef BITCOIN_NODE_UI_INTERFACE_H -#define BITCOIN_NODE_UI_INTERFACE_H +#ifndef BITCOIN_NODE_INTERFACE_UI_H +#define BITCOIN_NODE_INTERFACE_UI_H #include <functional> #include <memory> @@ -120,4 +120,4 @@ constexpr auto AbortError = InitError; extern CClientUIInterface uiInterface; -#endif // BITCOIN_NODE_UI_INTERFACE_H +#endif // BITCOIN_NODE_INTERFACE_UI_H diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 7752fb0f65..1905a4df29 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -22,7 +22,7 @@ #include <node/coin.h> #include <node/context.h> #include <node/transaction.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <policy/feerate.h> #include <policy/fees.h> #include <policy/policy.h> diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 01db49d5cf..9db10feae4 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -62,7 +62,7 @@ BlockAssembler::Options::Options() nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT; } -BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const Options& options) +BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool, const Options& options) : chainparams{chainstate.m_chainman.GetParams()}, m_mempool(mempool), m_chainstate(chainstate) @@ -87,7 +87,7 @@ static BlockAssembler::Options DefaultOptions() return options; } -BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool) +BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool) : BlockAssembler(chainstate, mempool, DefaultOptions()) {} void BlockAssembler::resetBlock() @@ -121,7 +121,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc pblocktemplate->vTxFees.push_back(-1); // updated at end pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end - LOCK2(cs_main, m_mempool.cs); + LOCK(::cs_main); CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip(); assert(pindexPrev != nullptr); nHeight = pindexPrev->nHeight + 1; @@ -138,7 +138,10 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc int nPackagesSelected = 0; int nDescendantsUpdated = 0; - addPackageTxs(nPackagesSelected, nDescendantsUpdated); + if (m_mempool) { + LOCK(m_mempool->cs); + addPackageTxs(*m_mempool, nPackagesSelected, nDescendantsUpdated); + } int64_t nTime1 = GetTimeMicros(); @@ -232,15 +235,19 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) } } -int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, - indexed_modified_transaction_set &mapModifiedTx) +/** Add descendants of given transactions to mapModifiedTx with ancestor + * state updated assuming given transactions are inBlock. Returns number + * of updated descendants. 
*/ +static int UpdatePackagesForAdded(const CTxMemPool& mempool, + const CTxMemPool::setEntries& alreadyAdded, + indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs) { - AssertLockHeld(m_mempool.cs); + AssertLockHeld(mempool.cs); int nDescendantsUpdated = 0; for (CTxMemPool::txiter it : alreadyAdded) { CTxMemPool::setEntries descendants; - m_mempool.CalculateDescendants(it, descendants); + mempool.CalculateDescendants(it, descendants); // Insert all descendants (not yet in block) into the modified set for (CTxMemPool::txiter desc : descendants) { if (alreadyAdded.count(desc)) { @@ -262,23 +269,6 @@ int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& already return nDescendantsUpdated; } -// Skip entries in mapTx that are already in a block or are present -// in mapModifiedTx (which implies that the mapTx ancestor state is -// stale due to ancestor inclusion in the block) -// Also skip transactions that we've already failed to add. This can happen if -// we consider a transaction in mapModifiedTx and it fails: we can then -// potentially consider it again while walking mapTx. It's currently -// guaranteed to fail again, but as a belt-and-suspenders check we put it in -// failedTx and avoid re-evaluation, since the re-evaluation would be using -// cached size/sigops/fee values that are not actually correct. -bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx) -{ - AssertLockHeld(m_mempool.cs); - - assert(it != m_mempool.mapTx.end()); - return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it); -} - void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries) { // Sort package by ancestor count @@ -300,9 +290,9 @@ void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::ve // Each time through the loop, we compare the best transaction in // mapModifiedTxs with the next transaction in the mempool to decide what // transaction package to work on next. -void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated) +void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated) { - AssertLockHeld(m_mempool.cs); + AssertLockHeld(mempool.cs); // mapModifiedTx will store sorted packages after they are modified // because some of their txs are already in the block @@ -310,7 +300,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda // Keep track of entries that failed inclusion, to avoid duplicate work CTxMemPool::setEntries failedTx; - CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = m_mempool.mapTx.get<ancestor_score>().begin(); + CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin(); CTxMemPool::txiter iter; // Limit the number of attempts to add transactions to the block when it is @@ -319,12 +309,27 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda const int64_t MAX_CONSECUTIVE_FAILURES = 1000; int64_t nConsecutiveFailed = 0; - while (mi != m_mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) { + while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) { // First try to find a new transaction in mapTx to evaluate. 
- if (mi != m_mempool.mapTx.get<ancestor_score>().end() && - SkipMapTxEntry(m_mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) { - ++mi; - continue; + // + // Skip entries in mapTx that are already in a block or are present + // in mapModifiedTx (which implies that the mapTx ancestor state is + // stale due to ancestor inclusion in the block) + // Also skip transactions that we've already failed to add. This can happen if + // we consider a transaction in mapModifiedTx and it fails: we can then + // potentially consider it again while walking mapTx. It's currently + // guaranteed to fail again, but as a belt-and-suspenders check we put it in + // failedTx and avoid re-evaluation, since the re-evaluation would be using + // cached size/sigops/fee values that are not actually correct. + /** Return true if given transaction from mapTx has already been evaluated, + * or if the transaction's cached data in mapTx is incorrect. */ + if (mi != mempool.mapTx.get<ancestor_score>().end()) { + auto it = mempool.mapTx.project<0>(mi); + assert(it != mempool.mapTx.end()); + if (mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it)) { + ++mi; + continue; + } } // Now that mi is not stale, determine which transaction to evaluate: @@ -332,13 +337,13 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda bool fUsingModified = false; modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin(); - if (mi == m_mempool.mapTx.get<ancestor_score>().end()) { + if (mi == mempool.mapTx.get<ancestor_score>().end()) { // We're out of entries in mapTx; use the entry from mapModifiedTx iter = modit->iter; fUsingModified = true; } else { // Try to compare the mapTx entry to the mapModifiedTx entry - iter = m_mempool.mapTx.project<0>(mi); + iter = mempool.mapTx.project<0>(mi); if (modit != mapModifiedTx.get<ancestor_score>().end() && CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) { // The best entry in mapModifiedTx has higher score @@ -393,7 +398,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda CTxMemPool::setEntries ancestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; - m_mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); + mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); onlyUnconfirmed(ancestors); ancestors.insert(iter); @@ -423,7 +428,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda ++nPackagesSelected; // Update transactions that depend on each of these - nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx); + nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx); } } } // namespace node diff --git a/src/node/miner.h b/src/node/miner.h index 7cf8e3fb9e..26454df3df 100644 --- a/src/node/miner.h +++ b/src/node/miner.h @@ -147,7 +147,7 @@ private: int64_t m_lock_time_cutoff; const CChainParams& chainparams; - const CTxMemPool& m_mempool; + const CTxMemPool* const m_mempool; CChainState& m_chainstate; public: @@ -157,8 +157,8 @@ public: CFeeRate blockMinFeeRate; }; - explicit BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool); - explicit BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const Options& options); + explicit BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool); + explicit BlockAssembler(CChainState& chainstate, 
const CTxMemPool* mempool, const Options& options); /** Construct a new block template with coinbase to scriptPubKeyIn */ std::unique_ptr<CBlockTemplate> CreateNewBlock(const CScript& scriptPubKeyIn); @@ -177,7 +177,7 @@ private: /** Add transactions based on feerate including unconfirmed ancestors * Increments nPackagesSelected / nDescendantsUpdated with corresponding * statistics from the package selection (for logging statistics). */ - void addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs); + void addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs); // helper functions for addPackageTxs() /** Remove confirmed (inBlock) entries from given set */ @@ -189,15 +189,8 @@ private: * These checks should always succeed, and they're here * only as an extra check in case of suboptimal node configuration */ bool TestPackageTransactions(const CTxMemPool::setEntries& package) const; - /** Return true if given transaction from mapTx has already been evaluated, - * or if the transaction's cached data in mapTx is incorrect. */ - bool SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs); /** Sort the package in an order that is valid to appear in a block */ void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries); - /** Add descendants of given transactions to mapModifiedTx with ancestor - * state updated assuming given transactions are inBlock. Returns number - * of updated descendants. */ - int UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs); }; int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev); diff --git a/src/noui.cpp b/src/noui.cpp index 6fe5c5638c..54cb5f3cbf 100644 --- a/src/noui.cpp +++ b/src/noui.cpp @@ -6,7 +6,7 @@ #include <noui.h> #include <logging.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <util/translation.h> #include <string> diff --git a/src/policy/feerate.cpp b/src/policy/feerate.cpp index 0ea56d8db7..82b767793d 100644 --- a/src/policy/feerate.cpp +++ b/src/policy/feerate.cpp @@ -3,8 +3,8 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#include <consensus/amount.h> #include <policy/feerate.h> - #include <tinyformat.h> #include <cmath> diff --git a/src/policy/feerate.h b/src/policy/feerate.h index 50fd6fd11b..a8d4d2fc63 100644 --- a/src/policy/feerate.h +++ b/src/policy/feerate.h @@ -9,7 +9,10 @@ #include <consensus/amount.h> #include <serialize.h> + +#include <cstdint> #include <string> +#include <type_traits> const std::string CURRENCY_UNIT = "BTC"; // One formatted unit const std::string CURRENCY_ATOM = "sat"; // One indivisible minimum value unit diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index d2deaf69d0..b39632364f 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -6,12 +6,30 @@ #include <policy/fees.h> #include <clientversion.h> +#include <consensus/amount.h> #include <fs.h> #include <logging.h> +#include <policy/feerate.h> +#include <primitives/transaction.h> +#include <random.h> +#include <serialize.h> #include <streams.h> +#include <sync.h> +#include <tinyformat.h> #include <txmempool.h> +#include <uint256.h> #include <util/serfloat.h> #include <util/system.h> +#include <util/time.h> + +#include <algorithm> +#include <cassert> +#include <cmath> +#include <cstddef> +#include <cstdint> +#include <exception> +#include <stdexcept> +#include <utility> static const char* FEE_ESTIMATES_FILENAME = "fee_estimates.dat"; diff --git a/src/policy/fees.h b/src/policy/fees.h index 6e25bb42b8..dea1e1d31b 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -7,20 +7,20 @@ #include <consensus/amount.h> #include <policy/feerate.h> -#include <uint256.h> #include <random.h> #include <sync.h> +#include <threadsafety.h> +#include <uint256.h> #include <array> #include <map> #include <memory> +#include <set> #include <string> #include <vector> class CAutoFile; -class CFeeRate; class CTxMemPoolEntry; -class CTxMemPool; class TxConfirmStats; /* Identifier for each of the 3 different TxConfirmStats which will track diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp index 21f5488816..67918c9dec 100644 --- a/src/policy/packages.cpp +++ b/src/policy/packages.cpp @@ -2,12 +2,16 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <consensus/validation.h> #include <policy/packages.h> +#include <policy/policy.h> #include <primitives/transaction.h> #include <uint256.h> #include <util/hasher.h> +#include <algorithm> +#include <cassert> +#include <iterator> +#include <memory> #include <numeric> #include <unordered_set> diff --git a/src/policy/packages.h b/src/policy/packages.h index 9f274f6b7d..ba6a3a9a06 100644 --- a/src/policy/packages.h +++ b/src/policy/packages.h @@ -5,10 +5,12 @@ #ifndef BITCOIN_POLICY_PACKAGES_H #define BITCOIN_POLICY_PACKAGES_H +#include <consensus/consensus.h> #include <consensus/validation.h> #include <policy/policy.h> #include <primitives/transaction.h> +#include <cstdint> #include <vector> /** Default maximum number of transactions in a package. 
*/ diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 6aba6a4a5b..f6452266b7 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -7,10 +7,22 @@ #include <policy/policy.h> -#include <consensus/validation.h> #include <coins.h> +#include <consensus/amount.h> +#include <consensus/consensus.h> +#include <consensus/validation.h> +#include <policy/feerate.h> +#include <primitives/transaction.h> +#include <script/interpreter.h> +#include <script/script.h> +#include <script/standard.h> +#include <serialize.h> #include <span.h> +#include <algorithm> +#include <cstddef> +#include <vector> + CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn) { // "Dust" is defined in terms of dustRelayFee, diff --git a/src/policy/policy.h b/src/policy/policy.h index 89f6e72618..94f9623b8a 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -6,15 +6,18 @@ #ifndef BITCOIN_POLICY_POLICY_H #define BITCOIN_POLICY_POLICY_H +#include <consensus/amount.h> #include <consensus/consensus.h> -#include <policy/feerate.h> +#include <primitives/transaction.h> #include <script/interpreter.h> #include <script/standard.h> +#include <cstdint> #include <string> class CCoinsViewCache; -class CTxOut; +class CFeeRate; +class CScript; /** Default for -blockmaxweight, which controls the range of block weights the mining code will create **/ static const unsigned int DEFAULT_BLOCK_MAX_WEIGHT = MAX_BLOCK_WEIGHT - 4000; @@ -52,6 +55,8 @@ static const unsigned int MAX_STANDARD_SCRIPTSIG_SIZE = 1650; * only increase the dust limit after prior releases were already not creating * outputs below the new threshold */ static const unsigned int DUST_RELAY_TX_FEE = 3000; +/** Default for -minrelaytxfee, minimum relay fee for transactions */ +static const unsigned int DEFAULT_MIN_RELAY_TX_FEE = 1000; /** * Standard script verification flags that standard transactions will comply * with. However scripts violating these flags may still be present in valid diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp index 8fe4dc35b8..e25f5c7c5b 100644 --- a/src/policy/rbf.cpp +++ b/src/policy/rbf.cpp @@ -4,11 +4,19 @@ #include <policy/rbf.h> -#include <policy/settings.h> +#include <consensus/amount.h> +#include <policy/feerate.h> +#include <primitives/transaction.h> +#include <sync.h> #include <tinyformat.h> +#include <txmempool.h> +#include <uint256.h> #include <util/moneystr.h> #include <util/rbf.h> +#include <limits> +#include <vector> + RBFTransactionState IsRBFOptIn(const CTransaction& tx, const CTxMemPool& pool) { AssertLockHeld(pool.cs); diff --git a/src/policy/rbf.h b/src/policy/rbf.h index fcec7052ed..07f68c8fd4 100644 --- a/src/policy/rbf.h +++ b/src/policy/rbf.h @@ -5,13 +5,20 @@ #ifndef BITCOIN_POLICY_RBF_H #define BITCOIN_POLICY_RBF_H +#include <consensus/amount.h> #include <primitives/transaction.h> +#include <threadsafety.h> #include <txmempool.h> -#include <uint256.h> +#include <cstddef> +#include <cstdint> #include <optional> +#include <set> #include <string> +class CFeeRate; +class uint256; + /** Maximum number of transactions that can be replaced by BIP125 RBF (Rule #5). This includes all * mempool conflicts and their descendants. 
*/ static constexpr uint32_t MAX_BIP125_REPLACEMENT_CANDIDATES{100}; diff --git a/src/policy/settings.cpp b/src/policy/settings.cpp index eb2ec56850..0b67d274ce 100644 --- a/src/policy/settings.cpp +++ b/src/policy/settings.cpp @@ -11,4 +11,5 @@ bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG; CFeeRate incrementalRelayFee = CFeeRate(DEFAULT_INCREMENTAL_RELAY_FEE); CFeeRate dustRelayFee = CFeeRate(DUST_RELAY_TX_FEE); +CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); unsigned int nBytesPerSigOp = DEFAULT_BYTES_PER_SIGOP; diff --git a/src/policy/settings.h b/src/policy/settings.h index 0b4fc1e770..2311d01fe8 100644 --- a/src/policy/settings.h +++ b/src/policy/settings.h @@ -6,14 +6,19 @@ #ifndef BITCOIN_POLICY_SETTINGS_H #define BITCOIN_POLICY_SETTINGS_H +#include <policy/feerate.h> #include <policy/policy.h> -class CFeeRate; +#include <cstdint> +#include <string> + class CTransaction; // Policy settings which are configurable at runtime. extern CFeeRate incrementalRelayFee; extern CFeeRate dustRelayFee; +/** A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) */ +extern CFeeRate minRelayTxFee; extern unsigned int nBytesPerSigOp; extern bool fIsBareMultisigStd; diff --git a/src/protocol.h b/src/protocol.h index fdeaa9a9c5..da2d24aff3 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -3,10 +3,6 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#ifndef __cplusplus -#error This header can only be compiled as C++. -#endif - #ifndef BITCOIN_PROTOCOL_H #define BITCOIN_PROTOCOL_H @@ -15,10 +11,9 @@ #include <serialize.h> #include <streams.h> #include <uint256.h> -#include <version.h> +#include <cstdint> #include <limits> -#include <stdint.h> #include <string> /** Message header. @@ -420,7 +415,6 @@ public: use_v2 = s.GetVersion() & ADDRV2_FORMAT; } - SER_READ(obj, obj.nTime = TIME_INIT); READWRITE(obj.nTime); // nServices is serialized as CompactSize in V2; as uint64_t in V1. if (use_v2) { diff --git a/src/psbt.h b/src/psbt.h index 8fda889bb4..4a6d41076f 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -54,7 +54,7 @@ static constexpr uint8_t PSBT_SEPARATOR = 0x00; // BIP 174 does not specify a maximum file size, but we set a limit anyway // to prevent reading a stream indefinitely and running out of memory. 
-const std::streamsize MAX_FILE_SIZE_PSBT = 100000000; // 100 MiB +const std::streamsize MAX_FILE_SIZE_PSBT = 100000000; // 100 MB // PSBT version number static constexpr uint32_t PSBT_HIGHEST_VERSION = 0; diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 53fbac0106..f11ddad30f 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -13,7 +13,7 @@ #include <interfaces/handler.h> #include <interfaces/init.h> #include <interfaces/node.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <noui.h> #include <qt/bitcoingui.h> #include <qt/clientmodel.h> @@ -259,9 +259,26 @@ void BitcoinApplication::createPaymentServer() } #endif -void BitcoinApplication::createOptionsModel(bool resetSettings) +bool BitcoinApplication::createOptionsModel(bool resetSettings) { - optionsModel = new OptionsModel(node(), this, resetSettings); + optionsModel = new OptionsModel(node(), this); + if (resetSettings) { + optionsModel->Reset(); + } + bilingual_str error; + if (!optionsModel->Init(error)) { + fs::path settings_path; + if (gArgs.GetSettingsPath(&settings_path)) { + error += Untranslated("\n"); + std::string quoted_path = strprintf("%s", fs::quoted(fs::PathToString(settings_path))); + error.original += strprintf("Settings file %s might be corrupt or invalid.", quoted_path); + error.translated += tr("Settings file %1 might be corrupt or invalid.").arg(QString::fromStdString(quoted_path)).toStdString(); + } + InitError(error); + QMessageBox::critical(nullptr, PACKAGE_NAME, QString::fromStdString(error.translated)); + return false; + } + return true; } void BitcoinApplication::createWindow(const NetworkStyle *networkStyle) @@ -327,7 +344,7 @@ void BitcoinApplication::parameterSetup() void BitcoinApplication::InitPruneSetting(int64_t prune_MiB) { - optionsModel->SetPruneTargetGB(PruneMiBtoGB(prune_MiB), true); + optionsModel->SetPruneTargetGB(PruneMiBtoGB(prune_MiB)); } void BitcoinApplication::requestInitialize() @@ -641,7 +658,9 @@ int GuiMain(int argc, char* argv[]) app.createNode(*init); // Load GUI settings from QSettings - app.createOptionsModel(gArgs.GetBoolArg("-resetguisettings", false)); + if (!app.createOptionsModel(gArgs.GetBoolArg("-resetguisettings", false))) { + return EXIT_FAILURE; + } if (did_show_intro) { // Store intro dialog settings other than datadir (network specific) diff --git a/src/qt/bitcoin.h b/src/qt/bitcoin.h index 7a6aa5cdc9..9ad37ca6c9 100644 --- a/src/qt/bitcoin.h +++ b/src/qt/bitcoin.h @@ -47,7 +47,7 @@ public: /// parameter interaction/setup based on rules void parameterSetup(); /// Create options model - void createOptionsModel(bool resetSettings); + [[nodiscard]] bool createOptionsModel(bool resetSettings); /// Initialize prune setting void InitPruneSetting(int64_t prune_MiB); /// Create main window diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index bfcdf6f316..6fea8e1cba 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -35,7 +35,7 @@ #include <chainparams.h> #include <interfaces/handler.h> #include <interfaces/node.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <util/system.h> #include <util/translation.h> #include <validation.h> diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h index b10ffb4523..81f03a58ec 100644 --- a/src/qt/clientmodel.h +++ b/src/qt/clientmodel.h @@ -105,7 +105,7 @@ private: //! 
A thread to interact with m_node asynchronously QThread* const m_thread; - void TipChanged(SynchronizationState sync_state, interfaces::BlockTip tip, double verification_progress, bool header); + void TipChanged(SynchronizationState sync_state, interfaces::BlockTip tip, double verification_progress, bool header) EXCLUSIVE_LOCKS_REQUIRED(!m_cached_tip_mutex); void subscribeToCoreSignals(); void unsubscribeFromCoreSignals(); diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index 5438811aff..582f02132a 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -896,7 +896,7 @@ <item> <widget class="QLabel" name="overriddenByCommandLineInfoLabel"> <property name="text"> - <string>Options set in this dialog are overridden by the command line or in the configuration file:</string> + <string>Options set in this dialog are overridden by the command line:</string> </property> <property name="textFormat"> <enum>Qt::PlainText</enum> diff --git a/src/qt/main.cpp b/src/qt/main.cpp index 6e772d58c5..38b0ac71a3 100644 --- a/src/qt/main.cpp +++ b/src/qt/main.cpp @@ -4,6 +4,7 @@ #include <qt/bitcoin.h> +#include <compat.h> #include <util/translation.h> #include <util/url.h> @@ -18,4 +19,7 @@ extern const std::function<std::string(const char*)> G_TRANSLATION_FUN = [](cons }; UrlDecodeFn* const URL_DECODE = urlDecode; -int main(int argc, char* argv[]) { return GuiMain(argc, argv); } +MAIN_FUNCTION +{ + return GuiMain(argc, argv); +} diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index e6ff43a379..f3c3af10e0 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -26,7 +26,6 @@ #include <QIntValidator> #include <QLocale> #include <QMessageBox> -#include <QSettings> #include <QSystemTrayIcon> #include <QTimer> @@ -56,10 +55,6 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) : #ifndef USE_NATPMP ui->mapPortNatpmp->setEnabled(false); #endif - connect(this, &QDialog::accepted, [this](){ - QSettings settings; - model->node().mapPort(settings.value("fUseUPnP").toBool(), settings.value("fUseNatpmp").toBool()); - }); ui->proxyIp->setEnabled(false); ui->proxyPort->setEnabled(false); diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index 612d3009c1..0b4359a917 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -19,6 +19,7 @@ #include <txdb.h> // for -dbcache defaults #include <util/string.h> #include <validation.h> // For DEFAULT_SCRIPTCHECK_THREADS +#include <wallet/wallet.h> // For DEFAULT_SPEND_ZEROCONF_CHANGE #include <QDebug> #include <QLatin1Char> @@ -26,14 +27,99 @@ #include <QStringList> #include <QVariant> +#include <univalue.h> + const char *DEFAULT_GUI_PROXY_HOST = "127.0.0.1"; static const QString GetDefaultProxyAddress(); -OptionsModel::OptionsModel(interfaces::Node& node, QObject *parent, bool resetSettings) : +/** Map GUI option ID to node setting name. 
*/ +static const char* SettingName(OptionsModel::OptionID option) +{ + switch (option) { + case OptionsModel::DatabaseCache: return "dbcache"; + case OptionsModel::ThreadsScriptVerif: return "par"; + case OptionsModel::SpendZeroConfChange: return "spendzeroconfchange"; + case OptionsModel::ExternalSignerPath: return "signer"; + case OptionsModel::MapPortUPnP: return "upnp"; + case OptionsModel::MapPortNatpmp: return "natpmp"; + case OptionsModel::Listen: return "listen"; + case OptionsModel::Server: return "server"; + case OptionsModel::PruneSize: return "prune"; + case OptionsModel::Prune: return "prune"; + case OptionsModel::ProxyIP: return "proxy"; + case OptionsModel::ProxyPort: return "proxy"; + case OptionsModel::ProxyUse: return "proxy"; + case OptionsModel::ProxyIPTor: return "onion"; + case OptionsModel::ProxyPortTor: return "onion"; + case OptionsModel::ProxyUseTor: return "onion"; + case OptionsModel::Language: return "lang"; + default: throw std::logic_error(strprintf("GUI option %i has no corresponding node setting.", option)); + } +} + +/** Call node.updateRwSetting() with Bitcoin 22.x workaround. */ +static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID option, const util::SettingsValue& value) +{ + if (value.isNum() && + (option == OptionsModel::DatabaseCache || + option == OptionsModel::ThreadsScriptVerif || + option == OptionsModel::Prune || + option == OptionsModel::PruneSize)) { + // Write certain old settings as strings, even though they are numbers, + // because Bitcoin 22.x releases try to read these specific settings as + // strings in addOverriddenOption() calls at startup, triggering + // uncaught exceptions in UniValue::get_str(). These errors were fixed + // in later releases by https://github.com/bitcoin/bitcoin/pull/24498. + // If new numeric settings are added, they can be written as numbers + // instead of strings, because bitcoin 22.x will not try to read these. + node.updateRwSetting(SettingName(option), value.getValStr()); + } else { + node.updateRwSetting(SettingName(option), value); + } +} + +//! Convert enabled/size values to bitcoin -prune setting. +static util::SettingsValue PruneSetting(bool prune_enabled, int prune_size_gb) +{ + assert(!prune_enabled || prune_size_gb >= 1); // PruneSizeGB and ParsePruneSizeGB never return less + return prune_enabled ? PruneGBtoMiB(prune_size_gb) : 0; +} + +//! Get pruning enabled value to show in GUI from bitcoin -prune setting. +static bool PruneEnabled(const util::SettingsValue& prune_setting) +{ + // -prune=1 setting is manual pruning mode, so disabled for purposes of the gui + return SettingToInt(prune_setting, 0) > 1; +} + +//! Get pruning size value to show in GUI from bitcoin -prune setting. If +//! pruning is not enabled, just show default recommended pruning size (2GB). +static int PruneSizeGB(const util::SettingsValue& prune_setting) +{ + int value = SettingToInt(prune_setting, 0); + return value > 1 ? PruneMiBtoGB(value) : DEFAULT_PRUNE_TARGET_GB; +} + +//! Parse pruning size value provided by user in GUI or loaded from QSettings +//! (windows registry key or qt .conf file). Smallest value that the GUI can +//! display is 1 GB, so round up if anything less is parsed. 
+static int ParsePruneSizeGB(const QVariant& prune_size) +{ + return std::max(1, prune_size.toInt()); +} + +struct ProxySetting { + bool is_set; + QString ip; + QString port; +}; +static ProxySetting ParseProxyString(const std::string& proxy); +static std::string ProxyString(bool is_set, QString ip, QString port); + +OptionsModel::OptionsModel(interfaces::Node& node, QObject *parent) : QAbstractListModel(parent), m_node{node} { - Init(resetSettings); } void OptionsModel::addOverriddenOption(const std::string &option) @@ -42,10 +128,17 @@ void OptionsModel::addOverriddenOption(const std::string &option) } // Writes all missing QSettings with their default values -void OptionsModel::Init(bool resetSettings) +bool OptionsModel::Init(bilingual_str& error) { - if (resetSettings) - Reset(); + // Initialize display settings from stored settings. + m_prune_size_gb = PruneSizeGB(node().getPersistentSetting("prune")); + ProxySetting proxy = ParseProxyString(SettingToString(node().getPersistentSetting("proxy"), GetDefaultProxyAddress().toStdString())); + m_proxy_ip = proxy.ip; + m_proxy_port = proxy.port; + ProxySetting onion = ParseProxyString(SettingToString(node().getPersistentSetting("onion"), GetDefaultProxyAddress().toStdString())); + m_onion_ip = onion.ip; + m_onion_port = onion.port; + language = QString::fromStdString(SettingToString(node().getPersistentSetting("lang"), "")); checkAndMigrate(); @@ -98,130 +191,43 @@ void OptionsModel::Init(bool resetSettings) // These are shared with the core or have a command-line parameter // and we want command-line parameters to overwrite the GUI settings. - // + for (OptionID option : {DatabaseCache, ThreadsScriptVerif, SpendZeroConfChange, ExternalSignerPath, MapPortUPnP, + MapPortNatpmp, Listen, Server, Prune, ProxyUse, ProxyUseTor, Language}) { + std::string setting = SettingName(option); + if (node().isSettingIgnored(setting)) addOverriddenOption("-" + setting); + try { + getOption(option); + } catch (const std::exception& e) { + // This handles exceptions thrown by univalue that can happen if + // settings in settings.json don't have the expected types. + error.original = strprintf("Could not read setting \"%s\", %s.", setting, e.what()); + error.translated = tr("Could not read setting \"%1\", %2.").arg(QString::fromStdString(setting), e.what()).toStdString(); + return false; + } + } + // If setting doesn't exist create it with defaults. - // - // If gArgs.SoftSetArg() or gArgs.SoftSetBoolArg() return false we were overridden - // by command-line and show this in the UI. 
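The pruning helpers above map the GUI's whole-gigabyte target onto the node's -prune value, which is expressed in MiB, with 0 meaning disabled and 1 meaning manual pruning. A minimal sketch of that conversion follows; the GBtoMiB/PruneSettingFor names are illustrative, and the decimal-GB convention is inferred from the 3 GB -> "prune": "2861" value that the migrateSettings test later in this diff expects.

    // Standalone sketch of the GUI prune-target conversion; illustrative names,
    // not the project's implementation.
    #include <cstdint>
    #include <iostream>

    int64_t GBtoMiB(int64_t gb)
    {
        return gb * 1000 * 1000 * 1000 / 1024 / 1024; // decimal GB -> binary MiB
    }

    // -prune encoding used by the node: 0 = disabled, 1 = manual pruning,
    // larger values are an automatic pruning target in MiB (at least 550 MiB).
    int64_t PruneSettingFor(bool enabled, int prune_size_gb)
    {
        return enabled ? GBtoMiB(prune_size_gb) : 0;
    }

    int main()
    {
        std::cout << PruneSettingFor(true, 3) << "\n";  // 2861
        std::cout << PruneSettingFor(false, 3) << "\n"; // 0
        return 0;
    }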
// Main - if (!settings.contains("bPrune")) - settings.setValue("bPrune", false); - if (!settings.contains("nPruneSize")) - settings.setValue("nPruneSize", DEFAULT_PRUNE_TARGET_GB); - SetPruneEnabled(settings.value("bPrune").toBool()); - - if (!settings.contains("nDatabaseCache")) - settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache); - if (!gArgs.SoftSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString())) - addOverriddenOption("-dbcache"); - - if (!settings.contains("nThreadsScriptVerif")) - settings.setValue("nThreadsScriptVerif", DEFAULT_SCRIPTCHECK_THREADS); - if (!gArgs.SoftSetArg("-par", settings.value("nThreadsScriptVerif").toString().toStdString())) - addOverriddenOption("-par"); - if (!settings.contains("strDataDir")) settings.setValue("strDataDir", GUIUtil::getDefaultDataDirectory()); // Wallet #ifdef ENABLE_WALLET - if (!settings.contains("bSpendZeroConfChange")) - settings.setValue("bSpendZeroConfChange", true); - if (!gArgs.SoftSetBoolArg("-spendzeroconfchange", settings.value("bSpendZeroConfChange").toBool())) - addOverriddenOption("-spendzeroconfchange"); - - if (!settings.contains("external_signer_path")) - settings.setValue("external_signer_path", ""); - - if (!gArgs.SoftSetArg("-signer", settings.value("external_signer_path").toString().toStdString())) { - addOverriddenOption("-signer"); - } - if (!settings.contains("SubFeeFromAmount")) { settings.setValue("SubFeeFromAmount", false); } m_sub_fee_from_amount = settings.value("SubFeeFromAmount", false).toBool(); #endif - // Network - if (!settings.contains("fUseUPnP")) - settings.setValue("fUseUPnP", DEFAULT_UPNP); - if (!gArgs.SoftSetBoolArg("-upnp", settings.value("fUseUPnP").toBool())) - addOverriddenOption("-upnp"); - - if (!settings.contains("fUseNatpmp")) { - settings.setValue("fUseNatpmp", DEFAULT_NATPMP); - } - if (!gArgs.SoftSetBoolArg("-natpmp", settings.value("fUseNatpmp").toBool())) { - addOverriddenOption("-natpmp"); - } - - if (!settings.contains("fListen")) - settings.setValue("fListen", DEFAULT_LISTEN); - const bool listen{settings.value("fListen").toBool()}; - if (!gArgs.SoftSetBoolArg("-listen", listen)) { - addOverriddenOption("-listen"); - } else if (!listen) { - // We successfully set -listen=0, thus mimic the logic from InitParameterInteraction(): - // "parameter interaction: -listen=0 -> setting -listenonion=0". - // - // Both -listen and -listenonion default to true. 
- // - // The call order is: - // - // InitParameterInteraction() - // would set -listenonion=0 if it sees -listen=0, but for bitcoin-qt with - // fListen=false -listen is 1 at this point - // - // OptionsModel::Init() - // (this method) can flip -listen from 1 to 0 if fListen=false - // - // AppInitParameterInteraction() - // raises an error if -listen=0 and -listenonion=1 - gArgs.SoftSetBoolArg("-listenonion", false); - } - - if (!settings.contains("server")) { - settings.setValue("server", false); - } - if (!gArgs.SoftSetBoolArg("-server", settings.value("server").toBool())) { - addOverriddenOption("-server"); - } - - if (!settings.contains("fUseProxy")) - settings.setValue("fUseProxy", false); - if (!settings.contains("addrProxy")) - settings.setValue("addrProxy", GetDefaultProxyAddress()); - // Only try to set -proxy, if user has enabled fUseProxy - if ((settings.value("fUseProxy").toBool() && !gArgs.SoftSetArg("-proxy", settings.value("addrProxy").toString().toStdString()))) - addOverriddenOption("-proxy"); - else if(!settings.value("fUseProxy").toBool() && !gArgs.GetArg("-proxy", "").empty()) - addOverriddenOption("-proxy"); - - if (!settings.contains("fUseSeparateProxyTor")) - settings.setValue("fUseSeparateProxyTor", false); - if (!settings.contains("addrSeparateProxyTor")) - settings.setValue("addrSeparateProxyTor", GetDefaultProxyAddress()); - // Only try to set -onion, if user has enabled fUseSeparateProxyTor - if ((settings.value("fUseSeparateProxyTor").toBool() && !gArgs.SoftSetArg("-onion", settings.value("addrSeparateProxyTor").toString().toStdString()))) - addOverriddenOption("-onion"); - else if(!settings.value("fUseSeparateProxyTor").toBool() && !gArgs.GetArg("-onion", "").empty()) - addOverriddenOption("-onion"); - // Display - if (!settings.contains("language")) - settings.setValue("language", ""); - if (!gArgs.SoftSetArg("-lang", settings.value("language").toString().toStdString())) - addOverriddenOption("-lang"); - - language = settings.value("language").toString(); - if (!settings.contains("UseEmbeddedMonospacedFont")) { settings.setValue("UseEmbeddedMonospacedFont", "true"); } m_use_embedded_monospaced_font = settings.value("UseEmbeddedMonospacedFont").toBool(); Q_EMIT useEmbeddedMonospacedFontChanged(m_use_embedded_monospaced_font); + + return true; } /** Helper function to copy contents from one QSettings to another. 
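The removed blocks above all follow one pattern: copy a QSettings value into gArgs with a "soft set", and if the soft set fails because the option was already supplied on the command line or in the config file, record it as overridden so the dialog can show that. A minimal sketch of that soft-set idea, under the assumption that a soft set only writes when no value exists yet; the ArgsSketch names are illustrative and the real gArgs layers several sources of settings.

    // Minimal sketch of "soft set" semantics: write a value only if the option
    // is not already set, and report whether the write happened.
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    class ArgsSketch
    {
        std::map<std::string, std::string> m_args; // values set on the command line
    public:
        void ForceSet(const std::string& name, const std::string& value) { m_args[name] = value; }
        // Returns false if the option was already set, i.e. the GUI value loses.
        bool SoftSet(const std::string& name, const std::string& value)
        {
            return m_args.emplace(name, value).second;
        }
    };

    int main()
    {
        ArgsSketch args;
        std::vector<std::string> overridden;

        args.ForceSet("-dbcache", "450"); // pretend the user passed -dbcache=450

        if (!args.SoftSet("-dbcache", "600")) overridden.push_back("-dbcache"); // overridden
        if (!args.SoftSet("-par", "12")) overridden.push_back("-par");          // GUI value sticks

        for (const auto& name : overridden) std::cout << name << " is overridden\n";
        return 0;
    }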
@@ -245,6 +251,9 @@ static void BackupSettings(const fs::path& filename, const QSettings& src) void OptionsModel::Reset() { + // Backup and reset settings.json + node().resetSettings(); + QSettings settings; // Backup old settings to chain-specific datadir for troubleshooting @@ -273,21 +282,15 @@ int OptionsModel::rowCount(const QModelIndex & parent) const return OptionIDRowCount; } -struct ProxySetting { - bool is_set; - QString ip; - QString port; -}; - -static ProxySetting GetProxySetting(QSettings &settings, const QString &name) +static ProxySetting ParseProxyString(const QString& proxy) { static const ProxySetting default_val = {false, DEFAULT_GUI_PROXY_HOST, QString("%1").arg(DEFAULT_GUI_PROXY_PORT)}; // Handle the case that the setting is not set at all - if (!settings.contains(name)) { + if (proxy.isEmpty()) { return default_val; } // contains IP at index 0 and port at index 1 - QStringList ip_port = GUIUtil::SplitSkipEmptyParts(settings.value(name).toString(), ":"); + QStringList ip_port = GUIUtil::SplitSkipEmptyParts(proxy, ":"); if (ip_port.size() == 2) { return {true, ip_port.at(0), ip_port.at(1)}; } else { // Invalid: return default @@ -295,39 +298,42 @@ static ProxySetting GetProxySetting(QSettings &settings, const QString &name) } } -static void SetProxySetting(QSettings &settings, const QString &name, const ProxySetting &ip_port) +static ProxySetting ParseProxyString(const std::string& proxy) { - settings.setValue(name, QString{ip_port.ip + QLatin1Char(':') + ip_port.port}); + return ParseProxyString(QString::fromStdString(proxy)); } -static const QString GetDefaultProxyAddress() +static std::string ProxyString(bool is_set, QString ip, QString port) { - return QString("%1:%2").arg(DEFAULT_GUI_PROXY_HOST).arg(DEFAULT_GUI_PROXY_PORT); + return is_set ? QString(ip + ":" + port).toStdString() : ""; } -void OptionsModel::SetPruneEnabled(bool prune, bool force) +static const QString GetDefaultProxyAddress() { - QSettings settings; - settings.setValue("bPrune", prune); - const int64_t prune_target_mib = PruneGBtoMiB(settings.value("nPruneSize").toInt()); - std::string prune_val = prune ? ToString(prune_target_mib) : "0"; - if (force) { - gArgs.ForceSetArg("-prune", prune_val); - return; - } - if (!gArgs.SoftSetArg("-prune", prune_val)) { - addOverriddenOption("-prune"); - } + return QString("%1:%2").arg(DEFAULT_GUI_PROXY_HOST).arg(DEFAULT_GUI_PROXY_PORT); } -void OptionsModel::SetPruneTargetGB(int prune_target_gb, bool force) +void OptionsModel::SetPruneTargetGB(int prune_target_gb) { - const bool prune = prune_target_gb > 0; - if (prune) { - QSettings settings; - settings.setValue("nPruneSize", prune_target_gb); + const util::SettingsValue cur_value = node().getPersistentSetting("prune"); + const util::SettingsValue new_value = PruneSetting(prune_target_gb > 0, prune_target_gb); + + m_prune_size_gb = prune_target_gb; + + // Force setting to take effect. It is still safe to change the value at + // this point because this function is only called after the intro screen is + // shown, before the node starts. + node().forceSetting("prune", new_value); + + // Update settings.json if value configured in intro screen is different + // from saved value. Avoid writing settings.json if bitcoin.conf value + // doesn't need to be overridden. 
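The proxy fields above are stored as a single "host:port" string in settings.json and split back apart for display. Here is a standalone sketch of that round trip with std::string standing in for the Qt types; the split is simplified compared to GUIUtil::SplitSkipEmptyParts, and the default host/port values are stubs rather than the project's constants.

    // Standalone sketch of the "host:port" proxy round trip used by the options
    // model. std::string stands in for the Qt types; defaults are stubs.
    #include <iostream>
    #include <string>

    struct ProxySettingSketch {
        bool is_set;
        std::string host;
        std::string port;
    };

    ProxySettingSketch ParseProxy(const std::string& proxy)
    {
        const ProxySettingSketch def{false, "127.0.0.1", "9050"}; // illustrative defaults
        if (proxy.empty()) return def;
        const std::string::size_type colon = proxy.find(':');
        // Accept exactly one "host:port" separator; anything else falls back.
        if (colon == std::string::npos || proxy.find(':', colon + 1) != std::string::npos) return def;
        return {true, proxy.substr(0, colon), proxy.substr(colon + 1)};
    }

    std::string FormatProxy(bool is_set, const std::string& host, const std::string& port)
    {
        return is_set ? host + ":" + port : "";
    }

    int main()
    {
        ProxySettingSketch p = ParseProxy("proxy:123"); // value used by the migration test
        std::cout << p.host << " / " << p.port << "\n";            // proxy / 123
        std::cout << FormatProxy(p.is_set, p.host, p.port) << "\n"; // proxy:123
        return 0;
    }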
+ if (PruneEnabled(cur_value) != PruneEnabled(new_value) || + PruneSizeGB(cur_value) != PruneSizeGB(new_value)) { + // Call UpdateRwSetting() instead of setOption() to avoid setting + // RestartRequired flag + UpdateRwSetting(node(), Prune, new_value); } - SetPruneEnabled(prune, force); } // read QSettings values and return them @@ -356,6 +362,8 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in QVariant OptionsModel::getOption(OptionID option) const { + auto setting = [&]{ return node().getPersistentSetting(SettingName(option)); }; + QSettings settings; switch (option) { case StartAtStartup: @@ -366,13 +374,13 @@ QVariant OptionsModel::getOption(OptionID option) const return fMinimizeToTray; case MapPortUPnP: #ifdef USE_UPNP - return settings.value("fUseUPnP"); + return SettingToBool(setting(), DEFAULT_UPNP); #else return false; #endif // USE_UPNP case MapPortNatpmp: #ifdef USE_NATPMP - return settings.value("fUseNatpmp"); + return SettingToBool(setting(), DEFAULT_NATPMP); #else return false; #endif // USE_NATPMP @@ -381,25 +389,25 @@ QVariant OptionsModel::getOption(OptionID option) const // default proxy case ProxyUse: - return settings.value("fUseProxy", false); + return ParseProxyString(SettingToString(setting(), "")).is_set; case ProxyIP: - return GetProxySetting(settings, "addrProxy").ip; + return m_proxy_ip; case ProxyPort: - return GetProxySetting(settings, "addrProxy").port; + return m_proxy_port; // separate Tor proxy case ProxyUseTor: - return settings.value("fUseSeparateProxyTor", false); + return ParseProxyString(SettingToString(setting(), "")).is_set; case ProxyIPTor: - return GetProxySetting(settings, "addrSeparateProxyTor").ip; + return m_onion_ip; case ProxyPortTor: - return GetProxySetting(settings, "addrSeparateProxyTor").port; + return m_onion_port; #ifdef ENABLE_WALLET case SpendZeroConfChange: - return settings.value("bSpendZeroConfChange"); + return SettingToBool(setting(), wallet::DEFAULT_SPEND_ZEROCONF_CHANGE); case ExternalSignerPath: - return settings.value("external_signer_path"); + return QString::fromStdString(SettingToString(setting(), "")); case SubFeeFromAmount: return m_sub_fee_from_amount; #endif @@ -408,7 +416,7 @@ QVariant OptionsModel::getOption(OptionID option) const case ThirdPartyTxUrls: return strThirdPartyTxUrls; case Language: - return settings.value("language"); + return QString::fromStdString(SettingToString(setting(), "")); case UseEmbeddedMonospacedFont: return m_use_embedded_monospaced_font; case CoinControlFeatures: @@ -416,17 +424,17 @@ QVariant OptionsModel::getOption(OptionID option) const case EnablePSBTControls: return settings.value("enable_psbt_controls"); case Prune: - return settings.value("bPrune"); + return PruneEnabled(setting()); case PruneSize: - return settings.value("nPruneSize"); + return m_prune_size_gb; case DatabaseCache: - return settings.value("nDatabaseCache"); + return qlonglong(SettingToInt(setting(), nDefaultDbCache)); case ThreadsScriptVerif: - return settings.value("nThreadsScriptVerif"); + return qlonglong(SettingToInt(setting(), DEFAULT_SCRIPTCHECK_THREADS)); case Listen: - return settings.value("fListen"); + return SettingToBool(setting(), DEFAULT_LISTEN); case Server: - return settings.value("server"); + return SettingToBool(setting(), false); default: return QVariant(); } @@ -434,6 +442,9 @@ QVariant OptionsModel::getOption(OptionID option) const bool OptionsModel::setOption(OptionID option, const QVariant& value) { + auto changed = [&] { return value.isValid() && value != 
getOption(option); }; + auto update = [&](const util::SettingsValue& value) { return UpdateRwSetting(node(), option, value); }; + bool successful = true; /* set to false on parse error */ QSettings settings; @@ -451,10 +462,16 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value) settings.setValue("fMinimizeToTray", fMinimizeToTray); break; case MapPortUPnP: // core option - can be changed on-the-fly - settings.setValue("fUseUPnP", value.toBool()); + if (changed()) { + update(value.toBool()); + node().mapPort(value.toBool(), getOption(MapPortNatpmp).toBool()); + } break; case MapPortNatpmp: // core option - can be changed on-the-fly - settings.setValue("fUseNatpmp", value.toBool()); + if (changed()) { + update(value.toBool()); + node().mapPort(getOption(MapPortUPnP).toBool(), value.toBool()); + } break; case MinimizeOnClose: fMinimizeOnClose = value.toBool(); @@ -463,66 +480,66 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value) // default proxy case ProxyUse: - if (settings.value("fUseProxy") != value) { - settings.setValue("fUseProxy", value.toBool()); + if (changed()) { + update(ProxyString(value.toBool(), m_proxy_ip, m_proxy_port)); setRestartRequired(true); } break; - case ProxyIP: { - auto ip_port = GetProxySetting(settings, "addrProxy"); - if (!ip_port.is_set || ip_port.ip != value.toString()) { - ip_port.ip = value.toString(); - SetProxySetting(settings, "addrProxy", ip_port); - setRestartRequired(true); + case ProxyIP: + if (changed()) { + m_proxy_ip = value.toString(); + if (getOption(ProxyUse).toBool()) { + update(ProxyString(true, m_proxy_ip, m_proxy_port)); + setRestartRequired(true); + } } - } - break; - case ProxyPort: { - auto ip_port = GetProxySetting(settings, "addrProxy"); - if (!ip_port.is_set || ip_port.port != value.toString()) { - ip_port.port = value.toString(); - SetProxySetting(settings, "addrProxy", ip_port); - setRestartRequired(true); + break; + case ProxyPort: + if (changed()) { + m_proxy_port = value.toString(); + if (getOption(ProxyUse).toBool()) { + update(ProxyString(true, m_proxy_ip, m_proxy_port)); + setRestartRequired(true); + } } - } - break; + break; // separate Tor proxy case ProxyUseTor: - if (settings.value("fUseSeparateProxyTor") != value) { - settings.setValue("fUseSeparateProxyTor", value.toBool()); + if (changed()) { + update(ProxyString(value.toBool(), m_onion_ip, m_onion_port)); setRestartRequired(true); } break; - case ProxyIPTor: { - auto ip_port = GetProxySetting(settings, "addrSeparateProxyTor"); - if (!ip_port.is_set || ip_port.ip != value.toString()) { - ip_port.ip = value.toString(); - SetProxySetting(settings, "addrSeparateProxyTor", ip_port); - setRestartRequired(true); + case ProxyIPTor: + if (changed()) { + m_onion_ip = value.toString(); + if (getOption(ProxyUseTor).toBool()) { + update(ProxyString(true, m_onion_ip, m_onion_port)); + setRestartRequired(true); + } } - } - break; - case ProxyPortTor: { - auto ip_port = GetProxySetting(settings, "addrSeparateProxyTor"); - if (!ip_port.is_set || ip_port.port != value.toString()) { - ip_port.port = value.toString(); - SetProxySetting(settings, "addrSeparateProxyTor", ip_port); - setRestartRequired(true); + break; + case ProxyPortTor: + if (changed()) { + m_onion_port = value.toString(); + if (getOption(ProxyUseTor).toBool()) { + update(ProxyString(true, m_onion_ip, m_onion_port)); + setRestartRequired(true); + } } - } - break; + break; #ifdef ENABLE_WALLET case SpendZeroConfChange: - if (settings.value("bSpendZeroConfChange") != value) { - 
settings.setValue("bSpendZeroConfChange", value); + if (changed()) { + update(value.toBool()); setRestartRequired(true); } break; case ExternalSignerPath: - if (settings.value("external_signer_path") != value.toString()) { - settings.setValue("external_signer_path", value.toString()); + if (changed()) { + update(value.toString().toStdString()); setRestartRequired(true); } break; @@ -542,8 +559,8 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value) } break; case Language: - if (settings.value("language") != value) { - settings.setValue("language", value); + if (changed()) { + update(value.toString().toStdString()); setRestartRequired(true); } break; @@ -562,38 +579,36 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value) settings.setValue("enable_psbt_controls", m_enable_psbt_controls); break; case Prune: - if (settings.value("bPrune") != value) { - settings.setValue("bPrune", value); + if (changed()) { + update(PruneSetting(value.toBool(), m_prune_size_gb)); setRestartRequired(true); } break; case PruneSize: - if (settings.value("nPruneSize") != value) { - settings.setValue("nPruneSize", value); - setRestartRequired(true); + if (changed()) { + m_prune_size_gb = ParsePruneSizeGB(value); + if (getOption(Prune).toBool()) { + update(PruneSetting(true, m_prune_size_gb)); + setRestartRequired(true); + } } break; case DatabaseCache: - if (settings.value("nDatabaseCache") != value) { - settings.setValue("nDatabaseCache", value); + if (changed()) { + update(static_cast<int64_t>(value.toLongLong())); setRestartRequired(true); } break; case ThreadsScriptVerif: - if (settings.value("nThreadsScriptVerif") != value) { - settings.setValue("nThreadsScriptVerif", value); + if (changed()) { + update(static_cast<int64_t>(value.toLongLong())); setRestartRequired(true); } break; case Listen: - if (settings.value("fListen") != value) { - settings.setValue("fListen", value); - setRestartRequired(true); - } - break; case Server: - if (settings.value("server") != value) { - settings.setValue("server", value); + if (changed()) { + update(value.toBool()); setRestartRequired(true); } break; @@ -654,4 +669,49 @@ void OptionsModel::checkAndMigrate() if (settings.contains("addrSeparateProxyTor") && settings.value("addrSeparateProxyTor").toString().endsWith("%2")) { settings.setValue("addrSeparateProxyTor", GetDefaultProxyAddress()); } + + // Migrate and delete legacy GUI settings that have now moved to <datadir>/settings.json. 
+ auto migrate_setting = [&](OptionID option, const QString& qt_name) { + if (!settings.contains(qt_name)) return; + QVariant value = settings.value(qt_name); + if (node().getPersistentSetting(SettingName(option)).isNull()) { + if (option == ProxyIP) { + ProxySetting parsed = ParseProxyString(value.toString()); + setOption(ProxyIP, parsed.ip); + setOption(ProxyPort, parsed.port); + } else if (option == ProxyIPTor) { + ProxySetting parsed = ParseProxyString(value.toString()); + setOption(ProxyIPTor, parsed.ip); + setOption(ProxyPortTor, parsed.port); + } else { + setOption(option, value); + } + } + settings.remove(qt_name); + }; + + migrate_setting(DatabaseCache, "nDatabaseCache"); + migrate_setting(ThreadsScriptVerif, "nThreadsScriptVerif"); +#ifdef ENABLE_WALLET + migrate_setting(SpendZeroConfChange, "bSpendZeroConfChange"); + migrate_setting(ExternalSignerPath, "external_signer_path"); +#endif + migrate_setting(MapPortUPnP, "fUseUPnP"); + migrate_setting(MapPortNatpmp, "fUseNatpmp"); + migrate_setting(Listen, "fListen"); + migrate_setting(Server, "server"); + migrate_setting(PruneSize, "nPruneSize"); + migrate_setting(Prune, "bPrune"); + migrate_setting(ProxyIP, "addrProxy"); + migrate_setting(ProxyUse, "fUseProxy"); + migrate_setting(ProxyIPTor, "addrSeparateProxyTor"); + migrate_setting(ProxyUseTor, "fUseSeparateProxyTor"); + migrate_setting(Language, "language"); + + // In case migrating QSettings caused any settings value to change, rerun + // parameter interaction code to update other settings. This is particularly + // important for the -listen setting, which should cause -listenonion, -upnp, + // and other settings to default to false if it was set to false. + // (https://github.com/bitcoin-core/gui/issues/567). + node().initParameterInteraction(); } diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h index 92f80ecf21..42b89c5029 100644 --- a/src/qt/optionsmodel.h +++ b/src/qt/optionsmodel.h @@ -13,6 +13,7 @@ #include <assert.h> +struct bilingual_str; namespace interfaces { class Node; } @@ -41,7 +42,7 @@ class OptionsModel : public QAbstractListModel Q_OBJECT public: - explicit OptionsModel(interfaces::Node& node, QObject *parent = nullptr, bool resetSettings = false); + explicit OptionsModel(interfaces::Node& node, QObject *parent = nullptr); enum OptionID { StartAtStartup, // bool @@ -74,7 +75,7 @@ public: OptionIDRowCount, }; - void Init(bool resetSettings = false); + bool Init(bilingual_str& error); void Reset(); int rowCount(const QModelIndex & parent = QModelIndex()) const override; @@ -98,8 +99,7 @@ public: const QString& getOverriddenByCommandLine() { return strOverriddenByCommandLine; } /* Explicit setters */ - void SetPruneEnabled(bool prune, bool force = false); - void SetPruneTargetGB(int prune_target_gb, bool force = false); + void SetPruneTargetGB(int prune_target_gb); /* Restart flag helper */ void setRestartRequired(bool fRequired); @@ -120,6 +120,16 @@ private: bool fCoinControlFeatures; bool m_sub_fee_from_amount; bool m_enable_psbt_controls; + + //! In-memory settings for display. These are stored persistently by the + //! bitcoin node but it's also nice to store them in memory to prevent them + //! getting cleared when enable/disable toggles are used in the GUI. 
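The migrate_setting lambda above performs a one-way, write-once migration: a legacy Qt key is copied into settings.json only when no value exists there yet, and the legacy key is removed either way. A minimal sketch of that idea, with plain maps standing in for QSettings and the node's settings store; the MigrateSetting name and the sample values are illustrative.

    // Minimal sketch of the "migrate once, then delete the legacy key" pattern.
    // Plain maps stand in for QSettings and settings.json.
    #include <iostream>
    #include <map>
    #include <string>

    using Store = std::map<std::string, std::string>;

    void MigrateSetting(Store& legacy, Store& node_settings,
                        const std::string& legacy_key, const std::string& node_key)
    {
        auto it = legacy.find(legacy_key);
        if (it == legacy.end()) return;       // nothing stored under the old key
        if (!node_settings.count(node_key)) { // never overwrite an existing value
            node_settings[node_key] = it->second;
        }
        legacy.erase(it);                     // legacy key is dropped either way
    }

    int main()
    {
        Store qsettings{{"nDatabaseCache", "600"}, {"fListen", "false"}};
        Store settings_json{{"listen", "true"}}; // pretend settings.json already has "listen"

        MigrateSetting(qsettings, settings_json, "nDatabaseCache", "dbcache");
        MigrateSetting(qsettings, settings_json, "fListen", "listen");

        for (const auto& [k, v] : settings_json) std::cout << k << " = " << v << "\n";
        // dbcache = 600, listen = true (the pre-existing value wins)
        return 0;
    }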
+ int m_prune_size_gb; + QString m_proxy_ip; + QString m_proxy_port; + QString m_onion_ip; + QString m_onion_port; + /* settings that were overridden by command-line */ QString strOverriddenByCommandLine; @@ -128,6 +138,7 @@ private: // Check settings version and upgrade default values if required void checkAndMigrate(); + Q_SIGNALS: void displayUnitChanged(BitcoinUnit unit); void coinControlFeaturesChanged(bool); diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index be6f604932..9f87c15c94 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -15,7 +15,7 @@ #include <chainparams.h> #include <interfaces/node.h> #include <key_io.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <policy/policy.h> #include <util/system.h> #include <wallet/wallet.h> diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index d8daccecbd..bd44d12781 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -21,7 +21,7 @@ #include <chainparams.h> #include <interfaces/node.h> #include <key_io.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <policy/fees.h> #include <txmempool.h> #include <validation.h> @@ -543,15 +543,8 @@ void SendCoinsDialog::sendButtonClicked([[maybe_unused]] bool checked) // failed, or more signatures are needed. if (broadcast) { // now send the prepared transaction - WalletModel::SendCoinsReturn sendStatus = model->sendCoins(*m_current_transaction); - // process sendStatus and on error generate message shown to user - processSendCoinsReturn(sendStatus); - - if (sendStatus.status == WalletModel::OK) { - Q_EMIT coinsSent(m_current_transaction->getWtx()->GetHash()); - } else { - send_failure = true; - } + model->sendCoins(*m_current_transaction); + Q_EMIT coinsSent(m_current_transaction->getWtx()->GetHash()); } } if (!send_failure) { diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp index 3b7a40438b..581735263d 100644 --- a/src/qt/test/addressbooktests.cpp +++ b/src/qt/test/addressbooktests.cpp @@ -128,6 +128,8 @@ void TestAddAddressesToSendBook(interfaces::Node& node) // Initialize relevant QT models. 
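Several hunks in this range replace work done in the OptionsModel constructor with an explicit, fallible Init() that reports an error and lets the caller abort startup. A minimal sketch of that construct-then-Init pattern; the SettingsSketch class is a stand-in, with a plain std::string instead of bilingual_str and no Qt involved.

    // Minimal sketch of the construct-then-Init() pattern used for OptionsModel:
    // the constructor stays trivial, Init() does the work and reports failures
    // through an error out-parameter.
    #include <exception>
    #include <iostream>
    #include <string>

    class SettingsSketch
    {
    public:
        SettingsSketch() = default; // cheap, cannot fail

        [[nodiscard]] bool Init(std::string& error)
        {
            // Pretend we parse a stored value and it turns out to be malformed.
            const std::string stored = "not-a-number";
            try {
                m_dbcache = std::stoi(stored);
            } catch (const std::exception& e) {
                error = std::string("Could not read setting \"dbcache\", ") + e.what() + ".";
                return false;
            }
            return true;
        }

    private:
        int m_dbcache{0};
    };

    int main()
    {
        SettingsSketch settings;
        std::string error;
        if (!settings.Init(error)) {
            std::cerr << error << "\n"; // the caller can show a dialog and exit
            return 1;
        }
        return 0;
    }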
std::unique_ptr<const PlatformStyle> platformStyle(PlatformStyle::instantiate("other")); OptionsModel optionsModel(node); + bilingual_str error; + QVERIFY(optionsModel.Init(error)); ClientModel clientModel(node, &optionsModel); WalletContext& context = *node.walletLoader().context(); AddWallet(context, wallet); diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp index 9648ef6188..6fc7a52435 100644 --- a/src/qt/test/apptests.cpp +++ b/src/qt/test/apptests.cpp @@ -70,14 +70,9 @@ void AppTests::appTests() } #endif - fs::create_directories([] { - BasicTestingSetup test{CBaseChainParams::REGTEST}; // Create a temp data directory to backup the gui settings to - return gArgs.GetDataDirNet() / "blocks"; - }()); - qRegisterMetaType<interfaces::BlockAndHeaderTipInfo>("interfaces::BlockAndHeaderTipInfo"); m_app.parameterSetup(); - m_app.createOptionsModel(true /* reset settings */); + QVERIFY(m_app.createOptionsModel(true /* reset settings */)); QScopedPointer<const NetworkStyle> style(NetworkStyle::instantiate(Params().NetworkIDString())); m_app.setupPlatformStyle(); m_app.createWindow(style.data()); diff --git a/src/qt/test/optiontests.cpp b/src/qt/test/optiontests.cpp index 2ef9230c59..3bd0af19ad 100644 --- a/src/qt/test/optiontests.cpp +++ b/src/qt/test/optiontests.cpp @@ -13,6 +13,63 @@ #include <univalue.h> +#include <fstream> + +OptionTests::OptionTests(interfaces::Node& node) : m_node(node) +{ + gArgs.LockSettings([&](util::Settings& s) { m_previous_settings = s; }); +} + +void OptionTests::init() +{ + // reset args + gArgs.LockSettings([&](util::Settings& s) { s = m_previous_settings; }); + gArgs.ClearPathCache(); +} + +void OptionTests::migrateSettings() +{ + // Set legacy QSettings and verify that they get cleared and migrated to + // settings.json + QSettings settings; + settings.setValue("nDatabaseCache", 600); + settings.setValue("nThreadsScriptVerif", 12); + settings.setValue("fUseUPnP", false); + settings.setValue("fListen", false); + settings.setValue("bPrune", true); + settings.setValue("nPruneSize", 3); + settings.setValue("fUseProxy", true); + settings.setValue("addrProxy", "proxy:123"); + settings.setValue("fUseSeparateProxyTor", true); + settings.setValue("addrSeparateProxyTor", "onion:234"); + + settings.sync(); + + OptionsModel options{m_node}; + bilingual_str error; + QVERIFY(options.Init(error)); + QVERIFY(!settings.contains("nDatabaseCache")); + QVERIFY(!settings.contains("nThreadsScriptVerif")); + QVERIFY(!settings.contains("fUseUPnP")); + QVERIFY(!settings.contains("fListen")); + QVERIFY(!settings.contains("bPrune")); + QVERIFY(!settings.contains("nPruneSize")); + QVERIFY(!settings.contains("fUseProxy")); + QVERIFY(!settings.contains("addrProxy")); + QVERIFY(!settings.contains("fUseSeparateProxyTor")); + QVERIFY(!settings.contains("addrSeparateProxyTor")); + + std::ifstream file(gArgs.GetDataDirNet() / "settings.json"); + QCOMPARE(std::string(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()).c_str(), "{\n" + " \"dbcache\": \"600\",\n" + " \"listen\": false,\n" + " \"onion\": \"onion:234\",\n" + " \"par\": \"12\",\n" + " \"proxy\": \"proxy:123\",\n" + " \"prune\": \"2861\"\n" + "}\n"); +} + void OptionTests::integerGetArgBug() { // Test regression https://github.com/bitcoin/bitcoin/issues/24457. 
Ensure @@ -23,7 +80,8 @@ void OptionTests::integerGetArgBug() settings.rw_settings["prune"] = 3814; }); gArgs.WriteSettingsFile(); - OptionsModel{m_node}; + bilingual_str error; + QVERIFY(OptionsModel{m_node}.Init(error)); gArgs.LockSettings([&](util::Settings& settings) { settings.rw_settings.erase("prune"); }); @@ -36,8 +94,6 @@ void OptionTests::parametersInteraction() // It was fixed via https://github.com/bitcoin-core/gui/pull/568. // With fListen=false in ~/.config/Bitcoin/Bitcoin-Qt.conf and all else left as default, // bitcoin-qt should set both -listen and -listenonion to false and start successfully. - gArgs.ClearPathCache(); - gArgs.LockSettings([&](util::Settings& s) { s.forced_settings.erase("listen"); s.forced_settings.erase("listenonion"); @@ -48,7 +104,8 @@ void OptionTests::parametersInteraction() QSettings settings; settings.setValue("fListen", false); - OptionsModel{m_node}; + bilingual_str error; + QVERIFY(OptionsModel{m_node}.Init(error)); const bool expected{false}; diff --git a/src/qt/test/optiontests.h b/src/qt/test/optiontests.h index 257a0b65be..286d785572 100644 --- a/src/qt/test/optiontests.h +++ b/src/qt/test/optiontests.h @@ -6,6 +6,8 @@ #define BITCOIN_QT_TEST_OPTIONTESTS_H #include <qt/optionsmodel.h> +#include <univalue.h> +#include <util/settings.h> #include <QObject> @@ -13,14 +15,17 @@ class OptionTests : public QObject { Q_OBJECT public: - explicit OptionTests(interfaces::Node& node) : m_node(node) {} + explicit OptionTests(interfaces::Node& node); private Q_SLOTS: + void init(); // called before each test function execution. + void migrateSettings(); void integerGetArgBug(); void parametersInteraction(); private: interfaces::Node& m_node; + util::Settings m_previous_settings; }; #endif // BITCOIN_QT_TEST_OPTIONTESTS_H diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp index aeedd92834..de23f51c92 100644 --- a/src/qt/test/test_main.cpp +++ b/src/qt/test/test_main.cpp @@ -58,9 +58,10 @@ int main(int argc, char* argv[]) // regtest params. // // All tests must use their own testing setup (if needed). - { + fs::create_directories([] { BasicTestingSetup dummy{CBaseChainParams::REGTEST}; - } + return gArgs.GetDataDirNet() / "blocks"; + }()); std::unique_ptr<interfaces::Init> init = interfaces::MakeGuiInit(argc, argv); gArgs.ForceSetArg("-listen", "0"); diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp index bc06f0f23b..7671bfc739 100644 --- a/src/qt/test/wallettests.cpp +++ b/src/qt/test/wallettests.cpp @@ -185,6 +185,8 @@ void TestGUI(interfaces::Node& node) SendCoinsDialog sendCoinsDialog(platformStyle.get()); TransactionView transactionView(platformStyle.get()); OptionsModel optionsModel(node); + bilingual_str error; + QVERIFY(optionsModel.Init(error)); ClientModel clientModel(node, &optionsModel); WalletContext& context = *node.walletLoader().context(); AddWallet(context, wallet); diff --git a/src/qt/transactionoverviewwidget.cpp b/src/qt/transactionoverviewwidget.cpp new file mode 100644 index 0000000000..360a1364fb --- /dev/null +++ b/src/qt/transactionoverviewwidget.cpp @@ -0,0 +1,27 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <qt/transactionoverviewwidget.h> + +#include <qt/transactiontablemodel.h> + +#include <QListView> +#include <QSize> +#include <QSizePolicy> + +TransactionOverviewWidget::TransactionOverviewWidget(QWidget* parent) + : QListView(parent) {} + +QSize TransactionOverviewWidget::sizeHint() const +{ + return {sizeHintForColumn(TransactionTableModel::ToAddress), QListView::sizeHint().height()}; +} + +void TransactionOverviewWidget::showEvent(QShowEvent* event) +{ + Q_UNUSED(event); + QSizePolicy sp = sizePolicy(); + sp.setHorizontalPolicy(QSizePolicy::Minimum); + setSizePolicy(sp); +} diff --git a/src/qt/transactionoverviewwidget.h b/src/qt/transactionoverviewwidget.h index 2bdead7bc4..0572e84090 100644 --- a/src/qt/transactionoverviewwidget.h +++ b/src/qt/transactionoverviewwidget.h @@ -5,11 +5,8 @@ #ifndef BITCOIN_QT_TRANSACTIONOVERVIEWWIDGET_H #define BITCOIN_QT_TRANSACTIONOVERVIEWWIDGET_H -#include <qt/transactiontablemodel.h> - #include <QListView> #include <QSize> -#include <QSizePolicy> QT_BEGIN_NAMESPACE class QShowEvent; @@ -21,21 +18,11 @@ class TransactionOverviewWidget : public QListView Q_OBJECT public: - explicit TransactionOverviewWidget(QWidget* parent = nullptr) : QListView(parent) {} - - QSize sizeHint() const override - { - return {sizeHintForColumn(TransactionTableModel::ToAddress), QListView::sizeHint().height()}; - } + explicit TransactionOverviewWidget(QWidget* parent = nullptr); + QSize sizeHint() const override; protected: - void showEvent(QShowEvent* event) override - { - Q_UNUSED(event); - QSizePolicy sp = sizePolicy(); - sp.setHorizontalPolicy(QSizePolicy::Minimum); - setSizePolicy(sp); - } + void showEvent(QShowEvent* event) override; }; #endif // BITCOIN_QT_TRANSACTIONOVERVIEWWIDGET_H diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 47f3ba7e7f..b7432a0d77 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -17,7 +17,7 @@ #include <qt/transactiontablemodel.h> #include <qt/walletmodel.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <chrono> #include <optional> diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp index 11bea85b21..6b38e207d3 100644 --- a/src/qt/walletframe.cpp +++ b/src/qt/walletframe.cpp @@ -5,7 +5,7 @@ #include <qt/walletframe.h> #include <fs.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <psbt.h> #include <qt/guiutil.h> #include <qt/overviewpage.h> diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 1f6c90af4a..ab4d1cae3f 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -21,7 +21,7 @@ #include <interfaces/handler.h> #include <interfaces/node.h> #include <key_io.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <psbt.h> #include <util/system.h> // for GetBoolArg #include <util/translation.h> @@ -234,7 +234,7 @@ WalletModel::SendCoinsReturn WalletModel::prepareTransaction(WalletModelTransact return SendCoinsReturn(OK); } -WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &transaction) +void WalletModel::sendCoins(WalletModelTransaction& transaction) { QByteArray transaction_array; /* store serialized transaction */ @@ -280,8 +280,6 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran } checkBalanceChanged(m_wallet->getBalances()); // update balance immediately, otherwise there could be a short noticeable delay until pollBalanceChanged hits - - return SendCoinsReturn(OK); } OptionsModel* 
WalletModel::getOptionsModel() const diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h index d524d48111..a52290dee8 100644 --- a/src/qt/walletmodel.h +++ b/src/qt/walletmodel.h @@ -102,7 +102,7 @@ public: SendCoinsReturn prepareTransaction(WalletModelTransaction &transaction, const wallet::CCoinControl& coinControl); // Send coins to a list of recipients - SendCoinsReturn sendCoins(WalletModelTransaction &transaction); + void sendCoins(WalletModelTransaction& transaction); // Wallet encryption bool setWalletEncrypted(const SecureString& passphrase); diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp index 2f92c57607..10fc0fb6d0 100644 --- a/src/qt/walletview.cpp +++ b/src/qt/walletview.cpp @@ -19,7 +19,7 @@ #include <qt/walletmodel.h> #include <interfaces/node.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <util/strencodings.h> #include <QAction> diff --git a/src/rest.cpp b/src/rest.cpp index 1b90baaf95..43c248b03b 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -799,10 +799,10 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std:: if (!maybe_chainman) return false; ChainstateManager& chainman = *maybe_chainman; { - auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool& mempool) { + auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool* mempool) { for (const COutPoint& vOutPoint : vOutPoints) { Coin coin; - bool hit = !mempool.isSpent(vOutPoint) && view.GetCoin(vOutPoint, coin); + bool hit = (!mempool || !mempool->isSpent(vOutPoint)) && view.GetCoin(vOutPoint, coin); hits.push_back(hit); if (hit) outs.emplace_back(std::move(coin)); } @@ -815,10 +815,10 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std:: LOCK2(cs_main, mempool->cs); CCoinsViewCache& viewChain = chainman.ActiveChainstate().CoinsTip(); CCoinsViewMemPool viewMempool(&viewChain, *mempool); - process_utxos(viewMempool, *mempool); + process_utxos(viewMempool, mempool); } else { - LOCK(cs_main); // no need to lock mempool! 
- process_utxos(chainman.ActiveChainstate().CoinsTip(), CTxMemPool()); + LOCK(cs_main); + process_utxos(chainman.ActiveChainstate().CoinsTip(), nullptr); } for (size_t i = 0; i < hits.size(); ++i) { diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index f2186c131f..9766a237c7 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -66,7 +66,7 @@ struct CUpdatedBlock int height; }; -static Mutex cs_blockchange; +static GlobalMutex cs_blockchange; static std::condition_variable cond_blockchange; static CUpdatedBlock latestblock GUARDED_BY(cs_blockchange); @@ -790,7 +790,7 @@ static RPCHelpMan pruneblockchain() const CBlockIndex& block{*CHECK_NONFATAL(active_chain.Tip())}; const CBlockIndex* last_block{active_chainstate.m_blockman.GetFirstStoredBlock(block)}; - return static_cast<uint64_t>(last_block->nHeight); + return static_cast<int64_t>(last_block->nHeight - 1); }, }; } @@ -2040,13 +2040,7 @@ static RPCHelpMan scantxoutset() "[scanobjects,...]"}, }, { - RPCResult{"When action=='abort'", RPCResult::Type::BOOL, "", ""}, - RPCResult{"When action=='status' and no scan is in progress", RPCResult::Type::NONE, "", ""}, - RPCResult{"When action=='status' and scan is in progress", RPCResult::Type::OBJ, "", "", - { - {RPCResult::Type::NUM, "progress", "The scan progress"}, - }}, - RPCResult{"When action=='start'", RPCResult::Type::OBJ, "", "", { + RPCResult{"when action=='start'; only returns after scan completes", RPCResult::Type::OBJ, "", "", { {RPCResult::Type::BOOL, "success", "Whether the scan was completed"}, {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs scanned"}, {RPCResult::Type::NUM, "height", "The current block height (index)"}, @@ -2065,6 +2059,12 @@ static RPCHelpMan scantxoutset() }}, {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount of all found unspent outputs in " + CURRENCY_UNIT}, }}, + RPCResult{"when action=='abort'", RPCResult::Type::BOOL, "success", "True if scan will be aborted (not necessarily before this RPC returns), or false if there is no scan to abort"}, + RPCResult{"when action=='status' and a scan is currently in progress", RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::NUM, "progress", "Approximate percent complete"}, + }}, + RPCResult{"when action=='status' and no scan is in progress - possibly already completed", RPCResult::Type::NONE, "", ""}, }, RPCExamples{ HelpExampleCli("scantxoutset", "start \'[\"" + EXAMPLE_DESCRIPTOR_RAW + "\"]\'") + diff --git a/src/rpc/fees.cpp b/src/rpc/fees.cpp index bfec0780aa..1873bc1587 100644 --- a/src/rpc/fees.cpp +++ b/src/rpc/fees.cpp @@ -7,6 +7,7 @@ #include <policy/feerate.h> #include <policy/fees.h> #include <policy/policy.h> +#include <policy/settings.h> #include <rpc/protocol.h> #include <rpc/request.h> #include <rpc/server.h> @@ -16,7 +17,6 @@ #include <univalue.h> #include <util/fees.h> #include <util/system.h> -#include <validation.h> #include <algorithm> #include <array> diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 90dc86cd01..97ec95a166 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -8,6 +8,7 @@ #include <core_io.h> #include <fs.h> #include <policy/rbf.h> +#include <policy/settings.h> #include <primitives/transaction.h> #include <rpc/server.h> #include <rpc/server_util.h> @@ -15,7 +16,6 @@ #include <txmempool.h> #include <univalue.h> #include <util/moneystr.h> -#include <validation.h> using node::DEFAULT_MAX_RAW_TX_FEE_RATE; using node::NodeContext; diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 
8fb6daf0cb..ea6db1e9a0 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -144,7 +144,7 @@ static UniValue generateBlocks(ChainstateManager& chainman, const CTxMemPool& me { UniValue blockHashes(UniValue::VARR); while (nGenerate > 0 && !ShutdownRequested()) { - std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler{chainman.ActiveChainstate(), mempool}.CreateNewBlock(coinbase_script)); + std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler{chainman.ActiveChainstate(), &mempool}.CreateNewBlock(coinbase_script)); if (!pblocktemplate.get()) throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); CBlock *pblock = &pblocktemplate->block; @@ -354,8 +354,7 @@ static RPCHelpMan generateblock() { LOCK(cs_main); - CTxMemPool empty_mempool; - std::unique_ptr<CBlockTemplate> blocktemplate(BlockAssembler{chainman.ActiveChainstate(), empty_mempool}.CreateNewBlock(coinbase_script)); + std::unique_ptr<CBlockTemplate> blocktemplate(BlockAssembler{chainman.ActiveChainstate(), nullptr}.CreateNewBlock(coinbase_script)); if (!blocktemplate) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); } @@ -753,7 +752,7 @@ static RPCHelpMan getblocktemplate() // Create new block CScript scriptDummy = CScript() << OP_TRUE; - pblocktemplate = BlockAssembler{active_chainstate, mempool}.CreateNewBlock(scriptDummy); + pblocktemplate = BlockAssembler{active_chainstate, &mempool}.CreateNewBlock(scriptDummy); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); diff --git a/src/rpc/output_script.cpp b/src/rpc/output_script.cpp index 115a656e12..f4bb76f50f 100644 --- a/src/rpc/output_script.cpp +++ b/src/rpc/output_script.cpp @@ -163,11 +163,11 @@ static RPCHelpMan createmultisig() result.pushKV("descriptor", descriptor->ToString()); UniValue warnings(UniValue::VARR); - if (!request.params[2].isNull() && OutputTypeFromDestination(dest) != output_type) { + if (descriptor->GetOutputType() != output_type) { // Only warns if the user has explicitly chosen an address type we cannot generate warnings.push_back("Unable to make chosen address type, please ensure no uncompressed public keys are present."); } - if (warnings.size()) result.pushKV("warnings", warnings); + if (!warnings.empty()) result.pushKV("warnings", warnings); return result; }, diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index af00acdc9f..66ed18045e 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -19,14 +19,14 @@ #include <mutex> #include <unordered_map> -static Mutex g_rpc_warmup_mutex; +static GlobalMutex g_rpc_warmup_mutex; static std::atomic<bool> g_rpc_running{false}; static bool fRPCInWarmup GUARDED_BY(g_rpc_warmup_mutex) = true; static std::string rpcWarmupStatus GUARDED_BY(g_rpc_warmup_mutex) = "RPC server started"; /* Timer-creating functions */ static RPCTimerInterface* timerInterface = nullptr; /* Map of name to timer. 
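Both the rest_getutxos change and the BlockAssembler changes above move from "construct an empty CTxMemPool" to "pass a CTxMemPool* that may be null", so callers that do not want mempool filtering simply pass nullptr. A small standalone sketch of that nullable-dependency pattern; MempoolSketch and FilterUnspent are stand-ins, not the real classes.

    // Standalone sketch of the nullptr-means-"no mempool" pattern adopted by
    // rest_getutxos and BlockAssembler in this diff.
    #include <iostream>
    #include <set>
    #include <utility>
    #include <vector>

    using OutPoint = std::pair<int, int>; // (txid, vout) stand-in

    struct MempoolSketch {
        std::set<OutPoint> spent; // outpoints spent by in-mempool transactions
        bool IsSpent(const OutPoint& out) const { return spent.count(out) > 0; }
    };

    // mempool may be null: callers that only care about confirmed coins pass
    // nullptr instead of constructing an empty mempool object.
    std::vector<bool> FilterUnspent(const std::vector<OutPoint>& outpoints, const MempoolSketch* mempool)
    {
        std::vector<bool> hits;
        hits.reserve(outpoints.size());
        for (const OutPoint& out : outpoints) {
            const bool spent_in_mempool = mempool && mempool->IsSpent(out);
            hits.push_back(!spent_in_mempool); // a real caller would also check the UTXO view
        }
        return hits;
    }

    int main()
    {
        MempoolSketch mempool;
        mempool.spent.insert({1, 0});
        const std::vector<OutPoint> queries{{1, 0}, {2, 1}};

        auto with_mempool = FilterUnspent(queries, &mempool);   // {false, true}
        auto without_mempool = FilterUnspent(queries, nullptr); // {true, true}
        std::cout << with_mempool[0] << without_mempool[0] << "\n"; // 01
        return 0;
    }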
*/ -static Mutex g_deadline_timers_mutex; +static GlobalMutex g_deadline_timers_mutex; static std::map<std::string, std::unique_ptr<RPCTimerBase> > deadlineTimers GUARDED_BY(g_deadline_timers_mutex); static bool ExecuteCommand(const CRPCCommand& command, const JSONRPCRequest& request, UniValue& result, bool last_handler); diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 index dda770e001..9cb54de098 100644 --- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -1,7 +1,7 @@ dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell. AC_DEFUN([SECP_64BIT_ASM_CHECK],[ AC_MSG_CHECKING(for x86_64 assembly availability) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ +AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include <stdint.h>]],[[ uint64_t a = 11, tmp; __asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx"); diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h index 86ab7e556d..dddab346ae 100644 --- a/src/secp256k1/include/secp256k1.h +++ b/src/secp256k1/include/secp256k1.h @@ -141,9 +141,13 @@ typedef int (*secp256k1_nonce_function)( # define SECP256K1_NO_BUILD #endif +/** At secp256k1 build-time DLL_EXPORT is defined when building objects destined + * for a shared library, but not for those intended for static libraries. + */ + #ifndef SECP256K1_API # if defined(_WIN32) -# ifdef SECP256K1_BUILD +# if defined(SECP256K1_BUILD) && defined(DLL_EXPORT) # define SECP256K1_API __declspec(dllexport) # else # define SECP256K1_API diff --git a/src/secp256k1/sage/prove_group_implementations.sage b/src/secp256k1/sage/prove_group_implementations.sage index 96ce33506a..652bd87f11 100644 --- a/src/secp256k1/sage/prove_group_implementations.sage +++ b/src/secp256k1/sage/prove_group_implementations.sage @@ -40,29 +40,26 @@ def formula_secp256k1_gej_add_var(branch, a, b): s2 = s2 * a.Z h = -u1 h = h + u2 - i = -s1 - i = i + s2 + i = -s2 + i = i + s1 if branch == 2: r = formula_secp256k1_gej_double_var(a) return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r) if branch == 3: return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 + t = h * b.Z + rz = a.Z * t h2 = h^2 + h2 = -h2 h3 = h2 * h - h = h * b.Z - rz = a.Z * h t = u1 * h2 - rx = t - rx = rx * 2 + rx = i^2 rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i + rx = rx + t + rx = rx + t + t = t + rx + ry = t * i h3 = h3 * s1 - h3 = -h3 ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) @@ -80,28 +77,25 @@ def formula_secp256k1_gej_add_ge_var(branch, a, b): s2 = s2 * a.Z h = -u1 h = h + u2 - i = -s1 - i = i + s2 + i = -s2 + i = i + s1 if (branch == 2): r = formula_secp256k1_gej_double_var(a) return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if (branch == 3): return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 - h2 = h^2 - h3 = h * h2 rz = a.Z * h + h2 = h^2 + h2 = -h2 + h3 = h2 * h t = u1 * h2 - rx = t - rx = rx * 2 + rx = i^2 rx = rx + h3 - rx = -rx - rx = rx + 
i2 - ry = -rx - ry = ry + t - ry = ry * i + rx = rx + t + rx = rx + t + t = t + rx + ry = t * i h3 = h3 * s1 - h3 = -h3 ry = ry + h3 return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) @@ -109,14 +103,15 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b): """libsecp256k1's secp256k1_gej_add_zinv_var""" bzinv = b.Z^(-1) if branch == 0: - return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a) - if branch == 1: + rinf = b.Infinity bzinv2 = bzinv^2 bzinv3 = bzinv2 * bzinv rx = b.X * bzinv2 ry = b.Y * bzinv3 rz = 1 - return (constraints(), constraints(zero={b.Infinity : 'b_finite'}, nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz)) + return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz, rinf)) + if branch == 1: + return (constraints(), constraints(zero={a.Infinity : 'a_finite'}, nonzero={b.Infinity : 'b_infinite'}), a) azz = a.Z * bzinv z12 = azz^2 u1 = a.X @@ -126,29 +121,25 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b): s2 = s2 * azz h = -u1 h = h + u2 - i = -s1 - i = i + s2 + i = -s2 + i = i + s1 if branch == 2: r = formula_secp256k1_gej_double_var(a) return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) if branch == 3: return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 + rz = a.Z * h h2 = h^2 - h3 = h * h2 - rz = a.Z - rz = rz * h + h2 = -h2 + h3 = h2 * h t = u1 * h2 - rx = t - rx = rx * 2 + rx = i^2 rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i + rx = rx + t + rx = rx + t + t = t + rx + ry = t * i h3 = h3 * s1 - h3 = -h3 ry = ry + h3 return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c index 3c145f306c..7eb3af28d7 100644 --- a/src/secp256k1/src/bench_internal.c +++ b/src/secp256k1/src/bench_internal.c @@ -254,6 +254,15 @@ void bench_group_add_affine_var(void* arg, int iters) { } } +void bench_group_add_zinv_var(void* arg, int iters) { + int i; + bench_inv *data = (bench_inv*)arg; + + for (i = 0; i < iters; i++) { + secp256k1_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y); + } +} + void bench_group_to_affine_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; @@ -376,6 +385,7 @@ int main(int argc, char **argv) { if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10); + if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_zinv_var", bench_group_add_zinv_var, bench_setup, NULL, &data, 10, iters*10); if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, 
iters); if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters); diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h index b19b02a01f..63735ab682 100644 --- a/src/secp256k1/src/group_impl.h +++ b/src/secp256k1/src/group_impl.h @@ -330,15 +330,14 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s } static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) { - /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ - secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + /* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */ + secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); *r = *b; return; } - if (b->infinity) { if (rzr != NULL) { secp256k1_fe_set_int(rzr, 1); @@ -347,7 +346,6 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons return; } - r->infinity = 0; secp256k1_fe_sqr(&z22, &b->z); secp256k1_fe_sqr(&z12, &a->z); secp256k1_fe_mul(&u1, &a->x, &z22); @@ -355,7 +353,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z); secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); + secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1); if (secp256k1_fe_normalizes_to_zero_var(&h)) { if (secp256k1_fe_normalizes_to_zero_var(&i)) { secp256k1_gej_double_var(r, a, rzr); @@ -367,24 +365,33 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons } return; } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - secp256k1_fe_mul(&h, &h, &b->z); + + r->infinity = 0; + secp256k1_fe_mul(&t, &h, &b->z); if (rzr != NULL) { - *rzr = h; + *rzr = t; } - secp256k1_fe_mul(&r->z, &a->z, &h); + secp256k1_fe_mul(&r->z, &a->z, &t); + + secp256k1_fe_sqr(&h2, &h); + secp256k1_fe_negate(&h2, &h2, 1); + secp256k1_fe_mul(&h3, &h2, &h); secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); + + secp256k1_fe_sqr(&r->x, &i); + secp256k1_fe_add(&r->x, &h3); + secp256k1_fe_add(&r->x, &t); + secp256k1_fe_add(&r->x, &t); + + secp256k1_fe_add(&t, &r->x); + secp256k1_fe_mul(&r->y, &t, &i); + secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_add(&r->y, &h3); } static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) { - /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + /* 8 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */ + secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); secp256k1_gej_set_ge(r, b); @@ -397,7 +404,6 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c *r = *a; return; } - r->infinity = 0; secp256k1_fe_sqr(&z12, &a->z); u1 = a->x; 
secp256k1_fe_normalize_weak(&u1); @@ -405,7 +411,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c s1 = a->y; secp256k1_fe_normalize_weak(&s1); secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); + secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1); if (secp256k1_fe_normalizes_to_zero_var(&h)) { if (secp256k1_fe_normalizes_to_zero_var(&i)) { secp256k1_gej_double_var(r, a, rzr); @@ -417,28 +423,33 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c } return; } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); + + r->infinity = 0; if (rzr != NULL) { *rzr = h; } secp256k1_fe_mul(&r->z, &a->z, &h); + + secp256k1_fe_sqr(&h2, &h); + secp256k1_fe_negate(&h2, &h2, 1); + secp256k1_fe_mul(&h3, &h2, &h); secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); + + secp256k1_fe_sqr(&r->x, &i); + secp256k1_fe_add(&r->x, &h3); + secp256k1_fe_add(&r->x, &t); + secp256k1_fe_add(&r->x, &t); + + secp256k1_fe_add(&t, &r->x); + secp256k1_fe_mul(&r->y, &t, &i); + secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_add(&r->y, &h3); } static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) { - /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; + /* 9 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */ + secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t; - if (b->infinity) { - *r = *a; - return; - } if (a->infinity) { secp256k1_fe bzinv2, bzinv3; r->infinity = b->infinity; @@ -449,7 +460,10 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe_set_int(&r->z, 1); return; } - r->infinity = 0; + if (b->infinity) { + *r = *a; + return; + } /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). 
Due to * secp256k1's isomorphism we can multiply the Z coordinates on both sides @@ -467,7 +481,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, s1 = a->y; secp256k1_fe_normalize_weak(&s1); secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az); secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); + secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1); if (secp256k1_fe_normalizes_to_zero_var(&h)) { if (secp256k1_fe_normalizes_to_zero_var(&i)) { secp256k1_gej_double_var(r, a, NULL); @@ -476,14 +490,23 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, } return; } - secp256k1_fe_sqr(&i2, &i); + + r->infinity = 0; + secp256k1_fe_mul(&r->z, &a->z, &h); + secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h); + secp256k1_fe_negate(&h2, &h2, 1); + secp256k1_fe_mul(&h3, &h2, &h); secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); + + secp256k1_fe_sqr(&r->x, &i); + secp256k1_fe_add(&r->x, &h3); + secp256k1_fe_add(&r->x, &t); + secp256k1_fe_add(&r->x, &t); + + secp256k1_fe_add(&t, &r->x); + secp256k1_fe_mul(&r->y, &t, &i); + secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_add(&r->y, &h3); } diff --git a/src/shutdown.cpp b/src/shutdown.cpp index fdf726b5f1..1dbc55aeb5 100644 --- a/src/shutdown.cpp +++ b/src/shutdown.cpp @@ -10,7 +10,7 @@ #endif #include <logging.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <util/tokenpipe.h> #include <warnings.h> diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index 6907749c6d..b7ef479675 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -202,7 +202,10 @@ void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len) size_t Win32LockedPageAllocator::GetLimit() { - // TODO is there a limit on Windows, how to get it? + size_t min, max; + if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) { + return min; + } return std::numeric_limits<size_t>::max(); } #endif diff --git a/src/sync.h b/src/sync.h index a175926113..7ec4b668ac 100644 --- a/src/sync.h +++ b/src/sync.h @@ -129,10 +129,22 @@ using RecursiveMutex = AnnotatedMixin<std::recursive_mutex>; /** Wrapped mutex: supports waiting but not recursive locking */ using Mutex = AnnotatedMixin<std::mutex>; +/** Different type to mark Mutex at global scope + * + * Thread safety analysis can't handle negative assertions about mutexes + * with global scope well, so mark them with a separate type, and + * eventually move all the mutexes into classes so they are not globally + * visible. 
+ * + * See: https://github.com/bitcoin/bitcoin/pull/20272#issuecomment-720755781 + */ +class GlobalMutex : public Mutex { }; + #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) inline void AssertLockNotHeldInline(const char* name, const char* file, int line, Mutex* cs) EXCLUSIVE_LOCKS_REQUIRED(!cs) { AssertLockNotHeldInternal(name, file, line, cs); } inline void AssertLockNotHeldInline(const char* name, const char* file, int line, RecursiveMutex* cs) LOCKS_EXCLUDED(cs) { AssertLockNotHeldInternal(name, file, line, cs); } +inline void AssertLockNotHeldInline(const char* name, const char* file, int line, GlobalMutex* cs) LOCKS_EXCLUDED(cs) { AssertLockNotHeldInternal(name, file, line, cs); } #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) /** Wrapper around std::unique_lock style lock for Mutex. */ @@ -232,12 +244,26 @@ public: template<typename MutexArg> using DebugLock = UniqueLock<typename std::remove_reference<typename std::remove_pointer<MutexArg>::type>::type>; -#define LOCK(cs) DebugLock<decltype(cs)> UNIQUE_NAME(criticalblock)(cs, #cs, __FILE__, __LINE__) +// When locking a Mutex, require negative capability to ensure the lock +// is not already held +inline Mutex& MaybeCheckNotHeld(Mutex& cs) EXCLUSIVE_LOCKS_REQUIRED(!cs) LOCK_RETURNED(cs) { return cs; } +inline Mutex* MaybeCheckNotHeld(Mutex* cs) EXCLUSIVE_LOCKS_REQUIRED(!cs) LOCK_RETURNED(cs) { return cs; } + +// When locking a GlobalMutex, just check it is not locked in the surrounding scope +inline GlobalMutex& MaybeCheckNotHeld(GlobalMutex& cs) LOCKS_EXCLUDED(cs) LOCK_RETURNED(cs) { return cs; } +inline GlobalMutex* MaybeCheckNotHeld(GlobalMutex* cs) LOCKS_EXCLUDED(cs) LOCK_RETURNED(cs) { return cs; } + +// When locking a RecursiveMutex, it's okay to already hold the lock +// but check that it is not known to be locked in the surrounding scope anyway +inline RecursiveMutex& MaybeCheckNotHeld(RecursiveMutex& cs) LOCKS_EXCLUDED(cs) LOCK_RETURNED(cs) { return cs; } +inline RecursiveMutex* MaybeCheckNotHeld(RecursiveMutex* cs) LOCKS_EXCLUDED(cs) LOCK_RETURNED(cs) { return cs; } + +#define LOCK(cs) DebugLock<decltype(cs)> UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) #define LOCK2(cs1, cs2) \ - DebugLock<decltype(cs1)> criticalblock1(cs1, #cs1, __FILE__, __LINE__); \ - DebugLock<decltype(cs2)> criticalblock2(cs2, #cs2, __FILE__, __LINE__); -#define TRY_LOCK(cs, name) DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__, true) -#define WAIT_LOCK(cs, name) DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__) + DebugLock<decltype(cs1)> criticalblock1(MaybeCheckNotHeld(cs1), #cs1, __FILE__, __LINE__); \ + DebugLock<decltype(cs2)> criticalblock2(MaybeCheckNotHeld(cs2), #cs2, __FILE__, __LINE__); +#define TRY_LOCK(cs, name) DebugLock<decltype(cs)> name(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__, true) +#define WAIT_LOCK(cs, name) DebugLock<decltype(cs)> name(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) #define ENTER_CRITICAL_SECTION(cs) \ { \ @@ -276,7 +302,7 @@ using DebugLock = UniqueLock<typename std::remove_reference<typename std::remove //! //! The above is detectable at compile-time with the -Wreturn-local-addr flag in //! gcc and the -Wreturn-stack-address flag in clang, both enabled by default. 
-#define WITH_LOCK(cs, code) [&]() -> decltype(auto) { LOCK(cs); code; }() +#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) class CSemaphore { diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index 875241094d..78b82b9b20 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -54,7 +54,7 @@ constexpr long SHARED_TX_OFFSET{3}; BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) { - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); @@ -137,7 +137,7 @@ public: BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) { - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); @@ -207,7 +207,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) { - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); TestMemPoolEntryHelper entry; CBlock block(BuildBlockTestCase()); @@ -258,7 +258,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) { - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); CMutableTransaction coinbase; coinbase.vin.resize(1); coinbase.vin[0].scriptSig.resize(10); diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp index ba1eacfc78..c31e4e51f7 100644 --- a/src/test/blockfilter_index_tests.cpp +++ b/src/test/blockfilter_index_tests.cpp @@ -65,7 +65,7 @@ CBlock BuildChainTestingSetup::CreateBlock(const CBlockIndex* prev, const std::vector<CMutableTransaction>& txns, const CScript& scriptPubKey) { - std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool}.CreateNewBlock(scriptPubKey); + std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(scriptPubKey); CBlock& block = pblocktemplate->block; block.hashPrevBlock = prev->GetBlockHash(); block.nTime = prev->nTime + 1; diff --git a/src/test/fuzz/tx_out.cpp b/src/test/fuzz/tx_out.cpp index 39a50b6c80..a2421ff582 100644 --- a/src/test/fuzz/tx_out.cpp +++ b/src/test/fuzz/tx_out.cpp @@ -4,6 +4,7 @@ #include <consensus/validation.h> #include <core_memusage.h> +#include <policy/feerate.h> #include <policy/policy.h> #include <primitives/transaction.h> #include <streams.h> diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp index 771d7a11cb..4f40608c4f 100644 --- a/src/test/fuzz/tx_pool.cpp +++ b/src/test/fuzz/tx_pool.cpp @@ -97,7 +97,7 @@ void Finish(FuzzedDataProvider& fuzzed_data_provider, MockedTxPool& tx_pool, CCh BlockAssembler::Options options; options.nBlockMaxWeight = fuzzed_data_provider.ConsumeIntegralInRange(0U, MAX_BLOCK_WEIGHT); options.blockMinFeeRate = CFeeRate{ConsumeMoney(fuzzed_data_provider, /*max=*/COIN)}; - auto assembler = BlockAssembler{chainstate, *static_cast<CTxMemPool*>(&tx_pool), options}; + auto assembler = BlockAssembler{chainstate, &tx_pool, options}; auto block_template = assembler.CreateNewBlock(CScript{} << OP_TRUE); Assert(block_template->block.vtx.size() >= 1); } diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp index 033c6e18d5..883698aff1 100644 --- a/src/test/fuzz/util.cpp +++ b/src/test/fuzz/util.cpp @@ -223,6 +223,15 @@ bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* return true; } +bool 
FuzzedSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const +{ + for (auto& [sock, events] : events_per_sock) { + (void)sock; + events.occurred = m_fuzzed_data_provider.ConsumeBool() ? events.requested : 0; + } + return true; +} + bool FuzzedSock::IsConnected(std::string& errmsg) const { if (m_fuzzed_data_provider.ConsumeBool()) { diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h index 3fc6fa1cd5..66d00b1767 100644 --- a/src/test/fuzz/util.h +++ b/src/test/fuzz/util.h @@ -71,6 +71,8 @@ public: bool Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred = nullptr) const override; + bool WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const override; + bool IsConnected(std::string& errmsg) const override; }; diff --git a/src/test/logging_tests.cpp b/src/test/logging_tests.cpp index 3f6a605945..5a5e3b3f1f 100644 --- a/src/test/logging_tests.cpp +++ b/src/test/logging_tests.cpp @@ -103,6 +103,7 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros, LogSetup) LogPrintLevel(BCLog::NET, BCLog::Level::Info, "foo8: %s\n", "bar8"); LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "foo9: %s\n", "bar9"); LogPrintLevel(BCLog::NET, BCLog::Level::Error, "foo10: %s\n", "bar10"); + LogPrintfCategory(BCLog::VALIDATION, "foo11: %s\n", "bar11"); std::ifstream file{tmp_log_path}; std::vector<std::string> log_lines; for (std::string log; std::getline(file, log);) { @@ -114,7 +115,9 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros, LogSetup) "[net:debug] foo7: bar7", "[net:info] foo8: bar8", "[net:warning] foo9: bar9", - "[net:error] foo10: bar10"}; + "[net:error] foo10: bar10", + "[validation] foo11: bar11", + }; BOOST_CHECK_EQUAL_COLLECTIONS(log_lines.begin(), log_lines.end(), expected.begin(), expected.end()); } diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp index 89424a0cd2..bc63122025 100644 --- a/src/test/mempool_tests.cpp +++ b/src/test/mempool_tests.cpp @@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest) } - CTxMemPool testPool; + CTxMemPool& testPool = *Assert(m_node.mempool); LOCK2(cs_main, testPool.cs); // Nothing in pool, remove should do nothing: @@ -108,12 +108,12 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest) BOOST_CHECK_EQUAL(testPool.size(), 0U); } -template<typename name> -static void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) +template <typename name> +static void CheckSort(CTxMemPool& pool, std::vector<std::string>& sortedOrder) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) { BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size()); typename CTxMemPool::indexed_transaction_set::index<name>::type::iterator it = pool.mapTx.get<name>().begin(); - int count=0; + int count = 0; for (; it != pool.mapTx.get<name>().end(); ++it, ++count) { BOOST_CHECK_EQUAL(it->GetTx().GetHash().ToString(), sortedOrder[count]); } @@ -121,7 +121,7 @@ static void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder) E BOOST_AUTO_TEST_CASE(MempoolIndexingTest) { - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); LOCK2(cs_main, pool.cs); TestMemPoolEntryHelper entry; @@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(MempoolIndexingTest) BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) { - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); LOCK2(cs_main, pool.cs); TestMemPoolEntryHelper entry; @@ -423,7 +423,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest) BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest) { - CTxMemPool pool; + CTxMemPool& pool = 
*Assert(m_node.mempool); LOCK2(cs_main, pool.cs); TestMemPoolEntryHelper entry; @@ -594,7 +594,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests) { size_t ancestors, descendants; - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); LOCK2(cs_main, pool.cs); TestMemPoolEntryHelper entry; @@ -753,7 +753,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTestsDiamond) { size_t ancestors, descendants; - CTxMemPool pool; + CTxMemPool& pool = *Assert(m_node.mempool); LOCK2(::cs_main, pool.cs); TestMemPoolEntryHelper entry; diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index 439ad174b3..eca4fbf15c 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -52,7 +52,7 @@ BlockAssembler MinerTestingSetup::AssemblerForTest(const CChainParams& params) options.nBlockMaxWeight = MAX_BLOCK_WEIGHT; options.blockMinFeeRate = blockMinFeeRate; - return BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool, options}; + return BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get(), options}; } constexpr static struct { diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp index 06877898a4..3f66a8fc46 100644 --- a/src/test/policyestimator_tests.cpp +++ b/src/test/policyestimator_tests.cpp @@ -12,12 +12,12 @@ #include <boost/test/unit_test.hpp> -BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, BasicTestingSetup) +BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, ChainTestingSetup) BOOST_AUTO_TEST_CASE(BlockPolicyEstimates) { - CBlockPolicyEstimator feeEst; - CTxMemPool mpool(&feeEst); + CBlockPolicyEstimator& feeEst = *Assert(m_node.fee_estimator); + CTxMemPool& mpool = *Assert(m_node.mempool); LOCK2(cs_main, mpool.cs); TestMemPoolEntryHelper entry; CAmount basefee(2000); diff --git a/src/test/util/mining.cpp b/src/test/util/mining.cpp index a6d624fe84..88cf9647e7 100644 --- a/src/test/util/mining.cpp +++ b/src/test/util/mining.cpp @@ -77,7 +77,7 @@ CTxIn MineBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) std::shared_ptr<CBlock> PrepareBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) { auto block = std::make_shared<CBlock>( - BlockAssembler{Assert(node.chainman)->ActiveChainstate(), *Assert(node.mempool)} + BlockAssembler{Assert(node.chainman)->ActiveChainstate(), Assert(node.mempool.get())} .CreateNewBlock(coinbase_scriptPubKey) ->block); diff --git a/src/test/util/net.h b/src/test/util/net.h index e980fe4967..37d278645a 100644 --- a/src/test/util/net.h +++ b/src/test/util/net.h @@ -162,6 +162,15 @@ public: return true; } + bool WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const override + { + for (auto& [sock, events] : events_per_sock) { + (void)sock; + events.occurred = events.requested; + } + return true; + } + private: const std::string m_contents; mutable size_t m_consumed; diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 8f03481f72..d9fff85bf5 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -277,8 +277,7 @@ CBlock TestChain100Setup::CreateBlock( const CScript& scriptPubKey, CChainState& chainstate) { - CTxMemPool empty_pool; - CBlock block = BlockAssembler{chainstate, empty_pool}.CreateNewBlock(scriptPubKey)->block; + CBlock block = BlockAssembler{chainstate, nullptr}.CreateNewBlock(scriptPubKey)->block; Assert(block.vtx.size() == 1); for (const CMutableTransaction& tx : txns) { diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index 
331da691b5..7ade4d8195 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -65,7 +65,7 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash) static int i = 0; static uint64_t time = Params().GenesisBlock().nTime; - auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool}.CreateNewBlock(CScript{} << i++ << OP_TRUE); + auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(CScript{} << i++ << OP_TRUE); auto pblock = std::make_shared<CBlock>(ptemplate->block); pblock->hashPrevBlock = prev_hash; pblock->nTime = ++time; @@ -327,7 +327,7 @@ BOOST_AUTO_TEST_CASE(witness_commitment_index) { CScript pubKey; pubKey << 1 << OP_TRUE; - auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool}.CreateNewBlock(pubKey); + auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(pubKey); CBlock pblock = ptemplate->block; CTxOut witness; diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp index 98cb713a81..102de74389 100644 --- a/src/test/validation_chainstate_tests.cpp +++ b/src/test/validation_chainstate_tests.cpp @@ -30,7 +30,7 @@ BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches) ChainstateManager manager{chainman_opts}; WITH_LOCK(::cs_main, manager.m_blockman.m_block_tree_db = std::make_unique<CBlockTreeDB>(1 << 20, true)); - CTxMemPool mempool; + CTxMemPool& mempool = *Assert(m_node.mempool); //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view. auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint { diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp index a34895d4ae..012169b17e 100644 --- a/src/test/validation_flush_tests.cpp +++ b/src/test/validation_flush_tests.cpp @@ -20,7 +20,7 @@ BOOST_FIXTURE_TEST_SUITE(validation_flush_tests, ChainTestingSetup) //! 
BOOST_AUTO_TEST_CASE(getcoinscachesizestate) { - CTxMemPool mempool; + CTxMemPool& mempool = *Assert(m_node.mempool); BlockManager blockman{}; CChainState chainstate{&mempool, blockman, *Assert(m_node.chainman)}; chainstate.InitCoinsDB(/*cache_size_bytes=*/1 << 10, /*in_memory=*/true, /*should_wipe=*/false); diff --git a/src/timedata.cpp b/src/timedata.cpp index 7faf22aba0..ceee08e68c 100644 --- a/src/timedata.cpp +++ b/src/timedata.cpp @@ -9,14 +9,14 @@ #include <timedata.h> #include <netaddress.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <sync.h> #include <tinyformat.h> #include <util/system.h> #include <util/translation.h> #include <warnings.h> -static Mutex g_timeoffset_mutex; +static GlobalMutex g_timeoffset_mutex; static int64_t nTimeOffset GUARDED_BY(g_timeoffset_mutex) = 0; /** diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 2064f0fb8a..d6e792a55f 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -94,7 +94,7 @@ void TorControlConnection::readcb(struct bufferevent *bev, void *ctx) self->reply_handlers.front()(*self, self->message); self->reply_handlers.pop_front(); } else { - LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code); + LogPrint(BCLog::TOR, "Received unexpected sync reply %i\n", self->message.code); } } self->message.Clear(); @@ -113,13 +113,13 @@ void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ct { TorControlConnection *self = static_cast<TorControlConnection*>(ctx); if (what & BEV_EVENT_CONNECTED) { - LogPrint(BCLog::TOR, "tor: Successfully connected!\n"); + LogPrint(BCLog::TOR, "Successfully connected!\n"); self->connected(*self); } else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) { if (what & BEV_EVENT_ERROR) { - LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n"); + LogPrint(BCLog::TOR, "Error connecting to Tor control socket\n"); } else { - LogPrint(BCLog::TOR, "tor: End of stream\n"); + LogPrint(BCLog::TOR, "End of stream\n"); } self->Disconnect(); self->disconnected(*self); @@ -318,7 +318,7 @@ TorController::TorController(struct event_base* _base, const std::string& tor_co // Read service private key if cached std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile()); if (pkf.first) { - LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", fs::PathToString(GetPrivateKeyFile())); + LogPrint(BCLog::TOR, "Reading cached private key from %s\n", fs::PathToString(GetPrivateKeyFile())); private_key = pkf.second; } } @@ -359,7 +359,7 @@ void TorController::get_socks_cb(TorControlConnection& _conn, const TorControlRe } } if (!socks_location.empty()) { - LogPrint(BCLog::TOR, "tor: Get SOCKS port command yielded %s\n", socks_location); + LogPrint(BCLog::TOR, "Get SOCKS port command yielded %s\n", socks_location); } else { LogPrintf("tor: Get SOCKS port command returned nothing\n"); } @@ -380,7 +380,7 @@ void TorController::get_socks_cb(TorControlConnection& _conn, const TorControlRe } Assume(resolved.IsValid()); - LogPrint(BCLog::TOR, "tor: Configuring onion proxy for %s\n", resolved.ToStringIPPort()); + LogPrint(BCLog::TOR, "Configuring onion proxy for %s\n", resolved.ToStringIPPort()); Proxy addrOnion = Proxy(resolved, true); SetProxy(NET_ONION, addrOnion); @@ -404,7 +404,7 @@ void TorController::get_socks_cb(TorControlConnection& _conn, const TorControlRe void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlReply& reply) { if (reply.code == 250) { - LogPrint(BCLog::TOR, "tor: ADD_ONION 
successful\n"); + LogPrint(BCLog::TOR, "ADD_ONION successful\n"); for (const std::string &s : reply.lines) { std::map<std::string,std::string> m = ParseTorReplyMapping(s); std::map<std::string,std::string>::iterator i; @@ -421,9 +421,9 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe return; } service = LookupNumeric(std::string(service_id+".onion"), Params().GetDefaultPort()); - LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString()); + LogPrintfCategory(BCLog::TOR, "Got service ID %s, advertising service %s\n", service_id, service.ToString()); if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) { - LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile())); + LogPrint(BCLog::TOR, "Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile())); } else { LogPrintf("tor: Error writing service private key to %s\n", fs::PathToString(GetPrivateKeyFile())); } @@ -439,7 +439,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& reply) { if (reply.code == 250) { - LogPrint(BCLog::TOR, "tor: Authentication successful\n"); + LogPrint(BCLog::TOR, "Authentication successful\n"); // Now that we know Tor is running setup the proxy for onion addresses // if -onion isn't set to something else. @@ -490,7 +490,7 @@ static std::vector<uint8_t> ComputeResponse(const std::string &key, const std::v void TorController::authchallenge_cb(TorControlConnection& _conn, const TorControlReply& reply) { if (reply.code == 250) { - LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n"); + LogPrint(BCLog::TOR, "SAFECOOKIE authentication challenge successful\n"); std::pair<std::string,std::string> l = SplitTorReplyLine(reply.lines[0]); if (l.first == "AUTHCHALLENGE") { std::map<std::string,std::string> m = ParseTorReplyMapping(l.second); @@ -500,7 +500,7 @@ void TorController::authchallenge_cb(TorControlConnection& _conn, const TorContr } std::vector<uint8_t> serverHash = ParseHex(m["SERVERHASH"]); std::vector<uint8_t> serverNonce = ParseHex(m["SERVERNONCE"]); - LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce)); + LogPrint(BCLog::TOR, "AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce)); if (serverNonce.size() != 32) { LogPrintf("tor: ServerNonce is not 32 bytes, as required by spec\n"); return; @@ -547,12 +547,12 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro std::map<std::string,std::string> m = ParseTorReplyMapping(l.second); std::map<std::string,std::string>::iterator i; if ((i = m.find("Tor")) != m.end()) { - LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second); + LogPrint(BCLog::TOR, "Connected to Tor version %s\n", i->second); } } } for (const std::string &s : methods) { - LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s); + LogPrint(BCLog::TOR, "Supported authentication method: %s\n", s); } // Prefer NULL, otherwise SAFECOOKIE. 
If a password is provided, use HASHEDPASSWORD /* Authentication: @@ -562,18 +562,18 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro std::string torpassword = gArgs.GetArg("-torpassword", ""); if (!torpassword.empty()) { if (methods.count("HASHEDPASSWORD")) { - LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n"); + LogPrint(BCLog::TOR, "Using HASHEDPASSWORD authentication\n"); ReplaceAll(torpassword, "\"", "\\\""); _conn.Command("AUTHENTICATE \"" + torpassword + "\"", std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2)); } else { LogPrintf("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available\n"); } } else if (methods.count("NULL")) { - LogPrint(BCLog::TOR, "tor: Using NULL authentication\n"); + LogPrint(BCLog::TOR, "Using NULL authentication\n"); _conn.Command("AUTHENTICATE", std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2)); } else if (methods.count("SAFECOOKIE")) { // Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie - LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile); + LogPrint(BCLog::TOR, "Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile); std::pair<bool,std::string> status_cookie = ReadBinaryFile(fs::PathFromString(cookiefile), TOR_COOKIE_SIZE); if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) { // _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2)); @@ -615,7 +615,7 @@ void TorController::disconnected_cb(TorControlConnection& _conn) if (!reconnect) return; - LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", m_tor_control_center); + LogPrint(BCLog::TOR, "Not connected to Tor control port %s, trying to reconnect\n", m_tor_control_center); // Single-shot timer for reconnect. Use exponential backoff. 
struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0)); diff --git a/src/univalue/.cirrus.yml b/src/univalue/.cirrus.yml deleted file mode 100644 index f140fee12b..0000000000 --- a/src/univalue/.cirrus.yml +++ /dev/null @@ -1,44 +0,0 @@ -env: - MAKEJOBS: "-j4" - RUN_TESTS: "true" - BASE_OUTDIR: "$CIRRUS_WORKING_DIR/out_dir_base" - DEBIAN_FRONTEND: "noninteractive" - -task: - container: - image: ubuntu:focal - cpu: 1 - memory: 1G - greedy: true # https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4 - - matrix: - - name: "gcc" - env: - CC: "gcc" - CXX: "g++" - APT_PKGS: "gcc" - - name: "clang" - env: - CC: "clang" - CXX: "clang++" - APT_PKGS: "clang" - - name: "mingw" - env: - CC: "" - CXX: "" - UNIVALUE_CONFIG: "--host=x86_64-w64-mingw32" - APT_PKGS: "g++-mingw-w64-x86-64 gcc-mingw-w64-x86-64 binutils-mingw-w64-x86-64" - RUN_TESTS: "false" - - install_script: - - apt update - - apt install -y pkg-config build-essential libtool autotools-dev automake bsdmainutils - - apt install -y $APT_PKGS - autogen_script: - - ./autogen.sh - configure_script: - - ./configure --cache-file=config.cache --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib $UNIVALUE_CONFIG - make_script: - - make $MAKEJOBS V=1 - test_script: - - if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS distcheck; fi diff --git a/src/univalue/COPYING b/src/univalue/COPYING deleted file mode 100644 index 1fb429f356..0000000000 --- a/src/univalue/COPYING +++ /dev/null @@ -1,19 +0,0 @@ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am deleted file mode 100644 index 476f14b922..0000000000 --- a/src/univalue/Makefile.am +++ /dev/null @@ -1,58 +0,0 @@ -include sources.mk -ACLOCAL_AMFLAGS = -I build-aux/m4 -.PHONY: gen FORCE -.INTERMEDIATE: $(GENBIN) - -include_HEADERS = $(UNIVALUE_DIST_HEADERS_INT) -noinst_HEADERS = $(UNIVALUE_LIB_HEADERS_INT) - -lib_LTLIBRARIES = libunivalue.la - -pkgconfigdir = $(libdir)/pkgconfig -pkgconfig_DATA = pc/libunivalue.pc - -libunivalue_la_SOURCES = $(UNIVALUE_LIB_SOURCES_INT) - -libunivalue_la_LDFLAGS = \ - -version-info $(LIBUNIVALUE_CURRENT):$(LIBUNIVALUE_REVISION):$(LIBUNIVALUE_AGE) \ - -no-undefined -libunivalue_la_CXXFLAGS = -I$(top_srcdir)/include - -TESTS = test/object test/unitester test/no_nul - -GENBIN = gen/gen$(BUILD_EXEEXT) -GEN_SRCS = gen/gen.cpp - -$(GENBIN): $(GEN_SRCS) - @echo Building $@ - $(AM_V_at)c++ -I$(top_srcdir)/include -o $@ $< - -gen: $(GENBIN) FORCE - @echo Updating lib/univalue_escapes.h - $(AM_V_at)$(GENBIN) > lib/univalue_escapes.h - -noinst_PROGRAMS = $(TESTS) test/test_json - -test_unitester_SOURCES = $(UNIVALUE_TEST_UNITESTER_INT) -test_unitester_LDADD = libunivalue.la -test_unitester_CXXFLAGS = -I$(top_srcdir)/include -DJSON_TEST_SRC=\"$(srcdir)/$(UNIVALUE_TEST_DATA_DIR_INT)\" -test_unitester_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS) - -test_test_json_SOURCES = $(UNIVALUE_TEST_JSON_INT) -test_test_json_LDADD = libunivalue.la -test_test_json_CXXFLAGS = -I$(top_srcdir)/include -test_test_json_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS) - -test_no_nul_SOURCES = $(UNIVALUE_TEST_NO_NUL_INT) -test_no_nul_LDADD = libunivalue.la -test_no_nul_CXXFLAGS = -I$(top_srcdir)/include -test_no_nul_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS) - -test_object_SOURCES = $(UNIVALUE_TEST_OBJECT_INT) -test_object_LDADD = libunivalue.la -test_object_CXXFLAGS = -I$(top_srcdir)/include -test_object_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS) - -TEST_FILES = $(UNIVALUE_TEST_FILES_INT) - -EXTRA_DIST=$(UNIVALUE_TEST_FILES_INT) $(GEN_SRCS) diff --git a/src/univalue/README.md b/src/univalue/README.md deleted file mode 100644 index d622f5b1e0..0000000000 --- a/src/univalue/README.md +++ /dev/null @@ -1,21 +0,0 @@ - -# UniValue - -## Summary - -A universal value class, with JSON encoding and decoding. - -UniValue is an abstract data type that may be a null, boolean, string, -number, array container, or a key/value dictionary container, nested to -an arbitrary depth. - -This class is aligned with the JSON standard, [RFC -7159](https://tools.ietf.org/html/rfc7159.html). - -## Library usage - -This is a fork of univalue used by Bitcoin Core. It is not maintained for usage -by other projects. Notably, the API is broken in non-backward-compatible ways. - -Other projects looking for a maintained library should use the upstream -univalue at https://github.com/jgarzik/univalue. 
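The removed README above describes UniValue as a variant type that can hold a null, boolean, string, number, array, or key/value object, nested arbitrarily, with JSON encoding and decoding. As a rough standalone sketch of that description (not part of this changeset; it assumes the commonly available UniValue calls pushKV, push_back, write, and read, and a build that links against the univalue library):

    #include <univalue.h>
    #include <iostream>
    #include <string>

    int main()
    {
        // Build {"name":"example","values":[1,2,3]} using the object and array containers.
        UniValue obj(UniValue::VOBJ);
        obj.pushKV("name", "example");

        UniValue arr(UniValue::VARR);
        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        obj.pushKV("values", arr);

        // Encode to JSON text, then decode it back.
        const std::string json = obj.write();
        UniValue parsed;
        if (!parsed.read(json)) {
            std::cerr << "parse failed\n";
            return 1;
        }
        std::cout << parsed["name"].get_str() << " has "
                  << parsed["values"].size() << " values\n";
        return 0;
    }
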
diff --git a/src/univalue/autogen.sh b/src/univalue/autogen.sh deleted file mode 100755 index 4b38721faa..0000000000 --- a/src/univalue/autogen.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -set -e -srcdir="$(dirname $0)" -cd "$srcdir" -if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="`which glibtoolize 2>/dev/null`"; then - LIBTOOLIZE="${GLIBTOOLIZE}" - export LIBTOOLIZE -fi -autoreconf --install --force diff --git a/src/univalue/build-aux/m4/.gitignore b/src/univalue/build-aux/m4/.gitignore deleted file mode 100644 index f063686524..0000000000 --- a/src/univalue/build-aux/m4/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/*.m4 diff --git a/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 b/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 deleted file mode 100644 index f7e5137003..0000000000 --- a/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 +++ /dev/null @@ -1,962 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) -# -# DESCRIPTION -# -# Check for baseline language coverage in the compiler for the specified -# version of the C++ standard. If necessary, add switches to CXX and -# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) -# or '14' (for the C++14 standard). -# -# The second argument, if specified, indicates whether you insist on an -# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. -# -std=c++11). If neither is specified, you get whatever works, with -# preference for no added switch, and then for an extended mode. -# -# The third argument, if specified 'mandatory' or if left unspecified, -# indicates that baseline support for the specified C++ standard is -# required and that the macro should error out if no mode with that -# support is found. If specified 'optional', then configuration proceeds -# regardless, after defining HAVE_CXX${VERSION} if and only if a -# supporting mode is found. -# -# LICENSE -# -# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com> -# Copyright (c) 2012 Zack Weinberg <zackw@panix.com> -# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu> -# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com> -# Copyright (c) 2015 Paul Norman <penorman@mac.com> -# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu> -# Copyright (c) 2016, 2018 Krzesimir Nowak <qdlacz@gmail.com> -# Copyright (c) 2019 Enji Cooper <yaneurabeya@gmail.com> -# Copyright (c) 2020 Jason Merrill <jason@redhat.com> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 12 - -dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro -dnl (serial version number 13). 
- -AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl - m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"], - [$1], [14], [ax_cxx_compile_alternatives="14 1y"], - [$1], [17], [ax_cxx_compile_alternatives="17 1z"], - [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl - m4_if([$2], [], [], - [$2], [ext], [], - [$2], [noext], [], - [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl - m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], - [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], - [$3], [optional], [ax_cxx_compile_cxx$1_required=false], - [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) - AC_LANG_PUSH([C++])dnl - ac_success=no - - m4_if([$2], [], [dnl - AC_CACHE_CHECK(whether $CXX supports C++$1 features by default, - ax_cv_cxx_compile_cxx$1, - [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], - [ax_cv_cxx_compile_cxx$1=yes], - [ax_cv_cxx_compile_cxx$1=no])]) - if test x$ax_cv_cxx_compile_cxx$1 = xyes; then - ac_success=yes - fi]) - - m4_if([$2], [noext], [], [dnl - if test x$ac_success = xno; then - for alternative in ${ax_cxx_compile_alternatives}; do - switch="-std=gnu++${alternative}" - cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) - AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, - $cachevar, - [ac_save_CXX="$CXX" - CXX="$CXX $switch" - AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], - [eval $cachevar=yes], - [eval $cachevar=no]) - CXX="$ac_save_CXX"]) - if eval test x\$$cachevar = xyes; then - CXX="$CXX $switch" - if test -n "$CXXCPP" ; then - CXXCPP="$CXXCPP $switch" - fi - ac_success=yes - break - fi - done - fi]) - - m4_if([$2], [ext], [], [dnl - if test x$ac_success = xno; then - dnl HP's aCC needs +std=c++11 according to: - dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf - dnl Cray's crayCC needs "-h std=c++11" - for alternative in ${ax_cxx_compile_alternatives}; do - for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do - cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) - AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, - $cachevar, - [ac_save_CXX="$CXX" - CXX="$CXX $switch" - AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], - [eval $cachevar=yes], - [eval $cachevar=no]) - CXX="$ac_save_CXX"]) - if eval test x\$$cachevar = xyes; then - CXX="$CXX $switch" - if test -n "$CXXCPP" ; then - CXXCPP="$CXXCPP $switch" - fi - ac_success=yes - break - fi - done - if test x$ac_success = xyes; then - break - fi - done - fi]) - AC_LANG_POP([C++]) - if test x$ax_cxx_compile_cxx$1_required = xtrue; then - if test x$ac_success = xno; then - AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) - fi - fi - if test x$ac_success = xno; then - HAVE_CXX$1=0 - AC_MSG_NOTICE([No compiler with C++$1 support was found]) - else - HAVE_CXX$1=1 - AC_DEFINE(HAVE_CXX$1,1, - [define if the compiler supports basic C++$1 syntax]) - fi - AC_SUBST(HAVE_CXX$1) -]) - - -dnl Test body for checking C++11 support - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], - _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 -) - - -dnl Test body for checking C++14 support - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], - _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 - _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 -) - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17], - _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 - 
_AX_CXX_COMPILE_STDCXX_testbody_new_in_14 - _AX_CXX_COMPILE_STDCXX_testbody_new_in_17 -) - -dnl Tests for new features in C++11 - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ - -// If the compiler admits that it is not ready for C++11, why torture it? -// Hopefully, this will speed up the test. - -#ifndef __cplusplus - -#error "This is not a C++ compiler" - -#elif __cplusplus < 201103L - -#error "This is not a C++11 compiler" - -#else - -namespace cxx11 -{ - - namespace test_static_assert - { - - template <typename T> - struct check - { - static_assert(sizeof(int) <= sizeof(T), "not big enough"); - }; - - } - - namespace test_final_override - { - - struct Base - { - virtual ~Base() {} - virtual void f() {} - }; - - struct Derived : public Base - { - virtual ~Derived() override {} - virtual void f() override {} - }; - - } - - namespace test_double_right_angle_brackets - { - - template < typename T > - struct check {}; - - typedef check<void> single_type; - typedef check<check<void>> double_type; - typedef check<check<check<void>>> triple_type; - typedef check<check<check<check<void>>>> quadruple_type; - - } - - namespace test_decltype - { - - int - f() - { - int a = 1; - decltype(a) b = 2; - return a + b; - } - - } - - namespace test_type_deduction - { - - template < typename T1, typename T2 > - struct is_same - { - static const bool value = false; - }; - - template < typename T > - struct is_same<T, T> - { - static const bool value = true; - }; - - template < typename T1, typename T2 > - auto - add(T1 a1, T2 a2) -> decltype(a1 + a2) - { - return a1 + a2; - } - - int - test(const int c, volatile int v) - { - static_assert(is_same<int, decltype(0)>::value == true, ""); - static_assert(is_same<int, decltype(c)>::value == false, ""); - static_assert(is_same<int, decltype(v)>::value == false, ""); - auto ac = c; - auto av = v; - auto sumi = ac + av + 'x'; - auto sumf = ac + av + 1.0; - static_assert(is_same<int, decltype(ac)>::value == true, ""); - static_assert(is_same<int, decltype(av)>::value == true, ""); - static_assert(is_same<int, decltype(sumi)>::value == true, ""); - static_assert(is_same<int, decltype(sumf)>::value == false, ""); - static_assert(is_same<int, decltype(add(c, v))>::value == true, ""); - return (sumf > 0.0) ? sumi : add(c, v); - } - - } - - namespace test_noexcept - { - - int f() { return 0; } - int g() noexcept { return 0; } - - static_assert(noexcept(f()) == false, ""); - static_assert(noexcept(g()) == true, ""); - - } - - namespace test_constexpr - { - - template < typename CharT > - unsigned long constexpr - strlen_c_r(const CharT *const s, const unsigned long acc) noexcept - { - return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; - } - - template < typename CharT > - unsigned long constexpr - strlen_c(const CharT *const s) noexcept - { - return strlen_c_r(s, 0UL); - } - - static_assert(strlen_c("") == 0UL, ""); - static_assert(strlen_c("1") == 1UL, ""); - static_assert(strlen_c("example") == 7UL, ""); - static_assert(strlen_c("another\0example") == 7UL, ""); - - } - - namespace test_rvalue_references - { - - template < int N > - struct answer - { - static constexpr int value = N; - }; - - answer<1> f(int&) { return answer<1>(); } - answer<2> f(const int&) { return answer<2>(); } - answer<3> f(int&&) { return answer<3>(); } - - void - test() - { - int i = 0; - const int c = 0; - static_assert(decltype(f(i))::value == 1, ""); - static_assert(decltype(f(c))::value == 2, ""); - static_assert(decltype(f(0))::value == 3, ""); - } - - } - - namespace test_uniform_initialization - { - - struct test - { - static const int zero {}; - static const int one {1}; - }; - - static_assert(test::zero == 0, ""); - static_assert(test::one == 1, ""); - - } - - namespace test_lambdas - { - - void - test1() - { - auto lambda1 = [](){}; - auto lambda2 = lambda1; - lambda1(); - lambda2(); - } - - int - test2() - { - auto a = [](int i, int j){ return i + j; }(1, 2); - auto b = []() -> int { return '0'; }(); - auto c = [=](){ return a + b; }(); - auto d = [&](){ return c; }(); - auto e = [a, &b](int x) mutable { - const auto identity = [](int y){ return y; }; - for (auto i = 0; i < a; ++i) - a += b--; - return x + identity(a + b); - }(0); - return a + b + c + d + e; - } - - int - test3() - { - const auto nullary = [](){ return 0; }; - const auto unary = [](int x){ return x; }; - using nullary_t = decltype(nullary); - using unary_t = decltype(unary); - const auto higher1st = [](nullary_t f){ return f(); }; - const auto higher2nd = [unary](nullary_t f1){ - return [unary, f1](unary_t f2){ return f2(unary(f1())); }; - }; - return higher1st(nullary) + higher2nd(nullary)(unary); - } - - } - - namespace test_variadic_templates - { - - template <int...> - struct sum; - - template <int N0, int... N1toN> - struct sum<N0, N1toN...> - { - static constexpr auto value = N0 + sum<N1toN...>::value; - }; - - template <> - struct sum<> - { - static constexpr auto value = 0; - }; - - static_assert(sum<>::value == 0, ""); - static_assert(sum<1>::value == 1, ""); - static_assert(sum<23>::value == 23, ""); - static_assert(sum<1, 2>::value == 3, ""); - static_assert(sum<5, 5, 11>::value == 21, ""); - static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); - - } - - // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae - // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function - // because of this. - namespace test_template_alias_sfinae - { - - struct foo {}; - - template<typename T> - using member = typename T::member_type; - - template<typename T> - void func(...) {} - - template<typename T> - void func(member<T>*) {} - - void test(); - - void test() { func<foo>(0); } - - } - -} // namespace cxx11 - -#endif // __cplusplus >= 201103L - -]]) - - -dnl Tests for new features in C++14 - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ - -// If the compiler admits that it is not ready for C++14, why torture it? -// Hopefully, this will speed up the test. 
- -#ifndef __cplusplus - -#error "This is not a C++ compiler" - -#elif __cplusplus < 201402L - -#error "This is not a C++14 compiler" - -#else - -namespace cxx14 -{ - - namespace test_polymorphic_lambdas - { - - int - test() - { - const auto lambda = [](auto&&... args){ - const auto istiny = [](auto x){ - return (sizeof(x) == 1UL) ? 1 : 0; - }; - const int aretiny[] = { istiny(args)... }; - return aretiny[0]; - }; - return lambda(1, 1L, 1.0f, '1'); - } - - } - - namespace test_binary_literals - { - - constexpr auto ivii = 0b0000000000101010; - static_assert(ivii == 42, "wrong value"); - - } - - namespace test_generalized_constexpr - { - - template < typename CharT > - constexpr unsigned long - strlen_c(const CharT *const s) noexcept - { - auto length = 0UL; - for (auto p = s; *p; ++p) - ++length; - return length; - } - - static_assert(strlen_c("") == 0UL, ""); - static_assert(strlen_c("x") == 1UL, ""); - static_assert(strlen_c("test") == 4UL, ""); - static_assert(strlen_c("another\0test") == 7UL, ""); - - } - - namespace test_lambda_init_capture - { - - int - test() - { - auto x = 0; - const auto lambda1 = [a = x](int b){ return a + b; }; - const auto lambda2 = [a = lambda1(x)](){ return a; }; - return lambda2(); - } - - } - - namespace test_digit_separators - { - - constexpr auto ten_million = 100'000'000; - static_assert(ten_million == 100000000, ""); - - } - - namespace test_return_type_deduction - { - - auto f(int& x) { return x; } - decltype(auto) g(int& x) { return x; } - - template < typename T1, typename T2 > - struct is_same - { - static constexpr auto value = false; - }; - - template < typename T > - struct is_same<T, T> - { - static constexpr auto value = true; - }; - - int - test() - { - auto x = 0; - static_assert(is_same<int, decltype(f(x))>::value, ""); - static_assert(is_same<int&, decltype(g(x))>::value, ""); - return x; - } - - } - -} // namespace cxx14 - -#endif // __cplusplus >= 201402L - -]]) - - -dnl Tests for new features in C++17 - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[ - -// If the compiler admits that it is not ready for C++17, why torture it? -// Hopefully, this will speed up the test. - -#ifndef __cplusplus - -#error "This is not a C++ compiler" - -#elif __cplusplus < 201703L - -#error "This is not a C++17 compiler" - -#else - -#include <initializer_list> -#include <utility> -#include <type_traits> - -namespace cxx17 -{ - - namespace test_constexpr_lambdas - { - - constexpr int foo = [](){return 42;}(); - - } - - namespace test::nested_namespace::definitions - { - - } - - namespace test_fold_expression - { - - template<typename... Args> - int multiply(Args... args) - { - return (args * ... * 1); - } - - template<typename... Args> - bool all(Args... 
args) - { - return (args && ...); - } - - } - - namespace test_extended_static_assert - { - - static_assert (true); - - } - - namespace test_auto_brace_init_list - { - - auto foo = {5}; - auto bar {5}; - - static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value); - static_assert(std::is_same<int, decltype(bar)>::value); - } - - namespace test_typename_in_template_template_parameter - { - - template<template<typename> typename X> struct D; - - } - - namespace test_fallthrough_nodiscard_maybe_unused_attributes - { - - int f1() - { - return 42; - } - - [[nodiscard]] int f2() - { - [[maybe_unused]] auto unused = f1(); - - switch (f1()) - { - case 17: - f1(); - [[fallthrough]]; - case 42: - f1(); - } - return f1(); - } - - } - - namespace test_extended_aggregate_initialization - { - - struct base1 - { - int b1, b2 = 42; - }; - - struct base2 - { - base2() { - b3 = 42; - } - int b3; - }; - - struct derived : base1, base2 - { - int d; - }; - - derived d1 {{1, 2}, {}, 4}; // full initialization - derived d2 {{}, {}, 4}; // value-initialized bases - - } - - namespace test_general_range_based_for_loop - { - - struct iter - { - int i; - - int& operator* () - { - return i; - } - - const int& operator* () const - { - return i; - } - - iter& operator++() - { - ++i; - return *this; - } - }; - - struct sentinel - { - int i; - }; - - bool operator== (const iter& i, const sentinel& s) - { - return i.i == s.i; - } - - bool operator!= (const iter& i, const sentinel& s) - { - return !(i == s); - } - - struct range - { - iter begin() const - { - return {0}; - } - - sentinel end() const - { - return {5}; - } - }; - - void f() - { - range r {}; - - for (auto i : r) - { - [[maybe_unused]] auto v = i; - } - } - - } - - namespace test_lambda_capture_asterisk_this_by_value - { - - struct t - { - int i; - int foo() - { - return [*this]() - { - return i; - }(); - } - }; - - } - - namespace test_enum_class_construction - { - - enum class byte : unsigned char - {}; - - byte foo {42}; - - } - - namespace test_constexpr_if - { - - template <bool cond> - int f () - { - if constexpr(cond) - { - return 13; - } - else - { - return 42; - } - } - - } - - namespace test_selection_statement_with_initializer - { - - int f() - { - return 13; - } - - int f2() - { - if (auto i = f(); i > 0) - { - return 3; - } - - switch (auto i = f(); i + 4) - { - case 17: - return 2; - - default: - return 1; - } - } - - } - - namespace test_template_argument_deduction_for_class_templates - { - - template <typename T1, typename T2> - struct pair - { - pair (T1 p1, T2 p2) - : m1 {p1}, - m2 {p2} - {} - - T1 m1; - T2 m2; - }; - - void f() - { - [[maybe_unused]] auto p = pair{13, 42u}; - } - - } - - namespace test_non_type_auto_template_parameters - { - - template <auto n> - struct B - {}; - - B<5> b1; - B<'a'> b2; - - } - - namespace test_structured_bindings - { - - int arr[2] = { 1, 2 }; - std::pair<int, int> pr = { 1, 2 }; - - auto f1() -> int(&)[2] - { - return arr; - } - - auto f2() -> std::pair<int, int>& - { - return pr; - } - - struct S - { - int x1 : 2; - volatile double y1; - }; - - S f3() - { - return {}; - } - - auto [ x1, y1 ] = f1(); - auto& [ xr1, yr1 ] = f1(); - auto [ x2, y2 ] = f2(); - auto& [ xr2, yr2 ] = f2(); - const auto [ x3, y3 ] = f3(); - - } - - namespace test_exception_spec_type_system - { - - struct Good {}; - struct Bad {}; - - void g1() noexcept; - void g2(); - - template<typename T> - Bad - f(T*, T*); - - template<typename T1, typename T2> - Good - f(T1*, T2*); - - static_assert (std::is_same_v<Good, 
decltype(f(g1, g2))>); - - } - - namespace test_inline_variables - { - - template<class T> void f(T) - {} - - template<class T> inline T g(T) - { - return T{}; - } - - template<> inline void f<>(int) - {} - - template<> int g<>(int) - { - return 5; - } - - } - -} // namespace cxx17 - -#endif // __cplusplus < 201703L - -]]) diff --git a/src/univalue/configure.ac b/src/univalue/configure.ac deleted file mode 100644 index ed9c5f0c5c..0000000000 --- a/src/univalue/configure.ac +++ /dev/null @@ -1,72 +0,0 @@ -m4_define([libunivalue_major_version], [1]) -m4_define([libunivalue_minor_version], [1]) -m4_define([libunivalue_micro_version], [4]) -m4_define([libunivalue_interface_age], [4]) -# If you need a modifier for the version number. -# Normally empty, but can be used to make "fixup" releases. -m4_define([libunivalue_extraversion], []) - -dnl libtool versioning from libunivalue -m4_define([libunivalue_current], [m4_eval(100 * libunivalue_minor_version + libunivalue_micro_version - libunivalue_interface_age)]) -m4_define([libunivalue_binary_age], [m4_eval(100 * libunivalue_minor_version + libunivalue_micro_version)]) -m4_define([libunivalue_revision], [libunivalue_interface_age]) -m4_define([libunivalue_age], [m4_eval(libunivalue_binary_age - libunivalue_interface_age)]) -m4_define([libunivalue_version], [libunivalue_major_version().libunivalue_minor_version().libunivalue_micro_version()libunivalue_extraversion()]) - - -AC_INIT([univalue], [1.0.4], - [http://github.com/jgarzik/univalue/]) - -dnl make the compilation flags quiet unless V=1 is used -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -AC_PREREQ(2.60) -AC_CONFIG_SRCDIR([lib/univalue.cpp]) -AC_CONFIG_AUX_DIR([build-aux]) -AC_CONFIG_MACRO_DIR([build-aux/m4]) -AC_CONFIG_HEADERS([univalue-config.h]) -AM_INIT_AUTOMAKE([subdir-objects foreign]) - -LIBUNIVALUE_MAJOR_VERSION=libunivalue_major_version -LIBUNIVALUE_MINOR_VERSION=libunivalue_minor_version -LIBUNIVALUE_MICRO_VERSION=libunivalue_micro_version -LIBUNIVALUE_INTERFACE_AGE=libunivalue_interface_age - -# ABI version -# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html -LIBUNIVALUE_CURRENT=libunivalue_current -LIBUNIVALUE_REVISION=libunivalue_revision -LIBUNIVALUE_AGE=libunivalue_age - -AC_SUBST(LIBUNIVALUE_CURRENT) -AC_SUBST(LIBUNIVALUE_REVISION) -AC_SUBST(LIBUNIVALUE_AGE) - -LT_INIT -LT_LANG([C++]) - -dnl Require C++17 compiler (no GNU extensions) -AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory], [nodefault]) - -case $host in - *mingw*) - LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -all-static" - ;; -esac - -BUILD_EXEEXT= -case $build in - *mingw*) - BUILD_EXEEXT=".exe" - ;; -esac - -AC_CONFIG_FILES([ - Makefile - pc/libunivalue.pc - pc/libunivalue-uninstalled.pc]) - -AC_SUBST(LIBTOOL_APP_LDFLAGS) -AC_SUBST(BUILD_EXEEXT) -AC_OUTPUT - diff --git a/src/univalue/gen/gen.cpp b/src/univalue/gen/gen.cpp deleted file mode 100644 index ca5b470ddc..0000000000 --- a/src/univalue/gen/gen.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 BitPay Inc. -// Distributed under the MIT software license, see the accompanying -// file COPYING or https://opensource.org/licenses/mit-license.php. 
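
[Editor's note] The removals above drop univalue's standalone autotools build: the bundled AX_CXX_COMPILE_STDCXX probe (the C++11/14/17 test bodies shown above) and the subtree's configure.ac, which invoked AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory], [nodefault]). The probe works by compiling a translation unit that refuses to build on an older standard. A minimal illustrative sketch of that guard (not the deleted file itself):

    // Minimal illustrative sketch of the guard the deleted probe relies on:
    // compilation fails unless the compiler reports at least C++17.
    #ifndef __cplusplus
    #error "This is not a C++ compiler"
    #elif __cplusplus < 201703L
    #error "This is not a C++17 compiler"
    #else
    #include <type_traits>
    static_assert(std::is_same_v<int, int>);   // C++17 terse static_assert and _v trait
    #endif

    int main() { return 0; }
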
- -// -// To re-create univalue_escapes.h: -// $ g++ -o gen gen.cpp -// $ ./gen > univalue_escapes.h -// - -#include <univalue.h> - -#include <cstdio> -#include <cstring> -#include <string> - -static bool initEscapes; -static std::string escapes[256]; - -static void initJsonEscape() -{ - // Escape all lower control characters (some get overridden with smaller sequences below) - for (int ch=0x00; ch<0x20; ++ch) { - char tmpbuf[20]; - snprintf(tmpbuf, sizeof(tmpbuf), "\\u%04x", ch); - escapes[ch] = std::string(tmpbuf); - } - - escapes[(int)'"'] = "\\\""; - escapes[(int)'\\'] = "\\\\"; - escapes[(int)'\b'] = "\\b"; - escapes[(int)'\f'] = "\\f"; - escapes[(int)'\n'] = "\\n"; - escapes[(int)'\r'] = "\\r"; - escapes[(int)'\t'] = "\\t"; - escapes[(int)'\x7f'] = "\\u007f"; // U+007F DELETE - - initEscapes = true; -} - -static void outputEscape() -{ - printf( "// Automatically generated file. Do not modify.\n" - "#ifndef BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H\n" - "#define BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H\n" - "static const char *escapes[256] = {\n"); - - for (unsigned int i = 0; i < 256; i++) { - if (escapes[i].empty()) { - printf("\tnullptr,\n"); - } else { - printf("\t\""); - - unsigned int si; - for (si = 0; si < escapes[i].size(); si++) { - char ch = escapes[i][si]; - switch (ch) { - case '"': - printf("\\\""); - break; - case '\\': - printf("\\\\"); - break; - default: - printf("%c", escapes[i][si]); - break; - } - } - - printf("\",\n"); - } - } - - printf( "};\n" - "#endif // BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H\n"); -} - -int main (int argc, char *argv[]) -{ - initJsonEscape(); - outputEscape(); - return 0; -} - diff --git a/src/univalue/include/univalue.h b/src/univalue/include/univalue.h index f0d4de2035..22be0311e8 100644 --- a/src/univalue/include/univalue.h +++ b/src/univalue/include/univalue.h @@ -3,8 +3,8 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or https://opensource.org/licenses/mit-license.php. -#ifndef __UNIVALUE_H__ -#define __UNIVALUE_H__ +#ifndef BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_H +#define BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_H #include <charconv> #include <cstdint> @@ -194,4 +194,4 @@ extern const UniValue NullUniValue; const UniValue& find_value( const UniValue& obj, const std::string& name); -#endif // __UNIVALUE_H__ +#endif // BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_H diff --git a/src/univalue/lib/univalue_escapes.h b/src/univalue/include/univalue_escapes.h index 3f714f8e5b..83767e8ac5 100644 --- a/src/univalue/lib/univalue_escapes.h +++ b/src/univalue/include/univalue_escapes.h @@ -1,6 +1,5 @@ -// Automatically generated file. Do not modify. -#ifndef BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H -#define BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H +#ifndef BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_ESCAPES_H +#define BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_ESCAPES_H static const char *escapes[256] = { "\\u0000", "\\u0001", @@ -259,4 +258,4 @@ static const char *escapes[256] = { nullptr, nullptr, }; -#endif // BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H +#endif // BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_ESCAPES_H diff --git a/src/univalue/lib/univalue_utffilter.h b/src/univalue/include/univalue_utffilter.h index c24ac58eaf..f688eaaa30 100644 --- a/src/univalue/lib/univalue_utffilter.h +++ b/src/univalue/include/univalue_utffilter.h @@ -1,8 +1,8 @@ // Copyright 2016 Wladimir J. van der Laan // Distributed under the MIT software license, see the accompanying // file COPYING or https://opensource.org/licenses/mit-license.php. 
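
[Editor's note] The gen.cpp generator above is deleted because univalue_escapes.h is now kept as an ordinary committed header under include/ (its "Automatically generated file" marker is dropped in the hunk above, and univalue_write.cpp switches to including <univalue_escapes.h> further below). For orientation, a small illustrative sketch of how a writer consumes such a 256-entry table; the names and the tiny table here are made up, not the real univalue code:

    #include <cstdio>
    #include <string>

    // Illustrative only: a JSON writer indexes a 256-entry table (as
    // univalue_write.cpp does with the committed univalue_escapes.h) and
    // substitutes the escape sequence when one is defined for a byte.
    static const char* g_escapes[256] = {};   // sparse stand-in table, not the real one

    static std::string JsonEscape(const std::string& in)
    {
        std::string out;
        for (const unsigned char ch : in) {
            if (g_escapes[ch] != nullptr) {
                out += g_escapes[ch];
            } else {
                out += static_cast<char>(ch);
            }
        }
        return out;
    }

    int main()
    {
        // Populate a few entries the way gen.cpp used to (quote, backslash, newline).
        g_escapes[static_cast<unsigned char>('"')] = "\\\"";
        g_escapes[static_cast<unsigned char>('\\')] = "\\\\";
        g_escapes[static_cast<unsigned char>('\n')] = "\\n";
        std::printf("%s\n", JsonEscape("say \"hi\"\n").c_str());
        return 0;
    }
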
-#ifndef UNIVALUE_UTFFILTER_H -#define UNIVALUE_UTFFILTER_H +#ifndef BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_UTFFILTER_H +#define BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_UTFFILTER_H #include <string> @@ -116,4 +116,4 @@ private: } }; -#endif +#endif // BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_UTFFILTER_H diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp index a6ed75e57a..2f2385383c 100644 --- a/src/univalue/lib/univalue_read.cpp +++ b/src/univalue/lib/univalue_read.cpp @@ -3,7 +3,7 @@ // file COPYING or https://opensource.org/licenses/mit-license.php. #include <univalue.h> -#include "univalue_utffilter.h" +#include <univalue_utffilter.h> #include <cstdio> #include <cstdint> diff --git a/src/univalue/lib/univalue_write.cpp b/src/univalue/lib/univalue_write.cpp index 18833077b7..4a3cbba20f 100644 --- a/src/univalue/lib/univalue_write.cpp +++ b/src/univalue/lib/univalue_write.cpp @@ -3,7 +3,7 @@ // file COPYING or https://opensource.org/licenses/mit-license.php. #include <univalue.h> -#include "univalue_escapes.h" +#include <univalue_escapes.h> #include <memory> #include <string> diff --git a/src/univalue/pc/libunivalue-uninstalled.pc.in b/src/univalue/pc/libunivalue-uninstalled.pc.in deleted file mode 100644 index b7f53e875e..0000000000 --- a/src/univalue/pc/libunivalue-uninstalled.pc.in +++ /dev/null @@ -1,9 +0,0 @@ -prefix=@prefix@ -exec_prefix=@exec_prefix@ -libdir=@libdir@ -includedir=@includedir@ - -Name: libunivalue -Description: libunivalue, C++ universal value object and JSON library -Version: @VERSION@ -Libs: ${pc_top_builddir}/${pcfiledir}/libunivalue.la diff --git a/src/univalue/pc/libunivalue.pc.in b/src/univalue/pc/libunivalue.pc.in deleted file mode 100644 index 358a2d5f73..0000000000 --- a/src/univalue/pc/libunivalue.pc.in +++ /dev/null @@ -1,10 +0,0 @@ -prefix=@prefix@ -exec_prefix=@exec_prefix@ -libdir=@libdir@ -includedir=@includedir@ - -Name: libunivalue -Description: libunivalue, C++ universal value object and JSON library -Version: @VERSION@ -Libs: -L${libdir} -lunivalue -Cflags: -I${includedir} diff --git a/src/univalue/sources.mk b/src/univalue/sources.mk index efab6d277f..e156216378 100644 --- a/src/univalue/sources.mk +++ b/src/univalue/sources.mk @@ -12,10 +12,8 @@ UNIVALUE_INCLUDE_DIR_INT = %reldir%/include UNIVALUE_DIST_HEADERS_INT = UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue.h - -UNIVALUE_LIB_HEADERS_INT = -UNIVALUE_LIB_HEADERS_INT += %reldir%/lib/univalue_utffilter.h -UNIVALUE_LIB_HEADERS_INT += %reldir%/lib/univalue_escapes.h +UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue_utffilter.h +UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue_escapes.h UNIVALUE_LIB_SOURCES_INT = UNIVALUE_LIB_SOURCES_INT += %reldir%/lib/univalue.cpp diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp index 81b1c5d3b1..94c149b39f 100644 --- a/src/univalue/test/unitester.cpp +++ b/src/univalue/test/unitester.cpp @@ -12,15 +12,7 @@ #error JSON_TEST_SRC must point to test source directory #endif -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) -#endif - std::string srcdir(JSON_TEST_SRC); -static bool test_failed = false; - -#define d_assert(expr) { if (!(expr)) { test_failed = true; fprintf(stderr, "%s failed\n", filename.c_str()); } } -#define f_assert(expr) { if (!(expr)) { test_failed = true; fprintf(stderr, "%s failed\n", __func__); } } static std::string rtrim(std::string s) { @@ -41,9 +33,9 @@ static void runtest(std::string filename, const std::string& jdata) bool testResult = 
val.read(jdata); if (wantPass) { - d_assert(testResult == true); + assert(testResult == true); } else { - d_assert(testResult == false); + assert(testResult == false); } if (wantRoundTrip) { @@ -141,30 +133,30 @@ void unescape_unicode_test() bool testResult; // Escaped ASCII (quote) testResult = val.read("[\"\\u0022\"]"); - f_assert(testResult); - f_assert(val[0].get_str() == "\""); + assert(testResult); + assert(val[0].get_str() == "\""); // Escaped Basic Plane character, two-byte UTF-8 testResult = val.read("[\"\\u0191\"]"); - f_assert(testResult); - f_assert(val[0].get_str() == "\xc6\x91"); + assert(testResult); + assert(val[0].get_str() == "\xc6\x91"); // Escaped Basic Plane character, three-byte UTF-8 testResult = val.read("[\"\\u2191\"]"); - f_assert(testResult); - f_assert(val[0].get_str() == "\xe2\x86\x91"); + assert(testResult); + assert(val[0].get_str() == "\xe2\x86\x91"); // Escaped Supplementary Plane character U+1d161 testResult = val.read("[\"\\ud834\\udd61\"]"); - f_assert(testResult); - f_assert(val[0].get_str() == "\xf0\x9d\x85\xa1"); + assert(testResult); + assert(val[0].get_str() == "\xf0\x9d\x85\xa1"); } int main (int argc, char *argv[]) { - for (unsigned int fidx = 0; fidx < ARRAY_SIZE(filenames); fidx++) { - runtest_file(filenames[fidx]); + for (const auto& f: filenames) { + runtest_file(f); } unescape_unicode_test(); - return test_failed ? 1 : 0; + return 0; } diff --git a/src/util/sock.cpp b/src/util/sock.cpp index 3579af4458..7d5069423a 100644 --- a/src/util/sock.cpp +++ b/src/util/sock.cpp @@ -113,63 +113,103 @@ int Sock::SetSockOpt(int level, int opt_name, const void* opt_val, socklen_t opt bool Sock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const { -#ifdef USE_POLL - pollfd fd; - fd.fd = m_socket; - fd.events = 0; - if (requested & RECV) { - fd.events |= POLLIN; - } - if (requested & SEND) { - fd.events |= POLLOUT; - } + // We need a `shared_ptr` owning `this` for `WaitMany()`, but don't want + // `this` to be destroyed when the `shared_ptr` goes out of scope at the + // end of this function. Create it with a custom noop deleter. 
+ std::shared_ptr<const Sock> shared{this, [](const Sock*) {}}; + + EventsPerSock events_per_sock{std::make_pair(shared, Events{requested})}; - if (poll(&fd, 1, count_milliseconds(timeout)) == SOCKET_ERROR) { + if (!WaitMany(timeout, events_per_sock)) { return false; } if (occurred != nullptr) { - *occurred = 0; - if (fd.revents & POLLIN) { - *occurred |= RECV; + *occurred = events_per_sock.begin()->second.occurred; + } + + return true; +} + +bool Sock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const +{ +#ifdef USE_POLL + std::vector<pollfd> pfds; + for (const auto& [sock, events] : events_per_sock) { + pfds.emplace_back(); + auto& pfd = pfds.back(); + pfd.fd = sock->m_socket; + if (events.requested & RECV) { + pfd.events |= POLLIN; } - if (fd.revents & POLLOUT) { - *occurred |= SEND; + if (events.requested & SEND) { + pfd.events |= POLLOUT; } } - return true; -#else - if (!IsSelectableSocket(m_socket)) { + if (poll(pfds.data(), pfds.size(), count_milliseconds(timeout)) == SOCKET_ERROR) { return false; } - fd_set fdset_recv; - fd_set fdset_send; - FD_ZERO(&fdset_recv); - FD_ZERO(&fdset_send); - - if (requested & RECV) { - FD_SET(m_socket, &fdset_recv); + assert(pfds.size() == events_per_sock.size()); + size_t i{0}; + for (auto& [sock, events] : events_per_sock) { + assert(sock->m_socket == static_cast<SOCKET>(pfds[i].fd)); + events.occurred = 0; + if (pfds[i].revents & POLLIN) { + events.occurred |= RECV; + } + if (pfds[i].revents & POLLOUT) { + events.occurred |= SEND; + } + if (pfds[i].revents & (POLLERR | POLLHUP)) { + events.occurred |= ERR; + } + ++i; } - if (requested & SEND) { - FD_SET(m_socket, &fdset_send); + return true; +#else + fd_set recv; + fd_set send; + fd_set err; + FD_ZERO(&recv); + FD_ZERO(&send); + FD_ZERO(&err); + SOCKET socket_max{0}; + + for (const auto& [sock, events] : events_per_sock) { + const auto& s = sock->m_socket; + if (!IsSelectableSocket(s)) { + return false; + } + if (events.requested & RECV) { + FD_SET(s, &recv); + } + if (events.requested & SEND) { + FD_SET(s, &send); + } + FD_SET(s, &err); + socket_max = std::max(socket_max, s); } - timeval timeout_struct = MillisToTimeval(timeout); + timeval tv = MillisToTimeval(timeout); - if (select(m_socket + 1, &fdset_recv, &fdset_send, nullptr, &timeout_struct) == SOCKET_ERROR) { + if (select(socket_max + 1, &recv, &send, &err, &tv) == SOCKET_ERROR) { return false; } - if (occurred != nullptr) { - *occurred = 0; - if (FD_ISSET(m_socket, &fdset_recv)) { - *occurred |= RECV; + for (auto& [sock, events] : events_per_sock) { + const auto& s = sock->m_socket; + events.occurred = 0; + if (FD_ISSET(s, &recv)) { + events.occurred |= RECV; + } + if (FD_ISSET(s, &send)) { + events.occurred |= SEND; } - if (FD_ISSET(m_socket, &fdset_send)) { - *occurred |= SEND; + if (FD_ISSET(s, &err)) { + events.occurred |= ERR; } } diff --git a/src/util/sock.h b/src/util/sock.h index dd2913a66c..3245820995 100644 --- a/src/util/sock.h +++ b/src/util/sock.h @@ -12,6 +12,7 @@ #include <chrono> #include <memory> #include <string> +#include <unordered_map> /** * Maximum time to wait for I/O readiness. @@ -130,26 +131,84 @@ public: /** * If passed to `Wait()`, then it will wait for readiness to read from the socket. */ - static constexpr Event RECV = 0b01; + static constexpr Event RECV = 0b001; /** * If passed to `Wait()`, then it will wait for readiness to send to the socket. 
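
[Editor's note] The rewritten Sock::Wait() above funnels into WaitMany() by wrapping `this` in a shared_ptr whose deleter is a no-op, so the events map can hold a shared_ptr without ever owning or destroying the Sock. A self-contained sketch of that non-owning shared_ptr pattern (Widget is an illustrative stand-in, not project code):

    #include <memory>

    struct Widget { int value{0}; };   // illustrative stand-in for Sock

    // Same pattern as the hunk above: hand out a shared_ptr to an object we do
    // not own by giving it a deleter that deliberately does nothing, so the
    // object is untouched when the shared_ptr goes out of scope.
    void UseWithoutOwnership(Widget& w)
    {
        std::shared_ptr<const Widget> alias{&w, [](const Widget*) { /* no-op */ }};
        // `alias` can now be stored in containers or passed to APIs that expect
        // a shared_ptr (here: the EventsPerSock map) without transferring ownership.
        (void)alias;
    }

    int main()
    {
        Widget w;
        UseWithoutOwnership(w);   // `w` is still alive and owned by this scope
        return 0;
    }
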
*/ - static constexpr Event SEND = 0b10; + static constexpr Event SEND = 0b010; + + /** + * Ignored if passed to `Wait()`, but could be set in the occurred events if an + * exceptional condition has occurred on the socket or if it has been disconnected. + */ + static constexpr Event ERR = 0b100; /** * Wait for readiness for input (recv) or output (send). * @param[in] timeout Wait this much for at least one of the requested events to occur. * @param[in] requested Wait for those events, bitwise-or of `RECV` and `SEND`. - * @param[out] occurred If not nullptr and `true` is returned, then upon return this - * indicates which of the requested events occurred. A timeout is indicated by return - * value of `true` and `occurred` being set to 0. - * @return true on success and false otherwise + * @param[out] occurred If not nullptr and the function returns `true`, then this + * indicates which of the requested events occurred (`ERR` will be added, even if + * not requested, if an exceptional event occurs on the socket). + * A timeout is indicated by return value of `true` and `occurred` being set to 0. + * @return true on success (or timeout, if `occurred` of 0 is returned), false otherwise */ [[nodiscard]] virtual bool Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred = nullptr) const; + /** + * Auxiliary requested/occurred events to wait for in `WaitMany()`. + */ + struct Events { + explicit Events(Event req) : requested{req}, occurred{0} {} + Event requested; + Event occurred; + }; + + struct HashSharedPtrSock { + size_t operator()(const std::shared_ptr<const Sock>& s) const + { + return s ? s->m_socket : std::numeric_limits<SOCKET>::max(); + } + }; + + struct EqualSharedPtrSock { + bool operator()(const std::shared_ptr<const Sock>& lhs, + const std::shared_ptr<const Sock>& rhs) const + { + if (lhs && rhs) { + return lhs->m_socket == rhs->m_socket; + } + if (!lhs && !rhs) { + return true; + } + return false; + } + }; + + /** + * On which socket to wait for what events in `WaitMany()`. + * The `shared_ptr` is copied into the map to ensure that the `Sock` object + * is not destroyed (its destructor would close the underlying socket). + * If this happens shortly before or after we call `poll(2)` and a new + * socket gets created under the same file descriptor number then the report + * from `WaitMany()` will be bogus. + */ + using EventsPerSock = std::unordered_map<std::shared_ptr<const Sock>, Events, HashSharedPtrSock, EqualSharedPtrSock>; + + /** + * Same as `Wait()`, but wait on many sockets within the same timeout. + * @param[in] timeout Wait this long for at least one of the requested events to occur. + * @param[in,out] events_per_sock Wait for the requested events on these sockets and set + * `occurred` for the events that actually occurred. + * @return true on success (or timeout, if all `what[].occurred` are returned as 0), + * false otherwise + */ + [[nodiscard]] virtual bool WaitMany(std::chrono::milliseconds timeout, + EventsPerSock& events_per_sock) const; + /* Higher level, convenience, methods. These may throw. */ /** diff --git a/src/util/system.cpp b/src/util/system.cpp index f88b0fac77..6e2638a5d6 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -96,7 +96,7 @@ const char * const BITCOIN_SETTINGS_FILENAME = "settings.json"; ArgsManager gArgs; /** Mutex to protect dir_locks. */ -static Mutex cs_dir_locks; +static GlobalMutex cs_dir_locks; /** A map that contains all the currently held directory locks. 
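
[Editor's note] The sock.h additions above complete the new interface: callers build an EventsPerSock map keyed by shared_ptr<const Sock>, request RECV/SEND per socket, call WaitMany(), and then read Events::occurred (which may include ERR even when it was not requested). A hedged usage sketch against those declarations; it assumes the in-tree <util/sock.h> header and two sockets that already exist, so it is not a drop-in excerpt from the patch:

    // Usage sketch only, written against the declarations shown above.
    #include <chrono>
    #include <memory>
    #include <util/sock.h>

    bool WaitOnTwoSockets(const std::shared_ptr<const Sock>& a,
                          const std::shared_ptr<const Sock>& b)
    {
        Sock::EventsPerSock events_per_sock{
            {a, Sock::Events{Sock::RECV}},
            {b, Sock::Events{Sock::RECV | Sock::SEND}},
        };
        if (!a->WaitMany(std::chrono::milliseconds{50}, events_per_sock)) {
            return false;   // genuine failure; a timeout still returns true
        }
        for (const auto& [sock, events] : events_per_sock) {
            (void)sock;
            if (events.occurred & Sock::ERR) { /* disconnected or exceptional condition */ }
            if (events.occurred & Sock::RECV) { /* ready to recv */ }
            if (events.occurred & Sock::SEND) { /* ready to send */ }
        }
        return true;
    }
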
After * successful locking, these will be held here until the global destructor * cleans them up and thus automatically unlocks them, or ReleaseDirectoryLocks diff --git a/src/validation.cpp b/src/validation.cpp index 448720ff83..26ce286c63 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -22,7 +22,7 @@ #include <logging.h> #include <logging/timer.h> #include <node/blockstorage.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <node/utxo_snapshot.h> #include <policy/policy.h> #include <policy/rbf.h> @@ -122,7 +122,7 @@ static constexpr int PRUNE_LOCK_BUFFER{10}; */ RecursiveMutex cs_main; -Mutex g_best_block_mutex; +GlobalMutex g_best_block_mutex; std::condition_variable g_best_block_cv; uint256 g_best_block; bool g_parallel_script_checks{false}; @@ -134,8 +134,6 @@ int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; uint256 hashAssumeValid; arith_uint256 nMinimumChainWork; -CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); - const CBlockIndex* CChainState::FindForkInGlobalIndex(const CBlockLocator& locator) const { AssertLockHeld(cs_main); diff --git a/src/validation.h b/src/validation.h index 31dd089005..70b6c072e6 100644 --- a/src/validation.h +++ b/src/validation.h @@ -58,8 +58,6 @@ namespace Consensus { struct Params; } // namespace Consensus -/** Default for -minrelaytxfee, minimum relay fee for transactions */ -static const unsigned int DEFAULT_MIN_RELAY_TX_FEE = 1000; /** Default for -limitancestorcount, max number of in-mempool ancestors */ static const unsigned int DEFAULT_ANCESTOR_LIMIT = 25; /** Default for -limitancestorsize, maximum kilobytes of tx + all in-mempool ancestors */ @@ -115,7 +113,7 @@ enum class SynchronizationState { }; extern RecursiveMutex cs_main; -extern Mutex g_best_block_mutex; +extern GlobalMutex g_best_block_mutex; extern std::condition_variable g_best_block_cv; /** Used to notify getblocktemplate RPC of new tips. */ extern uint256 g_best_block; @@ -126,8 +124,6 @@ extern bool g_parallel_script_checks; extern bool fRequireStandard; extern bool fCheckBlockIndex; extern bool fCheckpointsEnabled; -/** A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) */ -extern CFeeRate minRelayTxFee; /** If the tip is older than this (in seconds), the node is considered to be in initial block download. 
*/ extern int64_t nMaxTipAge; diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 7f038eda84..174c68744c 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -9,7 +9,7 @@ #include <interfaces/wallet.h> #include <net.h> #include <node/context.h> -#include <node/ui_interface.h> +#include <node/interface_ui.h> #include <outputtype.h> #include <univalue.h> #include <util/check.h> diff --git a/src/wallet/rpc/addresses.cpp b/src/wallet/rpc/addresses.cpp index d5444f5051..f25ad59528 100644 --- a/src/wallet/rpc/addresses.cpp +++ b/src/wallet/rpc/addresses.cpp @@ -302,11 +302,11 @@ RPCHelpMan addmultisigaddress() result.pushKV("descriptor", descriptor->ToString()); UniValue warnings(UniValue::VARR); - if (!request.params[3].isNull() && OutputTypeFromDestination(dest) != output_type) { + if (descriptor->GetOutputType() != output_type) { // Only warns if the user has explicitly chosen an address type we cannot generate warnings.push_back("Unable to make chosen address type, please ensure no uncompressed public keys are present."); } - if (warnings.size()) result.pushKV("warnings", warnings); + if (!warnings.empty()) result.pushKV("warnings", warnings); return result; }, diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index c289c05f2c..5f1673fb12 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -198,14 +198,15 @@ RPCHelpMan importprivkey() RPCHelpMan importaddress() { return RPCHelpMan{"importaddress", - "\nAdds an address or script (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n" + "\nAdds an address or script (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n" "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n" "may report that the imported address exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n" "If you have the full public key, you should call importpubkey instead of this.\n" "Hint: use importmulti to import more than one address.\n" "\nNote: If you import a non-standard raw script in hex form, outputs sending to it will be treated\n" "as change, and not show up in many RPCs.\n" - "Note: Use \"getwalletinfo\" to query the scanning progress.\n", + "Note: Use \"getwalletinfo\" to query the scanning progress.\n" + "Note: This command is only compatible with legacy wallets. 
Use \"importdescriptors\" with \"addr(X)\" for descriptor wallets.\n", { {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The Bitcoin address (or hex-encoded script)"}, {"label", RPCArg::Type::STR, RPCArg::Default{""}, "An optional label"}, @@ -1621,7 +1622,7 @@ RPCHelpMan importdescriptors() }, RPCExamples{ HelpExampleCli("importdescriptors", "'[{ \"desc\": \"<my descriptor>\", \"timestamp\":1455191478, \"internal\": true }, " - "{ \"desc\": \"<my desccriptor 2>\", \"label\": \"example 2\", \"timestamp\": 1455191480 }]'") + + "{ \"desc\": \"<my descriptor 2>\", \"label\": \"example 2\", \"timestamp\": 1455191480 }]'") + HelpExampleCli("importdescriptors", "'[{ \"desc\": \"<my descriptor>\", \"timestamp\":1455191478, \"active\": true, \"range\": [0,100], \"label\": \"<my bech32 wallet>\" }]'") }, [&](const RPCHelpMan& self, const JSONRPCRequest& main_request) -> UniValue diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp index d1a0ba50f6..2520cd2532 100644 --- a/src/wallet/rpc/spend.cpp +++ b/src/wallet/rpc/spend.cpp @@ -699,19 +699,6 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out, setSubtractFeeFromOutputs.insert(pos); } - // Fetch specified UTXOs from the UTXO set to get the scriptPubKeys and values of the outputs being selected - // and to match with the given solving_data. Only used for non-wallet outputs. - std::map<COutPoint, Coin> coins; - for (const CTxIn& txin : tx.vin) { - coins[txin.prevout]; // Create empty map entry keyed by prevout. - } - wallet.chain().findCoins(coins); - for (const auto& coin : coins) { - if (!coin.second.out.IsNull()) { - coinControl.SelectExternal(coin.first, coin.second.out); - } - } - bilingual_str error; if (!FundTransaction(wallet, tx, fee_out, change_position, error, lockUnspents, setSubtractFeeFromOutputs, coinControl)) { diff --git a/src/wallet/rpc/util.cpp b/src/wallet/rpc/util.cpp index 59683c5fd8..4fcb393226 100644 --- a/src/wallet/rpc/util.cpp +++ b/src/wallet/rpc/util.cpp @@ -101,7 +101,7 @@ LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_cr spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan(); } if (!spk_man) { - throw JSONRPCError(RPC_WALLET_ERROR, "This type of wallet does not support this command"); + throw JSONRPCError(RPC_WALLET_ERROR, "Only legacy wallets are supported by this command"); } return *spk_man; } @@ -110,7 +110,7 @@ const LegacyScriptPubKeyMan& EnsureConstLegacyScriptPubKeyMan(const CWallet& wal { const LegacyScriptPubKeyMan* spk_man = wallet.GetLegacyScriptPubKeyMan(); if (!spk_man) { - throw JSONRPCError(RPC_WALLET_ERROR, "This type of wallet does not support this command"); + throw JSONRPCError(RPC_WALLET_ERROR, "Only legacy wallets are supported by this command"); } return *spk_man; } diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp index 6f79ae4e9b..ce99aeefe5 100644 --- a/src/wallet/spend.cpp +++ b/src/wallet/spend.cpp @@ -1023,14 +1023,28 @@ bool FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& nFeeRet, coinControl.fAllowOtherInputs = true; - for (const CTxIn& txin : tx.vin) { - coinControl.Select(txin.prevout); - } - // Acquire the locks to prevent races to the new locked unspents between the // CreateTransaction call and LockCoin calls (when lockUnspents is true). LOCK(wallet.cs_wallet); + // Fetch specified UTXOs from the UTXO set to get the scriptPubKeys and values of the outputs being selected + // and to match with the given solving_data. Only used for non-wallet outputs. 
+ std::map<COutPoint, Coin> coins; + for (const CTxIn& txin : tx.vin) { + coins[txin.prevout]; // Create empty map entry keyed by prevout. + } + wallet.chain().findCoins(coins); + + for (const CTxIn& txin : tx.vin) { + // if it's not in the wallet and corresponding UTXO is found than select as external output + const auto& outPoint = txin.prevout; + if (wallet.mapWallet.find(outPoint.hash) == wallet.mapWallet.end() && !coins[outPoint].out.IsNull()) { + coinControl.SelectExternal(outPoint, coins[outPoint].out); + } else { + coinControl.Select(outPoint); + } + } + FeeCalculation fee_calc_out; std::optional<CreatedTransactionResult> txr = CreateTransaction(wallet, vecSend, nChangePosInOut, error, coinControl, fee_calc_out, false); if (!txr) return false; diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index 2515df3177..053fb8f983 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -23,7 +23,7 @@ namespace wallet { static constexpr int32_t WALLET_SCHEMA_VERSION = 0; -static Mutex g_sqlite_mutex; +static GlobalMutex g_sqlite_mutex; static int g_sqlite_count GUARDED_BY(g_sqlite_mutex) = 0; static void ErrorLogCallback(void* arg, int code, const char* msg) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 6c333c709b..910562e669 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -173,8 +173,8 @@ void NotifyWalletLoaded(WalletContext& context, const std::shared_ptr<CWallet>& } } -static Mutex g_loading_wallet_mutex; -static Mutex g_wallet_release_mutex; +static GlobalMutex g_loading_wallet_mutex; +static GlobalMutex g_wallet_release_mutex; static std::condition_variable g_wallet_release_cv; static std::set<std::string> g_loading_wallet_set GUARDED_BY(g_loading_wallet_mutex); static std::set<std::string> g_unloading_wallet_set GUARDED_BY(g_wallet_release_mutex); @@ -2106,7 +2106,7 @@ void CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::ve // Add tx to wallet, because if it has change it's also ours, // otherwise just for transaction history. - AddToWallet(tx, TxStateInactive{}, [&](CWalletTx& wtx, bool new_tx) { + CWalletTx* wtx = AddToWallet(tx, TxStateInactive{}, [&](CWalletTx& wtx, bool new_tx) { CHECK_NONFATAL(wtx.mapValue.empty()); CHECK_NONFATAL(wtx.vOrderForm.empty()); wtx.mapValue = std::move(mapValue); @@ -2116,6 +2116,11 @@ void CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::ve return true; }); + // wtx can only be null if the db write failed. + if (!wtx) { + throw std::runtime_error(std::string(__func__) + ": Wallet db error, transaction commit failed"); + } + // Notify that old coins are spent for (const CTxIn& txin : tx->vin) { CWalletTx &coin = mapWallet.at(txin.prevout.hash); @@ -2123,17 +2128,13 @@ void CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::ve NotifyTransactionChanged(coin.GetHash(), CT_UPDATED); } - // Get the inserted-CWalletTx from mapWallet so that the - // wtx cached mempool state is updated correctly - CWalletTx& wtx = mapWallet.at(tx->GetHash()); - if (!fBroadcastTransactions) { // Don't submit tx to the mempool return; } std::string err_string; - if (!SubmitTxMemoryPoolAndRelay(wtx, err_string, true)) { + if (!SubmitTxMemoryPoolAndRelay(*wtx, err_string, true)) { WalletLogPrintf("CommitTransaction(): Transaction cannot be broadcast immediately, %s\n", err_string); // TODO: if we expect the failure to be long term or permanent, instead delete wtx from the wallet and return failure. 
} diff --git a/src/warnings.cpp b/src/warnings.cpp index 60388cc713..dabb194ce1 100644 --- a/src/warnings.cpp +++ b/src/warnings.cpp @@ -12,7 +12,7 @@ #include <vector> -static Mutex g_warnings_mutex; +static GlobalMutex g_warnings_mutex; static bilingual_str g_misc_warnings GUARDED_BY(g_warnings_mutex); static bool fLargeWorkInvalidChainFound GUARDED_BY(g_warnings_mutex) = false; diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp index a53de34db4..26618735f6 100644 --- a/src/zmq/zmqnotificationinterface.cpp +++ b/src/zmq/zmqnotificationinterface.cpp @@ -70,9 +70,9 @@ bool CZMQNotificationInterface::Initialize() { int major = 0, minor = 0, patch = 0; zmq_version(&major, &minor, &patch); - LogPrint(BCLog::ZMQ, "zmq: version %d.%d.%d\n", major, minor, patch); + LogPrint(BCLog::ZMQ, "version %d.%d.%d\n", major, minor, patch); - LogPrint(BCLog::ZMQ, "zmq: Initialize notification interface\n"); + LogPrint(BCLog::ZMQ, "Initialize notification interface\n"); assert(!pcontext); pcontext = zmq_ctx_new(); @@ -85,9 +85,9 @@ bool CZMQNotificationInterface::Initialize() for (auto& notifier : notifiers) { if (notifier->Initialize(pcontext)) { - LogPrint(BCLog::ZMQ, "zmq: Notifier %s ready (address = %s)\n", notifier->GetType(), notifier->GetAddress()); + LogPrint(BCLog::ZMQ, "Notifier %s ready (address = %s)\n", notifier->GetType(), notifier->GetAddress()); } else { - LogPrint(BCLog::ZMQ, "zmq: Notifier %s failed (address = %s)\n", notifier->GetType(), notifier->GetAddress()); + LogPrint(BCLog::ZMQ, "Notifier %s failed (address = %s)\n", notifier->GetType(), notifier->GetAddress()); return false; } } @@ -98,11 +98,11 @@ bool CZMQNotificationInterface::Initialize() // Called during shutdown sequence void CZMQNotificationInterface::Shutdown() { - LogPrint(BCLog::ZMQ, "zmq: Shutdown notification interface\n"); + LogPrint(BCLog::ZMQ, "Shutdown notification interface\n"); if (pcontext) { for (auto& notifier : notifiers) { - LogPrint(BCLog::ZMQ, "zmq: Shutdown notifier %s at %s\n", notifier->GetType(), notifier->GetAddress()); + LogPrint(BCLog::ZMQ, "Shutdown notifier %s at %s\n", notifier->GetType(), notifier->GetAddress()); notifier->Shutdown(); } zmq_ctx_term(pcontext); diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp index 2c6f24a239..011336bf72 100644 --- a/src/zmq/zmqpublishnotifier.cpp +++ b/src/zmq/zmqpublishnotifier.cpp @@ -106,7 +106,7 @@ bool CZMQAbstractPublishNotifier::Initialize(void *pcontext) return false; } - LogPrint(BCLog::ZMQ, "zmq: Outbound message high water mark for %s at %s is %d\n", type, address, outbound_message_high_water_mark); + LogPrint(BCLog::ZMQ, "Outbound message high water mark for %s at %s is %d\n", type, address, outbound_message_high_water_mark); int rc = zmq_setsockopt(psocket, ZMQ_SNDHWM, &outbound_message_high_water_mark, sizeof(outbound_message_high_water_mark)); if (rc != 0) @@ -147,8 +147,8 @@ bool CZMQAbstractPublishNotifier::Initialize(void *pcontext) } else { - LogPrint(BCLog::ZMQ, "zmq: Reusing socket for address %s\n", address); - LogPrint(BCLog::ZMQ, "zmq: Outbound message high water mark for %s at %s is %d\n", type, address, outbound_message_high_water_mark); + LogPrint(BCLog::ZMQ, "Reusing socket for address %s\n", address); + LogPrint(BCLog::ZMQ, "Outbound message high water mark for %s at %s is %d\n", type, address, outbound_message_high_water_mark); psocket = i->second->psocket; mapPublishNotifiers.insert(std::make_pair(address, this)); @@ -179,7 +179,7 @@ void 
CZMQAbstractPublishNotifier::Shutdown() if (count == 1) { - LogPrint(BCLog::ZMQ, "zmq: Close socket at address %s\n", address); + LogPrint(BCLog::ZMQ, "Close socket at address %s\n", address); int linger = 0; zmq_setsockopt(psocket, ZMQ_LINGER, &linger, sizeof(linger)); zmq_close(psocket); @@ -208,7 +208,7 @@ bool CZMQAbstractPublishNotifier::SendZmqMessage(const char *command, const void bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex) { uint256 hash = pindex->GetBlockHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish hashblock %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish hashblock %s to %s\n", hash.GetHex(), this->address); uint8_t data[32]; for (unsigned int i = 0; i < 32; i++) { data[31 - i] = hash.begin()[i]; @@ -219,7 +219,7 @@ bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex) bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &transaction) { uint256 hash = transaction.GetHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish hashtx %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish hashtx %s to %s\n", hash.GetHex(), this->address); uint8_t data[32]; for (unsigned int i = 0; i < 32; i++) { data[31 - i] = hash.begin()[i]; @@ -229,7 +229,7 @@ bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &t bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex) { - LogPrint(BCLog::ZMQ, "zmq: Publish rawblock %s to %s\n", pindex->GetBlockHash().GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish rawblock %s to %s\n", pindex->GetBlockHash().GetHex(), this->address); const Consensus::Params& consensusParams = Params().GetConsensus(); CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); @@ -251,7 +251,7 @@ bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex) bool CZMQPublishRawTransactionNotifier::NotifyTransaction(const CTransaction &transaction) { uint256 hash = transaction.GetHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish rawtx %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish rawtx %s to %s\n", hash.GetHex(), this->address); CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ss << transaction; return SendZmqMessage(MSG_RAWTX, &(*ss.begin()), ss.size()); @@ -273,27 +273,27 @@ static bool SendSequenceMsg(CZMQAbstractPublishNotifier& notifier, uint256 hash, bool CZMQPublishSequenceNotifier::NotifyBlockConnect(const CBlockIndex *pindex) { uint256 hash = pindex->GetBlockHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish sequence block connect %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish sequence block connect %s to %s\n", hash.GetHex(), this->address); return SendSequenceMsg(*this, hash, /* Block (C)onnect */ 'C'); } bool CZMQPublishSequenceNotifier::NotifyBlockDisconnect(const CBlockIndex *pindex) { uint256 hash = pindex->GetBlockHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish sequence block disconnect %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish sequence block disconnect %s to %s\n", hash.GetHex(), this->address); return SendSequenceMsg(*this, hash, /* Block (D)isconnect */ 'D'); } bool CZMQPublishSequenceNotifier::NotifyTransactionAcceptance(const CTransaction &transaction, uint64_t mempool_sequence) { uint256 hash = transaction.GetHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish hashtx mempool acceptance %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish hashtx mempool 
acceptance %s to %s\n", hash.GetHex(), this->address); return SendSequenceMsg(*this, hash, /* Mempool (A)cceptance */ 'A', mempool_sequence); } bool CZMQPublishSequenceNotifier::NotifyTransactionRemoval(const CTransaction &transaction, uint64_t mempool_sequence) { uint256 hash = transaction.GetHash(); - LogPrint(BCLog::ZMQ, "zmq: Publish hashtx mempool removal %s to %s\n", hash.GetHex(), this->address); + LogPrint(BCLog::ZMQ, "Publish hashtx mempool removal %s to %s\n", hash.GetHex(), this->address); return SendSequenceMsg(*this, hash, /* Mempool (R)emoval */ 'R', mempool_sequence); } diff --git a/src/zmq/zmqutil.cpp b/src/zmq/zmqutil.cpp index f0568634d4..cf3a0b2d71 100644 --- a/src/zmq/zmqutil.cpp +++ b/src/zmq/zmqutil.cpp @@ -12,5 +12,5 @@ void zmqError(const std::string& str) { - LogPrint(BCLog::ZMQ, "zmq: Error: %s, msg: %s\n", str, zmq_strerror(errno)); + LogPrint(BCLog::ZMQ, "Error: %s, msg: %s\n", str, zmq_strerror(errno)); } diff --git a/test/README.md b/test/README.md index 0d9b9fb89b..6ca7cc0016 100644 --- a/test/README.md +++ b/test/README.md @@ -327,7 +327,7 @@ test/lint/lint-files.py You can run all the shell-based lint tests by running: ``` -test/lint/lint-all.py +test/lint/all-lint.py ``` # Writing functional tests diff --git a/test/functional/feature_bind_extra.py b/test/functional/feature_bind_extra.py index 6802da8d48..5de9ff203c 100755 --- a/test/functional/feature_bind_extra.py +++ b/test/functional/feature_bind_extra.py @@ -18,12 +18,12 @@ from test_framework.test_framework import ( SkipTest, ) from test_framework.util import ( - PORT_MIN, - PORT_RANGE, assert_equal, + p2p_port, rpc_port, ) + class BindExtraTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True @@ -33,11 +33,6 @@ class BindExtraTest(BitcoinTestFramework): self.num_nodes = 2 def setup_network(self): - # Override setup_network() because we want to put the result of - # p2p_port() in self.extra_args[], before the nodes are started. - # p2p_port() is not usable in set_test_params() because PortSeed.n is - # not set at that time. - # Due to OS-specific network stats queries, we only run on Linux. self.log.info("Checking for Linux") if not sys.platform.startswith('linux'): @@ -45,8 +40,8 @@ class BindExtraTest(BitcoinTestFramework): loopback_ipv4 = addr_to_hex("127.0.0.1") - # Start custom ports after p2p and rpc ports. - port = PORT_MIN + 2 * PORT_RANGE + # Start custom ports by reusing unused p2p ports + port = p2p_port(self.num_nodes) # Array of tuples [command line arguments, expected bind addresses]. self.expected = [] diff --git a/test/functional/feature_index_prune.py b/test/functional/feature_index_prune.py index 3ee6a8036c..bc85e43a57 100755 --- a/test/functional/feature_index_prune.py +++ b/test/functional/feature_index_prune.py @@ -73,7 +73,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework): pruneheight_new = node.pruneblockchain(400) # the prune heights used here and below are magic numbers that are determined by the # thresholds at which block files wrap, so they depend on disk serialization and default block file size. 
- assert_equal(pruneheight_new, 249) + assert_equal(pruneheight_new, 248) self.log.info("check if we can access the tips blockfilter and coinstats when we have pruned some blocks") tip = self.nodes[0].getbestblockhash() @@ -108,7 +108,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework): self.log.info("prune exactly up to the indices best blocks while the indices are disabled") for i in range(3): pruneheight_2 = self.nodes[i].pruneblockchain(1000) - assert_equal(pruneheight_2, 751) + assert_equal(pruneheight_2, 750) # Restart the nodes again with the indices activated self.restart_node(i, extra_args=self.extra_args[i]) @@ -142,7 +142,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework): for node in self.nodes[:2]: with node.assert_debug_log(['limited pruning to height 2489']): pruneheight_new = node.pruneblockchain(2500) - assert_equal(pruneheight_new, 2006) + assert_equal(pruneheight_new, 2005) self.log.info("ensure that prune locks don't prevent indices from failing in a reorg scenario") with self.nodes[0].assert_debug_log(['basic block filter index prune lock moved back to 2480']): diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py index d0cb1e10e2..13c7326519 100755 --- a/test/functional/feature_init.py +++ b/test/functional/feature_init.py @@ -49,33 +49,33 @@ class InitStressTest(BitcoinTestFramework): assert_equal(200, node.getblockcount()) lines_to_terminate_after = [ - 'Validating signatures for all blocks', - 'scheduler thread start', - 'Starting HTTP server', - 'Loading P2P addresses', - 'Loading banlist', - 'Loading block index', - 'Switching active chainstate', - 'Checking all blk files are present', - 'Loaded best chain:', - 'init message: Verifying blocks', - 'init message: Starting network threads', - 'net thread start', - 'addcon thread start', - 'loadblk thread start', - 'txindex thread start', - 'block filter index thread start', - 'coinstatsindex thread start', - 'msghand thread start', - 'net thread start', - 'addcon thread start', + b'Validating signatures for all blocks', + b'scheduler thread start', + b'Starting HTTP server', + b'Loading P2P addresses', + b'Loading banlist', + b'Loading block index', + b'Switching active chainstate', + b'Checking all blk files are present', + b'Loaded best chain:', + b'init message: Verifying blocks', + b'init message: Starting network threads', + b'net thread start', + b'addcon thread start', + b'loadblk thread start', + b'txindex thread start', + b'block filter index thread start', + b'coinstatsindex thread start', + b'msghand thread start', + b'net thread start', + b'addcon thread start', ] if self.is_wallet_compiled(): - lines_to_terminate_after.append('Verifying wallet') + lines_to_terminate_after.append(b'Verifying wallet') for terminate_line in lines_to_terminate_after: - self.log.info(f"Starting node and will exit after line '{terminate_line}'") - with node.wait_for_debug_log([terminate_line], ignore_case=True): + self.log.info(f"Starting node and will exit after line {terminate_line}") + with node.wait_for_debug_log([terminate_line]): node.start(extra_args=['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1']) self.log.debug("Terminating node after terminate line was found") sigterm_node() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 50e0e2c4cc..dd3cdc96ca 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -40,19 +40,15 @@ addnode connect to a CJDNS address """ import socket -import os from 
test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( - PORT_MIN, - PORT_RANGE, assert_equal, + p2p_port, ) from test_framework.netutil import test_ipv6_local -RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports - # Networks returned by RPC getpeerinfo. NET_UNROUTABLE = "not_publicly_routable" NET_IPV4 = "ipv4" @@ -75,19 +71,19 @@ class ProxyTest(BitcoinTestFramework): # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() - self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) + self.conf1.addr = ('127.0.0.1', p2p_port(self.num_nodes)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() - self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) + self.conf2.addr = ('127.0.0.1', p2p_port(self.num_nodes + 1)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ... one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 - self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) + self.conf3.addr = ('::1', p2p_port(self.num_nodes + 2)) self.conf3.unauth = True self.conf3.auth = True else: diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 77524e85a3..7dbeccbc09 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -291,7 +291,7 @@ class PruneTest(BitcoinTestFramework): def prune(index): ret = node.pruneblockchain(height=height(index)) - assert_equal(ret, node.getblockchaininfo()['pruneheight']) + assert_equal(ret + 1, node.getblockchaininfo()['pruneheight']) def has_block(index): return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat")) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index f0ed914461..4c1e86e93d 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -32,7 +32,7 @@ from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE MAX_REPLACEMENT_LIMIT = 100 class ReplaceByFeeTest(BitcoinTestFramework): def set_test_params(self): - self.num_nodes = 1 + self.num_nodes = 2 self.extra_args = [ [ "-acceptnonstdtxn=1", @@ -42,6 +42,9 @@ class ReplaceByFeeTest(BitcoinTestFramework): "-limitdescendantcount=200", "-limitdescendantsize=101", ], + # second node has default mempool parameters + [ + ], ] self.supports_cli = False @@ -73,6 +76,9 @@ class ReplaceByFeeTest(BitcoinTestFramework): self.log.info("Running test too many replacements...") self.test_too_many_replacements() + self.log.info("Running test too many replacements using default mempool params...") + self.test_too_many_replacements_with_default_mempool_params() + self.log.info("Running test opt-in...") self.test_opt_in() @@ -397,6 +403,94 @@ class ReplaceByFeeTest(BitcoinTestFramework): double_tx_hex = double_tx.serialize().hex() self.nodes[0].sendrawtransaction(double_tx_hex, 0) + def test_too_many_replacements_with_default_mempool_params(self): + """ + Test rule 5 of BIP125 (do not allow replacements that cause more than 100 + evictions) without having to rely on non-default mempool parameters. 
+ + In order to do this, create a number of "root" UTXOs, and then hang + enough transactions off of each root UTXO to exceed the MAX_REPLACEMENT_LIMIT. + Then create a conflicting RBF replacement transaction. + """ + normal_node = self.nodes[1] + wallet = MiniWallet(normal_node) + wallet.rescan_utxos() + # Clear mempools to avoid cross-node sync failure. + for node in self.nodes: + self.generate(node, 1) + + # This has to be chosen so that the total number of transactions can exceed + # MAX_REPLACEMENT_LIMIT without having any one tx graph run into the descendant + # limit; 10 works. + num_tx_graphs = 10 + + # (Number of transactions per graph, BIP125 rule 5 failure expected) + cases = [ + # Test the base case of evicting fewer than MAX_REPLACEMENT_LIMIT + # transactions. + ((MAX_REPLACEMENT_LIMIT // num_tx_graphs) - 1, False), + + # Test hitting the rule 5 eviction limit. + (MAX_REPLACEMENT_LIMIT // num_tx_graphs, True), + ] + + for (txs_per_graph, failure_expected) in cases: + self.log.debug(f"txs_per_graph: {txs_per_graph}, failure: {failure_expected}") + # "Root" utxos of each txn graph that we will attempt to double-spend with + # an RBF replacement. + root_utxos = [] + + # For each root UTXO, create a package that contains the spend of that + # UTXO and `txs_per_graph` children tx. + for graph_num in range(num_tx_graphs): + root_utxos.append(wallet.get_utxo()) + + optin_parent_tx = wallet.send_self_transfer_multi( + from_node=normal_node, + sequence=BIP125_SEQUENCE_NUMBER, + utxos_to_spend=[root_utxos[graph_num]], + num_outputs=txs_per_graph, + ) + assert_equal(True, normal_node.getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable']) + new_utxos = optin_parent_tx['new_utxos'] + + for utxo in new_utxos: + # Create spends for each output from the "root" of this graph. + child_tx = wallet.send_self_transfer( + from_node=normal_node, + utxo_to_spend=utxo, + ) + + assert normal_node.getmempoolentry(child_tx['txid']) + + num_txs_invalidated = len(root_utxos) + (num_tx_graphs * txs_per_graph) + + if failure_expected: + assert num_txs_invalidated > MAX_REPLACEMENT_LIMIT + else: + assert num_txs_invalidated <= MAX_REPLACEMENT_LIMIT + + # Now attempt to submit a tx that double-spends all the root tx inputs, which + # would invalidate `num_txs_invalidated` transactions. + double_tx = wallet.create_self_transfer_multi( + from_node=normal_node, + utxos_to_spend=root_utxos, + fee_per_output=10_000_000, # absurdly high feerate + ) + tx_hex = double_tx.serialize().hex() + + if failure_expected: + assert_raises_rpc_error( + -26, "too many potential replacements", normal_node.sendrawtransaction, tx_hex, 0) + else: + txid = normal_node.sendrawtransaction(tx_hex, 0) + assert normal_node.getmempoolentry(txid) + + # Clear the mempool once finished, and rescan the other nodes' wallet + # to account for the spends we've made on `normal_node`. 
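
[Editor's note] The case values in the new test line up with MAX_REPLACEMENT_LIMIT = 100 and num_tx_graphs = 10 as follows. Each graph contributes one root spend plus txs_per_graph children, all of which the conflicting double-spend would evict, so num_txs_invalidated = 10 + 10 * txs_per_graph:

    txs_per_graph = 100 // 10 - 1 = 9   ->  10 + 10 * 9  = 100 evictions, not above the limit, replacement accepted
    txs_per_graph = 100 // 10     = 10  ->  10 + 10 * 10 = 110 evictions, above the limit, rejected with "too many potential replacements"
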
+ self.generate(normal_node, 1) + self.wallet.rescan_utxos() + def test_opt_in(self): """Replacing should only work if orig tx opted in""" tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN)) @@ -590,7 +684,6 @@ class ReplaceByFeeTest(BitcoinTestFramework): utxo_to_spend=parent_utxo, sequence=SEQUENCE_FINAL, fee_rate=Decimal('0.02'), - mempool_valid=False, ) # Broadcast replacement child tx diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index 3619e05761..e92f73304b 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -7,12 +7,12 @@ from decimal import Decimal from test_framework.blocktools import COINBASE_MATURITY -from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, + create_lots_of_big_transactions, gen_return_txouts, ) from test_framework.wallet import MiniWallet @@ -29,16 +29,6 @@ class MempoolLimitTest(BitcoinTestFramework): ]] self.supports_cli = False - def send_large_txs(self, node, miniwallet, txouts, fee, tx_batch_size): - for _ in range(tx_batch_size): - tx = miniwallet.create_self_transfer(from_node=node, fee_rate=0, mempool_valid=False)['tx'] - for txout in txouts: - tx.vout.append(txout) - tx.vout[0].nValue -= int(fee * COIN) - res = node.testmempoolaccept([tx.serialize().hex()])[0] - assert_equal(res['fees']['base'], fee) - miniwallet.sendrawtransaction(from_node=node, tx_hex=tx.serialize().hex()) - def run_test(self): txouts = gen_return_txouts() node = self.nodes[0] @@ -71,7 +61,7 @@ class MempoolLimitTest(BitcoinTestFramework): self.log.info("Fill up the mempool with txs with higher fee rate") for batch_of_txid in range(num_of_batches): fee = (batch_of_txid + 1) * base_fee - self.send_large_txs(node, miniwallet, txouts, fee, tx_batch_size) + create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts) self.log.info('The tx should be evicted by now') # The number of transactions created should be greater than the ones present in the mempool @@ -85,7 +75,11 @@ class MempoolLimitTest(BitcoinTestFramework): # Deliberately try to create a tx with a fee less than the minimum mempool fee to assert that it does not get added to the mempool self.log.info('Create a mempool tx that will not pass mempoolminfee') - assert_raises_rpc_error(-26, "mempool min fee not met", miniwallet.send_self_transfer, from_node=node, fee_rate=relayfee, mempool_valid=False) + assert_raises_rpc_error(-26, "mempool min fee not met", miniwallet.send_self_transfer, from_node=node, fee_rate=relayfee) + + self.log.info('Test passing a value below the minimum (5 MB) to -maxmempool throws an error') + self.stop_node(0) + self.nodes[0].assert_start_raises_init_error(["-maxmempool=4"], "Error: -maxmempool must be at least 5 MB") if __name__ == '__main__': diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py index 91f2d0051a..8dd6cc9cea 100755 --- a/test/functional/mempool_reorg.py +++ b/test/functional/mempool_reorg.py @@ -53,8 +53,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework): utxo = wallet.get_utxo(txid=coinbase_txids[0]) timelock_tx = wallet.create_self_transfer( utxo_to_spend=utxo, - mempool_valid=False, - locktime=self.nodes[0].getblockcount() + 2 + locktime=self.nodes[0].getblockcount() + 2, )['hex'] self.log.info("Check that the time-locked transaction is too immature to spend") diff --git a/test/functional/mempool_spend_coinbase.py 
diff --git a/test/functional/mempool_spend_coinbase.py b/test/functional/mempool_spend_coinbase.py
index 9c43ddaf6f..3585871350 100755
--- a/test/functional/mempool_spend_coinbase.py
+++ b/test/functional/mempool_spend_coinbase.py
@@ -40,7 +40,7 @@ class MempoolSpendCoinbaseTest(BitcoinTestFramework):
spend_mature_id = wallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_mature)["txid"]
# other coinbase should be too immature to spend
- immature_tx = wallet.create_self_transfer(utxo_to_spend=utxo_immature, mempool_valid=False)
+ immature_tx = wallet.create_self_transfer(utxo_to_spend=utxo_immature)
assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinbase", lambda: self.nodes[0].sendrawtransaction(immature_tx['hex']))
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
index a15fbe5a24..e0f2446b2d 100755
--- a/test/functional/mining_prioritisetransaction.py
+++ b/test/functional/mining_prioritisetransaction.py
@@ -7,16 +7,22 @@
from decimal import Decimal
import time
-from test_framework.blocktools import COINBASE_MATURITY
-from test_framework.messages import COIN, MAX_BLOCK_WEIGHT
+from test_framework.messages import (
+ COIN,
+ MAX_BLOCK_WEIGHT,
+)
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+ create_lots_of_big_transactions,
+ gen_return_txouts,
+)
from test_framework.wallet import MiniWallet
class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
- self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-printpriority=1",
@@ -24,12 +30,8 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
]] * self.num_nodes
self.supports_cli = False
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def test_diamond(self):
self.log.info("Test diamond-shape package with priority")
- self.generate(self.wallet, COINBASE_MATURITY + 1)
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
@@ -104,6 +106,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
+ self.wallet.rescan_utxos()
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
@@ -131,7 +134,10 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
- utxos = create_confirmed_utxos(self, self.relayfee, self.nodes[0], utxo_count)
+ utxos = self.wallet.send_self_transfer_multi(from_node=self.nodes[0], num_outputs=utxo_count)['new_utxos']
+ self.generate(self.wallet, 1)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
@@ -141,7 +147,13 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
- txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
+ txids[i] = create_lots_of_big_transactions(
+ self.wallet,
+ self.nodes[0],
+ (i+1) * base_fee,
+ end_range - start_range,
+ self.txouts,
+ utxos[start_range:end_range])
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to
@@ -200,17 +212,9 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
assert x not in mempool
# Create a free transaction. Should be rejected.
- utxo_list = self.nodes[0].listunspent()
- assert len(utxo_list) > 0
- utxo = utxo_list[0]
-
- inputs = []
- outputs = {}
- inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
- outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
- raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
- tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
- tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
+ tx_res = self.wallet.create_self_transfer(from_node=self.nodes[0], fee_rate=0)
+ tx_hex = tx_res['hex']
+ tx_id = tx_res['txid']
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py
index 12ee4b3c27..8ac38bff3a 100755
--- a/test/functional/p2p_blocksonly.py
+++ b/test/functional/p2p_blocksonly.py
@@ -89,6 +89,11 @@ class P2PBlocksOnly(BitcoinTestFramework):
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
_, txid, _, tx_hex = self.check_p2p_tx_violation()
+ self.log.info("Tests with node in normal mode with block-relay-only connection, sending an inv")
+ conn = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="block-relay-only")
+ assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
+ self.check_p2p_inv_violation(conn)
+
self.log.info("Check that txs from RPC are not sent to blockrelay connection")
conn = self.nodes[0].add_outbound_p2p_connection(P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only")
@@ -100,6 +105,13 @@ class P2PBlocksOnly(BitcoinTestFramework):
conn.sync_send_with_ping()
assert(int(txid, 16) not in conn.get_invs())
+ def check_p2p_inv_violation(self, peer):
+ self.log.info("Check that tx-invs from P2P are rejected and result in disconnect")
+ with self.nodes[0].assert_debug_log(["inv sent in violation of protocol, disconnecting peer"]):
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0x12345)]))
+ peer.wait_for_disconnect()
+ self.nodes[0].disconnect_p2ps()
+
def check_p2p_tx_violation(self):
self.log.info('Check that txs from P2P are rejected and result in disconnect')
spendtx = self.miniwallet.create_self_transfer()
@@ -108,9 +120,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx']))
self.nodes[0].p2ps[0].wait_for_disconnect()
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
-
- # Remove the disconnected peer
- del self.nodes[0].p2ps[0]
+ self.nodes[0].disconnect_p2ps()
return spendtx['tx'], spendtx['txid'], spendtx['wtxid'], spendtx['hex']
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index b9ac3c32c5..5e50e1ebce 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -606,6 +606,15 @@ class CompactBlocksTest(BitcoinTestFramework):
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
+ # Request with out-of-bounds tx index results in disconnect
+ bad_peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ block_hash = node.getblockhash(chain_height)
+ block = from_hex(CBlock(), node.getblock(block_hash, False))
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [len(block.vtx)])
+ with node.assert_debug_log(['getblocktxn with out-of-bounds tx indices']):
+ bad_peer.send_message(msg)
+ bad_peer.wait_for_disconnect()
+
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
# Test that requesting old compactblocks doesn't work.
diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py
index d375af6fe1..8907c34a89 100755
--- a/test/functional/p2p_getaddr_caching.py
+++ b/test/functional/p2p_getaddr_caching.py
@@ -14,6 +14,7 @@ from test_framework.p2p import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
+ p2p_port,
)
# As defined in net_processing.
@@ -42,6 +43,12 @@ class AddrReceiver(P2PInterface):
class AddrTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ # Use some of the remaining p2p ports for the onion binds.
+ self.onion_port1 = p2p_port(self.num_nodes)
+ self.onion_port2 = p2p_port(self.num_nodes + 1)
+ self.extra_args = [
+ [f"-bind=127.0.0.1:{self.onion_port1}=onion", f"-bind=127.0.0.1:{self.onion_port2}=onion"],
+ ]
def run_test(self):
self.log.info('Fill peer AddrMan with a lot of records')
@@ -55,35 +62,66 @@ class AddrTest(BitcoinTestFramework):
# only a fraction of all known addresses can be cached and returned.
assert(len(self.nodes[0].getnodeaddresses(0)) > int(MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100)))
- responses = []
+ last_response_on_local_bind = None
+ last_response_on_onion_bind1 = None
+ last_response_on_onion_bind2 = None
self.log.info('Send many addr requests within short time to receive same response')
N = 5
cur_mock_time = int(time.time())
for i in range(N):
- addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
- addr_receiver.send_and_ping(msg_getaddr())
+ addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
+ addr_receiver_local.send_and_ping(msg_getaddr())
+ addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
+ addr_receiver_onion1.send_and_ping(msg_getaddr())
+ addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
+ addr_receiver_onion2.send_and_ping(msg_getaddr())
+
# Trigger response
cur_mock_time += 5 * 60
self.nodes[0].setmocktime(cur_mock_time)
- addr_receiver.wait_until(addr_receiver.addr_received)
- responses.append(addr_receiver.get_received_addrs())
- for response in responses[1:]:
- assert_equal(response, responses[0])
- assert(len(response) == MAX_ADDR_TO_SEND)
+ addr_receiver_local.wait_until(addr_receiver_local.addr_received)
+ addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
+ addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
+
+ if i > 0:
+ # Responses from different binds should be unique
+ assert(last_response_on_local_bind != addr_receiver_onion1.get_received_addrs())
+ assert(last_response_on_local_bind != addr_receiver_onion2.get_received_addrs())
+ assert(last_response_on_onion_bind1 != addr_receiver_onion2.get_received_addrs())
+ # Responses on from the same bind should be the same
+ assert_equal(last_response_on_local_bind, addr_receiver_local.get_received_addrs())
+ assert_equal(last_response_on_onion_bind1, addr_receiver_onion1.get_received_addrs())
+ assert_equal(last_response_on_onion_bind2, addr_receiver_onion2.get_received_addrs())
+
+ last_response_on_local_bind = addr_receiver_local.get_received_addrs()
+ last_response_on_onion_bind1 = addr_receiver_onion1.get_received_addrs()
+ last_response_on_onion_bind2 = addr_receiver_onion2.get_received_addrs()
+
+ for response in [last_response_on_local_bind, last_response_on_onion_bind1, last_response_on_onion_bind2]:
+ assert_equal(len(response), MAX_ADDR_TO_SEND)
cur_mock_time += 3 * 24 * 60 * 60
self.nodes[0].setmocktime(cur_mock_time)
self.log.info('After time passed, see a new response to addr request')
- last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
- last_addr_receiver.send_and_ping(msg_getaddr())
+ addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
+ addr_receiver_local.send_and_ping(msg_getaddr())
+ addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
+ addr_receiver_onion1.send_and_ping(msg_getaddr())
+ addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
+ addr_receiver_onion2.send_and_ping(msg_getaddr())
+
# Trigger response
cur_mock_time += 5 * 60
self.nodes[0].setmocktime(cur_mock_time)
- last_addr_receiver.wait_until(last_addr_receiver.addr_received)
- # new response is different
- assert(set(responses[0]) != set(last_addr_receiver.get_received_addrs()))
+ addr_receiver_local.wait_until(addr_receiver_local.addr_received)
+ addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
+ addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
+ # new response is different
+ assert(set(last_response_on_local_bind) != set(addr_receiver_local.get_received_addrs()))
+ assert(set(last_response_on_onion_bind1) != set(addr_receiver_onion1.get_received_addrs()))
+ assert(set(last_response_on_onion_bind2) != set(addr_receiver_onion2.get_received_addrs()))
if __name__ == '__main__':
AddrTest().main()
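The point of the per-bind changes above is that the GETADDR response cache is now keyed by the local address the request arrives on. A condensed sketch of the pattern the test exercises (node, onion_port1 and the AddrReceiver/msg_getaddr helpers are the test's own objects shown above):

    # one AddrReceiver per local bind; each is served from its own cache
    receivers = {
        "default": node.add_p2p_connection(AddrReceiver()),
        "onion1": node.add_p2p_connection(AddrReceiver(), dstport=onion_port1),
    }
    for receiver in receivers.values():
        receiver.send_and_ping(msg_getaddr())
    # once the mocktime bump triggers the replies, repeated requests on the same bind
    # return the identical cached list, while different binds return independently
    # sampled (and therefore different) address lists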
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
index 1695acaaa8..716ee8f7ef 100755
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -91,15 +91,17 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
assert 'warnings' not in result
# Generate addresses with the segwit types. These should all make legacy addresses
+ err_msg = ["Unable to make chosen address type, please ensure no uncompressed public keys are present."]
+
for addr_type in ['bech32', 'p2sh-segwit']:
- result = self.nodes[0].createmultisig(2, keys, addr_type)
+ result = self.nodes[0].createmultisig(nrequired=2, keys=keys, address_type=addr_type)
assert_equal(legacy_addr, result['address'])
- assert_equal(result['warnings'], ["Unable to make chosen address type, please ensure no uncompressed public keys are present."])
+ assert_equal(result['warnings'], err_msg)
if self.is_bdb_compiled():
- result = wmulti0.addmultisigaddress(2, keys, '', addr_type)
+ result = wmulti0.addmultisigaddress(nrequired=2, keys=keys, address_type=addr_type)
assert_equal(legacy_addr, result['address'])
- assert_equal(result['warnings'], ["Unable to make chosen address type, please ensure no uncompressed public keys are present."])
+ assert_equal(result['warnings'], err_msg)
self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
@@ -173,6 +175,7 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
desc = descsum_create(desc)
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
+ assert 'warnings' not in msig
madd = msig["address"]
mredeem = msig["redeemScript"]
assert_equal(desc, msig['descriptor'])
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 759e43194b..948deaaec4 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -11,6 +11,9 @@ from math import ceil
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
+from test_framework.messages import (
+ COIN,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
@@ -103,6 +106,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.generate(self.nodes[2], 1)
self.generate(self.nodes[0], 121)
+ self.test_weight_calculation()
self.test_change_position()
self.test_simple()
self.test_simple_two_coins()
@@ -1069,6 +1073,27 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[2].unloadwallet("extfund")
+ def test_weight_calculation(self):
+ self.log.info("Test weight calculation with external inputs")
+
+ self.nodes[2].createwallet("test_weight_calculation")
+ wallet = self.nodes[2].get_wallet_rpc("test_weight_calculation")
+
+ addr = wallet.getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 5)
+ vout = find_vout_for_address(self.nodes[0], txid, addr)
+
+ self.nodes[0].sendtoaddress(wallet.getnewaddress(), 5)
+ self.generate(self.nodes[0], 1)
+
+ rawtx = wallet.createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 9.999}])
+ fundedtx = wallet.fundrawtransaction(rawtx, {'fee_rate': 10})
+ # with 71-byte signatures we should expect following tx size
+ tx_size = 10 + 41*2 + 31*2 + (2 + 107*2)/4
+ assert_equal(fundedtx['fee'] * COIN, tx_size * 10)
+
+ self.nodes[2].unloadwallet("test_weight_calculation")
+
def test_include_unsafe(self):
self.log.info("Test fundrawtxn with unsafe inputs")
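The expected fee in test_weight_calculation() follows directly from the size formula in the comment; a worked example (the breakdown into overhead, inputs, outputs and discounted witness data is an interpretation, not spelled out in the diff):

    tx_size = 10 + 41*2 + 31*2 + (2 + 107*2)/4   # 10 + 82 + 62 + 54 = 208 vbytes
    fee_rate = 10                                # sat/vB, as passed to fundrawtransaction
    assert tx_size * fee_rate == 2080            # matches fundedtx['fee'] * COIN above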
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 1f95814e18..dee6d777af 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -285,7 +285,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Test a transaction with a large fee.
# Fee rate is 0.20000000 BTC/kvB
- tx = self.wallet.create_self_transfer(mempool_valid=False, from_node=self.nodes[0], fee_rate=Decimal('0.20000000'))
+ tx = self.wallet.create_self_transfer(from_node=self.nodes[0], fee_rate=Decimal("0.20000000"))
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([tx['hex']])[0]
assert_equal(testres['allowed'], False)
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index a39ee003ef..3f02d21d42 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -223,11 +223,11 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# It still needs to exist and be None in order for tests to work however.
self.options.descriptors = None
+ PortSeed.n = self.options.port_seed
+
def setup(self):
"""Call this method to start up the test framework object with options set."""
- PortSeed.n = self.options.port_seed
-
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 7d2db391b6..03f6c8adea 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -423,7 +423,7 @@ class TestNode():
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
@contextlib.contextmanager
- def wait_for_debug_log(self, expected_msgs, timeout=60, ignore_case=False):
+ def wait_for_debug_log(self, expected_msgs, timeout=60):
"""
Block until we see a particular debug log message fragment or until we exceed the timeout.
Return:
"""
time_end = time.time() + timeout * self.timeout_factor
prev_size = self.debug_log_bytes()
- re_flags = re.MULTILINE | (re.IGNORECASE if ignore_case else 0)
yield
while True:
found = True
- with open(self.debug_log_path, encoding='utf-8') as dl:
+ with open(self.debug_log_path, "rb") as dl:
dl.seek(prev_size)
log = dl.read()
for expected_msg in expected_msgs:
- if re.search(re.escape(expected_msg), log, flags=re_flags) is None:
+ if expected_msg not in log:
found = False
if found:
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index b043d1a70d..1c64fa4028 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -476,39 +476,6 @@ def find_output(node, txid, amount, *, blockhash=None):
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
-# Helper to create at least "count" utxos
-# Pass in a fee that is sufficient for relay and mining new transactions.
-def create_confirmed_utxos(test_framework, fee, node, count, **kwargs):
- to_generate = int(0.5 * count) + 101
- while to_generate > 0:
- test_framework.generate(node, min(25, to_generate), **kwargs)
- to_generate -= 25
- utxos = node.listunspent()
- iterations = count - len(utxos)
- addr1 = node.getnewaddress()
- addr2 = node.getnewaddress()
- if iterations <= 0:
- return utxos
- for _ in range(iterations):
- t = utxos.pop()
- inputs = []
- inputs.append({"txid": t["txid"], "vout": t["vout"]})
- outputs = {}
- send_value = t['amount'] - fee
- outputs[addr1] = satoshi_round(send_value / 2)
- outputs[addr2] = satoshi_round(send_value / 2)
- raw_tx = node.createrawtransaction(inputs, outputs)
- signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
- node.sendrawtransaction(signed_tx)
-
- while (node.getmempoolinfo()['size'] > 0):
- test_framework.generate(node, 1, **kwargs)
-
- utxos = node.listunspent()
- assert len(utxos) >= count
- return utxos
-
-
def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs):
"""Build and send a transaction that spends the given inputs (specified
by lists of parent_txid:vout each), with the desired total value and fee,
@@ -553,24 +520,22 @@ def gen_return_txouts():
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
-def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
- addr = node.getnewaddress()
+def create_lots_of_big_transactions(mini_wallet, node, fee, tx_batch_size, txouts, utxos=None):
+ from .messages import COIN
+ fee_sats = int(fee * COIN)
txids = []
- from .messages import tx_from_hex
- for _ in range(num):
- t = utxos.pop()
- inputs = [{"txid": t["txid"], "vout": t["vout"]}]
- outputs = {}
- change = t['amount'] - fee
- outputs[addr] = satoshi_round(change)
- rawtx = node.createrawtransaction(inputs, outputs)
- tx = tx_from_hex(rawtx)
- for txout in txouts:
- tx.vout.append(txout)
- newtx = tx.serialize().hex()
- signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
- txid = node.sendrawtransaction(signresult["hex"], 0)
- txids.append(txid)
+ use_internal_utxos = utxos is None
+ for _ in range(tx_batch_size):
+ tx = mini_wallet.create_self_transfer(
+ from_node=node,
+ utxo_to_spend=None if use_internal_utxos else utxos.pop(),
+ fee_rate=0,
+ )["tx"]
+ tx.vout[0].nValue -= fee_sats
+ tx.vout.extend(txouts)
+ res = node.testmempoolaccept([tx.serialize().hex()])[0]
+ assert_equal(res['fees']['base'], fee)
+ txids.append(node.sendrawtransaction(tx.serialize().hex()))
return txids
@@ -578,13 +543,8 @@ def mine_large_block(test_framework, mini_wallet, node):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
txouts = gen_return_txouts()
- from .messages import COIN
- fee = 100 * int(node.getnetworkinfo()["relayfee"] * COIN)
- for _ in range(14):
- tx = mini_wallet.create_self_transfer(from_node=node, fee_rate=0, mempool_valid=False)['tx']
- tx.vout[0].nValue -= fee
- tx.vout.extend(txouts)
- mini_wallet.sendrawtransaction(from_node=node, tx_hex=tx.serialize().hex())
+ fee = 100 * node.getnetworkinfo()["relayfee"]
+ create_lots_of_big_transactions(mini_wallet, node, fee, 14, txouts)
test_framework.generate(node, 1)
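With the rewrite above, create_lots_of_big_transactions() drives a MiniWallet and the utxos argument becomes optional. A usage sketch under those assumptions (wallet and node stand in for a test's own MiniWallet and TestNode):

    from decimal import Decimal
    from test_framework.util import create_lots_of_big_transactions, gen_return_txouts

    txouts = gen_return_txouts()
    fee = Decimal("0.0001")  # in BTC; converted to satoshis internally via COIN

    # let MiniWallet pick its own confirmed utxos ...
    create_lots_of_big_transactions(wallet, node, fee, 10, txouts)

    # ... or spend an explicit list, e.g. outputs prepared with send_self_transfer_multi
    utxos = wallet.send_self_transfer_multi(from_node=node, num_outputs=10)['new_utxos']
    create_lots_of_big_transactions(wallet, node, fee, 10, txouts, utxos)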
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index 336b2afe26..93f3ea9e81 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -10,6 +10,7 @@ from enum import Enum
from random import choice
from typing import (
Any,
+ List,
Optional,
)
from test_framework.address import (
@@ -147,7 +148,7 @@ class MiniWallet:
def get_address(self):
return self._address
- def get_utxo(self, *, txid: str = '', vout: Optional[int] = None, mark_as_spent=True):
+ def get_utxo(self, *, txid: str = '', vout: Optional[int] = None, mark_as_spent=True) -> dict:
"""
Returns a utxo and marks it as spent (pops it from the internal list)
@@ -192,7 +193,7 @@ class MiniWallet:
Returns a tuple (txid, n) referring to the created external utxo outpoint.
"""
- tx = self.create_self_transfer(from_node=from_node, fee_rate=0, mempool_valid=False)['tx']
+ tx = self.create_self_transfer(from_node=from_node, fee_rate=0)["tx"]
assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee)
tx.vout[0].nValue -= (amount + fee) # change output -> MiniWallet
tx.vout.append(CTxOut(amount, scriptPubKey)) # arbitrary output -> to be returned
@@ -215,14 +216,21 @@ class MiniWallet:
return {'new_utxos': [self.get_utxo(txid=txid, vout=vout) for vout in range(len(tx.vout))],
'txid': txid, 'hex': tx.serialize().hex(), 'tx': tx}
- def create_self_transfer_multi(self, *, from_node, utxos_to_spend=None, num_outputs=1, fee_per_output=1000):
+ def create_self_transfer_multi(
+ self, *, from_node,
+ utxos_to_spend: Optional[List[dict]] = None,
+ num_outputs=1,
+ sequence=0,
+ fee_per_output=1000):
"""
Create and return a transaction that spends the given UTXOs and creates a
certain number of outputs with equal amounts.
"""
utxos_to_spend = utxos_to_spend or [self.get_utxo()]
# create simple tx template (1 input, 1 output)
- tx = self.create_self_transfer(fee_rate=0, from_node=from_node, utxo_to_spend=utxos_to_spend[0], mempool_valid=False)['tx']
+ tx = self.create_self_transfer(
+ fee_rate=0, from_node=from_node,
+ utxo_to_spend=utxos_to_spend[0], sequence=sequence)["tx"]
# duplicate inputs, witnesses and outputs
tx.vin = [deepcopy(tx.vin[0]) for _ in range(len(utxos_to_spend))]
@@ -240,9 +248,8 @@ class MiniWallet:
o.nValue = outputs_value_total // num_outputs
return tx
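The new sequence keyword threaded through create_self_transfer_multi() above is what lets the RBF test earlier in this diff opt its parent transactions in to replacement. A minimal sketch of that use (wallet and node are a MiniWallet and its node; the constant is the standard BIP125 signalling value):

    BIP125_SEQUENCE_NUMBER = 0xfffffffd  # any nSequence below 0xfffffffe signals replaceability

    parent = wallet.send_self_transfer_multi(
        from_node=node,
        utxos_to_spend=[wallet.get_utxo()],
        num_outputs=3,
        sequence=BIP125_SEQUENCE_NUMBER,  # new parameter; defaults to 0
    )
    assert node.getmempoolentry(parent['txid'])['bip125-replaceable']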
- def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node=None, utxo_to_spend=None, mempool_valid=True, locktime=0, sequence=0):
- """Create and return a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed.
- Checking mempool validity via the testmempoolaccept RPC can be skipped by setting mempool_valid to False."""
+ def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node=None, utxo_to_spend=None, locktime=0, sequence=0):
+ """Create and return a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
from_node = from_node or self._test_node
utxo_to_spend = utxo_to_spend or self.get_utxo()
if self._priv_key is None:
@@ -269,11 +276,7 @@ class MiniWallet:
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key]
tx_hex = tx.serialize().hex()
- if mempool_valid:
- tx_info = from_node.testmempoolaccept([tx_hex])[0]
- assert_equal(tx_info['allowed'], True)
- assert_equal(tx_info['vsize'], vsize)
- assert_equal(tx_info['fees']['base'], utxo_to_spend['value'] - Decimal(send_value) / COIN)
+ assert_equal(tx.get_vsize(), vsize)
return {'txid': tx.rehash(), 'wtxid': tx.getwtxid(), 'hex': tx_hex, 'tx': tx}
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index e47d021210..5dc23ba245 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -93,15 +93,15 @@ class WalletDescriptorTest(BitcoinTestFramework):
# Make sure things are disabled
self.log.info("Test disabled RPCs")
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importmulti, [])
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.sethdseed)
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importmulti, [])
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.sethdseed)
encryption") # Get the master fingerprint before encrypt diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py index cbdb67216c..7b7cfbfef5 100755 --- a/test/get_previous_releases.py +++ b/test/get_previous_releases.py @@ -106,7 +106,7 @@ def download_binary(tag, args) -> int: bin_path = 'bin/bitcoin-core-{}/test.{}'.format( match.group(1), match.group(2)) platform = args.platform - if tag < "v23" and platform in ["x86_64-apple-darwin", "aarch64-apple-darwin"]: + if tag < "v23" and platform in ["x86_64-apple-darwin", "arm64-apple-darwin"]: platform = "osx64" tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format( tag=tag[1:], platform=platform) @@ -214,7 +214,7 @@ def check_host(args) -> int: 'aarch64-*-linux*': 'aarch64-linux-gnu', 'x86_64-*-linux*': 'x86_64-linux-gnu', 'x86_64-apple-darwin*': 'x86_64-apple-darwin', - 'aarch64-apple-darwin*': 'aarch64-apple-darwin', + 'aarch64-apple-darwin*': 'arm64-apple-darwin', } args.platform = '' for pattern, target in platforms.items(): diff --git a/test/lint/README.md b/test/lint/README.md index 1f683c10b3..8d592c3282 100644 --- a/test/lint/README.md +++ b/test/lint/README.md @@ -28,7 +28,6 @@ To do a full check with `-r`, make sure that you have fetched the upstream repos maintained: * for `src/secp256k1`: https://github.com/bitcoin-core/secp256k1.git (branch master) * for `src/leveldb`: https://github.com/bitcoin-core/leveldb-subtree.git (branch bitcoin-fork) -* for `src/univalue`: https://github.com/bitcoin-core/univalue-subtree.git (branch master) * for `src/crypto/ctaes`: https://github.com/bitcoin-core/ctaes.git (branch master) * for `src/crc32c`: https://github.com/bitcoin-core/crc32c-subtree.git (branch bitcoin-fork) * for `src/minisketch`: https://github.com/sipa/minisketch.git (branch master) @@ -39,6 +38,6 @@ To do so, add the upstream repository as remote: git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git ``` -lint-all.py +all-lint.py =========== Calls other scripts with the `lint-` prefix. 
diff --git a/test/lint/lint-all.py b/test/lint/all-lint.py
index c280ba2db2..34a7b9742a 100755
--- a/test/lint/lint-all.py
+++ b/test/lint/all-lint.py
@@ -13,11 +13,10 @@ from subprocess import run
exit_code = 0
mod_path = Path(__file__).parent
-for lint in glob(f"{mod_path}/lint-*"):
- if lint != __file__:
- result = run([lint])
- if result.returncode != 0:
- print(f"^---- failure generated from {lint.split('/')[-1]}")
- exit_code |= result.returncode
+for lint in glob(f"{mod_path}/lint-*.py"):
+ result = run([lint])
+ if result.returncode != 0:
+ print(f"^---- failure generated from {lint.split('/')[-1]}")
+ exit_code |= result.returncode
exit(exit_code)
diff --git a/test/lint/lint-files.py b/test/lint/lint-files.py
index dbb51ce54e..123bee2cbc 100755
--- a/test/lint/lint-files.py
+++ b/test/lint/lint-files.py
@@ -21,7 +21,7 @@ ALL_SOURCE_FILENAMES_REGEXP = r"^.*\.(cpp|h|py|sh)$"
ALLOWED_FILENAME_REGEXP = "^[a-zA-Z0-9/_.@][a-zA-Z0-9/_.@-]*$"
ALLOWED_SOURCE_FILENAME_REGEXP = "^[a-z0-9_./-]+$"
ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP = (
- "^src/(secp256k1/|minisketch/|univalue/|test/fuzz/FuzzedDataProvider.h)"
+ "^src/(secp256k1/|minisketch/|test/fuzz/FuzzedDataProvider.h)"
)
ALLOWED_PERMISSION_NON_EXECUTABLES = 0o644
ALLOWED_PERMISSION_EXECUTABLES = 0o755
diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py
index 28e7b1e4ff..f54126e023 100755
--- a/test/lint/lint-format-strings.py
+++ b/test/lint/lint-format-strings.py
@@ -22,6 +22,8 @@ FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [
'LogConnectFailure,1',
'LogPrint,1',
'LogPrintf,0',
+ 'LogPrintfCategory,1',
+ 'LogPrintLevel,2',
'printf,0',
'snprintf,2',
'sprintf,1',
@@ -75,7 +77,7 @@ def main():
matching_files_filtered = []
for matching_file in matching_files:
- if not re.search('^src/(leveldb|secp256k1|minisketch|tinyformat|univalue|test/fuzz/strprintf.cpp)', matching_file):
+ if not re.search('^src/(leveldb|secp256k1|minisketch|tinyformat|test/fuzz/strprintf.cpp)', matching_file):
matching_files_filtered.append(matching_file)
matching_files_filtered.sort()
diff --git a/test/lint/lint-include-guards.py b/test/lint/lint-include-guards.py
index 86284517d5..5867aae028 100755
--- a/test/lint/lint-include-guards.py
+++ b/test/lint/lint-include-guards.py
@@ -22,7 +22,6 @@ EXCLUDE_FILES_WITH_PREFIX = ['src/crypto/ctaes',
'src/crc32c',
'src/secp256k1',
'src/minisketch',
- 'src/univalue',
'src/tinyformat.h',
'src/bench/nanobench.h',
'src/test/fuzz/FuzzedDataProvider.h']
diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py
index ae62994642..afdca0d418 100755
--- a/test/lint/lint-includes.py
+++ b/test/lint/lint-includes.py
@@ -19,7 +19,7 @@ EXCLUDED_DIRS = ["src/leveldb/",
"src/crc32c/",
"src/secp256k1/",
"src/minisketch/",
- "src/univalue/"]
+ ]
EXPECTED_BOOST_INCLUDES = ["boost/algorithm/string/replace.hpp",
"boost/date_time/posix_time/posix_time.hpp",
diff --git a/test/lint/lint-locale-dependence.py b/test/lint/lint-locale-dependence.py
index 9b2cf4587a..4876ac2e2d 100755
--- a/test/lint/lint-locale-dependence.py
+++ b/test/lint/lint-locale-dependence.py
@@ -60,7 +60,6 @@ REGEXP_EXTERNAL_DEPENDENCIES_EXCLUSIONS = [
"src/secp256k1/",
"src/minisketch/",
"src/tinyformat.h",
- "src/univalue/"
]
LOCALE_DEPENDENT_FUNCTIONS = [
diff --git a/test/lint/lint-logs.py b/test/lint/lint-logs.py
index de53729b4e..aaf697467d 100755
--- a/test/lint/lint-logs.py
+++ b/test/lint/lint-logs.py
@@ -16,12 +16,12 @@ from subprocess import check_output
def main():
"--extended-regexp", r"(LogPrintLevel|LogPrintf?)\(", "--", "*.cpp"], universal_newlines=True, encoding="utf8").splitlines() + logs_list = check_output(["git", "grep", "--extended-regexp", r"(LogPrintLevel|LogPrintfCategory|LogPrintf?)\(", "--", "*.cpp"], universal_newlines=True, encoding="utf8").splitlines() unterminated_logs = [line for line in logs_list if not re.search(r'(\\n"|/\* Continued \*/)', line)] if unterminated_logs != []: - print("All calls to LogPrintf(), LogPrint(), LogPrintLevel(), and WalletLogPrintf() should be terminated with \"\\n\".") + print("All calls to LogPrintf(), LogPrintfCategory(), LogPrint(), LogPrintLevel(), and WalletLogPrintf() should be terminated with \"\\n\".") print("") for line in unterminated_logs: diff --git a/test/lint/lint-shell-locale.py b/test/lint/lint-shell-locale.py index f3dfe18a95..309c426374 100755 --- a/test/lint/lint-shell-locale.py +++ b/test/lint/lint-shell-locale.py @@ -41,7 +41,7 @@ def main(): exit_code = 0 shell_files = get_shell_files_list() for file_path in shell_files: - if re.search('src/(secp256k1|minisketch|univalue)/', file_path): + if re.search('src/(secp256k1|minisketch)/', file_path): continue with open(file_path, 'r', encoding='utf-8') as file_obj: diff --git a/test/lint/lint-shell.py b/test/lint/lint-shell.py index f1e4494350..ed95024ef5 100755 --- a/test/lint/lint-shell.py +++ b/test/lint/lint-shell.py @@ -68,7 +68,7 @@ def main(): ] files = get_files(files_cmd) # remove everything that doesn't match this regex - reg = re.compile(r'src/[leveldb,secp256k1,minisketch,univalue]') + reg = re.compile(r'src/[leveldb,secp256k1,minisketch]') files[:] = [file for file in files if not reg.match(file)] # build the `shellcheck` command diff --git a/test/lint/lint-spelling.py b/test/lint/lint-spelling.py index 5da1b243f7..14d7d13a75 100755 --- a/test/lint/lint-spelling.py +++ b/test/lint/lint-spelling.py @@ -12,7 +12,7 @@ Note: Will exit successfully regardless of spelling errors. from subprocess import check_output, STDOUT, CalledProcessError IGNORE_WORDS_FILE = 'test/lint/spelling.ignore-words.txt' -FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/leveldb/", ":(exclude)src/crc32c/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)src/univalue/", ":(exclude)contrib/builder-keys/keys.txt", ":(exclude)contrib/guix/patches"] +FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/leveldb/", ":(exclude)src/crc32c/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)contrib/builder-keys/keys.txt", ":(exclude)contrib/guix/patches"] def check_codespell_install(): diff --git a/test/lint/lint-whitespace.py b/test/lint/lint-whitespace.py index d98fc8d9a2..3fb5b80013 100755 --- a/test/lint/lint-whitespace.py +++ b/test/lint/lint-whitespace.py @@ -22,7 +22,6 @@ EXCLUDED_DIRS = ["depends/patches/", "src/crc32c/", "src/secp256k1/", "src/minisketch/", - "src/univalue/", "doc/release-notes/", "src/qt/locale"] diff --git a/test/lint/spelling.ignore-words.txt b/test/lint/spelling.ignore-words.txt index c931a0aae1..0efd298408 100644 --- a/test/lint/spelling.ignore-words.txt +++ b/test/lint/spelling.ignore-words.txt @@ -3,6 +3,7 @@ ba blockin cachable creat +desig fo fpr hights |