-rw-r--r--  .gitignore | 2
-rw-r--r--  Makefile.am | 1
-rw-r--r--  build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj | 1
-rwxr-xr-x  ci/lint/06_script.sh | 1
-rw-r--r--  configure.ac | 3
-rwxr-xr-x  contrib/devtools/copyright_header.py | 1
-rwxr-xr-x  contrib/devtools/symbol-check.py | 5
-rwxr-xr-x  contrib/message-capture/message-capture-parser.py | 3
-rw-r--r--  contrib/seeds/.gitignore | 1
-rw-r--r--  contrib/seeds/README.md | 18
-rw-r--r--  contrib/seeds/asmap.py | 815
-rwxr-xr-x  contrib/seeds/makeseeds.py | 69
-rw-r--r--  depends/Makefile | 9
-rw-r--r--  depends/README.md | 3
-rw-r--r--  depends/config.site.in | 4
-rwxr-xr-x  depends/gen_id | 6
-rw-r--r--  depends/hosts/android.mk | 9
-rw-r--r--  depends/hosts/darwin.mk | 10
-rw-r--r--  depends/hosts/freebsd.mk | 10
-rw-r--r--  depends/hosts/linux.mk | 10
-rw-r--r--  depends/hosts/mingw32.mk | 10
-rw-r--r--  depends/hosts/netbsd.mk | 10
-rw-r--r--  depends/hosts/openbsd.mk | 10
-rw-r--r--  depends/packages/bdb.mk | 1
-rw-r--r--  depends/packages/zeromq.mk | 1
-rw-r--r--  depends/patches/qt/mac-qmake.conf | 2
-rw-r--r--  doc/developer-notes.md | 4
-rw-r--r--  doc/policy/mempool-replacements.md | 7
-rw-r--r--  doc/productivity.md | 5
-rw-r--r--  src/Makefile.am | 7
-rw-r--r--  src/Makefile.qt.include | 1
-rw-r--r--  src/banman.cpp | 2
-rw-r--r--  src/bench/mempool_eviction.cpp | 2
-rw-r--r--  src/bench/rpc_mempool.cpp | 5
-rw-r--r--  src/bitcoin-cli.cpp | 13
-rw-r--r--  src/bitcoin-tx.cpp | 3
-rw-r--r--  src/bitcoin-util.cpp | 12
-rw-r--r--  src/bitcoin-wallet.cpp | 3
-rw-r--r--  src/bitcoind.cpp | 4
-rw-r--r--  src/compat.h | 11
-rw-r--r--  src/crypto/chacha_poly_aead.cpp | 4
-rw-r--r--  src/httpserver.cpp | 6
-rw-r--r--  src/i2p.cpp | 8
-rw-r--r--  src/index/base.cpp | 2
-rw-r--r--  src/init.cpp | 6
-rw-r--r--  src/init/common.cpp | 2
-rw-r--r--  src/logging.h | 8
-rw-r--r--  src/net.cpp | 180
-rw-r--r--  src/net.h | 38
-rw-r--r--  src/net_processing.cpp | 31
-rw-r--r--  src/node/blockstorage.h | 2
-rw-r--r--  src/node/interface_ui.cpp (renamed from src/node/ui_interface.cpp) | 2
-rw-r--r--  src/node/interface_ui.h (renamed from src/node/ui_interface.h) | 6
-rw-r--r--  src/node/interfaces.cpp | 2
-rw-r--r--  src/node/miner.cpp | 79
-rw-r--r--  src/node/miner.h | 15
-rw-r--r--  src/noui.cpp | 2
-rw-r--r--  src/psbt.h | 2
-rw-r--r--  src/qt/bitcoin.cpp | 2
-rw-r--r--  src/qt/bitcoingui.cpp | 2
-rw-r--r--  src/qt/main.cpp | 6
-rw-r--r--  src/qt/paymentserver.cpp | 2
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 2
-rw-r--r--  src/qt/transactionoverviewwidget.cpp | 27
-rw-r--r--  src/qt/transactionoverviewwidget.h | 19
-rw-r--r--  src/qt/transactionview.cpp | 2
-rw-r--r--  src/qt/walletframe.cpp | 2
-rw-r--r--  src/qt/walletmodel.cpp | 2
-rw-r--r--  src/qt/walletview.cpp | 2
-rw-r--r--  src/rest.cpp | 10
-rw-r--r--  src/rpc/blockchain.cpp | 14
-rw-r--r--  src/rpc/mining.cpp | 7
-rw-r--r--  src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 2
-rw-r--r--  src/secp256k1/include/secp256k1.h | 6
-rw-r--r--  src/secp256k1/sage/prove_group_implementations.sage | 77
-rw-r--r--  src/secp256k1/src/bench_internal.c | 10
-rw-r--r--  src/secp256k1/src/group_impl.h | 99
-rw-r--r--  src/shutdown.cpp | 2
-rw-r--r--  src/support/lockedpool.cpp | 5
-rw-r--r--  src/test/blockencodings_tests.cpp | 8
-rw-r--r--  src/test/blockfilter_index_tests.cpp | 2
-rw-r--r--  src/test/fuzz/tx_pool.cpp | 2
-rw-r--r--  src/test/fuzz/util.cpp | 9
-rw-r--r--  src/test/fuzz/util.h | 2
-rw-r--r--  src/test/logging_tests.cpp | 5
-rw-r--r--  src/test/mempool_tests.cpp | 18
-rw-r--r--  src/test/miner_tests.cpp | 2
-rw-r--r--  src/test/policyestimator_tests.cpp | 6
-rw-r--r--  src/test/util/mining.cpp | 2
-rw-r--r--  src/test/util/net.h | 9
-rw-r--r--  src/test/util/setup_common.cpp | 3
-rw-r--r--  src/test/validation_block_tests.cpp | 4
-rw-r--r--  src/test/validation_chainstate_tests.cpp | 2
-rw-r--r--  src/test/validation_flush_tests.cpp | 2
-rw-r--r--  src/timedata.cpp | 2
-rw-r--r--  src/torcontrol.cpp | 2
-rw-r--r--  src/univalue/.cirrus.yml | 44
-rw-r--r--  src/univalue/COPYING | 19
-rw-r--r--  src/univalue/Makefile.am | 58
-rw-r--r--  src/univalue/README.md | 21
-rwxr-xr-x  src/univalue/autogen.sh | 9
-rw-r--r--  src/univalue/build-aux/m4/.gitignore | 1
-rw-r--r--  src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 | 962
-rw-r--r--  src/univalue/configure.ac | 72
-rw-r--r--  src/univalue/gen/gen.cpp | 84
-rw-r--r--  src/univalue/include/univalue.h | 6
-rw-r--r--  src/univalue/include/univalue_escapes.h (renamed from src/univalue/lib/univalue_escapes.h) | 7
-rw-r--r--  src/univalue/include/univalue_utffilter.h (renamed from src/univalue/lib/univalue_utffilter.h) | 6
-rw-r--r--  src/univalue/lib/univalue_read.cpp | 2
-rw-r--r--  src/univalue/lib/univalue_write.cpp | 2
-rw-r--r--  src/univalue/pc/libunivalue-uninstalled.pc.in | 9
-rw-r--r--  src/univalue/pc/libunivalue.pc.in | 10
-rw-r--r--  src/univalue/sources.mk | 6
-rw-r--r--  src/univalue/test/unitester.cpp | 34
-rw-r--r--  src/util/sock.cpp | 112
-rw-r--r--  src/util/sock.h | 71
-rw-r--r--  src/validation.cpp | 2
-rw-r--r--  src/wallet/init.cpp | 2
-rw-r--r--  src/wallet/rpc/backup.cpp | 5
-rw-r--r--  src/wallet/rpc/spend.cpp | 13
-rw-r--r--  src/wallet/rpc/util.cpp | 4
-rw-r--r--  src/wallet/spend.cpp | 22
-rwxr-xr-x  test/functional/feature_rbf.py | 1
-rwxr-xr-x  test/functional/mempool_limit.py | 20
-rwxr-xr-x  test/functional/mempool_reorg.py | 3
-rwxr-xr-x  test/functional/mempool_spend_coinbase.py | 2
-rwxr-xr-x  test/functional/mining_prioritisetransaction.py | 46
-rwxr-xr-x  test/functional/p2p_blocksonly.py | 16
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 9
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 25
-rwxr-xr-x  test/functional/rpc_rawtransaction.py | 2
-rw-r--r--  test/functional/test_framework/util.py | 74
-rw-r--r--  test/functional/test_framework/wallet.py | 15
-rwxr-xr-x  test/functional/wallet_descriptor.py | 18
-rwxr-xr-x  test/get_previous_releases.py | 4
-rw-r--r--  test/lint/README.md | 1
-rwxr-xr-x  test/lint/lint-files.py | 2
-rwxr-xr-x  test/lint/lint-format-strings.py | 3
-rwxr-xr-x  test/lint/lint-include-guards.py | 1
-rwxr-xr-x  test/lint/lint-includes.py | 2
-rwxr-xr-x  test/lint/lint-locale-dependence.py | 1
-rwxr-xr-x  test/lint/lint-logs.py | 4
-rwxr-xr-x  test/lint/lint-shell-locale.py | 2
-rwxr-xr-x  test/lint/lint-shell.py | 2
-rwxr-xr-x  test/lint/lint-spelling.py | 2
-rwxr-xr-x  test/lint/lint-whitespace.py | 1
146 files changed, 1651 insertions(+), 2057 deletions(-)
diff --git a/.gitignore b/.gitignore
index 6c888bfdc4..6ca9d39a16 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,8 +44,6 @@ src/obj
share/setup.nsi
share/qt/Info.plist
-src/univalue/gen
-
src/qt/*.moc
src/qt/moc_*.cpp
src/qt/forms/ui_*.h
diff --git a/Makefile.am b/Makefile.am
index 05e89f12b7..011faa62c7 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -174,7 +174,6 @@ LCOV_FILTER_PATTERN = \
-p "src/leveldb/" \
-p "src/crc32c/" \
-p "src/bench/" \
- -p "src/univalue" \
-p "src/crypto/ctaes" \
-p "src/minisketch" \
-p "src/secp256k1" \
diff --git a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj
index a64ae881f2..9f9dc9d5fa 100644
--- a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj
+++ b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj
@@ -53,6 +53,7 @@
<ClCompile Include="..\..\src\qt\transactiondesc.cpp" />
<ClCompile Include="..\..\src\qt\transactiondescdialog.cpp" />
<ClCompile Include="..\..\src\qt\transactionfilterproxy.cpp" />
+ <ClCompile Include="..\..\src\qt\transactionoverviewwidget.cpp" />
<ClCompile Include="..\..\src\qt\transactionrecord.cpp" />
<ClCompile Include="..\..\src\qt\transactiontablemodel.cpp" />
<ClCompile Include="..\..\src\qt\transactionview.cpp" />
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index 84b3404e78..1f14dd079f 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -18,7 +18,6 @@ export COMMIT_RANGE
test/lint/git-subtree-check.sh src/crypto/ctaes
test/lint/git-subtree-check.sh src/secp256k1
test/lint/git-subtree-check.sh src/minisketch
-test/lint/git-subtree-check.sh src/univalue
test/lint/git-subtree-check.sh src/leveldb
test/lint/git-subtree-check.sh src/crc32c
test/lint/check-doc.py
diff --git a/configure.ac b/configure.ac
index f6d00d0283..a4a23474c8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -998,6 +998,8 @@ AC_CHECK_DECLS([setsid])
AC_CHECK_DECLS([pipe2])
+AC_CHECK_FUNCS([timingsafe_bcmp])
+
AC_CHECK_DECLS([le16toh, le32toh, le64toh, htole16, htole32, htole64, be16toh, be32toh, be64toh, htobe16, htobe32, htobe64],,,
[#if HAVE_ENDIAN_H
#include <endian.h>
@@ -1552,6 +1554,7 @@ fi
dnl libevent check
+use_libevent=no
if test "$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench" != "nonononono"; then
PKG_CHECK_MODULES([EVENT], [libevent >= 2.1.8], [use_libevent=yes], [AC_MSG_ERROR([libevent version 2.1.8 or greater not found.])])
if test "$TARGET_OS" != "windows"; then
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
index e20eb4b0d2..680de1f1b3 100755
--- a/contrib/devtools/copyright_header.py
+++ b/contrib/devtools/copyright_header.py
@@ -35,7 +35,6 @@ EXCLUDE_DIRS = [
"src/leveldb/",
"src/minisketch",
"src/secp256k1/",
- "src/univalue/",
"src/crc32c/",
]
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index a419e392ee..23d29af3f1 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -50,9 +50,8 @@ MAX_VERSIONS = {
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
-'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__',
-'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
-'environ', '_environ', '__environ',
+'environ', '_environ', '__environ', '_fini', '_init', 'stdin',
+'stdout', 'stderr',
}
# Expected linker-loader names can be found here:
diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py
index 9988478f1b..eefd22a60e 100755
--- a/contrib/message-capture/message-capture-parser.py
+++ b/contrib/message-capture/message-capture-parser.py
@@ -79,7 +79,8 @@ def to_jsonable(obj: Any) -> Any:
val = getattr(obj, slot, None)
if slot in HASH_INTS and isinstance(val, int):
ret[slot] = ser_uint256(val).hex()
- elif slot in HASH_INT_VECTORS and isinstance(val[0], int):
+ elif slot in HASH_INT_VECTORS:
+ assert all(isinstance(a, int) for a in val)
ret[slot] = [ser_uint256(a).hex() for a in val]
else:
ret[slot] = to_jsonable(val)
diff --git a/contrib/seeds/.gitignore b/contrib/seeds/.gitignore
index e4a39d6093..d9a2451f70 100644
--- a/contrib/seeds/.gitignore
+++ b/contrib/seeds/.gitignore
@@ -1 +1,2 @@
seeds_main.txt
+asmap-filled.dat
diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md
index c53446bfb0..b2ea7522ac 100644
--- a/contrib/seeds/README.md
+++ b/contrib/seeds/README.md
@@ -8,21 +8,11 @@ and remove old versions as necessary (at a minimum when GetDesirableServiceFlags
changes its default return value, as those are the services which seeds are added
to addrman with).
-The seeds compiled into the release are created from sipa's DNS seed data, like this:
+The seeds compiled into the release are created from sipa's DNS seed and AS map
+data. Run the following commands from the `/contrib/seeds` directory:
curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt
- python3 makeseeds.py < seeds_main.txt > nodes_main.txt
+ curl https://bitcoin.sipa.be/asmap-filled.dat > asmap-filled.dat
+ python3 makeseeds.py -a asmap-filled.dat < seeds_main.txt > nodes_main.txt
cat nodes_main_manual.txt >> nodes_main.txt
python3 generate-seeds.py . > ../../src/chainparamsseeds.h
-
-## Dependencies
-
-Ubuntu, Debian:
-
- sudo apt-get install python3-dnspython
-
-and/or for other operating systems:
-
- pip install dnspython
-
-See https://dnspython.readthedocs.io/en/latest/installation.html for more information.
diff --git a/contrib/seeds/asmap.py b/contrib/seeds/asmap.py
new file mode 100644
index 0000000000..7a605d0b9e
--- /dev/null
+++ b/contrib/seeds/asmap.py
@@ -0,0 +1,815 @@
+# Copyright (c) 2022 Pieter Wuille
+# Distributed under the MIT software license, see the accompanying
+# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+This module provides the ASNEntry and ASMap classes.
+"""
+
+import copy
+import ipaddress
+import random
+import unittest
+from enum import Enum
+from functools import total_ordering
+from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union, overload
+
+def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> List[bool]:
+ """
+ Convert an IPv4 or IPv6 network to a prefix represented as a list of bits.
+
+ IPv4 ranges are remapped to their IPv4-mapped IPv6 range (::ffff:0:0/96).
+ """
+ num_bits = net.prefixlen
+ netrange = int.from_bytes(net.network_address.packed, 'big')
+
+ # Map an IPv4 prefix into IPv6 space.
+ if isinstance(net, ipaddress.IPv4Network):
+ num_bits += 96
+ netrange += 0xffff00000000
+
+ # Strip unused bottom bits.
+ assert (netrange & ((1 << (128 - num_bits)) - 1)) == 0
+ return [((netrange >> (127 - i)) & 1) != 0 for i in range(num_bits)]
+
+def prefix_to_net(prefix: List[bool]) -> Union[ipaddress.IPv4Network,ipaddress.IPv6Network]:
+ """The reverse operation of net_to_prefix."""
+ # Convert to number
+ netrange = sum(b << (127 - i) for i, b in enumerate(prefix))
+ num_bits = len(prefix)
+ assert num_bits <= 128
+
+ # Return IPv4 range if in ::ffff:0:0/96
+ if num_bits >= 96 and (netrange >> 32) == 0xffff:
+ return ipaddress.IPv4Network((netrange & 0xffffffff, num_bits - 96), True)
+
+ # Return IPv6 range otherwise.
+ return ipaddress.IPv6Network((netrange, num_bits), True)
+
+# Shortcut for (prefix, ASN) entries.
+ASNEntry = Tuple[List[bool], int]
+
+# Shortcut for (prefix, old ASN, new ASN) entries.
+ASNDiff = Tuple[List[bool], int, int]
+
+class _VarLenCoder:
+ """
+ A class representing a custom variable-length binary encoder/decoder for
+ integers. Each object represents a different coder, with different parameters
+ minval and clsbits.
+
+ The encoding is easiest to describe using an example. Let's say minval=100 and
+ clsbits=[4,2,2,3]. In that case:
+ - x in [100..115]: encoded as [0] + [4-bit BE encoding of (x-100)].
+ - x in [116..119]: encoded as [1,0] + [2-bit BE encoding of (x-116)].
+ - x in [120..123]: encoded as [1,1,0] + [2-bit BE encoding of (x-120)].
+ - x in [124..131]: encoded as [1,1,1] + [3-bit BE encoding of (x-124)].
+
+ In general, every number is encoded as:
+ - First, k "1"-bits, where k is the class the number falls in (there is one class
+ per element of clsbits).
+ - Then, a "0"-bit, unless k is the highest class, in which case there is nothing.
+ - Lastly, clsbits[k] bits encoding in big endian the position in its class that
+ number falls into.
+ - Every class k consists of 2^clsbits[k] consecutive integers. k=0 starts at minval,
+ other classes start one past the last element of the class before it.
+ """
+
+ def __init__(self, minval: int, clsbits: List[int]):
+ """Construct a new _VarLenCoder."""
+ self._minval = minval
+ self._clsbits = clsbits
+ self._maxval = minval + sum(1 << b for b in clsbits) - 1
+
+ def can_encode(self, val: int) -> bool:
+ """Check whether value val is in the range this coder supports."""
+ return self._minval <= val <= self._maxval
+
+ def encode(self, val: int, ret: List[int]) -> None:
+ """Append encoding of val onto integer list ret."""
+
+ assert self._minval <= val <= self._maxval
+ val -= self._minval
+ bits = 0
+ for k, bits in enumerate(self._clsbits):
+ if val >> bits:
+ # If the value will not fit in class k, subtract its range from v,
+ # emit a "1" bit and continue with the next class.
+ val -= 1 << bits
+ ret.append(1)
+ else:
+ if k + 1 < len(self._clsbits):
+ # Unless we're in the last class, emit a "0" bit.
+ ret.append(0)
+ break
+ # And then encode v (now the position within the class) in big endian.
+ ret.extend((val >> (bits - 1 - b)) & 1 for b in range(bits))
+
+ def encode_size(self, val: int) -> int:
+ """Compute how many bits are needed to encode val."""
+ assert self._minval <= val <= self._maxval
+ val -= self._minval
+ ret = 0
+ bits = 0
+ for k, bits in enumerate(self._clsbits):
+ if val >> bits:
+ val -= 1 << bits
+ ret += 1
+ else:
+ ret += k + 1 < len(self._clsbits)
+ break
+ return ret + bits
+
+ def decode(self, stream, bitpos) -> Tuple[int,int]:
+ """Decode a number starting at bitpos in stream, returning value and new bitpos."""
+ val = self._minval
+ bits = 0
+ for k, bits in enumerate(self._clsbits):
+ bit = 0
+ if k + 1 < len(self._clsbits):
+ bit = stream[bitpos]
+ bitpos += 1
+ if not bit:
+ break
+ val += 1 << bits
+ for i in range(bits):
+ bit = stream[bitpos]
+ bitpos += 1
+ val += bit << (bits - 1 - i)
+ return val, bitpos
+
+# Variable-length encoders used in the binary asmap format.
+_CODER_INS = _VarLenCoder(0, [0, 0, 1])
+_CODER_ASN = _VarLenCoder(1, list(range(15, 25)))
+_CODER_MATCH = _VarLenCoder(2, list(range(1, 9)))
+_CODER_JUMP = _VarLenCoder(17, list(range(5, 31)))
+
+class _Instruction(Enum):
+ """One instruction in the binary asmap format."""
+ # A return instruction, encoded as [0], returns a constant ASN. It is followed by
+ # an integer using the ASN encoding.
+ RETURN = 0
+ # A jump instruction, encoded as [1,0] inspects the next unused bit in the input
+ # and either continues execution (if 0), or skips a specified number of bits (if 1).
+ # It is followed by an integer, and then two subprograms. The integer uses jump encoding
+ # and corresponds to the length of the first subprogram (so it can be skipped).
+ JUMP = 1
+ # A match instruction, encoded as [1,1,0] inspects 1 or more of the next unused bits
+ # in the input with its argument. If they all match, execution continues. If they do
+ # not, failure is returned. If a default instruction has been executed before, instead
+ # of failure the default instruction's argument is returned. It is followed by an
+ # integer in match encoding, and a subprogram. That value is at least 2 bits and at
+ # most 9 bits. An n-bit value signifies matching (n-1) bits in the input with the lower
+ # (n-1) bits in the match value.
+ MATCH = 2
+ # A default instruction, encoded as [1,1,1] sets the default variable to its argument,
+ # and continues execution. It is followed by an integer in ASN encoding, and a subprogram.
+ DEFAULT = 3
+ # Not an actual instruction, but a way to encode the empty program that fails. In the
+ # encoder, it is used more generally to represent the failure case inside MATCH instructions,
+ # which may (if used inside the context of a DEFAULT instruction) actually correspond to
+ # a successful return. In this usage, they're always converted to an actual MATCH or RETURN
+ # before the top level is reached (see make_default below).
+ END = 4
+
+class _BinNode:
+ """A class representing a (node of) the parsed binary asmap format."""
+
+ @overload
+ def __init__(self, ins: _Instruction): ...
+ @overload
+ def __init__(self, ins: _Instruction, arg1: int): ...
+ @overload
+ def __init__(self, ins: _Instruction, arg1: "_BinNode", arg2: "_BinNode"): ...
+ @overload
+ def __init__(self, ins: _Instruction, arg1: int, arg2: "_BinNode"): ...
+
+ def __init__(self, ins: _Instruction, arg1=None, arg2=None):
+ """
+ Construct a new asmap node. Possibilities are:
+ - _BinNode(_Instruction.RETURN, asn)
+ - _BinNode(_Instruction.JUMP, node_0, node_1)
+ - _BinNode(_Instruction.MATCH, val, node)
+ - _BinNode(_Instruction.DEFAULT, asn, node)
+ - _BinNode(_Instruction.END)
+ """
+ self.ins = ins
+ self.arg1 = arg1
+ self.arg2 = arg2
+ if ins == _Instruction.RETURN:
+ assert isinstance(arg1, int)
+ assert arg2 is None
+ self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1)
+ elif ins == _Instruction.JUMP:
+ assert isinstance(arg1, _BinNode)
+ assert isinstance(arg2, _BinNode)
+ self.size = (_CODER_INS.encode_size(ins.value) + _CODER_JUMP.encode_size(arg1.size) +
+ arg1.size + arg2.size)
+ elif ins == _Instruction.DEFAULT:
+ assert isinstance(arg1, int)
+ assert isinstance(arg2, _BinNode)
+ self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1) + arg2.size
+ elif ins == _Instruction.MATCH:
+ assert isinstance(arg1, int)
+ assert isinstance(arg2, _BinNode)
+ self.size = (_CODER_INS.encode_size(ins.value) + _CODER_MATCH.encode_size(arg1)
+ + arg2.size)
+ elif ins == _Instruction.END:
+ assert arg1 is None
+ assert arg2 is None
+ self.size = 0
+ else:
+ assert False
+
+ @staticmethod
+ def make_end() -> "_BinNode":
+ """Constructor for a _BinNode with just an END instruction."""
+ return _BinNode(_Instruction.END)
+
+ @staticmethod
+ def make_leaf(val: int) -> "_BinNode":
+ """Constructor for a _BinNode of just a RETURN instruction."""
+ assert val is not None and val > 0
+ return _BinNode(_Instruction.RETURN, val)
+
+ @staticmethod
+ def make_branch(node0: "_BinNode", node1: "_BinNode") -> "_BinNode":
+ """
+ Construct a _BinNode corresponding to running either the node0 or node1 subprogram,
+ based on the next input bit. It exploits shortcuts that are possible in the encoding,
+ and uses either a JUMP, MATCH, or END instruction.
+ """
+ if node0.ins == _Instruction.END and node1.ins == _Instruction.END:
+ return node0
+ if node0.ins == _Instruction.END:
+ if node1.ins == _Instruction.MATCH and node1.arg1 <= 0xFF:
+ return _BinNode(node1.ins, node1.arg1 + (1 << node1.arg1.bit_length()), node1.arg2)
+ return _BinNode(_Instruction.MATCH, 3, node1)
+ if node1.ins == _Instruction.END:
+ if node0.ins == _Instruction.MATCH and node0.arg1 <= 0xFF:
+ return _BinNode(node0.ins, node0.arg1 + (1 << (node0.arg1.bit_length() - 1)),
+ node0.arg2)
+ return _BinNode(_Instruction.MATCH, 2, node0)
+ return _BinNode(_Instruction.JUMP, node0, node1)
+
+ @staticmethod
+ def make_default(val: int, sub: "_BinNode") -> "_BinNode":
+ """
+ Construct a _BinNode that corresponds to the specified subprogram, with the specified
+ default value. It exploits shortcuts that are possible in the encoding, and will use
+ either a DEFAULT or a RETURN instruction."""
+ assert val is not None and val > 0
+ if sub.ins == _Instruction.END:
+ return _BinNode(_Instruction.RETURN, val)
+ if sub.ins in (_Instruction.RETURN, _Instruction.DEFAULT):
+ return sub
+ return _BinNode(_Instruction.DEFAULT, val, sub)
+
+@total_ordering
+class ASMap:
+ """
+ A class whose objects represent a mapping from subnets to ASNs.
+
+ Internally the mapping is stored as a binary trie, but can be converted
+ from/to a list of ASNEntry objects, and from/to the binary asmap file format.
+
+ In the trie representation, nodes are represented as bare lists for efficiency
+ and ease of manipulation:
+ - [0] means an unassigned subnet (no ASN mapping for it is present)
+ - [int] means a subnet mapped entirely to the specified ASN.
+ - [node,node] means a subnet whose lower half and upper half have different
+ mappings, represented by new trie nodes.
+ """
+
+ def update(self, prefix: List[bool], asn: int) -> None:
+ """Update this ASMap object to map prefix to the specified asn."""
+ assert asn == 0 or _CODER_ASN.can_encode(asn)
+
+ def recurse(node: List, offset: int) -> None:
+ if offset == len(prefix):
+ # Reached the end of prefix; overwrite this node.
+ node.clear()
+ node.append(asn)
+ return
+ if len(node) == 1:
+ # Need to descend into a leaf node; split it up.
+ oldasn = node[0]
+ node.clear()
+ node.append([oldasn])
+ node.append([oldasn])
+ # Descend into the node.
+ recurse(node[prefix[offset]], offset + 1)
+ # If the result is two identical leaf children, merge them.
+ if len(node[0]) == 1 and len(node[1]) == 1 and node[0] == node[1]:
+ oldasn = node[0][0]
+ node.clear()
+ node.append(oldasn)
+ recurse(self._trie, 0)
+
+ def update_multi(self, entries: List[Tuple[List[bool], int]]) -> None:
+ """Apply multiple update operations, where longer prefixes take precedence."""
+ entries.sort(key=lambda entry: len(entry[0]))
+ for prefix, asn in entries:
+ self.update(prefix, asn)
+
+ def _set_trie(self, trie) -> None:
+ """Set trie directly. Internal use only."""
+ def recurse(node: List) -> None:
+ if len(node) < 2:
+ return
+ recurse(node[0])
+ recurse(node[1])
+ if len(node[0]) == 2:
+ return
+ if node[0] == node[1]:
+ if len(node[0]) == 0:
+ node.clear()
+ else:
+ asn = node[0][0]
+ node.clear()
+ node.append(asn)
+ recurse(trie)
+ self._trie = trie
+
+ def __init__(self, entries: Optional[Iterable[ASNEntry]] = None) -> None:
+ """Construct an ASMap object from an optional list of entries."""
+ self._trie = [0]
+ if entries is not None:
+ def entry_key(entry):
+ """Sort function that places shorter prefixes first."""
+ prefix, asn = entry
+ return len(prefix), prefix, asn
+ for prefix, asn in sorted(entries, key=entry_key):
+ self.update(prefix, asn)
+
+ def lookup(self, prefix: List[bool]) -> Optional[int]:
+ """Look up a prefix. Returns ASN, or 0 if unassigned, or None if indeterminate."""
+ node = self._trie
+ for bit in prefix:
+ if len(node) == 1:
+ break
+ node = node[bit]
+ if len(node) == 1:
+ return node[0]
+ return None
+
+ def _to_entries_flat(self, fill: bool = False) -> List[ASNEntry]:
+ """Convert an ASMap object to a list of non-overlapping (prefix, asn) objects."""
+ prefix : List[bool] = []
+
+ def recurse(node: List) -> List[ASNEntry]:
+ ret = []
+ if len(node) == 1:
+ if node[0] > 0:
+ ret = [(list(prefix), node[0])]
+ elif len(node) == 2:
+ prefix.append(False)
+ ret = recurse(node[0])
+ prefix[-1] = True
+ ret += recurse(node[1])
+ prefix.pop()
+ if fill and len(ret) > 1:
+ asns = set(x[1] for x in ret)
+ if len(asns) == 1:
+ ret = [(list(prefix), list(asns)[0])]
+ return ret
+ return recurse(self._trie)
+
+ def _to_entries_minimal(self, fill: bool = False) -> List[ASNEntry]:
+ """Convert a trie to a minimal list of ASNEntry objects, exploiting overlap."""
+ prefix : List[bool] = []
+
+ def recurse(node: List) -> (Tuple[Dict[Optional[int], List[ASNEntry]], bool]):
+ if len(node) == 1 and node[0] == 0:
+ return {None if fill else 0: []}, True
+ if len(node) == 1:
+ return {node[0]: [], None: [(list(prefix), node[0])]}, False
+ ret: Dict[Optional[int], List[ASNEntry]] = {}
+ prefix.append(False)
+ left, lhole = recurse(node[0])
+ prefix[-1] = True
+ right, rhole = recurse(node[1])
+ prefix.pop()
+ hole = not fill and (lhole or rhole)
+ def candidate(ctx: Optional[int], res0: Optional[List[ASNEntry]],
+ res1: Optional[List[ASNEntry]]):
+ if res0 is not None and res1 is not None:
+ if ctx not in ret or len(res0) + len(res1) < len(ret[ctx]):
+ ret[ctx] = res0 + res1
+ for ctx in set(left) | set(right):
+ candidate(ctx, left.get(ctx), right.get(ctx))
+ candidate(ctx, left.get(None), right.get(ctx))
+ candidate(ctx, left.get(ctx), right.get(None))
+ if not hole:
+ for ctx in list(ret):
+ if ctx is not None:
+ candidate(None, [(list(prefix), ctx)], ret[ctx])
+ if None in ret:
+ ret = {ctx:entries for ctx, entries in ret.items()
+ if ctx is None or len(entries) < len(ret[None])}
+ if hole:
+ ret = {ctx:entries for ctx, entries in ret.items() if ctx is None or ctx == 0}
+ return ret, hole
+ res, _ = recurse(self._trie)
+ return res[0] if 0 in res else res[None]
+
+ def __str__(self) -> str:
+ """Convert this ASMap object to a string containing Python code constructing it."""
+ return f"ASMap({self._trie})"
+
+ def to_entries(self, overlapping: bool = True, fill: bool = False) -> List[ASNEntry]:
+ """
+ Convert the mappings in this ASMap object to a list of ASNEntry objects.
+
+ Arguments:
+ overlapping: Permit the subnets in the resulting ASNEntry to overlap.
+ Setting this can result in a shorter list.
+ fill: Permit the resulting ASNEntry objects to cover subnets that
+ are unassigned in this ASMap object. Setting this can
+ result in a shorter list.
+ """
+ if overlapping:
+ return self._to_entries_minimal(fill)
+ return self._to_entries_flat(fill)
+
+ @staticmethod
+ def from_random(num_leaves: int = 10, max_asn: int = 6,
+ unassigned_prob: float = 0.5) -> "ASMap":
+ """
+ Construct a random ASMap object, with specified:
+ - Number of leaves in its trie (at least 1)
+ - Maximum ASN value (at least 1)
+ - Probability for leaf nodes to be unassigned
+
+ The number of leaves in the resulting object may be less than what is
+ requested. This method is mostly intended for testing.
+ """
+ assert num_leaves >= 1
+ assert max_asn >= 1 or unassigned_prob == 1
+ assert _CODER_ASN.can_encode(max_asn)
+ assert 0.0 <= unassigned_prob <= 1.0
+ trie: List = []
+ leaves = [trie]
+ ret = ASMap()
+ for i in range(1, num_leaves):
+ idx = random.randrange(i)
+ leaf = leaves[idx]
+ lastleaf = leaves.pop()
+ if idx + 1 < i:
+ leaves[idx] = lastleaf
+ leaf.append([])
+ leaf.append([])
+ leaves.append(leaf[0])
+ leaves.append(leaf[1])
+ for leaf in leaves:
+ if random.random() >= unassigned_prob:
+ leaf.append(random.randrange(1, max_asn + 1))
+ else:
+ leaf.append(0)
+ #pylint: disable=protected-access
+ ret._set_trie(trie)
+ return ret
+
+ def _to_binnode(self, fill: bool = False) -> _BinNode:
+ """Convert a trie to a _BinNode object."""
+ def recurse(node: List) -> Tuple[Dict[Optional[int], _BinNode], bool]:
+ if len(node) == 1 and node[0] == 0:
+ return {(None if fill else 0): _BinNode.make_end()}, True
+ if len(node) == 1:
+ return {None: _BinNode.make_leaf(node[0]), node[0]: _BinNode.make_end()}, False
+ ret: Dict[Optional[int], _BinNode] = {}
+ left, lhole = recurse(node[0])
+ right, rhole = recurse(node[1])
+ hole = (lhole or rhole) and not fill
+
+ def candidate(ctx: Optional[int], arg1, arg2, func: Callable):
+ if arg1 is not None and arg2 is not None:
+ cand = func(arg1, arg2)
+ if ctx not in ret or cand.size < ret[ctx].size:
+ ret[ctx] = cand
+
+ for ctx in set(left) | set(right):
+ candidate(ctx, left.get(ctx), right.get(ctx), _BinNode.make_branch)
+ candidate(ctx, left.get(None), right.get(ctx), _BinNode.make_branch)
+ candidate(ctx, left.get(ctx), right.get(None), _BinNode.make_branch)
+ if not hole:
+ for ctx in set(ret) - set([None]):
+ candidate(None, ctx, ret[ctx], _BinNode.make_default)
+ if None in ret:
+ ret = {ctx:enc for ctx, enc in ret.items()
+ if ctx is None or enc.size < ret[None].size}
+ if hole:
+ ret = {ctx:enc for ctx, enc in ret.items() if ctx is None or ctx == 0}
+ return ret, hole
+ res, _ = recurse(self._trie)
+ return res[0] if 0 in res else res[None]
+
+ @staticmethod
+ def _from_binnode(binnode: _BinNode) -> "ASMap":
+ """Construct an ASMap object from a _BinNode. Internal use only."""
+ def recurse(node: _BinNode, default: int) -> List:
+ if node.ins == _Instruction.RETURN:
+ return [node.arg1]
+ if node.ins == _Instruction.JUMP:
+ return [recurse(node.arg1, default), recurse(node.arg2, default)]
+ if node.ins == _Instruction.MATCH:
+ val = node.arg1
+ sub = recurse(node.arg2, default)
+ while val >= 2:
+ bit = val & 1
+ val >>= 1
+ if bit:
+ sub = [[default], sub]
+ else:
+ sub = [sub, [default]]
+ return sub
+ assert node.ins == _Instruction.DEFAULT
+ return recurse(node.arg2, node.arg1)
+ ret = ASMap()
+ if binnode.ins != _Instruction.END:
+ #pylint: disable=protected-access
+ ret._set_trie(recurse(binnode, 0))
+ return ret
+
+ def to_binary(self, fill: bool = False) -> bytes:
+ """
+ Convert this ASMap object to binary.
+
+ Argument:
+ fill: permit the resulting binary encoder to contain mappers for
+ unassigned subnets in this ASMap object. Doing so may
+ reduce the size of the encoding.
+ Returns:
+ A bytes object with the encoding of this ASMap object.
+ """
+ bits: List[int] = []
+
+ def recurse(node: _BinNode) -> None:
+ _CODER_INS.encode(node.ins.value, bits)
+ if node.ins == _Instruction.RETURN:
+ _CODER_ASN.encode(node.arg1, bits)
+ elif node.ins == _Instruction.JUMP:
+ _CODER_JUMP.encode(node.arg1.size, bits)
+ recurse(node.arg1)
+ recurse(node.arg2)
+ elif node.ins == _Instruction.DEFAULT:
+ _CODER_ASN.encode(node.arg1, bits)
+ recurse(node.arg2)
+ else:
+ assert node.ins == _Instruction.MATCH
+ _CODER_MATCH.encode(node.arg1, bits)
+ recurse(node.arg2)
+
+ binnode = self._to_binnode(fill)
+ if binnode.ins != _Instruction.END:
+ recurse(binnode)
+
+ val = 0
+ nbits = 0
+ ret = []
+ for bit in bits:
+ val += (bit << nbits)
+ nbits += 1
+ if nbits == 8:
+ ret.append(val)
+ val = 0
+ nbits = 0
+ if nbits:
+ ret.append(val)
+ return bytes(ret)
+
+ @staticmethod
+ def from_binary(bindata: bytes) -> Optional["ASMap"]:
+ """Decode an ASMap object from the provided binary encoding."""
+
+ bits: List[int] = []
+ for byte in bindata:
+ bits.extend((byte >> i) & 1 for i in range(8))
+
+ def recurse(bitpos: int) -> Tuple[_BinNode, int]:
+ insval, bitpos = _CODER_INS.decode(bits, bitpos)
+ ins = _Instruction(insval)
+ if ins == _Instruction.RETURN:
+ asn, bitpos = _CODER_ASN.decode(bits, bitpos)
+ return _BinNode(ins, asn), bitpos
+ if ins == _Instruction.JUMP:
+ jump, bitpos = _CODER_JUMP.decode(bits, bitpos)
+ left, bitpos1 = recurse(bitpos)
+ if bitpos1 != bitpos + jump:
+ raise ValueError("Inconsistent jump")
+ right, bitpos = recurse(bitpos1)
+ return _BinNode(ins, left, right), bitpos
+ if ins == _Instruction.MATCH:
+ match, bitpos = _CODER_MATCH.decode(bits, bitpos)
+ sub, bitpos = recurse(bitpos)
+ return _BinNode(ins, match, sub), bitpos
+ assert ins == _Instruction.DEFAULT
+ asn, bitpos = _CODER_ASN.decode(bits, bitpos)
+ sub, bitpos = recurse(bitpos)
+ return _BinNode(ins, asn, sub), bitpos
+
+ if len(bits) == 0:
+ binnode = _BinNode(_Instruction.END)
+ else:
+ try:
+ binnode, bitpos = recurse(0)
+ except (ValueError, IndexError):
+ return None
+ if bitpos < len(bits) - 7:
+ return None
+ if not all(bit == 0 for bit in bits[bitpos:]):
+ return None
+
+ return ASMap._from_binnode(binnode)
+
+ def __lt__(self, other: "ASMap") -> bool:
+ return self._trie < other._trie
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, ASMap):
+ return self._trie == other._trie
+ return False
+
+ def extends(self, req: "ASMap") -> bool:
+ """Determine whether this matches req for all subranges where req is assigned."""
+ def recurse(actual: List, require: List) -> bool:
+ if len(require) == 1 and require[0] == 0:
+ return True
+ if len(require) == 1:
+ if len(actual) == 1:
+ return bool(require[0] == actual[0])
+ return recurse(actual[0], require) and recurse(actual[1], require)
+ if len(actual) == 2:
+ return recurse(actual[0], require[0]) and recurse(actual[1], require[1])
+ return recurse(actual, require[0]) and recurse(actual, require[1])
+ assert isinstance(req, ASMap)
+ #pylint: disable=protected-access
+ return recurse(self._trie, req._trie)
+
+ def diff(self, other: "ASMap") -> List[ASNDiff]:
+ """Compute the diff from self to other."""
+ prefix: List[bool] = []
+ ret: List[ASNDiff] = []
+
+ def recurse(old_node: List, new_node: List):
+ if len(old_node) == 1 and len(new_node) == 1:
+ if old_node[0] != new_node[0]:
+ ret.append((list(prefix), old_node[0], new_node[0]))
+ else:
+ old_left: List = old_node if len(old_node) == 1 else old_node[0]
+ old_right: List = old_node if len(old_node) == 1 else old_node[1]
+ new_left: List = new_node if len(new_node) == 1 else new_node[0]
+ new_right: List = new_node if len(new_node) == 1 else new_node[1]
+ prefix.append(False)
+ recurse(old_left, new_left)
+ prefix[-1] = True
+ recurse(old_right, new_right)
+ prefix.pop()
+ assert isinstance(other, ASMap)
+ #pylint: disable=protected-access
+ recurse(self._trie, other._trie)
+ return ret
+
+ def __copy__(self) -> "ASMap":
+ """Construct a copy of this ASMap object. Its state will not be shared."""
+ ret = ASMap()
+ #pylint: disable=protected-access
+ ret._set_trie(copy.deepcopy(self._trie))
+ return ret
+
+ def __deepcopy__(self, _) -> "ASMap":
+ # ASMap objects do not allow sharing of the _trie member, so we don't need the memoization.
+ return self.__copy__()
+
+
+class TestASMap(unittest.TestCase):
+ """Unit tests for this module."""
+
+ def test_ipv6_prefix_roundtrips(self) -> None:
+ """Test that random IPv6 network ranges roundtrip through prefix encoding."""
+ for _ in range(20):
+ net_bits = random.getrandbits(128)
+ for prefix_len in range(0, 129):
+ masked_bits = (net_bits >> (128 - prefix_len)) << (128 - prefix_len)
+ net = ipaddress.IPv6Network((masked_bits.to_bytes(16, 'big'), prefix_len))
+ prefix = net_to_prefix(net)
+ self.assertTrue(len(prefix) <= 128)
+ net2 = prefix_to_net(prefix)
+ self.assertEqual(net, net2)
+
+ def test_ipv4_prefix_roundtrips(self) -> None:
+ """Test that random IPv4 network ranges roundtrip through prefix encoding."""
+ for _ in range(100):
+ net_bits = random.getrandbits(32)
+ for prefix_len in range(0, 33):
+ masked_bits = (net_bits >> (32 - prefix_len)) << (32 - prefix_len)
+ net = ipaddress.IPv4Network((masked_bits.to_bytes(4, 'big'), prefix_len))
+ prefix = net_to_prefix(net)
+ self.assertTrue(32 <= len(prefix) <= 128)
+ net2 = prefix_to_net(prefix)
+ self.assertEqual(net, net2)
+
+ def test_asmap_roundtrips(self) -> None:
+ """Test case that verifies random ASMap objects roundtrip to/from entries/binary."""
+ # Iterate over the number of leaves the random test ASMap objects have.
+ for leaves in range(1, 20):
+ # Iterate over the number of bits in the AS numbers used.
+ for asnbits in range(0, 24):
+ # Iterate over the probability that leaves are unassigned.
+ for pct in range(101):
+ # Construct a random ASMap object according to the above parameters.
+ asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits),
+ unassigned_prob=0.01 * pct)
+ # Run tests for to_entries and construction from those entries, both
+ # for overlapping and non-overlapping ones.
+ for overlapping in [False, True]:
+ entries = asmap.to_entries(overlapping=overlapping, fill=False)
+ random.shuffle(entries)
+ asmap2 = ASMap(entries)
+ assert asmap2 is not None
+ self.assertEqual(asmap2, asmap)
+ entries = asmap.to_entries(overlapping=overlapping, fill=True)
+ random.shuffle(entries)
+ asmap2 = ASMap(entries)
+ assert asmap2 is not None
+ self.assertTrue(asmap2.extends(asmap))
+
+ # Run tests for to_binary and construction from binary.
+ enc = asmap.to_binary(fill=False)
+ asmap3 = ASMap.from_binary(enc)
+ assert asmap3 is not None
+ self.assertEqual(asmap3, asmap)
+ enc = asmap.to_binary(fill=True)
+ asmap3 = ASMap.from_binary(enc)
+ assert asmap3 is not None
+ self.assertTrue(asmap3.extends(asmap))
+
+ def test_patching(self) -> None:
+ """Test behavior of update, lookup, extends, and diff."""
+ #pylint: disable=too-many-locals,too-many-nested-blocks
+ # Iterate over the number of leaves the random test ASMap objects have.
+ for leaves in range(1, 20):
+ # Iterate over the number of bits in the AS numbers used.
+ for asnbits in range(0, 10):
+ # Iterate over the probability that leaves are unassigned.
+ for pct in range(0, 101):
+ # Construct a random ASMap object according to the above parameters.
+ asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits),
+ unassigned_prob=0.01 * pct)
+ # Make a copy of that asmap object to which patches will be applied.
+ # It starts off being equal to asmap.
+ patched = copy.copy(asmap)
+ # Keep a list of patches performed.
+ patches: List[ASNEntry] = []
+ # Initially there cannot be any difference.
+ self.assertEqual(asmap.diff(patched), [])
+ # Make 5 patches, each building on top of the previous ones.
+ for _ in range(0, 5):
+ # Construct a random path and new ASN to assign it to, apply it to patched,
+ # and remember it in patches.
+ pathlen = random.randrange(5)
+ path = [random.getrandbits(1) != 0 for _ in range(pathlen)]
+ newasn = random.randrange(1 + (1 << asnbits))
+ patched.update(path, newasn)
+ patches = [(path, newasn)] + patches
+
+ # Compute the diff, and whether asmap extends patched, and the other way
+ # around.
+ diff = asmap.diff(patched)
+ self.assertEqual(asmap == patched, len(diff) == 0)
+ extends = asmap.extends(patched)
+ back_extends = patched.extends(asmap)
+ # Determine whether those extends results are consistent with the diff
+ # result.
+ self.assertEqual(extends, all(d[2] == 0 for d in diff))
+ self.assertEqual(back_extends, all(d[1] == 0 for d in diff))
+ # For every diff found:
+ for path, old_asn, new_asn in diff:
+ # Verify asmap and patched actually differ there.
+ self.assertTrue(old_asn != new_asn)
+ self.assertEqual(asmap.lookup(path), old_asn)
+ self.assertEqual(patched.lookup(path), new_asn)
+ for _ in range(2):
+ # Extend the path far enough that it's smaller than any mapped
+ # range, and check the lookup holds there too.
+ spec_path = list(path)
+ while len(spec_path) < 32:
+ spec_path.append(random.getrandbits(1) != 0)
+ self.assertEqual(asmap.lookup(spec_path), old_asn)
+ self.assertEqual(patched.lookup(spec_path), new_asn)
+ # Search through the list of performed patches to find the last one
+ # applying to the extended path (note that patches is in reverse
+ # order, so the first match should work).
+ found = False
+ for patch_path, patch_asn in patches:
+ if spec_path[:len(patch_path)] == patch_path:
+ # When found, it must match whatever the result was patched
+ # to.
+ self.assertEqual(new_asn, patch_asn)
+ found = True
+ break
+ # And such a patch must exist.
+ self.assertTrue(found)
+
+if __name__ == '__main__':
+ unittest.main()
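For illustration only, here is a minimal sketch of how the `asmap.py` API added above can be exercised (assuming it is run from `contrib/seeds/` so the module is importable; the subnets and AS numbers are made up):

    import ipaddress
    from asmap import ASMap, net_to_prefix

    # Map two subnets to AS numbers, then look up an address range inside one of them.
    entries = [
        (net_to_prefix(ipaddress.ip_network("1.2.0.0/16")), 101),
        (net_to_prefix(ipaddress.ip_network("2001:db8::/32")), 202),
    ]
    asmap = ASMap(entries)
    assert asmap.lookup(net_to_prefix(ipaddress.ip_network("1.2.3.0/24"))) == 101

    # Round-trip through the binary asmap file format that makeseeds.py loads via -a.
    encoded = asmap.to_binary(fill=False)
    assert ASMap.from_binary(encoded) == asmap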
diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py
index 78eb04a836..37c6f5fd7c 100755
--- a/contrib/seeds/makeseeds.py
+++ b/contrib/seeds/makeseeds.py
@@ -6,12 +6,15 @@
# Generate seeds.txt from Pieter's DNS seeder
#
+import argparse
+import ipaddress
import re
import sys
-import dns.resolver
import collections
from typing import List, Dict, Union
+from asmap import ASMap, net_to_prefix
+
NSEEDS=512
MAX_SEEDS_PER_ASN = {
@@ -45,7 +48,7 @@ def parseline(line: str) -> Union[dict, None]:
sline = line.split()
if len(sline) < 11:
# line too short to be valid, skip it.
- return None
+ return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
@@ -123,34 +126,8 @@ def filtermultiport(ips: List[Dict]) -> List[Dict]:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
-def lookup_asn(net: str, ip: str) -> Union[int, None]:
- """ Look up the asn for an `ip` address by querying cymru.com
- on network `net` (e.g. ipv4 or ipv6).
-
- Returns in integer ASN or None if it could not be found.
- """
- try:
- if net == 'ipv4':
- ipaddr = ip
- prefix = '.origin'
- else: # http://www.team-cymru.com/IP-ASN-mapping.html
- res = str() # 2001:4860:b002:23::68
- for nb in ip.split(':')[:4]: # pick the first 4 nibbles
- for c in nb.zfill(4): # right padded with '0'
- res += c + '.' # 2001 4860 b002 0023
- ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
- prefix = '.origin6'
-
- asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
- reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
- 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
- return asn
- except Exception as e:
- sys.stderr.write(f'ERR: Could not resolve ASN for "{ip}": {e}\n')
- return None
-
# Based on Greg Maxwell's seed_filter.py
-def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]:
+def filterbyasn(asmap: ASMap, ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]:
""" Prunes `ips` by
(a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and
(b) trimming ips to have at most `max_per_asn` ips from each asn in each net.
@@ -165,21 +142,18 @@ def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Di
asn_count: Dict[int, int] = collections.defaultdict(int)
for i, ip in enumerate(ips_ipv46):
- if i % 10 == 0:
- # give progress update
- print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True)
-
if net_count[ip['net']] == max_per_net:
# do not add this ip as we already have too many
# ips from this network
continue
- asn = lookup_asn(ip['net'], ip['ip'])
- if asn is None or asn_count[asn] == max_per_asn[ip['net']]:
+ asn = asmap.lookup(net_to_prefix(ipaddress.ip_network(ip['ip'])))
+ if not asn or asn_count[ip['net'], asn] == max_per_asn[ip['net']]:
# do not add this ip as we already have too many
# ips from this ASN on this network
continue
- asn_count[asn] += 1
+ asn_count[ip['net'], asn] += 1
net_count[ip['net']] += 1
+ ip['asn'] = asn
result.append(ip)
# Add back Onions (up to max_per_net)
@@ -195,9 +169,23 @@ def ip_stats(ips: List[Dict]) -> str:
return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}"
+def parse_args():
+ argparser = argparse.ArgumentParser(description='Generate a list of bitcoin node seed ip addresses.')
+ argparser.add_argument("-a","--asmap", help='the location of the asmap asn database file (required)', required=True)
+ return argparser.parse_args()
+
def main():
+ args = parse_args()
+
+ print(f'Loading asmap database "{args.asmap}"…', end='', file=sys.stderr, flush=True)
+ with open(args.asmap, 'rb') as f:
+ asmap = ASMap.from_binary(f.read())
+ print('Done.', file=sys.stderr)
+
+ print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True)
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
+ print('Done.', file=sys.stderr)
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print(f'{ip_stats(ips):s} Initial', file=sys.stderr)
@@ -230,15 +218,18 @@ def main():
ips = filtermultiport(ips)
print(f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
- ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
+ ips = filterbyasn(asmap, ips, MAX_SEEDS_PER_ASN, NSEEDS)
print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
- print('[%s]:%i' % (ip['ip'], ip['port']))
+ print(f"[{ip['ip']}]:{ip['port']}", end="")
else:
- print('%s:%i' % (ip['ip'], ip['port']))
+ print(f"{ip['ip']}:{ip['port']}", end="")
+ if 'asn' in ip:
+ print(f" # AS{ip['asn']}", end="")
+ print()
if __name__ == '__main__':
main()
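With this change each emitted seed line carries a trailing ASN comment derived from the asmap lookup. Illustratively (addresses and AS numbers are made up):

    1.2.3.4:8333 # AS101
    [2001:db8::1]:8333 # AS202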
diff --git a/depends/Makefile b/depends/Makefile
index 20f5f6b2c6..5a1c472a15 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -42,8 +42,12 @@ NO_UPNP ?=
NO_USDT ?=
NO_NATPMP ?=
MULTIPROCESS ?=
+LTO ?=
FALLBACK_DOWNLOAD_PATH ?= https://bitcoincore.org/depends-sources
+C_STANDARD ?= c11
+CXX_STANDARD ?= c++17
+
BUILD = $(shell ./config.guess)
HOST ?= $(BUILD)
PATCHES_PATH = $(BASEDIR)/patches
@@ -140,8 +144,8 @@ include packages/packages.mk
# 2. Before including packages/*.mk (excluding packages/packages.mk), since
# they rely on the build_id variables
#
-build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
-$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
+build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
+$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages)
@@ -242,6 +246,7 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_
-e 's|@no_usdt@|$(NO_USDT)|' \
-e 's|@no_natpmp@|$(NO_NATPMP)|' \
-e 's|@multiprocess@|$(MULTIPROCESS)|' \
+ -e 's|@lto@|$(LTO)|' \
-e 's|@debug@|$(DEBUG)|' \
$< > $@
touch $@
diff --git a/depends/README.md b/depends/README.md
index da2a74e0e7..66e1ddc4eb 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -96,6 +96,8 @@ The following can be set when running make: `make FOO=bar`
- `BASE_CACHE`: Built packages will be placed here
- `SDK_PATH`: Path where SDKs can be found (used by macOS)
- `FALLBACK_DOWNLOAD_PATH`: If a source file can't be fetched, try here before giving up
+- `C_STANDARD`: Set the C standard version used. Defaults to `c11`.
+- `CXX_STANDARD`: Set the C++ standard version used. Defaults to `c++17`.
- `NO_QT`: Don't download/build/cache Qt and its dependencies
- `NO_QR`: Don't download/build/cache packages needed for enabling qrencode
- `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ
@@ -117,6 +119,7 @@ The following can be set when running make: `make FOO=bar`
- `LOG`: Use file-based logging for individual packages. During a package build its log file
resides in the `depends` directory, and the log file is printed out automatically in case
of build error. After successful build log files are moved along with package archives
+- `LTO`: Use LTO when building packages.
If some packages are not built, for example `make NO_WALLET=1`, the appropriate
options will be passed to bitcoin's configure. In this case, `--disable-wallet`.
diff --git a/depends/config.site.in b/depends/config.site.in
index 03dabeea0a..189330c42d 100644
--- a/depends/config.site.in
+++ b/depends/config.site.in
@@ -78,6 +78,10 @@ if test "@host_os@" = darwin; then
BREW=no
fi
+if test -z "$enable_lto" && test -n "@lto@"; then
+ enable_lto=yes
+fi
+
PKG_CONFIG="$(which pkg-config) --static"
# These two need to remain exported because pkg-config does not see them
diff --git a/depends/gen_id b/depends/gen_id
index ac69ca7ee1..a0cd586461 100755
--- a/depends/gen_id
+++ b/depends/gen_id
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Usage: env [ CC=... ] [ CXX=... ] [ AR=... ] [ RANLIB=... ] [ STRIP=... ] \
-# [ DEBUG=... ] ./build-id [ID_SALT]...
+# [ DEBUG=... ] [ LTO=... ] ./build-id [ID_SALT]...
#
# Prints to stdout a SHA256 hash representing the current toolset, used by
# depends/Makefile as a build id for caching purposes (detecting when the
@@ -63,6 +63,10 @@
env | grep '^STRIP_'
echo "END STRIP"
+ echo "BEGIN LTO"
+ echo "LTO=${LTO}"
+ echo "END LTO"
+
echo "END ALL"
) | if [ -n "$DEBUG" ] && command -v tee > /dev/null 2>&1; then
# When debugging and `tee` is available, output the preimage to stderr
diff --git a/depends/hosts/android.mk b/depends/hosts/android.mk
index fcc1c4f5c3..b53966dcf8 100644
--- a/depends/hosts/android.mk
+++ b/depends/hosts/android.mk
@@ -5,6 +5,15 @@ else
android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang++
android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang
endif
+
+android_CFLAGS=-std=$(C_STANDARD)
+android_CXXFLAGS=-std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+android_CFLAGS += -flto
+android_LDFLAGS += -flto
+endif
+
android_AR=$(ANDROID_TOOLCHAIN_BIN)/llvm-ar
android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/llvm-ranlib
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index bf9b7625f2..8507e28532 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -109,8 +109,14 @@ darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \
-Xclang -internal-externc-isystem$(clang_resource_dir)/include \
-Xclang -internal-externc-isystem$(OSX_SDK)/usr/include
-darwin_CFLAGS=-pipe
-darwin_CXXFLAGS=$(darwin_CFLAGS)
+darwin_CFLAGS=-pipe -std=$(C_STANDARD)
+darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+darwin_CFLAGS += -flto
+darwin_CXXFLAGS += -flto
+darwin_LDFLAGS += -flto
+endif
darwin_release_CFLAGS=-O2
darwin_release_CXXFLAGS=$(darwin_release_CFLAGS)
diff --git a/depends/hosts/freebsd.mk b/depends/hosts/freebsd.mk
index 0a62347b57..5351d0b900 100644
--- a/depends/hosts/freebsd.mk
+++ b/depends/hosts/freebsd.mk
@@ -1,5 +1,11 @@
-freebsd_CFLAGS=-pipe
-freebsd_CXXFLAGS=$(freebsd_CFLAGS)
+freebsd_CFLAGS=-pipe -std=$(C_STANDARD)
+freebsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+freebsd_CFLAGS += -flto
+freebsd_CXXFLAGS += -flto
+freebsd_LDFLAGS += -flto
+endif
freebsd_release_CFLAGS=-O2
freebsd_release_CXXFLAGS=$(freebsd_release_CFLAGS)
diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk
index 07da752492..b101043439 100644
--- a/depends/hosts/linux.mk
+++ b/depends/hosts/linux.mk
@@ -1,5 +1,11 @@
-linux_CFLAGS=-pipe
-linux_CXXFLAGS=$(linux_CFLAGS)
+linux_CFLAGS=-pipe -std=$(C_STANDARD)
+linux_CXXFLAGS=-pipe -std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+linux_CFLAGS += -flto
+linux_CXXFLAGS += -flto
+linux_LDFLAGS += -flto
+endif
linux_release_CFLAGS=-O2
linux_release_CXXFLAGS=$(linux_release_CFLAGS)
diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk
index 48020d71af..b98f9ab7ac 100644
--- a/depends/hosts/mingw32.mk
+++ b/depends/hosts/mingw32.mk
@@ -2,8 +2,14 @@ ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-g++-posix"),)
mingw32_CXX := $(host)-g++-posix
endif
-mingw32_CFLAGS=-pipe
-mingw32_CXXFLAGS=$(mingw32_CFLAGS)
+mingw32_CFLAGS=-pipe -std=$(C_STANDARD)
+mingw32_CXXFLAGS=-pipe -std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+mingw32_CFLAGS += -flto
+mingw32_CXXFLAGS += -flto
+mingw32_LDFLAGS += -flto
+endif
mingw32_release_CFLAGS=-O2
mingw32_release_CXXFLAGS=$(mingw32_release_CFLAGS)
diff --git a/depends/hosts/netbsd.mk b/depends/hosts/netbsd.mk
index b3e4545a64..8342dcc6ed 100644
--- a/depends/hosts/netbsd.mk
+++ b/depends/hosts/netbsd.mk
@@ -1,4 +1,12 @@
-netbsd_CFLAGS=-pipe
+netbsd_CFLAGS=-pipe -std=$(C_STANDARD)
+netbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+netbsd_CFLAGS += -flto
+netbsd_CXXFLAGS += -flto
+netbsd_LDFLAGS += -flto
+endif
+
netbsd_CXXFLAGS=$(netbsd_CFLAGS)
netbsd_release_CFLAGS=-O2
diff --git a/depends/hosts/openbsd.mk b/depends/hosts/openbsd.mk
index 5988f24bff..d330e94d2e 100644
--- a/depends/hosts/openbsd.mk
+++ b/depends/hosts/openbsd.mk
@@ -1,5 +1,11 @@
-openbsd_CFLAGS=-pipe
-openbsd_CXXFLAGS=$(openbsd_CFLAGS)
+openbsd_CFLAGS=-pipe -std=$(C_STANDARD)
+openbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD)
+
+ifneq ($(LTO),)
+openbsd_CFLAGS += -flto
+openbsd_CXXFLAGS += -flto
+openbsd_LDFLAGS += -flto
+endif
openbsd_release_CFLAGS=-O2
openbsd_release_CXXFLAGS=$(openbsd_release_CFLAGS)
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index dc536fd399..80c7ce8429 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -15,7 +15,6 @@ $(package)_config_opts_netbsd=--with-pic
$(package)_config_opts_openbsd=--with-pic
$(package)_config_opts_android=--with-pic
$(package)_cflags+=-Wno-error=implicit-function-declaration
-$(package)_cxxflags+=-std=c++17
$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
endef
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index c74ae15b31..267ed11253 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -16,7 +16,6 @@ define $(package)_set_vars
$(package)_config_opts_netbsd=--with-pic
$(package)_config_opts_openbsd=--with-pic
$(package)_config_opts_android=--with-pic
- $(package)_cxxflags+=-std=c++17
endef
define $(package)_preprocess_cmds
diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf
index 543407f853..cb94bf07b4 100644
--- a/depends/patches/qt/mac-qmake.conf
+++ b/depends/patches/qt/mac-qmake.conf
@@ -15,7 +15,7 @@ QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION}
QMAKE_MAC_SDK.macosx.PlatformPath = /phony
!host_build: QMAKE_CFLAGS += -target $${MAC_TARGET}
!host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS
-!host_build: QMAKE_CXXFLAGS += $$QMAKE_CFLAGS
+!host_build: QMAKE_CXXFLAGS += -target $${MAC_TARGET}
!host_build: QMAKE_LFLAGS += -target $${MAC_TARGET}
QMAKE_AR = $${CROSS_COMPILE}ar cq
QMAKE_RANLIB=$${CROSS_COMPILE}ranlib
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index d1b660efff..23f975df34 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -1158,10 +1158,6 @@ Current subtrees include:
- src/crypto/ctaes
- Upstream at https://github.com/bitcoin-core/ctaes ; maintained by Core contributors.
-- src/univalue
- - Subtree at https://github.com/bitcoin-core/univalue-subtree ; maintained by Core contributors.
- - Deviates from upstream https://github.com/jgarzik/univalue.
-
- src/minisketch
- Upstream at https://github.com/sipa/minisketch ; maintained by Core contributors.
diff --git a/doc/policy/mempool-replacements.md b/doc/policy/mempool-replacements.md
index 3e844f8d7b..18f08daf88 100644
--- a/doc/policy/mempool-replacements.md
+++ b/doc/policy/mempool-replacements.md
@@ -51,6 +51,13 @@ other consensus and policy rules, each of the following conditions are met:
significant portions of the node's mempool using replacements with multiple directly conflicting
transactions, each with large descendant sets.
+6. The replacement transaction's feerate is greater than the feerates of all directly conflicting
+ transactions.
+
+ *Rationale*: This rule was originally intended to ensure that the replacement transaction is
+ preferable for block-inclusion, compared to what would be removed from the mempool. This rule
+ predates ancestor feerate-based transaction selection.
+
This set of rules is similar but distinct from BIP125.
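A rough numeric sketch of the feerate comparison in rule 6 (illustration only; the values are made up, and the other replacement rules, such as paying a higher absolute fee, still apply independently):

    def feerate_sat_per_vb(fee_sat: int, vsize_vb: int) -> float:
        return fee_sat / vsize_vb

    conflicting = feerate_sat_per_vb(1_500, 150)  # 10 sat/vB, already in the mempool
    replacement = feerate_sat_per_vb(2_400, 200)  # 12 sat/vB, candidate replacement
    assert replacement > conflicting              # rule 6: strictly greater feerate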
## History
diff --git a/doc/productivity.md b/doc/productivity.md
index a01c6f545d..e9b7bc270c 100644
--- a/doc/productivity.md
+++ b/doc/productivity.md
@@ -9,6 +9,7 @@ Table of Contents
* [Disable features with `./configure`](#disable-features-with-configure)
* [Make use of your threads with `make -j`](#make-use-of-your-threads-with-make--j)
* [Only build what you need](#only-build-what-you-need)
+ * [Compile on multiple machines](#compile-on-multiple-machines)
* [Multiple working directories with `git worktrees`](#multiple-working-directories-with-git-worktrees)
* [Interactive "dummy rebases" for fixups and execs with `git merge-base`](#interactive-dummy-rebases-for-fixups-and-execs-with-git-merge-base)
* [Writing code](#writing-code)
@@ -81,6 +82,10 @@ make -C src bitcoin_bench
(You can and should combine this with `-j`, as above, for a parallel build.)
+### Compile on multiple machines
+
+If you have more than one computer at your disposal, you can use [distcc](https://www.distcc.org) to speed up compilation. This is easiest when all computers run the same operating system and compiler version.
+
### Multiple working directories with `git worktrees`
If you work with multiple branches or multiple copies of the repository, you should try `git worktrees`.
diff --git a/src/Makefile.am b/src/Makefile.am
index 89329f5f69..fa716af619 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -29,7 +29,6 @@ LIBBITCOIN_NODE=libbitcoin_node.a
LIBBITCOIN_COMMON=libbitcoin_common.a
LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a
LIBBITCOIN_CLI=libbitcoin_cli.a
-LIBBITCOIN_KERNEL=libbitcoin_kernel.a
LIBBITCOIN_UTIL=libbitcoin_util.a
LIBBITCOIN_CRYPTO_BASE=crypto/libbitcoin_crypto_base.la
LIBBITCOINQT=qt/libbitcoinqt.a
@@ -198,7 +197,7 @@ BITCOIN_CORE_H = \
node/minisketchwrapper.h \
node/psbt.h \
node/transaction.h \
- node/ui_interface.h \
+ node/interface_ui.h \
node/utxo_snapshot.h \
noui.h \
outputtype.h \
@@ -375,7 +374,7 @@ libbitcoin_node_a_SOURCES = \
node/minisketchwrapper.cpp \
node/psbt.cpp \
node/transaction.cpp \
- node/ui_interface.cpp \
+ node/interface_ui.cpp \
noui.cpp \
policy/fees.cpp \
policy/packages.cpp \
@@ -877,7 +876,7 @@ libbitcoinkernel_la_SOURCES = \
logging.cpp \
node/blockstorage.cpp \
node/chainstate.cpp \
- node/ui_interface.cpp \
+ node/interface_ui.cpp \
policy/feerate.cpp \
policy/fees.cpp \
policy/packages.cpp \
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index 72037b3db2..b4acc47aa1 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -270,6 +270,7 @@ BITCOIN_QT_WALLET_CPP = \
qt/transactiondesc.cpp \
qt/transactiondescdialog.cpp \
qt/transactionfilterproxy.cpp \
+ qt/transactionoverviewwidget.cpp \
qt/transactionrecord.cpp \
qt/transactiontablemodel.cpp \
qt/transactionview.cpp \
diff --git a/src/banman.cpp b/src/banman.cpp
index 2a6e0e010f..508383d9f2 100644
--- a/src/banman.cpp
+++ b/src/banman.cpp
@@ -6,7 +6,7 @@
#include <banman.h>
#include <netaddress.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <sync.h>
#include <util/system.h>
#include <util/time.h>
diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp
index e80b9e1ac2..60d991fab9 100644
--- a/src/bench/mempool_eviction.cpp
+++ b/src/bench/mempool_eviction.cpp
@@ -108,7 +108,7 @@ static void MempoolEviction(benchmark::Bench& bench)
tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
tx7.vout[1].nValue = 10 * COIN;
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(testing_setup->m_node.mempool);
LOCK2(cs_main, pool.cs);
// Create transaction references outside the "hot loop"
const CTransactionRef tx1_r{MakeTransactionRef(tx1)};
diff --git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp
index 6e322ba6aa..0e6fdae3d7 100644
--- a/src/bench/rpc_mempool.cpp
+++ b/src/bench/rpc_mempool.cpp
@@ -3,7 +3,9 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
+#include <chainparamsbase.h>
#include <rpc/mempool.h>
+#include <test/util/setup_common.h>
#include <txmempool.h>
#include <univalue.h>
@@ -17,7 +19,8 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& poo
static void RpcMempool(benchmark::Bench& bench)
{
- CTxMemPool pool;
+ const auto testing_setup = MakeNoLogFileContext<const ChainTestingSetup>(CBaseChainParams::MAIN);
+ CTxMemPool& pool = *Assert(testing_setup->m_node.mempool);
LOCK2(cs_main, pool.cs);
for (int i = 0; i < 1000; ++i) {
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index b9e5a81f8d..0db2b75384 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -9,6 +9,7 @@
#include <chainparamsbase.h>
#include <clientversion.h>
+#include <compat.h>
#include <compat/stdin.h>
#include <policy/feerate.h>
#include <rpc/client.h>
@@ -1212,19 +1213,11 @@ static int CommandLineRPC(int argc, char *argv[])
return nRet;
}
-#ifdef WIN32
-// Export main() and ensure working ASLR on Windows.
-// Exporting a symbol will prevent the linker from stripping
-// the .reloc section from the binary, which is a requirement
-// for ASLR. This is a temporary workaround until a fixed
-// version of binutils is used for releases.
-__declspec(dllexport) int main(int argc, char* argv[])
+MAIN_FUNCTION
{
+#ifdef WIN32
util::WinCmdLineArgs winArgs;
std::tie(argc, argv) = winArgs.get();
-#else
-int main(int argc, char* argv[])
-{
#endif
SetupEnvironment();
if (!SetupNetworking()) {
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index bcefb3f18e..e0d5c6e5dc 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -8,6 +8,7 @@
#include <clientversion.h>
#include <coins.h>
+#include <compat.h>
#include <consensus/amount.h>
#include <consensus/consensus.h>
#include <core_io.h>
@@ -854,7 +855,7 @@ static int CommandLineRawTx(int argc, char* argv[])
return nRet;
}
-int main(int argc, char* argv[])
+MAIN_FUNCTION
{
SetupEnvironment();
diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp
index 1aeac3cef0..1739804edb 100644
--- a/src/bitcoin-util.cpp
+++ b/src/bitcoin-util.cpp
@@ -11,6 +11,7 @@
#include <chainparams.h>
#include <chainparamsbase.h>
#include <clientversion.h>
+#include <compat.h>
#include <core_io.h>
#include <streams.h>
#include <util/system.h>
@@ -142,16 +143,7 @@ static int Grind(const std::vector<std::string>& args, std::string& strPrint)
return EXIT_SUCCESS;
}
-#ifdef WIN32
-// Export main() and ensure working ASLR on Windows.
-// Exporting a symbol will prevent the linker from stripping
-// the .reloc section from the binary, which is a requirement
-// for ASLR. This is a temporary workaround until a fixed
-// version of binutils is used for releases.
-__declspec(dllexport) int main(int argc, char* argv[])
-#else
-int main(int argc, char* argv[])
-#endif
+MAIN_FUNCTION
{
ArgsManager& args = gArgs;
SetupEnvironment();
diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp
index 2f3dd45267..7bec3292a1 100644
--- a/src/bitcoin-wallet.cpp
+++ b/src/bitcoin-wallet.cpp
@@ -9,6 +9,7 @@
#include <chainparams.h>
#include <chainparamsbase.h>
#include <clientversion.h>
+#include <compat.h>
#include <interfaces/init.h>
#include <key.h>
#include <logging.h>
@@ -88,7 +89,7 @@ static bool WalletAppInit(ArgsManager& args, int argc, char* argv[])
return true;
}
-int main(int argc, char* argv[])
+MAIN_FUNCTION
{
ArgsManager& args = gArgs;
#ifdef WIN32
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 92e73d7c2a..be894e192e 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -14,7 +14,7 @@
#include <interfaces/chain.h>
#include <interfaces/init.h>
#include <node/context.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <noui.h>
#include <shutdown.h>
#include <util/check.h>
@@ -256,7 +256,7 @@ static bool AppInit(NodeContext& node, int argc, char* argv[])
return fRet;
}
-int main(int argc, char* argv[])
+MAIN_FUNCTION
{
#ifdef WIN32
util::WinCmdLineArgs winArgs;
diff --git a/src/compat.h b/src/compat.h
index 3ec4ab53fd..f41c501c84 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -86,6 +86,17 @@ typedef void* sockopt_arg_type;
typedef char* sockopt_arg_type;
#endif
+#ifdef WIN32
+// Export main() and ensure working ASLR when using mingw-w64.
+// Exporting a symbol will prevent the linker from stripping
+// the .reloc section from the binary, which is a requirement
+// for ASLR. While release builds are not affected, anyone
+// building with a binutils < 2.36 is subject to this ld bug.
+#define MAIN_FUNCTION __declspec(dllexport) int main(int argc, char* argv[])
+#else
+#define MAIN_FUNCTION int main(int argc, char* argv[])
+#endif
+
// Note these both should work with the current usage of poll, but best to be safe
// WIN32 poll is broken https://daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken/
// __APPLE__ poll is broken https://github.com/bitcoin/bitcoin/pull/14336#issuecomment-437384408
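With the macro above, each tool's entry point collapses to a single definition. A minimal sketch (the body is hypothetical; the pattern matches the call sites converted in this patch):

```cpp
#include <compat.h>

// Expands to an exported main() on Windows (keeping the .reloc section, and
// therefore ASLR, intact under older mingw-w64/binutils), and to a plain
// `int main(int argc, char* argv[])` everywhere else.
MAIN_FUNCTION
{
    // ... tool-specific setup and argument parsing ...
    return 0;
}
```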
diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp
index 4f3e6f7fa3..f736b2d867 100644
--- a/src/crypto/chacha_poly_aead.cpp
+++ b/src/crypto/chacha_poly_aead.cpp
@@ -2,6 +2,10 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#if defined(HAVE_CONFIG_H)
+#include <config/bitcoin-config.h>
+#endif
+
#include <crypto/chacha_poly_aead.h>
#include <crypto/poly1305.h>
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 44937dc523..b8f69b038c 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -11,7 +11,7 @@
#include <chainparamsbase.h>
#include <compat.h>
#include <netbase.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <rpc/protocol.h> // For HTTP status codes
#include <shutdown.h>
#include <sync.h>
@@ -400,7 +400,7 @@ bool InitHTTPServer()
LogPrint(BCLog::HTTP, "Initialized HTTP server\n");
int workQueueDepth = std::max((long)gArgs.GetIntArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L);
- LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth);
+ LogPrintfCategory(BCLog::HTTP, "creating work queue of depth %d\n", workQueueDepth);
g_work_queue = std::make_unique<WorkQueue<HTTPClosure>>(workQueueDepth);
// transfer ownership to eventBase/HTTP via .release()
@@ -424,7 +424,7 @@ void StartHTTPServer()
{
LogPrint(BCLog::HTTP, "Starting HTTP server\n");
int rpcThreads = std::max((long)gArgs.GetIntArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L);
- LogPrintf("HTTP: starting %d worker threads\n", rpcThreads);
+ LogPrintfCategory(BCLog::HTTP, "starting %d worker threads\n", rpcThreads);
g_thread_http = std::thread(ThreadHTTP, eventBase);
for (int i = 0; i < rpcThreads; i++) {
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 9b8f83caef..caff8c1e69 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -150,8 +150,8 @@ bool Session::Accept(Connection& conn)
throw std::runtime_error("wait on socket failed");
}
- if ((occurred & Sock::RECV) == 0) {
- // Timeout, no incoming connections within MAX_WAIT_FOR_IO.
+ if (occurred == 0) {
+ // Timeout, no incoming connections or errors within MAX_WAIT_FOR_IO.
continue;
}
@@ -376,8 +376,8 @@ void Session::CreateIfNotCreatedAlready()
m_session_id = session_id;
m_control_sock = std::move(sock);
- LogPrintf("I2P: SAM session created: session id=%s, my address=%s\n", m_session_id,
- m_my_addr.ToString());
+ LogPrintfCategory(BCLog::I2P, "SAM session created: session id=%s, my address=%s\n",
+ m_session_id, m_my_addr.ToString());
}
std::unique_ptr<Sock> Session::StreamAccept()
diff --git a/src/index/base.cpp b/src/index/base.cpp
index 9f0c1dea24..323547900d 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -5,7 +5,7 @@
#include <chainparams.h>
#include <index/base.h>
#include <node/blockstorage.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <util/syscall_sandbox.h>
diff --git a/src/init.cpp b/src/init.cpp
index f6e6f7cf43..f20c55dcb1 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -40,7 +40,7 @@
#include <node/chainstate.h>
#include <node/context.h>
#include <node/miner.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <policy/feerate.h>
#include <policy/fees.h>
#include <policy/policy.h>
@@ -1506,8 +1506,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
uiInterface.InitMessage(_("Verifying blocks…").translated);
auto check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
if (chainman.m_blockman.m_have_pruned && check_blocks > MIN_BLOCKS_TO_KEEP) {
- LogPrintf("Prune: pruned datadir may not have more than %d blocks; only checking available blocks\n",
- MIN_BLOCKS_TO_KEEP);
+ LogPrintfCategory(BCLog::PRUNE, "pruned datadir may not have more than %d blocks; only checking available blocks\n",
+ MIN_BLOCKS_TO_KEEP);
}
maybe_verify_error = VerifyLoadedChainstate(chainman,
fReset,
diff --git a/src/init/common.cpp b/src/init/common.cpp
index d4e45454d2..a0cdf44f47 100644
--- a/src/init/common.cpp
+++ b/src/init/common.cpp
@@ -9,7 +9,7 @@
#include <clientversion.h>
#include <fs.h>
#include <logging.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <tinyformat.h>
#include <util/system.h>
#include <util/time.h>
diff --git a/src/logging.h b/src/logging.h
index 8a896b6b33..50869ad89a 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -199,13 +199,18 @@ static inline void LogPrintf_(const std::string& logging_function, const std::st
}
}
-
#define LogPrintLevel_(category, level, ...) LogPrintf_(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__)
+// Log unconditionally.
#define LogPrintf(...) LogPrintLevel_(BCLog::LogFlags::NONE, BCLog::Level::None, __VA_ARGS__)
+// Log unconditionally, prefixing the output with the passed category name.
+#define LogPrintfCategory(category, ...) LogPrintLevel_(category, BCLog::Level::None, __VA_ARGS__)
+
// Use a macro instead of a function for conditional logging to prevent
// evaluating arguments when logging for the category is not enabled.
+
+// Log conditionally, prefixing the output with the passed category name.
#define LogPrint(category, ...) \
do { \
if (LogAcceptCategory((category), BCLog::Level::Debug)) { \
@@ -213,6 +218,7 @@ static inline void LogPrintf_(const std::string& logging_function, const std::st
} \
} while (0)
+// Log conditionally, prefixing the output with the passed category name and severity level.
#define LogPrintLevel(category, level, ...) \
do { \
if (LogAcceptCategory((category), (level))) { \
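A short usage sketch of the unconditional and conditional macros, reusing the BCLog::HTTP category as the call sites later in this patch do (the wrapper function is hypothetical):

```cpp
#include <logging.h>

void LogHttpStartup(int num_threads)
{
    // Always written to the log, with the category name prepended to the line.
    LogPrintfCategory(BCLog::HTTP, "starting %d worker threads\n", num_threads);

    // Only written when the "http" debug category is enabled (e.g. -debug=http);
    // the format arguments are not evaluated otherwise.
    LogPrint(BCLog::HTTP, "worker queue configured\n");
}
```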
diff --git a/src/net.cpp b/src/net.cpp
index 0cdf98c40f..d42f130af7 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -21,7 +21,7 @@
#include <net_permissions.h>
#include <netaddress.h>
#include <netbase.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <protocol.h>
#include <random.h>
#include <scheduler.h>
@@ -1404,13 +1404,12 @@ bool CConnman::InactivityCheck(const CNode& node) const
return false;
}
-bool CConnman::GenerateSelectSet(const std::vector<CNode*>& nodes,
- std::set<SOCKET>& recv_set,
- std::set<SOCKET>& send_set,
- std::set<SOCKET>& error_set)
+Sock::EventsPerSock CConnman::GenerateWaitSockets(Span<CNode* const> nodes)
{
+ Sock::EventsPerSock events_per_sock;
+
for (const ListenSocket& hListenSocket : vhListenSocket) {
- recv_set.insert(hListenSocket.sock->Get());
+ events_per_sock.emplace(hListenSocket.sock, Sock::Events{Sock::RECV});
}
for (CNode* pnode : nodes) {
@@ -1437,172 +1436,49 @@ bool CConnman::GenerateSelectSet(const std::vector<CNode*>& nodes,
continue;
}
- error_set.insert(pnode->m_sock->Get());
+ Sock::Event requested{0};
if (select_send) {
- send_set.insert(pnode->m_sock->Get());
- continue;
- }
- if (select_recv) {
- recv_set.insert(pnode->m_sock->Get());
- }
- }
-
- return !recv_set.empty() || !send_set.empty() || !error_set.empty();
-}
-
-#ifdef USE_POLL
-void CConnman::SocketEvents(const std::vector<CNode*>& nodes,
- std::set<SOCKET>& recv_set,
- std::set<SOCKET>& send_set,
- std::set<SOCKET>& error_set)
-{
- std::set<SOCKET> recv_select_set, send_select_set, error_select_set;
- if (!GenerateSelectSet(nodes, recv_select_set, send_select_set, error_select_set)) {
- interruptNet.sleep_for(std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS));
- return;
- }
-
- std::unordered_map<SOCKET, struct pollfd> pollfds;
- for (SOCKET socket_id : recv_select_set) {
- pollfds[socket_id].fd = socket_id;
- pollfds[socket_id].events |= POLLIN;
- }
-
- for (SOCKET socket_id : send_select_set) {
- pollfds[socket_id].fd = socket_id;
- pollfds[socket_id].events |= POLLOUT;
- }
-
- for (SOCKET socket_id : error_select_set) {
- pollfds[socket_id].fd = socket_id;
- // These flags are ignored, but we set them for clarity
- pollfds[socket_id].events |= POLLERR|POLLHUP;
- }
-
- std::vector<struct pollfd> vpollfds;
- vpollfds.reserve(pollfds.size());
- for (auto it : pollfds) {
- vpollfds.push_back(std::move(it.second));
- }
-
- if (poll(vpollfds.data(), vpollfds.size(), SELECT_TIMEOUT_MILLISECONDS) < 0) return;
-
- if (interruptNet) return;
-
- for (struct pollfd pollfd_entry : vpollfds) {
- if (pollfd_entry.revents & POLLIN) recv_set.insert(pollfd_entry.fd);
- if (pollfd_entry.revents & POLLOUT) send_set.insert(pollfd_entry.fd);
- if (pollfd_entry.revents & (POLLERR|POLLHUP)) error_set.insert(pollfd_entry.fd);
- }
-}
-#else
-void CConnman::SocketEvents(const std::vector<CNode*>& nodes,
- std::set<SOCKET>& recv_set,
- std::set<SOCKET>& send_set,
- std::set<SOCKET>& error_set)
-{
- std::set<SOCKET> recv_select_set, send_select_set, error_select_set;
- if (!GenerateSelectSet(nodes, recv_select_set, send_select_set, error_select_set)) {
- interruptNet.sleep_for(std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS));
- return;
- }
-
- //
- // Find which sockets have data to receive
- //
- struct timeval timeout;
- timeout.tv_sec = 0;
- timeout.tv_usec = SELECT_TIMEOUT_MILLISECONDS * 1000; // frequency to poll pnode->vSend
-
- fd_set fdsetRecv;
- fd_set fdsetSend;
- fd_set fdsetError;
- FD_ZERO(&fdsetRecv);
- FD_ZERO(&fdsetSend);
- FD_ZERO(&fdsetError);
- SOCKET hSocketMax = 0;
-
- for (SOCKET hSocket : recv_select_set) {
- FD_SET(hSocket, &fdsetRecv);
- hSocketMax = std::max(hSocketMax, hSocket);
- }
-
- for (SOCKET hSocket : send_select_set) {
- FD_SET(hSocket, &fdsetSend);
- hSocketMax = std::max(hSocketMax, hSocket);
- }
-
- for (SOCKET hSocket : error_select_set) {
- FD_SET(hSocket, &fdsetError);
- hSocketMax = std::max(hSocketMax, hSocket);
- }
-
- int nSelect = select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
-
- if (interruptNet)
- return;
-
- if (nSelect == SOCKET_ERROR)
- {
- int nErr = WSAGetLastError();
- LogPrintf("socket select error %s\n", NetworkErrorString(nErr));
- for (unsigned int i = 0; i <= hSocketMax; i++)
- FD_SET(i, &fdsetRecv);
- FD_ZERO(&fdsetSend);
- FD_ZERO(&fdsetError);
- if (!interruptNet.sleep_for(std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)))
- return;
- }
-
- for (SOCKET hSocket : recv_select_set) {
- if (FD_ISSET(hSocket, &fdsetRecv)) {
- recv_set.insert(hSocket);
+ requested = Sock::SEND;
+ } else if (select_recv) {
+ requested = Sock::RECV;
}
- }
- for (SOCKET hSocket : send_select_set) {
- if (FD_ISSET(hSocket, &fdsetSend)) {
- send_set.insert(hSocket);
- }
+ events_per_sock.emplace(pnode->m_sock, Sock::Events{requested});
}
- for (SOCKET hSocket : error_select_set) {
- if (FD_ISSET(hSocket, &fdsetError)) {
- error_set.insert(hSocket);
- }
- }
+ return events_per_sock;
}
-#endif
void CConnman::SocketHandler()
{
AssertLockNotHeld(m_total_bytes_sent_mutex);
- std::set<SOCKET> recv_set;
- std::set<SOCKET> send_set;
- std::set<SOCKET> error_set;
+ Sock::EventsPerSock events_per_sock;
{
const NodesSnapshot snap{*this, /*shuffle=*/false};
+ const auto timeout = std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS);
+
// Check for the readiness of the already connected sockets and the
// listening sockets in one call ("readiness" as in poll(2) or
// select(2)). If none are ready, wait for a short while and return
// empty sets.
- SocketEvents(snap.Nodes(), recv_set, send_set, error_set);
+ events_per_sock = GenerateWaitSockets(snap.Nodes());
+ if (events_per_sock.empty() || !events_per_sock.begin()->first->WaitMany(timeout, events_per_sock)) {
+ interruptNet.sleep_for(timeout);
+ }
// Service (send/receive) each of the already connected nodes.
- SocketHandlerConnected(snap.Nodes(), recv_set, send_set, error_set);
+ SocketHandlerConnected(snap.Nodes(), events_per_sock);
}
// Accept new connections from listening sockets.
- SocketHandlerListening(recv_set);
+ SocketHandlerListening(events_per_sock);
}
void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
- const std::set<SOCKET>& recv_set,
- const std::set<SOCKET>& send_set,
- const std::set<SOCKET>& error_set)
+ const Sock::EventsPerSock& events_per_sock)
{
AssertLockNotHeld(m_total_bytes_sent_mutex);
@@ -1621,9 +1497,12 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
if (!pnode->m_sock) {
continue;
}
- recvSet = recv_set.count(pnode->m_sock->Get()) > 0;
- sendSet = send_set.count(pnode->m_sock->Get()) > 0;
- errorSet = error_set.count(pnode->m_sock->Get()) > 0;
+ const auto it = events_per_sock.find(pnode->m_sock);
+ if (it != events_per_sock.end()) {
+ recvSet = it->second.occurred & Sock::RECV;
+ sendSet = it->second.occurred & Sock::SEND;
+ errorSet = it->second.occurred & Sock::ERR;
+ }
}
if (recvSet || errorSet)
{
@@ -1693,13 +1572,14 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
}
}
-void CConnman::SocketHandlerListening(const std::set<SOCKET>& recv_set)
+void CConnman::SocketHandlerListening(const Sock::EventsPerSock& events_per_sock)
{
for (const ListenSocket& listen_socket : vhListenSocket) {
if (interruptNet) {
return;
}
- if (recv_set.count(listen_socket.sock->Get()) > 0) {
+ const auto it = events_per_sock.find(listen_socket.sock);
+ if (it != events_per_sock.end() && it->second.occurred & Sock::RECV) {
AcceptConnection(listen_socket);
}
}
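The same Sock::EventsPerSock flow, condensed into a standalone sketch (the helper and its arguments are assumptions, and Sock is assumed to come from util/sock.h; the type and member names are the ones used in this patch):

```cpp
#include <util/sock.h>

#include <chrono>
#include <memory>

// Wait up to `timeout` for readiness on one listening and one peer socket,
// then inspect what occurred, mirroring CConnman::SocketHandler() above.
void WaitOnSockets(const std::shared_ptr<Sock>& listen_sock,
                   const std::shared_ptr<Sock>& peer_sock,
                   std::chrono::milliseconds timeout)
{
    Sock::EventsPerSock events_per_sock;
    events_per_sock.emplace(listen_sock, Sock::Events{Sock::RECV});
    events_per_sock.emplace(peer_sock, Sock::Events{Sock::RECV | Sock::SEND});

    // A single call fills in the `occurred` field of every entry.
    if (!events_per_sock.begin()->first->WaitMany(timeout, events_per_sock)) return;

    for (const auto& [sock, events] : events_per_sock) {
        if (events.occurred & Sock::RECV) { /* readable: accept or recv */ }
        if (events.occurred & Sock::SEND) { /* writable: flush send buffer */ }
        if (events.occurred & Sock::ERR) { /* error condition on the socket */ }
    }
}
```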
diff --git a/src/net.h b/src/net.h
index d6b605652b..bf169649c0 100644
--- a/src/net.h
+++ b/src/net.h
@@ -980,28 +980,9 @@ private:
/**
* Generate a collection of sockets to check for IO readiness.
* @param[in] nodes Select from these nodes' sockets.
- * @param[out] recv_set Sockets to check for read readiness.
- * @param[out] send_set Sockets to check for write readiness.
- * @param[out] error_set Sockets to check for errors.
- * @return true if at least one socket is to be checked (the returned set is not empty)
+ * @return sockets to check for readiness
*/
- bool GenerateSelectSet(const std::vector<CNode*>& nodes,
- std::set<SOCKET>& recv_set,
- std::set<SOCKET>& send_set,
- std::set<SOCKET>& error_set);
-
- /**
- * Check which sockets are ready for IO.
- * @param[in] nodes Select from these nodes' sockets.
- * @param[out] recv_set Sockets which are ready for read.
- * @param[out] send_set Sockets which are ready for write.
- * @param[out] error_set Sockets which have errors.
- * This calls `GenerateSelectSet()` to gather a list of sockets to check.
- */
- void SocketEvents(const std::vector<CNode*>& nodes,
- std::set<SOCKET>& recv_set,
- std::set<SOCKET>& send_set,
- std::set<SOCKET>& error_set);
+ Sock::EventsPerSock GenerateWaitSockets(Span<CNode* const> nodes);
/**
* Check connected and listening sockets for IO readiness and process them accordingly.
@@ -1010,23 +991,18 @@ private:
/**
* Do the read/write for connected sockets that are ready for IO.
- * @param[in] nodes Nodes to process. The socket of each node is checked against
- * `recv_set`, `send_set` and `error_set`.
- * @param[in] recv_set Sockets that are ready for read.
- * @param[in] send_set Sockets that are ready for send.
- * @param[in] error_set Sockets that have an exceptional condition (error).
+ * @param[in] nodes Nodes to process. The socket of each node is checked against `events_per_sock`.
+ * @param[in] events_per_sock Sockets that are ready for IO.
*/
void SocketHandlerConnected(const std::vector<CNode*>& nodes,
- const std::set<SOCKET>& recv_set,
- const std::set<SOCKET>& send_set,
- const std::set<SOCKET>& error_set)
+ const Sock::EventsPerSock& events_per_sock)
EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
/**
* Accept incoming connections, one from each read-ready listening socket.
- * @param[in] recv_set Sockets that are ready for read.
+ * @param[in] events_per_sock Sockets that are ready for IO.
*/
- void SocketHandlerListening(const std::set<SOCKET>& recv_set);
+ void SocketHandlerListening(const Sock::EventsPerSock& events_per_sock);
void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
void ThreadDNSAddressSeed() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 1d69975f55..751a03f01c 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -603,9 +603,11 @@ private:
/** Next time to check for stale tip */
std::chrono::seconds m_stale_tip_check_time{0s};
- /** Whether this node is running in blocks only mode */
+ /** Whether this node is running in -blocksonly mode */
const bool m_ignore_incoming_txs;
+ bool RejectIncomingTxs(const CNode& peer) const;
+
/** Whether we've completed initial sync yet, for determining when to turn
* on extra block-relay-only peers. */
bool m_initial_sync_finished{false};
@@ -1009,7 +1011,7 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{
AssertLockHeld(cs_main);
- // Never request high-bandwidth mode from peers if we're blocks-only. Our
+ // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
// mempool will not contain the transactions necessary to reconstruct the
// compact block.
if (m_ignore_incoming_txs) return;
@@ -3084,14 +3086,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- // Reject tx INVs when the -blocksonly setting is enabled, or this is a
- // block-relay-only peer
- bool reject_tx_invs{m_ignore_incoming_txs || pfrom.IsBlockOnlyConn()};
-
- // Allow peers with relay permission to send data other than blocks in blocks only mode
- if (pfrom.HasPermission(NetPermissionFlags::Relay)) {
- reject_tx_invs = false;
- }
+ const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
LOCK(cs_main);
@@ -3372,10 +3367,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
if (msg_type == NetMsgType::TX) {
- // Stop processing the transaction early if
- // 1) We are in blocks only mode and peer has no relay permission; OR
- // 2) This peer is a block-relay-only peer
- if ((m_ignore_incoming_txs && !pfrom.HasPermission(NetPermissionFlags::Relay)) || pfrom.IsBlockOnlyConn()) {
+ if (RejectIncomingTxs(pfrom)) {
LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
pfrom.fDisconnect = true;
return;
@@ -4643,7 +4635,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi
namespace {
class CompareInvMempoolOrder
{
- CTxMemPool *mp;
+ CTxMemPool* mp;
bool m_wtxid_relay;
public:
explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
@@ -4659,6 +4651,15 @@ public:
return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
}
};
+} // namespace
+
+bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
+{
+ // block-relay-only peers may never send txs to us
+ if (peer.IsBlockOnlyConn()) return true;
+ // In -blocksonly mode, peers need the 'relay' permission to send txs to us
+ if (m_ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
+ return false;
}
bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 2e52716649..e017f3f427 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -49,7 +49,7 @@ extern std::atomic_bool fReindex;
/** Pruning-related variables and constants */
/** True if we're running in -prune mode. */
extern bool fPruneMode;
-/** Number of MiB of block files that we're trying to stay below. */
+/** Number of bytes of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
// Because validation code takes pointers to the map's CBlockIndex objects, if
diff --git a/src/node/ui_interface.cpp b/src/node/interface_ui.cpp
index a3a6ede39a..370cde84f8 100644
--- a/src/node/ui_interface.cpp
+++ b/src/node/interface_ui.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <util/translation.h>
diff --git a/src/node/ui_interface.h b/src/node/interface_ui.h
index d02238b549..37c0f6392b 100644
--- a/src/node/ui_interface.h
+++ b/src/node/interface_ui.h
@@ -3,8 +3,8 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#ifndef BITCOIN_NODE_UI_INTERFACE_H
-#define BITCOIN_NODE_UI_INTERFACE_H
+#ifndef BITCOIN_NODE_INTERFACE_UI_H
+#define BITCOIN_NODE_INTERFACE_UI_H
#include <functional>
#include <memory>
@@ -120,4 +120,4 @@ constexpr auto AbortError = InitError;
extern CClientUIInterface uiInterface;
-#endif // BITCOIN_NODE_UI_INTERFACE_H
+#endif // BITCOIN_NODE_INTERFACE_UI_H
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 7752fb0f65..1905a4df29 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -22,7 +22,7 @@
#include <node/coin.h>
#include <node/context.h>
#include <node/transaction.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <policy/feerate.h>
#include <policy/fees.h>
#include <policy/policy.h>
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index 01db49d5cf..9db10feae4 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -62,7 +62,7 @@ BlockAssembler::Options::Options()
nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
}
-BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const Options& options)
+BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool, const Options& options)
: chainparams{chainstate.m_chainman.GetParams()},
m_mempool(mempool),
m_chainstate(chainstate)
@@ -87,7 +87,7 @@ static BlockAssembler::Options DefaultOptions()
return options;
}
-BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool)
+BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool)
: BlockAssembler(chainstate, mempool, DefaultOptions()) {}
void BlockAssembler::resetBlock()
@@ -121,7 +121,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end
- LOCK2(cs_main, m_mempool.cs);
+ LOCK(::cs_main);
CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip();
assert(pindexPrev != nullptr);
nHeight = pindexPrev->nHeight + 1;
@@ -138,7 +138,10 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
int nPackagesSelected = 0;
int nDescendantsUpdated = 0;
- addPackageTxs(nPackagesSelected, nDescendantsUpdated);
+ if (m_mempool) {
+ LOCK(m_mempool->cs);
+ addPackageTxs(*m_mempool, nPackagesSelected, nDescendantsUpdated);
+ }
int64_t nTime1 = GetTimeMicros();
@@ -232,15 +235,19 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
}
}
-int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
- indexed_modified_transaction_set &mapModifiedTx)
+/** Add descendants of given transactions to mapModifiedTx with ancestor
+ * state updated assuming given transactions are inBlock. Returns number
+ * of updated descendants. */
+static int UpdatePackagesForAdded(const CTxMemPool& mempool,
+ const CTxMemPool::setEntries& alreadyAdded,
+ indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs)
{
- AssertLockHeld(m_mempool.cs);
+ AssertLockHeld(mempool.cs);
int nDescendantsUpdated = 0;
for (CTxMemPool::txiter it : alreadyAdded) {
CTxMemPool::setEntries descendants;
- m_mempool.CalculateDescendants(it, descendants);
+ mempool.CalculateDescendants(it, descendants);
// Insert all descendants (not yet in block) into the modified set
for (CTxMemPool::txiter desc : descendants) {
if (alreadyAdded.count(desc)) {
@@ -262,23 +269,6 @@ int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& already
return nDescendantsUpdated;
}
-// Skip entries in mapTx that are already in a block or are present
-// in mapModifiedTx (which implies that the mapTx ancestor state is
-// stale due to ancestor inclusion in the block)
-// Also skip transactions that we've already failed to add. This can happen if
-// we consider a transaction in mapModifiedTx and it fails: we can then
-// potentially consider it again while walking mapTx. It's currently
-// guaranteed to fail again, but as a belt-and-suspenders check we put it in
-// failedTx and avoid re-evaluation, since the re-evaluation would be using
-// cached size/sigops/fee values that are not actually correct.
-bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx)
-{
- AssertLockHeld(m_mempool.cs);
-
- assert(it != m_mempool.mapTx.end());
- return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it);
-}
-
void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
// Sort package by ancestor count
@@ -300,9 +290,9 @@ void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::ve
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
-void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated)
+void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated)
{
- AssertLockHeld(m_mempool.cs);
+ AssertLockHeld(mempool.cs);
// mapModifiedTx will store sorted packages after they are modified
// because some of their txs are already in the block
@@ -310,7 +300,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
// Keep track of entries that failed inclusion, to avoid duplicate work
CTxMemPool::setEntries failedTx;
- CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = m_mempool.mapTx.get<ancestor_score>().begin();
+ CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
CTxMemPool::txiter iter;
// Limit the number of attempts to add transactions to the block when it is
@@ -319,12 +309,27 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
int64_t nConsecutiveFailed = 0;
- while (mi != m_mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
+ while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
// First try to find a new transaction in mapTx to evaluate.
- if (mi != m_mempool.mapTx.get<ancestor_score>().end() &&
- SkipMapTxEntry(m_mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) {
- ++mi;
- continue;
+ //
+ // Skip entries in mapTx that are already in a block or are present
+ // in mapModifiedTx (which implies that the mapTx ancestor state is
+ // stale due to ancestor inclusion in the block)
+ // Also skip transactions that we've already failed to add. This can happen if
+ // we consider a transaction in mapModifiedTx and it fails: we can then
+ // potentially consider it again while walking mapTx. It's currently
+ // guaranteed to fail again, but as a belt-and-suspenders check we put it in
+ // failedTx and avoid re-evaluation, since the re-evaluation would be using
+ // cached size/sigops/fee values that are not actually correct.
+ if (mi != mempool.mapTx.get<ancestor_score>().end()) {
+ auto it = mempool.mapTx.project<0>(mi);
+ assert(it != mempool.mapTx.end());
+ if (mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it)) {
+ ++mi;
+ continue;
+ }
}
// Now that mi is not stale, determine which transaction to evaluate:
@@ -332,13 +337,13 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
bool fUsingModified = false;
modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
- if (mi == m_mempool.mapTx.get<ancestor_score>().end()) {
+ if (mi == mempool.mapTx.get<ancestor_score>().end()) {
// We're out of entries in mapTx; use the entry from mapModifiedTx
iter = modit->iter;
fUsingModified = true;
} else {
// Try to compare the mapTx entry to the mapModifiedTx entry
- iter = m_mempool.mapTx.project<0>(mi);
+ iter = mempool.mapTx.project<0>(mi);
if (modit != mapModifiedTx.get<ancestor_score>().end() &&
CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) {
// The best entry in mapModifiedTx has higher score
@@ -393,7 +398,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
CTxMemPool::setEntries ancestors;
uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
std::string dummy;
- m_mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
+ mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
onlyUnconfirmed(ancestors);
ancestors.insert(iter);
@@ -423,7 +428,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
++nPackagesSelected;
// Update transactions that depend on each of these
- nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx);
+ nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx);
}
}
} // namespace node
diff --git a/src/node/miner.h b/src/node/miner.h
index 7cf8e3fb9e..26454df3df 100644
--- a/src/node/miner.h
+++ b/src/node/miner.h
@@ -147,7 +147,7 @@ private:
int64_t m_lock_time_cutoff;
const CChainParams& chainparams;
- const CTxMemPool& m_mempool;
+ const CTxMemPool* const m_mempool;
CChainState& m_chainstate;
public:
@@ -157,8 +157,8 @@ public:
CFeeRate blockMinFeeRate;
};
- explicit BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool);
- explicit BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const Options& options);
+ explicit BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool);
+ explicit BlockAssembler(CChainState& chainstate, const CTxMemPool* mempool, const Options& options);
/** Construct a new block template with coinbase to scriptPubKeyIn */
std::unique_ptr<CBlockTemplate> CreateNewBlock(const CScript& scriptPubKeyIn);
@@ -177,7 +177,7 @@ private:
/** Add transactions based on feerate including unconfirmed ancestors
* Increments nPackagesSelected / nDescendantsUpdated with corresponding
* statistics from the package selection (for logging statistics). */
- void addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs);
+ void addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs);
// helper functions for addPackageTxs()
/** Remove confirmed (inBlock) entries from given set */
@@ -189,15 +189,8 @@ private:
* These checks should always succeed, and they're here
* only as an extra check in case of suboptimal node configuration */
bool TestPackageTransactions(const CTxMemPool::setEntries& package) const;
- /** Return true if given transaction from mapTx has already been evaluated,
- * or if the transaction's cached data in mapTx is incorrect. */
- bool SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs);
/** Sort the package in an order that is valid to appear in a block */
void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries);
- /** Add descendants of given transactions to mapModifiedTx with ancestor
- * state updated assuming given transactions are inBlock. Returns number
- * of updated descendants. */
- int UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(m_mempool.cs);
};
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);
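With the mempool now passed by pointer, callers choose between selecting transactions from a mempool and producing a coinbase-only template. A sketch of both call patterns (the wrapper function and its parameters are assumptions; the BlockAssembler names are the ones from this patch):

```cpp
#include <node/miner.h>

#include <memory>

std::unique_ptr<node::CBlockTemplate> MakeTemplate(CChainState& chainstate,
                                                   const CTxMemPool& mempool,
                                                   const CScript& script_pub_key,
                                                   bool include_mempool_txs)
{
    if (include_mempool_txs) {
        // Select transactions from the mempool.
        return node::BlockAssembler{chainstate, &mempool}.CreateNewBlock(script_pub_key);
    }
    // With nullptr, the template contains only the coinbase.
    return node::BlockAssembler{chainstate, nullptr}.CreateNewBlock(script_pub_key);
}
```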
diff --git a/src/noui.cpp b/src/noui.cpp
index 6fe5c5638c..54cb5f3cbf 100644
--- a/src/noui.cpp
+++ b/src/noui.cpp
@@ -6,7 +6,7 @@
#include <noui.h>
#include <logging.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <util/translation.h>
#include <string>
diff --git a/src/psbt.h b/src/psbt.h
index 8fda889bb4..4a6d41076f 100644
--- a/src/psbt.h
+++ b/src/psbt.h
@@ -54,7 +54,7 @@ static constexpr uint8_t PSBT_SEPARATOR = 0x00;
// BIP 174 does not specify a maximum file size, but we set a limit anyway
// to prevent reading a stream indefinitely and running out of memory.
-const std::streamsize MAX_FILE_SIZE_PSBT = 100000000; // 100 MiB
+const std::streamsize MAX_FILE_SIZE_PSBT = 100000000; // 100 MB
// PSBT version number
static constexpr uint32_t PSBT_HIGHEST_VERSION = 0;
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index bd6a1f5eca..f11ddad30f 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -13,7 +13,7 @@
#include <interfaces/handler.h>
#include <interfaces/init.h>
#include <interfaces/node.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <noui.h>
#include <qt/bitcoingui.h>
#include <qt/clientmodel.h>
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index bfcdf6f316..6fea8e1cba 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -35,7 +35,7 @@
#include <chainparams.h>
#include <interfaces/handler.h>
#include <interfaces/node.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <util/system.h>
#include <util/translation.h>
#include <validation.h>
diff --git a/src/qt/main.cpp b/src/qt/main.cpp
index 6e772d58c5..38b0ac71a3 100644
--- a/src/qt/main.cpp
+++ b/src/qt/main.cpp
@@ -4,6 +4,7 @@
#include <qt/bitcoin.h>
+#include <compat.h>
#include <util/translation.h>
#include <util/url.h>
@@ -18,4 +19,7 @@ extern const std::function<std::string(const char*)> G_TRANSLATION_FUN = [](cons
};
UrlDecodeFn* const URL_DECODE = urlDecode;
-int main(int argc, char* argv[]) { return GuiMain(argc, argv); }
+MAIN_FUNCTION
+{
+ return GuiMain(argc, argv);
+}
diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp
index be6f604932..9f87c15c94 100644
--- a/src/qt/paymentserver.cpp
+++ b/src/qt/paymentserver.cpp
@@ -15,7 +15,7 @@
#include <chainparams.h>
#include <interfaces/node.h>
#include <key_io.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <policy/policy.h>
#include <util/system.h>
#include <wallet/wallet.h>
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 61d2782222..bd44d12781 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -21,7 +21,7 @@
#include <chainparams.h>
#include <interfaces/node.h>
#include <key_io.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <policy/fees.h>
#include <txmempool.h>
#include <validation.h>
diff --git a/src/qt/transactionoverviewwidget.cpp b/src/qt/transactionoverviewwidget.cpp
new file mode 100644
index 0000000000..360a1364fb
--- /dev/null
+++ b/src/qt/transactionoverviewwidget.cpp
@@ -0,0 +1,27 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <qt/transactionoverviewwidget.h>
+
+#include <qt/transactiontablemodel.h>
+
+#include <QListView>
+#include <QSize>
+#include <QSizePolicy>
+
+TransactionOverviewWidget::TransactionOverviewWidget(QWidget* parent)
+ : QListView(parent) {}
+
+QSize TransactionOverviewWidget::sizeHint() const
+{
+ return {sizeHintForColumn(TransactionTableModel::ToAddress), QListView::sizeHint().height()};
+}
+
+void TransactionOverviewWidget::showEvent(QShowEvent* event)
+{
+ Q_UNUSED(event);
+ QSizePolicy sp = sizePolicy();
+ sp.setHorizontalPolicy(QSizePolicy::Minimum);
+ setSizePolicy(sp);
+}
diff --git a/src/qt/transactionoverviewwidget.h b/src/qt/transactionoverviewwidget.h
index 2bdead7bc4..0572e84090 100644
--- a/src/qt/transactionoverviewwidget.h
+++ b/src/qt/transactionoverviewwidget.h
@@ -5,11 +5,8 @@
#ifndef BITCOIN_QT_TRANSACTIONOVERVIEWWIDGET_H
#define BITCOIN_QT_TRANSACTIONOVERVIEWWIDGET_H
-#include <qt/transactiontablemodel.h>
-
#include <QListView>
#include <QSize>
-#include <QSizePolicy>
QT_BEGIN_NAMESPACE
class QShowEvent;
@@ -21,21 +18,11 @@ class TransactionOverviewWidget : public QListView
Q_OBJECT
public:
- explicit TransactionOverviewWidget(QWidget* parent = nullptr) : QListView(parent) {}
-
- QSize sizeHint() const override
- {
- return {sizeHintForColumn(TransactionTableModel::ToAddress), QListView::sizeHint().height()};
- }
+ explicit TransactionOverviewWidget(QWidget* parent = nullptr);
+ QSize sizeHint() const override;
protected:
- void showEvent(QShowEvent* event) override
- {
- Q_UNUSED(event);
- QSizePolicy sp = sizePolicy();
- sp.setHorizontalPolicy(QSizePolicy::Minimum);
- setSizePolicy(sp);
- }
+ void showEvent(QShowEvent* event) override;
};
#endif // BITCOIN_QT_TRANSACTIONOVERVIEWWIDGET_H
diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp
index 47f3ba7e7f..b7432a0d77 100644
--- a/src/qt/transactionview.cpp
+++ b/src/qt/transactionview.cpp
@@ -17,7 +17,7 @@
#include <qt/transactiontablemodel.h>
#include <qt/walletmodel.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <chrono>
#include <optional>
diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp
index 11bea85b21..6b38e207d3 100644
--- a/src/qt/walletframe.cpp
+++ b/src/qt/walletframe.cpp
@@ -5,7 +5,7 @@
#include <qt/walletframe.h>
#include <fs.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <psbt.h>
#include <qt/guiutil.h>
#include <qt/overviewpage.h>
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 454c9a05d0..ab4d1cae3f 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -21,7 +21,7 @@
#include <interfaces/handler.h>
#include <interfaces/node.h>
#include <key_io.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <psbt.h>
#include <util/system.h> // for GetBoolArg
#include <util/translation.h>
diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp
index 2f92c57607..10fc0fb6d0 100644
--- a/src/qt/walletview.cpp
+++ b/src/qt/walletview.cpp
@@ -19,7 +19,7 @@
#include <qt/walletmodel.h>
#include <interfaces/node.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <util/strencodings.h>
#include <QAction>
diff --git a/src/rest.cpp b/src/rest.cpp
index 1b90baaf95..43c248b03b 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -799,10 +799,10 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
if (!maybe_chainman) return false;
ChainstateManager& chainman = *maybe_chainman;
{
- auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool& mempool) {
+ auto process_utxos = [&vOutPoints, &outs, &hits](const CCoinsView& view, const CTxMemPool* mempool) {
for (const COutPoint& vOutPoint : vOutPoints) {
Coin coin;
- bool hit = !mempool.isSpent(vOutPoint) && view.GetCoin(vOutPoint, coin);
+ bool hit = (!mempool || !mempool->isSpent(vOutPoint)) && view.GetCoin(vOutPoint, coin);
hits.push_back(hit);
if (hit) outs.emplace_back(std::move(coin));
}
@@ -815,10 +815,10 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
LOCK2(cs_main, mempool->cs);
CCoinsViewCache& viewChain = chainman.ActiveChainstate().CoinsTip();
CCoinsViewMemPool viewMempool(&viewChain, *mempool);
- process_utxos(viewMempool, *mempool);
+ process_utxos(viewMempool, mempool);
} else {
- LOCK(cs_main); // no need to lock mempool!
- process_utxos(chainman.ActiveChainstate().CoinsTip(), CTxMemPool());
+ LOCK(cs_main);
+ process_utxos(chainman.ActiveChainstate().CoinsTip(), nullptr);
}
for (size_t i = 0; i < hits.size(); ++i) {
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index bf4bfbb95f..9766a237c7 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -2040,13 +2040,7 @@ static RPCHelpMan scantxoutset()
"[scanobjects,...]"},
},
{
- RPCResult{"When action=='abort'", RPCResult::Type::BOOL, "", ""},
- RPCResult{"When action=='status' and no scan is in progress", RPCResult::Type::NONE, "", ""},
- RPCResult{"When action=='status' and scan is in progress", RPCResult::Type::OBJ, "", "",
- {
- {RPCResult::Type::NUM, "progress", "The scan progress"},
- }},
- RPCResult{"When action=='start'", RPCResult::Type::OBJ, "", "", {
+ RPCResult{"when action=='start'; only returns after scan completes", RPCResult::Type::OBJ, "", "", {
{RPCResult::Type::BOOL, "success", "Whether the scan was completed"},
{RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs scanned"},
{RPCResult::Type::NUM, "height", "The current block height (index)"},
@@ -2065,6 +2059,12 @@ static RPCHelpMan scantxoutset()
}},
{RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount of all found unspent outputs in " + CURRENCY_UNIT},
}},
+ RPCResult{"when action=='abort'", RPCResult::Type::BOOL, "success", "True if scan will be aborted (not necessarily before this RPC returns), or false if there is no scan to abort"},
+ RPCResult{"when action=='status' and a scan is currently in progress", RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "progress", "Approximate percent complete"},
+ }},
+ RPCResult{"when action=='status' and no scan is in progress - possibly already completed", RPCResult::Type::NONE, "", ""},
},
RPCExamples{
HelpExampleCli("scantxoutset", "start \'[\"" + EXAMPLE_DESCRIPTOR_RAW + "\"]\'") +
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 8fb6daf0cb..ea6db1e9a0 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -144,7 +144,7 @@ static UniValue generateBlocks(ChainstateManager& chainman, const CTxMemPool& me
{
UniValue blockHashes(UniValue::VARR);
while (nGenerate > 0 && !ShutdownRequested()) {
- std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler{chainman.ActiveChainstate(), mempool}.CreateNewBlock(coinbase_script));
+ std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler{chainman.ActiveChainstate(), &mempool}.CreateNewBlock(coinbase_script));
if (!pblocktemplate.get())
throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
CBlock *pblock = &pblocktemplate->block;
@@ -354,8 +354,7 @@ static RPCHelpMan generateblock()
{
LOCK(cs_main);
- CTxMemPool empty_mempool;
- std::unique_ptr<CBlockTemplate> blocktemplate(BlockAssembler{chainman.ActiveChainstate(), empty_mempool}.CreateNewBlock(coinbase_script));
+ std::unique_ptr<CBlockTemplate> blocktemplate(BlockAssembler{chainman.ActiveChainstate(), nullptr}.CreateNewBlock(coinbase_script));
if (!blocktemplate) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
}
@@ -753,7 +752,7 @@ static RPCHelpMan getblocktemplate()
// Create new block
CScript scriptDummy = CScript() << OP_TRUE;
- pblocktemplate = BlockAssembler{active_chainstate, mempool}.CreateNewBlock(scriptDummy);
+ pblocktemplate = BlockAssembler{active_chainstate, &mempool}.CreateNewBlock(scriptDummy);
if (!pblocktemplate)
throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
index dda770e001..9cb54de098 100644
--- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4
+++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
@@ -1,7 +1,7 @@
dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell.
AC_DEFUN([SECP_64BIT_ASM_CHECK],[
AC_MSG_CHECKING(for x86_64 assembly availability)
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#include <stdint.h>]],[[
uint64_t a = 11, tmp;
__asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx");
diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h
index 86ab7e556d..dddab346ae 100644
--- a/src/secp256k1/include/secp256k1.h
+++ b/src/secp256k1/include/secp256k1.h
@@ -141,9 +141,13 @@ typedef int (*secp256k1_nonce_function)(
# define SECP256K1_NO_BUILD
#endif
+/** At secp256k1 build-time DLL_EXPORT is defined when building objects destined
+ * for a shared library, but not for those intended for static libraries.
+ */
+
#ifndef SECP256K1_API
# if defined(_WIN32)
-# ifdef SECP256K1_BUILD
+# if defined(SECP256K1_BUILD) && defined(DLL_EXPORT)
# define SECP256K1_API __declspec(dllexport)
# else
# define SECP256K1_API
diff --git a/src/secp256k1/sage/prove_group_implementations.sage b/src/secp256k1/sage/prove_group_implementations.sage
index 96ce33506a..652bd87f11 100644
--- a/src/secp256k1/sage/prove_group_implementations.sage
+++ b/src/secp256k1/sage/prove_group_implementations.sage
@@ -40,29 +40,26 @@ def formula_secp256k1_gej_add_var(branch, a, b):
s2 = s2 * a.Z
h = -u1
h = h + u2
- i = -s1
- i = i + s2
+ i = -s2
+ i = i + s1
if branch == 2:
r = formula_secp256k1_gej_double_var(a)
return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r)
if branch == 3:
return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity())
- i2 = i^2
+ t = h * b.Z
+ rz = a.Z * t
h2 = h^2
+ h2 = -h2
h3 = h2 * h
- h = h * b.Z
- rz = a.Z * h
t = u1 * h2
- rx = t
- rx = rx * 2
+ rx = i^2
rx = rx + h3
- rx = -rx
- rx = rx + i2
- ry = -rx
- ry = ry + t
- ry = ry * i
+ rx = rx + t
+ rx = rx + t
+ t = t + rx
+ ry = t * i
h3 = h3 * s1
- h3 = -h3
ry = ry + h3
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
@@ -80,28 +77,25 @@ def formula_secp256k1_gej_add_ge_var(branch, a, b):
s2 = s2 * a.Z
h = -u1
h = h + u2
- i = -s1
- i = i + s2
+ i = -s2
+ i = i + s1
if (branch == 2):
r = formula_secp256k1_gej_double_var(a)
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
if (branch == 3):
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
- i2 = i^2
- h2 = h^2
- h3 = h * h2
rz = a.Z * h
+ h2 = h^2
+ h2 = -h2
+ h3 = h2 * h
t = u1 * h2
- rx = t
- rx = rx * 2
+ rx = i^2
rx = rx + h3
- rx = -rx
- rx = rx + i2
- ry = -rx
- ry = ry + t
- ry = ry * i
+ rx = rx + t
+ rx = rx + t
+ t = t + rx
+ ry = t * i
h3 = h3 * s1
- h3 = -h3
ry = ry + h3
return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
@@ -109,14 +103,15 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b):
"""libsecp256k1's secp256k1_gej_add_zinv_var"""
bzinv = b.Z^(-1)
if branch == 0:
- return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a)
- if branch == 1:
+ rinf = b.Infinity
bzinv2 = bzinv^2
bzinv3 = bzinv2 * bzinv
rx = b.X * bzinv2
ry = b.Y * bzinv3
rz = 1
- return (constraints(), constraints(zero={b.Infinity : 'b_finite'}, nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz))
+ return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz, rinf))
+ if branch == 1:
+ return (constraints(), constraints(zero={a.Infinity : 'a_finite'}, nonzero={b.Infinity : 'b_infinite'}), a)
azz = a.Z * bzinv
z12 = azz^2
u1 = a.X
@@ -126,29 +121,25 @@ def formula_secp256k1_gej_add_zinv_var(branch, a, b):
s2 = s2 * azz
h = -u1
h = h + u2
- i = -s1
- i = i + s2
+ i = -s2
+ i = i + s1
if branch == 2:
r = formula_secp256k1_gej_double_var(a)
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r)
if branch == 3:
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity())
- i2 = i^2
+ rz = a.Z * h
h2 = h^2
- h3 = h * h2
- rz = a.Z
- rz = rz * h
+ h2 = -h2
+ h3 = h2 * h
t = u1 * h2
- rx = t
- rx = rx * 2
+ rx = i^2
rx = rx + h3
- rx = -rx
- rx = rx + i2
- ry = -rx
- ry = ry + t
- ry = ry * i
+ rx = rx + t
+ rx = rx + t
+ t = t + rx
+ ry = t * i
h3 = h3 * s1
- h3 = -h3
ry = ry + h3
return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz))
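The three rewritten branches above compute the same point: with h = u2 - u1 and the sign of i flipped to i = s1 - s2, the explicit negations are folded into h2 = -h^2, so the results are still rx = i^2 - h^3 - 2*u1*h^2 and ry = (u1*h^2 - rx)*(s2 - s1) - s1*h^3, just with fewer negate/mul_int steps. A small self-contained equivalence check over a toy prime field (plain C++; the field and variable names here are illustrative, not the secp256k1 ones):

#include <cassert>
#include <cstdint>
#include <random>

// Toy prime field, small enough that all products fit in int64_t.
constexpr int64_t P = 1000003;
static int64_t md(int64_t x) { return ((x % P) + P) % P; }

int main()
{
    std::mt19937_64 rng{42};
    for (int n = 0; n < 1000; ++n) {
        const int64_t u1 = rng() % P, u2 = rng() % P, s1 = rng() % P, s2 = rng() % P;
        const int64_t h = md(u2 - u1);

        // Old sequence: i = s2 - s1, with explicit negations of rx and h3.
        const int64_t i_old = md(s2 - s1);
        const int64_t h2 = md(h * h);
        const int64_t h3 = md(h2 * h);
        const int64_t t = md(u1 * h2);
        const int64_t rx_old = md(md(i_old * i_old) - md(2 * t) - h3);
        const int64_t ry_old = md(md(md(t - rx_old) * i_old) - md(h3 * s1));

        // New sequence: i = s1 - s2, with the negation folded into h2 = -h^2.
        const int64_t i_new = md(s1 - s2);
        const int64_t h2n = md(-md(h * h));
        const int64_t h3n = md(h2n * h);
        const int64_t tn = md(u1 * h2n);
        const int64_t rx_new = md(md(i_new * i_new) + h3n + 2 * tn);
        const int64_t ry_new = md(md(tn + rx_new) * i_new + md(h3n * s1));

        assert(rx_old == rx_new && ry_old == ry_new);
    }
    return 0;
}

The same identity is what the rewritten C code in group_impl.h below relies on.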
diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c
index 3c145f306c..7eb3af28d7 100644
--- a/src/secp256k1/src/bench_internal.c
+++ b/src/secp256k1/src/bench_internal.c
@@ -254,6 +254,15 @@ void bench_group_add_affine_var(void* arg, int iters) {
}
}
+void bench_group_add_zinv_var(void* arg, int iters) {
+ int i;
+ bench_inv *data = (bench_inv*)arg;
+
+ for (i = 0; i < iters; i++) {
+ secp256k1_gej_add_zinv_var(&data->gej[0], &data->gej[0], &data->ge[1], &data->gej[0].y);
+ }
+}
+
void bench_group_to_affine_var(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
@@ -376,6 +385,7 @@ int main(int argc, char **argv) {
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, iters*10);
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10);
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10);
+ if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_zinv_var", bench_group_add_zinv_var, bench_setup, NULL, &data, 10, iters*10);
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters);
if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters);
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index b19b02a01f..63735ab682 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -330,15 +330,14 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
}
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
- /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
- secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
+ /* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
+ secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
*r = *b;
return;
}
-
if (b->infinity) {
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 1);
@@ -347,7 +346,6 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
return;
}
- r->infinity = 0;
secp256k1_fe_sqr(&z22, &b->z);
secp256k1_fe_sqr(&z12, &a->z);
secp256k1_fe_mul(&u1, &a->x, &z22);
@@ -355,7 +353,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z);
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
- secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
+ secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, rzr);
@@ -367,24 +365,33 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
}
return;
}
- secp256k1_fe_sqr(&i2, &i);
- secp256k1_fe_sqr(&h2, &h);
- secp256k1_fe_mul(&h3, &h, &h2);
- secp256k1_fe_mul(&h, &h, &b->z);
+
+ r->infinity = 0;
+ secp256k1_fe_mul(&t, &h, &b->z);
if (rzr != NULL) {
- *rzr = h;
+ *rzr = t;
}
- secp256k1_fe_mul(&r->z, &a->z, &h);
+ secp256k1_fe_mul(&r->z, &a->z, &t);
+
+ secp256k1_fe_sqr(&h2, &h);
+ secp256k1_fe_negate(&h2, &h2, 1);
+ secp256k1_fe_mul(&h3, &h2, &h);
secp256k1_fe_mul(&t, &u1, &h2);
- r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
- secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
- secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
+
+ secp256k1_fe_sqr(&r->x, &i);
+ secp256k1_fe_add(&r->x, &h3);
+ secp256k1_fe_add(&r->x, &t);
+ secp256k1_fe_add(&r->x, &t);
+
+ secp256k1_fe_add(&t, &r->x);
+ secp256k1_fe_mul(&r->y, &t, &i);
+ secp256k1_fe_mul(&h3, &h3, &s1);
secp256k1_fe_add(&r->y, &h3);
}
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
- /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
- secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
+ /* 8 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+ secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
secp256k1_gej_set_ge(r, b);
@@ -397,7 +404,6 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
*r = *a;
return;
}
- r->infinity = 0;
secp256k1_fe_sqr(&z12, &a->z);
u1 = a->x; secp256k1_fe_normalize_weak(&u1);
@@ -405,7 +411,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
s1 = a->y; secp256k1_fe_normalize_weak(&s1);
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
- secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
+ secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, rzr);
@@ -417,28 +423,33 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
}
return;
}
- secp256k1_fe_sqr(&i2, &i);
- secp256k1_fe_sqr(&h2, &h);
- secp256k1_fe_mul(&h3, &h, &h2);
+
+ r->infinity = 0;
if (rzr != NULL) {
*rzr = h;
}
secp256k1_fe_mul(&r->z, &a->z, &h);
+
+ secp256k1_fe_sqr(&h2, &h);
+ secp256k1_fe_negate(&h2, &h2, 1);
+ secp256k1_fe_mul(&h3, &h2, &h);
secp256k1_fe_mul(&t, &u1, &h2);
- r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
- secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
- secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
+
+ secp256k1_fe_sqr(&r->x, &i);
+ secp256k1_fe_add(&r->x, &h3);
+ secp256k1_fe_add(&r->x, &t);
+ secp256k1_fe_add(&r->x, &t);
+
+ secp256k1_fe_add(&t, &r->x);
+ secp256k1_fe_mul(&r->y, &t, &i);
+ secp256k1_fe_mul(&h3, &h3, &s1);
secp256k1_fe_add(&r->y, &h3);
}
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
- /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
- secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
+ /* 9 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+ secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
- if (b->infinity) {
- *r = *a;
- return;
- }
if (a->infinity) {
secp256k1_fe bzinv2, bzinv3;
r->infinity = b->infinity;
@@ -449,7 +460,10 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
secp256k1_fe_set_int(&r->z, 1);
return;
}
- r->infinity = 0;
+ if (b->infinity) {
+ *r = *a;
+ return;
+ }
/** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
* secp256k1's isomorphism we can multiply the Z coordinates on both sides
@@ -467,7 +481,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
s1 = a->y; secp256k1_fe_normalize_weak(&s1);
secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
- secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
+ secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, NULL);
@@ -476,14 +490,23 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
}
return;
}
- secp256k1_fe_sqr(&i2, &i);
+
+ r->infinity = 0;
+ secp256k1_fe_mul(&r->z, &a->z, &h);
+
secp256k1_fe_sqr(&h2, &h);
- secp256k1_fe_mul(&h3, &h, &h2);
- r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h);
+ secp256k1_fe_negate(&h2, &h2, 1);
+ secp256k1_fe_mul(&h3, &h2, &h);
secp256k1_fe_mul(&t, &u1, &h2);
- r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
- secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
- secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
+
+ secp256k1_fe_sqr(&r->x, &i);
+ secp256k1_fe_add(&r->x, &h3);
+ secp256k1_fe_add(&r->x, &t);
+ secp256k1_fe_add(&r->x, &t);
+
+ secp256k1_fe_add(&t, &r->x);
+ secp256k1_fe_mul(&r->y, &t, &i);
+ secp256k1_fe_mul(&h3, &h3, &s1);
secp256k1_fe_add(&r->y, &h3);
}
diff --git a/src/shutdown.cpp b/src/shutdown.cpp
index fdf726b5f1..1dbc55aeb5 100644
--- a/src/shutdown.cpp
+++ b/src/shutdown.cpp
@@ -10,7 +10,7 @@
#endif
#include <logging.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <util/tokenpipe.h>
#include <warnings.h>
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 6907749c6d..b7ef479675 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -202,7 +202,10 @@ void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
size_t Win32LockedPageAllocator::GetLimit()
{
- // TODO is there a limit on Windows, how to get it?
+ size_t min, max;
+ if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) {
+ return min;
+ }
return std::numeric_limits<size_t>::max();
}
#endif
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index 875241094d..78b82b9b20 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -54,7 +54,7 @@ constexpr long SHARED_TX_OFFSET{3};
BOOST_AUTO_TEST_CASE(SimpleRoundTripTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
TestMemPoolEntryHelper entry;
CBlock block(BuildBlockTestCase());
@@ -137,7 +137,7 @@ public:
BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
TestMemPoolEntryHelper entry;
CBlock block(BuildBlockTestCase());
@@ -207,7 +207,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
TestMemPoolEntryHelper entry;
CBlock block(BuildBlockTestCase());
@@ -258,7 +258,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
CMutableTransaction coinbase;
coinbase.vin.resize(1);
coinbase.vin[0].scriptSig.resize(10);
diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp
index ba1eacfc78..c31e4e51f7 100644
--- a/src/test/blockfilter_index_tests.cpp
+++ b/src/test/blockfilter_index_tests.cpp
@@ -65,7 +65,7 @@ CBlock BuildChainTestingSetup::CreateBlock(const CBlockIndex* prev,
const std::vector<CMutableTransaction>& txns,
const CScript& scriptPubKey)
{
- std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool}.CreateNewBlock(scriptPubKey);
+ std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(scriptPubKey);
CBlock& block = pblocktemplate->block;
block.hashPrevBlock = prev->GetBlockHash();
block.nTime = prev->nTime + 1;
diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp
index 771d7a11cb..4f40608c4f 100644
--- a/src/test/fuzz/tx_pool.cpp
+++ b/src/test/fuzz/tx_pool.cpp
@@ -97,7 +97,7 @@ void Finish(FuzzedDataProvider& fuzzed_data_provider, MockedTxPool& tx_pool, CCh
BlockAssembler::Options options;
options.nBlockMaxWeight = fuzzed_data_provider.ConsumeIntegralInRange(0U, MAX_BLOCK_WEIGHT);
options.blockMinFeeRate = CFeeRate{ConsumeMoney(fuzzed_data_provider, /*max=*/COIN)};
- auto assembler = BlockAssembler{chainstate, *static_cast<CTxMemPool*>(&tx_pool), options};
+ auto assembler = BlockAssembler{chainstate, &tx_pool, options};
auto block_template = assembler.CreateNewBlock(CScript{} << OP_TRUE);
Assert(block_template->block.vtx.size() >= 1);
}
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index 033c6e18d5..883698aff1 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -223,6 +223,15 @@ bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event*
return true;
}
+bool FuzzedSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
+{
+ for (auto& [sock, events] : events_per_sock) {
+ (void)sock;
+ events.occurred = m_fuzzed_data_provider.ConsumeBool() ? events.requested : 0;
+ }
+ return true;
+}
+
bool FuzzedSock::IsConnected(std::string& errmsg) const
{
if (m_fuzzed_data_provider.ConsumeBool()) {
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index 3fc6fa1cd5..66d00b1767 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -71,6 +71,8 @@ public:
bool Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred = nullptr) const override;
+ bool WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const override;
+
bool IsConnected(std::string& errmsg) const override;
};
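The FuzzedSock override and the test-only socket in src/test/util/net.h now implement the same WaitMany interface that Sock::Wait is rebuilt on top of in src/util/sock.cpp further down: callers fill an EventsPerSock map, keyed by shared_ptr<const Sock>, with the requested events and read back events.occurred afterwards. A rough calling sketch (assuming the Sock::Events, Sock::RECV and Sock::EventsPerSock definitions from src/util/sock.h):

#include <util/sock.h>

#include <chrono>
#include <memory>

// Sketch: wait up to 50 ms for either of two sockets to become readable
// using a single WaitMany() call instead of two separate Wait() calls.
void WaitForEither(const std::shared_ptr<const Sock>& a, const std::shared_ptr<const Sock>& b)
{
    Sock::EventsPerSock events_per_sock{
        {a, Sock::Events{Sock::RECV}},
        {b, Sock::Events{Sock::RECV}},
    };
    if (!a->WaitMany(std::chrono::milliseconds{50}, events_per_sock)) {
        return; // the underlying poll/select failed
    }
    for (const auto& [sock, events] : events_per_sock) {
        if (events.occurred & Sock::RECV) {
            // *sock has data ready; a subsequent receive should not block.
        }
    }
}

Passing Events{requested} per socket mirrors how Sock::Wait itself builds a one-element map around a no-op-deleter shared_ptr in the sock.cpp hunk below.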
diff --git a/src/test/logging_tests.cpp b/src/test/logging_tests.cpp
index 3f6a605945..5a5e3b3f1f 100644
--- a/src/test/logging_tests.cpp
+++ b/src/test/logging_tests.cpp
@@ -103,6 +103,7 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros, LogSetup)
LogPrintLevel(BCLog::NET, BCLog::Level::Info, "foo8: %s\n", "bar8");
LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "foo9: %s\n", "bar9");
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "foo10: %s\n", "bar10");
+ LogPrintfCategory(BCLog::VALIDATION, "foo11: %s\n", "bar11");
std::ifstream file{tmp_log_path};
std::vector<std::string> log_lines;
for (std::string log; std::getline(file, log);) {
@@ -114,7 +115,9 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros, LogSetup)
"[net:debug] foo7: bar7",
"[net:info] foo8: bar8",
"[net:warning] foo9: bar9",
- "[net:error] foo10: bar10"};
+ "[net:error] foo10: bar10",
+ "[validation] foo11: bar11",
+ };
BOOST_CHECK_EQUAL_COLLECTIONS(log_lines.begin(), log_lines.end(), expected.begin(), expected.end());
}
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 89424a0cd2..bc63122025 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
}
- CTxMemPool testPool;
+ CTxMemPool& testPool = *Assert(m_node.mempool);
LOCK2(cs_main, testPool.cs);
// Nothing in pool, remove should do nothing:
@@ -108,12 +108,12 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
BOOST_CHECK_EQUAL(testPool.size(), 0U);
}
-template<typename name>
-static void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
+template <typename name>
+static void CheckSort(CTxMemPool& pool, std::vector<std::string>& sortedOrder) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size());
typename CTxMemPool::indexed_transaction_set::index<name>::type::iterator it = pool.mapTx.get<name>().begin();
- int count=0;
+ int count = 0;
for (; it != pool.mapTx.get<name>().end(); ++it, ++count) {
BOOST_CHECK_EQUAL(it->GetTx().GetHash().ToString(), sortedOrder[count]);
}
@@ -121,7 +121,7 @@ static void CheckSort(CTxMemPool &pool, std::vector<std::string> &sortedOrder) E
BOOST_AUTO_TEST_CASE(MempoolIndexingTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
@@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(MempoolIndexingTest)
BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
@@ -423,7 +423,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
{
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
@@ -594,7 +594,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
{
size_t ancestors, descendants;
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
@@ -753,7 +753,7 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTestsDiamond)
{
size_t ancestors, descendants;
- CTxMemPool pool;
+ CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(::cs_main, pool.cs);
TestMemPoolEntryHelper entry;
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 439ad174b3..eca4fbf15c 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -52,7 +52,7 @@ BlockAssembler MinerTestingSetup::AssemblerForTest(const CChainParams& params)
options.nBlockMaxWeight = MAX_BLOCK_WEIGHT;
options.blockMinFeeRate = blockMinFeeRate;
- return BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool, options};
+ return BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get(), options};
}
constexpr static struct {
diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp
index 06877898a4..3f66a8fc46 100644
--- a/src/test/policyestimator_tests.cpp
+++ b/src/test/policyestimator_tests.cpp
@@ -12,12 +12,12 @@
#include <boost/test/unit_test.hpp>
-BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, BasicTestingSetup)
+BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, ChainTestingSetup)
BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
{
- CBlockPolicyEstimator feeEst;
- CTxMemPool mpool(&feeEst);
+ CBlockPolicyEstimator& feeEst = *Assert(m_node.fee_estimator);
+ CTxMemPool& mpool = *Assert(m_node.mempool);
LOCK2(cs_main, mpool.cs);
TestMemPoolEntryHelper entry;
CAmount basefee(2000);
diff --git a/src/test/util/mining.cpp b/src/test/util/mining.cpp
index a6d624fe84..88cf9647e7 100644
--- a/src/test/util/mining.cpp
+++ b/src/test/util/mining.cpp
@@ -77,7 +77,7 @@ CTxIn MineBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey)
std::shared_ptr<CBlock> PrepareBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey)
{
auto block = std::make_shared<CBlock>(
- BlockAssembler{Assert(node.chainman)->ActiveChainstate(), *Assert(node.mempool)}
+ BlockAssembler{Assert(node.chainman)->ActiveChainstate(), Assert(node.mempool.get())}
.CreateNewBlock(coinbase_scriptPubKey)
->block);
diff --git a/src/test/util/net.h b/src/test/util/net.h
index e980fe4967..37d278645a 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -162,6 +162,15 @@ public:
return true;
}
+ bool WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const override
+ {
+ for (auto& [sock, events] : events_per_sock) {
+ (void)sock;
+ events.occurred = events.requested;
+ }
+ return true;
+ }
+
private:
const std::string m_contents;
mutable size_t m_consumed;
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 8f03481f72..d9fff85bf5 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -277,8 +277,7 @@ CBlock TestChain100Setup::CreateBlock(
const CScript& scriptPubKey,
CChainState& chainstate)
{
- CTxMemPool empty_pool;
- CBlock block = BlockAssembler{chainstate, empty_pool}.CreateNewBlock(scriptPubKey)->block;
+ CBlock block = BlockAssembler{chainstate, nullptr}.CreateNewBlock(scriptPubKey)->block;
Assert(block.vtx.size() == 1);
for (const CMutableTransaction& tx : txns) {
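The BlockAssembler call sites in these tests switch from a CTxMemPool reference to a pointer, so code that only needs a coinbase-only template can pass nullptr instead of constructing a throwaway empty pool, as the size-1 assertion above relies on. A minimal sketch of that pattern (assuming BlockAssembler and CBlockTemplate from src/node/miner.h are in scope, as in the tests):

#include <node/miner.h>

#include <memory>

// Sketch: with no mempool attached, the assembled template is expected to
// contain only the coinbase transaction.
std::unique_ptr<CBlockTemplate> CoinbaseOnlyTemplate(CChainState& chainstate, const CScript& script_pub_key)
{
    return BlockAssembler{chainstate, /*mempool=*/nullptr}.CreateNewBlock(script_pub_key);
}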
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index 331da691b5..7ade4d8195 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -65,7 +65,7 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash)
static int i = 0;
static uint64_t time = Params().GenesisBlock().nTime;
- auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool}.CreateNewBlock(CScript{} << i++ << OP_TRUE);
+ auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(CScript{} << i++ << OP_TRUE);
auto pblock = std::make_shared<CBlock>(ptemplate->block);
pblock->hashPrevBlock = prev_hash;
pblock->nTime = ++time;
@@ -327,7 +327,7 @@ BOOST_AUTO_TEST_CASE(witness_commitment_index)
{
CScript pubKey;
pubKey << 1 << OP_TRUE;
- auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), *m_node.mempool}.CreateNewBlock(pubKey);
+ auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(pubKey);
CBlock pblock = ptemplate->block;
CTxOut witness;
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
index 98cb713a81..102de74389 100644
--- a/src/test/validation_chainstate_tests.cpp
+++ b/src/test/validation_chainstate_tests.cpp
@@ -30,7 +30,7 @@ BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
ChainstateManager manager{chainman_opts};
WITH_LOCK(::cs_main, manager.m_blockman.m_block_tree_db = std::make_unique<CBlockTreeDB>(1 << 20, true));
- CTxMemPool mempool;
+ CTxMemPool& mempool = *Assert(m_node.mempool);
//! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint {
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
index a34895d4ae..012169b17e 100644
--- a/src/test/validation_flush_tests.cpp
+++ b/src/test/validation_flush_tests.cpp
@@ -20,7 +20,7 @@ BOOST_FIXTURE_TEST_SUITE(validation_flush_tests, ChainTestingSetup)
//!
BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
{
- CTxMemPool mempool;
+ CTxMemPool& mempool = *Assert(m_node.mempool);
BlockManager blockman{};
CChainState chainstate{&mempool, blockman, *Assert(m_node.chainman)};
chainstate.InitCoinsDB(/*cache_size_bytes=*/1 << 10, /*in_memory=*/true, /*should_wipe=*/false);
diff --git a/src/timedata.cpp b/src/timedata.cpp
index d0ca609a48..ceee08e68c 100644
--- a/src/timedata.cpp
+++ b/src/timedata.cpp
@@ -9,7 +9,7 @@
#include <timedata.h>
#include <netaddress.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <sync.h>
#include <tinyformat.h>
#include <util/system.h>
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index 82b8529d76..d6e792a55f 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -421,7 +421,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
return;
}
service = LookupNumeric(std::string(service_id+".onion"), Params().GetDefaultPort());
- LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
+ LogPrintfCategory(BCLog::TOR, "Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
LogPrint(BCLog::TOR, "Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
} else {
diff --git a/src/univalue/.cirrus.yml b/src/univalue/.cirrus.yml
deleted file mode 100644
index f140fee12b..0000000000
--- a/src/univalue/.cirrus.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-env:
- MAKEJOBS: "-j4"
- RUN_TESTS: "true"
- BASE_OUTDIR: "$CIRRUS_WORKING_DIR/out_dir_base"
- DEBIAN_FRONTEND: "noninteractive"
-
-task:
- container:
- image: ubuntu:focal
- cpu: 1
- memory: 1G
- greedy: true # https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4
-
- matrix:
- - name: "gcc"
- env:
- CC: "gcc"
- CXX: "g++"
- APT_PKGS: "gcc"
- - name: "clang"
- env:
- CC: "clang"
- CXX: "clang++"
- APT_PKGS: "clang"
- - name: "mingw"
- env:
- CC: ""
- CXX: ""
- UNIVALUE_CONFIG: "--host=x86_64-w64-mingw32"
- APT_PKGS: "g++-mingw-w64-x86-64 gcc-mingw-w64-x86-64 binutils-mingw-w64-x86-64"
- RUN_TESTS: "false"
-
- install_script:
- - apt update
- - apt install -y pkg-config build-essential libtool autotools-dev automake bsdmainutils
- - apt install -y $APT_PKGS
- autogen_script:
- - ./autogen.sh
- configure_script:
- - ./configure --cache-file=config.cache --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib $UNIVALUE_CONFIG
- make_script:
- - make $MAKEJOBS V=1
- test_script:
- - if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS distcheck; fi
diff --git a/src/univalue/COPYING b/src/univalue/COPYING
deleted file mode 100644
index 1fb429f356..0000000000
--- a/src/univalue/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am
deleted file mode 100644
index 476f14b922..0000000000
--- a/src/univalue/Makefile.am
+++ /dev/null
@@ -1,58 +0,0 @@
-include sources.mk
-ACLOCAL_AMFLAGS = -I build-aux/m4
-.PHONY: gen FORCE
-.INTERMEDIATE: $(GENBIN)
-
-include_HEADERS = $(UNIVALUE_DIST_HEADERS_INT)
-noinst_HEADERS = $(UNIVALUE_LIB_HEADERS_INT)
-
-lib_LTLIBRARIES = libunivalue.la
-
-pkgconfigdir = $(libdir)/pkgconfig
-pkgconfig_DATA = pc/libunivalue.pc
-
-libunivalue_la_SOURCES = $(UNIVALUE_LIB_SOURCES_INT)
-
-libunivalue_la_LDFLAGS = \
- -version-info $(LIBUNIVALUE_CURRENT):$(LIBUNIVALUE_REVISION):$(LIBUNIVALUE_AGE) \
- -no-undefined
-libunivalue_la_CXXFLAGS = -I$(top_srcdir)/include
-
-TESTS = test/object test/unitester test/no_nul
-
-GENBIN = gen/gen$(BUILD_EXEEXT)
-GEN_SRCS = gen/gen.cpp
-
-$(GENBIN): $(GEN_SRCS)
- @echo Building $@
- $(AM_V_at)c++ -I$(top_srcdir)/include -o $@ $<
-
-gen: $(GENBIN) FORCE
- @echo Updating lib/univalue_escapes.h
- $(AM_V_at)$(GENBIN) > lib/univalue_escapes.h
-
-noinst_PROGRAMS = $(TESTS) test/test_json
-
-test_unitester_SOURCES = $(UNIVALUE_TEST_UNITESTER_INT)
-test_unitester_LDADD = libunivalue.la
-test_unitester_CXXFLAGS = -I$(top_srcdir)/include -DJSON_TEST_SRC=\"$(srcdir)/$(UNIVALUE_TEST_DATA_DIR_INT)\"
-test_unitester_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-
-test_test_json_SOURCES = $(UNIVALUE_TEST_JSON_INT)
-test_test_json_LDADD = libunivalue.la
-test_test_json_CXXFLAGS = -I$(top_srcdir)/include
-test_test_json_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-
-test_no_nul_SOURCES = $(UNIVALUE_TEST_NO_NUL_INT)
-test_no_nul_LDADD = libunivalue.la
-test_no_nul_CXXFLAGS = -I$(top_srcdir)/include
-test_no_nul_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-
-test_object_SOURCES = $(UNIVALUE_TEST_OBJECT_INT)
-test_object_LDADD = libunivalue.la
-test_object_CXXFLAGS = -I$(top_srcdir)/include
-test_object_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-
-TEST_FILES = $(UNIVALUE_TEST_FILES_INT)
-
-EXTRA_DIST=$(UNIVALUE_TEST_FILES_INT) $(GEN_SRCS)
diff --git a/src/univalue/README.md b/src/univalue/README.md
deleted file mode 100644
index d622f5b1e0..0000000000
--- a/src/univalue/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# UniValue
-
-## Summary
-
-A universal value class, with JSON encoding and decoding.
-
-UniValue is an abstract data type that may be a null, boolean, string,
-number, array container, or a key/value dictionary container, nested to
-an arbitrary depth.
-
-This class is aligned with the JSON standard, [RFC
-7159](https://tools.ietf.org/html/rfc7159.html).
-
-## Library usage
-
-This is a fork of univalue used by Bitcoin Core. It is not maintained for usage
-by other projects. Notably, the API is broken in non-backward-compatible ways.
-
-Other projects looking for a maintained library should use the upstream
-univalue at https://github.com/jgarzik/univalue.
diff --git a/src/univalue/autogen.sh b/src/univalue/autogen.sh
deleted file mode 100755
index 4b38721faa..0000000000
--- a/src/univalue/autogen.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-set -e
-srcdir="$(dirname $0)"
-cd "$srcdir"
-if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="`which glibtoolize 2>/dev/null`"; then
- LIBTOOLIZE="${GLIBTOOLIZE}"
- export LIBTOOLIZE
-fi
-autoreconf --install --force
diff --git a/src/univalue/build-aux/m4/.gitignore b/src/univalue/build-aux/m4/.gitignore
deleted file mode 100644
index f063686524..0000000000
--- a/src/univalue/build-aux/m4/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/*.m4
diff --git a/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 b/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4
deleted file mode 100644
index f7e5137003..0000000000
--- a/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4
+++ /dev/null
@@ -1,962 +0,0 @@
-# ===========================================================================
-# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
-#
-# DESCRIPTION
-#
-# Check for baseline language coverage in the compiler for the specified
-# version of the C++ standard. If necessary, add switches to CXX and
-# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard)
-# or '14' (for the C++14 standard).
-#
-# The second argument, if specified, indicates whether you insist on an
-# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
-# -std=c++11). If neither is specified, you get whatever works, with
-# preference for no added switch, and then for an extended mode.
-#
-# The third argument, if specified 'mandatory' or if left unspecified,
-# indicates that baseline support for the specified C++ standard is
-# required and that the macro should error out if no mode with that
-# support is found. If specified 'optional', then configuration proceeds
-# regardless, after defining HAVE_CXX${VERSION} if and only if a
-# supporting mode is found.
-#
-# LICENSE
-#
-# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
-# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
-# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
-# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
-# Copyright (c) 2015 Paul Norman <penorman@mac.com>
-# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
-# Copyright (c) 2016, 2018 Krzesimir Nowak <qdlacz@gmail.com>
-# Copyright (c) 2019 Enji Cooper <yaneurabeya@gmail.com>
-# Copyright (c) 2020 Jason Merrill <jason@redhat.com>
-#
-# Copying and distribution of this file, with or without modification, are
-# permitted in any medium without royalty provided the copyright notice
-# and this notice are preserved. This file is offered as-is, without any
-# warranty.
-
-#serial 12
-
-dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
-dnl (serial version number 13).
-
-AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
- m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
- [$1], [14], [ax_cxx_compile_alternatives="14 1y"],
- [$1], [17], [ax_cxx_compile_alternatives="17 1z"],
- [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
- m4_if([$2], [], [],
- [$2], [ext], [],
- [$2], [noext], [],
- [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
- m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
- [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
- [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
- [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
- AC_LANG_PUSH([C++])dnl
- ac_success=no
-
- m4_if([$2], [], [dnl
- AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
- ax_cv_cxx_compile_cxx$1,
- [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
- [ax_cv_cxx_compile_cxx$1=yes],
- [ax_cv_cxx_compile_cxx$1=no])])
- if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
- ac_success=yes
- fi])
-
- m4_if([$2], [noext], [], [dnl
- if test x$ac_success = xno; then
- for alternative in ${ax_cxx_compile_alternatives}; do
- switch="-std=gnu++${alternative}"
- cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
- AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
- $cachevar,
- [ac_save_CXX="$CXX"
- CXX="$CXX $switch"
- AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
- [eval $cachevar=yes],
- [eval $cachevar=no])
- CXX="$ac_save_CXX"])
- if eval test x\$$cachevar = xyes; then
- CXX="$CXX $switch"
- if test -n "$CXXCPP" ; then
- CXXCPP="$CXXCPP $switch"
- fi
- ac_success=yes
- break
- fi
- done
- fi])
-
- m4_if([$2], [ext], [], [dnl
- if test x$ac_success = xno; then
- dnl HP's aCC needs +std=c++11 according to:
- dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
- dnl Cray's crayCC needs "-h std=c++11"
- for alternative in ${ax_cxx_compile_alternatives}; do
- for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
- cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
- AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
- $cachevar,
- [ac_save_CXX="$CXX"
- CXX="$CXX $switch"
- AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
- [eval $cachevar=yes],
- [eval $cachevar=no])
- CXX="$ac_save_CXX"])
- if eval test x\$$cachevar = xyes; then
- CXX="$CXX $switch"
- if test -n "$CXXCPP" ; then
- CXXCPP="$CXXCPP $switch"
- fi
- ac_success=yes
- break
- fi
- done
- if test x$ac_success = xyes; then
- break
- fi
- done
- fi])
- AC_LANG_POP([C++])
- if test x$ax_cxx_compile_cxx$1_required = xtrue; then
- if test x$ac_success = xno; then
- AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
- fi
- fi
- if test x$ac_success = xno; then
- HAVE_CXX$1=0
- AC_MSG_NOTICE([No compiler with C++$1 support was found])
- else
- HAVE_CXX$1=1
- AC_DEFINE(HAVE_CXX$1,1,
- [define if the compiler supports basic C++$1 syntax])
- fi
- AC_SUBST(HAVE_CXX$1)
-])
-
-
-dnl Test body for checking C++11 support
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
- _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
-)
-
-
-dnl Test body for checking C++14 support
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
- _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
- _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
-)
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
- _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
- _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
- _AX_CXX_COMPILE_STDCXX_testbody_new_in_17
-)
-
-dnl Tests for new features in C++11
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
-
-// If the compiler admits that it is not ready for C++11, why torture it?
-// Hopefully, this will speed up the test.
-
-#ifndef __cplusplus
-
-#error "This is not a C++ compiler"
-
-#elif __cplusplus < 201103L
-
-#error "This is not a C++11 compiler"
-
-#else
-
-namespace cxx11
-{
-
- namespace test_static_assert
- {
-
- template <typename T>
- struct check
- {
- static_assert(sizeof(int) <= sizeof(T), "not big enough");
- };
-
- }
-
- namespace test_final_override
- {
-
- struct Base
- {
- virtual ~Base() {}
- virtual void f() {}
- };
-
- struct Derived : public Base
- {
- virtual ~Derived() override {}
- virtual void f() override {}
- };
-
- }
-
- namespace test_double_right_angle_brackets
- {
-
- template < typename T >
- struct check {};
-
- typedef check<void> single_type;
- typedef check<check<void>> double_type;
- typedef check<check<check<void>>> triple_type;
- typedef check<check<check<check<void>>>> quadruple_type;
-
- }
-
- namespace test_decltype
- {
-
- int
- f()
- {
- int a = 1;
- decltype(a) b = 2;
- return a + b;
- }
-
- }
-
- namespace test_type_deduction
- {
-
- template < typename T1, typename T2 >
- struct is_same
- {
- static const bool value = false;
- };
-
- template < typename T >
- struct is_same<T, T>
- {
- static const bool value = true;
- };
-
- template < typename T1, typename T2 >
- auto
- add(T1 a1, T2 a2) -> decltype(a1 + a2)
- {
- return a1 + a2;
- }
-
- int
- test(const int c, volatile int v)
- {
- static_assert(is_same<int, decltype(0)>::value == true, "");
- static_assert(is_same<int, decltype(c)>::value == false, "");
- static_assert(is_same<int, decltype(v)>::value == false, "");
- auto ac = c;
- auto av = v;
- auto sumi = ac + av + 'x';
- auto sumf = ac + av + 1.0;
- static_assert(is_same<int, decltype(ac)>::value == true, "");
- static_assert(is_same<int, decltype(av)>::value == true, "");
- static_assert(is_same<int, decltype(sumi)>::value == true, "");
- static_assert(is_same<int, decltype(sumf)>::value == false, "");
- static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
- return (sumf > 0.0) ? sumi : add(c, v);
- }
-
- }
-
- namespace test_noexcept
- {
-
- int f() { return 0; }
- int g() noexcept { return 0; }
-
- static_assert(noexcept(f()) == false, "");
- static_assert(noexcept(g()) == true, "");
-
- }
-
- namespace test_constexpr
- {
-
- template < typename CharT >
- unsigned long constexpr
- strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
- {
- return *s ? strlen_c_r(s + 1, acc + 1) : acc;
- }
-
- template < typename CharT >
- unsigned long constexpr
- strlen_c(const CharT *const s) noexcept
- {
- return strlen_c_r(s, 0UL);
- }
-
- static_assert(strlen_c("") == 0UL, "");
- static_assert(strlen_c("1") == 1UL, "");
- static_assert(strlen_c("example") == 7UL, "");
- static_assert(strlen_c("another\0example") == 7UL, "");
-
- }
-
- namespace test_rvalue_references
- {
-
- template < int N >
- struct answer
- {
- static constexpr int value = N;
- };
-
- answer<1> f(int&) { return answer<1>(); }
- answer<2> f(const int&) { return answer<2>(); }
- answer<3> f(int&&) { return answer<3>(); }
-
- void
- test()
- {
- int i = 0;
- const int c = 0;
- static_assert(decltype(f(i))::value == 1, "");
- static_assert(decltype(f(c))::value == 2, "");
- static_assert(decltype(f(0))::value == 3, "");
- }
-
- }
-
- namespace test_uniform_initialization
- {
-
- struct test
- {
- static const int zero {};
- static const int one {1};
- };
-
- static_assert(test::zero == 0, "");
- static_assert(test::one == 1, "");
-
- }
-
- namespace test_lambdas
- {
-
- void
- test1()
- {
- auto lambda1 = [](){};
- auto lambda2 = lambda1;
- lambda1();
- lambda2();
- }
-
- int
- test2()
- {
- auto a = [](int i, int j){ return i + j; }(1, 2);
- auto b = []() -> int { return '0'; }();
- auto c = [=](){ return a + b; }();
- auto d = [&](){ return c; }();
- auto e = [a, &b](int x) mutable {
- const auto identity = [](int y){ return y; };
- for (auto i = 0; i < a; ++i)
- a += b--;
- return x + identity(a + b);
- }(0);
- return a + b + c + d + e;
- }
-
- int
- test3()
- {
- const auto nullary = [](){ return 0; };
- const auto unary = [](int x){ return x; };
- using nullary_t = decltype(nullary);
- using unary_t = decltype(unary);
- const auto higher1st = [](nullary_t f){ return f(); };
- const auto higher2nd = [unary](nullary_t f1){
- return [unary, f1](unary_t f2){ return f2(unary(f1())); };
- };
- return higher1st(nullary) + higher2nd(nullary)(unary);
- }
-
- }
-
- namespace test_variadic_templates
- {
-
- template <int...>
- struct sum;
-
- template <int N0, int... N1toN>
- struct sum<N0, N1toN...>
- {
- static constexpr auto value = N0 + sum<N1toN...>::value;
- };
-
- template <>
- struct sum<>
- {
- static constexpr auto value = 0;
- };
-
- static_assert(sum<>::value == 0, "");
- static_assert(sum<1>::value == 1, "");
- static_assert(sum<23>::value == 23, "");
- static_assert(sum<1, 2>::value == 3, "");
- static_assert(sum<5, 5, 11>::value == 21, "");
- static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
-
- }
-
- // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
- // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
- // because of this.
- namespace test_template_alias_sfinae
- {
-
- struct foo {};
-
- template<typename T>
- using member = typename T::member_type;
-
- template<typename T>
- void func(...) {}
-
- template<typename T>
- void func(member<T>*) {}
-
- void test();
-
- void test() { func<foo>(0); }
-
- }
-
-} // namespace cxx11
-
-#endif // __cplusplus >= 201103L
-
-]])
-
-
-dnl Tests for new features in C++14
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
-
-// If the compiler admits that it is not ready for C++14, why torture it?
-// Hopefully, this will speed up the test.
-
-#ifndef __cplusplus
-
-#error "This is not a C++ compiler"
-
-#elif __cplusplus < 201402L
-
-#error "This is not a C++14 compiler"
-
-#else
-
-namespace cxx14
-{
-
- namespace test_polymorphic_lambdas
- {
-
- int
- test()
- {
- const auto lambda = [](auto&&... args){
- const auto istiny = [](auto x){
- return (sizeof(x) == 1UL) ? 1 : 0;
- };
- const int aretiny[] = { istiny(args)... };
- return aretiny[0];
- };
- return lambda(1, 1L, 1.0f, '1');
- }
-
- }
-
- namespace test_binary_literals
- {
-
- constexpr auto ivii = 0b0000000000101010;
- static_assert(ivii == 42, "wrong value");
-
- }
-
- namespace test_generalized_constexpr
- {
-
- template < typename CharT >
- constexpr unsigned long
- strlen_c(const CharT *const s) noexcept
- {
- auto length = 0UL;
- for (auto p = s; *p; ++p)
- ++length;
- return length;
- }
-
- static_assert(strlen_c("") == 0UL, "");
- static_assert(strlen_c("x") == 1UL, "");
- static_assert(strlen_c("test") == 4UL, "");
- static_assert(strlen_c("another\0test") == 7UL, "");
-
- }
-
- namespace test_lambda_init_capture
- {
-
- int
- test()
- {
- auto x = 0;
- const auto lambda1 = [a = x](int b){ return a + b; };
- const auto lambda2 = [a = lambda1(x)](){ return a; };
- return lambda2();
- }
-
- }
-
- namespace test_digit_separators
- {
-
- constexpr auto ten_million = 100'000'000;
- static_assert(ten_million == 100000000, "");
-
- }
-
- namespace test_return_type_deduction
- {
-
- auto f(int& x) { return x; }
- decltype(auto) g(int& x) { return x; }
-
- template < typename T1, typename T2 >
- struct is_same
- {
- static constexpr auto value = false;
- };
-
- template < typename T >
- struct is_same<T, T>
- {
- static constexpr auto value = true;
- };
-
- int
- test()
- {
- auto x = 0;
- static_assert(is_same<int, decltype(f(x))>::value, "");
- static_assert(is_same<int&, decltype(g(x))>::value, "");
- return x;
- }
-
- }
-
-} // namespace cxx14
-
-#endif // __cplusplus >= 201402L
-
-]])
-
-
-dnl Tests for new features in C++17
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
-
-// If the compiler admits that it is not ready for C++17, why torture it?
-// Hopefully, this will speed up the test.
-
-#ifndef __cplusplus
-
-#error "This is not a C++ compiler"
-
-#elif __cplusplus < 201703L
-
-#error "This is not a C++17 compiler"
-
-#else
-
-#include <initializer_list>
-#include <utility>
-#include <type_traits>
-
-namespace cxx17
-{
-
- namespace test_constexpr_lambdas
- {
-
- constexpr int foo = [](){return 42;}();
-
- }
-
- namespace test::nested_namespace::definitions
- {
-
- }
-
- namespace test_fold_expression
- {
-
- template<typename... Args>
- int multiply(Args... args)
- {
- return (args * ... * 1);
- }
-
- template<typename... Args>
- bool all(Args... args)
- {
- return (args && ...);
- }
-
- }
-
- namespace test_extended_static_assert
- {
-
- static_assert (true);
-
- }
-
- namespace test_auto_brace_init_list
- {
-
- auto foo = {5};
- auto bar {5};
-
- static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
- static_assert(std::is_same<int, decltype(bar)>::value);
- }
-
- namespace test_typename_in_template_template_parameter
- {
-
- template<template<typename> typename X> struct D;
-
- }
-
- namespace test_fallthrough_nodiscard_maybe_unused_attributes
- {
-
- int f1()
- {
- return 42;
- }
-
- [[nodiscard]] int f2()
- {
- [[maybe_unused]] auto unused = f1();
-
- switch (f1())
- {
- case 17:
- f1();
- [[fallthrough]];
- case 42:
- f1();
- }
- return f1();
- }
-
- }
-
- namespace test_extended_aggregate_initialization
- {
-
- struct base1
- {
- int b1, b2 = 42;
- };
-
- struct base2
- {
- base2() {
- b3 = 42;
- }
- int b3;
- };
-
- struct derived : base1, base2
- {
- int d;
- };
-
- derived d1 {{1, 2}, {}, 4}; // full initialization
- derived d2 {{}, {}, 4}; // value-initialized bases
-
- }
-
- namespace test_general_range_based_for_loop
- {
-
- struct iter
- {
- int i;
-
- int& operator* ()
- {
- return i;
- }
-
- const int& operator* () const
- {
- return i;
- }
-
- iter& operator++()
- {
- ++i;
- return *this;
- }
- };
-
- struct sentinel
- {
- int i;
- };
-
- bool operator== (const iter& i, const sentinel& s)
- {
- return i.i == s.i;
- }
-
- bool operator!= (const iter& i, const sentinel& s)
- {
- return !(i == s);
- }
-
- struct range
- {
- iter begin() const
- {
- return {0};
- }
-
- sentinel end() const
- {
- return {5};
- }
- };
-
- void f()
- {
- range r {};
-
- for (auto i : r)
- {
- [[maybe_unused]] auto v = i;
- }
- }
-
- }
-
- namespace test_lambda_capture_asterisk_this_by_value
- {
-
- struct t
- {
- int i;
- int foo()
- {
- return [*this]()
- {
- return i;
- }();
- }
- };
-
- }
-
- namespace test_enum_class_construction
- {
-
- enum class byte : unsigned char
- {};
-
- byte foo {42};
-
- }
-
- namespace test_constexpr_if
- {
-
- template <bool cond>
- int f ()
- {
- if constexpr(cond)
- {
- return 13;
- }
- else
- {
- return 42;
- }
- }
-
- }
-
- namespace test_selection_statement_with_initializer
- {
-
- int f()
- {
- return 13;
- }
-
- int f2()
- {
- if (auto i = f(); i > 0)
- {
- return 3;
- }
-
- switch (auto i = f(); i + 4)
- {
- case 17:
- return 2;
-
- default:
- return 1;
- }
- }
-
- }
-
- namespace test_template_argument_deduction_for_class_templates
- {
-
- template <typename T1, typename T2>
- struct pair
- {
- pair (T1 p1, T2 p2)
- : m1 {p1},
- m2 {p2}
- {}
-
- T1 m1;
- T2 m2;
- };
-
- void f()
- {
- [[maybe_unused]] auto p = pair{13, 42u};
- }
-
- }
-
- namespace test_non_type_auto_template_parameters
- {
-
- template <auto n>
- struct B
- {};
-
- B<5> b1;
- B<'a'> b2;
-
- }
-
- namespace test_structured_bindings
- {
-
- int arr[2] = { 1, 2 };
- std::pair<int, int> pr = { 1, 2 };
-
- auto f1() -> int(&)[2]
- {
- return arr;
- }
-
- auto f2() -> std::pair<int, int>&
- {
- return pr;
- }
-
- struct S
- {
- int x1 : 2;
- volatile double y1;
- };
-
- S f3()
- {
- return {};
- }
-
- auto [ x1, y1 ] = f1();
- auto& [ xr1, yr1 ] = f1();
- auto [ x2, y2 ] = f2();
- auto& [ xr2, yr2 ] = f2();
- const auto [ x3, y3 ] = f3();
-
- }
-
- namespace test_exception_spec_type_system
- {
-
- struct Good {};
- struct Bad {};
-
- void g1() noexcept;
- void g2();
-
- template<typename T>
- Bad
- f(T*, T*);
-
- template<typename T1, typename T2>
- Good
- f(T1*, T2*);
-
- static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
-
- }
-
- namespace test_inline_variables
- {
-
- template<class T> void f(T)
- {}
-
- template<class T> inline T g(T)
- {
- return T{};
- }
-
- template<> inline void f<>(int)
- {}
-
- template<> int g<>(int)
- {
- return 5;
- }
-
- }
-
-} // namespace cxx17
-
-#endif // __cplusplus < 201703L
-
-]])
diff --git a/src/univalue/configure.ac b/src/univalue/configure.ac
deleted file mode 100644
index ed9c5f0c5c..0000000000
--- a/src/univalue/configure.ac
+++ /dev/null
@@ -1,72 +0,0 @@
-m4_define([libunivalue_major_version], [1])
-m4_define([libunivalue_minor_version], [1])
-m4_define([libunivalue_micro_version], [4])
-m4_define([libunivalue_interface_age], [4])
-# If you need a modifier for the version number.
-# Normally empty, but can be used to make "fixup" releases.
-m4_define([libunivalue_extraversion], [])
-
-dnl libtool versioning from libunivalue
-m4_define([libunivalue_current], [m4_eval(100 * libunivalue_minor_version + libunivalue_micro_version - libunivalue_interface_age)])
-m4_define([libunivalue_binary_age], [m4_eval(100 * libunivalue_minor_version + libunivalue_micro_version)])
-m4_define([libunivalue_revision], [libunivalue_interface_age])
-m4_define([libunivalue_age], [m4_eval(libunivalue_binary_age - libunivalue_interface_age)])
-m4_define([libunivalue_version], [libunivalue_major_version().libunivalue_minor_version().libunivalue_micro_version()libunivalue_extraversion()])
-
-
-AC_INIT([univalue], [1.0.4],
- [http://github.com/jgarzik/univalue/])
-
-dnl make the compilation flags quiet unless V=1 is used
-m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
-
-AC_PREREQ(2.60)
-AC_CONFIG_SRCDIR([lib/univalue.cpp])
-AC_CONFIG_AUX_DIR([build-aux])
-AC_CONFIG_MACRO_DIR([build-aux/m4])
-AC_CONFIG_HEADERS([univalue-config.h])
-AM_INIT_AUTOMAKE([subdir-objects foreign])
-
-LIBUNIVALUE_MAJOR_VERSION=libunivalue_major_version
-LIBUNIVALUE_MINOR_VERSION=libunivalue_minor_version
-LIBUNIVALUE_MICRO_VERSION=libunivalue_micro_version
-LIBUNIVALUE_INTERFACE_AGE=libunivalue_interface_age
-
-# ABI version
-# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
-LIBUNIVALUE_CURRENT=libunivalue_current
-LIBUNIVALUE_REVISION=libunivalue_revision
-LIBUNIVALUE_AGE=libunivalue_age
-
-AC_SUBST(LIBUNIVALUE_CURRENT)
-AC_SUBST(LIBUNIVALUE_REVISION)
-AC_SUBST(LIBUNIVALUE_AGE)
-
-LT_INIT
-LT_LANG([C++])
-
-dnl Require C++17 compiler (no GNU extensions)
-AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory], [nodefault])
-
-case $host in
- *mingw*)
- LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -all-static"
- ;;
-esac
-
-BUILD_EXEEXT=
-case $build in
- *mingw*)
- BUILD_EXEEXT=".exe"
- ;;
-esac
-
-AC_CONFIG_FILES([
- Makefile
- pc/libunivalue.pc
- pc/libunivalue-uninstalled.pc])
-
-AC_SUBST(LIBTOOL_APP_LDFLAGS)
-AC_SUBST(BUILD_EXEEXT)
-AC_OUTPUT
-
diff --git a/src/univalue/gen/gen.cpp b/src/univalue/gen/gen.cpp
deleted file mode 100644
index ca5b470ddc..0000000000
--- a/src/univalue/gen/gen.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2014 BitPay Inc.
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or https://opensource.org/licenses/mit-license.php.
-
-//
-// To re-create univalue_escapes.h:
-// $ g++ -o gen gen.cpp
-// $ ./gen > univalue_escapes.h
-//
-
-#include <univalue.h>
-
-#include <cstdio>
-#include <cstring>
-#include <string>
-
-static bool initEscapes;
-static std::string escapes[256];
-
-static void initJsonEscape()
-{
- // Escape all lower control characters (some get overridden with smaller sequences below)
- for (int ch=0x00; ch<0x20; ++ch) {
- char tmpbuf[20];
- snprintf(tmpbuf, sizeof(tmpbuf), "\\u%04x", ch);
- escapes[ch] = std::string(tmpbuf);
- }
-
- escapes[(int)'"'] = "\\\"";
- escapes[(int)'\\'] = "\\\\";
- escapes[(int)'\b'] = "\\b";
- escapes[(int)'\f'] = "\\f";
- escapes[(int)'\n'] = "\\n";
- escapes[(int)'\r'] = "\\r";
- escapes[(int)'\t'] = "\\t";
- escapes[(int)'\x7f'] = "\\u007f"; // U+007F DELETE
-
- initEscapes = true;
-}
-
-static void outputEscape()
-{
- printf( "// Automatically generated file. Do not modify.\n"
- "#ifndef BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H\n"
- "#define BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H\n"
- "static const char *escapes[256] = {\n");
-
- for (unsigned int i = 0; i < 256; i++) {
- if (escapes[i].empty()) {
- printf("\tnullptr,\n");
- } else {
- printf("\t\"");
-
- unsigned int si;
- for (si = 0; si < escapes[i].size(); si++) {
- char ch = escapes[i][si];
- switch (ch) {
- case '"':
- printf("\\\"");
- break;
- case '\\':
- printf("\\\\");
- break;
- default:
- printf("%c", escapes[i][si]);
- break;
- }
- }
-
- printf("\",\n");
- }
- }
-
- printf( "};\n"
- "#endif // BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H\n");
-}
-
-int main (int argc, char *argv[])
-{
- initJsonEscape();
- outputEscape();
- return 0;
-}
-
diff --git a/src/univalue/include/univalue.h b/src/univalue/include/univalue.h
index f0d4de2035..22be0311e8 100644
--- a/src/univalue/include/univalue.h
+++ b/src/univalue/include/univalue.h
@@ -3,8 +3,8 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://opensource.org/licenses/mit-license.php.
-#ifndef __UNIVALUE_H__
-#define __UNIVALUE_H__
+#ifndef BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_H
+#define BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_H
#include <charconv>
#include <cstdint>
@@ -194,4 +194,4 @@ extern const UniValue NullUniValue;
const UniValue& find_value( const UniValue& obj, const std::string& name);
-#endif // __UNIVALUE_H__
+#endif // BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_H
diff --git a/src/univalue/lib/univalue_escapes.h b/src/univalue/include/univalue_escapes.h
index 3f714f8e5b..83767e8ac5 100644
--- a/src/univalue/lib/univalue_escapes.h
+++ b/src/univalue/include/univalue_escapes.h
@@ -1,6 +1,5 @@
-// Automatically generated file. Do not modify.
-#ifndef BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H
-#define BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H
+#ifndef BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_ESCAPES_H
+#define BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_ESCAPES_H
static const char *escapes[256] = {
"\\u0000",
"\\u0001",
@@ -259,4 +258,4 @@ static const char *escapes[256] = {
nullptr,
nullptr,
};
-#endif // BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H
+#endif // BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_ESCAPES_H
diff --git a/src/univalue/lib/univalue_utffilter.h b/src/univalue/include/univalue_utffilter.h
index c24ac58eaf..f688eaaa30 100644
--- a/src/univalue/lib/univalue_utffilter.h
+++ b/src/univalue/include/univalue_utffilter.h
@@ -1,8 +1,8 @@
// Copyright 2016 Wladimir J. van der Laan
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://opensource.org/licenses/mit-license.php.
-#ifndef UNIVALUE_UTFFILTER_H
-#define UNIVALUE_UTFFILTER_H
+#ifndef BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_UTFFILTER_H
+#define BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_UTFFILTER_H
#include <string>
@@ -116,4 +116,4 @@ private:
}
};
-#endif
+#endif // BITCOIN_UNIVALUE_INCLUDE_UNIVALUE_UTFFILTER_H
diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp
index a6ed75e57a..2f2385383c 100644
--- a/src/univalue/lib/univalue_read.cpp
+++ b/src/univalue/lib/univalue_read.cpp
@@ -3,7 +3,7 @@
// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <univalue.h>
-#include "univalue_utffilter.h"
+#include <univalue_utffilter.h>
#include <cstdio>
#include <cstdint>
diff --git a/src/univalue/lib/univalue_write.cpp b/src/univalue/lib/univalue_write.cpp
index 18833077b7..4a3cbba20f 100644
--- a/src/univalue/lib/univalue_write.cpp
+++ b/src/univalue/lib/univalue_write.cpp
@@ -3,7 +3,7 @@
// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <univalue.h>
-#include "univalue_escapes.h"
+#include <univalue_escapes.h>
#include <memory>
#include <string>
diff --git a/src/univalue/pc/libunivalue-uninstalled.pc.in b/src/univalue/pc/libunivalue-uninstalled.pc.in
deleted file mode 100644
index b7f53e875e..0000000000
--- a/src/univalue/pc/libunivalue-uninstalled.pc.in
+++ /dev/null
@@ -1,9 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: libunivalue
-Description: libunivalue, C++ universal value object and JSON library
-Version: @VERSION@
-Libs: ${pc_top_builddir}/${pcfiledir}/libunivalue.la
diff --git a/src/univalue/pc/libunivalue.pc.in b/src/univalue/pc/libunivalue.pc.in
deleted file mode 100644
index 358a2d5f73..0000000000
--- a/src/univalue/pc/libunivalue.pc.in
+++ /dev/null
@@ -1,10 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: libunivalue
-Description: libunivalue, C++ universal value object and JSON library
-Version: @VERSION@
-Libs: -L${libdir} -lunivalue
-Cflags: -I${includedir}
diff --git a/src/univalue/sources.mk b/src/univalue/sources.mk
index efab6d277f..e156216378 100644
--- a/src/univalue/sources.mk
+++ b/src/univalue/sources.mk
@@ -12,10 +12,8 @@ UNIVALUE_INCLUDE_DIR_INT = %reldir%/include
UNIVALUE_DIST_HEADERS_INT =
UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue.h
-
-UNIVALUE_LIB_HEADERS_INT =
-UNIVALUE_LIB_HEADERS_INT += %reldir%/lib/univalue_utffilter.h
-UNIVALUE_LIB_HEADERS_INT += %reldir%/lib/univalue_escapes.h
+UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue_utffilter.h
+UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue_escapes.h
UNIVALUE_LIB_SOURCES_INT =
UNIVALUE_LIB_SOURCES_INT += %reldir%/lib/univalue.cpp
diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp
index 81b1c5d3b1..94c149b39f 100644
--- a/src/univalue/test/unitester.cpp
+++ b/src/univalue/test/unitester.cpp
@@ -12,15 +12,7 @@
#error JSON_TEST_SRC must point to test source directory
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-#endif
-
std::string srcdir(JSON_TEST_SRC);
-static bool test_failed = false;
-
-#define d_assert(expr) { if (!(expr)) { test_failed = true; fprintf(stderr, "%s failed\n", filename.c_str()); } }
-#define f_assert(expr) { if (!(expr)) { test_failed = true; fprintf(stderr, "%s failed\n", __func__); } }
static std::string rtrim(std::string s)
{
@@ -41,9 +33,9 @@ static void runtest(std::string filename, const std::string& jdata)
bool testResult = val.read(jdata);
if (wantPass) {
- d_assert(testResult == true);
+ assert(testResult == true);
} else {
- d_assert(testResult == false);
+ assert(testResult == false);
}
if (wantRoundTrip) {
@@ -141,30 +133,30 @@ void unescape_unicode_test()
bool testResult;
// Escaped ASCII (quote)
testResult = val.read("[\"\\u0022\"]");
- f_assert(testResult);
- f_assert(val[0].get_str() == "\"");
+ assert(testResult);
+ assert(val[0].get_str() == "\"");
// Escaped Basic Plane character, two-byte UTF-8
testResult = val.read("[\"\\u0191\"]");
- f_assert(testResult);
- f_assert(val[0].get_str() == "\xc6\x91");
+ assert(testResult);
+ assert(val[0].get_str() == "\xc6\x91");
// Escaped Basic Plane character, three-byte UTF-8
testResult = val.read("[\"\\u2191\"]");
- f_assert(testResult);
- f_assert(val[0].get_str() == "\xe2\x86\x91");
+ assert(testResult);
+ assert(val[0].get_str() == "\xe2\x86\x91");
// Escaped Supplementary Plane character U+1d161
testResult = val.read("[\"\\ud834\\udd61\"]");
- f_assert(testResult);
- f_assert(val[0].get_str() == "\xf0\x9d\x85\xa1");
+ assert(testResult);
+ assert(val[0].get_str() == "\xf0\x9d\x85\xa1");
}
int main (int argc, char *argv[])
{
- for (unsigned int fidx = 0; fidx < ARRAY_SIZE(filenames); fidx++) {
- runtest_file(filenames[fidx]);
+ for (const auto& f: filenames) {
+ runtest_file(f);
}
unescape_unicode_test();
- return test_failed ? 1 : 0;
+ return 0;
}
diff --git a/src/util/sock.cpp b/src/util/sock.cpp
index 3579af4458..7d5069423a 100644
--- a/src/util/sock.cpp
+++ b/src/util/sock.cpp
@@ -113,63 +113,103 @@ int Sock::SetSockOpt(int level, int opt_name, const void* opt_val, socklen_t opt
bool Sock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
{
-#ifdef USE_POLL
- pollfd fd;
- fd.fd = m_socket;
- fd.events = 0;
- if (requested & RECV) {
- fd.events |= POLLIN;
- }
- if (requested & SEND) {
- fd.events |= POLLOUT;
- }
+ // We need a `shared_ptr` owning `this` for `WaitMany()`, but don't want
+ // `this` to be destroyed when the `shared_ptr` goes out of scope at the
+ // end of this function. Create it with a custom noop deleter.
+ std::shared_ptr<const Sock> shared{this, [](const Sock*) {}};
+
+ EventsPerSock events_per_sock{std::make_pair(shared, Events{requested})};
- if (poll(&fd, 1, count_milliseconds(timeout)) == SOCKET_ERROR) {
+ if (!WaitMany(timeout, events_per_sock)) {
return false;
}
if (occurred != nullptr) {
- *occurred = 0;
- if (fd.revents & POLLIN) {
- *occurred |= RECV;
+ *occurred = events_per_sock.begin()->second.occurred;
+ }
+
+ return true;
+}
+
+bool Sock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
+{
+#ifdef USE_POLL
+ std::vector<pollfd> pfds;
+ for (const auto& [sock, events] : events_per_sock) {
+ pfds.emplace_back();
+ auto& pfd = pfds.back();
+ pfd.fd = sock->m_socket;
+ if (events.requested & RECV) {
+ pfd.events |= POLLIN;
}
- if (fd.revents & POLLOUT) {
- *occurred |= SEND;
+ if (events.requested & SEND) {
+ pfd.events |= POLLOUT;
}
}
- return true;
-#else
- if (!IsSelectableSocket(m_socket)) {
+ if (poll(pfds.data(), pfds.size(), count_milliseconds(timeout)) == SOCKET_ERROR) {
return false;
}
- fd_set fdset_recv;
- fd_set fdset_send;
- FD_ZERO(&fdset_recv);
- FD_ZERO(&fdset_send);
-
- if (requested & RECV) {
- FD_SET(m_socket, &fdset_recv);
+ assert(pfds.size() == events_per_sock.size());
+ size_t i{0};
+ for (auto& [sock, events] : events_per_sock) {
+ assert(sock->m_socket == static_cast<SOCKET>(pfds[i].fd));
+ events.occurred = 0;
+ if (pfds[i].revents & POLLIN) {
+ events.occurred |= RECV;
+ }
+ if (pfds[i].revents & POLLOUT) {
+ events.occurred |= SEND;
+ }
+ if (pfds[i].revents & (POLLERR | POLLHUP)) {
+ events.occurred |= ERR;
+ }
+ ++i;
}
- if (requested & SEND) {
- FD_SET(m_socket, &fdset_send);
+ return true;
+#else
+ fd_set recv;
+ fd_set send;
+ fd_set err;
+ FD_ZERO(&recv);
+ FD_ZERO(&send);
+ FD_ZERO(&err);
+ SOCKET socket_max{0};
+
+ for (const auto& [sock, events] : events_per_sock) {
+ const auto& s = sock->m_socket;
+ if (!IsSelectableSocket(s)) {
+ return false;
+ }
+ if (events.requested & RECV) {
+ FD_SET(s, &recv);
+ }
+ if (events.requested & SEND) {
+ FD_SET(s, &send);
+ }
+ FD_SET(s, &err);
+ socket_max = std::max(socket_max, s);
}
- timeval timeout_struct = MillisToTimeval(timeout);
+ timeval tv = MillisToTimeval(timeout);
- if (select(m_socket + 1, &fdset_recv, &fdset_send, nullptr, &timeout_struct) == SOCKET_ERROR) {
+ if (select(socket_max + 1, &recv, &send, &err, &tv) == SOCKET_ERROR) {
return false;
}
- if (occurred != nullptr) {
- *occurred = 0;
- if (FD_ISSET(m_socket, &fdset_recv)) {
- *occurred |= RECV;
+ for (auto& [sock, events] : events_per_sock) {
+ const auto& s = sock->m_socket;
+ events.occurred = 0;
+ if (FD_ISSET(s, &recv)) {
+ events.occurred |= RECV;
+ }
+ if (FD_ISSET(s, &send)) {
+ events.occurred |= SEND;
}
- if (FD_ISSET(m_socket, &fdset_send)) {
- *occurred |= SEND;
+ if (FD_ISSET(s, &err)) {
+ events.occurred |= ERR;
}
}
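
    For readers skimming the hunk above: `Sock::Wait()` now delegates to `WaitMany()`, which keys its
    map on `std::shared_ptr<const Sock>`, so `Wait()` wraps `this` in a non-owning `shared_ptr` with a
    no-op deleter. A minimal standalone sketch of that pattern (illustrative only, not part of this patch):

        #include <memory>

        struct Resource { /* e.g. owns a socket descriptor closed in its destructor */ };

        void observe(const Resource& res)
        {
            // The lambda deleter does nothing, so this shared_ptr can be handed to APIs
            // that expect shared ownership without ever destroying `res`.
            std::shared_ptr<const Resource> non_owning{&res, [](const Resource*) {}};
            // ... pass `non_owning` around; when it goes out of scope, `res` is untouched.
        }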
diff --git a/src/util/sock.h b/src/util/sock.h
index dd2913a66c..3245820995 100644
--- a/src/util/sock.h
+++ b/src/util/sock.h
@@ -12,6 +12,7 @@
#include <chrono>
#include <memory>
#include <string>
+#include <unordered_map>
/**
* Maximum time to wait for I/O readiness.
@@ -130,26 +131,84 @@ public:
/**
* If passed to `Wait()`, then it will wait for readiness to read from the socket.
*/
- static constexpr Event RECV = 0b01;
+ static constexpr Event RECV = 0b001;
/**
* If passed to `Wait()`, then it will wait for readiness to send to the socket.
*/
- static constexpr Event SEND = 0b10;
+ static constexpr Event SEND = 0b010;
+
+ /**
+ * Ignored if passed to `Wait()`, but could be set in the occurred events if an
+ * exceptional condition has occurred on the socket or if it has been disconnected.
+ */
+ static constexpr Event ERR = 0b100;
/**
* Wait for readiness for input (recv) or output (send).
* @param[in] timeout Wait this much for at least one of the requested events to occur.
* @param[in] requested Wait for those events, bitwise-or of `RECV` and `SEND`.
- * @param[out] occurred If not nullptr and `true` is returned, then upon return this
- * indicates which of the requested events occurred. A timeout is indicated by return
- * value of `true` and `occurred` being set to 0.
- * @return true on success and false otherwise
+ * @param[out] occurred If not nullptr and the function returns `true`, then this
+ * indicates which of the requested events occurred (`ERR` will be added, even if
+ * not requested, if an exceptional event occurs on the socket).
+ * A timeout is indicated by return value of `true` and `occurred` being set to 0.
+     * @return true on success (or timeout, if `occurred` is returned as 0), false otherwise
*/
[[nodiscard]] virtual bool Wait(std::chrono::milliseconds timeout,
Event requested,
Event* occurred = nullptr) const;
+ /**
+ * Auxiliary requested/occurred events to wait for in `WaitMany()`.
+ */
+ struct Events {
+ explicit Events(Event req) : requested{req}, occurred{0} {}
+ Event requested;
+ Event occurred;
+ };
+
+ struct HashSharedPtrSock {
+ size_t operator()(const std::shared_ptr<const Sock>& s) const
+ {
+ return s ? s->m_socket : std::numeric_limits<SOCKET>::max();
+ }
+ };
+
+ struct EqualSharedPtrSock {
+ bool operator()(const std::shared_ptr<const Sock>& lhs,
+ const std::shared_ptr<const Sock>& rhs) const
+ {
+ if (lhs && rhs) {
+ return lhs->m_socket == rhs->m_socket;
+ }
+ if (!lhs && !rhs) {
+ return true;
+ }
+ return false;
+ }
+ };
+
+ /**
+ * On which socket to wait for what events in `WaitMany()`.
+ * The `shared_ptr` is copied into the map to ensure that the `Sock` object
+ * is not destroyed (its destructor would close the underlying socket).
+ * If this happens shortly before or after we call `poll(2)` and a new
+ * socket gets created under the same file descriptor number then the report
+ * from `WaitMany()` will be bogus.
+ */
+ using EventsPerSock = std::unordered_map<std::shared_ptr<const Sock>, Events, HashSharedPtrSock, EqualSharedPtrSock>;
+
+ /**
+ * Same as `Wait()`, but wait on many sockets within the same timeout.
+ * @param[in] timeout Wait this long for at least one of the requested events to occur.
+ * @param[in,out] events_per_sock Wait for the requested events on these sockets and set
+ * `occurred` for the events that actually occurred.
+     * @return true on success (or timeout, if all `events_per_sock[].occurred` are returned as 0),
+ * false otherwise
+ */
+ [[nodiscard]] virtual bool WaitMany(std::chrono::milliseconds timeout,
+ EventsPerSock& events_per_sock) const;
+
/* Higher level, convenience, methods. These may throw. */
/**
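
    To make the new interface above concrete, here is a hedged sketch (not part of the patch) of how a
    caller might wait on two sockets at once; `sock_a` and `sock_b` stand in for existing
    `std::shared_ptr<const Sock>` connections:

        #include <util/sock.h>

        #include <chrono>
        #include <memory>

        void poll_both(const std::shared_ptr<const Sock>& sock_a,
                       const std::shared_ptr<const Sock>& sock_b)
        {
            Sock::EventsPerSock events_per_sock{
                {sock_a, Sock::Events{Sock::RECV}},
                {sock_b, Sock::Events{Sock::RECV | Sock::SEND}},
            };
            // One call covers both sockets. A timeout is reported as `true` with every
            // `occurred` field still 0.
            if (!sock_a->WaitMany(std::chrono::milliseconds{50}, events_per_sock)) {
                return; // poll()/select() itself failed
            }
            for (const auto& [sock, events] : events_per_sock) {
                if (events.occurred & Sock::ERR) { /* exceptional condition or disconnect */ }
                if (events.occurred & Sock::RECV) { /* ready to receive */ }
                if (events.occurred & Sock::SEND) { /* ready to send */ }
            }
        }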
diff --git a/src/validation.cpp b/src/validation.cpp
index 1a8f501863..26ce286c63 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -22,7 +22,7 @@
#include <logging.h>
#include <logging/timer.h>
#include <node/blockstorage.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <node/utxo_snapshot.h>
#include <policy/policy.h>
#include <policy/rbf.h>
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 7f038eda84..174c68744c 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -9,7 +9,7 @@
#include <interfaces/wallet.h>
#include <net.h>
#include <node/context.h>
-#include <node/ui_interface.h>
+#include <node/interface_ui.h>
#include <outputtype.h>
#include <univalue.h>
#include <util/check.h>
diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp
index aa9f23c886..5f1673fb12 100644
--- a/src/wallet/rpc/backup.cpp
+++ b/src/wallet/rpc/backup.cpp
@@ -198,14 +198,15 @@ RPCHelpMan importprivkey()
RPCHelpMan importaddress()
{
return RPCHelpMan{"importaddress",
- "\nAdds an address or script (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n"
+ "\nAdds an address or script (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n"
"\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
"may report that the imported address exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
"If you have the full public key, you should call importpubkey instead of this.\n"
"Hint: use importmulti to import more than one address.\n"
"\nNote: If you import a non-standard raw script in hex form, outputs sending to it will be treated\n"
"as change, and not show up in many RPCs.\n"
- "Note: Use \"getwalletinfo\" to query the scanning progress.\n",
+ "Note: Use \"getwalletinfo\" to query the scanning progress.\n"
+ "Note: This command is only compatible with legacy wallets. Use \"importdescriptors\" with \"addr(X)\" for descriptor wallets.\n",
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The Bitcoin address (or hex-encoded script)"},
{"label", RPCArg::Type::STR, RPCArg::Default{""}, "An optional label"},
diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp
index d1a0ba50f6..2520cd2532 100644
--- a/src/wallet/rpc/spend.cpp
+++ b/src/wallet/rpc/spend.cpp
@@ -699,19 +699,6 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
setSubtractFeeFromOutputs.insert(pos);
}
- // Fetch specified UTXOs from the UTXO set to get the scriptPubKeys and values of the outputs being selected
- // and to match with the given solving_data. Only used for non-wallet outputs.
- std::map<COutPoint, Coin> coins;
- for (const CTxIn& txin : tx.vin) {
- coins[txin.prevout]; // Create empty map entry keyed by prevout.
- }
- wallet.chain().findCoins(coins);
- for (const auto& coin : coins) {
- if (!coin.second.out.IsNull()) {
- coinControl.SelectExternal(coin.first, coin.second.out);
- }
- }
-
bilingual_str error;
if (!FundTransaction(wallet, tx, fee_out, change_position, error, lockUnspents, setSubtractFeeFromOutputs, coinControl)) {
diff --git a/src/wallet/rpc/util.cpp b/src/wallet/rpc/util.cpp
index 59683c5fd8..4fcb393226 100644
--- a/src/wallet/rpc/util.cpp
+++ b/src/wallet/rpc/util.cpp
@@ -101,7 +101,7 @@ LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_cr
spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
}
if (!spk_man) {
- throw JSONRPCError(RPC_WALLET_ERROR, "This type of wallet does not support this command");
+ throw JSONRPCError(RPC_WALLET_ERROR, "Only legacy wallets are supported by this command");
}
return *spk_man;
}
@@ -110,7 +110,7 @@ const LegacyScriptPubKeyMan& EnsureConstLegacyScriptPubKeyMan(const CWallet& wal
{
const LegacyScriptPubKeyMan* spk_man = wallet.GetLegacyScriptPubKeyMan();
if (!spk_man) {
- throw JSONRPCError(RPC_WALLET_ERROR, "This type of wallet does not support this command");
+ throw JSONRPCError(RPC_WALLET_ERROR, "Only legacy wallets are supported by this command");
}
return *spk_man;
}
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 6f79ae4e9b..ce99aeefe5 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -1023,14 +1023,28 @@ bool FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& nFeeRet,
coinControl.fAllowOtherInputs = true;
- for (const CTxIn& txin : tx.vin) {
- coinControl.Select(txin.prevout);
- }
-
// Acquire the locks to prevent races to the new locked unspents between the
// CreateTransaction call and LockCoin calls (when lockUnspents is true).
LOCK(wallet.cs_wallet);
+ // Fetch specified UTXOs from the UTXO set to get the scriptPubKeys and values of the outputs being selected
+ // and to match with the given solving_data. Only used for non-wallet outputs.
+ std::map<COutPoint, Coin> coins;
+ for (const CTxIn& txin : tx.vin) {
+ coins[txin.prevout]; // Create empty map entry keyed by prevout.
+ }
+ wallet.chain().findCoins(coins);
+
+ for (const CTxIn& txin : tx.vin) {
+        // If it's not in the wallet and a corresponding UTXO is found, then select it as an external input
+ const auto& outPoint = txin.prevout;
+ if (wallet.mapWallet.find(outPoint.hash) == wallet.mapWallet.end() && !coins[outPoint].out.IsNull()) {
+ coinControl.SelectExternal(outPoint, coins[outPoint].out);
+ } else {
+ coinControl.Select(outPoint);
+ }
+ }
+
FeeCalculation fee_calc_out;
std::optional<CreatedTransactionResult> txr = CreateTransaction(wallet, vecSend, nChangePosInOut, error, coinControl, fee_calc_out, false);
if (!txr) return false;
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index a8492bd6eb..4c1e86e93d 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -684,7 +684,6 @@ class ReplaceByFeeTest(BitcoinTestFramework):
utxo_to_spend=parent_utxo,
sequence=SEQUENCE_FINAL,
fee_rate=Decimal('0.02'),
- mempool_valid=False,
)
# Broadcast replacement child tx
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
index 3619e05761..e92f73304b 100755
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -7,12 +7,12 @@
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
-from test_framework.messages import COIN
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
+ create_lots_of_big_transactions,
gen_return_txouts,
)
from test_framework.wallet import MiniWallet
@@ -29,16 +29,6 @@ class MempoolLimitTest(BitcoinTestFramework):
]]
self.supports_cli = False
- def send_large_txs(self, node, miniwallet, txouts, fee, tx_batch_size):
- for _ in range(tx_batch_size):
- tx = miniwallet.create_self_transfer(from_node=node, fee_rate=0, mempool_valid=False)['tx']
- for txout in txouts:
- tx.vout.append(txout)
- tx.vout[0].nValue -= int(fee * COIN)
- res = node.testmempoolaccept([tx.serialize().hex()])[0]
- assert_equal(res['fees']['base'], fee)
- miniwallet.sendrawtransaction(from_node=node, tx_hex=tx.serialize().hex())
-
def run_test(self):
txouts = gen_return_txouts()
node = self.nodes[0]
@@ -71,7 +61,7 @@ class MempoolLimitTest(BitcoinTestFramework):
self.log.info("Fill up the mempool with txs with higher fee rate")
for batch_of_txid in range(num_of_batches):
fee = (batch_of_txid + 1) * base_fee
- self.send_large_txs(node, miniwallet, txouts, fee, tx_batch_size)
+ create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts)
self.log.info('The tx should be evicted by now')
# The number of transactions created should be greater than the ones present in the mempool
@@ -85,7 +75,11 @@ class MempoolLimitTest(BitcoinTestFramework):
# Deliberately try to create a tx with a fee less than the minimum mempool fee to assert that it does not get added to the mempool
self.log.info('Create a mempool tx that will not pass mempoolminfee')
- assert_raises_rpc_error(-26, "mempool min fee not met", miniwallet.send_self_transfer, from_node=node, fee_rate=relayfee, mempool_valid=False)
+ assert_raises_rpc_error(-26, "mempool min fee not met", miniwallet.send_self_transfer, from_node=node, fee_rate=relayfee)
+
+ self.log.info('Test passing a value below the minimum (5 MB) to -maxmempool throws an error')
+ self.stop_node(0)
+ self.nodes[0].assert_start_raises_init_error(["-maxmempool=4"], "Error: -maxmempool must be at least 5 MB")
if __name__ == '__main__':
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
index 91f2d0051a..8dd6cc9cea 100755
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -53,8 +53,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
utxo = wallet.get_utxo(txid=coinbase_txids[0])
timelock_tx = wallet.create_self_transfer(
utxo_to_spend=utxo,
- mempool_valid=False,
- locktime=self.nodes[0].getblockcount() + 2
+ locktime=self.nodes[0].getblockcount() + 2,
)['hex']
self.log.info("Check that the time-locked transaction is too immature to spend")
diff --git a/test/functional/mempool_spend_coinbase.py b/test/functional/mempool_spend_coinbase.py
index 9c43ddaf6f..3585871350 100755
--- a/test/functional/mempool_spend_coinbase.py
+++ b/test/functional/mempool_spend_coinbase.py
@@ -40,7 +40,7 @@ class MempoolSpendCoinbaseTest(BitcoinTestFramework):
spend_mature_id = wallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_mature)["txid"]
# other coinbase should be too immature to spend
- immature_tx = wallet.create_self_transfer(utxo_to_spend=utxo_immature, mempool_valid=False)
+ immature_tx = wallet.create_self_transfer(utxo_to_spend=utxo_immature)
assert_raises_rpc_error(-26,
"bad-txns-premature-spend-of-coinbase",
lambda: self.nodes[0].sendrawtransaction(immature_tx['hex']))
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
index a15fbe5a24..e0f2446b2d 100755
--- a/test/functional/mining_prioritisetransaction.py
+++ b/test/functional/mining_prioritisetransaction.py
@@ -7,16 +7,22 @@
from decimal import Decimal
import time
-from test_framework.blocktools import COINBASE_MATURITY
-from test_framework.messages import COIN, MAX_BLOCK_WEIGHT
+from test_framework.messages import (
+ COIN,
+ MAX_BLOCK_WEIGHT,
+)
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+ create_lots_of_big_transactions,
+ gen_return_txouts,
+)
from test_framework.wallet import MiniWallet
class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
- self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-printpriority=1",
@@ -24,12 +30,8 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
]] * self.num_nodes
self.supports_cli = False
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def test_diamond(self):
self.log.info("Test diamond-shape package with priority")
- self.generate(self.wallet, COINBASE_MATURITY + 1)
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
@@ -104,6 +106,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
+ self.wallet.rescan_utxos()
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
@@ -131,7 +134,10 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
- utxos = create_confirmed_utxos(self, self.relayfee, self.nodes[0], utxo_count)
+ utxos = self.wallet.send_self_transfer_multi(from_node=self.nodes[0], num_outputs=utxo_count)['new_utxos']
+ self.generate(self.wallet, 1)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
@@ -141,7 +147,13 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
- txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
+ txids[i] = create_lots_of_big_transactions(
+ self.wallet,
+ self.nodes[0],
+ (i+1) * base_fee,
+ end_range - start_range,
+ self.txouts,
+ utxos[start_range:end_range])
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to
@@ -200,17 +212,9 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
assert x not in mempool
# Create a free transaction. Should be rejected.
- utxo_list = self.nodes[0].listunspent()
- assert len(utxo_list) > 0
- utxo = utxo_list[0]
-
- inputs = []
- outputs = {}
- inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
- outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
- raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
- tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
- tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
+ tx_res = self.wallet.create_self_transfer(from_node=self.nodes[0], fee_rate=0)
+ tx_hex = tx_res['hex']
+ tx_id = tx_res['txid']
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py
index 12ee4b3c27..8ac38bff3a 100755
--- a/test/functional/p2p_blocksonly.py
+++ b/test/functional/p2p_blocksonly.py
@@ -89,6 +89,11 @@ class P2PBlocksOnly(BitcoinTestFramework):
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
_, txid, _, tx_hex = self.check_p2p_tx_violation()
+ self.log.info("Tests with node in normal mode with block-relay-only connection, sending an inv")
+ conn = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="block-relay-only")
+ assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
+ self.check_p2p_inv_violation(conn)
+
self.log.info("Check that txs from RPC are not sent to blockrelay connection")
conn = self.nodes[0].add_outbound_p2p_connection(P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only")
@@ -100,6 +105,13 @@ class P2PBlocksOnly(BitcoinTestFramework):
conn.sync_send_with_ping()
assert(int(txid, 16) not in conn.get_invs())
+ def check_p2p_inv_violation(self, peer):
+ self.log.info("Check that tx-invs from P2P are rejected and result in disconnect")
+ with self.nodes[0].assert_debug_log(["inv sent in violation of protocol, disconnecting peer"]):
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0x12345)]))
+ peer.wait_for_disconnect()
+ self.nodes[0].disconnect_p2ps()
+
def check_p2p_tx_violation(self):
self.log.info('Check that txs from P2P are rejected and result in disconnect')
spendtx = self.miniwallet.create_self_transfer()
@@ -108,9 +120,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx']))
self.nodes[0].p2ps[0].wait_for_disconnect()
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
-
- # Remove the disconnected peer
- del self.nodes[0].p2ps[0]
+ self.nodes[0].disconnect_p2ps()
return spendtx['tx'], spendtx['txid'], spendtx['wtxid'], spendtx['hex']
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index b9ac3c32c5..5e50e1ebce 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -606,6 +606,15 @@ class CompactBlocksTest(BitcoinTestFramework):
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
+ # Request with out-of-bounds tx index results in disconnect
+ bad_peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ block_hash = node.getblockhash(chain_height)
+ block = from_hex(CBlock(), node.getblock(block_hash, False))
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [len(block.vtx)])
+ with node.assert_debug_log(['getblocktxn with out-of-bounds tx indices']):
+ bad_peer.send_message(msg)
+ bad_peer.wait_for_disconnect()
+
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
# Test that requesting old compactblocks doesn't work.
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 759e43194b..948deaaec4 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -11,6 +11,9 @@ from math import ceil
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
+from test_framework.messages import (
+ COIN,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
@@ -103,6 +106,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.generate(self.nodes[2], 1)
self.generate(self.nodes[0], 121)
+ self.test_weight_calculation()
self.test_change_position()
self.test_simple()
self.test_simple_two_coins()
@@ -1069,6 +1073,27 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[2].unloadwallet("extfund")
+ def test_weight_calculation(self):
+ self.log.info("Test weight calculation with external inputs")
+
+ self.nodes[2].createwallet("test_weight_calculation")
+ wallet = self.nodes[2].get_wallet_rpc("test_weight_calculation")
+
+ addr = wallet.getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 5)
+ vout = find_vout_for_address(self.nodes[0], txid, addr)
+
+ self.nodes[0].sendtoaddress(wallet.getnewaddress(), 5)
+ self.generate(self.nodes[0], 1)
+
+ rawtx = wallet.createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 9.999}])
+ fundedtx = wallet.fundrawtransaction(rawtx, {'fee_rate': 10})
+        # with 71-byte signatures we should expect the following tx size
+ tx_size = 10 + 41*2 + 31*2 + (2 + 107*2)/4
+ assert_equal(fundedtx['fee'] * COIN, tx_size * 10)
+
+ self.nodes[2].unloadwallet("test_weight_calculation")
+
def test_include_unsafe(self):
self.log.info("Test fundrawtxn with unsafe inputs")
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 1f95814e18..dee6d777af 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -285,7 +285,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Test a transaction with a large fee.
# Fee rate is 0.20000000 BTC/kvB
- tx = self.wallet.create_self_transfer(mempool_valid=False, from_node=self.nodes[0], fee_rate=Decimal('0.20000000'))
+ tx = self.wallet.create_self_transfer(from_node=self.nodes[0], fee_rate=Decimal("0.20000000"))
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([tx['hex']])[0]
assert_equal(testres['allowed'], False)
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index b043d1a70d..1c64fa4028 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -476,39 +476,6 @@ def find_output(node, txid, amount, *, blockhash=None):
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
-# Helper to create at least "count" utxos
-# Pass in a fee that is sufficient for relay and mining new transactions.
-def create_confirmed_utxos(test_framework, fee, node, count, **kwargs):
- to_generate = int(0.5 * count) + 101
- while to_generate > 0:
- test_framework.generate(node, min(25, to_generate), **kwargs)
- to_generate -= 25
- utxos = node.listunspent()
- iterations = count - len(utxos)
- addr1 = node.getnewaddress()
- addr2 = node.getnewaddress()
- if iterations <= 0:
- return utxos
- for _ in range(iterations):
- t = utxos.pop()
- inputs = []
- inputs.append({"txid": t["txid"], "vout": t["vout"]})
- outputs = {}
- send_value = t['amount'] - fee
- outputs[addr1] = satoshi_round(send_value / 2)
- outputs[addr2] = satoshi_round(send_value / 2)
- raw_tx = node.createrawtransaction(inputs, outputs)
- signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
- node.sendrawtransaction(signed_tx)
-
- while (node.getmempoolinfo()['size'] > 0):
- test_framework.generate(node, 1, **kwargs)
-
- utxos = node.listunspent()
- assert len(utxos) >= count
- return utxos
-
-
def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs):
"""Build and send a transaction that spends the given inputs (specified
by lists of parent_txid:vout each), with the desired total value and fee,
@@ -553,24 +520,22 @@ def gen_return_txouts():
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
-def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
- addr = node.getnewaddress()
+def create_lots_of_big_transactions(mini_wallet, node, fee, tx_batch_size, txouts, utxos=None):
+ from .messages import COIN
+ fee_sats = int(fee * COIN)
txids = []
- from .messages import tx_from_hex
- for _ in range(num):
- t = utxos.pop()
- inputs = [{"txid": t["txid"], "vout": t["vout"]}]
- outputs = {}
- change = t['amount'] - fee
- outputs[addr] = satoshi_round(change)
- rawtx = node.createrawtransaction(inputs, outputs)
- tx = tx_from_hex(rawtx)
- for txout in txouts:
- tx.vout.append(txout)
- newtx = tx.serialize().hex()
- signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
- txid = node.sendrawtransaction(signresult["hex"], 0)
- txids.append(txid)
+ use_internal_utxos = utxos is None
+ for _ in range(tx_batch_size):
+ tx = mini_wallet.create_self_transfer(
+ from_node=node,
+ utxo_to_spend=None if use_internal_utxos else utxos.pop(),
+ fee_rate=0,
+ )["tx"]
+ tx.vout[0].nValue -= fee_sats
+ tx.vout.extend(txouts)
+ res = node.testmempoolaccept([tx.serialize().hex()])[0]
+ assert_equal(res['fees']['base'], fee)
+ txids.append(node.sendrawtransaction(tx.serialize().hex()))
return txids
@@ -578,13 +543,8 @@ def mine_large_block(test_framework, mini_wallet, node):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
txouts = gen_return_txouts()
- from .messages import COIN
- fee = 100 * int(node.getnetworkinfo()["relayfee"] * COIN)
- for _ in range(14):
- tx = mini_wallet.create_self_transfer(from_node=node, fee_rate=0, mempool_valid=False)['tx']
- tx.vout[0].nValue -= fee
- tx.vout.extend(txouts)
- mini_wallet.sendrawtransaction(from_node=node, tx_hex=tx.serialize().hex())
+ fee = 100 * node.getnetworkinfo()["relayfee"]
+ create_lots_of_big_transactions(mini_wallet, node, fee, 14, txouts)
test_framework.generate(node, 1)
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index e43dd9f61a..93f3ea9e81 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -193,7 +193,7 @@ class MiniWallet:
Returns a tuple (txid, n) referring to the created external utxo outpoint.
"""
- tx = self.create_self_transfer(from_node=from_node, fee_rate=0, mempool_valid=False)['tx']
+ tx = self.create_self_transfer(from_node=from_node, fee_rate=0)["tx"]
assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee)
tx.vout[0].nValue -= (amount + fee) # change output -> MiniWallet
tx.vout.append(CTxOut(amount, scriptPubKey)) # arbitrary output -> to be returned
@@ -230,7 +230,7 @@ class MiniWallet:
# create simple tx template (1 input, 1 output)
tx = self.create_self_transfer(
fee_rate=0, from_node=from_node,
- utxo_to_spend=utxos_to_spend[0], sequence=sequence, mempool_valid=False)['tx']
+ utxo_to_spend=utxos_to_spend[0], sequence=sequence)["tx"]
# duplicate inputs, witnesses and outputs
tx.vin = [deepcopy(tx.vin[0]) for _ in range(len(utxos_to_spend))]
@@ -248,9 +248,8 @@ class MiniWallet:
o.nValue = outputs_value_total // num_outputs
return tx
- def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node=None, utxo_to_spend=None, mempool_valid=True, locktime=0, sequence=0):
- """Create and return a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed.
- Checking mempool validity via the testmempoolaccept RPC can be skipped by setting mempool_valid to False."""
+ def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node=None, utxo_to_spend=None, locktime=0, sequence=0):
+ """Create and return a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
from_node = from_node or self._test_node
utxo_to_spend = utxo_to_spend or self.get_utxo()
if self._priv_key is None:
@@ -277,11 +276,7 @@ class MiniWallet:
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key]
tx_hex = tx.serialize().hex()
- if mempool_valid:
- tx_info = from_node.testmempoolaccept([tx_hex])[0]
- assert_equal(tx_info['allowed'], True)
- assert_equal(tx_info['vsize'], vsize)
- assert_equal(tx_info['fees']['base'], utxo_to_spend['value'] - Decimal(send_value) / COIN)
+ assert_equal(tx.get_vsize(), vsize)
return {'txid': tx.rehash(), 'wtxid': tx.getwtxid(), 'hex': tx_hex, 'tx': tx}
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index e47d021210..5dc23ba245 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -93,15 +93,15 @@ class WalletDescriptorTest(BitcoinTestFramework):
# Make sure things are disabled
self.log.info("Test disabled RPCs")
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importmulti, [])
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
- assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.sethdseed)
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importmulti, [])
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
+ assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.sethdseed)
self.log.info("Test encryption")
# Get the master fingerprint before encrypt
diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py
index cbdb67216c..7b7cfbfef5 100755
--- a/test/get_previous_releases.py
+++ b/test/get_previous_releases.py
@@ -106,7 +106,7 @@ def download_binary(tag, args) -> int:
bin_path = 'bin/bitcoin-core-{}/test.{}'.format(
match.group(1), match.group(2))
platform = args.platform
- if tag < "v23" and platform in ["x86_64-apple-darwin", "aarch64-apple-darwin"]:
+ if tag < "v23" and platform in ["x86_64-apple-darwin", "arm64-apple-darwin"]:
platform = "osx64"
tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format(
tag=tag[1:], platform=platform)
@@ -214,7 +214,7 @@ def check_host(args) -> int:
'aarch64-*-linux*': 'aarch64-linux-gnu',
'x86_64-*-linux*': 'x86_64-linux-gnu',
'x86_64-apple-darwin*': 'x86_64-apple-darwin',
- 'aarch64-apple-darwin*': 'aarch64-apple-darwin',
+ 'aarch64-apple-darwin*': 'arm64-apple-darwin',
}
args.platform = ''
for pattern, target in platforms.items():
diff --git a/test/lint/README.md b/test/lint/README.md
index a23211a72b..8d592c3282 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -28,7 +28,6 @@ To do a full check with `-r`, make sure that you have fetched the upstream repos
maintained:
* for `src/secp256k1`: https://github.com/bitcoin-core/secp256k1.git (branch master)
* for `src/leveldb`: https://github.com/bitcoin-core/leveldb-subtree.git (branch bitcoin-fork)
-* for `src/univalue`: https://github.com/bitcoin-core/univalue-subtree.git (branch master)
* for `src/crypto/ctaes`: https://github.com/bitcoin-core/ctaes.git (branch master)
* for `src/crc32c`: https://github.com/bitcoin-core/crc32c-subtree.git (branch bitcoin-fork)
* for `src/minisketch`: https://github.com/sipa/minisketch.git (branch master)
diff --git a/test/lint/lint-files.py b/test/lint/lint-files.py
index dbb51ce54e..123bee2cbc 100755
--- a/test/lint/lint-files.py
+++ b/test/lint/lint-files.py
@@ -21,7 +21,7 @@ ALL_SOURCE_FILENAMES_REGEXP = r"^.*\.(cpp|h|py|sh)$"
ALLOWED_FILENAME_REGEXP = "^[a-zA-Z0-9/_.@][a-zA-Z0-9/_.@-]*$"
ALLOWED_SOURCE_FILENAME_REGEXP = "^[a-z0-9_./-]+$"
ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP = (
- "^src/(secp256k1/|minisketch/|univalue/|test/fuzz/FuzzedDataProvider.h)"
+ "^src/(secp256k1/|minisketch/|test/fuzz/FuzzedDataProvider.h)"
)
ALLOWED_PERMISSION_NON_EXECUTABLES = 0o644
ALLOWED_PERMISSION_EXECUTABLES = 0o755
diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py
index 412cf86791..f54126e023 100755
--- a/test/lint/lint-format-strings.py
+++ b/test/lint/lint-format-strings.py
@@ -22,6 +22,7 @@ FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [
'LogConnectFailure,1',
'LogPrint,1',
'LogPrintf,0',
+ 'LogPrintfCategory,1',
'LogPrintLevel,2',
'printf,0',
'snprintf,2',
@@ -76,7 +77,7 @@ def main():
matching_files_filtered = []
for matching_file in matching_files:
- if not re.search('^src/(leveldb|secp256k1|minisketch|tinyformat|univalue|test/fuzz/strprintf.cpp)', matching_file):
+ if not re.search('^src/(leveldb|secp256k1|minisketch|tinyformat|test/fuzz/strprintf.cpp)', matching_file):
matching_files_filtered.append(matching_file)
matching_files_filtered.sort()
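
    As context for the new entry above: `LogPrintfCategory` takes one leading argument (the log
    category) before the format string, and, per the lint-logs change below, its messages must be
    newline-terminated. A hedged example of a call the updated linters would accept (`peer_id` and the
    message text are made up for illustration):

        #include <logging.h>

        void announce_disconnect(int peer_id)
        {
            // Unconditionally logged, prefixed with the category name; note the trailing "\n".
            LogPrintfCategory(BCLog::NET, "Disconnecting peer=%d\n", peer_id);
        }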
diff --git a/test/lint/lint-include-guards.py b/test/lint/lint-include-guards.py
index 86284517d5..5867aae028 100755
--- a/test/lint/lint-include-guards.py
+++ b/test/lint/lint-include-guards.py
@@ -22,7 +22,6 @@ EXCLUDE_FILES_WITH_PREFIX = ['src/crypto/ctaes',
'src/crc32c',
'src/secp256k1',
'src/minisketch',
- 'src/univalue',
'src/tinyformat.h',
'src/bench/nanobench.h',
'src/test/fuzz/FuzzedDataProvider.h']
diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py
index ae62994642..afdca0d418 100755
--- a/test/lint/lint-includes.py
+++ b/test/lint/lint-includes.py
@@ -19,7 +19,7 @@ EXCLUDED_DIRS = ["src/leveldb/",
"src/crc32c/",
"src/secp256k1/",
"src/minisketch/",
- "src/univalue/"]
+ ]
EXPECTED_BOOST_INCLUDES = ["boost/algorithm/string/replace.hpp",
"boost/date_time/posix_time/posix_time.hpp",
diff --git a/test/lint/lint-locale-dependence.py b/test/lint/lint-locale-dependence.py
index 9b2cf4587a..4876ac2e2d 100755
--- a/test/lint/lint-locale-dependence.py
+++ b/test/lint/lint-locale-dependence.py
@@ -60,7 +60,6 @@ REGEXP_EXTERNAL_DEPENDENCIES_EXCLUSIONS = [
"src/secp256k1/",
"src/minisketch/",
"src/tinyformat.h",
- "src/univalue/"
]
LOCALE_DEPENDENT_FUNCTIONS = [
diff --git a/test/lint/lint-logs.py b/test/lint/lint-logs.py
index de53729b4e..aaf697467d 100755
--- a/test/lint/lint-logs.py
+++ b/test/lint/lint-logs.py
@@ -16,12 +16,12 @@ from subprocess import check_output
def main():
- logs_list = check_output(["git", "grep", "--extended-regexp", r"(LogPrintLevel|LogPrintf?)\(", "--", "*.cpp"], universal_newlines=True, encoding="utf8").splitlines()
+ logs_list = check_output(["git", "grep", "--extended-regexp", r"(LogPrintLevel|LogPrintfCategory|LogPrintf?)\(", "--", "*.cpp"], universal_newlines=True, encoding="utf8").splitlines()
unterminated_logs = [line for line in logs_list if not re.search(r'(\\n"|/\* Continued \*/)', line)]
if unterminated_logs != []:
- print("All calls to LogPrintf(), LogPrint(), LogPrintLevel(), and WalletLogPrintf() should be terminated with \"\\n\".")
+ print("All calls to LogPrintf(), LogPrintfCategory(), LogPrint(), LogPrintLevel(), and WalletLogPrintf() should be terminated with \"\\n\".")
print("")
for line in unterminated_logs:
diff --git a/test/lint/lint-shell-locale.py b/test/lint/lint-shell-locale.py
index f3dfe18a95..309c426374 100755
--- a/test/lint/lint-shell-locale.py
+++ b/test/lint/lint-shell-locale.py
@@ -41,7 +41,7 @@ def main():
exit_code = 0
shell_files = get_shell_files_list()
for file_path in shell_files:
- if re.search('src/(secp256k1|minisketch|univalue)/', file_path):
+ if re.search('src/(secp256k1|minisketch)/', file_path):
continue
with open(file_path, 'r', encoding='utf-8') as file_obj:
diff --git a/test/lint/lint-shell.py b/test/lint/lint-shell.py
index f1e4494350..ed95024ef5 100755
--- a/test/lint/lint-shell.py
+++ b/test/lint/lint-shell.py
@@ -68,7 +68,7 @@ def main():
]
files = get_files(files_cmd)
# remove everything that doesn't match this regex
- reg = re.compile(r'src/[leveldb,secp256k1,minisketch,univalue]')
+ reg = re.compile(r'src/[leveldb,secp256k1,minisketch]')
files[:] = [file for file in files if not reg.match(file)]
# build the `shellcheck` command
diff --git a/test/lint/lint-spelling.py b/test/lint/lint-spelling.py
index 5da1b243f7..14d7d13a75 100755
--- a/test/lint/lint-spelling.py
+++ b/test/lint/lint-spelling.py
@@ -12,7 +12,7 @@ Note: Will exit successfully regardless of spelling errors.
from subprocess import check_output, STDOUT, CalledProcessError
IGNORE_WORDS_FILE = 'test/lint/spelling.ignore-words.txt'
-FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/leveldb/", ":(exclude)src/crc32c/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)src/univalue/", ":(exclude)contrib/builder-keys/keys.txt", ":(exclude)contrib/guix/patches"]
+FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/leveldb/", ":(exclude)src/crc32c/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)contrib/builder-keys/keys.txt", ":(exclude)contrib/guix/patches"]
def check_codespell_install():
diff --git a/test/lint/lint-whitespace.py b/test/lint/lint-whitespace.py
index d98fc8d9a2..3fb5b80013 100755
--- a/test/lint/lint-whitespace.py
+++ b/test/lint/lint-whitespace.py
@@ -22,7 +22,6 @@ EXCLUDED_DIRS = ["depends/patches/",
"src/crc32c/",
"src/secp256k1/",
"src/minisketch/",
- "src/univalue/",
"doc/release-notes/",
"src/qt/locale"]