-rw-r--r--  .cirrus.yml | 2
-rwxr-xr-x  ci/test/00_setup_env_mac.sh | 4
-rwxr-xr-x  contrib/devtools/security-check.py | 23
-rwxr-xr-x  contrib/devtools/symbol-check.py | 2
-rwxr-xr-x  contrib/devtools/test-security-check.py | 90
-rw-r--r--  contrib/guix/README.md | 4
-rwxr-xr-x  contrib/guix/guix-build | 4
-rwxr-xr-x  contrib/guix/guix-codesign | 2
-rw-r--r--  contrib/guix/manifest.scm | 7
-rw-r--r--  contrib/macdeploy/README.md | 20
-rw-r--r--  depends/README.md | 1
-rw-r--r--  depends/hosts/darwin.mk | 6
-rw-r--r--  depends/packages/boost.mk | 3
-rw-r--r--  depends/packages/zeromq.mk | 3
-rw-r--r--  depends/patches/zeromq/netbsd_kevent_void.patch | 2
-rw-r--r--  doc/REST-interface.md | 1
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/release-notes-23508.md | 9
-rw-r--r--  src/bench/bench.cpp | 4
-rw-r--r--  src/bench/checkblock.cpp | 8
-rw-r--r--  src/bench/rpc_blockchain.cpp | 4
-rw-r--r--  src/bitcoin-tx.cpp | 10
-rw-r--r--  src/chain.h | 29
-rw-r--r--  src/common/bloom.cpp | 4
-rw-r--r--  src/core_write.cpp | 2
-rw-r--r--  src/dbwrapper.h | 6
-rw-r--r--  src/fs.cpp | 4
-rw-r--r--  src/hash.h | 15
-rw-r--r--  src/index/txindex.cpp | 4
-rw-r--r--  src/interfaces/chain.h | 3
-rw-r--r--  src/interfaces/wallet.h | 1
-rw-r--r--  src/net.cpp | 43
-rw-r--r--  src/net.h | 23
-rw-r--r--  src/net_processing.cpp | 85
-rw-r--r--  src/net_processing.h | 9
-rw-r--r--  src/node/blockstorage.cpp | 18
-rw-r--r--  src/node/blockstorage.h | 9
-rw-r--r--  src/node/interfaces.cpp | 5
-rw-r--r--  src/policy/packages.h | 1
-rw-r--r--  src/psbt.cpp | 2
-rw-r--r--  src/pubkey.h | 4
-rw-r--r--  src/qt/guiconstants.h | 2
-rw-r--r--  src/qt/psbtoperationsdialog.cpp | 2
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 2
-rw-r--r--  src/qt/transactiondesc.cpp | 9
-rw-r--r--  src/qt/transactionrecord.cpp | 15
-rw-r--r--  src/qt/transactionrecord.h | 2
-rw-r--r--  src/qt/transactiontablemodel.cpp | 9
-rw-r--r--  src/qt/walletframe.cpp | 2
-rw-r--r--  src/random.cpp | 7
-rw-r--r--  src/random.h | 14
-rw-r--r--  src/rpc/blockchain.cpp | 235
-rw-r--r--  src/rpc/client.cpp | 2
-rw-r--r--  src/rpc/rawtransaction.cpp | 7
-rw-r--r--  src/rpc/util.cpp | 16
-rw-r--r--  src/rpc/util.h | 9
-rw-r--r--  src/script/bitcoinconsensus.cpp | 17
-rw-r--r--  src/script/interpreter.cpp | 17
-rw-r--r--  src/script/interpreter.h | 12
-rw-r--r--  src/script/sign.cpp | 2
-rw-r--r--  src/serialize.h | 77
-rw-r--r--  src/span.h | 9
-rw-r--r--  src/streams.h | 86
-rw-r--r--  src/support/allocators/zeroafterfree.h | 2
-rw-r--r--  src/test/bloom_tests.cpp | 12
-rw-r--r--  src/test/fuzz/autofile.cpp | 8
-rw-r--r--  src/test/fuzz/buffered_file.cpp | 6
-rw-r--r--  src/test/fuzz/chain.cpp | 23
-rw-r--r--  src/test/fuzz/connman.cpp | 6
-rw-r--r--  src/test/fuzz/fuzz.cpp | 2
-rw-r--r--  src/test/fuzz/integer.cpp | 5
-rw-r--r--  src/test/fuzz/p2p_transport_serialization.cpp | 4
-rw-r--r--  src/test/fuzz/rpc.cpp | 3
-rw-r--r--  src/test/fuzz/signature_checker.cpp | 2
-rw-r--r--  src/test/fuzz/util.h | 6
-rw-r--r--  src/test/fuzz/versionbits.cpp | 35
-rw-r--r--  src/test/interfaces_tests.cpp | 1
-rw-r--r--  src/test/script_tests.cpp | 20
-rw-r--r--  src/test/serialize_tests.cpp | 52
-rw-r--r--  src/test/streams_tests.cpp | 10
-rw-r--r--  src/test/txpackage_tests.cpp | 232
-rw-r--r--  src/test/util/blockfilter.cpp | 2
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 3
-rw-r--r--  src/txdb.cpp | 14
-rw-r--r--  src/txmempool.cpp | 32
-rw-r--r--  src/txmempool.h | 68
-rw-r--r--  src/uint256.h | 6
-rw-r--r--  src/util/system.cpp | 2
-rw-r--r--  src/validation.cpp | 75
-rw-r--r--  src/validation.h | 18
-rw-r--r--  src/versionbits.cpp | 31
-rw-r--r--  src/versionbits.h | 12
-rw-r--r--  src/wallet/bdb.cpp | 6
-rw-r--r--  src/wallet/coincontrol.h | 19
-rw-r--r--  src/wallet/dump.cpp | 12
-rw-r--r--  src/wallet/interfaces.cpp | 1
-rw-r--r--  src/wallet/receive.cpp | 2
-rw-r--r--  src/wallet/rpc/coins.cpp | 4
-rw-r--r--  src/wallet/rpc/spend.cpp | 79
-rw-r--r--  src/wallet/rpc/transactions.cpp | 4
-rw-r--r--  src/wallet/spend.cpp | 14
-rw-r--r--  src/wallet/sqlite.cpp | 18
-rw-r--r--  src/wallet/test/spend_tests.cpp | 51
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 17
-rw-r--r--  src/wallet/wallet.cpp | 51
-rw-r--r--  src/wallet/wallet.h | 2
-rw-r--r--  test/functional/data/rpc_decodescript.json | 11
-rwxr-xr-x  test/functional/feature_cltv.py | 2
-rwxr-xr-x  test/functional/feature_dersig.py | 2
-rwxr-xr-x  test/functional/feature_maxtipage.py | 56
-rwxr-xr-x  test/functional/mempool_updatefromblock.py | 2
-rwxr-xr-x  test/functional/rpc_blockchain.py | 57
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 53
-rwxr-xr-x  test/functional/rpc_getblockfrompeer.py | 16
-rwxr-xr-x  test/functional/rpc_psbt.py | 89
-rwxr-xr-x  test/functional/rpc_rawtransaction.py | 39
-rwxr-xr-x  test/functional/rpc_signrawtransaction.py | 4
-rw-r--r--  test/functional/test_framework/util.py | 2
-rwxr-xr-x  test/functional/test_runner.py | 2
-rwxr-xr-x  test/functional/wallet_send.py | 40
-rwxr-xr-x  test/functional/wallet_timelock.py | 50
-rwxr-xr-x  test/lint/commit-script-check.sh | 5
-rw-r--r--  test/sanitizer_suppressions/ubsan | 5
-rw-r--r--  test/util/data/tt-delin1-out.json | 2
-rw-r--r--  test/util/data/tt-delout1-out.json | 1
-rw-r--r--  test/util/data/tt-locktime317000-out.json | 2
-rw-r--r--  test/util/data/txcreate1.json | 2
-rw-r--r--  test/util/data/txcreate2.json | 1
-rw-r--r--  test/util/data/txcreatedata1.json | 2
-rw-r--r--  test/util/data/txcreatedata2.json | 2
-rw-r--r--  test/util/data/txcreatedata_seq0.json | 1
-rw-r--r--  test/util/data/txcreatedata_seq1.json | 1
-rw-r--r--  test/util/data/txcreatemultisig1.json | 1
-rw-r--r--  test/util/data/txcreatemultisig2.json | 1
-rw-r--r--  test/util/data/txcreatemultisig3.json | 1
-rw-r--r--  test/util/data/txcreatemultisig4.json | 1
-rw-r--r--  test/util/data/txcreatemultisig5.json | 1
-rw-r--r--  test/util/data/txcreateoutpubkey1.json | 1
-rw-r--r--  test/util/data/txcreateoutpubkey2.json | 1
-rw-r--r--  test/util/data/txcreateoutpubkey3.json | 1
-rw-r--r--  test/util/data/txcreatescript1.json | 1
-rw-r--r--  test/util/data/txcreatescript2.json | 1
-rw-r--r--  test/util/data/txcreatescript3.json | 1
-rw-r--r--  test/util/data/txcreatescript4.json | 1
-rw-r--r--  test/util/data/txcreatesignv1.json | 1
145 files changed, 1756 insertions, 709 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 240e2cf705..dcc63c6e32 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -277,7 +277,7 @@ task:
container:
image: ubuntu:focal
env:
- MACOS_SDK: "Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers"
+ MACOS_SDK: "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers"
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
FILE_ENV: "./ci/test/00_setup_env_mac.sh"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index d70b993b99..c4f22c8f9e 100755
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -10,8 +10,8 @@ export CONTAINER_NAME=ci_macos_cross
export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos
export HOST=x86_64-apple-darwin
export PACKAGES="cmake libz-dev libtinfo5 python3-setuptools xorriso"
-export XCODE_VERSION=12.1
-export XCODE_BUILD_ID=12A7403
+export XCODE_VERSION=12.2
+export XCODE_BUILD_ID=12B45b
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 137fe377da..e6a29b73b9 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -111,6 +111,17 @@ def check_ELF_separate_code(binary):
return False
return True
+def check_ELF_control_flow(binary) -> bool:
+ '''
+ Check for control flow instrumentation
+ '''
+ main = binary.get_function_address('main')
+ content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO)
+
+ if content == [243, 15, 30, 250]: # endbr64
+ return True
+ return False
+
def check_PE_DYNAMIC_BASE(binary) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
@@ -172,7 +183,7 @@ def check_NX(binary) -> bool:
'''
return binary.has_nx
-def check_control_flow(binary) -> bool:
+def check_MACHO_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
@@ -200,17 +211,14 @@ BASE_PE = [
]
BASE_MACHO = [
- ('PIE', check_PIE),
('NOUNDEFS', check_MACHO_NOUNDEFS),
- ('NX', check_NX),
('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS),
('Canary', check_MACHO_Canary),
- ('CONTROL_FLOW', check_control_flow),
]
CHECKS = {
lief.EXE_FORMATS.ELF: {
- lief.ARCHITECTURES.X86: BASE_ELF,
+ lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_control_flow)],
lief.ARCHITECTURES.ARM: BASE_ELF,
lief.ARCHITECTURES.ARM64: BASE_ELF,
lief.ARCHITECTURES.PPC: BASE_ELF,
@@ -220,7 +228,10 @@ CHECKS = {
lief.ARCHITECTURES.X86: BASE_PE,
},
lief.EXE_FORMATS.MACHO: {
- lief.ARCHITECTURES.X86: BASE_MACHO,
+ lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE),
+ ('NX', check_NX),
+ ('CONTROL_FLOW', check_MACHO_control_flow)],
+ lief.ARCHITECTURES.ARM64: BASE_MACHO,
}
}
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 4b695b3530..461132ae63 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -229,7 +229,7 @@ def check_MACHO_min_os(binary) -> bool:
return False
def check_MACHO_sdk(binary) -> bool:
- if binary.build_version.sdk == [10, 15, 6]:
+ if binary.build_version.sdk == [11, 0, 0]:
return True
return False
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 6b748e8743..d3d225f3ab 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -5,6 +5,7 @@
'''
Test script for security-check.py
'''
+import lief #type:ignore
import os
import subprocess
from typing import List
@@ -41,25 +42,49 @@ def call_security_check(cc, source, executable, options):
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
+def get_arch(cc, source, executable):
+ subprocess.run([*cc, source, '-o', executable], check=True)
+ binary = lief.parse(executable)
+ arch = binary.abstract.header.architecture
+ os.remove(executable)
+ return arch
+
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
write_testcode(source)
+ arch = get_arch(cc, source, executable)
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE NX RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
- (1, executable+': failed RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
- (1, executable+': failed separate_code'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
- (0, ''))
+ if arch == lief.ARCHITECTURES.X86:
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE NX RELRO Canary CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO Canary CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed RELRO CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ (1, executable+': failed separate_code CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']),
+ (0, ''))
+ else:
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE NX RELRO Canary'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO Canary'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed RELRO'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ (1, executable+': failed separate_code'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (0, ''))
clean_files(source, executable)
@@ -91,21 +116,34 @@ class TestSecurityChecks(unittest.TestCase):
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'clang')
write_testcode(source)
+ arch = get_arch(cc, source, executable)
+
+ if arch == lief.ARCHITECTURES.X86:
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary PIE NX CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE NX CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
+ (1, executable+': failed LAZY_BINDINGS PIE CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
+ (1, executable+': failed PIE CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
+ (1, executable+': failed PIE'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
+ (0, ''))
+ else:
+ # arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all']),
+ (1, executable+': failed LAZY_BINDINGS'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-bind_at_load','-fstack-protector-all']),
+ (0, ''))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
- (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
- (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
- (1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
- (1, executable+': failed PIE LAZY_BINDINGS CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
- (1, executable+': failed PIE CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
- (1, executable+': failed PIE'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
- (0, ''))
clean_files(source, executable)
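For reference, the x86-only CONTROL_FLOW expectation exercised by the test above can also be reproduced by hand; a minimal sketch, assuming an x86_64 Linux gcc and the `lief` Python module that security-check.py already requires (file names are illustrative):

```bash
echo 'int main(void) { return 0; }' > test1.c

# Hardened flags but no CET: security-check.py is expected to report
# "failed CONTROL_FLOW" and exit non-zero.
gcc test1.c -o test1 -pie -fPIE -fstack-protector-all \
    -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,-z,separate-code
./contrib/devtools/security-check.py test1 || true

# With -fcf-protection=full, main() begins with endbr64 and the check passes.
gcc test1.c -o test1 -pie -fPIE -fstack-protector-all -fcf-protection=full \
    -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,-z,separate-code
./contrib/devtools/security-check.py test1

rm -f test1.c test1
```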
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index 7cfa0de70f..90289f9d40 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -224,7 +224,7 @@ details.
_(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu
riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
- x86\_64-w64-mingw32 x86\_64-apple-darwin")_
+ x86\_64-w64-mingw32 x86\_64-apple-darwin arm64-apple-darwin")_
* _**SOURCES_PATH**_
@@ -249,7 +249,7 @@ details.
Set the path where _extracted_ SDKs can be found. This is passed through to
the depends tree. Note that this is should be set to the _parent_ directory of
the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of
- `$HOME/Downloads/macOS-SDKs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers`).
+ `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`).
The path that this environment variable points to **must be a directory**, and
**NOT a symlink to a directory**.
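Since `arm64-apple-darwin` is now part of the default HOSTS set, a Guix build can also be limited to just that target by overriding HOSTS; a minimal sketch, assuming the SDK has already been extracted under the directory `SDK_PATH` points to (paths are illustrative):

```bash
# Build only the Apple Silicon macOS target.
env HOSTS='arm64-apple-darwin' \
    SDK_PATH="$HOME/Downloads/macOS-SDKs" \
    ./contrib/guix/guix-build
```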
diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build
index 98381f3e24..bfffbda742 100755
--- a/contrib/guix/guix-build
+++ b/contrib/guix/guix-build
@@ -76,7 +76,7 @@ mkdir -p "$VERSION_BASE"
# Default to building for all supported HOSTs (overridable by environment)
export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
x86_64-w64-mingw32
- x86_64-apple-darwin}"
+ x86_64-apple-darwin arm64-apple-darwin}"
# Usage: distsrc_for_host HOST
#
@@ -239,7 +239,7 @@ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --f
time-machine() {
# shellcheck disable=SC2086
guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \
- --commit=6ba510d76d6847065be725e958718002f3b13c7a \
+ --commit=1ef7a03a148cf5f83ab1820444f6bd50d8e732d1 \
--cores="$JOBS" \
--keep-failed \
--fallback \
diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign
index e52ad30b8d..2dd30bfa64 100755
--- a/contrib/guix/guix-codesign
+++ b/contrib/guix/guix-codesign
@@ -91,7 +91,7 @@ fi
################
# Default to building for all supported HOSTs (overridable by environment)
-export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin}"
+export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin arm64-apple-darwin}"
# Usage: distsrc_for_host HOST
#
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 3528030bec..22b922dc9b 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -397,6 +397,11 @@ thus should be able to compile on most platforms where these exist.")
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
+ (substitute* "tests/test_validate.py"
+ (("^(.*)def test_revocation_mode_soft" line indent)
+ (string-append indent
+ "@unittest.skip(\"Disabled by Guix\")\n"
+ line)))
#t))
(replace 'check
(lambda _
@@ -574,7 +579,7 @@ inspecting signatures in Mach-O binaries.")
;; Build tools
gnu-make
libtool
- autoconf
+ autoconf-2.71
automake
pkg-config
bison
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index a685aac1c0..df7eac3ec8 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -13,13 +13,13 @@ When complete, it will have produced `Bitcoin-Core.dmg`.
### Step 1: Obtaining `Xcode.app`
Our current macOS SDK
-(`Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`) can be
+(`Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`) can be
extracted from
-[Xcode_12.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
+[Xcode_12.2.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip).
Alternatively, after logging in to your account go to 'Downloads', then 'More'
-and look for [`Xcode_12.1`](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
+and look for [`Xcode_12.2`](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip).
An Apple ID and cookies enabled for the hostname are needed to download this.
-The `sha256sum` of the archive should be `612443b1894b39368a596ea1607f30cbb0481ad44d5e29c75edb71a6d2cf050f`.
+The `sha256sum` of the archive should be `28d352f8c14a43d9b8a082ac6338dc173cb153f964c6e8fb6ba389e5be528bd0`.
After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip`
archive. This makes the SDK less-trivial to extract on non-macOS machines. One
@@ -30,25 +30,25 @@ approach (tested on Debian Buster) is outlined below:
apt install cpio
git clone https://github.com/bitcoin-core/apple-sdk-tools.git
-# Unpack Xcode_12.1.xip and place the resulting Xcode.app in your current
+# Unpack Xcode_12.2.xip and place the resulting Xcode.app in your current
# working directory
-python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.1.xip | cpio -d -i
+python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.2.xip | cpio -d -i
```
On macOS the process is more straightforward:
```bash
-xip -x Xcode_12.1.xip
+xip -x Xcode_12.2.xip
```
-### Step 2: Generating `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app`
+### Step 2: Generating `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app`
-To generate `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`, run
+To generate `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`, run
the script [`gen-sdk`](./gen-sdk) with the path to `Xcode.app` (extracted in the
previous stage) as the first argument.
```bash
-# Generate a Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz from
+# Generate a Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz from
# the supplied Xcode.app
./contrib/macdeploy/gen-sdk '/path/to/Xcode.app'
```
diff --git a/depends/README.md b/depends/README.md
index 9f0b60adf8..6b6af99dee 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -29,6 +29,7 @@ Common `host-platform-triplet`s for cross compilation are:
- `x86_64-pc-linux-gnu` for x86 Linux
- `x86_64-w64-mingw32` for Win64
- `x86_64-apple-darwin` for macOS
+- `arm64-apple-darwin` for ARM macOS
- `arm-linux-gnueabihf` for Linux ARM 32 bit
- `aarch64-linux-gnu` for Linux ARM 64 bit
- `powerpc64-linux-gnu` for Linux POWER 64-bit (big endian)
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index ea92bb7793..6bf30b499a 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,7 +1,7 @@
OSX_MIN_VERSION=10.15
-OSX_SDK_VERSION=10.15.6
-XCODE_VERSION=12.1
-XCODE_BUILD_ID=12A7403
+OSX_SDK_VERSION=11.0
+XCODE_VERSION=12.2
+XCODE_BUILD_ID=12B45b
LD64_VERSION=609
OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers
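The depends tree consumes the extracted SDK through the same `SDK_PATH` variable used above; a minimal sketch of a cross-build for the newly supported triplet (SDK location and parallelism level are assumptions, see depends/README.md):

```bash
# Cross-compile the depends tree for Apple Silicon macOS against the
# extracted Xcode 12.2 SDK.
make -C depends HOST=arm64-apple-darwin \
    SDK_PATH="$HOME/Downloads/macOS-SDKs" -j"$(nproc)"
```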
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index 5fe2b2bbb8..fe2425ffaf 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -26,8 +26,7 @@ $(package)_config_libraries=filesystem,system,test
$(package)_cxxflags+=-std=c++17
$(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
-$(package)_cxxflags_x86_64_darwin=-fcf-protection=full
-$(package)_cxxflags_mingw32=-fcf-protection=full
+$(package)_cxxflags_x86_64=-fcf-protection=full
endef
define $(package)_preprocess_cmds
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 94e92431a2..f5478a88c4 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -6,7 +6,8 @@ $(package)_sha256_hash=c593001a89f5a85dd2ddf564805deb860e02471171b3f204944857336
$(package)_patches=remove_libstd_link.patch netbsd_kevent_void.patch
define $(package)_set_vars
- $(package)_config_opts=--without-docs --disable-shared --disable-curve --disable-curve-keygen --disable-perf
+ $(package)_config_opts = --without-docs --disable-shared --disable-valgrind
+ $(package)_config_opts += --disable-perf --disable-curve-keygen --disable-curve --disable-libbsd
$(package)_config_opts += --without-libsodium --without-libgssapi_krb5 --without-pgm --without-norm --without-vmci
$(package)_config_opts += --disable-libunwind --disable-radix-tree --without-gcov --disable-dependency-tracking
$(package)_config_opts += --disable-Werror --disable-drafts --enable-option-checking
diff --git a/depends/patches/zeromq/netbsd_kevent_void.patch b/depends/patches/zeromq/netbsd_kevent_void.patch
index 4e36a363fb..845c6bdda6 100644
--- a/depends/patches/zeromq/netbsd_kevent_void.patch
+++ b/depends/patches/zeromq/netbsd_kevent_void.patch
@@ -10,7 +10,7 @@ diff --git a/configure.ac b/configure.ac
index 1a571291..402f8b86 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -308,6 +308,27 @@ case "${host_os}" in
+@@ -307,6 +307,27 @@ case "${host_os}" in
if test "x$libzmq_netbsd_has_atomic" = "xno"; then
AC_DEFINE(ZMQ_FORCE_MUTEXES, 1, [Force to use mutexes])
fi
diff --git a/doc/REST-interface.md b/doc/REST-interface.md
index 51a73b89fc..1f0a07a284 100644
--- a/doc/REST-interface.md
+++ b/doc/REST-interface.md
@@ -108,6 +108,7 @@ $ curl localhost:18332/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76
"value" : 8.8687,
"scriptPubKey" : {
"asm" : "OP_DUP OP_HASH160 1c7cebb529b86a04c683dfa87be49de35bcf589e OP_EQUALVERIFY OP_CHECKSIG",
+      "desc" : "addr(mi7as51dvLJsizWnTMurtRmrP8hG2m1XvD)#gj9tznmy",
"hex" : "76a9141c7cebb529b86a04c683dfa87be49de35bcf589e88ac",
"type" : "pubkeyhash",
"address" : "mi7as51dvLJsizWnTMurtRmrP8hG2m1XvD"
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 490ffd3c00..63315cdcc2 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -20,7 +20,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| PCRE | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) |
| Python (tests) | | [3.6](https://www.python.org/downloads) | | | |
| qrencode | [3.4.4](https://fukuchi.org/works/qrencode) | | No | | |
-| Qt | [5.12.11](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | |
+| Qt | [5.15.2](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | |
| SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | |
| XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) |
| systemtap ([tracing](tracing.md))| [4.5](https://sourceware.org/systemtap/ftp/releases/) | | | | |
diff --git a/doc/release-notes-23508.md b/doc/release-notes-23508.md
new file mode 100644
index 0000000000..098654e00b
--- /dev/null
+++ b/doc/release-notes-23508.md
@@ -0,0 +1,9 @@
+Updated RPCs
+------------
+
+- Information on soft fork status has been moved from `getblockchaininfo`
+ to `getdeploymentinfo` which allows querying soft fork status at any
+ block, rather than just at the chain tip. Inclusion of soft fork
+ status in `getblockchaininfo` can currently be restored using the
+ configuration `-deprecatedrpc=softforks`, but this will be removed in
+ a future release. (#23508)
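As a rough sketch of the behaviour this note describes (a running node is assumed; the block height and output fields are illustrative only):

```bash
# Deployment / soft fork status at the current tip...
bitcoin-cli getdeploymentinfo
# ...and at an arbitrary block in the chain.
bitcoin-cli getdeploymentinfo "$(bitcoin-cli getblockhash 700000)"

# Until the deprecation is removed, the old getblockchaininfo "softforks"
# section can still be restored at startup:
bitcoind -deprecatedrpc=softforks
```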
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index d7b4228566..9bd176f0a0 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -4,10 +4,10 @@
#include <bench/bench.h>
+#include <fs.h>
#include <test/util/setup_common.h>
#include <chrono>
-#include <fstream>
#include <functional>
#include <iostream>
#include <map>
@@ -29,7 +29,7 @@ void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& bench
// nothing to write, bail out
return;
}
- std::ofstream fout(filename);
+ fsbridge::ofstream fout{fs::PathFromString(filename)};
if (fout.is_open()) {
ankerl::nanobench::render(tpl, benchmarkResults, fout);
} else {
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index a9f3f5f84d..52e5cb743f 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -17,8 +17,8 @@
static void DeserializeBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
- char a = '\0';
- stream.write(&a, 1); // Prevent compaction
+ std::byte a{0};
+ stream.write({&a, 1}); // Prevent compaction
bench.unit("block").run([&] {
CBlock block;
@@ -31,8 +31,8 @@ static void DeserializeBlockTest(benchmark::Bench& bench)
static void DeserializeAndCheckBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
- char a = '\0';
- stream.write(&a, 1); // Prevent compaction
+ std::byte a{0};
+ stream.write({&a, 1}); // Prevent compaction
ArgsManager bench_args;
const auto chainParams = CreateChainParams(bench_args, CBaseChainParams::MAIN);
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index 9bc31461d2..2143bcf950 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -23,8 +23,8 @@ struct TestBlockAndIndex {
TestBlockAndIndex()
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
- char a = '\0';
- stream.write(&a, 1); // Prevent compaction
+ std::byte a{0};
+ stream.write({&a, 1}); // Prevent compaction
stream >> block;
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index edec883264..ec07114d6e 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -12,6 +12,7 @@
#include <consensus/consensus.h>
#include <core_io.h>
#include <key_io.h>
+#include <fs.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <primitives/transaction.h>
@@ -158,7 +159,7 @@ static void RegisterLoad(const std::string& strInput)
std::string key = strInput.substr(0, pos);
std::string filename = strInput.substr(pos + 1, std::string::npos);
- FILE *f = fopen(filename.c_str(), "r");
+ FILE *f = fsbridge::fopen(filename.c_str(), "r");
if (!f) {
std::string strErr = "Cannot open file " + filename;
throw std::runtime_error(strErr);
@@ -433,13 +434,16 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strIn
if (pos==0)
throw std::runtime_error("TX output value not specified");
- if (pos != std::string::npos) {
+ if (pos == std::string::npos) {
+ pos = 0;
+ } else {
// Extract and validate VALUE
value = ExtractAndValidateValue(strInput.substr(0, pos));
+ ++pos;
}
// extract and validate DATA
- std::string strData = strInput.substr(pos + 1, std::string::npos);
+ const std::string strData{strInput.substr(pos, std::string::npos)};
if (!IsHex(strData))
throw std::runtime_error("invalid TX output data");
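With the corrected position handling above, `outdata` accepts its hex payload either with or without a leading value; a quick sketch (amounts and hex data are illustrative only):

```bash
# VALUE:DATA form: the value is parsed, the remaining hex becomes the data output.
bitcoin-tx -create outdata=0.01:0badf00d
# DATA-only form: the whole argument is treated as hex, the value defaults to 0.
bitcoin-tx -create outdata=0badf00d
```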
diff --git a/src/chain.h b/src/chain.h
index 55bdf4cd56..8b03e66a96 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -10,6 +10,7 @@
#include <consensus/params.h>
#include <flatfile.h>
#include <primitives/block.h>
+#include <sync.h>
#include <tinyformat.h>
#include <uint256.h>
@@ -37,6 +38,8 @@ static constexpr int64_t TIMESTAMP_WINDOW = MAX_FUTURE_BLOCK_TIME;
*/
static constexpr int64_t MAX_BLOCK_TIME_GAP = 90 * 60;
+extern RecursiveMutex cs_main;
+
class CBlockFileInfo
{
public:
@@ -161,13 +164,13 @@ public:
int nHeight{0};
//! Which # file this block is stored in (blk?????.dat)
- int nFile{0};
+ int nFile GUARDED_BY(::cs_main){0};
//! Byte offset within blk?????.dat where this block's data is stored
- unsigned int nDataPos{0};
+ unsigned int nDataPos GUARDED_BY(::cs_main){0};
//! Byte offset within rev?????.dat where this block's undo data is stored
- unsigned int nUndoPos{0};
+ unsigned int nUndoPos GUARDED_BY(::cs_main){0};
//! (memory only) Total amount of work (expected number of hashes) in the chain up to and including this block
arith_uint256 nChainWork{};
@@ -195,7 +198,7 @@ public:
//! load to avoid the block index being spuriously rewound.
//! @sa NeedsRedownload
//! @sa ActivateSnapshot
- uint32_t nStatus{0};
+ uint32_t nStatus GUARDED_BY(::cs_main){0};
//! block header
int32_t nVersion{0};
@@ -223,8 +226,9 @@ public:
{
}
- FlatFilePos GetBlockPos() const
+ FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
FlatFilePos ret;
if (nStatus & BLOCK_HAVE_DATA) {
ret.nFile = nFile;
@@ -233,8 +237,9 @@ public:
return ret;
}
- FlatFilePos GetUndoPos() const
+ FlatFilePos GetUndoPos() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
FlatFilePos ret;
if (nStatus & BLOCK_HAVE_UNDO) {
ret.nFile = nFile;
@@ -306,7 +311,9 @@ public:
//! Check whether this block index entry is valid up to the passed validity level.
bool IsValid(enum BlockStatus nUpTo = BLOCK_VALID_TRANSACTIONS) const
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed.
if (nStatus & BLOCK_FAILED_MASK)
return false;
@@ -315,12 +322,17 @@ public:
//! @returns true if the block is assumed-valid; this means it is queued to be
//! validated by a background chainstate.
- bool IsAssumedValid() const { return nStatus & BLOCK_ASSUMED_VALID; }
+ bool IsAssumedValid() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
+ {
+ AssertLockHeld(::cs_main);
+ return nStatus & BLOCK_ASSUMED_VALID;
+ }
//! Raise the validity level of this block index entry.
//! Returns true if the validity was changed.
- bool RaiseValidity(enum BlockStatus nUpTo)
+ bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed.
if (nStatus & BLOCK_FAILED_MASK) return false;
@@ -370,6 +382,7 @@ public:
SERIALIZE_METHODS(CDiskBlockIndex, obj)
{
+ LOCK(::cs_main);
int _nVersion = s.GetVersion();
if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT_MODE(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
diff --git a/src/common/bloom.cpp b/src/common/bloom.cpp
index 0bb72dbcbb..c744d05a0e 100644
--- a/src/common/bloom.cpp
+++ b/src/common/bloom.cpp
@@ -62,7 +62,7 @@ void CBloomFilter::insert(const COutPoint& outpoint)
{
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << outpoint;
- insert(stream);
+ insert(MakeUCharSpan(stream));
}
bool CBloomFilter::contains(Span<const unsigned char> vKey) const
@@ -83,7 +83,7 @@ bool CBloomFilter::contains(const COutPoint& outpoint) const
{
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << outpoint;
- return contains(stream);
+ return contains(MakeUCharSpan(stream));
}
bool CBloomFilter::IsWithinSizeConstraints() const
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 067f1e4f4e..5ea62cf3ed 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -8,6 +8,7 @@
#include <consensus/consensus.h>
#include <consensus/validation.h>
#include <key_io.h>
+#include <script/descriptor.h>
#include <script/script.h>
#include <script/standard.h>
#include <serialize.h>
@@ -152,6 +153,7 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool include
CTxDestination address;
out.pushKV("asm", ScriptToAsmStr(scriptPubKey));
+ out.pushKV("desc", InferDescriptor(scriptPubKey, DUMMY_SIGNING_PROVIDER)->ToString());
if (include_hex) out.pushKV("hex", HexStr(scriptPubKey));
std::vector<std::vector<unsigned char>> solns;
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 12db0fffcc..1109cb5888 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -147,7 +147,7 @@ public:
template<typename K> bool GetKey(K& key) {
leveldb::Slice slKey = piter->key();
try {
- CDataStream ssKey(MakeUCharSpan(slKey), SER_DISK, CLIENT_VERSION);
+ CDataStream ssKey{MakeByteSpan(slKey), SER_DISK, CLIENT_VERSION};
ssKey >> key;
} catch (const std::exception&) {
return false;
@@ -158,7 +158,7 @@ public:
template<typename V> bool GetValue(V& value) {
leveldb::Slice slValue = piter->value();
try {
- CDataStream ssValue(MakeUCharSpan(slValue), SER_DISK, CLIENT_VERSION);
+ CDataStream ssValue{MakeByteSpan(slValue), SER_DISK, CLIENT_VERSION};
ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent));
ssValue >> value;
} catch (const std::exception&) {
@@ -244,7 +244,7 @@ public:
dbwrapper_private::HandleError(status);
}
try {
- CDataStream ssValue(MakeUCharSpan(strValue), SER_DISK, CLIENT_VERSION);
+ CDataStream ssValue{MakeByteSpan(strValue), SER_DISK, CLIENT_VERSION};
ssValue.Xor(obfuscate_key);
ssValue >> value;
} catch (const std::exception&) {
diff --git a/src/fs.cpp b/src/fs.cpp
index 34a0348578..8fcadcb3ef 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -7,7 +7,6 @@
#ifndef WIN32
#include <cstring>
#include <fcntl.h>
-#include <string>
#include <sys/file.h>
#include <sys/utsname.h>
#include <unistd.h>
@@ -20,6 +19,9 @@
#include <windows.h>
#endif
+#include <cassert>
+#include <string>
+
namespace fsbridge {
FILE *fopen(const fs::path& p, const char *mode)
diff --git a/src/hash.h b/src/hash.h
index 1456a899d8..9f582842c1 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -111,8 +111,9 @@ public:
int GetType() const { return nType; }
int GetVersion() const { return nVersion; }
- void write(const char *pch, size_t size) {
- ctx.Write((const unsigned char*)pch, size);
+ void write(Span<const std::byte> src)
+ {
+ ctx.Write(UCharCast(src.data()), src.size());
}
/** Compute the double-SHA256 hash of all data written to this object.
@@ -162,18 +163,18 @@ private:
public:
explicit CHashVerifier(Source* source_) : CHashWriter(source_->GetType(), source_->GetVersion()), source(source_) {}
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
- source->read(pch, nSize);
- this->write(pch, nSize);
+ source->read(dst);
+ this->write(dst);
}
void ignore(size_t nSize)
{
- char data[1024];
+ std::byte data[1024];
while (nSize > 0) {
size_t now = std::min<size_t>(nSize, 1024);
- read(data, now);
+ read({data, now});
nSize -= now;
}
}
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index e9aeb58194..e1d807f39a 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -59,7 +59,9 @@ bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
// Exclude genesis block transaction because outputs are not spendable.
if (pindex->nHeight == 0) return true;
- CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
+ CDiskTxPos pos{
+ WITH_LOCK(::cs_main, return pindex->GetBlockPos()),
+ GetSizeOfCompactSize(block.vtx.size())};
std::vector<std::pair<uint256, CDiskTxPos>> vPos;
vPos.reserve(block.vtx.size());
for (const auto& tx : block.vtx) {
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 4f5105a5c1..ddfb4bda95 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -116,9 +116,6 @@ public:
//! or one of its ancestors.
virtual std::optional<int> findLocatorFork(const CBlockLocator& locator) = 0;
- //! Check if transaction will be final given chain height current time.
- virtual bool checkFinalTx(const CTransaction& tx) = 0;
-
//! Return whether node has the block and optionally return block metadata
//! or contents.
virtual bool findBlock(const uint256& hash, const FoundBlock& block={}) = 0;
diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h
index aa33a3c951..f26ac866dc 100644
--- a/src/interfaces/wallet.h
+++ b/src/interfaces/wallet.h
@@ -405,7 +405,6 @@ struct WalletTxStatus
int depth_in_main_chain;
unsigned int time_received;
uint32_t lock_time;
- bool is_final;
bool is_trusted;
bool is_abandoned;
bool is_coinbase;
diff --git a/src/net.cpp b/src/net.cpp
index 9f0e28df42..be56d1e2d2 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -553,12 +553,14 @@ std::string ConnectionTypeAsString(ConnectionType conn_type)
CService CNode::GetAddrLocal() const
{
- LOCK(cs_addrLocal);
+ AssertLockNotHeld(m_addr_local_mutex);
+ LOCK(m_addr_local_mutex);
return addrLocal;
}
void CNode::SetAddrLocal(const CService& addrLocalIn) {
- LOCK(cs_addrLocal);
+ AssertLockNotHeld(m_addr_local_mutex);
+ LOCK(m_addr_local_mutex);
if (addrLocal.IsValid()) {
error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToString(), addrLocalIn.ToString());
} else {
@@ -657,7 +659,7 @@ bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete)
// Store received bytes per message command
// to prevent a memory DOS, only allow valid commands
- auto i = mapRecvBytesPerMsgCmd.find(msg.m_command);
+ auto i = mapRecvBytesPerMsgCmd.find(msg.m_type);
if (i == mapRecvBytesPerMsgCmd.end()) {
i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER);
}
@@ -747,7 +749,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds
CNetMessage msg(std::move(vRecv));
// store command string, time, and sizes
- msg.m_command = hdr.GetCommand();
+ msg.m_type = hdr.GetCommand();
msg.m_time = time;
msg.m_message_size = hdr.nMessageSize;
msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE;
@@ -760,7 +762,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds
// Check checksum and header command string
if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) {
LogPrint(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n",
- SanitizeString(msg.m_command), msg.m_message_size,
+ SanitizeString(msg.m_type), msg.m_message_size,
HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)),
HexStr(hdr.pchChecksum),
m_node_id);
@@ -1878,8 +1880,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
auto start = GetTime<std::chrono::microseconds>();
// Minimum time before next feeler connection (in microseconds).
- auto next_feeler = PoissonNextSend(start, FEELER_INTERVAL);
- auto next_extra_block_relay = PoissonNextSend(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+ auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL);
+ auto next_extra_block_relay = GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
@@ -1999,7 +2001,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
//
// This is similar to the logic for trying extra outbound (full-relay)
// peers, except:
- // - we do this all the time on a poisson timer, rather than just when
+ // - we do this all the time on an exponential timer, rather than just when
// our tip is stale
// - we potentially disconnect our next-youngest block-relay-only peer, if our
// newest block-relay-only peer delivers a block more recently.
@@ -2008,10 +2010,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// Because we can promote these connections to block-relay-only
// connections, they do not get their own ConnectionType enum
// (similar to how we deal with extra outbound peers).
- next_extra_block_relay = PoissonNextSend(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+ next_extra_block_relay = GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
conn_type = ConnectionType::BLOCK_RELAY;
} else if (now > next_feeler) {
- next_feeler = PoissonNextSend(now, FEELER_INTERVAL);
+ next_feeler = GetExponentialRand(now, FEELER_INTERVAL);
conn_type = ConnectionType::FEELER;
fFeeler = true;
} else {
@@ -3058,23 +3060,6 @@ bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
return found != nullptr && NodeFullyConnected(found) && func(found);
}
-std::chrono::microseconds CConnman::PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval)
-{
- if (m_next_send_inv_to_incoming.load() < now) {
- // If this function were called from multiple threads simultaneously
- // it would possible that both update the next send variable, and return a different result to their caller.
- // This is not possible in practice as only the net processing thread invokes this function.
- m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval);
- }
- return m_next_send_inv_to_incoming;
-}
-
-std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval)
-{
- double unscaled = -log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */);
- return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
-}
-
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
{
return CSipHasher(nSeed0, nSeed1).Write(id);
@@ -3106,11 +3091,11 @@ void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Spa
CAutoFile f(fsbridge::fopen(path, "ab"), SER_DISK, CLIENT_VERSION);
ser_writedata64(f, now.count());
- f.write(msg_type.data(), msg_type.length());
+ f.write(MakeByteSpan(msg_type));
for (auto i = msg_type.length(); i < CMessageHeader::COMMAND_SIZE; ++i) {
f << uint8_t{'\0'};
}
uint32_t size = data.size();
ser_writedata32(f, size);
- f.write((const char*)data.data(), data.size());
+ f.write(AsBytes(data));
}
diff --git a/src/net.h b/src/net.h
index 1f0ebedcf9..4301733525 100644
--- a/src/net.h
+++ b/src/net.h
@@ -278,7 +278,7 @@ public:
/** Transport protocol agnostic message container.
* Ideally it should only contain receive time, payload,
- * command and size.
+ * type and size.
*/
class CNetMessage {
public:
@@ -286,7 +286,7 @@ public:
std::chrono::microseconds m_time{0}; //!< time of message receipt
uint32_t m_message_size{0}; //!< size of the payload
uint32_t m_raw_message_size{0}; //!< used wire size of the message (including header/checksum)
- std::string m_command;
+ std::string m_type;
CNetMessage(CDataStream&& recv_in) : m_recv(std::move(recv_in)) {}
@@ -618,9 +618,9 @@ public:
return m_greatest_common_version;
}
- CService GetAddrLocal() const;
+ CService GetAddrLocal() const LOCKS_EXCLUDED(m_addr_local_mutex);
//! May not be called more than once
- void SetAddrLocal(const CService& addrLocalIn);
+ void SetAddrLocal(const CService& addrLocalIn) LOCKS_EXCLUDED(m_addr_local_mutex);
CNode* AddRef()
{
@@ -693,8 +693,8 @@ private:
std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
// Our address, as reported by the peer
- CService addrLocal GUARDED_BY(cs_addrLocal);
- mutable RecursiveMutex cs_addrLocal;
+ CService addrLocal GUARDED_BY(m_addr_local_mutex);
+ mutable Mutex m_addr_local_mutex;
mapMsgCmdSize mapSendBytesPerMsgCmd GUARDED_BY(cs_vSend);
mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv);
@@ -936,12 +936,6 @@ public:
void WakeMessageHandler();
- /** Attempts to obfuscate tx time through exponentially distributed emitting.
- Works assuming that a single interval is used.
- Variable intervals will result in privacy decrease.
- */
- std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval);
-
/** Return true if we should disconnect the peer for failing an inactivity check. */
bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const;
@@ -1221,8 +1215,6 @@ private:
*/
std::atomic_bool m_start_extra_block_relay_peers{false};
- std::atomic<std::chrono::microseconds> m_next_send_inv_to_incoming{0us};
-
/**
* A vector of -bind=<address>:<port>=onion arguments each of which is
* an address and port that are designated for incoming Tor connections.
@@ -1270,9 +1262,6 @@ private:
friend struct ConnmanTestMsg;
};
-/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */
-std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval);
-
/** Dump binary message to file, with timestamp */
void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 273cb4fccb..3cebca1a77 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -320,7 +320,7 @@ public:
/** Implement PeerManager */
void StartScheduledTasks(CScheduler& scheduler) override;
void CheckForStaleTipAndEvictPeers() override;
- bool FetchBlock(NodeId id, const uint256& hash, const CBlockIndex& index) override;
+ std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
void SendPings() override;
@@ -450,6 +450,8 @@ private:
*/
std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
+ std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
+
/** Number of nodes with fSyncStarted. */
int nSyncStarted GUARDED_BY(cs_main) = 0;
@@ -524,6 +526,15 @@ private:
Mutex m_recent_confirmed_transactions_mutex;
CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};
+ /**
+ * For sending `inv`s to inbound peers, we use a single (exponentially
+ * distributed) timer for all peers. If we used a separate timer for each
+ * peer, a spy node could make multiple inbound connections to us to
+ * accurately determine when we received the transaction (and potentially
+ * determine the transaction's origin). */
+ std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
+ std::chrono::seconds average_interval);
+
/** Have we requested this block from a peer */
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -825,6 +836,18 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS
nPreferredDownload += state->fPreferredDownload;
}
+std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
+ std::chrono::seconds average_interval)
+{
+ if (m_next_inv_to_inbounds.load() < now) {
+ // If this function were called from multiple threads simultaneously
+ // it would possible that both update the next send variable, and return a different result to their caller.
+ // This is not possible in practice as only the net processing thread invokes this function.
+ m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
+ }
+ return m_next_inv_to_inbounds;
+}
+
bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
{
return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
@@ -1437,39 +1460,39 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
-bool PeerManagerImpl::FetchBlock(NodeId id, const uint256& hash, const CBlockIndex& index)
+std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
{
- if (fImporting || fReindex) return false;
+ if (fImporting) return "Importing...";
+ if (fReindex) return "Reindexing...";
LOCK(cs_main);
// Ensure this peer exists and hasn't been disconnected
- CNodeState* state = State(id);
- if (state == nullptr) return false;
+ CNodeState* state = State(peer_id);
+ if (state == nullptr) return "Peer does not exist";
// Ignore pre-segwit peers
- if (!state->fHaveWitness) return false;
+ if (!state->fHaveWitness) return "Pre-SegWit peer";
- // Mark block as in-flight unless it already is
- if (!BlockRequested(id, index)) return false;
+ // Mark block as in-flight unless it already is (for this peer).
+ // If a block was already in-flight for a different peer, its BLOCKTXN
+ // response will be dropped.
+ if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
// Construct message to request the block
+ const uint256& hash{block_index.GetBlockHash()};
std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
// Send block request message to the peer
- bool success = m_connman.ForNode(id, [this, &invs](CNode* node) {
+ bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
const CNetMsgMaker msgMaker(node->GetCommonVersion());
this->m_connman.PushMessage(node, msgMaker.Make(NetMsgType::GETDATA, invs));
return true;
});
- if (success) {
- LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- hash.ToString(), id);
- } else {
- RemoveBlockRequest(hash);
- LogPrint(BCLog::NET, "Failed to request block %s from peer=%d\n",
- hash.ToString(), id);
- }
- return success;
+ if (!success) return "Peer not fully connected";
+
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+ hash.ToString(), peer_id);
+ return std::nullopt;
}
std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman,
@@ -1857,7 +1880,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
// Fast-path: in this case it is possible to serve the block directly from disk,
// as the network format matches the format on disk
std::vector<uint8_t> block_data;
- if (!ReadRawBlockFromDisk(block_data, pindex, m_chainparams.MessageStart())) {
+ if (!ReadRawBlockFromDisk(block_data, pindex->GetBlockPos(), m_chainparams.MessageStart())) {
assert(!"cannot load block from disk");
}
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, Span{block_data}));
@@ -4154,32 +4177,28 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
pfrom->GetId(),
pfrom->m_addr_name.c_str(),
pfrom->ConnectionTypeAsString().c_str(),
- msg.m_command.c_str(),
+ msg.m_type.c_str(),
msg.m_recv.size(),
msg.m_recv.data()
);
if (gArgs.GetBoolArg("-capturemessages", false)) {
- CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
+ CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
}
msg.SetVersion(pfrom->GetCommonVersion());
- const std::string& msg_type = msg.m_command;
-
- // Message size
- unsigned int nMessageSize = msg.m_message_size;
try {
- ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
+ ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
if (interruptMsgProc) return false;
{
LOCK(peer->m_getdata_requests_mutex);
if (!peer->m_getdata_requests.empty()) fMoreWork = true;
}
} catch (const std::exception& e) {
- LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
+ LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
} catch (...) {
- LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
+ LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
}
return fMoreWork;
@@ -4434,13 +4453,13 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros
FastRandomContext insecure_rand;
PushAddress(peer, *local_addr, insecure_rand);
}
- peer.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+ peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
// We sent an `addr` message to this peer recently. Nothing more to do.
if (current_time <= peer.m_next_addr_send) return;
- peer.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
+ peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
// Should be impossible since we always check size before adding to
@@ -4512,7 +4531,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds c
m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend));
pto.m_tx_relay->lastSentFeeFilter = filterToSend;
}
- pto.m_tx_relay->m_next_send_feefilter = PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
+ pto.m_tx_relay->m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
@@ -4792,9 +4811,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
- pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
+ pto->m_tx_relay->nNextInvSend = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else {
- pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
+ pto->m_tx_relay->nNextInvSend = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
}
}
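
The hunks above swap the Poisson-named helpers for GetExponentialRand when scheduling addr, feefilter and inventory sends. A minimal sketch of the check-then-reschedule pattern (SendAddr is a hypothetical stand-in, not part of the patch):

    // Fire only once the sampled timestamp has passed, then draw a fresh one.
    if (current_time > peer.m_next_addr_send) {
        SendAddr(peer); // hypothetical helper for the real message-building code
        // Memoryless: the next send time leaks nothing about the previous one.
        peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
    }
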
diff --git a/src/net_processing.h b/src/net_processing.h
index 27775cea97..e30f9f516c 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -45,12 +45,11 @@ public:
/**
* Attempt to manually fetch block from a given peer. We must already have the header.
*
- * @param[in] id The peer id
- * @param[in] hash The block hash
- * @param[in] pindex The blockindex
- * @returns Whether a request was successfully made
+ * @param[in] peer_id The peer id
+ * @param[in] block_index The blockindex
+ * @returns std::nullopt if a request was successfully made, otherwise an error message
*/
- virtual bool FetchBlock(NodeId id, const uint256& hash, const CBlockIndex& pindex) = 0;
+ virtual std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) = 0;
/** Begin running background tasks, should only be called once */
virtual void StartScheduledTasks(CScheduler& scheduler) = 0;
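
With the new signature a caller distinguishes success from failure by whether an error string comes back, not by a bool. A sketch of the calling convention (the getblockfrompeer handler further down in this diff follows the same pattern):

    // Sketch only: surface the error text from the std::optional return value.
    if (const auto err{peerman.FetchBlock(peer_id, *block_index)}) {
        LogPrintf("FetchBlock failed: %s\n", *err);
    }
    // std::nullopt means the GETDATA request was handed to the peer.
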
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index cbfdcb6f11..7691c9a5ce 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -429,6 +429,7 @@ CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
bool IsBlockPruned(const CBlockIndex* pblockindex)
{
+ AssertLockHeld(::cs_main);
return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
}
@@ -513,7 +514,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
- FlatFilePos pos = pindex->GetUndoPos();
+ const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())};
+
if (pos.IsNull()) {
return error("%s: no undo data available", __func__);
}
@@ -712,6 +714,7 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
+ AssertLockHeld(::cs_main);
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
FlatFilePos _pos;
@@ -810,7 +813,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
}
block.resize(blk_size); // Zeroing of memory is intentional here
- filein.read((char*)block.data(), blk_size);
+ filein.read(MakeWritableByteSpan(block));
} catch (const std::exception& e) {
return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
}
@@ -818,17 +821,6 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
return true;
}
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
-{
- FlatFilePos block_pos;
- {
- LOCK(cs_main);
- block_pos = pindex->GetBlockPos();
- }
-
- return ReadRawBlockFromDisk(block, block_pos, message_start);
-}
-
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp)
{
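
The added AssertLockHeld and WITH_LOCK calls pair with the EXCLUSIVE_LOCKS_REQUIRED annotations introduced in blockstorage.h below: a function that needs cs_main for its whole body is annotated and asserts the lock, while a caller that only needs it for one expression wraps that expression in WITH_LOCK. A hedged sketch with an illustrative helper (not from the patch):

    // Annotated callee: thread-safety analysis checks callers at compile time,
    // the assert catches violations at runtime.
    int GetHeightLocked(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
    {
        AssertLockHeld(::cs_main);
        return pindex->nHeight;
    }

    // Caller that only needs the lock for a single expression.
    const int height{WITH_LOCK(::cs_main, return GetHeightLocked(pindex))};
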
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 78c9210892..42e46797d2 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -7,12 +7,15 @@
#include <fs.h>
#include <protocol.h> // For CMessageHeader::MessageStartChars
+#include <sync.h>
#include <txdb.h>
#include <atomic>
#include <cstdint>
#include <vector>
+extern RecursiveMutex cs_main;
+
class ArgsManager;
class BlockValidationState;
class CBlock;
@@ -146,7 +149,8 @@ public:
/** Get block file info entry for one block file */
CBlockFileInfo* GetBlockFileInfo(size_t n);
- bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams);
+ bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
@@ -163,7 +167,7 @@ public:
};
//! Check whether the block associated with this index entry is pruned or not.
-bool IsBlockPruned(const CBlockIndex* pblockindex);
+bool IsBlockPruned(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
void CleanupBlockRevFiles();
@@ -181,7 +185,6 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start);
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start);
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex);
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 1a48957f0f..ffad289fa9 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -486,11 +486,6 @@ public:
const CChain& active = Assert(m_node.chainman)->ActiveChain();
return active.GetLocator();
}
- bool checkFinalTx(const CTransaction& tx) override
- {
- LOCK(cs_main);
- return CheckFinalTx(chainman().ActiveChain().Tip(), tx);
- }
std::optional<int> findLocatorFork(const CBlockLocator& locator) override
{
LOCK(cs_main);
diff --git a/src/policy/packages.h b/src/policy/packages.h
index d2744f1265..9f274f6b7d 100644
--- a/src/policy/packages.h
+++ b/src/policy/packages.h
@@ -25,6 +25,7 @@ enum class PackageValidationResult {
PCKG_RESULT_UNSET = 0, //!< Initial value. The package has not yet been rejected.
PCKG_POLICY, //!< The package itself is invalid (e.g. too many transactions).
PCKG_TX, //!< At least one tx is invalid.
+ PCKG_MEMPOOL_ERROR, //!< Mempool logic error.
};
/** A package is an ordered list of transactions. The transactions cannot conflict with (spend the
diff --git a/src/psbt.cpp b/src/psbt.cpp
index 8248609ba6..c8c73e130b 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -399,7 +399,7 @@ bool DecodeBase64PSBT(PartiallySignedTransaction& psbt, const std::string& base6
bool DecodeRawPSBT(PartiallySignedTransaction& psbt, const std::string& tx_data, std::string& error)
{
- CDataStream ss_data(MakeUCharSpan(tx_data), SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ss_data(MakeByteSpan(tx_data), SER_NETWORK, PROTOCOL_VERSION);
try {
ss_data >> psbt;
if (!ss_data.empty()) {
diff --git a/src/pubkey.h b/src/pubkey.h
index 349081205b..dfe06f834c 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -142,14 +142,14 @@ public:
{
unsigned int len = size();
::WriteCompactSize(s, len);
- s.write((char*)vch, len);
+ s.write(AsBytes(Span{vch, len}));
}
template <typename Stream>
void Unserialize(Stream& s)
{
const unsigned int len(::ReadCompactSize(s));
if (len <= SIZE) {
- s.read((char*)vch, len);
+ s.read(AsWritableBytes(Span{vch, len}));
if (len != size()) {
Invalidate();
}
diff --git a/src/qt/guiconstants.h b/src/qt/guiconstants.h
index 1adcd5b6b9..fcdf6056c9 100644
--- a/src/qt/guiconstants.h
+++ b/src/qt/guiconstants.h
@@ -33,8 +33,6 @@ static const bool DEFAULT_SPLASHSCREEN = true;
#define COLOR_NEGATIVE QColor(255, 0, 0)
/* Transaction list -- bare address (without label) */
#define COLOR_BAREADDRESS QColor(140, 140, 140)
-/* Transaction list -- TX status decoration - open until date */
-#define COLOR_TX_STATUS_OPENUNTILDATE QColor(64, 64, 255)
/* Transaction list -- TX status decoration - danger, tx needs attention */
#define COLOR_TX_STATUS_DANGER QColor(200, 100, 100)
/* Transaction list -- TX status decoration - default color */
diff --git a/src/qt/psbtoperationsdialog.cpp b/src/qt/psbtoperationsdialog.cpp
index 0962dfe9db..d328290cbc 100644
--- a/src/qt/psbtoperationsdialog.cpp
+++ b/src/qt/psbtoperationsdialog.cpp
@@ -158,7 +158,7 @@ void PSBTOperationsDialog::saveTransaction() {
if (filename.isEmpty()) {
return;
}
- std::ofstream out(filename.toLocal8Bit().data(), std::ofstream::out | std::ofstream::binary);
+ fsbridge::ofstream out{filename.toLocal8Bit().data(), fsbridge::ofstream::out | fsbridge::ofstream::binary};
out << ssTx.str();
out.close();
showStatus(tr("PSBT saved to disk."), StatusLevel::INFO);
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 50436a46d8..1206f610cd 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -509,7 +509,7 @@ void SendCoinsDialog::sendButtonClicked([[maybe_unused]] bool checked)
if (filename.isEmpty()) {
return;
}
- std::ofstream out(filename.toLocal8Bit().data(), std::ofstream::out | std::ofstream::binary);
+ fsbridge::ofstream out{filename.toLocal8Bit().data(), fsbridge::ofstream::out | fsbridge::ofstream::binary};
out << ssTx.str();
out.close();
Q_EMIT message(tr("PSBT saved"), "PSBT saved to disk", CClientUIInterface::MSG_INFORMATION);
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index 0504639cde..be5851d627 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -18,7 +18,6 @@
#include <interfaces/wallet.h>
#include <key_io.h>
#include <policy/policy.h>
-#include <script/script.h>
#include <util/system.h>
#include <validation.h>
#include <wallet/ismine.h>
@@ -35,14 +34,6 @@ using wallet::isminetype;
QString TransactionDesc::FormatTxStatus(const interfaces::WalletTx& wtx, const interfaces::WalletTxStatus& status, bool inMempool, int numBlocks)
{
- if (!status.is_final)
- {
- if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD)
- return tr("Open for %n more block(s)", "", wtx.tx->nLockTime - numBlocks);
- else
- return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.tx->nLockTime));
- }
- else
{
int nDepth = status.depth_in_main_chain;
if (nDepth < 0) {
diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp
index 5386569973..26144ba197 100644
--- a/src/qt/transactionrecord.cpp
+++ b/src/qt/transactionrecord.cpp
@@ -179,21 +179,8 @@ void TransactionRecord::updateStatus(const interfaces::WalletTxStatus& wtx, cons
status.depth = wtx.depth_in_main_chain;
status.m_cur_block_hash = block_hash;
- const bool up_to_date = ((int64_t)QDateTime::currentMSecsSinceEpoch() / 1000 - block_time < MAX_BLOCK_TIME_GAP);
- if (up_to_date && !wtx.is_final) {
- if (wtx.lock_time < LOCKTIME_THRESHOLD) {
- status.status = TransactionStatus::OpenUntilBlock;
- status.open_for = wtx.lock_time - numBlocks;
- }
- else
- {
- status.status = TransactionStatus::OpenUntilDate;
- status.open_for = wtx.lock_time;
- }
- }
// For generated transactions, determine maturity
- else if(type == TransactionRecord::Generated)
- {
+ if (type == TransactionRecord::Generated) {
if (wtx.blocks_to_maturity > 0)
{
status.status = TransactionStatus::Immature;
diff --git a/src/qt/transactionrecord.h b/src/qt/transactionrecord.h
index 1c139efabc..dd34656d5f 100644
--- a/src/qt/transactionrecord.h
+++ b/src/qt/transactionrecord.h
@@ -30,8 +30,6 @@ public:
enum Status {
Confirmed, /**< Have 6 or more confirmations (normal tx) or fully mature (mined tx) **/
/// Normal (sent/received) transactions
- OpenUntilDate, /**< Transaction not yet final, waiting for date */
- OpenUntilBlock, /**< Transaction not yet final, waiting for block */
Unconfirmed, /**< Not yet mined into a block **/
Confirming, /**< Confirmed, but waiting for the recommended number of confirmations **/
Conflicted, /**< Conflicts with other transaction or mempool **/
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index b42c3f8c24..44b4fee2e7 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -316,12 +316,6 @@ QString TransactionTableModel::formatTxStatus(const TransactionRecord *wtx) cons
switch(wtx->status.status)
{
- case TransactionStatus::OpenUntilBlock:
- status = tr("Open for %n more block(s)","",wtx->status.open_for);
- break;
- case TransactionStatus::OpenUntilDate:
- status = tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx->status.open_for));
- break;
case TransactionStatus::Unconfirmed:
status = tr("Unconfirmed");
break;
@@ -475,9 +469,6 @@ QVariant TransactionTableModel::txStatusDecoration(const TransactionRecord *wtx)
{
switch(wtx->status.status)
{
- case TransactionStatus::OpenUntilBlock:
- case TransactionStatus::OpenUntilDate:
- return COLOR_TX_STATUS_OPENUNTILDATE;
case TransactionStatus::Unconfirmed:
return QIcon(":/icons/transaction_0");
case TransactionStatus::Abandoned:
diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp
index 98f5ebce99..fba83dd510 100644
--- a/src/qt/walletframe.cpp
+++ b/src/qt/walletframe.cpp
@@ -210,7 +210,7 @@ void WalletFrame::gotoLoadPSBT(bool from_clipboard)
Q_EMIT message(tr("Error"), tr("PSBT file must be smaller than 100 MiB"), CClientUIInterface::MSG_ERROR);
return;
}
- std::ifstream in(filename.toLocal8Bit().data(), std::ios::binary);
+ fsbridge::ifstream in{filename.toLocal8Bit().data(), std::ios::binary};
data = std::string(std::istreambuf_iterator<char>{in}, {});
}
diff --git a/src/random.cpp b/src/random.cpp
index 6eb06c5d47..5dae80fe31 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -19,6 +19,7 @@
#include <sync.h> // for Mutex
#include <util/time.h> // for GetTimeMicros()
+#include <cmath>
#include <stdlib.h>
#include <thread>
@@ -714,3 +715,9 @@ void RandomInit()
ReportHardwareRand();
}
+
+std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
+{
+ double unscaled = -std::log1p(GetRand(uint64_t{1} << 48) * -0.0000000000000035527136788 /* -1/2^48 */);
+ return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
+}
diff --git a/src/random.h b/src/random.h
index 0c6dc24983..5174c553fb 100644
--- a/src/random.h
+++ b/src/random.h
@@ -10,7 +10,7 @@
#include <crypto/common.h>
#include <uint256.h>
-#include <chrono> // For std::chrono::microseconds
+#include <chrono>
#include <cstdint>
#include <limits>
@@ -82,6 +82,18 @@ D GetRandomDuration(typename std::common_type<D>::type max) noexcept
};
constexpr auto GetRandMicros = GetRandomDuration<std::chrono::microseconds>;
constexpr auto GetRandMillis = GetRandomDuration<std::chrono::milliseconds>;
+
+/**
+ * Return a timestamp in the future sampled from an exponential distribution
+ * (https://en.wikipedia.org/wiki/Exponential_distribution). This distribution
+ * is memoryless and should be used for repeated network events (e.g. sending a
+ * certain type of message) to minimize leaking information to observers.
+ *
+ * The probability of an event occurring before time x is 1 - e^-(x/a) where a
+ * is the average interval between events.
+ */
+std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval);
+
int GetRandInt(int nMax) noexcept;
uint256 GetRandHash() noexcept;
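
The implementation in random.cpp above is inverse-CDF sampling: with U uniform on [0, 1), -log(1 - U) is exponentially distributed with mean 1, and GetRand(2^48)/2^48 supplies U (the -1/2^48 constant folds the division and sign into the log1p argument). Scaling by average_interval then gives the documented 1 - e^-(x/a) law. For comparison, an equivalent sketch using the standard library (an assumption for illustration only; the codebase deliberately uses its own RNG):

    #include <chrono>
    #include <random>

    std::chrono::microseconds NextEventTime(std::chrono::microseconds now,
                                            std::chrono::seconds average_interval)
    {
        static std::mt19937_64 rng{std::random_device{}()};
        // exponential_distribution takes the rate; mean = 1/rate, expressed in microseconds
        const double mean_us{std::chrono::duration<double, std::micro>(average_interval).count()};
        std::exponential_distribution<double> dist{1.0 / mean_us};
        return now + std::chrono::microseconds{static_cast<int64_t>(dist(rng))};
    }
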
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index ccc859619d..f4930cd839 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -185,7 +185,7 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIn
case TxVerbosity::SHOW_DETAILS:
case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
CBlockUndo blockUndo;
- const bool have_undo = !IsBlockPruned(blockindex) && UndoReadFromDisk(blockUndo, blockindex);
+ const bool have_undo{WITH_LOCK(::cs_main, return !IsBlockPruned(blockindex) && UndoReadFromDisk(blockUndo, blockindex))};
for (size_t i = 0; i < block.vtx.size(); ++i) {
const CTransactionRef& tx = block.vtx.at(i);
@@ -790,17 +790,15 @@ static RPCHelpMan getblockfrompeer()
{
return RPCHelpMan{
"getblockfrompeer",
- "\nAttempt to fetch block from a given peer.\n"
+ "Attempt to fetch block from a given peer.\n"
"\nWe must have the header for this block, e.g. using submitheader.\n"
- "\nReturns {} if a block-request was successfully scheduled\n",
+ "Subsequent calls for the same block and a new peer will cause the response from the previous peer to be ignored.\n"
+ "\nReturns an empty JSON object if the request was successfully scheduled.",
{
- {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"},
- {"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO, "The node ID (see getpeerinfo for node IDs)"},
+ {"block_hash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash to try to fetch"},
+ {"peer_id", RPCArg::Type::NUM, RPCArg::Optional::NO, "The peer to fetch it from (see getpeerinfo for peer IDs)"},
},
- RPCResult{RPCResult::Type::OBJ, "", "",
- {
- {RPCResult::Type::STR, "warnings", /*optional=*/true, "any warnings"},
- }},
+ RPCResult{RPCResult::Type::OBJ, "", /*optional=*/false, "", {}},
RPCExamples{
HelpExampleCli("getblockfrompeer", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" 0")
+ HelpExampleRpc("getblockfrompeer", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" 0")
@@ -810,31 +808,25 @@ static RPCHelpMan getblockfrompeer()
const NodeContext& node = EnsureAnyNodeContext(request.context);
ChainstateManager& chainman = EnsureChainman(node);
PeerManager& peerman = EnsurePeerman(node);
- CConnman& connman = EnsureConnman(node);
- uint256 hash(ParseHashV(request.params[0], "hash"));
+ const uint256& block_hash{ParseHashV(request.params[0], "block_hash")};
+ const NodeId peer_id{request.params[1].get_int64()};
- const NodeId nodeid = static_cast<NodeId>(request.params[1].get_int64());
-
- // Check that the peer with nodeid exists
- if (!connman.ForNode(nodeid, [](CNode* node) {return true;})) {
- throw JSONRPCError(RPC_MISC_ERROR, strprintf("Peer nodeid %d does not exist", nodeid));
- }
-
- const CBlockIndex* const index = WITH_LOCK(cs_main, return chainman.m_blockman.LookupBlockIndex(hash););
+ const CBlockIndex* const index = WITH_LOCK(cs_main, return chainman.m_blockman.LookupBlockIndex(block_hash););
if (!index) {
throw JSONRPCError(RPC_MISC_ERROR, "Block header missing");
}
- UniValue result = UniValue::VOBJ;
+ const bool block_has_data = WITH_LOCK(::cs_main, return index->nStatus & BLOCK_HAVE_DATA);
+ if (block_has_data) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Block already downloaded");
+ }
- if (index->nStatus & BLOCK_HAVE_DATA) {
- result.pushKV("warnings", "Block already downloaded");
- } else if (!peerman.FetchBlock(nodeid, hash, *index)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Failed to fetch block from peer");
+ if (const auto err{peerman.FetchBlock(peer_id, *index)}) {
+ throw JSONRPCError(RPC_MISC_ERROR, err.value());
}
- return result;
+ return UniValue::VOBJ;
},
};
}
@@ -938,8 +930,9 @@ static RPCHelpMan getblockheader()
};
}
-static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
+static CBlock GetBlockChecked(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
CBlock block;
if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
@@ -955,8 +948,9 @@ static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
return block;
}
-static CBlockUndo GetUndoChecked(const CBlockIndex* pblockindex)
+static CBlockUndo GetUndoChecked(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
+ AssertLockHeld(::cs_main);
CBlockUndo blockUndo;
if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available (pruned data)");
@@ -1340,6 +1334,7 @@ static RPCHelpMan gettxout()
{RPCResult::Type::STR_AMOUNT, "value", "The transaction value in " + CURRENCY_UNIT},
{RPCResult::Type::OBJ, "scriptPubKey", "", {
{RPCResult::Type::STR, "asm", ""},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the output"},
{RPCResult::Type::STR_HEX, "hex", ""},
{RPCResult::Type::STR, "type", "The type, eg pubkeyhash"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
@@ -1443,7 +1438,7 @@ static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue&
UniValue rv(UniValue::VOBJ);
rv.pushKV("type", "buried");
- // getblockchaininfo reports the softfork as active from when the chain height is
+ // getdeploymentinfo reports the softfork as active from when the chain height is
// one below the activation height
rv.pushKV("active", DeploymentActiveAfter(active_chain_tip, params, dep));
rv.pushKV("height", params.DeploymentHeight(dep));
@@ -1455,51 +1450,82 @@ static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue&
// For BIP9 deployments.
if (!DeploymentEnabled(consensusParams, id)) return;
+ if (active_chain_tip == nullptr) return;
+
+ auto get_state_name = [](const ThresholdState state) -> std::string {
+ switch (state) {
+ case ThresholdState::DEFINED: return "defined";
+ case ThresholdState::STARTED: return "started";
+ case ThresholdState::LOCKED_IN: return "locked_in";
+ case ThresholdState::ACTIVE: return "active";
+ case ThresholdState::FAILED: return "failed";
+ }
+ return "invalid";
+ };
UniValue bip9(UniValue::VOBJ);
- const ThresholdState thresholdState = g_versionbitscache.State(active_chain_tip, consensusParams, id);
- switch (thresholdState) {
- case ThresholdState::DEFINED: bip9.pushKV("status", "defined"); break;
- case ThresholdState::STARTED: bip9.pushKV("status", "started"); break;
- case ThresholdState::LOCKED_IN: bip9.pushKV("status", "locked_in"); break;
- case ThresholdState::ACTIVE: bip9.pushKV("status", "active"); break;
- case ThresholdState::FAILED: bip9.pushKV("status", "failed"); break;
- }
- const bool has_signal = (ThresholdState::STARTED == thresholdState || ThresholdState::LOCKED_IN == thresholdState);
+
+ const ThresholdState next_state = g_versionbitscache.State(active_chain_tip, consensusParams, id);
+ const ThresholdState current_state = g_versionbitscache.State(active_chain_tip->pprev, consensusParams, id);
+
+ const bool has_signal = (ThresholdState::STARTED == current_state || ThresholdState::LOCKED_IN == current_state);
+
+ // BIP9 parameters
if (has_signal) {
bip9.pushKV("bit", consensusParams.vDeployments[id].bit);
}
bip9.pushKV("start_time", consensusParams.vDeployments[id].nStartTime);
bip9.pushKV("timeout", consensusParams.vDeployments[id].nTimeout);
- int64_t since_height = g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id);
- bip9.pushKV("since", since_height);
+ bip9.pushKV("min_activation_height", consensusParams.vDeployments[id].min_activation_height);
+
+ // BIP9 status
+ bip9.pushKV("status", get_state_name(current_state));
+ bip9.pushKV("since", g_versionbitscache.StateSinceHeight(active_chain_tip->pprev, consensusParams, id));
+ bip9.pushKV("status-next", get_state_name(next_state));
+
+ // BIP9 signalling status, if applicable
if (has_signal) {
UniValue statsUV(UniValue::VOBJ);
- BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id);
+ std::vector<bool> signals;
+ BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id, &signals);
statsUV.pushKV("period", statsStruct.period);
statsUV.pushKV("elapsed", statsStruct.elapsed);
statsUV.pushKV("count", statsStruct.count);
- if (ThresholdState::LOCKED_IN != thresholdState) {
+ if (ThresholdState::LOCKED_IN != current_state) {
statsUV.pushKV("threshold", statsStruct.threshold);
statsUV.pushKV("possible", statsStruct.possible);
}
bip9.pushKV("statistics", statsUV);
+
+ std::string sig;
+ sig.reserve(signals.size());
+ for (const bool s : signals) {
+ sig.push_back(s ? '#' : '-');
+ }
+ bip9.pushKV("signalling", sig);
}
- bip9.pushKV("min_activation_height", consensusParams.vDeployments[id].min_activation_height);
UniValue rv(UniValue::VOBJ);
rv.pushKV("type", "bip9");
- rv.pushKV("bip9", bip9);
- if (ThresholdState::ACTIVE == thresholdState) {
- rv.pushKV("height", since_height);
+ if (ThresholdState::ACTIVE == next_state) {
+ rv.pushKV("height", g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id));
}
- rv.pushKV("active", ThresholdState::ACTIVE == thresholdState);
+ rv.pushKV("active", ThresholdState::ACTIVE == next_state);
+ rv.pushKV("bip9", bip9);
softforks.pushKV(DeploymentName(id), rv);
}
+namespace {
+/* TODO: when -deprecatedrpc=softforks is removed, drop these */
+UniValue DeploymentInfo(const CBlockIndex* tip, const Consensus::Params& consensusParams);
+extern const std::vector<RPCResult> RPCHelpForDeployment;
+}
+
+// used by rest.cpp:rest_chaininfo, so cannot be static
RPCHelpMan getblockchaininfo()
{
+ /* TODO: from v24, remove -deprecatedrpc=softforks */
return RPCHelpMan{"getblockchaininfo",
"Returns an object containing various state info regarding blockchain processing.\n",
{},
@@ -1521,31 +1547,11 @@ RPCHelpMan getblockchaininfo()
{RPCResult::Type::NUM, "pruneheight", /*optional=*/true, "lowest-height complete block stored (only present if pruning is enabled)"},
{RPCResult::Type::BOOL, "automatic_pruning", /*optional=*/true, "whether automatic pruning is enabled (only present if pruning is enabled)"},
{RPCResult::Type::NUM, "prune_target_size", /*optional=*/true, "the target size used by pruning (only present if automatic pruning is enabled)"},
- {RPCResult::Type::OBJ_DYN, "softforks", "status of softforks",
+ {RPCResult::Type::OBJ_DYN, "softforks", "(DEPRECATED, returned only if config option -deprecatedrpc=softforks is passed) status of softforks",
{
{RPCResult::Type::OBJ, "xxxx", "name of the softfork",
- {
- {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""},
- {RPCResult::Type::OBJ, "bip9", /*optional=*/true, "status of bip9 softforks (only for \"bip9\" type)",
- {
- {RPCResult::Type::STR, "status", "one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\""},
- {RPCResult::Type::NUM, "bit", /*optional=*/true, "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" and \"locked_in\" status)"},
- {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
- {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
- {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
- {RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"},
- {RPCResult::Type::OBJ, "statistics", /*optional=*/true, "numeric statistics about signalling for a softfork (only for \"started\" and \"locked_in\" status)",
- {
- {RPCResult::Type::NUM, "period", "the length in blocks of the signalling period"},
- {RPCResult::Type::NUM, "threshold", /*optional=*/true, "the number of blocks with the version bit set required to activate the feature (only for \"started\" status)"},
- {RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
- {RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
- {RPCResult::Type::BOOL, "possible", /*optional=*/true, "returns false if there are not enough blocks left in this period to pass activation threshold (only for \"started\" status)"},
- }},
- }},
- {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
- {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"},
- }},
+ RPCHelpForDeployment
+ },
}},
{RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
}},
@@ -1593,7 +1599,45 @@ RPCHelpMan getblockchaininfo()
}
}
- const Consensus::Params& consensusParams = Params().GetConsensus();
+ if (IsDeprecatedRPCEnabled("softforks")) {
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+ obj.pushKV("softforks", DeploymentInfo(tip, consensusParams));
+ }
+
+ obj.pushKV("warnings", GetWarnings(false).original);
+ return obj;
+},
+ };
+}
+
+namespace {
+const std::vector<RPCResult> RPCHelpForDeployment{
+ {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""},
+ {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
+ {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"},
+ {RPCResult::Type::OBJ, "bip9", /*optional=*/true, "status of bip9 softforks (only for \"bip9\" type)",
+ {
+ {RPCResult::Type::NUM, "bit", /*optional=*/true, "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" and \"locked_in\" status)"},
+ {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
+ {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
+ {RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"},
+ {RPCResult::Type::STR, "status", "bip9 status of specified block (one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\")"},
+ {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
+ {RPCResult::Type::STR, "status-next", "bip9 status of next block"},
+ {RPCResult::Type::OBJ, "statistics", /*optional=*/true, "numeric statistics about signalling for a softfork (only for \"started\" and \"locked_in\" status)",
+ {
+ {RPCResult::Type::NUM, "period", "the length in blocks of the signalling period"},
+ {RPCResult::Type::NUM, "threshold", /*optional=*/true, "the number of blocks with the version bit set required to activate the feature (only for \"started\" status)"},
+ {RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
+ {RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
+ {RPCResult::Type::BOOL, "possible", /*optional=*/true, "returns false if there are not enough blocks left in this period to pass activation threshold (only for \"started\" status)"},
+ }},
+ {RPCResult::Type::STR, "signalling", "indicates blocks that signalled with a # and blocks that did not with a -"},
+ }},
+};
+
+UniValue DeploymentInfo(const CBlockIndex* tip, const Consensus::Params& consensusParams)
+{
UniValue softforks(UniValue::VOBJ);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DERSIG);
@@ -1602,11 +1646,53 @@ RPCHelpMan getblockchaininfo()
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_SEGWIT);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_TAPROOT);
- obj.pushKV("softforks", softforks);
+ return softforks;
+}
+} // anon namespace
- obj.pushKV("warnings", GetWarnings(false).original);
- return obj;
-},
+static RPCHelpMan getdeploymentinfo()
+{
+ return RPCHelpMan{"getdeploymentinfo",
+ "Returns an object containing various state info regarding soft-forks.",
+ {
+ {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Default{"chain tip"}, "The block hash at which to query fork state"},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "", {
+ {RPCResult::Type::STR, "hash", "requested block hash (or tip)"},
+ {RPCResult::Type::NUM, "height", "requested block height (or tip)"},
+ {RPCResult::Type::OBJ, "deployments", "", {
+ {RPCResult::Type::OBJ, "xxxx", "name of the deployment", RPCHelpForDeployment}
+ }},
+ }
+ },
+ RPCExamples{ HelpExampleCli("getdeploymentinfo", "") + HelpExampleRpc("getdeploymentinfo", "") },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+ {
+ ChainstateManager& chainman = EnsureAnyChainman(request.context);
+ LOCK(cs_main);
+ CChainState& active_chainstate = chainman.ActiveChainstate();
+
+ const CBlockIndex* tip;
+ if (request.params[0].isNull()) {
+ tip = active_chainstate.m_chain.Tip();
+ CHECK_NONFATAL(tip);
+ } else {
+ uint256 hash(ParseHashV(request.params[0], "blockhash"));
+ tip = chainman.m_blockman.LookupBlockIndex(hash);
+ if (!tip) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
+ }
+ }
+
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+
+ UniValue deploymentinfo(UniValue::VOBJ);
+ deploymentinfo.pushKV("hash", tip->GetBlockHash().ToString());
+ deploymentinfo.pushKV("height", tip->nHeight);
+ deploymentinfo.pushKV("deployments", DeploymentInfo(tip, consensusParams));
+ return deploymentinfo;
+ },
};
}
@@ -2756,6 +2842,7 @@ static const CRPCCommand commands[] =
{ "blockchain", &getblockheader, },
{ "blockchain", &getchaintips, },
{ "blockchain", &getdifficulty, },
+ { "blockchain", &getdeploymentinfo, },
{ "blockchain", &getmempoolancestors, },
{ "blockchain", &getmempooldescendants, },
{ "blockchain", &getmempoolentry, },
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 003ba8bb20..c480a093a4 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -60,7 +60,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getbalance", 1, "minconf" },
{ "getbalance", 2, "include_watchonly" },
{ "getbalance", 3, "avoid_reuse" },
- { "getblockfrompeer", 1, "nodeid" },
+ { "getblockfrompeer", 1, "peer_id" },
{ "getblockhash", 0, "height" },
{ "waitforblockheight", 0, "height" },
{ "waitforblockheight", 1, "timeout" },
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index f227fde0f7..ff0d8a4e0f 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -179,6 +179,7 @@ static RPCHelpMan getrawtransaction()
{RPCResult::Type::OBJ, "scriptPubKey", "",
{
{RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the output"},
{RPCResult::Type::STR, "hex", "the hex"},
{RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
@@ -240,7 +241,8 @@ static RPCHelpMan getrawtransaction()
if (!tx) {
std::string errmsg;
if (blockindex) {
- if (!(blockindex->nStatus & BLOCK_HAVE_DATA)) {
+ const bool block_has_data = WITH_LOCK(::cs_main, return blockindex->nStatus & BLOCK_HAVE_DATA);
+ if (!block_has_data) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available");
}
errmsg = "No such transaction found in the provided block";
@@ -506,6 +508,7 @@ static RPCHelpMan decoderawtransaction()
{RPCResult::Type::OBJ, "scriptPubKey", "",
{
{RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the output"},
{RPCResult::Type::STR_HEX, "hex", "the hex"},
{RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
@@ -561,6 +564,7 @@ static RPCHelpMan decodescript()
RPCResult::Type::OBJ, "", "",
{
{RPCResult::Type::STR, "asm", "Script public key"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the script"},
{RPCResult::Type::STR, "type", "The output type (e.g. " + GetAllOutputTypes() + ")"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
{RPCResult::Type::STR, "p2sh", /*optional=*/true,
@@ -572,6 +576,7 @@ static RPCHelpMan decodescript()
{RPCResult::Type::STR_HEX, "hex", "Hex string of the script public key"},
{RPCResult::Type::STR, "type", "The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the script"},
{RPCResult::Type::STR, "p2sh-segwit", "address of the P2SH script wrapping this witness redeem script"},
}},
},
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 57e3da0351..5ef7e26ce8 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -831,11 +831,14 @@ void RPCResult::ToSections(Sections& sections, const OuterType outer_type, const
}
case Type::OBJ_DYN:
case Type::OBJ: {
+ if (m_inner.empty()) {
+ sections.PushSection({indent + maybe_key + "{}", Description("empty JSON object")});
+ return;
+ }
sections.PushSection({indent + maybe_key + "{", Description("json object")});
for (const auto& i : m_inner) {
i.ToSections(sections, OuterType::OBJ, current_indent + 2);
}
- CHECK_NONFATAL(!m_inner.empty());
if (m_type == Type::OBJ_DYN && m_inner.back().m_type != Type::ELISION) {
// If the dictionary keys are dynamic, use three dots for continuation
sections.PushSection({indent_next + "...", ""});
@@ -886,6 +889,17 @@ bool RPCResult::MatchesType(const UniValue& result) const
CHECK_NONFATAL(false);
}
+void RPCResult::CheckInnerDoc() const
+{
+ if (m_type == Type::OBJ) {
+ // May or may not be empty
+ return;
+ }
+ // Everything else must have inner docs exactly when the type requires them
+ const bool inner_needed{m_type == Type::ARR || m_type == Type::ARR_FIXED || m_type == Type::OBJ_DYN};
+ CHECK_NONFATAL(inner_needed != m_inner.empty());
+}
+
std::string RPCArg::ToStringObj(const bool oneline) const
{
std::string res;
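
CheckInnerDoc relaxes the old constructor-time check so that a plain OBJ result may document an always-empty object (as getblockfrompeer now does), while ARR, ARR_FIXED and OBJ_DYN still must describe their contents. For example, the empty-object result declared earlier in this diff passes the check:

    // Accepted since this change: an OBJ result with no inner entries.
    RPCResult{RPCResult::Type::OBJ, "", /*optional=*/false, "", {}};
    // An OBJ_DYN with empty inner docs would still trip CHECK_NONFATAL in CheckInnerDoc().
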
diff --git a/src/rpc/util.h b/src/rpc/util.h
index d43ee33b0f..25ebf78fa1 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -267,8 +267,7 @@ struct RPCResult {
m_cond{std::move(cond)}
{
CHECK_NONFATAL(!m_cond.empty());
- const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
- CHECK_NONFATAL(inner_needed != inner.empty());
+ CheckInnerDoc();
}
RPCResult(
@@ -292,8 +291,7 @@ struct RPCResult {
m_description{std::move(description)},
m_cond{}
{
- const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
- CHECK_NONFATAL(inner_needed != inner.empty());
+ CheckInnerDoc();
}
RPCResult(
@@ -311,6 +309,9 @@ struct RPCResult {
std::string ToDescriptionString() const;
/** Check whether the result JSON type matches. */
bool MatchesType(const UniValue& result) const;
+
+private:
+ void CheckInnerDoc() const;
};
struct RPCResults {
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index dd15e6104c..f7f9dfc262 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -22,20 +22,23 @@ public:
m_remaining(txToLen)
{}
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
- if (nSize > m_remaining)
+ if (dst.size() > m_remaining) {
throw std::ios_base::failure(std::string(__func__) + ": end of data");
+ }
- if (pch == nullptr)
+ if (dst.data() == nullptr) {
throw std::ios_base::failure(std::string(__func__) + ": bad destination buffer");
+ }
- if (m_data == nullptr)
+ if (m_data == nullptr) {
throw std::ios_base::failure(std::string(__func__) + ": bad source buffer");
+ }
- memcpy(pch, m_data, nSize);
- m_remaining -= nSize;
- m_data += nSize;
+ memcpy(dst.data(), m_data, dst.size());
+ m_remaining -= dst.size();
+ m_data += dst.size();
}
template<typename T>
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 95ffe40a74..11b1a1c887 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1303,12 +1303,12 @@ public:
it = itBegin;
while (scriptCode.GetOp(it, opcode)) {
if (opcode == OP_CODESEPARATOR) {
- s.write((char*)&itBegin[0], it-itBegin-1);
+ s.write(AsBytes(Span{&itBegin[0], size_t(it - itBegin - 1)}));
itBegin = it;
}
}
if (itBegin != scriptCode.end())
- s.write((char*)&itBegin[0], it-itBegin);
+ s.write(AsBytes(Span{&itBegin[0], size_t(it - itBegin)}));
}
/** Serialize an input of txTo */
@@ -1500,7 +1500,7 @@ static bool HandleMissingData(MissingDataBehavior mdb)
}
template<typename T>
-bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb)
+bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb)
{
uint8_t ext_flag, key_version;
switch (sigversion) {
@@ -1568,9 +1568,12 @@ bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata
// Data about the output (if only one).
if (output_type == SIGHASH_SINGLE) {
if (in_pos >= tx_to.vout.size()) return false;
- CHashWriter sha_single_output(SER_GETHASH, 0);
- sha_single_output << tx_to.vout[in_pos];
- ss << sha_single_output.GetSHA256();
+ if (!execdata.m_output_hash) {
+ CHashWriter sha_single_output(SER_GETHASH, 0);
+ sha_single_output << tx_to.vout[in_pos];
+ execdata.m_output_hash = sha_single_output.GetSHA256();
+ }
+ ss << execdata.m_output_hash.value();
}
// Additional data for BIP 342 signatures
@@ -1692,7 +1695,7 @@ bool GenericTransactionSignatureChecker<T>::CheckECDSASignature(const std::vecto
}
template <class T>
-bool GenericTransactionSignatureChecker<T>::CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey_in, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror) const
+bool GenericTransactionSignatureChecker<T>::CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey_in, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const
{
assert(sigversion == SigVersion::TAPROOT || sigversion == SigVersion::TAPSCRIPT);
// Schnorr signatures have 32-byte public keys. The caller is responsible for enforcing this.
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index 2a28f1a2d3..cf1953ad22 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -11,6 +11,7 @@
#include <span.h>
#include <primitives/transaction.h>
+#include <optional>
#include <vector>
#include <stdint.h>
@@ -215,6 +216,9 @@ struct ScriptExecutionData
bool m_validation_weight_left_init = false;
//! How much validation weight is left (decremented for every successful non-empty signature check).
int64_t m_validation_weight_left;
+
+ //! The hash of the corresponding output
+ std::optional<uint256> m_output_hash;
};
/** Signature hash sizes */
@@ -244,7 +248,7 @@ public:
return false;
}
- virtual bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const
+ virtual bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const
{
return false;
}
@@ -272,7 +276,7 @@ enum class MissingDataBehavior
};
template<typename T>
-bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb);
+bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb);
template <class T>
class GenericTransactionSignatureChecker : public BaseSignatureChecker
@@ -292,7 +296,7 @@ public:
GenericTransactionSignatureChecker(const T* txToIn, unsigned int nInIn, const CAmount& amountIn, MissingDataBehavior mdb) : txTo(txToIn), m_mdb(mdb), nIn(nInIn), amount(amountIn), txdata(nullptr) {}
GenericTransactionSignatureChecker(const T* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData& txdataIn, MissingDataBehavior mdb) : txTo(txToIn), m_mdb(mdb), nIn(nInIn), amount(amountIn), txdata(&txdataIn) {}
bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override;
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override;
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override;
bool CheckLockTime(const CScriptNum& nLockTime) const override;
bool CheckSequence(const CScriptNum& nSequence) const override;
};
@@ -313,7 +317,7 @@ public:
return m_checker.CheckECDSASignature(scriptSig, vchPubKey, scriptCode, sigversion);
}
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
{
return m_checker.CheckSchnorrSignature(sig, pubkey, sigversion, execdata, serror);
}
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 8e08448480..371a937bc8 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -542,7 +542,7 @@ class DummySignatureChecker final : public BaseSignatureChecker
public:
DummySignatureChecker() {}
bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; }
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror) const override { return true; }
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const override { return true; }
};
const DummySignatureChecker DUMMY_CHECKER;
diff --git a/src/serialize.h b/src/serialize.h
index 4cc4b0338c..44bb471f25 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -47,79 +47,72 @@ static const unsigned int MAX_VECTOR_ALLOCATE = 5000000;
struct deserialize_type {};
constexpr deserialize_type deserialize {};
-//! Safely convert odd char pointer types to standard ones.
-inline char* CharCast(char* c) { return c; }
-inline char* CharCast(unsigned char* c) { return (char*)c; }
-inline const char* CharCast(const char* c) { return c; }
-inline const char* CharCast(const unsigned char* c) { return (const char*)c; }
-
/*
* Lowest-level serialization and conversion.
- * @note Sizes of these types are verified in the tests
*/
template<typename Stream> inline void ser_writedata8(Stream &s, uint8_t obj)
{
- s.write((char*)&obj, 1);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata16(Stream &s, uint16_t obj)
{
obj = htole16(obj);
- s.write((char*)&obj, 2);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata16be(Stream &s, uint16_t obj)
{
obj = htobe16(obj);
- s.write((char*)&obj, 2);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata32(Stream &s, uint32_t obj)
{
obj = htole32(obj);
- s.write((char*)&obj, 4);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata32be(Stream &s, uint32_t obj)
{
obj = htobe32(obj);
- s.write((char*)&obj, 4);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata64(Stream &s, uint64_t obj)
{
obj = htole64(obj);
- s.write((char*)&obj, 8);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline uint8_t ser_readdata8(Stream &s)
{
uint8_t obj;
- s.read((char*)&obj, 1);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return obj;
}
template<typename Stream> inline uint16_t ser_readdata16(Stream &s)
{
uint16_t obj;
- s.read((char*)&obj, 2);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le16toh(obj);
}
template<typename Stream> inline uint16_t ser_readdata16be(Stream &s)
{
uint16_t obj;
- s.read((char*)&obj, 2);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return be16toh(obj);
}
template<typename Stream> inline uint32_t ser_readdata32(Stream &s)
{
uint32_t obj;
- s.read((char*)&obj, 4);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le32toh(obj);
}
template<typename Stream> inline uint32_t ser_readdata32be(Stream &s)
{
uint32_t obj;
- s.read((char*)&obj, 4);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return be32toh(obj);
}
template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
{
uint64_t obj;
- s.read((char*)&obj, 8);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le64toh(obj);
}
@@ -127,7 +120,7 @@ template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
/////////////////////////////////////////////////////////////////
//
// Templates for serializing to anything that looks like a stream,
-// i.e. anything that supports .read(char*, size_t) and .write(char*, size_t)
+// i.e. anything that supports .read(Span<std::byte>) and .write(Span<const std::byte>)
//
class CSizeComputer;
@@ -196,7 +189,7 @@ template<typename X> const X& ReadWriteAsHelper(const X& x) { return x; }
FORMATTER_METHODS(cls, obj)
#ifndef CHAR_EQUALS_INT8
-template<typename Stream> inline void Serialize(Stream& s, char a ) { ser_writedata8(s, a); } // TODO Get rid of bare char
+template <typename Stream> void Serialize(Stream&, char) = delete; // char serialization forbidden. Use uint8_t or int8_t
#endif
template<typename Stream> inline void Serialize(Stream& s, int8_t a ) { ser_writedata8(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint8_t a ) { ser_writedata8(s, a); }
@@ -206,13 +199,13 @@ template<typename Stream> inline void Serialize(Stream& s, int32_t a ) { ser_wri
template<typename Stream> inline void Serialize(Stream& s, uint32_t a) { ser_writedata32(s, a); }
template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_writedata64(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); }
-template<typename Stream, int N> inline void Serialize(Stream& s, const char (&a)[N]) { s.write(a, N); }
-template<typename Stream, int N> inline void Serialize(Stream& s, const unsigned char (&a)[N]) { s.write(CharCast(a), N); }
-template<typename Stream> inline void Serialize(Stream& s, const Span<const unsigned char>& span) { s.write(CharCast(span.data()), span.size()); }
-template<typename Stream> inline void Serialize(Stream& s, const Span<unsigned char>& span) { s.write(CharCast(span.data()), span.size()); }
+template<typename Stream, int N> inline void Serialize(Stream& s, const char (&a)[N]) { s.write(MakeByteSpan(a)); }
+template<typename Stream, int N> inline void Serialize(Stream& s, const unsigned char (&a)[N]) { s.write(MakeByteSpan(a)); }
+template<typename Stream> inline void Serialize(Stream& s, const Span<const unsigned char>& span) { s.write(AsBytes(span)); }
+template<typename Stream> inline void Serialize(Stream& s, const Span<unsigned char>& span) { s.write(AsBytes(span)); }
#ifndef CHAR_EQUALS_INT8
-template<typename Stream> inline void Unserialize(Stream& s, char& a ) { a = ser_readdata8(s); } // TODO Get rid of bare char
+template <typename Stream> void Unserialize(Stream&, char) = delete; // char serialization forbidden. Use uint8_t or int8_t
#endif
template<typename Stream> inline void Unserialize(Stream& s, int8_t& a ) { a = ser_readdata8(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a ) { a = ser_readdata8(s); }
@@ -222,9 +215,9 @@ template<typename Stream> inline void Unserialize(Stream& s, int32_t& a ) { a =
template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a) { a = ser_readdata32(s); }
template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = ser_readdata64(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); }
-template<typename Stream, int N> inline void Unserialize(Stream& s, char (&a)[N]) { s.read(a, N); }
-template<typename Stream, int N> inline void Unserialize(Stream& s, unsigned char (&a)[N]) { s.read(CharCast(a), N); }
-template<typename Stream> inline void Unserialize(Stream& s, Span<unsigned char>& span) { s.read(CharCast(span.data()), span.size()); }
+template<typename Stream, int N> inline void Unserialize(Stream& s, char (&a)[N]) { s.read(MakeWritableByteSpan(a)); }
+template<typename Stream, int N> inline void Unserialize(Stream& s, unsigned char (&a)[N]) { s.read(MakeWritableByteSpan(a)); }
+template<typename Stream> inline void Unserialize(Stream& s, Span<unsigned char>& span) { s.read(AsWritableBytes(span)); }
template <typename Stream> inline void Serialize(Stream& s, bool a) { uint8_t f = a; ser_writedata8(s, f); }
template <typename Stream> inline void Unserialize(Stream& s, bool& a) { uint8_t f = ser_readdata8(s); a = f; }
@@ -479,10 +472,10 @@ struct CustomUintFormatter
if (v < 0 || v > MAX) throw std::ios_base::failure("CustomUintFormatter value out of range");
if (BigEndian) {
uint64_t raw = htobe64(v);
- s.write(((const char*)&raw) + 8 - Bytes, Bytes);
+ s.write({BytePtr(&raw) + 8 - Bytes, Bytes});
} else {
uint64_t raw = htole64(v);
- s.write((const char*)&raw, Bytes);
+ s.write({BytePtr(&raw), Bytes});
}
}
@@ -492,10 +485,10 @@ struct CustomUintFormatter
static_assert(std::numeric_limits<U>::max() >= MAX && std::numeric_limits<U>::min() <= 0, "Assigned type too small");
uint64_t raw = 0;
if (BigEndian) {
- s.read(((char*)&raw) + 8 - Bytes, Bytes);
+ s.read({BytePtr(&raw) + 8 - Bytes, Bytes});
v = static_cast<I>(be64toh(raw));
} else {
- s.read((char*)&raw, Bytes);
+ s.read({BytePtr(&raw), Bytes});
v = static_cast<I>(le64toh(raw));
}
}
@@ -551,7 +544,7 @@ struct LimitedStringFormatter
throw std::ios_base::failure("String length limit exceeded");
}
v.resize(size);
- if (size != 0) s.read((char*)v.data(), size);
+ if (size != 0) s.read(MakeWritableByteSpan(v));
}
template<typename Stream>
@@ -715,7 +708,7 @@ void Serialize(Stream& os, const std::basic_string<C>& str)
{
WriteCompactSize(os, str.size());
if (!str.empty())
- os.write((char*)str.data(), str.size() * sizeof(C));
+ os.write(MakeByteSpan(str));
}
template<typename Stream, typename C>
@@ -724,7 +717,7 @@ void Unserialize(Stream& is, std::basic_string<C>& str)
unsigned int nSize = ReadCompactSize(is);
str.resize(nSize);
if (nSize != 0)
- is.read((char*)str.data(), nSize * sizeof(C));
+ is.read(MakeWritableByteSpan(str));
}
@@ -737,7 +730,7 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&)
{
WriteCompactSize(os, v.size());
if (!v.empty())
- os.write((char*)v.data(), v.size() * sizeof(T));
+ os.write(MakeByteSpan(v));
}
template<typename Stream, unsigned int N, typename T, typename V>
@@ -764,7 +757,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)
{
unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
v.resize_uninitialized(i + blk);
- is.read((char*)&v[i], blk * sizeof(T));
+ is.read(AsWritableBytes(Span{&v[i], blk}));
i += blk;
}
}
@@ -791,7 +784,7 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&
{
WriteCompactSize(os, v.size());
if (!v.empty())
- os.write((char*)v.data(), v.size() * sizeof(T));
+ os.write(MakeByteSpan(v));
}
template<typename Stream, typename T, typename A>
@@ -830,7 +823,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&)
{
unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
v.resize(i + blk);
- is.read((char*)&v[i], blk * sizeof(T));
+ is.read(AsWritableBytes(Span{&v[i], blk}));
i += blk;
}
}
@@ -995,9 +988,9 @@ protected:
public:
explicit CSizeComputer(int nVersionIn) : nSize(0), nVersion(nVersionIn) {}
- void write(const char *psz, size_t _nSize)
+ void write(Span<const std::byte> src)
{
- this->nSize += _nSize;
+ this->nSize += src.size();
}
/** Pretend _nSize bytes are written, without specifying them. */
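For context on the serialize.h changes above: stream write/read now take a byte span instead of a pointer plus a separate length, so the size travels with the view and the casts disappear at the call sites. A minimal sketch, not part of the patch, assuming `s` is a stream with the new interface (e.g. a CDataStream):

    std::vector<unsigned char> payload{0x01, 0x02, 0x03};
    // previously: s.write((char*)payload.data(), payload.size());
    s.write(MakeByteSpan(payload));        // length is carried by the span
    unsigned char buf[3];
    // previously: s.read((char*)buf, sizeof(buf));
    s.read(MakeWritableByteSpan(buf));     // writable byte view over the array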
diff --git a/src/span.h b/src/span.h
index 47390a5bb2..b627b993c2 100644
--- a/src/span.h
+++ b/src/span.h
@@ -243,16 +243,21 @@ T& SpanPopBack(Span<T>& span)
return back;
}
+//! Convert a data pointer to a std::byte data pointer.
+//! Where possible, please use the safer AsBytes helpers.
+inline const std::byte* BytePtr(const void* data) { return reinterpret_cast<const std::byte*>(data); }
+inline std::byte* BytePtr(void* data) { return reinterpret_cast<std::byte*>(data); }
+
// From C++20 as_bytes and as_writeable_bytes
template <typename T>
Span<const std::byte> AsBytes(Span<T> s) noexcept
{
- return {reinterpret_cast<const std::byte*>(s.data()), s.size_bytes()};
+ return {BytePtr(s.data()), s.size_bytes()};
}
template <typename T>
Span<std::byte> AsWritableBytes(Span<T> s) noexcept
{
- return {reinterpret_cast<std::byte*>(s.data()), s.size_bytes()};
+ return {BytePtr(s.data()), s.size_bytes()};
}
template <typename V>
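The BytePtr helper added here covers cases like CustomUintFormatter above, where only a raw pointer and an explicit length are available; the AsBytes/AsWritableBytes and Make*ByteSpan helpers stay preferable whenever a sized object is in scope, since the length is derived rather than hand-written. A hedged sketch, again assuming a stream `s` with the span interface:

    uint64_t raw{0};
    // pointer plus explicit length, mirroring the formatter code above:
    s.write({BytePtr(&raw), sizeof(raw)});
    // equivalent but safer: the span knows its own size
    s.write(AsBytes(Span{&raw, 1}));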
diff --git a/src/streams.h b/src/streams.h
index 384bb632b0..2f26be6dd8 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -49,14 +49,14 @@ public:
return (*this);
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const std::byte> src)
{
- stream->write(pch, nSize);
+ stream->write(src);
}
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
- stream->read(pch, nSize);
+ stream->read(dst);
}
int GetVersion() const { return nVersion; }
@@ -94,17 +94,17 @@ class CVectorWriter
{
::SerializeMany(*this, std::forward<Args>(args)...);
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const std::byte> src)
{
assert(nPos <= vchData.size());
- size_t nOverwrite = std::min(nSize, vchData.size() - nPos);
+ size_t nOverwrite = std::min(src.size(), vchData.size() - nPos);
if (nOverwrite) {
- memcpy(vchData.data() + nPos, reinterpret_cast<const unsigned char*>(pch), nOverwrite);
+ memcpy(vchData.data() + nPos, src.data(), nOverwrite);
}
- if (nOverwrite < nSize) {
- vchData.insert(vchData.end(), reinterpret_cast<const unsigned char*>(pch) + nOverwrite, reinterpret_cast<const unsigned char*>(pch) + nSize);
+ if (nOverwrite < src.size()) {
+ vchData.insert(vchData.end(), UCharCast(src.data()) + nOverwrite, UCharCast(src.end()));
}
- nPos += nSize;
+ nPos += src.size();
}
template<typename T>
CVectorWriter& operator<<(const T& obj)
@@ -161,18 +161,18 @@ public:
size_t size() const { return m_data.size(); }
bool empty() const { return m_data.empty(); }
- void read(char* dst, size_t n)
+ void read(Span<std::byte> dst)
{
- if (n == 0) {
+ if (dst.size() == 0) {
return;
}
// Read from the beginning of the buffer
- if (n > m_data.size()) {
+ if (dst.size() > m_data.size()) {
throw std::ios_base::failure("SpanReader::read(): end of data");
}
- memcpy(dst, m_data.data(), n);
- m_data = m_data.subspan(n);
+ memcpy(dst.data(), m_data.data(), dst.size());
+ m_data = m_data.subspan(dst.size());
}
};
@@ -206,6 +206,7 @@ public:
: nType{nTypeIn},
nVersion{nVersionIn} {}
+ explicit CDataStream(Span<const uint8_t> sp, int type, int version) : CDataStream{AsBytes(sp), type, version} {}
explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
: vch(sp.data(), sp.data() + sp.size()),
nType{nTypeIn},
@@ -221,7 +222,7 @@ public:
std::string str() const
{
- return (std::string(begin(), end()));
+ return std::string{UCharCast(data()), UCharCast(data() + size())};
}
@@ -342,16 +343,16 @@ public:
void SetVersion(int n) { nVersion = n; }
int GetVersion() const { return nVersion; }
- void read(char* pch, size_t nSize)
+ void read(Span<value_type> dst)
{
- if (nSize == 0) return;
+ if (dst.size() == 0) return;
// Read from the beginning of the buffer
- unsigned int nReadPosNext = nReadPos + nSize;
+ unsigned int nReadPosNext = nReadPos + dst.size();
if (nReadPosNext > vch.size()) {
throw std::ios_base::failure("CDataStream::read(): end of data");
}
- memcpy(pch, &vch[nReadPos], nSize);
+ memcpy(dst.data(), &vch[nReadPos], dst.size());
if (nReadPosNext == vch.size())
{
nReadPos = 0;
@@ -379,10 +380,10 @@ public:
nReadPos = nReadPosNext;
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const value_type> src)
{
// Write to the end of the buffer
- vch.insert(vch.end(), pch, pch + nSize);
+ vch.insert(vch.end(), src.begin(), src.end());
}
template<typename Stream>
@@ -390,7 +391,7 @@ public:
{
// Special case: stream << stream concatenates like stream += stream
if (!vch.empty())
- s.write((char*)vch.data(), vch.size() * sizeof(value_type));
+ s.write(MakeByteSpan(vch));
}
template<typename T>
@@ -421,7 +422,7 @@ public:
}
for (size_type i = 0, j = 0; i != size(); i++) {
- vch[i] ^= key[j++];
+ vch[i] ^= std::byte{key[j++]};
// This potentially acts on very many bytes of data, so it's
// important that we calculate `j`, i.e. the `key` index in this
@@ -594,12 +595,13 @@ public:
int GetType() const { return nType; }
int GetVersion() const { return nVersion; }
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
if (!file)
throw std::ios_base::failure("CAutoFile::read: file handle is nullptr");
- if (fread(pch, 1, nSize, file) != nSize)
+ if (fread(dst.data(), 1, dst.size(), file) != dst.size()) {
throw std::ios_base::failure(feof(file) ? "CAutoFile::read: end of file" : "CAutoFile::read: fread failed");
+ }
}
void ignore(size_t nSize)
@@ -615,12 +617,13 @@ public:
}
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const std::byte> src)
{
if (!file)
throw std::ios_base::failure("CAutoFile::write: file handle is nullptr");
- if (fwrite(pch, 1, nSize, file) != nSize)
+ if (fwrite(src.data(), 1, src.size(), file) != src.size()) {
throw std::ios_base::failure("CAutoFile::write: write failed");
+ }
}
template<typename T>
@@ -661,7 +664,7 @@ private:
uint64_t nReadPos; //!< how many bytes have been read from this
uint64_t nReadLimit; //!< up to which position we're allowed to read
uint64_t nRewind; //!< how many bytes we guarantee to rewind
- std::vector<char> vchBuf; //!< the buffer
+ std::vector<std::byte> vchBuf; //!< the buffer
protected:
//! read data from the source to fill the buffer
@@ -682,8 +685,8 @@ protected:
}
public:
- CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) :
- nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, 0)
+ CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn)
+ : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
{
if (nRewindIn >= nBufSize)
throw std::ios_base::failure("Rewind limit must be less than buffer size");
@@ -716,22 +719,23 @@ public:
}
//! read a number of bytes
- void read(char *pch, size_t nSize) {
- if (nSize + nReadPos > nReadLimit)
+ void read(Span<std::byte> dst)
+ {
+ if (dst.size() + nReadPos > nReadLimit) {
throw std::ios_base::failure("Read attempted past buffer limit");
- while (nSize > 0) {
+ }
+ while (dst.size() > 0) {
if (nReadPos == nSrcPos)
Fill();
unsigned int pos = nReadPos % vchBuf.size();
- size_t nNow = nSize;
+ size_t nNow = dst.size();
if (nNow + pos > vchBuf.size())
nNow = vchBuf.size() - pos;
if (nNow + nReadPos > nSrcPos)
nNow = nSrcPos - nReadPos;
- memcpy(pch, &vchBuf[pos], nNow);
+ memcpy(dst.data(), &vchBuf[pos], nNow);
nReadPos += nNow;
- pch += nNow;
- nSize -= nNow;
+ dst = dst.subspan(nNow);
}
}
@@ -774,12 +778,14 @@ public:
}
//! search for a given byte in the stream, and remain positioned on it
- void FindByte(char ch) {
+ void FindByte(uint8_t ch)
+ {
while (true) {
if (nReadPos == nSrcPos)
Fill();
- if (vchBuf[nReadPos % vchBuf.size()] == ch)
+ if (vchBuf[nReadPos % vchBuf.size()] == std::byte{ch}) {
break;
+ }
nReadPos++;
}
}
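On the file-backed streams the same shape applies: reads fill a caller-provided span of std::byte, and FindByte now takes an unsigned byte value rather than a possibly signed char. A rough sketch of the read pattern, where `file` is assumed to be an already-open FILE* and the buffer sizes are illustrative only:

    CBufferedFile blkdat{file, /*nBufSize=*/1 << 20, /*nRewindIn=*/1 << 16, SER_DISK, CLIENT_VERSION};
    blkdat.FindByte(0xf9);                        // e.g. a network magic byte
    std::array<std::byte, 80> header{};           // space for one block header
    blkdat.read({header.data(), header.size()});  // Span<std::byte> built from pointer + size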
diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h
index bc9c95eb53..0befe0ffcd 100644
--- a/src/support/allocators/zeroafterfree.h
+++ b/src/support/allocators/zeroafterfree.h
@@ -41,6 +41,6 @@ struct zero_after_free_allocator : public std::allocator<T> {
};
/** Byte-vector that clears its contents before deletion. */
-using SerializeData = std::vector<uint8_t, zero_after_free_allocator<uint8_t>>;
+using SerializeData = std::vector<std::byte, zero_after_free_allocator<std::byte>>;
#endif // BITCOIN_SUPPORT_ALLOCATORS_ZEROAFTERFREE_H
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index bd579db205..35c4108caa 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -43,8 +43,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
stream << filter;
std::vector<uint8_t> expected = ParseHex("03614e9b050000000000000001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
}
@@ -69,8 +70,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
stream << filter;
std::vector<uint8_t> expected = ParseHex("03ce4299050000000100008001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
}
BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
@@ -89,8 +91,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
stream << filter;
std::vector<unsigned char> expected = ParseHex("038fc16b080000000000000001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
}
BOOST_AUTO_TEST_CASE(bloom_match)
@@ -341,8 +344,9 @@ BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize)
merkleStream << merkleBlock;
std::vector<uint8_t> expected = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101");
+ auto result{MakeUCharSpan(merkleStream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), merkleStream.begin(), merkleStream.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), result.begin(), result.end());
}
BOOST_AUTO_TEST_CASE(merkle_block_4)
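These test updates follow from CDataStream now holding std::byte: the expected unsigned-char vectors are compared against an unsigned-char view of the stream obtained via MakeUCharSpan, rather than by iterating the stream directly. A minimal sketch of the pattern (values illustrative only):

    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << uint32_t{0x01020304};
    const auto result{MakeUCharSpan(stream)};          // unsigned char view of the contents
    assert(result.size() == 4 && result[0] == 0x04);   // integers serialize little-endian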
diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp
index 0cc2d12d29..3b410930ed 100644
--- a/src/test/fuzz/autofile.cpp
+++ b/src/test/fuzz/autofile.cpp
@@ -23,16 +23,16 @@ FUZZ_TARGET(autofile)
CallOneOf(
fuzzed_data_provider,
[&] {
- std::array<uint8_t, 4096> arr{};
+ std::array<std::byte, 4096> arr{};
try {
- auto_file.read((char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
+ auto_file.read({arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure&) {
}
},
[&] {
- const std::array<uint8_t, 4096> arr{};
+ const std::array<std::byte, 4096> arr{};
try {
- auto_file.write((const char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
+ auto_file.write({arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure&) {
}
},
diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp
index c3c2e4050f..a8c3318629 100644
--- a/src/test/fuzz/buffered_file.cpp
+++ b/src/test/fuzz/buffered_file.cpp
@@ -33,9 +33,9 @@ FUZZ_TARGET(buffered_file)
CallOneOf(
fuzzed_data_provider,
[&] {
- std::array<uint8_t, 4096> arr{};
+ std::array<std::byte, 4096> arr{};
try {
- opt_buffered_file->read((char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
+ opt_buffered_file->read({arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure&) {
}
},
@@ -53,7 +53,7 @@ FUZZ_TARGET(buffered_file)
return;
}
try {
- opt_buffered_file->FindByte(fuzzed_data_provider.ConsumeIntegral<char>());
+ opt_buffered_file->FindByte(fuzzed_data_provider.ConsumeIntegral<uint8_t>());
} catch (const std::ios_base::failure&) {
}
},
diff --git a/src/test/fuzz/chain.cpp b/src/test/fuzz/chain.cpp
index 326904a811..8c0ed32d51 100644
--- a/src/test/fuzz/chain.cpp
+++ b/src/test/fuzz/chain.cpp
@@ -21,15 +21,18 @@ FUZZ_TARGET(chain)
const uint256 zero{};
disk_block_index->phashBlock = &zero;
- (void)disk_block_index->GetBlockHash();
- (void)disk_block_index->GetBlockPos();
- (void)disk_block_index->GetBlockTime();
- (void)disk_block_index->GetBlockTimeMax();
- (void)disk_block_index->GetMedianTimePast();
- (void)disk_block_index->GetUndoPos();
- (void)disk_block_index->HaveTxsDownloaded();
- (void)disk_block_index->IsValid();
- (void)disk_block_index->ToString();
+ {
+ LOCK(::cs_main);
+ (void)disk_block_index->GetBlockHash();
+ (void)disk_block_index->GetBlockPos();
+ (void)disk_block_index->GetBlockTime();
+ (void)disk_block_index->GetBlockTimeMax();
+ (void)disk_block_index->GetMedianTimePast();
+ (void)disk_block_index->GetUndoPos();
+ (void)disk_block_index->HaveTxsDownloaded();
+ (void)disk_block_index->IsValid();
+ (void)disk_block_index->ToString();
+ }
const CBlockHeader block_header = disk_block_index->GetBlockHeader();
(void)CDiskBlockIndex{*disk_block_index};
@@ -55,7 +58,7 @@ FUZZ_TARGET(chain)
if (block_status & ~BLOCK_VALID_MASK) {
continue;
}
- (void)disk_block_index->RaiseValidity(block_status);
+ WITH_LOCK(::cs_main, (void)disk_block_index->RaiseValidity(block_status));
}
CBlockIndex block_index{block_header};
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index 240274664a..a14d28f4ef 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -98,12 +98,6 @@ FUZZ_TARGET_INIT(connman, initialize_connman)
(void)connman.OutboundTargetReached(fuzzed_data_provider.ConsumeBool());
},
[&] {
- // Limit now to int32_t to avoid signed integer overflow
- (void)connman.PoissonNextSendInbound(
- std::chrono::microseconds{fuzzed_data_provider.ConsumeIntegral<int32_t>()},
- std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<int>()});
- },
- [&] {
CSerializedNetMsg serialized_net_msg;
serialized_net_msg.m_type = fuzzed_data_provider.ConsumeRandomLengthString(CMessageHeader::COMMAND_SIZE);
serialized_net_msg.data = ConsumeRandomLengthByteVector(fuzzed_data_provider);
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index e9debd8c45..60c48e7c22 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -80,7 +80,7 @@ void initialize()
}
if (const char* out_path = std::getenv("WRITE_ALL_FUZZ_TARGETS_AND_ABORT")) {
std::cout << "Writing all fuzz target names to '" << out_path << "'." << std::endl;
- std::ofstream out_stream(out_path, std::ios::binary);
+ fsbridge::ofstream out_stream{out_path, std::ios::binary};
for (const auto& t : FuzzTargets()) {
if (std::get<2>(t.second)) continue;
out_stream << t.first << std::endl;
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 3087f11771..72574612a2 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -206,11 +206,6 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
stream >> deserialized_i8;
assert(i8 == deserialized_i8 && stream.empty());
- char deserialized_ch;
- stream << ch;
- stream >> deserialized_ch;
- assert(ch == deserialized_ch && stream.empty());
-
bool deserialized_b;
stream << b;
stream >> deserialized_b;
diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp
index a7b2b8bfc1..88c22ca305 100644
--- a/src/test/fuzz/p2p_transport_serialization.cpp
+++ b/src/test/fuzz/p2p_transport_serialization.cpp
@@ -70,13 +70,13 @@ FUZZ_TARGET_INIT(p2p_transport_serialization, initialize_p2p_transport_serializa
const std::chrono::microseconds m_time{std::numeric_limits<int64_t>::max()};
bool reject_message{false};
CNetMessage msg = deserializer.GetMessage(m_time, reject_message);
- assert(msg.m_command.size() <= CMessageHeader::COMMAND_SIZE);
+ assert(msg.m_type.size() <= CMessageHeader::COMMAND_SIZE);
assert(msg.m_raw_message_size <= mutable_msg_bytes.size());
assert(msg.m_raw_message_size == CMessageHeader::HEADER_SIZE + msg.m_message_size);
assert(msg.m_time == m_time);
std::vector<unsigned char> header;
- auto msg2 = CNetMsgMaker{msg.m_recv.GetVersion()}.Make(msg.m_command, MakeUCharSpan(msg.m_recv));
+ auto msg2 = CNetMsgMaker{msg.m_recv.GetVersion()}.Make(msg.m_type, MakeUCharSpan(msg.m_recv));
serializer.prepareForTransport(msg2, header);
}
}
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index b6ecf1c492..03a84b697d 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -120,6 +120,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"getchaintips",
"getchaintxstats",
"getconnectioncount",
+ "getdeploymentinfo",
"getdescriptorinfo",
"getdifficulty",
"getindexinfo",
@@ -271,7 +272,7 @@ std::string ConsumeScalarRPCArgument(FuzzedDataProvider& fuzzed_data_provider)
}
CDataStream data_stream{SER_NETWORK, PROTOCOL_VERSION};
data_stream << *opt_psbt;
- r = EncodeBase64({data_stream.begin(), data_stream.end()});
+ r = EncodeBase64(data_stream);
},
[&] {
// base58 encoded key
diff --git a/src/test/fuzz/signature_checker.cpp b/src/test/fuzz/signature_checker.cpp
index deffe26b17..f6c591aca4 100644
--- a/src/test/fuzz/signature_checker.cpp
+++ b/src/test/fuzz/signature_checker.cpp
@@ -34,7 +34,7 @@ public:
return m_fuzzed_data_provider.ConsumeBool();
}
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
{
return m_fuzzed_data_provider.ConsumeBool();
}
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index fd7f40c01d..3bc62878bd 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -328,7 +328,6 @@ void WriteToStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) noe
CallOneOf(
fuzzed_data_provider,
WRITE_TO_STREAM_CASE(bool, fuzzed_data_provider.ConsumeBool()),
- WRITE_TO_STREAM_CASE(char, fuzzed_data_provider.ConsumeIntegral<char>()),
WRITE_TO_STREAM_CASE(int8_t, fuzzed_data_provider.ConsumeIntegral<int8_t>()),
WRITE_TO_STREAM_CASE(uint8_t, fuzzed_data_provider.ConsumeIntegral<uint8_t>()),
WRITE_TO_STREAM_CASE(int16_t, fuzzed_data_provider.ConsumeIntegral<int16_t>()),
@@ -338,7 +337,7 @@ void WriteToStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) noe
WRITE_TO_STREAM_CASE(int64_t, fuzzed_data_provider.ConsumeIntegral<int64_t>()),
WRITE_TO_STREAM_CASE(uint64_t, fuzzed_data_provider.ConsumeIntegral<uint64_t>()),
WRITE_TO_STREAM_CASE(std::string, fuzzed_data_provider.ConsumeRandomLengthString(32)),
- WRITE_TO_STREAM_CASE(std::vector<char>, ConsumeRandomLengthIntegralVector<char>(fuzzed_data_provider)));
+ WRITE_TO_STREAM_CASE(std::vector<uint8_t>, ConsumeRandomLengthIntegralVector<uint8_t>(fuzzed_data_provider)));
} catch (const std::ios_base::failure&) {
break;
}
@@ -358,7 +357,6 @@ void ReadFromStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) no
CallOneOf(
fuzzed_data_provider,
READ_FROM_STREAM_CASE(bool),
- READ_FROM_STREAM_CASE(char),
READ_FROM_STREAM_CASE(int8_t),
READ_FROM_STREAM_CASE(uint8_t),
READ_FROM_STREAM_CASE(int16_t),
@@ -368,7 +366,7 @@ void ReadFromStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) no
READ_FROM_STREAM_CASE(int64_t),
READ_FROM_STREAM_CASE(uint64_t),
READ_FROM_STREAM_CASE(std::string),
- READ_FROM_STREAM_CASE(std::vector<char>));
+ READ_FROM_STREAM_CASE(std::vector<uint8_t>));
} catch (const std::ios_base::failure&) {
break;
}
diff --git a/src/test/fuzz/versionbits.cpp b/src/test/fuzz/versionbits.cpp
index cf95c0b9bf..95eb71099d 100644
--- a/src/test/fuzz/versionbits.cpp
+++ b/src/test/fuzz/versionbits.cpp
@@ -51,7 +51,7 @@ public:
ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, dummy_params, m_cache); }
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, dummy_params, m_cache); }
- BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateStatisticsFor(pindexPrev, dummy_params); }
+ BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, std::vector<bool>* signals=nullptr) const { return AbstractThresholdConditionChecker::GetStateStatisticsFor(pindex, dummy_params, signals); }
bool Condition(int32_t version) const
{
@@ -220,7 +220,14 @@ FUZZ_TARGET_INIT(versionbits, initialize)
CBlockIndex* prev = blocks.tip();
const int exp_since = checker.GetStateSinceHeightFor(prev);
const ThresholdState exp_state = checker.GetStateFor(prev);
- BIP9Stats last_stats = checker.GetStateStatisticsFor(prev);
+
+ // get statistics from end of previous period, then reset
+ BIP9Stats last_stats;
+ last_stats.period = period;
+ last_stats.threshold = threshold;
+ last_stats.count = last_stats.elapsed = 0;
+ last_stats.possible = (period >= threshold);
+ std::vector<bool> last_signals{};
int prev_next_height = (prev == nullptr ? 0 : prev->nHeight + 1);
assert(exp_since <= prev_next_height);
@@ -241,17 +248,25 @@ FUZZ_TARGET_INIT(versionbits, initialize)
assert(state == exp_state);
assert(since == exp_since);
- // GetStateStatistics may crash when state is not STARTED
- if (state != ThresholdState::STARTED) continue;
-
// check that after mining this block stats change as expected
- const BIP9Stats stats = checker.GetStateStatisticsFor(current_block);
+ std::vector<bool> signals;
+ const BIP9Stats stats = checker.GetStateStatisticsFor(current_block, &signals);
+ const BIP9Stats stats_no_signals = checker.GetStateStatisticsFor(current_block);
+ assert(stats.period == stats_no_signals.period && stats.threshold == stats_no_signals.threshold
+ && stats.elapsed == stats_no_signals.elapsed && stats.count == stats_no_signals.count
+ && stats.possible == stats_no_signals.possible);
+
assert(stats.period == period);
assert(stats.threshold == threshold);
assert(stats.elapsed == b);
assert(stats.count == last_stats.count + (signal ? 1 : 0));
assert(stats.possible == (stats.count + period >= stats.elapsed + threshold));
last_stats = stats;
+
+ assert(signals.size() == last_signals.size() + 1);
+ assert(signals.back() == signal);
+ last_signals.push_back(signal);
+ assert(signals == last_signals);
}
if (exp_state == ThresholdState::STARTED) {
@@ -265,14 +280,12 @@ FUZZ_TARGET_INIT(versionbits, initialize)
CBlockIndex* current_block = blocks.mine_block(signal);
assert(checker.Condition(current_block) == signal);
- // GetStateStatistics is safe on a period boundary
- // and has progressed to a new period
const BIP9Stats stats = checker.GetStateStatisticsFor(current_block);
assert(stats.period == period);
assert(stats.threshold == threshold);
- assert(stats.elapsed == 0);
- assert(stats.count == 0);
- assert(stats.possible == true);
+ assert(stats.elapsed == period);
+ assert(stats.count == blocks_sig);
+ assert(stats.possible == (stats.count + period >= stats.elapsed + threshold));
// More interesting is whether the state changed.
const ThresholdState state = checker.GetStateFor(current_block);
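The statistics wrapper in this target now also reports per-block signalling through an optional out-parameter, and the earlier restriction to the STARTED state is gone, so statistics can be queried at any point in a period. A hedged sketch against the wrapper defined above, where `tip` is assumed to be a CBlockIndex* somewhere inside the current period:

    std::vector<bool> signals;
    const BIP9Stats stats{checker.GetStateStatisticsFor(tip, &signals)};
    // one entry per block elapsed so far in the period; the count is consistent with it
    assert(signals.size() == size_t(stats.elapsed));
    assert(stats.count == std::count(signals.begin(), signals.end(), true));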
diff --git a/src/test/interfaces_tests.cpp b/src/test/interfaces_tests.cpp
index f4bf6ff8c9..49b7d2003b 100644
--- a/src/test/interfaces_tests.cpp
+++ b/src/test/interfaces_tests.cpp
@@ -123,6 +123,7 @@ BOOST_AUTO_TEST_CASE(findCommonAncestor)
BOOST_AUTO_TEST_CASE(hasBlocks)
{
+ LOCK(::cs_main);
auto& chain = m_node.chain;
const CChain& active = Assert(m_node.chainman)->ActiveChain();
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index eacd7ae894..4906bd2386 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -155,10 +155,10 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
if (libconsensus_flags == flags) {
int expectedSuccessCode = expect ? 1 : 0;
if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, UCharCast(stream.data()), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
} else {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, UCharCast(stream.data()), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
}
}
#endif
@@ -923,7 +923,7 @@ BOOST_AUTO_TEST_CASE(script_build)
}
#ifdef UPDATE_JSON_TESTS
- FILE* file = fopen("script_tests.json.gen", "w");
+ FILE* file = fsbridge::fopen("script_tests.json.gen", "w");
fputs(strGen.c_str(), file);
fclose(file);
#endif
@@ -1520,7 +1520,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_returns_true)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 1);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_OK);
}
@@ -1543,7 +1543,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_index_err)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_INDEX);
}
@@ -1566,7 +1566,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_size)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size() * 2, nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size() * 2, nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH);
}
@@ -1589,7 +1589,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_serialization)
stream << 0xffffffff;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_DESERIALIZE);
}
@@ -1612,7 +1612,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_amount_required_err)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_AMOUNT_REQUIRED);
}
@@ -1635,7 +1635,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_invalid_flags)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_INVALID_FLAGS);
}
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index e91c203c26..8b8133b689 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -61,7 +61,7 @@ public:
BOOST_AUTO_TEST_CASE(sizes)
{
- BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(char(0), 0));
+ BOOST_CHECK_EQUAL(sizeof(unsigned char), GetSerializeSize((unsigned char)0, 0));
BOOST_CHECK_EQUAL(sizeof(int8_t), GetSerializeSize(int8_t(0), 0));
BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(uint8_t(0), 0));
BOOST_CHECK_EQUAL(sizeof(int16_t), GetSerializeSize(int16_t(0), 0));
@@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(sizes)
BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(bool(0), 0));
// Sanity-check GetSerializeSize and c++ type matching
- BOOST_CHECK_EQUAL(GetSerializeSize(char(0), 0), 1U);
+ BOOST_CHECK_EQUAL(GetSerializeSize((unsigned char)0, 0), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(int8_t(0), 0), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(uint8_t(0), 0), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(int16_t(0), 0), 2U);
@@ -186,76 +186,78 @@ BOOST_AUTO_TEST_CASE(noncanonical)
std::vector<char>::size_type n;
// zero encoded with three bytes:
- ss.write("\xfd\x00\x00", 3);
+ ss.write(MakeByteSpan("\xfd\x00\x00").first(3));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0xfc encoded with three bytes:
- ss.write("\xfd\xfc\x00", 3);
+ ss.write(MakeByteSpan("\xfd\xfc\x00").first(3));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0xfd encoded with three bytes is OK:
- ss.write("\xfd\xfd\x00", 3);
+ ss.write(MakeByteSpan("\xfd\xfd\x00").first(3));
n = ReadCompactSize(ss);
BOOST_CHECK(n == 0xfd);
// zero encoded with five bytes:
- ss.write("\xfe\x00\x00\x00\x00", 5);
+ ss.write(MakeByteSpan("\xfe\x00\x00\x00\x00").first(5));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0xffff encoded with five bytes:
- ss.write("\xfe\xff\xff\x00\x00", 5);
+ ss.write(MakeByteSpan("\xfe\xff\xff\x00\x00").first(5));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// zero encoded with nine bytes:
- ss.write("\xff\x00\x00\x00\x00\x00\x00\x00\x00", 9);
+ ss.write(MakeByteSpan("\xff\x00\x00\x00\x00\x00\x00\x00\x00").first(9));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0x01ffffff encoded with nine bytes:
- ss.write("\xff\xff\xff\xff\x01\x00\x00\x00\x00", 9);
+ ss.write(MakeByteSpan("\xff\xff\xff\xff\x01\x00\x00\x00\x00").first(9));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
}
BOOST_AUTO_TEST_CASE(insert_delete)
{
+ constexpr auto B2I{[](std::byte b) { return std::to_integer<uint8_t>(b); }};
+
// Test inserting/deleting bytes.
CDataStream ss(SER_DISK, 0);
BOOST_CHECK_EQUAL(ss.size(), 0U);
- ss.write("\x00\x01\x02\xff", 4);
+ ss.write(MakeByteSpan("\x00\x01\x02\xff").first(4));
BOOST_CHECK_EQUAL(ss.size(), 4U);
- char c = (char)11;
+ uint8_t c{11};
// Inserting at beginning/end/middle:
- ss.insert(ss.begin(), c);
+ ss.insert(ss.begin(), std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 5U);
- BOOST_CHECK_EQUAL(ss[0], c);
- BOOST_CHECK_EQUAL(ss[1], 0);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), c);
+ BOOST_CHECK_EQUAL(B2I(ss[1]), 0);
- ss.insert(ss.end(), c);
+ ss.insert(ss.end(), std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 6U);
- BOOST_CHECK_EQUAL(ss[4], 0xff);
- BOOST_CHECK_EQUAL(ss[5], c);
+ BOOST_CHECK_EQUAL(B2I(ss[4]), 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[5]), c);
- ss.insert(ss.begin()+2, c);
+ ss.insert(ss.begin() + 2, std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 7U);
- BOOST_CHECK_EQUAL(ss[2], c);
+ BOOST_CHECK_EQUAL(B2I(ss[2]), c);
// Delete at beginning/end/middle
ss.erase(ss.begin());
BOOST_CHECK_EQUAL(ss.size(), 6U);
- BOOST_CHECK_EQUAL(ss[0], 0);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), 0);
ss.erase(ss.begin()+ss.size()-1);
BOOST_CHECK_EQUAL(ss.size(), 5U);
- BOOST_CHECK_EQUAL(ss[4], 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[4]), 0xff);
ss.erase(ss.begin()+1);
BOOST_CHECK_EQUAL(ss.size(), 4U);
- BOOST_CHECK_EQUAL(ss[0], 0);
- BOOST_CHECK_EQUAL(ss[1], 1);
- BOOST_CHECK_EQUAL(ss[2], 2);
- BOOST_CHECK_EQUAL(ss[3], 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), 0);
+ BOOST_CHECK_EQUAL(B2I(ss[1]), 1);
+ BOOST_CHECK_EQUAL(B2I(ss[2]), 2);
+ BOOST_CHECK_EQUAL(B2I(ss[3]), 0xff);
}
BOOST_AUTO_TEST_CASE(class_methods)
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 3571927397..a8f6cdd4a0 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -160,7 +160,7 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
{
- std::vector<uint8_t> in;
+ std::vector<std::byte> in;
std::vector<char> expected_xor;
std::vector<unsigned char> key;
CDataStream ds(in, 0, 0);
@@ -174,8 +174,8 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
- in.push_back('\x0f');
- in.push_back('\xf0');
+ in.push_back(std::byte{0x0f});
+ in.push_back(std::byte{0xf0});
expected_xor.push_back('\xf0');
expected_xor.push_back('\x0f');
@@ -195,8 +195,8 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
in.clear();
expected_xor.clear();
- in.push_back('\xf0');
- in.push_back('\x0f');
+ in.push_back(std::byte{0xf0});
+ in.push_back(std::byte{0x0f});
expected_xor.push_back('\x0f');
expected_xor.push_back('\x00');
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index 6f78b43826..560efb6b42 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -327,4 +327,236 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(tx_child->GetHash())));
}
}
+
+// Tests for packages containing transactions that have same-txid-different-witness equivalents in
+// the mempool.
+BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
+{
+ // Mine blocks to mature coinbases.
+ mineBlocks(5);
+ LOCK(cs_main);
+
+ // Transactions with a same-txid-different-witness transaction in the mempool should be ignored,
+ // and the mempool entry's wtxid returned.
+ CScript witnessScript = CScript() << OP_DROP << OP_TRUE;
+ CScript scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(witnessScript));
+ auto mtx_parent = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[0], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ scriptPubKey,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ false);
+ CTransactionRef ptx_parent = MakeTransactionRef(mtx_parent);
+
+ // Make two children with the same txid but different witnesses.
+ CScriptWitness witness1;
+ witness1.stack.push_back(std::vector<unsigned char>(1));
+ witness1.stack.push_back(std::vector<unsigned char>(witnessScript.begin(), witnessScript.end()));
+
+ CScriptWitness witness2(witness1);
+ witness2.stack.push_back(std::vector<unsigned char>(2));
+ witness2.stack.push_back(std::vector<unsigned char>(witnessScript.begin(), witnessScript.end()));
+
+ CKey child_key;
+ child_key.MakeNewKey(true);
+ CScript child_locking_script = GetScriptForDestination(WitnessV0KeyHash(child_key.GetPubKey()));
+ CMutableTransaction mtx_child1;
+ mtx_child1.nVersion = 1;
+ mtx_child1.vin.resize(1);
+ mtx_child1.vin[0].prevout.hash = ptx_parent->GetHash();
+ mtx_child1.vin[0].prevout.n = 0;
+ mtx_child1.vin[0].scriptSig = CScript();
+ mtx_child1.vin[0].scriptWitness = witness1;
+ mtx_child1.vout.resize(1);
+ mtx_child1.vout[0].nValue = CAmount(48 * COIN);
+ mtx_child1.vout[0].scriptPubKey = child_locking_script;
+
+ CMutableTransaction mtx_child2{mtx_child1};
+ mtx_child2.vin[0].scriptWitness = witness2;
+
+ CTransactionRef ptx_child1 = MakeTransactionRef(mtx_child1);
+ CTransactionRef ptx_child2 = MakeTransactionRef(mtx_child2);
+
+ // child1 and child2 have the same txid
+ BOOST_CHECK_EQUAL(ptx_child1->GetHash(), ptx_child2->GetHash());
+ // child1 and child2 have different wtxids
+ BOOST_CHECK(ptx_child1->GetWitnessHash() != ptx_child2->GetWitnessHash());
+
+ // Try submitting Package1{parent, child1} and Package2{parent, child2} where the children are
+ // same-txid-different-witness.
+ {
+ const auto submit_witness1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
+ {ptx_parent, ptx_child1}, /*test_accept=*/ false);
+ BOOST_CHECK_MESSAGE(submit_witness1.m_state.IsValid(),
+ "Package validation unexpectedly failed: " << submit_witness1.m_state.GetRejectReason());
+ auto it_parent1 = submit_witness1.m_tx_results.find(ptx_parent->GetWitnessHash());
+ auto it_child1 = submit_witness1.m_tx_results.find(ptx_child1->GetWitnessHash());
+ BOOST_CHECK(it_parent1 != submit_witness1.m_tx_results.end());
+ BOOST_CHECK_MESSAGE(it_parent1->second.m_state.IsValid(),
+ "Transaction unexpectedly failed: " << it_parent1->second.m_state.GetRejectReason());
+ BOOST_CHECK(it_child1 != submit_witness1.m_tx_results.end());
+ BOOST_CHECK_MESSAGE(it_child1->second.m_state.IsValid(),
+ "Transaction unexpectedly failed: " << it_child1->second.m_state.GetRejectReason());
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent->GetHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child1->GetHash())));
+
+ const auto submit_witness2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
+ {ptx_parent, ptx_child2}, /*test_accept=*/ false);
+ BOOST_CHECK_MESSAGE(submit_witness2.m_state.IsValid(),
+ "Package validation unexpectedly failed: " << submit_witness2.m_state.GetRejectReason());
+ auto it_parent2_deduped = submit_witness2.m_tx_results.find(ptx_parent->GetWitnessHash());
+ auto it_child2 = submit_witness2.m_tx_results.find(ptx_child2->GetWitnessHash());
+ BOOST_CHECK(it_parent2_deduped != submit_witness2.m_tx_results.end());
+ BOOST_CHECK(it_parent2_deduped->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY);
+ BOOST_CHECK(it_child2 != submit_witness2.m_tx_results.end());
+ BOOST_CHECK(it_child2->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS);
+ BOOST_CHECK_EQUAL(ptx_child1->GetWitnessHash(), it_child2->second.m_other_wtxid.value());
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child2->GetHash())));
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_child2->GetWitnessHash())));
+ }
+
+ // Try submitting Package1{child2, grandchild} where child2 is same-txid-different-witness as
+ // the in-mempool transaction, child1. Since child1 exists in the mempool and its outputs are
+ // available, child2 should be ignored and grandchild should be accepted.
+ //
+ // This tests a potential censorship vector in which an attacker broadcasts a competing package
+ // where a parent's witness is mutated. The honest package should be accepted despite the fact
+ // that we don't allow witness replacement.
+ CKey grandchild_key;
+ grandchild_key.MakeNewKey(true);
+ CScript grandchild_locking_script = GetScriptForDestination(WitnessV0KeyHash(grandchild_key.GetPubKey()));
+ auto mtx_grandchild = CreateValidMempoolTransaction(/*input_transaction=*/ ptx_child2, /* vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ child_key,
+ /*output_destination=*/ grandchild_locking_script,
+ /*output_amount=*/ CAmount(47 * COIN), /*submit=*/ false);
+ CTransactionRef ptx_grandchild = MakeTransactionRef(mtx_grandchild);
+
+ // We already submitted child1 above.
+ {
+ const auto submit_spend_ignored = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
+ {ptx_child2, ptx_grandchild}, /*test_accept=*/ false);
+ BOOST_CHECK_MESSAGE(submit_spend_ignored.m_state.IsValid(),
+ "Package validation unexpectedly failed: " << submit_spend_ignored.m_state.GetRejectReason());
+ auto it_child2_ignored = submit_spend_ignored.m_tx_results.find(ptx_child2->GetWitnessHash());
+ auto it_grandchild = submit_spend_ignored.m_tx_results.find(ptx_grandchild->GetWitnessHash());
+ BOOST_CHECK(it_child2_ignored != submit_spend_ignored.m_tx_results.end());
+ BOOST_CHECK(it_child2_ignored->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS);
+ BOOST_CHECK(it_grandchild != submit_spend_ignored.m_tx_results.end());
+ BOOST_CHECK(it_grandchild->second.m_result_type == MempoolAcceptResult::ResultType::VALID);
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child2->GetHash())));
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_child2->GetWitnessHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Wtxid(ptx_grandchild->GetWitnessHash())));
+ }
+
+ // A package Package{parent1, parent2, parent3, child} where the parents are a mixture of
+ // identical-tx-in-mempool, same-txid-different-witness-in-mempool, and new transactions.
+ Package package_mixed;
+
+ // Give all the parents anyone-can-spend scripts so we don't have to deal with signing the child.
+ CScript acs_script = CScript() << OP_TRUE;
+ CScript acs_spk = GetScriptForDestination(WitnessV0ScriptHash(acs_script));
+ CScriptWitness acs_witness;
+ acs_witness.stack.push_back(std::vector<unsigned char>(acs_script.begin(), acs_script.end()));
+
+ // parent1 will already be in the mempool
+ auto mtx_parent1 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[1], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ acs_spk,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ true);
+ CTransactionRef ptx_parent1 = MakeTransactionRef(mtx_parent1);
+ package_mixed.push_back(ptx_parent1);
+
+ // parent2 will have a same-txid-different-witness tx already in the mempool
+ CScript grandparent2_script = CScript() << OP_DROP << OP_TRUE;
+ CScript grandparent2_spk = GetScriptForDestination(WitnessV0ScriptHash(grandparent2_script));
+ CScriptWitness parent2_witness1;
+ parent2_witness1.stack.push_back(std::vector<unsigned char>(1));
+ parent2_witness1.stack.push_back(std::vector<unsigned char>(grandparent2_script.begin(), grandparent2_script.end()));
+ CScriptWitness parent2_witness2;
+ parent2_witness2.stack.push_back(std::vector<unsigned char>(2));
+ parent2_witness2.stack.push_back(std::vector<unsigned char>(grandparent2_script.begin(), grandparent2_script.end()));
+
+    // Create grandparent2 with an output that has multiple spending paths. Submit it to the mempool.
+ auto mtx_grandparent2 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[2], /* vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ grandparent2_spk,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ true);
+ CTransactionRef ptx_grandparent2 = MakeTransactionRef(mtx_grandparent2);
+
+ CMutableTransaction mtx_parent2_v1;
+ mtx_parent2_v1.nVersion = 1;
+ mtx_parent2_v1.vin.resize(1);
+ mtx_parent2_v1.vin[0].prevout.hash = ptx_grandparent2->GetHash();
+ mtx_parent2_v1.vin[0].prevout.n = 0;
+ mtx_parent2_v1.vin[0].scriptSig = CScript();
+ mtx_parent2_v1.vin[0].scriptWitness = parent2_witness1;
+ mtx_parent2_v1.vout.resize(1);
+ mtx_parent2_v1.vout[0].nValue = CAmount(48 * COIN);
+ mtx_parent2_v1.vout[0].scriptPubKey = acs_spk;
+
+ CMutableTransaction mtx_parent2_v2{mtx_parent2_v1};
+ mtx_parent2_v2.vin[0].scriptWitness = parent2_witness2;
+
+ CTransactionRef ptx_parent2_v1 = MakeTransactionRef(mtx_parent2_v1);
+ CTransactionRef ptx_parent2_v2 = MakeTransactionRef(mtx_parent2_v2);
+ // Put parent2_v1 in the package, submit parent2_v2 to the mempool.
+ const MempoolAcceptResult parent2_v2_result = m_node.chainman->ProcessTransaction(ptx_parent2_v2);
+ BOOST_CHECK(parent2_v2_result.m_result_type == MempoolAcceptResult::ResultType::VALID);
+ package_mixed.push_back(ptx_parent2_v1);
+
+ // parent3 will be a new transaction
+ auto mtx_parent3 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[3], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ acs_spk,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ false);
+ CTransactionRef ptx_parent3 = MakeTransactionRef(mtx_parent3);
+ package_mixed.push_back(ptx_parent3);
+
+ // child spends parent1, parent2, and parent3
+ CKey mixed_grandchild_key;
+ mixed_grandchild_key.MakeNewKey(true);
+ CScript mixed_child_spk = GetScriptForDestination(WitnessV0KeyHash(mixed_grandchild_key.GetPubKey()));
+
+ CMutableTransaction mtx_mixed_child;
+ mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent1->GetHash(), 0)));
+ mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent2_v1->GetHash(), 0)));
+ mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent3->GetHash(), 0)));
+ mtx_mixed_child.vin[0].scriptWitness = acs_witness;
+ mtx_mixed_child.vin[1].scriptWitness = acs_witness;
+ mtx_mixed_child.vin[2].scriptWitness = acs_witness;
+ mtx_mixed_child.vout.push_back(CTxOut(145 * COIN, mixed_child_spk));
+ CTransactionRef ptx_mixed_child = MakeTransactionRef(mtx_mixed_child);
+ package_mixed.push_back(ptx_mixed_child);
+
+ // Submit package:
+ // parent1 should be ignored
+ // parent2_v1 should be ignored (and v2 wtxid returned)
+ // parent3 should be accepted
+ // child should be accepted
+ {
+ const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false);
+ BOOST_CHECK_MESSAGE(mixed_result.m_state.IsValid(), mixed_result.m_state.GetRejectReason());
+ auto it_parent1 = mixed_result.m_tx_results.find(ptx_parent1->GetWitnessHash());
+ auto it_parent2 = mixed_result.m_tx_results.find(ptx_parent2_v1->GetWitnessHash());
+ auto it_parent3 = mixed_result.m_tx_results.find(ptx_parent3->GetWitnessHash());
+ auto it_child = mixed_result.m_tx_results.find(ptx_mixed_child->GetWitnessHash());
+ BOOST_CHECK(it_parent1 != mixed_result.m_tx_results.end());
+ BOOST_CHECK(it_parent2 != mixed_result.m_tx_results.end());
+ BOOST_CHECK(it_parent3 != mixed_result.m_tx_results.end());
+ BOOST_CHECK(it_child != mixed_result.m_tx_results.end());
+
+ BOOST_CHECK(it_parent1->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY);
+ BOOST_CHECK(it_parent2->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS);
+ BOOST_CHECK(it_parent3->second.m_result_type == MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK(it_child->second.m_result_type == MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(ptx_parent2_v2->GetWitnessHash(), it_parent2->second.m_other_wtxid.value());
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent1->GetHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent2_v1->GetHash())));
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_parent2_v1->GetWitnessHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent3->GetHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_mixed_child->GetHash())));
+ }
+}
BOOST_AUTO_TEST_SUITE_END()
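A note on how callers are expected to consume the new result type exercised above: a DIFFERENT_WITNESS entry means the transaction was not added, but the wtxid of the same-txid mempool transaction is exposed through m_other_wtxid. A hedged sketch, with `chainstate`, `pool` and `package` assumed to be in scope:

    const auto res{ProcessNewPackage(chainstate, pool, package, /*test_accept=*/false)};
    for (const auto& tx : package) {
        const auto it{res.m_tx_results.find(tx->GetWitnessHash())};
        if (it == res.m_tx_results.end()) continue;
        if (it->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS) {
            // the in-mempool equivalent to announce/track instead of tx's own wtxid
            const uint256& mempool_wtxid{it->second.m_other_wtxid.value()};
            (void)mempool_wtxid;
        }
    }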
diff --git a/src/test/util/blockfilter.cpp b/src/test/util/blockfilter.cpp
index 538981ce36..3ae22921b9 100644
--- a/src/test/util/blockfilter.cpp
+++ b/src/test/util/blockfilter.cpp
@@ -13,6 +13,8 @@ using node::UndoReadFromDisk;
bool ComputeFilter(BlockFilterType filter_type, const CBlockIndex* block_index, BlockFilter& filter)
{
+ LOCK(::cs_main);
+
CBlock block;
if (!ReadBlockFromDisk(block, block_index->GetBlockPos(), Params().GetConsensus())) {
return false;
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index f5742b65a1..26392e690d 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -235,7 +235,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, TestChain100Setup)
*chainman.SnapshotBlockhash());
// Ensure that the genesis block was not marked assumed-valid.
- BOOST_CHECK(!chainman.ActiveChain().Genesis()->IsAssumedValid());
+ BOOST_CHECK(WITH_LOCK(::cs_main, return !chainman.ActiveChain().Genesis()->IsAssumedValid()));
const AssumeutxoData& au_data = *ExpectedAssumeutxo(snapshot_height, ::Params());
const CBlockIndex* tip = chainman.ActiveTip();
@@ -356,6 +356,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup)
// Mark some region of the chain assumed-valid.
for (int i = 0; i <= cs1.m_chain.Height(); ++i) {
+ LOCK(::cs_main);
auto index = cs1.m_chain[i];
if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) {
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 85eea888cc..e2aed1da7c 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -311,19 +311,23 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams,
CBlockIndex* pindexNew = insertBlockIndex(diskindex.GetBlockHash());
pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
pindexNew->nHeight = diskindex.nHeight;
- pindexNew->nFile = diskindex.nFile;
- pindexNew->nDataPos = diskindex.nDataPos;
- pindexNew->nUndoPos = diskindex.nUndoPos;
pindexNew->nVersion = diskindex.nVersion;
pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
pindexNew->nTime = diskindex.nTime;
pindexNew->nBits = diskindex.nBits;
pindexNew->nNonce = diskindex.nNonce;
- pindexNew->nStatus = diskindex.nStatus;
pindexNew->nTx = diskindex.nTx;
+ {
+ LOCK(::cs_main);
+ pindexNew->nFile = diskindex.nFile;
+ pindexNew->nDataPos = diskindex.nDataPos;
+ pindexNew->nUndoPos = diskindex.nUndoPos;
+ pindexNew->nStatus = diskindex.nStatus;
+ }
- if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams))
+ if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());
+ }
pcursor->Next();
} else {
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index dc2769b81e..fb5652d0a0 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -116,10 +116,9 @@ size_t CTxMemPoolEntry::GetTxSize() const
return GetVirtualTransactionSize(nTxWeight, sigOpCost);
}
-// Update the given tx for any in-mempool descendants.
-// Assumes that CTxMemPool::m_children is correct for the given tx and all
-// descendants.
-void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<uint256> &setExclude)
+void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
+ const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove,
+ uint64_t ancestor_size_limit, uint64_t ancestor_count_limit)
{
CTxMemPoolEntry::Children stageEntries, descendants;
stageEntries = updateIt->GetMemPoolChildrenConst();
@@ -156,17 +155,18 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendan
cachedDescendants[updateIt].insert(mapTx.iterator_to(descendant));
// Update ancestor state for each descendant
mapTx.modify(mapTx.iterator_to(descendant), update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCost()));
+ // Don't directly remove the transaction here -- doing so would
+ // invalidate iterators in cachedDescendants. Mark it for removal
+ // by inserting into descendants_to_remove.
+ if (descendant.GetCountWithAncestors() > ancestor_count_limit || descendant.GetSizeWithAncestors() > ancestor_size_limit) {
+ descendants_to_remove.insert(descendant.GetTx().GetHash());
+ }
}
}
mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount));
}
-// vHashesToUpdate is the set of transaction hashes from a disconnected block
-// which has been re-added to the mempool.
-// for each entry, look for descendants that are outside vHashesToUpdate, and
-// add fee/size information for such descendants to the parent.
-// for each such descendant, also update the ancestor state to include the parent.
-void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate)
+void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate, uint64_t ancestor_size_limit, uint64_t ancestor_count_limit)
{
AssertLockHeld(cs);
// For each entry in vHashesToUpdate, store the set of in-mempool, but not
@@ -178,6 +178,8 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
// accounted for in the state of their ancestors)
std::set<uint256> setAlreadyIncluded(vHashesToUpdate.begin(), vHashesToUpdate.end());
+ std::set<uint256> descendants_to_remove;
+
// Iterate in reverse, so that whenever we are looking at a transaction
// we are sure that all in-mempool descendants have already been processed.
// This maximizes the benefit of the descendant cache and guarantees that
@@ -207,7 +209,15 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
}
}
} // release epoch guard for UpdateForDescendants
- UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded);
+ UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded, descendants_to_remove, ancestor_size_limit, ancestor_count_limit);
+ }
+
+ for (const auto& txid : descendants_to_remove) {
+ // This txid may have been removed already in a prior call to removeRecursive.
+        // Therefore we check that it is still in the mempool before removing it.
+ if (const std::optional<txiter> txiter = GetIter(txid)) {
+ removeRecursive((*txiter)->GetTx(), MemPoolRemovalReason::SIZELIMIT);
+ }
}
}
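The two extra parameters mean the reorg path, as the only caller, now passes the configured ancestor limits so that descendants which come to violate them are flagged and removed via removeRecursive. A hedged sketch of the call-site shape only; the defaults shown are illustrative (names taken from validation.h), the real caller derives them from -limitancestorsize/-limitancestorcount, and both cs_main and the mempool lock must be held:

    const uint64_t ancestor_size_limit{DEFAULT_ANCESTOR_SIZE_LIMIT * 1'000};  // kvB -> vB
    const uint64_t ancestor_count_limit{DEFAULT_ANCESTOR_LIMIT};
    pool.UpdateTransactionsFromBlock(vHashUpdate, ancestor_size_limit, ancestor_count_limit);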
diff --git a/src/txmempool.h b/src/txmempool.h
index e025dafd91..e7e5a3c402 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -636,16 +636,25 @@ public:
*/
void RemoveStaged(setEntries& stage, bool updateDescendants, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
- /** When adding transactions from a disconnected block back to the mempool,
- * new mempool entries may have children in the mempool (which is generally
- * not the case when otherwise adding transactions).
- * UpdateTransactionsFromBlock() will find child transactions and update the
- * descendant state for each transaction in vHashesToUpdate (excluding any
- * child transactions present in vHashesToUpdate, which are already accounted
- * for). Note: vHashesToUpdate should be the set of transactions from the
- * disconnected block that have been accepted back into the mempool.
+ /** UpdateTransactionsFromBlock is called when adding transactions from a
+     * disconnected block back to the mempool. New mempool entries may have
+ * children in the mempool (which is generally not the case when otherwise
+ * adding transactions).
+ * @post updated descendant state for descendants of each transaction in
+ * vHashesToUpdate (excluding any child transactions present in
+     *       vHashesToUpdate, which are already accounted for). The update adds
+     *       the fee/size of such descendants to the parent's descendant state,
+     *       and adds the parent to each descendant's ancestor state.
+ *
+ * @param[in] vHashesToUpdate The set of txids from the
+ * disconnected block that have been accepted back into the mempool.
+ * @param[in] ancestor_size_limit The maximum allowed size in virtual
+ * bytes of an entry and its ancestors
+ * @param[in] ancestor_count_limit The maximum allowed number of
+ * transactions including the entry and its ancestors.
*/
- void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
+ void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate,
+ uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
/** Try to calculate all in-mempool ancestors of entry.
* (these are all calculated including the tx itself)
@@ -794,19 +803,38 @@ private:
/** UpdateForDescendants is used by UpdateTransactionsFromBlock to update
* the descendants for a single transaction that has been added to the
* mempool but may have child transactions in the mempool, eg during a
- * chain reorg. setExclude is the set of descendant transactions in the
- * mempool that must not be accounted for (because any descendants in
- * setExclude were added to the mempool after the transaction being
- * updated and hence their state is already reflected in the parent
- * state).
+ * chain reorg.
+ *
+ * @pre CTxMemPool::m_children is correct for the given tx and all
+ * descendants.
+ * @pre cachedDescendants is an accurate cache where each entry has all
+ * descendants of the corresponding key, including those that should
+ * be removed for violation of ancestor limits.
+ * @post if updateIt has any non-excluded descendants, cachedDescendants has
+ * a new cache line for updateIt.
+ * @post descendants_to_remove has a new entry for any descendant which exceeded
+ * ancestor limits relative to updateIt.
*
- * cachedDescendants will be updated with the descendants of the transaction
- * being updated, so that future invocations don't need to walk the
- * same transaction again, if encountered in another transaction chain.
+ * @param[in] updateIt the entry to update for its descendants
+ * @param[in,out] cachedDescendants a cache where each line corresponds to all
+ * descendants. It will be updated with the descendants of the transaction
+ * being updated, so that future invocations don't need to walk the same
+ * transaction again, if encountered in another transaction chain.
+ * @param[in] setExclude the set of descendant transactions in the mempool
+ * that must not be accounted for (because any descendants in setExclude
+ * were added to the mempool after the transaction being updated and hence
+ * their state is already reflected in the parent state).
+ * @param[out] descendants_to_remove Populated with the txids of entries that
+ * exceed ancestor limits. It's the responsibility of the caller to
+ * removeRecursive them.
+ * @param[in] ancestor_size_limit the maximum combined virtual size of
+ * in-mempool ancestors allowed for any descendant
+ * @param[in] ancestor_count_limit the maximum number of in-mempool ancestor
+ * transactions allowed for any descendant
*/
- void UpdateForDescendants(txiter updateIt,
- cacheMap &cachedDescendants,
- const std::set<uint256> &setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
+ const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove,
+ uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Update ancestors of hash to add/remove it as a descendant transaction. */
void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Set ancestor state for an entry */
diff --git a/src/uint256.h b/src/uint256.h
index 72681d09c9..5c3a2f5409 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -6,6 +6,8 @@
#ifndef BITCOIN_UINT256_H
#define BITCOIN_UINT256_H
+#include <span.h>
+
#include <assert.h>
#include <cstring>
#include <stdint.h>
@@ -96,13 +98,13 @@ public:
template<typename Stream>
void Serialize(Stream& s) const
{
- s.write((char*)m_data, sizeof(m_data));
+ s.write(MakeByteSpan(m_data));
}
template<typename Stream>
void Unserialize(Stream& s)
{
- s.read((char*)m_data, sizeof(m_data));
+ s.read(MakeWritableByteSpan(m_data));
}
};
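The uint256 change above is part of a broader move from raw char* stream I/O to span-based I/O. A self-contained sketch of the same pattern, using standard C++20 std::span and a toy byte stream instead of Bitcoin Core's own Span/MakeByteSpan helpers (all names below are illustrative):

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <span>
    #include <vector>

    // Toy stream accepting byte spans, mirroring the s.write(MakeByteSpan(...)) shape.
    struct ByteStream {
        std::vector<std::byte> buf;
        void write(std::span<const std::byte> src) { buf.insert(buf.end(), src.begin(), src.end()); }
        void read(std::span<std::byte> dst) {
            // Assumes enough bytes are buffered.
            std::copy(buf.begin(), buf.begin() + dst.size(), dst.begin());
            buf.erase(buf.begin(), buf.begin() + dst.size());
        }
    };

    struct Blob32 {
        std::array<unsigned char, 32> m_data{};
        void Serialize(ByteStream& s) const { s.write(std::as_bytes(std::span{m_data})); }
        void Unserialize(ByteStream& s) { s.read(std::as_writable_bytes(std::span{m_data})); }
    };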
diff --git a/src/util/system.cpp b/src/util/system.cpp
index e34cdc7fb9..19de08d1ea 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -146,7 +146,7 @@ bool CheckDiskSpace(const fs::path& dir, uint64_t additional_bytes)
}
std::streampos GetFileSize(const char* path, std::streamsize max) {
- std::ifstream file(path, std::ios::binary);
+ fsbridge::ifstream file{path, std::ios::binary};
file.ignore(max);
return file.gcount();
}
diff --git a/src/validation.cpp b/src/validation.cpp
index fff7cfc07b..5fca1551c8 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -347,7 +347,9 @@ void CChainState::MaybeUpdateMempoolForReorg(
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in
// the disconnectpool that were added back and cleans up the mempool state.
- m_mempool->UpdateTransactionsFromBlock(vHashUpdate);
+ const uint64_t ancestor_count_limit = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
+ const uint64_t ancestor_size_limit = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
+ m_mempool->UpdateTransactionsFromBlock(vHashUpdate, ancestor_size_limit, ancestor_count_limit);
// Predicate to use for filtering transactions in removeForReorg.
// Checks whether the transaction is still final and, if it spends a coinbase output, mature.
@@ -384,10 +386,10 @@ void CChainState::MaybeUpdateMempoolForReorg(
auto it2 = m_mempool->mapTx.find(txin.prevout.hash);
if (it2 != m_mempool->mapTx.end())
continue;
- const Coin &coin = CoinsTip().AccessCoin(txin.prevout);
+ const Coin& coin{CoinsTip().AccessCoin(txin.prevout)};
assert(!coin.IsSpent());
const auto mempool_spend_height{m_chain.Tip()->nHeight + 1};
- if (coin.IsSpent() || (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY)) {
+ if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) {
return true;
}
}
@@ -619,10 +621,10 @@ private:
// Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script
// cache - should only be called after successful validation of all transactions in the package.
- // The package may end up partially-submitted after size limitting; returns true if all
+ // The package may end up partially-submitted after size limiting; returns true if all
// transactions are successfully added to the mempool, false otherwise.
- bool FinalizePackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
- std::map<const uint256, const MempoolAcceptResult>& results)
+ bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
+ std::map<const uint256, const MempoolAcceptResult>& results)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Compare a package's feerate against minimum allowed.
@@ -1061,12 +1063,17 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
return true;
}
-bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
- PackageValidationState& package_state,
- std::map<const uint256, const MempoolAcceptResult>& results)
+bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
+ PackageValidationState& package_state,
+ std::map<const uint256, const MempoolAcceptResult>& results)
{
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
+ // Sanity check: none of the transactions should be in the mempool, and none of the transactions
+ // should have a same-txid-different-witness equivalent in the mempool.
+ assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws){
+ return !m_pool.exists(GenTxid::Txid(ws.m_ptx->GetHash())); }));
+
bool all_submitted = true;
// ConsensusScriptChecks adds to the script cache and is therefore consensus-critical;
// CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the
@@ -1076,18 +1083,24 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>
if (!ConsensusScriptChecks(args, ws)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PolicyScriptChecks() passed, this should never fail.
- all_submitted = Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s",
+ ws.m_ptx->GetHash().ToString()));
}
// Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
// last calculation done in PreChecks, since package ancestors have already been submitted.
- std::string err_string;
+ std::string unused_err_string;
if(!m_pool.CalculateMemPoolAncestors(*ws.m_entry, ws.m_ancestors, m_limit_ancestors,
m_limit_ancestor_size, m_limit_descendants,
- m_limit_descendant_size, err_string)) {
+ m_limit_descendant_size, unused_err_string)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
- all_submitted = Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
+ ws.m_ptx->GetHash().ToString()));
}
// If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
// the transaction's descendant feerate into account because it hasn't seen them yet. Also,
@@ -1097,7 +1110,9 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>
if (!Finalize(args, ws)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since LimitMempoolSize() won't be called, this should never fail.
- all_submitted = Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString()));
}
}
@@ -1106,7 +1121,6 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>
LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(),
gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
- if (!all_submitted) return false;
// Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
// but don't report success unless they all made it into the mempool.
@@ -1211,8 +1225,8 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, std::move(results));
- if (!FinalizePackage(args, workspaces, package_state, results)) {
- package_state.Invalid(PackageValidationResult::PCKG_TX, "submission failed");
+ if (!SubmitPackage(args, workspaces, package_state, results)) {
+ // PackageValidationState filled in by SubmitPackage().
return PackageMempoolAcceptResult(package_state, std::move(results));
}
@@ -1237,11 +1251,13 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
return PackageMempoolAcceptResult(package_state, {});
}
- const auto& child = package[package.size() - 1];
+ // IsChildWithParents() guarantees the package contains more than one transaction.
+ assert(package.size() > 1);
// The package must be 1 child with all of its unconfirmed parents. The package is expected to
// be sorted, so the last transaction is the child.
+ const auto& child = package.back();
std::unordered_set<uint256, SaltedTxidHasher> unconfirmed_parent_txids;
- std::transform(package.cbegin(), package.end() - 1,
+ std::transform(package.cbegin(), package.cend() - 1,
std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
[](const auto& tx) { return tx->GetHash(); });
@@ -1273,10 +1289,14 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
LOCK(m_pool.cs);
std::map<const uint256, const MempoolAcceptResult> results;
- // As node operators are free to set their mempool policies however they please, it's possible
- // for package transaction(s) to already be in the mempool, and we don't want to reject the
- // entire package in that case (as that could be a censorship vector). Filter the transactions
- // that are already in mempool and add their information to results, since we already have them.
+ // Node operators are free to set their mempool policies however they please, nodes may receive
+ // transactions in different orders, and malicious counterparties may try to take advantage of
+ // policy differences to pin or delay propagation of transactions. As such, it's possible for
+ // some package transaction(s) to already be in the mempool, and we don't want to reject the
+ // entire package in that case (as that could be a censorship vector). De-duplicate the
+ // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with
+ // the new transactions. This ensures we don't double-count transaction counts and sizes when
+ // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy.
std::vector<CTransactionRef> txns_new;
for (const auto& tx : package) {
const auto& wtxid = tx->GetWitnessHash();
@@ -1297,9 +1317,10 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
// transaction for the mempool one. Note that we are ignoring the validity of the
// package transaction passed in.
// TODO: allow witness replacement in packages.
- auto iter = m_pool.GetIter(wtxid);
+ auto iter = m_pool.GetIter(txid);
assert(iter != std::nullopt);
- results.emplace(txid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
+ // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool.
+ results.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
} else {
// Transaction does not already exist in the mempool.
txns_new.push_back(tx);
@@ -1366,12 +1387,12 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx
}();
// Uncache coins pertaining to transactions that were not submitted to the mempool.
- // Ensure the coins cache is still within limits.
if (test_accept || result.m_state.IsInvalid()) {
for (const COutPoint& hashTx : coins_to_uncache) {
active_chainstate.CoinsTip().Uncache(hashTx);
}
}
+ // Ensure the coins cache is still within limits.
BlockValidationState state_dummy;
active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
return result;
@@ -4083,7 +4104,7 @@ void CChainState::LoadExternalBlockFile(FILE* fileIn, FlatFilePos* dbp)
try {
// locate a header
unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
- blkdat.FindByte(char(m_params.MessageStart()[0]));
+ blkdat.FindByte(m_params.MessageStart()[0]);
nRewind = blkdat.GetPos() + 1;
blkdat >> buf;
if (memcmp(buf, m_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
diff --git a/src/validation.h b/src/validation.h
index edbd68f783..6e9912cada 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -163,8 +163,12 @@ struct MempoolAcceptResult {
VALID, //!> Fully validated, valid.
INVALID, //!> Invalid.
MEMPOOL_ENTRY, //!> Valid, transaction was already in the mempool.
+ DIFFERENT_WITNESS, //!> Not validated. A same-txid-different-witness tx (see m_other_wtxid) already exists in the mempool and was not replaced.
};
+ /** Result type. Present in all MempoolAcceptResults. */
const ResultType m_result_type;
+
+ /** Contains information about why the transaction failed. */
const TxValidationState m_state;
// The following fields are only present when m_result_type = ResultType::VALID or MEMPOOL_ENTRY
@@ -175,6 +179,10 @@ struct MempoolAcceptResult {
/** Raw base fees in satoshis. */
const std::optional<CAmount> m_base_fees;
+ // The following field is only present when m_result_type = ResultType::DIFFERENT_WITNESS
+ /** The wtxid of the transaction in the mempool which has the same txid but different witness. */
+ const std::optional<uint256> m_other_wtxid;
+
static MempoolAcceptResult Failure(TxValidationState state) {
return MempoolAcceptResult(state);
}
@@ -187,6 +195,10 @@ struct MempoolAcceptResult {
return MempoolAcceptResult(vsize, fees);
}
+ static MempoolAcceptResult MempoolTxDifferentWitness(const uint256& other_wtxid) {
+ return MempoolAcceptResult(other_wtxid);
+ }
+
// Private constructors. Use static methods MempoolAcceptResult::Success, etc. to construct.
private:
/** Constructor for failure case */
@@ -203,6 +215,10 @@ private:
/** Constructor for already-in-mempool case. It wouldn't replace any transactions. */
explicit MempoolAcceptResult(int64_t vsize, CAmount fees)
: m_result_type(ResultType::MEMPOOL_ENTRY), m_vsize{vsize}, m_base_fees(fees) {}
+
+ /** Constructor for witness-swapped case. */
+ explicit MempoolAcceptResult(const uint256& other_wtxid)
+ : m_result_type(ResultType::DIFFERENT_WITNESS), m_other_wtxid(other_wtxid) {}
};
/**
@@ -212,7 +228,7 @@ struct PackageMempoolAcceptResult
{
const PackageValidationState m_state;
/**
- * Map from (w)txid to finished MempoolAcceptResults. The client is responsible
+ * Map from wtxid to finished MempoolAcceptResults. The client is responsible
* for keeping track of the transaction objects themselves. If a result is not
* present, it means validation was unfinished for that transaction. If there
* was a package-wide error (see result in m_state), m_tx_results will be empty.
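One way a caller might consume the new DIFFERENT_WITNESS result type, sketched against the interfaces introduced above. The field and type names follow this patch; the surrounding function is illustrative only, not actual caller code.

    // Sketch only: counts how many package transactions were deduplicated against
    // same-txid-different-witness transactions already in the mempool.
    static size_t CountDeduplicated(const PackageMempoolAcceptResult& package_result)
    {
        size_t deduped{0};
        for (const auto& [wtxid, result] : package_result.m_tx_results) {
            if (result.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS) {
                // A same-txid transaction with a different witness is already in the
                // mempool; result.m_other_wtxid identifies which mempool tx to look up.
                ++deduped;
            }
        }
        return deduped;
    }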
diff --git a/src/versionbits.cpp b/src/versionbits.cpp
index a9264e6116..36815fba17 100644
--- a/src/versionbits.cpp
+++ b/src/versionbits.cpp
@@ -98,29 +98,38 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex*
return state;
}
-BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params) const
+BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params, std::vector<bool>* signalling_blocks) const
{
BIP9Stats stats = {};
stats.period = Period(params);
stats.threshold = Threshold(params);
- if (pindex == nullptr)
- return stats;
+ if (pindex == nullptr) return stats;
// Find beginning of period
- const CBlockIndex* pindexEndOfPrevPeriod = pindex->GetAncestor(pindex->nHeight - ((pindex->nHeight + 1) % stats.period));
- stats.elapsed = pindex->nHeight - pindexEndOfPrevPeriod->nHeight;
+ int blocks_in_period = 1 + (pindex->nHeight % stats.period);
+
+ // Reset signalling_blocks
+ if (signalling_blocks) {
+ signalling_blocks->assign(blocks_in_period, false);
+ }
// Count from current block to beginning of period
+ int elapsed = 0;
int count = 0;
const CBlockIndex* currentIndex = pindex;
- while (pindexEndOfPrevPeriod->nHeight != currentIndex->nHeight){
- if (Condition(currentIndex, params))
- count++;
+ do {
+ ++elapsed;
+ --blocks_in_period;
+ if (Condition(currentIndex, params)) {
+ ++count;
+ if (signalling_blocks) signalling_blocks->at(blocks_in_period) = true;
+ }
currentIndex = currentIndex->pprev;
- }
+ } while(blocks_in_period > 0);
+ stats.elapsed = elapsed;
stats.count = count;
stats.possible = (stats.period - stats.threshold ) >= (stats.elapsed - count);
@@ -196,9 +205,9 @@ ThresholdState VersionBitsCache::State(const CBlockIndex* pindexPrev, const Cons
return VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, m_caches[pos]);
}
-BIP9Stats VersionBitsCache::Statistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
+BIP9Stats VersionBitsCache::Statistics(const CBlockIndex* pindex, const Consensus::Params& params, Consensus::DeploymentPos pos, std::vector<bool>* signalling_blocks)
{
- return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindexPrev, params);
+ return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindex, params, signalling_blocks);
}
int VersionBitsCache::StateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
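The reworked statistics loop walks backwards from pindex to the start of the current signalling period, flagging each signalling block. A self-contained sketch of the same counting scheme, with a generic height-indexed condition standing in for CBlockIndex and Condition() (purely illustrative):

    #include <functional>
    #include <vector>

    struct PeriodStats { int period; int elapsed; int count; };

    PeriodStats CountPeriod(int tip_height, int period,
                            const std::function<bool(int)>& signalled,
                            std::vector<bool>* signalling_blocks = nullptr)
    {
        PeriodStats stats{period, 0, 0};
        int blocks_in_period = 1 + (tip_height % period); // blocks from the period start up to the tip
        if (signalling_blocks) signalling_blocks->assign(blocks_in_period, false);
        int height = tip_height;
        do {
            ++stats.elapsed;
            --blocks_in_period;
            if (signalled(height)) {
                ++stats.count;
                // Record the flag at the block's offset within the period.
                if (signalling_blocks) (*signalling_blocks)[blocks_in_period] = true;
            }
            --height;
        } while (blocks_in_period > 0);
        return stats;
    }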
diff --git a/src/versionbits.h b/src/versionbits.h
index 25ddd6fa5d..1b3fa11e61 100644
--- a/src/versionbits.h
+++ b/src/versionbits.h
@@ -64,8 +64,10 @@ protected:
virtual int Threshold(const Consensus::Params& params) const =0;
public:
- /** Returns the numerical statistics of an in-progress BIP9 softfork in the current period */
- BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params) const;
+ /** Returns the numerical statistics of an in-progress BIP9 softfork in the period including pindex.
+ * If provided, signalling_blocks is set to true/false based on whether each block in the period signalled.
+ */
+ BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params, std::vector<bool>* signalling_blocks = nullptr) const;
/** Returns the state for pindex A based on parent pindexPrev B. Applies any state transition if conditions are present.
* Caches state from first block of period. */
ThresholdState GetStateFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const;
@@ -82,8 +84,10 @@ private:
ThresholdConditionCache m_caches[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] GUARDED_BY(m_mutex);
public:
- /** Get the numerical statistics for a given deployment for the signalling period that includes the block after pindexPrev. */
- static BIP9Stats Statistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);
+ /** Get the numerical statistics for a given deployment for the signalling period that includes pindex.
+ * If provided, signalling_blocks is set to true/false based on whether each block in the period signalled.
+ */
+ static BIP9Stats Statistics(const CBlockIndex* pindex, const Consensus::Params& params, Consensus::DeploymentPos pos, std::vector<bool>* signalling_blocks = nullptr);
static uint32_t Mask(const Consensus::Params& params, Consensus::DeploymentPos pos);
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index e0be914a2b..cea120a81e 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -681,10 +681,10 @@ bool BerkeleyBatch::ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool&
// Convert to streams
ssKey.SetType(SER_DISK);
ssKey.clear();
- ssKey.write((char*)datKey.get_data(), datKey.get_size());
+ ssKey.write({BytePtr(datKey.get_data()), datKey.get_size()});
ssValue.SetType(SER_DISK);
ssValue.clear();
- ssValue.write((char*)datValue.get_data(), datValue.get_size());
+ ssValue.write({BytePtr(datValue.get_data()), datValue.get_size()});
return true;
}
@@ -756,7 +756,7 @@ bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
SafeDbt datValue;
int ret = pdb->get(activeTxn, datKey, datValue, 0);
if (ret == 0 && datValue.get_data() != nullptr) {
- value.write((char*)datValue.get_data(), datValue.get_size());
+ value.write({BytePtr(datValue.get_data()), datValue.get_size()});
return true;
}
return false;
diff --git a/src/wallet/coincontrol.h b/src/wallet/coincontrol.h
index 5ef2295c88..65a5c83366 100644
--- a/src/wallet/coincontrol.h
+++ b/src/wallet/coincontrol.h
@@ -115,9 +115,28 @@ public:
vOutpoints.assign(setSelected.begin(), setSelected.end());
}
+ void SetInputWeight(const COutPoint& outpoint, int64_t weight)
+ {
+ m_input_weights[outpoint] = weight;
+ }
+
+ bool HasInputWeight(const COutPoint& outpoint) const
+ {
+ return m_input_weights.count(outpoint) > 0;
+ }
+
+ int64_t GetInputWeight(const COutPoint& outpoint) const
+ {
+ auto it = m_input_weights.find(outpoint);
+ assert(it != m_input_weights.end());
+ return it->second;
+ }
+
private:
std::set<COutPoint> setSelected;
std::map<COutPoint, CTxOut> m_external_txouts;
+ //! Map from COutPoint to the maximum weight for that input
+ std::map<COutPoint, int64_t> m_input_weights;
};
} // namespace wallet
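Since GetInputWeight() asserts that the outpoint is present, callers are expected to guard it with HasInputWeight(), as the wallet code later in this patch does. A small usage sketch; the outpoint and the 272-weight value (roughly a max-size P2WPKH input) are illustrative assumptions.

    // Illustrative only: mirrors the Has/Get guard used in SelectCoins()/DummySignTx().
    CCoinControl coin_control;
    const COutPoint outpoint{uint256{}, 0};      // placeholder outpoint
    coin_control.SetInputWeight(outpoint, 272);  // e.g. an upper bound for a P2WPKH input

    if (coin_control.HasInputWeight(outpoint)) {
        const int64_t weight = coin_control.GetInputWeight(outpoint);
        // Use `weight` instead of the wallet's own size estimate for this input.
    }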
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index 7dfb1d8839..3e34a2f776 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -47,12 +47,12 @@ bool DumpWallet(CWallet& wallet, bilingual_str& error)
// Write out a magic string with version
std::string line = strprintf("%s,%u\n", DUMP_MAGIC, DUMP_VERSION);
dump_file.write(line.data(), line.size());
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
// Write out the file format
line = strprintf("%s,%s\n", "format", db.Format());
dump_file.write(line.data(), line.size());
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
if (ret) {
@@ -73,7 +73,7 @@ bool DumpWallet(CWallet& wallet, bilingual_str& error)
std::string value_str = HexStr(ss_value);
line = strprintf("%s,%s\n", key_str, value_str);
dump_file.write(line.data(), line.size());
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
}
}
@@ -150,7 +150,7 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
return false;
}
std::string magic_hasher_line = strprintf("%s,%s\n", magic_key, version_value);
- hasher.write(magic_hasher_line.data(), magic_hasher_line.size());
+ hasher.write(MakeByteSpan(magic_hasher_line));
// Get the stored file format
std::string format_key;
@@ -181,7 +181,7 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
warnings.push_back(strprintf(_("Warning: Dumpfile wallet format \"%s\" does not match command line specified format \"%s\"."), format_value, file_format));
}
std::string format_hasher_line = strprintf("%s,%s\n", format_key, format_value);
- hasher.write(format_hasher_line.data(), format_hasher_line.size());
+ hasher.write(MakeByteSpan(format_hasher_line));
DatabaseOptions options;
DatabaseStatus status;
@@ -225,7 +225,7 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
}
std::string line = strprintf("%s,%s\n", key, value);
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
if (key.empty() || value.empty()) {
continue;
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index b1466869b9..9083c304b2 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -90,7 +90,6 @@ WalletTxStatus MakeWalletTxStatus(const CWallet& wallet, const CWalletTx& wtx)
result.depth_in_main_chain = wallet.GetTxDepthInMainChain(wtx);
result.time_received = wtx.nTimeReceived;
result.lock_time = wtx.tx->nLockTime;
- result.is_final = wallet.chain().checkFinalTx(*wtx.tx);
result.is_trusted = CachedTxIsTrusted(wallet, wtx);
result.is_abandoned = wtx.isAbandoned();
result.is_coinbase = wtx.IsCoinBase();
diff --git a/src/wallet/receive.cpp b/src/wallet/receive.cpp
index e598d6f979..1a6f06213c 100644
--- a/src/wallet/receive.cpp
+++ b/src/wallet/receive.cpp
@@ -279,8 +279,6 @@ bool CachedTxIsFromMe(const CWallet& wallet, const CWalletTx& wtx, const isminef
bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx, std::set<uint256>& trusted_parents)
{
AssertLockHeld(wallet.cs_wallet);
- // Quick answer in most cases
- if (!wallet.chain().checkFinalTx(*wtx.tx)) return false;
int nDepth = wallet.GetTxDepthInMainChain(wtx);
if (nDepth >= 1) return true;
if (nDepth < 0) return false;
diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp
index f10de11662..035541babd 100644
--- a/src/wallet/rpc/coins.cpp
+++ b/src/wallet/rpc/coins.cpp
@@ -60,8 +60,8 @@ static CAmount GetReceived(const CWallet& wallet, const UniValue& params, bool b
if (depth < min_depth
// Coinbase with less than 1 confirmation is no longer in the main chain
|| (wtx.IsCoinBase() && (depth < 1 || !include_coinbase))
- || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase)
- || !wallet.chain().checkFinalTx(*wtx.tx)) {
+ || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase))
+ {
continue;
}
diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp
index cae3542a5e..433b5a1815 100644
--- a/src/wallet/rpc/spend.cpp
+++ b/src/wallet/rpc/spend.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <consensus/validation.h>
#include <core_io.h>
#include <key_io.h>
#include <policy/policy.h>
@@ -429,6 +430,7 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
{"replaceable", UniValueType(UniValue::VBOOL)},
{"conf_target", UniValueType(UniValue::VNUM)},
{"estimate_mode", UniValueType(UniValue::VSTR)},
+ {"input_weights", UniValueType(UniValue::VARR)},
},
true, true);
@@ -548,6 +550,37 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
}
}
+ if (options.exists("input_weights")) {
+ for (const UniValue& input : options["input_weights"].get_array().getValues()) {
+ uint256 txid = ParseHashO(input, "txid");
+
+ const UniValue& vout_v = find_value(input, "vout");
+ if (!vout_v.isNum()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing vout key");
+ }
+ int vout = vout_v.get_int();
+ if (vout < 0) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, vout cannot be negative");
+ }
+
+ const UniValue& weight_v = find_value(input, "weight");
+ if (!weight_v.isNum()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing weight key");
+ }
+ int64_t weight = weight_v.get_int64();
+ const int64_t min_input_weight = GetTransactionInputWeight(CTxIn());
+ CHECK_NONFATAL(min_input_weight == 165);
+ if (weight < min_input_weight) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, weight cannot be less than 165 (41 bytes (size of outpoint + sequence + empty scriptSig) * 4 (witness scaling factor)) + 1 (empty witness)");
+ }
+ if (weight > MAX_STANDARD_TX_WEIGHT) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter, weight cannot be greater than the maximum standard tx weight of %d", MAX_STANDARD_TX_WEIGHT));
+ }
+
+ coinControl.SetInputWeight(COutPoint(txid, vout), weight);
+ }
+ }
+
if (tx.vout.size() == 0)
throw JSONRPCError(RPC_INVALID_PARAMETER, "TX must have at least one output");
@@ -585,6 +618,23 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
}
}
+static void SetOptionsInputWeights(const UniValue& inputs, UniValue& options)
+{
+ if (options.exists("input_weights")) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Input weights should be specified in inputs rather than in options.");
+ }
+ if (inputs.size() == 0) {
+ return;
+ }
+ UniValue weights(UniValue::VARR);
+ for (const UniValue& input : inputs.getValues()) {
+ if (input.exists("weight")) {
+ weights.push_back(input);
+ }
+ }
+ options.pushKV("input_weights", weights);
+}
+
RPCHelpMan fundrawtransaction()
{
return RPCHelpMan{"fundrawtransaction",
@@ -626,6 +676,17 @@ RPCHelpMan fundrawtransaction()
{"vout_index", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The zero-based output index, before a change output is added."},
},
},
+ {"input_weights", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "Inputs and their corresponding weights",
+ {
+ {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
+ {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output index"},
+ {"weight", RPCArg::Type::NUM, RPCArg::Optional::NO, "The maximum weight for this input, "
+ "including the weight of the outpoint and sequence number. "
+ "Note that serialized signature sizes are not guaranteed to be consistent, "
+ "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures."
+ "Remember to convert serialized sizes to weight units when necessary."},
+ },
+ },
},
FundTxDoc()),
"options"},
@@ -1007,6 +1068,11 @@ RPCHelpMan send()
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"},
{"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO, "The sequence number"},
+ {"weight", RPCArg::Type::NUM, RPCArg::DefaultHint{"Calculated from wallet and solving data"}, "The maximum weight for this input, "
+ "including the weight of the outpoint and sequence number. "
+ "Note that signature sizes are not guaranteed to be consistent, "
+ "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures."
+ "Remember to convert serialized sizes to weight units when necessary."},
},
},
{"locktime", RPCArg::Type::NUM, RPCArg::Default{0}, "Raw locktime. Non-0 value also locktime-activates inputs"},
@@ -1110,6 +1176,7 @@ RPCHelpMan send()
// Automatically select coins, unless at least one is manually selected. Can
// be overridden by options.add_inputs.
coin_control.m_add_inputs = rawTx.vin.size() == 0;
+ SetOptionsInputWeights(options["inputs"], options);
FundTransaction(*pwallet, rawTx, fee, change_position, options, coin_control, /* override_min_fee */ false);
bool add_to_wallet = true;
@@ -1250,6 +1317,11 @@ RPCHelpMan walletcreatefundedpsbt()
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"},
{"sequence", RPCArg::Type::NUM, RPCArg::DefaultHint{"depends on the value of the 'locktime' and 'options.replaceable' arguments"}, "The sequence number"},
+ {"weight", RPCArg::Type::NUM, RPCArg::DefaultHint{"Calculated from wallet and solving data"}, "The maximum weight for this input, "
+ "including the weight of the outpoint and sequence number. "
+ "Note that signature sizes are not guaranteed to be consistent, "
+ "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures."
+ "Remember to convert serialized sizes to weight units when necessary."},
},
},
},
@@ -1330,10 +1402,12 @@ RPCHelpMan walletcreatefundedpsbt()
}, true
);
+ UniValue options = request.params[3];
+
CAmount fee;
int change_position;
bool rbf{wallet.m_signal_rbf};
- const UniValue &replaceable_arg = request.params[3]["replaceable"];
+ const UniValue &replaceable_arg = options["replaceable"];
if (!replaceable_arg.isNull()) {
RPCTypeCheckArgument(replaceable_arg, UniValue::VBOOL);
rbf = replaceable_arg.isTrue();
@@ -1343,7 +1417,8 @@ RPCHelpMan walletcreatefundedpsbt()
// Automatically select coins, unless at least one is manually selected. Can
// be overridden by options.add_inputs.
coin_control.m_add_inputs = rawTx.vin.size() == 0;
- FundTransaction(wallet, rawTx, fee, change_position, request.params[3], coin_control, /* override_min_fee */ true);
+ SetOptionsInputWeights(request.params[0], options);
+ FundTransaction(wallet, rawTx, fee, change_position, options, coin_control, /* override_min_fee */ true);
// Make a blank psbt
PartiallySignedTransaction psbtx(rawTx);
diff --git a/src/wallet/rpc/transactions.cpp b/src/wallet/rpc/transactions.cpp
index d9034808f4..eef2c13ee1 100644
--- a/src/wallet/rpc/transactions.cpp
+++ b/src/wallet/rpc/transactions.cpp
@@ -114,8 +114,8 @@ static UniValue ListReceived(const CWallet& wallet, const UniValue& params, cons
// Coinbase with less than 1 confirmation is no longer in the main chain
if ((wtx.IsCoinBase() && (nDepth < 1 || !include_coinbase))
- || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase)
- || !wallet.chain().checkFinalTx(*wtx.tx)) {
+ || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase))
+ {
continue;
}
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index d87bdc8679..a42df8262c 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -105,10 +105,6 @@ void AvailableCoins(const CWallet& wallet, std::vector<COutput>& vCoins, const C
const uint256& wtxid = entry.first;
const CWalletTx& wtx = entry.second;
- if (!wallet.chain().checkFinalTx(*wtx.tx)) {
- continue;
- }
-
if (wallet.IsTxImmatureCoinBase(wtx))
continue;
@@ -455,15 +451,17 @@ std::optional<SelectionResult> SelectCoins(const CWallet& wallet, const std::vec
}
input_bytes = GetTxSpendSize(wallet, wtx, outpoint.n, false);
txout = wtx.tx->vout.at(outpoint.n);
- }
- if (input_bytes == -1) {
- // The input is external. We either did not find the tx in mapWallet, or we did but couldn't compute the input size with wallet data
+ } else {
+ // The input is external. We did not find the tx in mapWallet.
if (!coin_control.GetExternalOutput(outpoint, txout)) {
- // Not ours, and we don't have solving data.
return std::nullopt;
}
input_bytes = CalculateMaximumSignedInputSize(txout, &coin_control.m_external_provider, /* use_max_sig */ true);
}
+ // If available, override the calculated size with the size specified via coin control
+ if (coin_control.HasInputWeight(outpoint)) {
+ input_bytes = GetVirtualTransactionSize(coin_control.GetInputWeight(outpoint), 0, 0);
+ }
CInputCoin coin(outpoint, txout, input_bytes);
if (coin.m_input_bytes == -1) {
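The call GetVirtualTransactionSize(weight, 0, 0) above converts the user-specified weight into a virtual size; with the sigop term zero this is just weight divided by four, rounded up, per BIP141. A minimal sketch of that arithmetic (the helper name is illustrative):

    #include <cstdint>

    // With no sigop adjustment, virtual size is ceil(weight / 4) (BIP141).
    int64_t WeightToVsize(int64_t weight) { return (weight + 3) / 4; }

    // Examples matching the limits used in this patch:
    //   WeightToVsize(165)    == 42      (minimum allowed input weight)
    //   WeightToVsize(400000) == 100000  (MAX_STANDARD_TX_WEIGHT)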
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index 07e387f177..2b2181e70b 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -395,9 +395,9 @@ bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value)
return false;
}
// Leftmost column in result is index 0
- const char* data = reinterpret_cast<const char*>(sqlite3_column_blob(m_read_stmt, 0));
- int data_size = sqlite3_column_bytes(m_read_stmt, 0);
- value.write(data, data_size);
+ const std::byte* data{BytePtr(sqlite3_column_blob(m_read_stmt, 0))};
+ size_t data_size(sqlite3_column_bytes(m_read_stmt, 0));
+ value.write({data, data_size});
sqlite3_clear_bindings(m_read_stmt);
sqlite3_reset(m_read_stmt);
@@ -512,12 +512,12 @@ bool SQLiteBatch::ReadAtCursor(CDataStream& key, CDataStream& value, bool& compl
}
// Leftmost column in result is index 0
- const char* key_data = reinterpret_cast<const char*>(sqlite3_column_blob(m_cursor_stmt, 0));
- int key_data_size = sqlite3_column_bytes(m_cursor_stmt, 0);
- key.write(key_data, key_data_size);
- const char* value_data = reinterpret_cast<const char*>(sqlite3_column_blob(m_cursor_stmt, 1));
- int value_data_size = sqlite3_column_bytes(m_cursor_stmt, 1);
- value.write(value_data, value_data_size);
+ const std::byte* key_data{BytePtr(sqlite3_column_blob(m_cursor_stmt, 0))};
+ size_t key_data_size(sqlite3_column_bytes(m_cursor_stmt, 0));
+ key.write({key_data, key_data_size});
+ const std::byte* value_data{BytePtr(sqlite3_column_blob(m_cursor_stmt, 1))};
+ size_t value_data_size(sqlite3_column_bytes(m_cursor_stmt, 1));
+ value.write({value_data, value_data_size});
return true;
}
diff --git a/src/wallet/test/spend_tests.cpp b/src/wallet/test/spend_tests.cpp
index b2a0697c21..334bd5b8bc 100644
--- a/src/wallet/test/spend_tests.cpp
+++ b/src/wallet/test/spend_tests.cpp
@@ -63,5 +63,56 @@ BOOST_FIXTURE_TEST_CASE(SubtractFee, TestChain100Setup)
BOOST_CHECK_EQUAL(fee, check_tx(fee + 123));
}
+static void TestFillInputToWeight(int64_t additional_weight, std::vector<int64_t> expected_stack_sizes)
+{
+ static const int64_t EMPTY_INPUT_WEIGHT = GetTransactionInputWeight(CTxIn());
+
+ CTxIn input;
+ int64_t target_weight = EMPTY_INPUT_WEIGHT + additional_weight;
+ BOOST_CHECK(FillInputToWeight(input, target_weight));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), target_weight);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), expected_stack_sizes.size());
+ for (unsigned int i = 0; i < expected_stack_sizes.size(); ++i) {
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack[i].size(), expected_stack_sizes[i]);
+ }
+}
+
+BOOST_FIXTURE_TEST_CASE(FillInputToWeightTest, BasicTestingSetup)
+{
+ {
+ // Targets less than or equal to the minimum of 165 should not add any witness data
+ CTxIn input;
+ BOOST_CHECK(!FillInputToWeight(input, -1));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ BOOST_CHECK(!FillInputToWeight(input, 0));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ BOOST_CHECK(!FillInputToWeight(input, 164));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ BOOST_CHECK(FillInputToWeight(input, 165));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ }
+
+ // Make sure we can add at least one weight unit
+ TestFillInputToWeight(1, {0});
+
+ // 1 byte compact size uint boundary
+ TestFillInputToWeight(252, {251});
+ TestFillInputToWeight(253, {83, 168});
+ TestFillInputToWeight(262, {86, 174});
+ TestFillInputToWeight(263, {260});
+
+ // 3 byte compact size uint boundary
+ TestFillInputToWeight(65535, {65532});
+ TestFillInputToWeight(65536, {21842, 43688});
+ TestFillInputToWeight(65545, {21845, 43694});
+ TestFillInputToWeight(65546, {65541});
+
+ // Note: We don't test the next boundary because of memory allocation constraints.
+}
+
BOOST_AUTO_TEST_SUITE_END()
} // namespace wallet
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index bb6021b857..9a74545fb5 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -140,11 +140,13 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
}
// Prune the older block file.
+ int file_number;
{
LOCK(cs_main);
- Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(oldTip->GetBlockPos().nFile);
+ file_number = oldTip->GetBlockPos().nFile;
+ Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(file_number);
}
- UnlinkPrunedFiles({oldTip->GetBlockPos().nFile});
+ UnlinkPrunedFiles({file_number});
// Verify ScanForWalletTransactions only picks transactions in the new block
// file.
@@ -169,9 +171,10 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Prune the remaining block file.
{
LOCK(cs_main);
- Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(newTip->GetBlockPos().nFile);
+ file_number = newTip->GetBlockPos().nFile;
+ Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(file_number);
}
- UnlinkPrunedFiles({newTip->GetBlockPos().nFile});
+ UnlinkPrunedFiles({file_number});
// Verify ScanForWalletTransactions scans no blocks.
{
@@ -202,11 +205,13 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
CBlockIndex* newTip = m_node.chainman->ActiveChain().Tip();
// Prune the older block file.
+ int file_number;
{
LOCK(cs_main);
- Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(oldTip->GetBlockPos().nFile);
+ file_number = oldTip->GetBlockPos().nFile;
+ Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(file_number);
}
- UnlinkPrunedFiles({oldTip->GetBlockPos().nFile});
+ UnlinkPrunedFiles({file_number});
// Verify importmulti RPC returns failure for a key whose creation time is
// before the missing block, and success for a key whose creation time is
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 3fcb086d2d..337a5139e1 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1507,6 +1507,49 @@ bool DummySignInput(const SigningProvider& provider, CTxIn &tx_in, const CTxOut
return true;
}
+bool FillInputToWeight(CTxIn& txin, int64_t target_weight)
+{
+ assert(txin.scriptSig.empty());
+ assert(txin.scriptWitness.IsNull());
+
+ int64_t txin_weight = GetTransactionInputWeight(txin);
+
+ // Do nothing if the weight that should be added is less than the weight that already exists
+ if (target_weight < txin_weight) {
+ return false;
+ }
+ if (target_weight == txin_weight) {
+ return true;
+ }
+
+ // Subtract current txin weight, which should include empty witness stack
+ int64_t add_weight = target_weight - txin_weight;
+ assert(add_weight > 0);
+
+ // We will want to subtract the size of the Compact Size UInt that will also be serialized.
+ // However, doing so when the size is near a boundary can make it impossible to choose a
+ // stack element size (or combination of sizes) that exactly equals the target.
+ // To avoid this, if the weight to add is within 10 bytes above a boundary, the size is
+ // split so that roughly 2/3rds goes into one stack element and the remaining 1/3rd into
+ // another. Using thirds allows us to avoid additional boundaries.
+ // 10 bytes is used because it accounts for the largest possible compact size prefix; this does not need to be precise.
+ if ((add_weight >= 253 && add_weight < 263)
+ || (add_weight > std::numeric_limits<uint16_t>::max() && add_weight <= std::numeric_limits<uint16_t>::max() + 10)
+ || (add_weight > std::numeric_limits<uint32_t>::max() && add_weight <= std::numeric_limits<uint32_t>::max() + 10)) {
+ int64_t first_weight = add_weight / 3;
+ add_weight -= first_weight;
+
+ first_weight -= GetSizeOfCompactSize(first_weight);
+ txin.scriptWitness.stack.emplace(txin.scriptWitness.stack.end(), first_weight, 0);
+ }
+
+ add_weight -= GetSizeOfCompactSize(add_weight);
+ txin.scriptWitness.stack.emplace(txin.scriptWitness.stack.end(), add_weight, 0);
+ assert(GetTransactionInputWeight(txin) == target_weight);
+
+ return true;
+}
+
// Helper for producing a bunch of max-sized low-S low-R signatures (eg 71 bytes)
bool CWallet::DummySignTx(CMutableTransaction &txNew, const std::vector<CTxOut> &txouts, const CCoinControl* coin_control) const
{
@@ -1515,6 +1558,14 @@ bool CWallet::DummySignTx(CMutableTransaction &txNew, const std::vector<CTxOut>
for (const auto& txout : txouts)
{
CTxIn& txin = txNew.vin[nIn];
+ // If weight was provided, fill the input to that weight
+ if (coin_control && coin_control->HasInputWeight(txin.prevout)) {
+ if (!FillInputToWeight(txin, coin_control->GetInputWeight(txin.prevout))) {
+ return false;
+ }
+ nIn++;
+ continue;
+ }
// Use max sig if watch only inputs were used or if this particular input is an external input
// to ensure a sufficient fee is attained for the requested feerate.
const bool use_max_sig = coin_control && (coin_control->fAllowWatchOnly || coin_control->IsExternalSelected(txin.prevout));
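The boundary handling in FillInputToWeight exists because a dummy witness element of n zero bytes contributes n plus its compact-size prefix, so some targets just above a prefix boundary cannot be reached with a single element. A worked sketch of the arithmetic; the helpers below are illustrative stand-ins for GetSizeOfCompactSize and the per-element accounting.

    #include <cstdint>

    // Size of the compact-size prefix for a value n (stand-in for GetSizeOfCompactSize).
    int64_t CompactSizeLen(uint64_t n)
    {
        return n <= 252 ? 1 : n <= 0xffff ? 3 : n <= 0xffffffff ? 5 : 9;
    }

    // Weight added by a single dummy witness element of n zero bytes.
    int64_t ElementWeight(uint64_t n) { return CompactSizeLen(n) + n; }

    // Single-element targets: ElementWeight(252) == 253 but ElementWeight(253) == 256,
    // so an extra weight of, say, 254 or 255 cannot be reached with one element alone.
    // Splitting roughly 2/3 + 1/3, as FillInputToWeight does for targets within ~10 of a
    // boundary, sidesteps this: for a 253-weight target the test expects elements of
    // 83 and 168 bytes, and indeed ElementWeight(83) + ElementWeight(168) == 84 + 169 == 253.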
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 00a1865a0e..e2c5c69c91 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -939,6 +939,8 @@ bool AddWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
bool RemoveWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
bool DummySignInput(const SigningProvider& provider, CTxIn &tx_in, const CTxOut &txout, bool use_max_sig);
+
+bool FillInputToWeight(CTxIn& txin, int64_t target_weight);
} // namespace wallet
#endif // BITCOIN_WALLET_WALLET_H
diff --git a/test/functional/data/rpc_decodescript.json b/test/functional/data/rpc_decodescript.json
index d1aa9ab00d..8903f5efac 100644
--- a/test/functional/data/rpc_decodescript.json
+++ b/test/functional/data/rpc_decodescript.json
@@ -4,6 +4,7 @@
{
"asm": "1 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"address": "bcrt1pamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqz6nvlh",
+ "desc": "addr(bcrt1pamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqz6nvlh)#v52jnujz",
"type": "witness_v1_taproot"
}
],
@@ -12,6 +13,7 @@
{
"asm": "1 -28398",
"address": "bcrt1pamhqk96edn",
+ "desc": "addr(bcrt1pamhqk96edn)#vkh8uj5a",
"type": "witness_unknown"
}
],
@@ -20,6 +22,7 @@
{
"asm": "0 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"address": "bcrt1qamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqgdn98t",
+ "desc": "addr(bcrt1qamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqgdn98t)#afaecevx",
"type": "witness_v0_scripthash",
"p2sh": "2MwGk8mw1GBP6U9D5X8gTvgvXpuknmAK3fo"
}
@@ -29,6 +32,7 @@
{
"asm": "OP_HASH160 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee OP_EQUAL",
"address": "2NF2b3KS8xXb9XHvbRMXdZh8s5g92rUZHtp",
+ "desc": "addr(2NF2b3KS8xXb9XHvbRMXdZh8s5g92rUZHtp)#ywfcpmh9",
"type": "scripthash"
}
],
@@ -36,6 +40,7 @@
"6a00",
{
"asm": "OP_RETURN 0",
+ "desc": "raw(6a00)#ncfmkl43",
"type": "nulldata"
}
],
@@ -43,6 +48,7 @@
"6aee",
{
"asm": "OP_RETURN OP_UNKNOWN",
+ "desc": "raw(6aee)#vsyzgqdt",
"type": "nonstandard"
}
],
@@ -50,6 +56,7 @@
"6a02ee",
{
"asm": "OP_RETURN [error]",
+ "desc": "raw(6a02ee)#gvdwnlzl",
"type": "nonstandard"
}
],
@@ -57,10 +64,12 @@
"02eeee",
{
"asm": "-28398",
+ "desc": "raw(02eeee)#5xzck7pr",
"type": "nonstandard",
"p2sh": "2N34iiGoUUkVSPiaaTFpJjB1FR9TXQu3PGM",
"segwit": {
"asm": "0 96c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
+ "desc": "addr(bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5)#5akkdska",
"hex": "002096c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
"address": "bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5",
"type": "witness_v0_scripthash",
@@ -72,6 +81,7 @@
"ba",
{
"asm": "OP_CHECKSIGADD",
+ "desc": "raw(ba)#yy0eg44l",
"type": "nonstandard"
}
],
@@ -79,6 +89,7 @@
"50",
{
"asm": "OP_RESERVED",
+ "desc": "raw(50)#a7tu03xf",
"type": "nonstandard"
}
]
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 7fd0d0140b..9d32749a08 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -92,7 +92,7 @@ class BIP65Test(BitcoinTestFramework):
self.rpc_timeout = 480
def test_cltv_info(self, *, is_active):
- assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip65'], {
+ assert_equal(self.nodes[0].getdeploymentinfo()['deployments']['bip65'], {
"active": is_active,
"height": CLTV_HEIGHT,
"type": "buried",
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index f35ce7e0c9..9a46839969 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -60,7 +60,7 @@ class BIP66Test(BitcoinTestFramework):
return self.miniwallet.create_self_transfer(utxo_to_spend=utxo_to_spend)['tx']
def test_dersig_info(self, *, is_active):
- assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip66'],
+ assert_equal(self.nodes[0].getdeploymentinfo()['deployments']['bip66'],
{
"active": is_active,
"height": DERSIG_HEIGHT,
diff --git a/test/functional/feature_maxtipage.py b/test/functional/feature_maxtipage.py
new file mode 100755
index 0000000000..87f9d6962d
--- /dev/null
+++ b/test/functional/feature_maxtipage.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for setting nMaxTipAge on command line.
+
+Nodes don't consider themselves out of "initial block download" as long as
+their best known block header time is more than nMaxTipAge in the past.
+"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+
+DEFAULT_MAX_TIP_AGE = 24 * 60 * 60
+
+
+class MaxTipAgeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 2
+
+ def test_maxtipage(self, maxtipage, set_parameter=True):
+ node_miner = self.nodes[0]
+ node_ibd = self.nodes[1]
+
+ self.restart_node(1, [f'-maxtipage={maxtipage}'] if set_parameter else None)
+ self.connect_nodes(0, 1)
+
+ # tips older than maximum age -> stay in IBD
+ cur_time = int(time.time())
+ node_ibd.setmocktime(cur_time)
+ for delta in [5, 4, 3, 2, 1]:
+ node_miner.setmocktime(cur_time - maxtipage - delta)
+ self.generate(node_miner, 1)
+ assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], True)
+
+ # tip within maximum age -> leave IBD
+ node_miner.setmocktime(cur_time - maxtipage)
+ self.generate(node_miner, 1)
+ assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], False)
+
+ def run_test(self):
+ self.log.info("Test IBD with maximum tip age of 24 hours (default).")
+ self.test_maxtipage(DEFAULT_MAX_TIP_AGE, set_parameter=False)
+
+ for hours in [20, 10, 5, 2, 1]:
+ maxtipage = hours * 60 * 60
+ self.log.info(f"Test IBD with maximum tip age of {hours} hours (-maxtipage={maxtipage}).")
+ self.test_maxtipage(maxtipage)
+
+
+if __name__ == '__main__':
+ MaxTipAgeTest().main()
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 16c15e3f74..51de582ce0 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -17,7 +17,7 @@ from test_framework.util import assert_equal
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
- self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000']]
+ self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 4dd4899f74..2d96ba74b5 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -6,6 +6,7 @@
Test the following RPCs:
- getblockchaininfo
+ - getdeploymentinfo
- getchaintxstats
- gettxoutsetinfo
- getblockheader
@@ -79,6 +80,7 @@ class BlockchainTest(BitcoinTestFramework):
self._test_stopatheight()
self._test_waitforblockheight()
self._test_getblock()
+ self._test_getdeploymentinfo()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
@@ -115,7 +117,6 @@ class BlockchainTest(BitcoinTestFramework):
'mediantime',
'pruned',
'size_on_disk',
- 'softforks',
'time',
'verificationprogress',
'warnings',
@@ -159,11 +160,6 @@ class BlockchainTest(BitcoinTestFramework):
self.start_node(0, extra_args=[
'-stopatheight=207',
'-prune=550',
- '-testactivationheight=bip34@2',
- '-testactivationheight=dersig@3',
- '-testactivationheight=cltv@4',
- '-testactivationheight=csv@5',
- '-testactivationheight=segwit@6',
])
res = self.nodes[0].getblockchaininfo()
@@ -177,7 +173,13 @@ class BlockchainTest(BitcoinTestFramework):
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
- assert_equal(res['softforks'], {
+ def check_signalling_deploymentinfo_result(self, gdi_result, height, blockhash, status_next):
+ assert height >= 144 and height <= 287
+
+ assert_equal(gdi_result, {
+ "hash": blockhash,
+ "height": height,
+ "deployments": {
'bip34': {'type': 'buried', 'active': True, 'height': 2},
'bip66': {'type': 'buried', 'active': True, 'height': 3},
'bip65': {'type': 'buried', 'active': True, 'height': 4},
@@ -186,36 +188,65 @@ class BlockchainTest(BitcoinTestFramework):
'testdummy': {
'type': 'bip9',
'bip9': {
- 'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
+ 'min_activation_height': 0,
+ 'status': 'started',
+ 'status-next': status_next,
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
- 'elapsed': HEIGHT - 143,
- 'count': HEIGHT - 143,
+ 'elapsed': height - 143,
+ 'count': height - 143,
'possible': True,
},
- 'min_activation_height': 0,
+ 'signalling': '#'*(height-143),
},
'active': False
},
'taproot': {
'type': 'bip9',
'bip9': {
- 'status': 'active',
'start_time': -1,
'timeout': 9223372036854775807,
- 'since': 0,
'min_activation_height': 0,
+ 'status': 'active',
+ 'status-next': 'active',
+ 'since': 0,
},
'height': 0,
'active': True
}
+ }
})
+ def _test_getdeploymentinfo(self):
+ # Note: continues past -stopatheight height, so must be invoked
+ # after _test_stopatheight
+
+ self.log.info("Test getdeploymentinfo")
+ self.stop_node(0)
+ self.start_node(0, extra_args=[
+ '-testactivationheight=bip34@2',
+ '-testactivationheight=dersig@3',
+ '-testactivationheight=cltv@4',
+ '-testactivationheight=csv@5',
+ '-testactivationheight=segwit@6',
+ ])
+
+ gbci207 = self.nodes[0].getblockchaininfo()
+ self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci207["blocks"], gbci207["bestblockhash"], "started")
+
+ # block just prior to lock in
+ self.generate(self.wallet, 287 - gbci207["blocks"])
+ gbci287 = self.nodes[0].getblockchaininfo()
+ self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci287["blocks"], gbci287["bestblockhash"], "locked_in")
+
+ # calling with an explicit hash works
+ self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(gbci207["bestblockhash"]), gbci207["blocks"], gbci207["bestblockhash"], "started")
+
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index a8e6acea45..759e43194b 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -4,8 +4,10 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
+
from decimal import Decimal
from itertools import product
+from math import ceil
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
@@ -1003,7 +1005,7 @@ class RawTransactionsTest(BitcoinTestFramework):
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
- raw_tx = wallet.createrawtransaction([ext_utxo], {self.nodes[0].getnewaddress(): 15})
+ raw_tx = wallet.createrawtransaction([ext_utxo], {self.nodes[0].getnewaddress(): ext_utxo["amount"] / 2})
assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, raw_tx)
# Error conditions
@@ -1011,6 +1013,12 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_raises_rpc_error(-5, "'01234567890a0b0c0d0e0f' is not a valid public key", wallet.fundrawtransaction, raw_tx, {"solving_data": {"pubkeys":["01234567890a0b0c0d0e0f"]}})
assert_raises_rpc_error(-5, "'not a script' is not hex", wallet.fundrawtransaction, raw_tx, {"solving_data": {"scripts":["not a script"]}})
assert_raises_rpc_error(-8, "Unable to parse descriptor 'not a descriptor'", wallet.fundrawtransaction, raw_tx, {"solving_data": {"descriptors":["not a descriptor"]}})
+ assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"]}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": -1}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, missing weight key", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"]}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 164}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": -1}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be greater than", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 400001}]})
# But funding should work when the solving data is provided
funded_tx = wallet.fundrawtransaction(raw_tx, {"solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
@@ -1020,10 +1028,45 @@ class RawTransactionsTest(BitcoinTestFramework):
assert signed_tx['complete']
funded_tx = wallet.fundrawtransaction(raw_tx, {"solving_data": {"descriptors": [desc]}})
- signed_tx = wallet.signrawtransactionwithwallet(funded_tx['hex'])
- assert not signed_tx['complete']
- signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx['hex'])
- assert signed_tx['complete']
+ signed_tx1 = wallet.signrawtransactionwithwallet(funded_tx['hex'])
+ assert not signed_tx1['complete']
+ signed_tx2 = self.nodes[0].signrawtransactionwithwallet(signed_tx1['hex'])
+ assert signed_tx2['complete']
+
+ unsigned_weight = self.nodes[0].decoderawtransaction(signed_tx1["hex"])["weight"]
+ signed_weight = self.nodes[0].decoderawtransaction(signed_tx2["hex"])["weight"]
+ # The input's weight is the difference between the signed and unsigned weights,
+ # plus the weight of the fields that did not change (36-byte prevout, 4-byte sequence, 1-byte scriptSig length)
+ input_weight = signed_weight - unsigned_weight + (41 * 4)
+ low_input_weight = input_weight // 2
+ high_input_weight = input_weight * 2
+
+ # Funding should also work if the input weight is provided
+ funded_tx = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}]})
+ signed_tx = wallet.signrawtransactionwithwallet(funded_tx["hex"])
+ signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx["hex"])
+ assert_equal(self.nodes[0].testmempoolaccept([signed_tx["hex"]])[0]["allowed"], True)
+ assert_equal(signed_tx["complete"], True)
+ # Reducing the weight should have a lower fee
+ funded_tx2 = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}]})
+ assert_greater_than(funded_tx["fee"], funded_tx2["fee"])
+ # Increasing the weight should have a higher fee
+ funded_tx2 = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}]})
+ assert_greater_than(funded_tx2["fee"], funded_tx["fee"])
+ # The provided weight should override the calculated weight when solving data is provided
+ funded_tx3 = wallet.fundrawtransaction(raw_tx, {"solving_data": {"descriptors": [desc]}, "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}]})
+ assert_equal(funded_tx2["fee"], funded_tx3["fee"])
+ # The feerate should be met
+ funded_tx4 = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], "fee_rate": 10})
+ input_add_weight = high_input_weight - (41 * 4)
+ tx4_weight = wallet.decoderawtransaction(funded_tx4["hex"])["weight"] + input_add_weight
+ tx4_vsize = int(ceil(tx4_weight / 4))
+ assert_fee_amount(funded_tx4["fee"], tx4_vsize, Decimal("0.0001"))
+
+ # Funding with a weight at compact size uint (csuint) serialization boundaries should not cause problems
+ funded_tx = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 255}]})
+ funded_tx = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 65539}]})
+
self.nodes[2].unloadwallet("extfund")
def test_include_unsafe(self):
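The 41 * 4 term added back into input_weight above is the weight of the input fields that are present whether or not the input is signed. A rough sketch of that constant, assuming the standard witness scale factor of 4 (the constant names below are illustrative, not from the patch):

    PREVOUT_BYTES = 36        # 32-byte txid + 4-byte output index
    SEQUENCE_BYTES = 4
    SCRIPTSIG_LEN_BYTES = 1   # compact-size length prefix of the (empty) scriptSig
    WITNESS_SCALE_FACTOR = 4  # non-witness bytes count 4 weight units each

    base_input_weight = (PREVOUT_BYTES + SEQUENCE_BYTES + SCRIPTSIG_LEN_BYTES) * WITNESS_SCALE_FACTOR
    assert base_input_weight == 41 * 4 == 164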
diff --git a/test/functional/rpc_getblockfrompeer.py b/test/functional/rpc_getblockfrompeer.py
index effcebe854..b65322d920 100755
--- a/test/functional/rpc_getblockfrompeer.py
+++ b/test/functional/rpc_getblockfrompeer.py
@@ -40,12 +40,8 @@ class GetBlockFromPeerTest(BitcoinTestFramework):
self.sync_blocks()
self.log.info("Node 0 should only have the header for node 1's block 3")
- for x in self.nodes[0].getchaintips():
- if x['hash'] == short_tip:
- assert_equal(x['status'], "headers-only")
- break
- else:
- raise AssertionError("short tip not synced")
+ x = next(filter(lambda x: x['hash'] == short_tip, self.nodes[0].getchaintips()))
+ assert_equal(x['status'], "headers-only")
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, short_tip)
self.log.info("Fetch block from node 1")
@@ -60,17 +56,15 @@ class GetBlockFromPeerTest(BitcoinTestFramework):
assert_raises_rpc_error(-1, "Block header missing", self.nodes[0].getblockfrompeer, "00" * 32, 0)
self.log.info("Non-existent peer generates error")
- assert_raises_rpc_error(-1, f"Peer nodeid {peer_0_peer_1_id + 1} does not exist", self.nodes[0].getblockfrompeer, short_tip, peer_0_peer_1_id + 1)
+ assert_raises_rpc_error(-1, "Peer does not exist", self.nodes[0].getblockfrompeer, short_tip, peer_0_peer_1_id + 1)
self.log.info("Successful fetch")
result = self.nodes[0].getblockfrompeer(short_tip, peer_0_peer_1_id)
self.wait_until(lambda: self.check_for_block(short_tip), timeout=1)
- assert(not "warnings" in result)
+ assert_equal(result, {})
self.log.info("Don't fetch blocks we already have")
- result = self.nodes[0].getblockfrompeer(short_tip, peer_0_peer_1_id)
- assert("warnings" in result)
- assert_equal(result["warnings"], "Block already downloaded")
+ assert_raises_rpc_error(-1, "Block already downloaded", self.nodes[0].getblockfrompeer, short_tip, peer_0_peer_1_id)
if __name__ == '__main__':
GetBlockFromPeerTest().main()
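The next(filter(...)) expression above replaces the earlier for/else loop: it returns the first chain tip whose hash matches, and raises StopIteration if no tip matches, which still fails the test much as the old explicit AssertionError did. A small sketch with made-up tip data:

    tips = [
        {"hash": "aa" * 32, "status": "active"},
        {"hash": "bb" * 32, "status": "headers-only"},
    ]
    short_tip = "bb" * 32
    tip = next(filter(lambda t: t["hash"] == short_tip, tips))
    assert tip["status"] == "headers-only"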
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 153a201e95..b037807b53 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -606,11 +606,15 @@ class PSBTTest(BitcoinTestFramework):
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
- # Test that we can fund psbts with external inputs specified
+ self.log.info("Test that we can fund psbts with external inputs specified")
+
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
+ self.nodes[1].createwallet("extfund")
+ wallet = self.nodes[1].get_wallet_rpc("extfund")
+
# Make a weird but signable script. sh(pkh()) descriptor accomplishes this
desc = descsum_create("sh(pkh({}))".format(privkey))
if self.options.descriptors:
@@ -622,26 +626,97 @@ class PSBTTest(BitcoinTestFramework):
addr_info = self.nodes[0].getaddressinfo(addr)
self.nodes[0].sendtoaddress(addr, 10)
+ self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10)
self.generate(self.nodes[0], 6)
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
- assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[1].walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 10 + ext_utxo['amount']}, 0, {'add_inputs': True})
+ assert_raises_rpc_error(-4, "Insufficient funds", wallet.walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 15})
# But funding should work when the solving data is provided
- psbt = self.nodes[1].walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {'add_inputs': True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
- signed = self.nodes[1].walletprocesspsbt(psbt['psbt'])
+ psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
+ signed = wallet.walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
self.nodes[0].finalizepsbt(signed['psbt'])
- psbt = self.nodes[1].walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {'add_inputs': True, "solving_data":{"descriptors": [desc]}})
- signed = self.nodes[1].walletprocesspsbt(psbt['psbt'])
+ psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data":{"descriptors": [desc]}})
+ signed = wallet.walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
- self.nodes[0].finalizepsbt(signed['psbt'])
+ final = self.nodes[0].finalizepsbt(signed['psbt'], False)
+
+ dec = self.nodes[0].decodepsbt(signed["psbt"])
+ for i, txin in enumerate(dec["tx"]["vin"]):
+ if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]:
+ input_idx = i
+ break
+ psbt_in = dec["inputs"][input_idx]
+ # Calculate the input weight
+ # (36-byte prevout + 4-byte sequence + 1-byte scriptSig length + scriptSig + 2-byte buffer) * 4 + length of scriptWitness
+ len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
+ len_scriptwitness = len(psbt_in["final_scriptwitness"]["hex"]) // 2 if "final_scriptwitness" in psbt_in else 0
+ input_weight = ((41 + len_scriptsig + 2) * 4) + len_scriptwitness
+ low_input_weight = input_weight // 2
+ high_input_weight = input_weight * 2
+
+ # Input weight error conditions
+ assert_raises_rpc_error(
+ -8,
+ "Input weights should be specified in inputs rather than in options.",
+ wallet.walletcreatefundedpsbt,
+ inputs=[ext_utxo],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
+ )
+
+ # Funding should also work if the input weight is provided
+ psbt = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ signed = wallet.walletprocesspsbt(psbt["psbt"])
+ signed = self.nodes[0].walletprocesspsbt(signed["psbt"])
+ final = self.nodes[0].finalizepsbt(signed["psbt"])
+ assert self.nodes[0].testmempoolaccept([final["hex"]])[0]["allowed"]
+ # Reducing the weight should have a lower fee
+ psbt2 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ assert_greater_than(psbt["fee"], psbt2["fee"])
+ # Increasing the weight should have a higher fee
+ psbt2 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ assert_greater_than(psbt2["fee"], psbt["fee"])
+ # The provided weight should override the calculated weight when solving data is provided
+ psbt3 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={'add_inputs': True, "solving_data":{"descriptors": [desc]}}
+ )
+ assert_equal(psbt2["fee"], psbt3["fee"])
+
+ # Import the external utxo descriptor so that we can sign for it from the test wallet
+ if self.options.descriptors:
+ res = wallet.importdescriptors([{"desc": desc, "timestamp": "now"}])
+ else:
+ res = wallet.importmulti([{"desc": desc, "timestamp": "now"}])
+ assert res[0]["success"]
+ # The provided weight should override the calculated weight for a wallet input
+ psbt3 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ assert_equal(psbt2["fee"], psbt3["fee"])
if __name__ == '__main__':
PSBTTest().main()
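The input-weight estimate computed above for the external sh(pkh()) input only scales the non-witness bytes by 4; for this legacy input there is no scriptWitness, so that term is zero. A sketch of the same arithmetic with the byte counts passed in directly (the function name and example sizes are illustrative):

    def estimated_input_weight(len_scriptsig, len_scriptwitness=0):
        # 41 = 36-byte prevout + 4-byte sequence + 1-byte scriptSig length;
        # the scriptSig itself and a 2-byte buffer are also non-witness data,
        # so they are scaled by the witness factor of 4; scriptWitness bytes
        # count once each.
        return (41 + len_scriptsig + 2) * 4 + len_scriptwitness

    # e.g. a hypothetical 108-byte scriptSig with no witness
    assert estimated_input_weight(108) == (41 + 108 + 2) * 4 == 604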
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 96691b2686..a839af0288 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -99,25 +99,36 @@ class RawTransactionsTest(BitcoinTestFramework):
rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
- self.generate(self.nodes[0], 1)
+ self.generateblock(self.nodes[0], output=self.nodes[0].getnewaddress(), transactions=[rawTxSigned['hex']])
+ err_msg = (
+ "No such mempool transaction. Use -txindex or provide a block hash to enable"
+ " blockchain transaction queries. Use gettransaction for wallet transactions."
+ )
for n in [0, 3]:
self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")
- # 1. valid parameters - only supply txid
- assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
- # 2. valid parameters - supply txid and 0 for non-verbose
- assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
+ if n == 0:
+ # With -txindex.
+ # 1. valid parameters - only supply txid
+ assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
- # 3. valid parameters - supply txid and False for non-verbose
- assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
+ # 2. valid parameters - supply txid and 0 for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
- # 4. valid parameters - supply txid and 1 for verbose.
- # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
- assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
+ # 3. valid parameters - supply txid and False for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
- # 5. valid parameters - supply txid and True for non-verbose
- assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
+ # 4. valid parameters - supply txid and 1 for verbose.
+ # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
+ assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
+
+ # 5. valid parameters - supply txid and True for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
+ else:
+ # Without -txindex, expect to raise.
+ for verbose in [None, 0, False, 1, True]:
+ assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txId, verbose)
# 6. invalid parameters - supply txid and invalid boolean values (strings) for verbose
for value in ["True", "False"]:
@@ -145,10 +156,6 @@ class RawTransactionsTest(BitcoinTestFramework):
assert 'in_active_chain' not in gottx
else:
self.log.info("Test getrawtransaction without -txindex, without blockhash: expect the call to raise")
- err_msg = (
- "No such mempool transaction. Use -txindex or provide a block hash to enable"
- " blockchain transaction queries. Use gettransaction for wallet transactions."
- )
assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txid=tx, verbose=True)
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[n].getrawtransaction, txid=tx, blockhash=block2)
diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py
index e648040278..a2091b4ece 100755
--- a/test/functional/rpc_signrawtransaction.py
+++ b/test/functional/rpc_signrawtransaction.py
@@ -270,7 +270,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
getcontext().prec = 8
# Make sure CSV is active
- assert self.nodes[0].getblockchaininfo()['softforks']['csv']['active']
+ assert self.nodes[0].getdeploymentinfo()['deployments']['csv']['active']
# Create a P2WSH script with CSV
script = CScript([1, OP_CHECKSEQUENCEVERIFY, OP_DROP])
@@ -305,7 +305,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
getcontext().prec = 8
# Make sure CLTV is active
- assert self.nodes[0].getblockchaininfo()['softforks']['bip65']['active']
+ assert self.nodes[0].getdeploymentinfo()['deployments']['bip65']['active']
# Create a P2WSH script with CLTV
script = CScript([100, OP_CHECKLOCKTIMEVERIFY, OP_DROP])
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 195af14914..dabde13bf1 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -438,7 +438,7 @@ def delete_cookie_file(datadir, chain):
def softfork_active(node, key):
"""Return whether a softfork is active."""
- return node.getblockchaininfo()['softforks'][key]['active']
+ return node.getdeploymentinfo()['deployments'][key]['active']
def set_node_times(nodes, t):
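softfork_active() now reads the per-deployment 'active' flag from getdeploymentinfo, which exposes the same flag for both buried and bip9 deployments. An abridged sketch of the shape it reads (values assumed, matching the regtest overrides used in rpc_blockchain.py above):

    deployments = {
        "csv":     {"type": "buried", "active": True, "height": 5},
        "taproot": {"type": "bip9",   "active": True, "height": 0},
    }

    def softfork_active_sketch(key):
        # Same lookup as the helper above, against a canned result.
        return deployments[key]["active"]

    assert softfork_active_sketch("csv") and softfork_active_sketch("taproot")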
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index eb2d030f4a..e833128063 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -198,6 +198,7 @@ BASE_SCRIPTS = [
'wallet_keypool.py --legacy-wallet',
'wallet_keypool.py --descriptors',
'wallet_descriptor.py --descriptors',
+ 'feature_maxtipage.py',
'p2p_nobloomfilter_messages.py',
'p2p_filter.py',
'rpc_setban.py',
@@ -308,6 +309,7 @@ BASE_SCRIPTS = [
'feature_coinstatsindex.py --legacy-wallet',
'feature_coinstatsindex.py --descriptors',
'wallet_orphanedreward.py',
+ 'wallet_timelock.py',
'p2p_node_network_limited.py',
'p2p_permissions.py',
'feature_blocksdir.py',
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index d77d554baa..843a9f52b7 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -518,5 +518,45 @@ class WalletSendTest(BitcoinTestFramework):
assert signed["complete"]
self.nodes[0].finalizepsbt(signed["psbt"])
+ dec = self.nodes[0].decodepsbt(signed["psbt"])
+ for i, txin in enumerate(dec["tx"]["vin"]):
+ if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]:
+ input_idx = i
+ break
+ psbt_in = dec["inputs"][input_idx]
+ # Calculate the input weight
+ # (36-byte prevout + 4-byte sequence + 1-byte scriptSig length + scriptSig + 2-byte buffer) * 4 + length of scriptWitness
+ len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
+ len_scriptwitness = len(psbt_in["final_scriptwitness"]["hex"]) // 2 if "final_scriptwitness" in psbt_in else 0
+ input_weight = ((41 + len_scriptsig + 2) * 4) + len_scriptwitness
+
+ # Input weight error conditions
+ assert_raises_rpc_error(
+ -8,
+ "Input weights should be specified in inputs rather than in options.",
+ ext_wallet.send,
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"inputs": [ext_utxo], "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
+ )
+
+ # Funding should also work when input weights are provided
+ res = self.test_send(
+ from_wallet=ext_wallet,
+ to_wallet=self.nodes[0],
+ amount=15,
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}],
+ add_inputs=True,
+ psbt=True,
+ include_watching=True,
+ fee_rate=10
+ )
+ signed = ext_wallet.walletprocesspsbt(res["psbt"])
+ signed = ext_fund.walletprocesspsbt(signed["psbt"])
+ assert signed["complete"]
+ tx = self.nodes[0].finalizepsbt(signed["psbt"])
+ testres = self.nodes[0].testmempoolaccept([tx["hex"]])[0]
+ assert_equal(testres["allowed"], True)
+ assert_fee_amount(testres["fees"]["base"], testres["vsize"], Decimal("0.0001"))
+
if __name__ == '__main__':
WalletSendTest().main()
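The final fee check above ties together the fee_rate=10 argument (sat/vB, as accepted by send()) and the 0.0001 BTC/kvB rate given to assert_fee_amount. A small sketch of that unit conversion (variable names and the example vsize are illustrative):

    from decimal import Decimal

    sat_per_vb = 10
    btc_per_kvb = Decimal(sat_per_vb) * 1000 / Decimal(100_000_000)
    assert btc_per_kvb == Decimal("0.0001")

    vsize = 200                                  # hypothetical vsize in vbytes
    expected_fee = vsize * btc_per_kvb / 1000    # ~0.00002 BTC at 10 sat/vB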
diff --git a/test/functional/wallet_timelock.py b/test/functional/wallet_timelock.py
new file mode 100755
index 0000000000..cf233a00ef
--- /dev/null
+++ b/test/functional/wallet_timelock.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+
+class WalletLocktimeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def run_test(self):
+ node = self.nodes[0]
+
+ mtp_tip = node.getblockheader(node.getbestblockhash())["mediantime"]
+
+ self.log.info("Get new address with label")
+ label = "timelock⌛🔓"
+ address = node.getnewaddress(label=label)
+
+ self.log.info("Send to new address with locktime")
+ node.send(
+ outputs={address: 5},
+ options={"locktime": mtp_tip - 1},
+ )
+ self.generate(node, 1)
+
+ self.log.info("Check that clock can not change finality of confirmed txs")
+ amount_before_ad = node.getreceivedbyaddress(address)
+ amount_before_lb = node.getreceivedbylabel(label)
+ list_before_ad = node.listreceivedbyaddress(address_filter=address)
+ list_before_lb = node.listreceivedbylabel(include_empty=False)
+ balance_before = node.getbalances()["mine"]["trusted"]
+ coin_before = node.listunspent(maxconf=1)
+ node.setmocktime(mtp_tip - 1)
+ assert_equal(node.getreceivedbyaddress(address), amount_before_ad)
+ assert_equal(node.getreceivedbylabel(label), amount_before_lb)
+ assert_equal(node.listreceivedbyaddress(address_filter=address), list_before_ad)
+ assert_equal(node.listreceivedbylabel(include_empty=False), list_before_lb)
+ assert_equal(node.getbalances()["mine"]["trusted"], balance_before)
+ assert_equal(node.listunspent(maxconf=1), coin_before)
+
+
+if __name__ == "__main__":
+ WalletLocktimeTest().main()
diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh
index 6a8a15d05c..9449b393f1 100755
--- a/test/lint/commit-script-check.sh
+++ b/test/lint/commit-script-check.sh
@@ -17,6 +17,11 @@ if test -z "$1"; then
exit 1
fi
+if ! sed --help 2>&1 | grep -q 'GNU'; then
+ echo "Error: the installed sed package is not compatible. Please make sure you have GNU sed installed in your system.";
+ exit 1;
+fi
+
RET=0
PREV_BRANCH=$(git name-rev --name-only HEAD)
PREV_HEAD=$(git rev-parse HEAD)
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 393278bd6a..980e1f5296 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -45,7 +45,6 @@ shift-base:test/fuzz/crypto_diff_fuzz_chacha20.cpp
# job.
unsigned-integer-overflow:addrman.cpp
unsigned-integer-overflow:arith_uint256.h
-unsigned-integer-overflow:bitcoin-tx.cpp
unsigned-integer-overflow:common/bloom.cpp
unsigned-integer-overflow:chain.cpp
unsigned-integer-overflow:chain.h
@@ -85,10 +84,6 @@ implicit-signed-integer-truncation:addrman.cpp
implicit-signed-integer-truncation:addrman.h
implicit-signed-integer-truncation:chain.h
implicit-signed-integer-truncation:crypto/
-implicit-signed-integer-truncation:node/miner.cpp
-implicit-signed-integer-truncation:net.cpp
-implicit-signed-integer-truncation:streams.h
-implicit-signed-integer-truncation:torcontrol.cpp
implicit-unsigned-integer-truncation:crypto/
shift-base:arith_uint256.cpp
shift-base:crypto/
diff --git a/test/util/data/tt-delin1-out.json b/test/util/data/tt-delin1-out.json
index c5b9f6df01..6e053fe2b9 100644
--- a/test/util/data/tt-delin1-out.json
+++ b/test/util/data/tt-delin1-out.json
@@ -194,6 +194,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o)#xvg87vgr",
"hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
"address": "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o",
"type": "pubkeyhash"
@@ -204,6 +205,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 6c772e9cf96371bba3da8cb733da70a2fcf20078 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb)#tsyprkms",
"hex": "76a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac",
"address": "1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb",
"type": "pubkeyhash"
diff --git a/test/util/data/tt-delout1-out.json b/test/util/data/tt-delout1-out.json
index 3863416430..e61b9c79db 100644
--- a/test/util/data/tt-delout1-out.json
+++ b/test/util/data/tt-delout1-out.json
@@ -203,6 +203,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o)#xvg87vgr",
"hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
"address": "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o",
"type": "pubkeyhash"
diff --git a/test/util/data/tt-locktime317000-out.json b/test/util/data/tt-locktime317000-out.json
index 62e785f7d0..873628e124 100644
--- a/test/util/data/tt-locktime317000-out.json
+++ b/test/util/data/tt-locktime317000-out.json
@@ -203,6 +203,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o)#xvg87vgr",
"hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
"address": "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o",
"type": "pubkeyhash"
@@ -213,6 +214,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 6c772e9cf96371bba3da8cb733da70a2fcf20078 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb)#tsyprkms",
"hex": "76a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac",
"address": "1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreate1.json b/test/util/data/txcreate1.json
index 96d77ef273..c4a76f22a6 100644
--- a/test/util/data/txcreate1.json
+++ b/test/util/data/txcreate1.json
@@ -41,6 +41,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
@@ -51,6 +52,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 f2d4db28cad6502226ee484ae24505c2885cb12d OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46)#vdmdu766",
"hex": "76a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac",
"address": "1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreate2.json b/test/util/data/txcreate2.json
index ee9b9c3c17..95953cc90e 100644
--- a/test/util/data/txcreate2.json
+++ b/test/util/data/txcreate2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "",
+ "desc": "raw()#58lrscpx",
"hex": "",
"type": "nonstandard"
}
diff --git a/test/util/data/txcreatedata1.json b/test/util/data/txcreatedata1.json
index 87fc7e9cf7..1454ffdab7 100644
--- a/test/util/data/txcreatedata1.json
+++ b/test/util/data/txcreatedata1.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
@@ -33,6 +34,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_RETURN 54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "desc": "raw(6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e)#zf2avljj",
"hex": "6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
"type": "nulldata"
}
diff --git a/test/util/data/txcreatedata2.json b/test/util/data/txcreatedata2.json
index d03b1c8244..ca20d2aa45 100644
--- a/test/util/data/txcreatedata2.json
+++ b/test/util/data/txcreatedata2.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
@@ -33,6 +34,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_RETURN 54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "desc": "raw(6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e)#zf2avljj",
"hex": "6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
"type": "nulldata"
}
diff --git a/test/util/data/txcreatedata_seq0.json b/test/util/data/txcreatedata_seq0.json
index 8a123f1ba8..9838383c06 100644
--- a/test/util/data/txcreatedata_seq0.json
+++ b/test/util/data/txcreatedata_seq0.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreatedata_seq1.json b/test/util/data/txcreatedata_seq1.json
index 006fd7259f..c729f8dcfb 100644
--- a/test/util/data/txcreatedata_seq1.json
+++ b/test/util/data/txcreatedata_seq1.json
@@ -32,6 +32,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreatemultisig1.json b/test/util/data/txcreatemultisig1.json
index baa290c2b1..9632b20ece 100644
--- a/test/util/data/txcreatemultisig1.json
+++ b/test/util/data/txcreatemultisig1.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "2 02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d 02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485 3 OP_CHECKMULTISIG",
+ "desc": "multi(2,02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397,021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d,02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485)#8s88p9pl",
"hex": "522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae",
"type": "multisig"
}
diff --git a/test/util/data/txcreatemultisig2.json b/test/util/data/txcreatemultisig2.json
index 6685512587..021cf539a8 100644
--- a/test/util/data/txcreatemultisig2.json
+++ b/test/util/data/txcreatemultisig2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 1c6fbaf46d64221e80cbae182c33ddf81b9294ac OP_EQUAL",
+ "desc": "addr(34HNh57oBCRKkxNyjTuWAJkTbuGh6jg2Ms)#ngnz8933",
"hex": "a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac87",
"address": "34HNh57oBCRKkxNyjTuWAJkTbuGh6jg2Ms",
"type": "scripthash"
diff --git a/test/util/data/txcreatemultisig3.json b/test/util/data/txcreatemultisig3.json
index be96f4c704..3c20a88a91 100644
--- a/test/util/data/txcreatemultisig3.json
+++ b/test/util/data/txcreatemultisig3.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "0 e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f05",
+ "desc": "addr(bc1qu9dgdg330r6r84g5mw7wqshg04exv2uttmw2elfwx74h5tgntuzs44gyfg)#yvt39j9m",
"hex": "0020e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f05",
"address": "bc1qu9dgdg330r6r84g5mw7wqshg04exv2uttmw2elfwx74h5tgntuzs44gyfg",
"type": "witness_v0_scripthash"
diff --git a/test/util/data/txcreatemultisig4.json b/test/util/data/txcreatemultisig4.json
index 08831ecdca..7ae18fd90a 100644
--- a/test/util/data/txcreatemultisig4.json
+++ b/test/util/data/txcreatemultisig4.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 6edf12858999f0dae74f9c692e6694ee3621b2ac OP_EQUAL",
+ "desc": "addr(3BoFUz1StqcNcgUTZE5cC1eFhuYFzj3fGH)#466tx6fn",
"hex": "a9146edf12858999f0dae74f9c692e6694ee3621b2ac87",
"address": "3BoFUz1StqcNcgUTZE5cC1eFhuYFzj3fGH",
"type": "scripthash"
diff --git a/test/util/data/txcreatemultisig5.json b/test/util/data/txcreatemultisig5.json
index 93048cf261..98a5c2d8d1 100644
--- a/test/util/data/txcreatemultisig5.json
+++ b/test/util/data/txcreatemultisig5.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 a4051c02398868af83f28f083208fae99a769263 OP_EQUAL",
+ "desc": "addr(3GeGs1eHUxPz5YyuFe9WPpXid2UsUb5Jos)#juhnnegr",
"hex": "a914a4051c02398868af83f28f083208fae99a76926387",
"address": "3GeGs1eHUxPz5YyuFe9WPpXid2UsUb5Jos",
"type": "scripthash"
diff --git a/test/util/data/txcreateoutpubkey1.json b/test/util/data/txcreateoutpubkey1.json
index 42b519bb21..3baf479991 100644
--- a/test/util/data/txcreateoutpubkey1.json
+++ b/test/util/data/txcreateoutpubkey1.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 OP_CHECKSIG",
+ "desc": "pk(02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397)#rk5v7uqw",
"hex": "2102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac",
"type": "pubkey"
}
diff --git a/test/util/data/txcreateoutpubkey2.json b/test/util/data/txcreateoutpubkey2.json
index 52168a889b..78acf1658b 100644
--- a/test/util/data/txcreateoutpubkey2.json
+++ b/test/util/data/txcreateoutpubkey2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "0 a2516e770582864a6a56ed21a102044e388c62e3",
+ "desc": "addr(bc1q5fgkuac9s2ry56jka5s6zqsyfcugcchry5cwu0)#gm7zhxq2",
"hex": "0014a2516e770582864a6a56ed21a102044e388c62e3",
"address": "bc1q5fgkuac9s2ry56jka5s6zqsyfcugcchry5cwu0",
"type": "witness_v0_keyhash"
diff --git a/test/util/data/txcreateoutpubkey3.json b/test/util/data/txcreateoutpubkey3.json
index fce210f8a3..632ed52ccf 100644
--- a/test/util/data/txcreateoutpubkey3.json
+++ b/test/util/data/txcreateoutpubkey3.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 a5ab14c9804d0d8bf02f1aea4e82780733ad0a83 OP_EQUAL",
+ "desc": "addr(3GnzN8FqgvYGYdhj8NW6UNxxVv3Uj1ApQn)#zsln680u",
"hex": "a914a5ab14c9804d0d8bf02f1aea4e82780733ad0a8387",
"address": "3GnzN8FqgvYGYdhj8NW6UNxxVv3Uj1ApQn",
"type": "scripthash"
diff --git a/test/util/data/txcreatescript1.json b/test/util/data/txcreatescript1.json
index af1c4c35e2..cdee9dbbfa 100644
--- a/test/util/data/txcreatescript1.json
+++ b/test/util/data/txcreatescript1.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DROP",
+ "desc": "raw(75)#ppey0zqj",
"hex": "75",
"type": "nonstandard"
}
diff --git a/test/util/data/txcreatescript2.json b/test/util/data/txcreatescript2.json
index 2cde70fdf7..1fbae62f4b 100644
--- a/test/util/data/txcreatescript2.json
+++ b/test/util/data/txcreatescript2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 71ed53322d470bb96657deb786b94f97dd46fb15 OP_EQUAL",
+ "desc": "addr(3C5QarEGh9feKbDJ3QbMf2YNjnMoiPDhNp)#5mx9waq3",
"hex": "a91471ed53322d470bb96657deb786b94f97dd46fb1587",
"address": "3C5QarEGh9feKbDJ3QbMf2YNjnMoiPDhNp",
"type": "scripthash"
diff --git a/test/util/data/txcreatescript3.json b/test/util/data/txcreatescript3.json
index 7a282faf4f..502fe91692 100644
--- a/test/util/data/txcreatescript3.json
+++ b/test/util/data/txcreatescript3.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "0 0bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad6",
+ "desc": "addr(bc1qp0lfxhnscvsu0j36l36uurgv5tuck4pzuqytkvwqp3kh78cupttqyf705v)#s4fdh9tu",
"hex": "00200bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad6",
"address": "bc1qp0lfxhnscvsu0j36l36uurgv5tuck4pzuqytkvwqp3kh78cupttqyf705v",
"type": "witness_v0_scripthash"
diff --git a/test/util/data/txcreatescript4.json b/test/util/data/txcreatescript4.json
index 298b37bb4a..1ed89dfff2 100644
--- a/test/util/data/txcreatescript4.json
+++ b/test/util/data/txcreatescript4.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 6a2c482f4985f57e702f325816c90e3723ca81ae OP_EQUAL",
+ "desc": "addr(3BNQbeFeJJGMAyDxPwWPuqxPMrjsFLjk3f)#fdleltnv",
"hex": "a9146a2c482f4985f57e702f325816c90e3723ca81ae87",
"address": "3BNQbeFeJJGMAyDxPwWPuqxPMrjsFLjk3f",
"type": "scripthash"
diff --git a/test/util/data/txcreatesignv1.json b/test/util/data/txcreatesignv1.json
index ca5e003110..56ef9b195e 100644
--- a/test/util/data/txcreatesignv1.json
+++ b/test/util/data/txcreatesignv1.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 5834479edbbe0539b31ffd3a8f8ebadc2165ed01 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7)#nw04wh58",
"hex": "76a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac",
"address": "193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7",
"type": "pubkeyhash"