254 files changed, 3627 insertions, 2126 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index b37fce7002..f5874744b5 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -27,6 +27,13 @@ env: # Global defaults # The above machine types are matched to each task by their label. Refer to the # Cirrus CI docs for more details. # +# When a contributor maintains a fork of the repo, any pull request they make +# to their own fork, or to the main repository, will trigger two CI runs: +# one for the branch push and one for the pull request. +# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable +# in Cirrus repository settings, accessible from +# https://cirrus-ci.com/github/my-organization/my-repository +# # On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1 # ensures that previous containers and artifacts are cleared before each run. # This requires installing Podman instead of Docker. @@ -59,7 +66,10 @@ env: # Global defaults # https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks filter_template: &FILTER_TEMPLATE - skip: $CIRRUS_REPO_FULL_NAME == "bitcoin-core/gui" && $CIRRUS_PR == "" # No need to run on the read-only mirror, unless it is a PR. https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution + # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed, + # but still run CI when a PR is created. + # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution + skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == "" stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks base_template: &BASE_TEMPLATE diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 54795332e8..ab9704c0af 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,7 +58,13 @@ jobs: # and the ^ prefix is used to exclude these parents and all their # ancestors from the rev-list output as described in: # https://git-scm.com/docs/git-rev-list - echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD ^$(git rev-list -n1 --merges HEAD)^@ | head -1)" >> "$GITHUB_ENV" + MERGE_BASE=$(git rev-list -n1 --merges HEAD) + EXCLUDE_MERGE_BASE_ANCESTORS= + # MERGE_BASE can be empty due to limited fetch-depth + if test -n "$MERGE_BASE"; then + EXCLUDE_MERGE_BASE_ANCESTORS=^${MERGE_BASE}^@ + fi + echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV" - run: | sudo apt-get update sudo apt-get install clang ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y diff --git a/Makefile.am b/Makefile.am index 2ff6dd0a11..b746299a42 100644 --- a/Makefile.am +++ b/Makefile.am @@ -334,14 +334,14 @@ clean-local: clean-docs test-security-check: if TARGET_DARWIN - $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO - $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO + $(AM_V_at) CXX='$(CXX)' CXXFLAGS='$(CXXFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO + $(AM_V_at) 
CXX='$(CXX)' CXXFLAGS='$(CXXFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO endif if TARGET_WINDOWS - $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE - $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE + $(AM_V_at) CXX='$(CXX)' CXXFLAGS='$(CXXFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE + $(AM_V_at) CXX='$(CXX)' CXXFLAGS='$(CXXFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE endif if TARGET_LINUX - $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF - $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF + $(AM_V_at) CXX='$(CXX)' CXXFLAGS='$(CXXFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF + $(AM_V_at) CXX='$(CXX)' CXXFLAGS='$(CXXFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF endif diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index 87e3a8fa9b..550c7b8c92 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -58,7 +58,7 @@ curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_ tar --xz -xf - --directory /tmp/ mv "/tmp/shellcheck-${SHELLCHECK_VERSION}/shellcheck" /usr/bin/ -MLC_VERSION=v0.16.3 +MLC_VERSION=v0.18.0 MLC_BIN=mlc-x86_64-linux curl -sL "https://github.com/becheran/mlc/releases/download/${MLC_VERSION}/${MLC_BIN}" -o "/usr/bin/mlc" chmod +x /usr/bin/mlc diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh index 6425120afb..49660aac0c 100755 --- a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh +++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh @@ -7,9 +7,9 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_nowallet_libbitcoinkernel -export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" -# Use minimum supported python3.9 (or best-effort 3.10) and clang-15, see doc/dependencies.md -export PACKAGES="python3-zmq clang-15 llvm-15 libc++abi-15-dev libc++-15-dev" -export DEP_OPTS="NO_WALLET=1 CC=clang-15 CXX='clang++-15 -stdlib=libc++'" +export CI_IMAGE_NAME_TAG="docker.io/debian:bullseye" +# Use minimum supported python3.9 and clang-16, see doc/dependencies.md +export PACKAGES="python3-zmq clang-16 llvm-16 libc++abi-16-dev libc++-16-dev" +export DEP_OPTS="NO_WALLET=1 CC=clang-16 CXX='clang++-16 -stdlib=libc++'" export GOAL="install" export BITCOIN_CONFIG="--enable-reduce-exports --enable-experimental-util-chainstate --with-experimental-kernel-lib --enable-shared" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index f16321ba55..bb99fc30e9 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -65,6 +65,10 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" 
]]; then -S /msan/llvm-project/runtimes ninja -C /msan/cxx_build/ "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds + + # Clear no longer needed source folder + du -sh /msan/llvm-project + rm -rf /msan/llvm-project fi if [[ "${RUN_TIDY}" == "true" ]]; then diff --git a/configure.ac b/configure.ac index af0d1d1505..439bb508e7 100644 --- a/configure.ac +++ b/configure.ac @@ -482,11 +482,12 @@ TEMP_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$SSE41_CXXFLAGS $CXXFLAGS" AC_MSG_CHECKING([for SSE4.1 intrinsics]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - #include <stdint.h> #include <immintrin.h> ]],[[ - __m128i l = _mm_set1_epi32(0); - return _mm_extract_epi32(l, 3); + __m128i a = _mm_set1_epi32(0); + __m128i b = _mm_set1_epi32(1); + __m128i r = _mm_blend_epi16(a, b, 0xFF); + return _mm_extract_epi32(r, 3); ]])], [ AC_MSG_RESULT([yes]); enable_sse41=yes; AC_DEFINE([ENABLE_SSE41], [1], [Define this symbol to build code that uses SSE4.1 intrinsics]) ], [ AC_MSG_RESULT([no])] @@ -1317,6 +1318,7 @@ if test "$use_reduce_exports" = "yes"; then AX_CHECK_COMPILE_FLAG([-fvisibility=hidden], [CORE_CXXFLAGS="$CORE_CXXFLAGS -fvisibility=hidden"], [AC_MSG_ERROR([Cannot set hidden symbol visibility. Use --disable-reduce-exports.])], [$CXXFLAG_WERROR]) AX_CHECK_LINK_FLAG([-Wl,--exclude-libs,ALL], [RELDFLAGS="-Wl,--exclude-libs,ALL"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-no_exported_symbols], [LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -Wl,-no_exported_symbols"], [], [$LDFLAG_WERROR]) fi if test "$use_tests" = "yes"; then diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt index 1260c71423..95345b4782 100644 --- a/contrib/devtools/bitcoin-tidy/CMakeLists.txt +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -25,7 +25,7 @@ find_program(CLANG_TIDY_EXE NAMES "clang-tidy-${LLVM_VERSION_MAJOR}" "clang-tidy message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") message(STATUS "Found clang-tidy: ${CLANG_TIDY_EXE}") -add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp logprintf.cpp) +add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp logprintf.cpp nontrivial-threadlocal.cpp) target_include_directories(bitcoin-tidy SYSTEM PRIVATE ${LLVM_INCLUDE_DIRS}) # Disable RTTI and exceptions as necessary @@ -58,7 +58,7 @@ else() endif() # Create a dummy library that runs clang-tidy tests as a side-effect of building -add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_logprintf.cpp) +add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_logprintf.cpp example_nontrivial-threadlocal.cpp) add_dependencies(bitcoin-tidy-tests bitcoin-tidy) set_target_properties(bitcoin-tidy-tests PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}") diff --git a/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp index 0f34d37793..1ef4494973 100644 --- a/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp +++ b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "logprintf.h" +#include "nontrivial-threadlocal.h" #include <clang-tidy/ClangTidyModule.h> #include <clang-tidy/ClangTidyModuleRegistry.h> @@ -13,6 +14,7 @@ public: void addCheckFactories(clang::tidy::ClangTidyCheckFactories& CheckFactories) override { CheckFactories.registerCheck<bitcoin::LogPrintfCheck>("bitcoin-unterminated-logprintf"); + CheckFactories.registerCheck<bitcoin::NonTrivialThreadLocal>("bitcoin-nontrivial-threadlocal"); } }; diff --git a/contrib/devtools/bitcoin-tidy/example_nontrivial-threadlocal.cpp b/contrib/devtools/bitcoin-tidy/example_nontrivial-threadlocal.cpp new file mode 100644 index 0000000000..2b74df5d0e --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/example_nontrivial-threadlocal.cpp @@ -0,0 +1,2 @@ +#include <string> +thread_local std::string foo; diff --git a/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.cpp b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.cpp new file mode 100644 index 0000000000..d2bc78a31b --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.cpp @@ -0,0 +1,44 @@ +// Copyright (c) 2023 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "nontrivial-threadlocal.h" + +#include <clang/AST/ASTContext.h> +#include <clang/ASTMatchers/ASTMatchFinder.h> + + +// Copied from clang-tidy's UnusedRaiiCheck +namespace { +AST_MATCHER(clang::CXXRecordDecl, hasNonTrivialDestructor) { + // TODO: If the dtor is there but empty we don't want to warn either. + return Node.hasDefinition() && Node.hasNonTrivialDestructor(); +} +} // namespace + +namespace bitcoin { + +void NonTrivialThreadLocal::registerMatchers(clang::ast_matchers::MatchFinder* finder) +{ + using namespace clang::ast_matchers; + + /* + thread_local std::string foo; + */ + + finder->addMatcher( + varDecl( + hasThreadStorageDuration(), + hasType(hasCanonicalType(recordType(hasDeclaration(cxxRecordDecl(hasNonTrivialDestructor()))))) + ).bind("nontrivial_threadlocal"), + this); +} + +void NonTrivialThreadLocal::check(const clang::ast_matchers::MatchFinder::MatchResult& Result) +{ + if (const clang::VarDecl* var = Result.Nodes.getNodeAs<clang::VarDecl>("nontrivial_threadlocal")) { + const auto user_diag = diag(var->getBeginLoc(), "Variable with non-trivial destructor cannot be thread_local."); + } +} + +} // namespace bitcoin diff --git a/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.h b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.h new file mode 100644 index 0000000000..c853073467 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/nontrivial-threadlocal.h @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef NONTRIVIAL_THREADLOCAL_CHECK_H +#define NONTRIVIAL_THREADLOCAL_CHECK_H + +#include <clang-tidy/ClangTidyCheck.h> + +namespace bitcoin { + +// Warn about any thread_local variable with a non-trivial destructor. 
+class NonTrivialThreadLocal final : public clang::tidy::ClangTidyCheck +{ +public: + NonTrivialThreadLocal(clang::StringRef Name, clang::tidy::ClangTidyContext* Context) + : clang::tidy::ClangTidyCheck(Name, Context) {} + + bool isLanguageVersionSupported(const clang::LangOptions& LangOpts) const override + { + return LangOpts.CPlusPlus; + } + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; +}; + +} // namespace bitcoin + +#endif // NONTRIVIAL_THREADLOCAL_CHECK_H diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index 7bfd4d98da..de372cbd39 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -15,10 +15,10 @@ from utils import determine_wellknown_cmd def write_testcode(filename): with open(filename, 'w', encoding="utf8") as f: f.write(''' - #include <stdio.h> + #include <cstdio> int main() { - printf("the quick brown fox jumps over the lazy god\\n"); + std::printf("the quick brown fox jumps over the lazy god\\n"); return 0; } ''') @@ -34,17 +34,17 @@ def env_flags() -> list[str]: # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference. flags: list[str] = [] - for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: + for var in ['CXXFLAGS', 'CPPFLAGS', 'LDFLAGS']: flags += filter(None, os.environ.get(var, '').split(' ')) return flags -def call_security_check(cc: str, source: str, executable: str, options) -> tuple: - subprocess.run([*cc,source,'-o',executable] + env_flags() + options, check=True) +def call_security_check(cxx: str, source: str, executable: str, options) -> tuple: + subprocess.run([*cxx,source,'-o',executable] + env_flags() + options, check=True) p = subprocess.run([os.path.join(os.path.dirname(__file__), 'security-check.py'), executable], stdout=subprocess.PIPE, text=True) return (p.returncode, p.stdout.rstrip()) -def get_arch(cc, source, executable): - subprocess.run([*cc, source, '-o', executable] + env_flags(), check=True) +def get_arch(cxx, source, executable): + subprocess.run([*cxx, source, '-o', executable] + env_flags(), check=True) binary = lief.parse(executable) arch = binary.abstract.header.architecture os.remove(executable) @@ -52,93 +52,93 @@ def get_arch(cc, source, executable): class TestSecurityChecks(unittest.TestCase): def test_ELF(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1' - cc = determine_wellknown_cmd('CC', 'gcc') + cxx = determine_wellknown_cmd('CXX', 'g++') write_testcode(source) - arch = get_arch(cc, source, executable) + arch = get_arch(cxx, source, executable) if arch == lief.ARCHITECTURES.X86: - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-zexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE NX RELRO CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE RELRO CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', 
'-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE RELRO CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), (1, executable+': failed RELRO CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), (1, executable+': failed separate_code CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), (1, executable+': failed CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']), (0, '')) else: - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-zexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE NX RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), (1, executable+': failed RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), (1, executable+': failed separate_code')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), + self.assertEqual(call_security_check(cxx, source, executable, 
['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), (0, '')) clean_files(source, executable) def test_PE(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1.exe' - cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc') + cxx = determine_wellknown_cmd('CXX', 'x86_64-w64-mingw32-g++') write_testcode(source) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--disable-nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fno-stack-protector']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--disable-nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fno-stack-protector']), (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION CONTROL_FLOW Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fstack-protector-all', '-lssp']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fstack-protector-all', '-lssp']), (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fstack-protector-all', '-lssp']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fstack-protector-all', '-lssp']), (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']), (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) # -pie -fPIE does nothing unless --dynamicbase is also supplied - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']), (1, executable+': failed HIGH_ENTROPY_VA CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']), (1, executable+': failed CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, 
source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE', '-fcf-protection=full','-fstack-protector-all', '-lssp']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE', '-fcf-protection=full','-fstack-protector-all', '-lssp']), (0, '')) clean_files(source, executable) def test_MACHO(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1' - cc = determine_wellknown_cmd('CC', 'clang') + cxx = determine_wellknown_cmd('CXX', 'clang++') write_testcode(source) - arch = get_arch(cc, source, executable) + arch = get_arch(cxx, source, executable) if arch == lief.ARCHITECTURES.X86: - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-no_fixup_chains']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-no_fixup_chains']), (1, executable+': failed NOUNDEFS Canary FIXUP_CHAINS PIE CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-fixup_chains']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-fixup_chains']), (1, executable+': failed NOUNDEFS Canary CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains']), (1, executable+': failed NOUNDEFS CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all', '-Wl,-fixup_chains']), + self.assertEqual(call_security_check(cxx, source, executable, ['-fstack-protector-all', '-Wl,-fixup_chains']), (1, executable+': failed CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains']), + self.assertEqual(call_security_check(cxx, source, executable, ['-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains']), (0, '')) else: # arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-no_fixup_chains']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-no_fixup_chains']), (1, executable+': failed NOUNDEFS Canary FIXUP_CHAINS BRANCH_PROTECTION')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-fixup_chains', '-mbranch-protection=bti']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-fixup_chains', '-mbranch-protection=bti']), (1, executable+': failed NOUNDEFS Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti']), + self.assertEqual(call_security_check(cxx, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti']), (1, executable+': failed NOUNDEFS')) - self.assertEqual(call_security_check(cc, 
source, executable, ['-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti']), + self.assertEqual(call_security_check(cxx, source, executable, ['-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti']), (0, '')) diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index b00004586c..454dbc6bd2 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -11,17 +11,17 @@ import unittest from utils import determine_wellknown_cmd -def call_symbol_check(cc: list[str], source, executable, options): +def call_symbol_check(cxx: list[str], source, executable, options): # This should behave the same as AC_TRY_LINK, so arrange well-known flags # in the same order as autoconf would. # # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference. env_flags: list[str] = [] - for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: + for var in ['CXXFLAGS', 'CPPFLAGS', 'LDFLAGS']: env_flags += filter(None, os.environ.get(var, '').split(' ')) - subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) + subprocess.run([*cxx,source,'-o',executable] + env_flags + options, check=True) p = subprocess.run([os.path.join(os.path.dirname(__file__), 'symbol-check.py'), executable], stdout=subprocess.PIPE, text=True) os.remove(source) os.remove(executable) @@ -29,13 +29,13 @@ def call_symbol_check(cc: list[str], source, executable, options): class TestSymbolChecks(unittest.TestCase): def test_ELF(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1' - cc = determine_wellknown_cmd('CC', 'gcc') + cxx = determine_wellknown_cmd('CXX', 'g++') # -lutil is part of the libc6 package so a safe bet that it's installed # it's also out of context enough that it's unlikely to ever become a real dependency - source = 'test2.c' + source = 'test2.cpp' executable = 'test2' with open(source, 'w', encoding="utf8") as f: f.write(''' @@ -48,31 +48,31 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lutil']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lutil']), (1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' + executable + ': failed LIBRARY_DEPENDENCIES')) # finally, check a simple conforming binary - source = 'test3.c' + source = 'test3.cpp' executable = 'test3' with open(source, 'w', encoding="utf8") as f: f.write(''' - #include <stdio.h> + #include <cstdio> int main() { - printf("42"); + std::printf("42"); return 0; } ''') - self.assertEqual(call_symbol_check(cc, source, executable, []), + self.assertEqual(call_symbol_check(cxx, source, executable, []), (0, '')) def test_MACHO(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1' - cc = determine_wellknown_cmd('CC', 'clang') + cxx = determine_wellknown_cmd('CXX', 'clang++') with open(source, 'w', encoding="utf8") as f: f.write(''' @@ -86,11 +86,11 @@ class TestSymbolChecks(unittest.TestCase): ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), (1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' + f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK')) - source = 'test2.c' + source = 'test2.cpp' executable = 'test2' with open(source, 'w', encoding="utf8") as f: f.write(''' @@ 
-103,10 +103,10 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']), (1, f'{executable}: failed MIN_OS SDK')) - source = 'test3.c' + source = 'test3.cpp' executable = 'test3' with open(source, 'w', encoding="utf8") as f: f.write(''' @@ -116,13 +116,13 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,11.0', '-Wl,11.4']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,11.0', '-Wl,11.4']), (1, f'{executable}: failed SDK')) def test_PE(self): - source = 'test1.c' + source = 'test1.cpp' executable = 'test1.exe' - cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc') + cxx = determine_wellknown_cmd('CXX', 'x86_64-w64-mingw32-g++') with open(source, 'w', encoding="utf8") as f: f.write(''' @@ -135,11 +135,11 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), (1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' + executable + ': failed DYNAMIC_LIBRARIES')) - source = 'test2.c' + source = 'test2.cpp' executable = 'test2.exe' with open(source, 'w', encoding="utf8") as f: @@ -150,10 +150,10 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']), (1, executable + ': failed SUBSYSTEM_VERSION')) - source = 'test3.c' + source = 'test3.cpp' executable = 'test3.exe' with open(source, 'w', encoding="utf8") as f: f.write(''' @@ -166,7 +166,7 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), + self.assertEqual(call_symbol_check(cxx, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), (0, '')) diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index d764be5d0a..60e1ee469e 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -3,14 +3,16 @@ $(package)_version=2.1.12-stable $(package)_download_path=https://github.com/libevent/libevent/releases/download/release-$($(package)_version)/ $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=92e6de1be9ec176428fd2367677e61ceffc2ee1cb119035037a27d346b0403bb +$(package)_patches=cmake_fixups.patch +$(package)_build_subdir=build # When building for Windows, we set _WIN32_WINNT to target the same Windows # version as we do in configure. Due to quirks in libevents build system, this # is also required to enable support for ipv6. See #19375. 
define $(package)_set_vars - $(package)_config_opts=--disable-shared --disable-openssl --disable-libevent-regress --disable-samples - $(package)_config_opts += --disable-dependency-tracking --enable-option-checking - $(package)_config_opts_release=--disable-debug-mode + $(package)_config_opts=-DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON + $(package)_config_opts+=-DEVENT__DISABLE_SAMPLES=ON -DEVENT__DISABLE_REGRESS=ON + $(package)_config_opts+=-DEVENT__DISABLE_TESTS=ON -DEVENT__LIBRARY_TYPE=STATIC $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601 ifeq ($(NO_HARDEN),) @@ -19,11 +21,11 @@ define $(package)_set_vars endef define $(package)_preprocess_cmds - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux + patch -p1 < $($(package)_patch_dir)/cmake_fixups.patch endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) -S .. -B . endef define $(package)_build_cmds @@ -35,7 +37,7 @@ define $(package)_stage_cmds endef define $(package)_postprocess_cmds - rm lib/*.la && \ + rm bin/event_rpcgen.py && \ rm include/ev*.h && \ rm include/event2/*_compat.h endef diff --git a/depends/patches/libevent/cmake_fixups.patch b/depends/patches/libevent/cmake_fixups.patch new file mode 100644 index 0000000000..d80c1a9489 --- /dev/null +++ b/depends/patches/libevent/cmake_fixups.patch @@ -0,0 +1,35 @@ +cmake: set minimum version to 3.5 + +Fix generated pkg-config files, see +https://github.com/libevent/libevent/pull/1165. + +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -19,7 +19,7 @@ + # start libevent.sln + # + +-cmake_minimum_required(VERSION 3.1 FATAL_ERROR) ++cmake_minimum_required(VERSION 3.5 FATAL_ERROR) + + if (POLICY CMP0054) + cmake_policy(SET CMP0054 NEW) +diff --git a/cmake/AddEventLibrary.cmake b/cmake/AddEventLibrary.cmake +index 04f5837e..d8ea42c4 100644 +--- a/cmake/AddEventLibrary.cmake ++++ b/cmake/AddEventLibrary.cmake +@@ -20,12 +20,12 @@ macro(generate_pkgconfig LIB_NAME) + + set(LIBS "") + foreach (LIB ${LIB_PLATFORM}) +- set(LIBS "${LIBS} -L${LIB}") ++ set(LIBS "${LIBS} -l${LIB}") + endforeach() + + set(OPENSSL_LIBS "") + foreach(LIB ${OPENSSL_LIBRARIES}) +- set(OPENSSL_LIBS "${OPENSSL_LIBS} -L${LIB}") ++ set(OPENSSL_LIBS "${OPENSSL_LIBS} -l${LIB}") + endforeach() + + configure_file("lib${LIB_NAME}.pc.in" "lib${LIB_NAME}.pc" @ONLY) diff --git a/depends/patches/qt/dont_hardcode_pwd.patch b/depends/patches/qt/dont_hardcode_pwd.patch index a74e9cb098..f6955b2f20 100644 --- a/depends/patches/qt/dont_hardcode_pwd.patch +++ b/depends/patches/qt/dont_hardcode_pwd.patch @@ -1,13 +1,13 @@ -commit 0e953866fc4672486e29e1ba6d83b4207e7b2f0b -Author: fanquake <fanquake@gmail.com> -Date: Tue Aug 18 15:09:06 2020 +0800 +Do not assume FHS in scripts - Don't hardcode pwd path +On systems that do not follow the Filesystem Hierarchy Standard, such as +guix, the hardcoded `/bin/pwd` will fail to be found so that the script +will fail. - Let a man use his builtins if he wants to! Also, removes the unnecessary - assumption that pwd lives under /bin/pwd. +Use `pwd`, instead, so that the command can be found through the normal +path search mechanism. - See #15581. +See https://github.com/qt/qtbase/commit/3388de698bfb9bbc456c08f03e83bf3e749df35c. 
diff --git a/qtbase/configure b/qtbase/configure index 08b49a8d..faea5b55 100755 diff --git a/doc/dependencies.md b/doc/dependencies.md index 63c505c9cb..3bc931e8f6 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -8,7 +8,7 @@ You can find installation instructions in the `build-*.md` file for your platfor | --- | --- | | [Autoconf](https://www.gnu.org/software/autoconf/) | [2.69](https://github.com/bitcoin/bitcoin/pull/17769) | | [Automake](https://www.gnu.org/software/automake/) | [1.13](https://github.com/bitcoin/bitcoin/pull/18290) | -| [Clang](https://clang.llvm.org) | [15.0](https://github.com/bitcoin/bitcoin/pull/29165) | +| [Clang](https://clang.llvm.org) | [16.0](https://github.com/bitcoin/bitcoin/pull/30263) | | [GCC](https://gcc.gnu.org) | [11.1](https://github.com/bitcoin/bitcoin/pull/29091) | | [Python](https://www.python.org) (scripts, tests) | [3.9](https://github.com/bitcoin/bitcoin/pull/28211) | | [systemtap](https://sourceware.org/systemtap/) ([tracing](tracing.md))| N/A | diff --git a/doc/descriptors.md b/doc/descriptors.md index 3b94ec03e4..5e8e4a24b0 100644 --- a/doc/descriptors.md +++ b/doc/descriptors.md @@ -63,6 +63,7 @@ Output descriptors currently support: - `wsh(sortedmulti(1,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1/0/*,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/0/0/*))` describes a set of *1-of-2* P2WSH multisig outputs where one multisig key is the *1/0/`i`* child of the first specified xpub and the other multisig key is the *0/0/`i`* child of the second specified xpub, and `i` is any number in a configurable range (`0-1000` by default). The order of public keys in the resulting witnessScripts is determined by the lexicographic order of the public keys at that index. - `tr(c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5,{pk(fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556),pk(e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13)})` describes a P2TR output with the `c6...` x-only pubkey as internal key, and two script paths. - `tr(c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5,sortedmulti_a(2,2f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,5cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc))` describes a P2TR output with the `c6...` x-only pubkey as internal key, and a single `multi_a` script that needs 2 signatures with 2 specified x-only keys, which will be sorted lexicographically. +- `wsh(sortedmulti(2,[6f53d49c/44h/1h/0h]tpubDDjsCRDQ9YzyaAq9rspCfq8RZFrWoBpYnLxK6sS2hS2yukqSczgcYiur8Scx4Hd5AZatxTuzMtJQJhchufv1FRFanLqUP7JHwusSSpfcEp2/0/*,[e6807791/44h/1h/0h]tpubDDAfvogaaAxaFJ6c15ht7Tq6ZmiqFYfrSmZsHu7tHXBgnjMZSHAeHSwhvjARNA6Qybon4ksPksjRbPDVp7yXA1KjTjSd5x18KHqbppnXP1s/0/*,[367c9cfa/44h/1h/0h]tpubDDtPnSgWYk8dDnaDwnof4ehcnjuL5VoUt1eW2MoAed1grPHuXPDnkX1fWMvXfcz3NqFxPbhqNZ3QBdYjLz2hABeM9Z2oqMR1Gt2HHYDoCgh/0/*))#av0kxgw0` describes a *2-of-3* multisig. For brevity, the internal "change" descriptor accompanying the above external "receiving" descriptor is not included here, but it typically differs only in the xpub derivation steps, ending in `/1/*` for change addresses. ## Reference @@ -167,9 +168,9 @@ The basic steps are: the participant's signer wallet. Avoid reusing this wallet for any purpose other than signing transactions from the corresponding multisig we are about to create. 
Hint: extract the wallet's xpubs using `listdescriptors` and pick the one from the `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses) - 2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing the two descriptors: + 2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing the external and internal descriptors: `wsh(sortedmulti(<M>,XPUB1/0/*,XPUB2/0/*,…,XPUBN/0/*))` and `wsh(sortedmulti(<M>,XPUB1/1/*,XPUB2/1/*,…,XPUBN/1/*))` - (one descriptor w/ `0` for receiving addresses and another w/ `1` for change). Every participant does this + (one descriptor w/ `0` for receiving addresses and another w/ `1` for change). Every participant does this. All key origin information (master key fingerprint and all derivation steps) should be included with xpubs for proper support of hardware devices / external signers 3. A receiving address is generated for the multisig. As a check to ensure step 2 was done correctly, every participant should verify they get the same addresses 4. Funds are sent to the resulting address diff --git a/doc/init.md b/doc/init.md index 64ab971557..b570fed22c 100644 --- a/doc/init.md +++ b/doc/init.md @@ -35,8 +35,10 @@ it will use a special cookie file for authentication. The cookie is generated wi content when the daemon starts, and deleted when it exits. Read access to this file controls who can access it through RPC. -By default the cookie is stored in the data directory, but it's location can be overridden -with the option '-rpccookiefile'. +By default the cookie is stored in the data directory, but its location can be +overridden with the option `-rpccookiefile`. Default file permissions for the +cookie are "owner" (i.e. user read/writeable) via default application-wide file +umask of `0077`, but these can be overridden with the `-rpccookieperms` option. This allows for running bitcoind without having to do any manual configuration. diff --git a/doc/policy/packages.md b/doc/policy/packages.md index a220bdd17f..9b321799f1 100644 --- a/doc/policy/packages.md +++ b/doc/policy/packages.md @@ -38,11 +38,11 @@ The following rules are enforced for all packages: * Only limited package replacements are currently considered. (#28984) - - All direct conflicts must signal replacement (or have `-mempoolfullrbf=1` set). + - All direct conflicts must signal replacement (or the node must have `-mempoolfullrbf=1` set). - Packages are 1-parent-1-child, with no in-mempool ancestors of the package. - - All conflicting clusters(connected components of mempool transactions) must be clusters of up to size 2. + - All conflicting clusters (connected components of mempool transactions) must be clusters of up to size 2. - No more than MAX_REPLACEMENT_CANDIDATES transactions can be replaced, analogous to regular [replacement rule](./mempool-replacements.md) 5). @@ -56,7 +56,7 @@ The following rules are enforced for all packages: - *Rationale*: Basic support for package RBF can be used by wallets by making chains of no longer than two, then directly conflicting - those chains when needed. Combined with V3 transactions this can + those chains when needed. Combined with TRUC transactions this can result in more robust fee bumping. More general package RBF may be enabled in the future. 
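The `doc/init.md` and `doc/descriptors.md` hunks above describe two things a client typically combines in practice: cookie-file authentication and descriptor checksums for multisig import. The following Python sketch (not part of the diff) shows one way to put them together: it reads the default `.cookie` file for HTTP basic auth and asks the `getdescriptorinfo` RPC for the checksum a `wsh(sortedmulti(...))` descriptor needs before `importdescriptors` will accept it. The datadir path, RPC port, and the `XPUB1`/`XPUB2` placeholders are illustrative assumptions, not values taken from the diff.

import base64
import json
import urllib.request
from pathlib import Path

# Assumed defaults: mainnet datadir and RPC port; -rpccookiefile/-datadir can move the cookie.
COOKIE = Path.home() / ".bitcoin" / ".cookie"   # contents look like "__cookie__:<random>"
URL = "http://127.0.0.1:8332/"

def rpc(method, *params):
    # The cookie file holds "user:password"; send it as HTTP basic auth.
    auth = base64.b64encode(COOKIE.read_text().strip().encode()).decode()
    req = urllib.request.Request(
        URL,
        data=json.dumps({"jsonrpc": "2.0", "id": 0, "method": method, "params": list(params)}).encode(),
        headers={"Authorization": f"Basic {auth}", "Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        reply = json.load(resp)
    if reply.get("error"):
        raise RuntimeError(reply["error"])
    return reply["result"]

# XPUB1/XPUB2 are placeholders; substitute each participant's real xpub/tpub
# (with key origin info) for the call to succeed against a running node.
external = "wsh(sortedmulti(2,XPUB1/0/*,XPUB2/0/*))"
checksum = rpc("getdescriptorinfo", external)["checksum"]
print(f"{external}#{checksum}")   # checksummed form expected by importdescriptors

Whether such calls are possible at all is gated by who can read the cookie file: as the init.md hunk notes, it is created owner-only via the default 0077 umask, and `-rpccookieperms` can change that.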
diff --git a/doc/release-notes-27307.md b/doc/release-notes-27307.md new file mode 100644 index 0000000000..58fc7098b5 --- /dev/null +++ b/doc/release-notes-27307.md @@ -0,0 +1,8 @@ +Wallet +--- + +The wallet now detects when wallet transactions conflict with the mempool. Mempool +conflicting transactions can be seen in the `"mempoolconflicts"` field of +`gettransaction`. The inputs of mempool conflicted transactions can now be respent +without manually abandoning the transactions when the parent transaction is dropped +from the mempool, which can cause wallet balances to appear higher. diff --git a/doc/release-notes-29091-29165.md b/doc/release-notes-29091-29165.md index 9c9f8e4e50..e13d29adc6 100644 --- a/doc/release-notes-29091-29165.md +++ b/doc/release-notes-29091-29165.md @@ -1,5 +1,5 @@ Build ----- -GCC 11.1 or later, or Clang 15+ or later, +GCC 11.1 or later, or Clang 16.0 or later, are now required to compile Bitcoin Core. diff --git a/doc/release-notes-29496.md b/doc/release-notes-29496.md new file mode 100644 index 0000000000..799b2ca01d --- /dev/null +++ b/doc/release-notes-29496.md @@ -0,0 +1,11 @@ +Mempool Policy Changes +---------------------- + +- Transactions with version number set to 3 are now treated as standard on all networks (#29496), + subject to Opt-in Topologically Restricted Until Confirmation (TRUC) Transactions policy as + described in [BIP 431](https://github.com/bitcoin/bips/blob/master/bip-0431.mediawiki). The + policy includes limits on spending unconfirmed outputs (#28948), eviction of a previous descendant + if a more incentive-compatible one is submitted (#29306), and a maximum transaction size of 10,000vB + (#29873). These restrictions simplify the assessment of incentive compatibility of accepting or + replacing TRUC transactions, thus ensuring any replacements are more profitable for the node and + making fee-bumping more reliable. diff --git a/doc/release-notes/release-notes-26.2.md b/doc/release-notes/release-notes-26.2.md new file mode 100644 index 0000000000..67d8512dd0 --- /dev/null +++ b/doc/release-notes/release-notes-26.2.md @@ -0,0 +1,94 @@ +26.2 Release Notes +================== + +Bitcoin Core version 26.2 is now available from: + + <https://bitcoincore.org/bin/bitcoin-core-26.2/> + +This release includes new features, various bug fixes and performance +improvements, as well as updated translations. + +Please report bugs using the issue tracker at GitHub: + + <https://github.com/bitcoin/bitcoin/issues> + +To receive security and update notifications, please subscribe to: + + <https://bitcoincore.org/en/list/announcements/join/> + +How to Upgrade +============== + +If you are running an older version, shut it down. Wait until it has completely +shut down (which might take a few minutes in some cases), then run the +installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on macOS) +or `bitcoind`/`bitcoin-qt` (on Linux). + +Upgrading directly from a version of Bitcoin Core that has reached its EOL is +possible, but it might take some time if the data directory needs to be migrated. Old +wallet versions of Bitcoin Core are generally supported. + +Compatibility +============== + +Bitcoin Core is supported and extensively tested on operating systems +using the Linux kernel, macOS 11.0+, and Windows 7 and newer. Bitcoin +Core should also work on most other Unix-like systems but is not as +frequently tested on them. It is not recommended to use Bitcoin Core on +unsupported systems. 
+ +Notable changes +=============== + +### Script + +- #29853: sign: don't assume we are parsing a sane TapMiniscript + +### P2P and network changes + +- #29691: Change Luke Dashjr seed to dashjr-list-of-p2p-nodes.us +- #30085: p2p: detect addnode cjdns peers in GetAddedNodeInfo() + +### RPC + +- #29869: rpc, bugfix: Enforce maximum value for setmocktime +- #28554: bugfix: throw an error if an invalid parameter is passed to getnetworkhashps RPC +- #30094: rpc: move UniValue in blockToJSON +- #29870: rpc: Reword SighashFromStr error message + +### Build + +- #29747: depends: fix mingw-w64 Qt DEBUG=1 build +- #29985: depends: Fix build of Qt for 32-bit platforms with recent glibc +- #30151: depends: Fetch miniupnpc sources from an alternative website +- #30283: upnp: fix build with miniupnpc 2.2.8 + +### Misc + +- #29776: ThreadSanitizer: Fix #29767 +- #29856: ci: Bump s390x to ubuntu:24.04 +- #29764: doc: Suggest installing dev packages for debian/ubuntu qt5 build +- #30149: contrib: Renew Windows code signing certificate + +Credits +======= + +Thanks to everyone who directly contributed to this release: + +- Antoine Poinsot +- Ava Chow +- Cory Fields +- dergoegge +- fanquake +- glozow +- Hennadii Stepanov +- Jameson Lopp +- jonatack +- laanwj +- Luke Dashjr +- MarcoFalke +- nanlour +- willcl-ark + +As well as to everyone that helped with translations on +[Transifex](https://www.transifex.com/bitcoin/bitcoin/). diff --git a/src/.clang-tidy b/src/.clang-tidy index 61adce1d50..3569dd04b1 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -6,10 +6,12 @@ bugprone-move-forwarding-reference, bugprone-string-constructor, bugprone-use-after-move, bugprone-lambda-function-name, +bugprone-unhandled-self-assignment, misc-unused-using-decls, misc-no-recursion, modernize-use-default-member-init, modernize-use-emplace, +modernize-use-equals-default, modernize-use-noexcept, modernize-use-nullptr, performance-*, @@ -23,8 +25,10 @@ readability-const-return-type, readability-redundant-declaration, readability-redundant-string-init, ' +HeaderFilterRegex: '.' WarningsAsErrors: '*' CheckOptions: - key: performance-move-const-arg.CheckTriviallyCopyableMove value: false -HeaderFilterRegex: '.' 
+ - key: bugprone-unhandled-self-assignment.WarnOnlyIfThisHasSuspiciousField + value: false diff --git a/src/Makefile.am b/src/Makefile.am index 0c47a737d0..72dd942c40 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -51,15 +51,15 @@ LIBBITCOIN_CRYPTO = $(LIBBITCOIN_CRYPTO_BASE) if ENABLE_SSE41 LIBBITCOIN_CRYPTO_SSE41 = crypto/libbitcoin_crypto_sse41.la LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_SSE41) +if ENABLE_X86_SHANI +LIBBITCOIN_CRYPTO_X86_SHANI = crypto/libbitcoin_crypto_x86_shani.la +LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_X86_SHANI) +endif endif if ENABLE_AVX2 LIBBITCOIN_CRYPTO_AVX2 = crypto/libbitcoin_crypto_avx2.la LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_AVX2) endif -if ENABLE_X86_SHANI -LIBBITCOIN_CRYPTO_X86_SHANI = crypto/libbitcoin_crypto_x86_shani.la -LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_X86_SHANI) -endif if ENABLE_ARM_SHANI LIBBITCOIN_CRYPTO_ARM_SHANI = crypto/libbitcoin_crypto_arm_shani.la LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_ARM_SHANI) @@ -192,11 +192,9 @@ BITCOIN_CORE_H = \ kernel/mempool_entry.h \ kernel/mempool_limits.h \ kernel/mempool_options.h \ - kernel/mempool_persist.h \ kernel/mempool_removal_reason.h \ kernel/messagestartchars.h \ kernel/notifications_interface.h \ - kernel/validation_cache_sizes.h \ kernel/warning.h \ key.h \ key_io.h \ @@ -228,6 +226,7 @@ BITCOIN_CORE_H = \ node/interface_ui.h \ node/kernel_notifications.h \ node/mempool_args.h \ + node/mempool_persist.h \ node/mempool_persist_args.h \ node/miner.h \ node/mini_miner.h \ @@ -240,11 +239,9 @@ BITCOIN_CORE_H = \ node/txreconciliation.h \ node/types.h \ node/utxo_snapshot.h \ - node/validation_cache_args.h \ node/warnings.h \ noui.h \ outputtype.h \ - policy/v3_policy.h \ policy/feerate.h \ policy/fees.h \ policy/fees_args.h \ @@ -252,6 +249,7 @@ BITCOIN_CORE_H = \ policy/policy.h \ policy/rbf.h \ policy/settings.h \ + policy/truc_policy.h \ pow.h \ protocol.h \ psbt.h \ @@ -413,7 +411,6 @@ libbitcoin_node_a_SOURCES = \ kernel/context.cpp \ kernel/cs_main.cpp \ kernel/disconnected_transactions.cpp \ - kernel/mempool_persist.cpp \ kernel/mempool_removal_reason.cpp \ mapport.cpp \ net.cpp \ @@ -435,6 +432,7 @@ libbitcoin_node_a_SOURCES = \ node/interfaces.cpp \ node/kernel_notifications.cpp \ node/mempool_args.cpp \ + node/mempool_persist.cpp \ node/mempool_persist_args.cpp \ node/miner.cpp \ node/mini_miner.cpp \ @@ -445,15 +443,14 @@ libbitcoin_node_a_SOURCES = \ node/transaction.cpp \ node/txreconciliation.cpp \ node/utxo_snapshot.cpp \ - node/validation_cache_args.cpp \ node/warnings.cpp \ noui.cpp \ - policy/v3_policy.cpp \ policy/fees.cpp \ policy/fees_args.cpp \ policy/packages.cpp \ policy/rbf.cpp \ policy/settings.cpp \ + policy/truc_policy.cpp \ pow.cpp \ rest.cpp \ rpc/blockchain.cpp \ @@ -625,7 +622,7 @@ crypto_libbitcoin_crypto_x86_shani_la_LDFLAGS = $(AM_LDFLAGS) -static crypto_libbitcoin_crypto_x86_shani_la_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) -static crypto_libbitcoin_crypto_x86_shani_la_CPPFLAGS = $(AM_CPPFLAGS) crypto_libbitcoin_crypto_x86_shani_la_CXXFLAGS += $(X86_SHANI_CXXFLAGS) -crypto_libbitcoin_crypto_x86_shani_la_CPPFLAGS += -DENABLE_X86_SHANI +crypto_libbitcoin_crypto_x86_shani_la_CPPFLAGS += -DENABLE_SSE41 -DENABLE_X86_SHANI crypto_libbitcoin_crypto_x86_shani_la_SOURCES = crypto/sha256_x86_shani.cpp # See explanation for -static in crypto_libbitcoin_crypto_base_la's LDFLAGS and @@ -708,7 +705,6 @@ libbitcoin_common_a_SOURCES = \ netbase.cpp \ net_permissions.cpp \ outputtype.cpp \ - policy/v3_policy.cpp \ policy/feerate.cpp \ policy/policy.cpp \ 
protocol.cpp \ @@ -950,18 +946,17 @@ libbitcoinkernel_la_SOURCES = \ kernel/context.cpp \ kernel/cs_main.cpp \ kernel/disconnected_transactions.cpp \ - kernel/mempool_persist.cpp \ kernel/mempool_removal_reason.cpp \ logging.cpp \ node/blockstorage.cpp \ node/chainstate.cpp \ node/utxo_snapshot.cpp \ - policy/v3_policy.cpp \ policy/feerate.cpp \ policy/packages.cpp \ policy/policy.cpp \ policy/rbf.cpp \ policy/settings.cpp \ + policy/truc_policy.cpp \ pow.cpp \ primitives/block.cpp \ primitives/transaction.cpp \ diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 2ba72c9e76..7e3aa369c7 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -49,10 +49,12 @@ bench_bench_bitcoin_SOURCES = \ bench/poly1305.cpp \ bench/pool.cpp \ bench/prevector.cpp \ + bench/random.cpp \ bench/readblock.cpp \ bench/rollingbloom.cpp \ bench/rpc_blockchain.cpp \ bench/rpc_mempool.cpp \ + bench/sign_transaction.cpp \ bench/streams_findbyte.cpp \ bench/strencodings.cpp \ bench/util_time.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 633d0776f5..0993a65eff 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -173,8 +173,7 @@ BITCOIN_TESTS =\ test/validation_flush_tests.cpp \ test/validation_tests.cpp \ test/validationinterface_tests.cpp \ - test/versionbits_tests.cpp \ - test/xoroshiro128plusplus_tests.cpp + test/versionbits_tests.cpp if ENABLE_WALLET BITCOIN_TESTS += \ @@ -310,6 +309,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/crypto_aes256.cpp \ test/fuzz/crypto_aes256cbc.cpp \ test/fuzz/crypto_chacha20.cpp \ + test/fuzz/crypto_chacha20poly1305.cpp \ test/fuzz/crypto_common.cpp \ test/fuzz/crypto_diff_fuzz_chacha20.cpp \ test/fuzz/crypto_hkdf_hmac_sha256_l32.cpp \ diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index 6a1fd712bd..960eb078c8 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -23,8 +23,7 @@ TEST_UTIL_H = \ test/util/str.h \ test/util/transaction_utils.h \ test/util/txmempool.h \ - test/util/validation.h \ - test/util/xoroshiro128plusplus.h + test/util/validation.h if ENABLE_WALLET TEST_UTIL_H += wallet/test/util.h diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 4d34c24ba9..e9838d7222 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -53,7 +53,7 @@ template <typename Data> bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data& data) { // Generate random temporary filename - const uint16_t randv{GetRand<uint16_t>()}; + const uint16_t randv{FastRandomContext().rand<uint16_t>()}; std::string tmpfn = strprintf("%s.%04x", prefix, randv); // open temp output file diff --git a/src/addrman.cpp b/src/addrman.cpp index d0b820ee65..054a9bee32 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -776,7 +776,7 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::option const AddrInfo& info{it_found->second}; // With probability GetChance() * chance_factor, return the entry. - if (insecure_rand.randbits(30) < chance_factor * info.GetChance() * (1 << 30)) { + if (insecure_rand.randbits<30>() < chance_factor * info.GetChance() * (1 << 30)) { LogPrint(BCLog::ADDRMAN, "Selected %s from %s\n", info.ToStringAddrPort(), search_tried ? 
"tried" : "new"); return {info, info.m_last_try}; } diff --git a/src/arith_uint256.h b/src/arith_uint256.h index ba36cebbdc..538fbccab9 100644 --- a/src/arith_uint256.h +++ b/src/arith_uint256.h @@ -43,8 +43,10 @@ public: base_uint& operator=(const base_uint& b) { - for (int i = 0; i < WIDTH; i++) - pn[i] = b.pn[i]; + if (this != &b) { + for (int i = 0; i < WIDTH; i++) + pn[i] = b.pn[i]; + } return *this; } @@ -240,7 +242,7 @@ public: /** 256-bit unsigned big integer. */ class arith_uint256 : public base_uint<256> { public: - arith_uint256() {} + arith_uint256() = default; arith_uint256(const base_uint<256>& b) : base_uint<256>(b) {} arith_uint256(uint64_t b) : base_uint<256>(b) {} diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp index 1685a120b4..2551ff3593 100644 --- a/src/bench/crypto_hash.cpp +++ b/src/bench/crypto_hash.cpp @@ -196,22 +196,6 @@ static void SipHash_32b(benchmark::Bench& bench) }); } -static void FastRandom_32bit(benchmark::Bench& bench) -{ - FastRandomContext rng(true); - bench.run([&] { - rng.rand32(); - }); -} - -static void FastRandom_1bit(benchmark::Bench& bench) -{ - FastRandomContext rng(true); - bench.run([&] { - rng.randbool(); - }); -} - static void MuHash(benchmark::Bench& bench) { MuHash3072 acc; @@ -274,8 +258,6 @@ BENCHMARK(SHA256D64_1024_STANDARD, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA256D64_1024_SSE4, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA256D64_1024_AVX2, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA256D64_1024_SHANI, benchmark::PriorityLevel::HIGH); -BENCHMARK(FastRandom_32bit, benchmark::PriorityLevel::HIGH); -BENCHMARK(FastRandom_1bit, benchmark::PriorityLevel::HIGH); BENCHMARK(MuHash, benchmark::PriorityLevel::HIGH); BENCHMARK(MuHashMul, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/logging.cpp b/src/bench/logging.cpp index c97c4e151b..8a745a0ba7 100644 --- a/src/bench/logging.cpp +++ b/src/bench/logging.cpp @@ -20,7 +20,7 @@ static void Logging(benchmark::Bench& bench, const std::vector<const char*>& ext TestingSetup test_setup{ ChainType::REGTEST, - extra_args, + {.extra_args = extra_args}, }; bench.run([&] { log(); }); diff --git a/src/bench/mempool_stress.cpp b/src/bench/mempool_stress.cpp index fe3e204fb3..3c82f55c19 100644 --- a/src/bench/mempool_stress.cpp +++ b/src/bench/mempool_stress.cpp @@ -106,7 +106,7 @@ static void ComplexMemPool(benchmark::Bench& bench) static void MempoolCheck(benchmark::Bench& bench) { FastRandomContext det_rand{true}; - auto testing_setup = MakeNoLogFileContext<TestChain100Setup>(ChainType::REGTEST, {"-checkmempool=1"}); + auto testing_setup = MakeNoLogFileContext<TestChain100Setup>(ChainType::REGTEST, {.extra_args = {"-checkmempool=1"}}); CTxMemPool& pool = *testing_setup.get()->m_node.mempool; LOCK2(cs_main, pool.cs); testing_setup->PopulateMempool(det_rand, 400, true); diff --git a/src/bench/random.cpp b/src/bench/random.cpp new file mode 100644 index 0000000000..cff215d5a7 --- /dev/null +++ b/src/bench/random.cpp @@ -0,0 +1,103 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <bench/bench.h> +#include <random.h> + +#include <cstdint> +#include <numeric> + +namespace { + +template<typename RNG> +void BenchRandom_rand64(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(1).unit("number").run([&] { + rng.rand64(); + }); +} + +template<typename RNG> +void BenchRandom_rand32(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(1).unit("number").run([&] { + rng.rand32(); + }); +} + +template<typename RNG> +void BenchRandom_randbool(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(1).unit("number").run([&] { + rng.randbool(); + }); +} + +template<typename RNG> +void BenchRandom_randbits(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(64).unit("number").run([&] { + for (int i = 1; i <= 64; ++i) { + rng.randbits(i); + } + }); +} + +template<int RANGE, typename RNG> +void BenchRandom_randrange(benchmark::Bench& bench, RNG&& rng) noexcept +{ + bench.batch(RANGE).unit("number").run([&] { + for (int i = 1; i <= RANGE; ++i) { + rng.randrange(i); + } + }); +} + +template<int RANGE, typename RNG> +void BenchRandom_stdshuffle(benchmark::Bench& bench, RNG&& rng) noexcept +{ + uint64_t data[RANGE]; + std::iota(std::begin(data), std::end(data), uint64_t(0)); + bench.batch(RANGE).unit("number").run([&] { + std::shuffle(std::begin(data), std::end(data), rng); + }); +} + +void FastRandom_rand64(benchmark::Bench& bench) { BenchRandom_rand64(bench, FastRandomContext(true)); } +void FastRandom_rand32(benchmark::Bench& bench) { BenchRandom_rand32(bench, FastRandomContext(true)); } +void FastRandom_randbool(benchmark::Bench& bench) { BenchRandom_randbool(bench, FastRandomContext(true)); } +void FastRandom_randbits(benchmark::Bench& bench) { BenchRandom_randbits(bench, FastRandomContext(true)); } +void FastRandom_randrange100(benchmark::Bench& bench) { BenchRandom_randrange<100>(bench, FastRandomContext(true)); } +void FastRandom_randrange1000(benchmark::Bench& bench) { BenchRandom_randrange<1000>(bench, FastRandomContext(true)); } +void FastRandom_randrange1000000(benchmark::Bench& bench) { BenchRandom_randrange<1000000>(bench, FastRandomContext(true)); } +void FastRandom_stdshuffle100(benchmark::Bench& bench) { BenchRandom_stdshuffle<100>(bench, FastRandomContext(true)); } + +void InsecureRandom_rand64(benchmark::Bench& bench) { BenchRandom_rand64(bench, InsecureRandomContext(251438)); } +void InsecureRandom_rand32(benchmark::Bench& bench) { BenchRandom_rand32(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randbool(benchmark::Bench& bench) { BenchRandom_randbool(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randbits(benchmark::Bench& bench) { BenchRandom_randbits(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randrange100(benchmark::Bench& bench) { BenchRandom_randrange<100>(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randrange1000(benchmark::Bench& bench) { BenchRandom_randrange<1000>(bench, InsecureRandomContext(251438)); } +void InsecureRandom_randrange1000000(benchmark::Bench& bench) { BenchRandom_randrange<1000000>(bench, InsecureRandomContext(251438)); } +void InsecureRandom_stdshuffle100(benchmark::Bench& bench) { BenchRandom_stdshuffle<100>(bench, InsecureRandomContext(251438)); } + +} // namespace + +BENCHMARK(FastRandom_rand64, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_rand32, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randbool, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randbits, benchmark::PriorityLevel::HIGH); 
+BENCHMARK(FastRandom_randrange100, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randrange1000, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_randrange1000000, benchmark::PriorityLevel::HIGH); +BENCHMARK(FastRandom_stdshuffle100, benchmark::PriorityLevel::HIGH); + +BENCHMARK(InsecureRandom_rand64, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_rand32, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randbool, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randbits, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randrange100, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randrange1000, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_randrange1000000, benchmark::PriorityLevel::HIGH); +BENCHMARK(InsecureRandom_stdshuffle100, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/sign_transaction.cpp b/src/bench/sign_transaction.cpp new file mode 100644 index 0000000000..7a2e26e339 --- /dev/null +++ b/src/bench/sign_transaction.cpp @@ -0,0 +1,70 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <bench/bench.h> +#include <addresstype.h> +#include <coins.h> +#include <key.h> +#include <primitives/transaction.h> +#include <pubkey.h> +#include <script/interpreter.h> +#include <script/script.h> +#include <script/sign.h> +#include <uint256.h> +#include <util/translation.h> + +enum class InputType { + P2WPKH, // segwitv0, witness-pubkey-hash (ECDSA signature) + P2TR, // segwitv1, taproot key-path spend (Schnorr signature) +}; + +static void SignTransactionSingleInput(benchmark::Bench& bench, InputType input_type) +{ + ECC_Context ecc_context{}; + + FlatSigningProvider keystore; + std::vector<CScript> prev_spks; + + // Create a bunch of keys / UTXOs to avoid signing with the same key repeatedly + for (int i = 0; i < 32; i++) { + CKey privkey = GenerateRandomKey(); + CPubKey pubkey = privkey.GetPubKey(); + CKeyID key_id = pubkey.GetID(); + keystore.keys.emplace(key_id, privkey); + keystore.pubkeys.emplace(key_id, pubkey); + + // Create specified locking script type + CScript prev_spk; + switch (input_type) { + case InputType::P2WPKH: prev_spk = GetScriptForDestination(WitnessV0KeyHash(pubkey)); break; + case InputType::P2TR: prev_spk = GetScriptForDestination(WitnessV1Taproot(XOnlyPubKey{pubkey})); break; + default: assert(false); + } + prev_spks.push_back(prev_spk); + } + + // Simple 1-input tx with artificial outpoint + // (note that for the purpose of signing with SIGHASH_ALL we don't need any outputs) + COutPoint prevout{/*hashIn=*/Txid::FromUint256(uint256::ONE), /*nIn=*/1337}; + CMutableTransaction unsigned_tx; + unsigned_tx.vin.emplace_back(prevout); + + // Benchmark. 
+ int iter = 0; + bench.minEpochIterations(100).run([&] { + CMutableTransaction tx{unsigned_tx}; + std::map<COutPoint, Coin> coins; + CScript prev_spk = prev_spks[(iter++) % prev_spks.size()]; + coins[prevout] = Coin(CTxOut(10000, prev_spk), /*nHeightIn=*/100, /*fCoinBaseIn=*/false); + std::map<int, bilingual_str> input_errors; + bool complete = SignTransaction(tx, &keystore, coins, SIGHASH_ALL, input_errors); + assert(complete); + }); +} + +static void SignTransactionECDSA(benchmark::Bench& bench) { SignTransactionSingleInput(bench, InputType::P2WPKH); } +static void SignTransactionSchnorr(benchmark::Bench& bench) { SignTransactionSingleInput(bench, InputType::P2TR); } + +BENCHMARK(SignTransactionECDSA, benchmark::PriorityLevel::HIGH); +BENCHMARK(SignTransactionSchnorr, benchmark::PriorityLevel::HIGH); diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp index ecbdcd48bb..98af162b4d 100644 --- a/src/bitcoin-chainstate.cpp +++ b/src/bitcoin-chainstate.cpp @@ -15,7 +15,6 @@ #include <kernel/chainstatemanager_opts.h> #include <kernel/checks.h> #include <kernel/context.h> -#include <kernel/validation_cache_sizes.h> #include <kernel/warning.h> #include <consensus/validation.h> @@ -63,13 +62,6 @@ int main(int argc, char* argv[]) // properly assert(kernel::SanityChecks(kernel_context)); - // Necessary for CheckInputScripts (eventually called by ProcessNewBlock), - // which will try the script cache first and fall back to actually - // performing the check with the signature cache. - kernel::ValidationCacheSizes validation_cache_sizes{}; - Assert(InitSignatureCache(validation_cache_sizes.signature_cache_bytes)); - Assert(InitScriptExecutionCache(validation_cache_sizes.script_execution_cache_bytes)); - ValidationSignals validation_signals{std::make_unique<util::ImmediateTaskRunner>()}; class KernelNotifications : public kernel::Notifications diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index 5a4513d281..2bcb4b0c3d 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -17,8 +17,8 @@ #include <unordered_map> -CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) : - nonce(GetRand<uint64_t>()), +CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) : + nonce(nonce), shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) { FillShortTxIDSelector(); //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase diff --git a/src/blockencodings.h b/src/blockencodings.h index 2b1fabadd6..c92aa05e80 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -59,7 +59,7 @@ public: uint256 blockhash; std::vector<CTransactionRef> txn; - BlockTransactions() {} + BlockTransactions() = default; explicit BlockTransactions(const BlockTransactionsRequest& req) : blockhash(req.blockhash), txn(req.indexes.size()) {} @@ -106,10 +106,15 @@ public: CBlockHeader header; - // Dummy for deserialization - CBlockHeaderAndShortTxIDs() {} + /** + * Dummy for deserialization + */ + CBlockHeaderAndShortTxIDs() = default; - CBlockHeaderAndShortTxIDs(const CBlock& block); + /** + * @param[in] nonce This should be randomly generated, and is used for the siphash secret key + */ + CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce); uint64_t GetShortID(const Wtxid& wtxid) const; @@ -141,7 +146,7 @@ public: explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {} - // extra_txn is a list of extra transactions to look at, in <witness hash, 
reference> form + // extra_txn is a list of extra orphan/conflicted/etc transactions to look at ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn); bool IsTxAvailable(size_t index) const; ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing); diff --git a/src/chain.h b/src/chain.h index bb70dbd8bc..e8a8c066c8 100644 --- a/src/chain.h +++ b/src/chain.h @@ -66,7 +66,7 @@ public: READWRITE(VARINT(obj.nTimeLast)); } - CBlockFileInfo() {} + CBlockFileInfo() = default; std::string ToString() const; diff --git a/src/coins.h b/src/coins.h index c798cc38ba..76e64b641d 100644 --- a/src/coins.h +++ b/src/coins.h @@ -154,7 +154,7 @@ class CCoinsViewCursor { public: CCoinsViewCursor(const uint256 &hashBlockIn): hashBlock(hashBlockIn) {} - virtual ~CCoinsViewCursor() {} + virtual ~CCoinsViewCursor() = default; virtual bool GetKey(COutPoint &key) const = 0; virtual bool GetValue(Coin &coin) const = 0; @@ -198,7 +198,7 @@ public: virtual std::unique_ptr<CCoinsViewCursor> Cursor() const; //! As we use CCoinsViews polymorphically, have a virtual destructor - virtual ~CCoinsView() {} + virtual ~CCoinsView() = default; //! Estimate database size (0 if not implemented) virtual size_t EstimateSize() const { return 0; } diff --git a/src/common/bloom.cpp b/src/common/bloom.cpp index ca6af90b76..076ee40635 100644 --- a/src/common/bloom.cpp +++ b/src/common/bloom.cpp @@ -239,7 +239,7 @@ bool CRollingBloomFilter::contains(Span<const unsigned char> vKey) const void CRollingBloomFilter::reset() { - nTweak = GetRand<unsigned int>(); + nTweak = FastRandomContext().rand<unsigned int>(); nEntriesThisGeneration = 0; nGeneration = 1; std::fill(data.begin(), data.end(), 0); diff --git a/src/core_read.cpp b/src/core_read.cpp index 114f92fc45..0ba271a8d2 100644 --- a/src/core_read.cpp +++ b/src/core_read.cpp @@ -245,7 +245,7 @@ bool ParseHashStr(const std::string& strHex, uint256& result) util::Result<int> SighashFromStr(const std::string& sighash) { - static std::map<std::string, int> map_sighash_values = { + static const std::map<std::string, int> map_sighash_values = { {std::string("DEFAULT"), int(SIGHASH_DEFAULT)}, {std::string("ALL"), int(SIGHASH_ALL)}, {std::string("ALL|ANYONECANPAY"), int(SIGHASH_ALL|SIGHASH_ANYONECANPAY)}, diff --git a/src/crypto/muhash.h b/src/crypto/muhash.h index cb53e1743e..222b866b6d 100644 --- a/src/crypto/muhash.h +++ b/src/crypto/muhash.h @@ -97,7 +97,7 @@ private: public: /* The empty set. */ - MuHash3072() noexcept {}; + MuHash3072() noexcept = default; /* A singleton with variable sized data in it. */ explicit MuHash3072(Span<const unsigned char> in) noexcept; diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index c883bd2f03..89d7204808 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -621,7 +621,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem } } -#if defined(ENABLE_X86_SHANI) +#if defined(ENABLE_SSE41) && defined(ENABLE_X86_SHANI) if (have_x86_shani) { Transform = sha256_x86_shani::Transform; TransformD64 = TransformD64Wrapper<sha256_x86_shani::Transform>; diff --git a/src/crypto/sha256_x86_shani.cpp b/src/crypto/sha256_x86_shani.cpp index 79871bfcc1..7471828193 100644 --- a/src/crypto/sha256_x86_shani.cpp +++ b/src/crypto/sha256_x86_shani.cpp @@ -6,7 +6,7 @@ // Written and placed in public domain by Jeffrey Walton. // Based on code from Intel, and by Sean Gulley for the miTLS project. 
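A small sketch, assuming only the defines shown above: the x86 SHA-NI unit is now built with both -DENABLE_SSE41 and -DENABLE_X86_SHANI, so its guard admits code only when the SSE4.1 baseline was also configured. The function below is hypothetical; the real transform lives in sha256_x86_shani.cpp.

#if defined(ENABLE_SSE41) && defined(ENABLE_X86_SHANI)
#include <immintrin.h>

void ShaNiTransformSketch()
{
    // SHA-NI intrinsics such as _mm_sha256rnds2_epu32() can be used here,
    // because SSE4.1 support was requested at configure time as well.
}
#else
// With either define missing, the translation unit compiles to nothing and
// SHA256AutoDetect() never selects the SHA-NI transform.
#endif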
-#ifdef ENABLE_X86_SHANI +#if defined(ENABLE_SSE41) && defined(ENABLE_X86_SHANI) #include <stdint.h> #include <immintrin.h> diff --git a/src/crypto/sha3.h b/src/crypto/sha3.h index e8e91f1ee4..a28c5311ff 100644 --- a/src/crypto/sha3.h +++ b/src/crypto/sha3.h @@ -32,7 +32,7 @@ private: public: static constexpr size_t OUTPUT_SIZE = 32; - SHA3_256() {} + SHA3_256() = default; SHA3_256& Write(Span<const unsigned char> data); SHA3_256& Finalize(Span<unsigned char> output); SHA3_256& Reset(); diff --git a/src/cuckoocache.h b/src/cuckoocache.h index df320ed465..8370179395 100644 --- a/src/cuckoocache.h +++ b/src/cuckoocache.h @@ -14,7 +14,6 @@ #include <cstring> #include <limits> #include <memory> -#include <optional> #include <utility> #include <vector> @@ -360,16 +359,15 @@ public: * structure * @returns A pair of the maximum number of elements storable (see setup() * documentation for more detail) and the approximate total size of these - * elements in bytes or std::nullopt if the size requested is too large. + * elements in bytes. */ - std::optional<std::pair<uint32_t, size_t>> setup_bytes(size_t bytes) + std::pair<uint32_t, size_t> setup_bytes(size_t bytes) { - size_t requested_num_elems = bytes / sizeof(Element); - if (std::numeric_limits<uint32_t>::max() < requested_num_elems) { - return std::nullopt; - } + uint32_t requested_num_elems(std::min<size_t>( + bytes / sizeof(Element), + std::numeric_limits<uint32_t>::max())); - auto num_elems = setup(bytes/sizeof(Element)); + auto num_elems = setup(requested_num_elems); size_t approx_size_bytes = num_elems * sizeof(Element); return std::make_pair(num_elems, approx_size_bytes); diff --git a/src/flatfile.h b/src/flatfile.h index 26b466db71..a9d7edd306 100644 --- a/src/flatfile.h +++ b/src/flatfile.h @@ -18,7 +18,7 @@ struct FlatFilePos SERIALIZE_METHODS(FlatFilePos, obj) { READWRITE(VARINT_MODE(obj.nFile, VarIntMode::NONNEGATIVE_SIGNED), VARINT(obj.nPos)); } - FlatFilePos() {} + FlatFilePos() = default; FlatFilePos(int nFileIn, unsigned int nPosIn) : nFile(nFileIn), diff --git a/src/headerssync.cpp b/src/headerssync.cpp index e14de004f5..b41fe07754 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -25,7 +25,7 @@ static_assert(sizeof(CompressedHeader) == 48); HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params, const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) : - m_commit_offset(GetRand<unsigned>(HEADER_COMMITMENT_PERIOD)), + m_commit_offset(FastRandomContext().randrange<unsigned>(HEADER_COMMITMENT_PERIOD)), m_id(id), m_consensus_params(consensus_params), m_chain_start(chain_start), m_minimum_required_work(minimum_required_work), diff --git a/src/headerssync.h b/src/headerssync.h index e93f67e6da..5e399eb861 100644 --- a/src/headerssync.h +++ b/src/headerssync.h @@ -100,7 +100,7 @@ struct CompressedHeader { class HeadersSyncState { public: - ~HeadersSyncState() {} + ~HeadersSyncState() = default; enum class State { /** PRESYNC means the peer has not yet demonstrated their chain has diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 128597157d..af809eaf38 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -11,6 +11,8 @@ #include <netaddress.h> #include <rpc/protocol.h> #include <rpc/server.h> +#include <util/fs.h> +#include <util/fs_helpers.h> #include <util/strencodings.h> #include <util/string.h> #include <walletinitinterface.h> @@ -19,6 +21,7 @@ #include <iterator> #include <map> #include <memory> +#include <optional> #include <set> #include <string> #include 
<vector> @@ -291,8 +294,20 @@ static bool InitRPCAuthentication() { if (gArgs.GetArg("-rpcpassword", "") == "") { - LogPrintf("Using random cookie authentication.\n"); - if (!GenerateAuthCookie(&strRPCUserColonPass)) { + LogInfo("Using random cookie authentication.\n"); + + std::optional<fs::perms> cookie_perms{std::nullopt}; + auto cookie_perms_arg{gArgs.GetArg("-rpccookieperms")}; + if (cookie_perms_arg) { + auto perm_opt = InterpretPermString(*cookie_perms_arg); + if (!perm_opt) { + LogInfo("Invalid -rpccookieperms=%s; must be one of 'owner', 'group', or 'all'.\n", *cookie_perms_arg); + return false; + } + cookie_perms = *perm_opt; + } + + if (!GenerateAuthCookie(&strRPCUserColonPass, cookie_perms)) { return false; } } else { diff --git a/src/httpserver.h b/src/httpserver.h index 991081bab8..33216a0119 100644 --- a/src/httpserver.h +++ b/src/httpserver.h @@ -131,7 +131,7 @@ public: */ void WriteReply(int nStatus, std::string_view reply = "") { - WriteReply(nStatus, std::as_bytes(std::span{reply.data(), reply.size()})); + WriteReply(nStatus, std::as_bytes(std::span{reply})); } void WriteReply(int nStatus, std::span<const std::byte> reply); }; @@ -156,7 +156,7 @@ class HTTPClosure { public: virtual void operator()() = 0; - virtual ~HTTPClosure() {} + virtual ~HTTPClosure() = default; }; /** Event class. This can be used either as a cross-thread trigger or as a timer. diff --git a/src/index/disktxpos.h b/src/index/disktxpos.h index 1004f7ae87..a03638469e 100644 --- a/src/index/disktxpos.h +++ b/src/index/disktxpos.h @@ -20,7 +20,7 @@ struct CDiskTxPos : public FlatFilePos CDiskTxPos(const FlatFilePos &blockIn, unsigned int nTxOffsetIn) : FlatFilePos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) { } - CDiskTxPos() {} + CDiskTxPos() = default; }; #endif // BITCOIN_INDEX_DISKTXPOS_H diff --git a/src/init.cpp b/src/init.cpp index c6ef62372e..9e570d6128 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -8,8 +8,6 @@ #include <init.h> #include <kernel/checks.h> -#include <kernel/mempool_persist.h> -#include <kernel/validation_cache_sizes.h> #include <addrman.h> #include <banman.h> @@ -51,10 +49,10 @@ #include <node/interface_ui.h> #include <node/kernel_notifications.h> #include <node/mempool_args.h> +#include <node/mempool_persist.h> #include <node/mempool_persist_args.h> #include <node/miner.h> #include <node/peerman_args.h> -#include <node/validation_cache_args.h> #include <policy/feerate.h> #include <policy/fees.h> #include <policy/fees_args.h> @@ -119,17 +117,16 @@ using common::AmountErrMsg; using common::InvalidPortErrMsg; using common::ResolveErrMsg; -using kernel::DumpMempool; -using kernel::LoadMempool; -using kernel::ValidationCacheSizes; using node::ApplyArgsManOptions; using node::BlockManager; using node::CacheSizes; using node::CalculateCacheSizes; using node::DEFAULT_PERSIST_MEMPOOL; -using node::DEFAULT_PRINTPRIORITY; +using node::DEFAULT_PRINT_MODIFIED_FEE; using node::DEFAULT_STOPATHEIGHT; +using node::DumpMempool; +using node::LoadMempool; using node::KernelNotifications; using node::LoadChainstate; using node::MempoolPath; @@ -299,10 +296,11 @@ void Shutdown(NodeContext& node) StopTorControl(); + if (node.chainman && node.chainman->m_thread_load.joinable()) node.chainman->m_thread_load.join(); // After everything has been shut down, but before things get flushed, stop the - // scheduler and load block thread. + // the scheduler. After this point, SyncWithValidationInterfaceQueue() should not be called anymore + // as this would prevent the shutdown from completing. 
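A hedged sketch of the -rpccookieperms handling in the httprpc.cpp hunk above: InterpretPermString() (from util/fs_helpers.h) is assumed to map "owner"/"group"/"all" onto fs::perms; the exact permission bits below are an illustrative assumption, not taken from the patch. Usage would be e.g. -rpccookieperms=group on the command line or rpccookieperms=group in the config file.

#include <filesystem>
#include <optional>
#include <string>

namespace fs = std::filesystem;

// Hypothetical stand-in for InterpretPermString(); bit choices are assumed.
std::optional<fs::perms> InterpretPermStringSketch(const std::string& s)
{
    using p = fs::perms;
    if (s == "owner") return p::owner_read | p::owner_write;
    if (s == "group") return p::owner_read | p::owner_write | p::group_read;
    if (s == "all")   return p::owner_read | p::owner_write | p::group_read | p::others_read;
    return std::nullopt; // any other value makes InitRPCAuthentication() log an error and fail
}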
if (node.scheduler) node.scheduler->stop(); - if (node.chainman && node.chainman->m_thread_load.joinable()) node.chainman->m_thread_load.join(); // After the threads that potentially access these pointers have been stopped, // destruct and reset all to nullptr. @@ -619,12 +617,12 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-test=<option>", "Pass a test-only option. Options include : " + Join(TEST_OPTIONS_DOC, ", ") + ".", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-capturemessages", "Capture all P2P messages to disk", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_BYTES >> 20), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_VALIDATION_CACHE_BYTES >> 20), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", Ticks<std::chrono::seconds>(DEFAULT_MAX_TIP_AGE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-printpriority", strprintf("Log transaction fee rate in " + CURRENCY_UNIT + "/kvB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-printpriority", strprintf("Log transaction fee rate in " + CURRENCY_UNIT + "/kvB when mining blocks (default: %u)", DEFAULT_PRINT_MODIFIED_FEE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); SetupChainParamsBaseOptions(argsman); @@ -659,6 +657,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); argsman.AddArg("-rpcdoccheck", strprintf("Throw a non-fatal error at runtime if the documentation for an RPC is incorrect (default: %u)", DEFAULT_RPC_DOC_CHECK), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. 
(default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpccookieperms=<readable-by>", strprintf("Set permissions on the RPC auth cookie file so that it is readable by [owner|group|all] (default: owner [via umask 0077])"), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); argsman.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, signet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), signetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); argsman.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); @@ -731,73 +730,73 @@ void InitParameterInteraction(ArgsManager& args) // even when -connect or -proxy is specified if (args.IsArgSet("-bind")) { if (args.SoftSetBoolArg("-listen", true)) - LogPrintf("%s: parameter interaction: -bind set -> setting -listen=1\n", __func__); + LogInfo("parameter interaction: -bind set -> setting -listen=1\n"); } if (args.IsArgSet("-whitebind")) { if (args.SoftSetBoolArg("-listen", true)) - LogPrintf("%s: parameter interaction: -whitebind set -> setting -listen=1\n", __func__); + LogInfo("parameter interaction: -whitebind set -> setting -listen=1\n"); } if (args.IsArgSet("-connect") || args.GetIntArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS) <= 0) { // when only connecting to trusted nodes, do not seed via DNS, or listen by default if (args.SoftSetBoolArg("-dnsseed", false)) - LogPrintf("%s: parameter interaction: -connect or -maxconnections=0 set -> setting -dnsseed=0\n", __func__); + LogInfo("parameter interaction: -connect or -maxconnections=0 set -> setting -dnsseed=0\n"); if (args.SoftSetBoolArg("-listen", false)) - LogPrintf("%s: parameter interaction: -connect or -maxconnections=0 set -> setting -listen=0\n", __func__); + LogInfo("parameter interaction: -connect or -maxconnections=0 set -> setting -listen=0\n"); } std::string proxy_arg = args.GetArg("-proxy", ""); if (proxy_arg != "" && proxy_arg != "0") { // to protect privacy, do not listen by default if a default proxy server is specified if (args.SoftSetBoolArg("-listen", false)) - LogPrintf("%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__); + LogInfo("parameter interaction: -proxy set -> setting -listen=0\n"); // to protect privacy, do not map ports when a proxy is set. The user may still specify -listen=1 // to listen locally, so don't rely on this happening through -listen below. 
if (args.SoftSetBoolArg("-upnp", false)) - LogPrintf("%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__); + LogInfo("parameter interaction: -proxy set -> setting -upnp=0\n"); if (args.SoftSetBoolArg("-natpmp", false)) { - LogPrintf("%s: parameter interaction: -proxy set -> setting -natpmp=0\n", __func__); + LogInfo("parameter interaction: -proxy set -> setting -natpmp=0\n"); } // to protect privacy, do not discover addresses by default if (args.SoftSetBoolArg("-discover", false)) - LogPrintf("%s: parameter interaction: -proxy set -> setting -discover=0\n", __func__); + LogInfo("parameter interaction: -proxy set -> setting -discover=0\n"); } if (!args.GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening (pointless) if (args.SoftSetBoolArg("-upnp", false)) - LogPrintf("%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__); + LogInfo("parameter interaction: -listen=0 -> setting -upnp=0\n"); if (args.SoftSetBoolArg("-natpmp", false)) { - LogPrintf("%s: parameter interaction: -listen=0 -> setting -natpmp=0\n", __func__); + LogInfo("parameter interaction: -listen=0 -> setting -natpmp=0\n"); } if (args.SoftSetBoolArg("-discover", false)) - LogPrintf("%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__); + LogInfo("parameter interaction: -listen=0 -> setting -discover=0\n"); if (args.SoftSetBoolArg("-listenonion", false)) - LogPrintf("%s: parameter interaction: -listen=0 -> setting -listenonion=0\n", __func__); + LogInfo("parameter interaction: -listen=0 -> setting -listenonion=0\n"); if (args.SoftSetBoolArg("-i2pacceptincoming", false)) { - LogPrintf("%s: parameter interaction: -listen=0 -> setting -i2pacceptincoming=0\n", __func__); + LogInfo("parameter interaction: -listen=0 -> setting -i2pacceptincoming=0\n"); } } if (args.IsArgSet("-externalip")) { // if an explicit public IP is specified, do not try to find others if (args.SoftSetBoolArg("-discover", false)) - LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__); + LogInfo("parameter interaction: -externalip set -> setting -discover=0\n"); } if (args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { // disable whitelistrelay in blocksonly mode if (args.SoftSetBoolArg("-whitelistrelay", false)) - LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__); + LogInfo("parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n"); // Reduce default mempool size in blocksonly mode to avoid unexpected resource usage if (args.SoftSetArg("-maxmempool", ToString(DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB))) - LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -maxmempool=%d\n", __func__, DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB); + LogInfo("parameter interaction: -blocksonly=1 -> setting -maxmempool=%d\n", DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB); } // Forcing relay from whitelisted hosts implies we will accept relays from them in the first place. 
if (args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { if (args.SoftSetBoolArg("-whitelistrelay", true)) - LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__); + LogInfo("parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n"); } if (args.IsArgSet("-onlynet")) { const auto onlynets = args.GetArgs("-onlynet"); @@ -806,7 +805,7 @@ void InitParameterInteraction(ArgsManager& args) return n == NET_IPV4 || n == NET_IPV6; }); if (!clearnet_reachable && args.SoftSetBoolArg("-dnsseed", false)) { - LogPrintf("%s: parameter interaction: -onlynet excludes IPv4 and IPv6 -> setting -dnsseed=0\n", __func__); + LogInfo("parameter interaction: -onlynet excludes IPv4 and IPv6 -> setting -dnsseed=0\n"); } } } @@ -838,7 +837,7 @@ std::set<BlockFilterType> g_enabled_filter_types; { // Rather than throwing std::bad-alloc if allocation fails, terminate // immediately to (try to) avoid chain corruption. - // Since LogPrintf may itself allocate memory, set the handler directly + // Since logging may itself allocate memory, set the handler directly // to terminate first. std::set_new_handler(std::terminate); LogError("Out of memory. Terminating.\n"); @@ -1153,14 +1152,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) args.GetArg("-datadir", ""), fs::PathToString(fs::current_path())); } - ValidationCacheSizes validation_cache_sizes{}; - ApplyArgsManOptions(args, validation_cache_sizes); - if (!InitSignatureCache(validation_cache_sizes.signature_cache_bytes) - || !InitScriptExecutionCache(validation_cache_sizes.script_execution_cache_bytes)) - { - return InitError(strprintf(_("Unable to allocate memory for -maxsigcachesize: '%s' MiB"), args.GetIntArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_BYTES >> 20))); - } - assert(!node.scheduler); node.scheduler = std::make_unique<CScheduler>(); auto& scheduler = *node.scheduler; @@ -1272,11 +1263,12 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.addrman = std::move(*addrman); } + FastRandomContext rng; assert(!node.banman); node.banman = std::make_unique<BanMan>(args.GetDataDirNet() / "banlist", &uiInterface, args.GetIntArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); assert(!node.connman); - node.connman = std::make_unique<CConnman>(GetRand<uint64_t>(), - GetRand<uint64_t>(), + node.connman = std::make_unique<CConnman>(rng.rand64(), + rng.rand64(), *node.addrman, *node.netgroupman, chainparams, args.GetBoolArg("-networkactive", true)); assert(!node.fee_estimator); @@ -1889,6 +1881,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) CService onion_service_target; if (!connOptions.onion_binds.empty()) { onion_service_target = connOptions.onion_binds.front(); + } else if (!connOptions.vBinds.empty()) { + onion_service_target = connOptions.vBinds.front(); } else { onion_service_target = DefaultOnionServiceTarget(); connOptions.onion_binds.push_back(onion_service_target); diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h index 9da5cb9637..af45f81f95 100644 --- a/src/interfaces/chain.h +++ b/src/interfaces/chain.h @@ -123,7 +123,7 @@ struct BlockInfo { class Chain { public: - virtual ~Chain() {} + virtual ~Chain() = default; //! Get current chain height, not including genesis block (returns 0 if //! 
chain only contains genesis block, nullopt if chain does not contain @@ -309,7 +309,7 @@ public: class Notifications { public: - virtual ~Notifications() {} + virtual ~Notifications() = default; virtual void transactionAddedToMempool(const CTransactionRef& tx) {} virtual void transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason) {} virtual void blockConnected(ChainstateRole role, const BlockInfo& block) {} @@ -371,7 +371,7 @@ public: class ChainClient { public: - virtual ~ChainClient() {} + virtual ~ChainClient() = default; //! Register rpcs. virtual void registerRpcs() = 0; diff --git a/src/interfaces/echo.h b/src/interfaces/echo.h index 5578d9d9e6..964dbb02fa 100644 --- a/src/interfaces/echo.h +++ b/src/interfaces/echo.h @@ -13,7 +13,7 @@ namespace interfaces { class Echo { public: - virtual ~Echo() {} + virtual ~Echo() = default; //! Echo provided string. virtual std::string echo(const std::string& echo) = 0; diff --git a/src/interfaces/handler.h b/src/interfaces/handler.h index 7751d82347..6fc14ed0b4 100644 --- a/src/interfaces/handler.h +++ b/src/interfaces/handler.h @@ -22,7 +22,7 @@ namespace interfaces { class Handler { public: - virtual ~Handler() {} + virtual ~Handler() = default; //! Disconnect the handler. virtual void disconnect() = 0; diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index b96881f67c..974490561a 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -5,6 +5,7 @@ #ifndef BITCOIN_INTERFACES_MINING_H #define BITCOIN_INTERFACES_MINING_H +#include <memory> #include <optional> #include <uint256.h> @@ -25,7 +26,7 @@ namespace interfaces { class Mining { public: - virtual ~Mining() {} + virtual ~Mining() = default; //! If this chain is exclusively used for testing virtual bool isTestChain() = 0; @@ -44,6 +45,7 @@ public: * @returns a block template */ virtual std::unique_ptr<node::CBlockTemplate> createNewBlock(const CScript& script_pub_key, bool use_mempool = true) = 0; + /** * Processes new block. A valid new block is automatically relayed to peers. * @@ -62,12 +64,12 @@ public: * Only works on top of our current best block. * Does not check proof-of-work. * - * @param[out] state details of why a block failed to validate * @param[in] block the block to validate * @param[in] check_merkle_root call CheckMerkleRoot() - * @returns false if any of the checks fail + * @param[out] state details of why a block failed to validate + * @returns false if it does not build on the current tip, or any of the checks fail */ - virtual bool testBlockValidity(BlockValidationState& state, const CBlock& block, bool check_merkle_root = true) = 0; + virtual bool testBlockValidity(const CBlock& block, bool check_merkle_root, BlockValidationState& state) = 0; //! Get internal node context. Useful for RPC and testing, //! but not accessible across processes. diff --git a/src/interfaces/node.h b/src/interfaces/node.h index 2bb895dd47..b9b2306ce3 100644 --- a/src/interfaces/node.h +++ b/src/interfaces/node.h @@ -59,7 +59,7 @@ struct BlockAndHeaderTipInfo class ExternalSigner { public: - virtual ~ExternalSigner() {}; + virtual ~ExternalSigner() = default; //! Get signer display name virtual std::string getName() = 0; @@ -69,7 +69,7 @@ public: class Node { public: - virtual ~Node() {} + virtual ~Node() = default; //! Init logging. virtual void initLogging() = 0; @@ -162,6 +162,9 @@ public: //! Get mempool dynamic usage. virtual size_t getMempoolDynamicUsage() = 0; + //! Get mempool maximum memory usage. 
+ virtual size_t getMempoolMaxUsage() = 0; + //! Get header tip height and time. virtual bool getHeaderTip(int& height, int64_t& block_time) = 0; diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h index f7bcca58cf..c573d6aa65 100644 --- a/src/interfaces/wallet.h +++ b/src/interfaces/wallet.h @@ -65,7 +65,7 @@ using WalletValueMap = std::map<std::string, std::string>; class Wallet { public: - virtual ~Wallet() {} + virtual ~Wallet() = default; //! Encrypt wallet. virtual bool encryptWallet(const SecureString& wallet_passphrase) = 0; diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index bf3a340cb8..2b729e3b7a 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -495,13 +495,20 @@ public: }; m_assumeutxo_data = { - { + { // For use by unit tests .height = 110, .hash_serialized = AssumeutxoHash{uint256S("0x6657b736d4fe4db0cbc796789e812d5dba7f5c143764b1b6905612f1830609d1")}, .nChainTx = 111, .blockhash = uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c") }, { + // For use by fuzz target src/test/fuzz/utxo_snapshot.cpp + .height = 200, + .hash_serialized = AssumeutxoHash{uint256S("0x4f34d431c3e482f6b0d67b64609ece3964dc8d7976d02ac68dd7c9c1421738f2")}, + .nChainTx = 201, + .blockhash = uint256S("0x5e93653318f294fb5aa339d00bbf8cf1c3515488ad99412c37608b139ea63b27"), + }, + { // For use by test/functional/feature_assumeutxo.py .height = 299, .hash_serialized = AssumeutxoHash{uint256S("0xa4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27")}, diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index 05ebd07ec7..5d45a1fa9c 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -163,7 +163,7 @@ public: static std::unique_ptr<const CChainParams> TestNet(); protected: - CChainParams() {} + CChainParams() = default; Consensus::Params consensus; MessageStartChars pchMessageStart; diff --git a/src/kernel/chainstatemanager_opts.h b/src/kernel/chainstatemanager_opts.h index 076841c3c9..1b605f3d55 100644 --- a/src/kernel/chainstatemanager_opts.h +++ b/src/kernel/chainstatemanager_opts.h @@ -9,6 +9,7 @@ #include <arith_uint256.h> #include <dbwrapper.h> +#include <script/sigcache.h> #include <txdb.h> #include <uint256.h> #include <util/time.h> @@ -48,6 +49,8 @@ struct ChainstateManagerOpts { ValidationSignals* signals{nullptr}; //! Number of script check worker threads. Zero means no parallel verification. 
int worker_threads_num{0}; + size_t script_execution_cache_bytes{DEFAULT_SCRIPT_EXECUTION_CACHE_BYTES}; + size_t signature_cache_bytes{DEFAULT_SIGNATURE_CACHE_BYTES}; }; } // namespace kernel diff --git a/src/kernel/notifications_interface.h b/src/kernel/notifications_interface.h index 8e090dd7db..ef72d9bdb6 100644 --- a/src/kernel/notifications_interface.h +++ b/src/kernel/notifications_interface.h @@ -35,7 +35,7 @@ bool IsInterrupted(const T& result) class Notifications { public: - virtual ~Notifications(){}; + virtual ~Notifications() = default; [[nodiscard]] virtual InterruptResult blockTip(SynchronizationState state, CBlockIndex& index) { return {}; } virtual void headerTip(SynchronizationState state, int64_t height, int64_t timestamp, bool presync) {} diff --git a/src/kernel/validation_cache_sizes.h b/src/kernel/validation_cache_sizes.h deleted file mode 100644 index 72e4d1a52c..0000000000 --- a/src/kernel/validation_cache_sizes.h +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2022 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_KERNEL_VALIDATION_CACHE_SIZES_H -#define BITCOIN_KERNEL_VALIDATION_CACHE_SIZES_H - -#include <script/sigcache.h> - -#include <cstddef> -#include <limits> - -namespace kernel { -struct ValidationCacheSizes { - size_t signature_cache_bytes{DEFAULT_MAX_SIG_CACHE_BYTES / 2}; - size_t script_execution_cache_bytes{DEFAULT_MAX_SIG_CACHE_BYTES / 2}; -}; -} - -#endif // BITCOIN_KERNEL_VALIDATION_CACHE_SIZES_H @@ -75,13 +75,15 @@ public: CKey& operator=(const CKey& other) { - if (other.keydata) { - MakeKeyData(); - *keydata = *other.keydata; - } else { - ClearKeyData(); + if (this != &other) { + if (other.keydata) { + MakeKeyData(); + *keydata = *other.keydata; + } else { + ClearKeyData(); + } + fCompressed = other.fCompressed; } - fCompressed = other.fCompressed; return *this; } diff --git a/src/merkleblock.h b/src/merkleblock.h index 12b41a581e..945b7d3341 100644 --- a/src/merkleblock.h +++ b/src/merkleblock.h @@ -147,7 +147,7 @@ public: // Create from a CBlock, matching the txids in the set CMerkleBlock(const CBlock& block, const std::set<Txid>& txids) : CMerkleBlock{block, nullptr, &txids} {} - CMerkleBlock() {} + CMerkleBlock() = default; SERIALIZE_METHODS(CMerkleBlock, obj) { READWRITE(obj.header, obj.txn); } diff --git a/src/net.cpp b/src/net.cpp index 990c58ee3d..3d3f9f4ba7 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -415,7 +415,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo if (pszDest) { std::vector<CService> resolved{Lookup(pszDest, default_port, fNameLookup && !HaveNameProxy(), 256)}; if (!resolved.empty()) { - Shuffle(resolved.begin(), resolved.end(), FastRandomContext()); + std::shuffle(resolved.begin(), resolved.end(), FastRandomContext()); // If the connection is made by name, it can be the case that the name resolves to more than one address. 
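A minimal sketch of the Shuffle() to std::shuffle migration in the net.cpp hunks above, assuming FastRandomContext continues to satisfy the standard UniformRandomBitGenerator requirements (result_type, min(), max(), operator()()):

#include <random.h>   // FastRandomContext

#include <algorithm>
#include <vector>

void ShuffleSketch()
{
    std::vector<int> order{1, 2, 3, 4, 5};
    FastRandomContext rng; // pass true for a deterministic seed in tests/benchmarks
    std::shuffle(order.begin(), order.end(), rng); // replaces the custom Shuffle() helper
}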
// We don't want to connect any more of them if we are already connected to one for (const auto& r : resolved) { @@ -1983,7 +1983,11 @@ bool CConnman::InactivityCheck(const CNode& node) const } if (!node.fSuccessfullyConnected) { - LogPrint(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId()); + if (node.m_transport->GetInfo().transport_type == TransportProtocolType::DETECTING) { + LogPrint(BCLog::NET, "V2 handshake timeout peer=%d\n", node.GetId()); + } else { + LogPrint(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId()); + } return true; } @@ -2208,7 +2212,7 @@ void CConnman::ThreadDNSAddressSeed() FastRandomContext rng; std::vector<std::string> seeds = m_params.DNSSeeds(); - Shuffle(seeds.begin(), seeds.end(), rng); + std::shuffle(seeds.begin(), seeds.end(), rng); int seeds_right_now = 0; // Number of seeds left before testing if we have enough connections if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) { @@ -2435,7 +2439,7 @@ bool CConnman::MultipleManualOrFullOutboundConns(Network net) const bool CConnman::MaybePickPreferredNetwork(std::optional<Network>& network) { std::array<Network, 5> nets{NET_IPV4, NET_IPV6, NET_ONION, NET_I2P, NET_CJDNS}; - Shuffle(nets.begin(), nets.end(), FastRandomContext()); + std::shuffle(nets.begin(), nets.end(), FastRandomContext()); LOCK(m_nodes_mutex); for (const auto net : nets) { @@ -2481,9 +2485,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) auto start = GetTime<std::chrono::microseconds>(); // Minimum time before next feeler connection (in microseconds). - auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL); - auto next_extra_block_relay = GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); - auto next_extra_network_peer{GetExponentialRand(start, EXTRA_NETWORK_PEER_INTERVAL)}; + auto next_feeler = start + rng.rand_exp_duration(FEELER_INTERVAL); + auto next_extra_block_relay = start + rng.rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + auto next_extra_network_peer{start + rng.rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL)}; const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED); bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS); const bool use_seednodes{gArgs.IsArgSet("-seednode")}; @@ -2642,10 +2646,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // Because we can promote these connections to block-relay-only // connections, they do not get their own ConnectionType enum // (similar to how we deal with extra outbound peers). - next_extra_block_relay = GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + next_extra_block_relay = now + rng.rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); conn_type = ConnectionType::BLOCK_RELAY; } else if (now > next_feeler) { - next_feeler = GetExponentialRand(now, FEELER_INTERVAL); + next_feeler = now + rng.rand_exp_duration(FEELER_INTERVAL); conn_type = ConnectionType::FEELER; fFeeler = true; } else if (nOutboundFullRelay == m_max_outbound_full_relay && @@ -2658,7 +2662,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // This is not attempted if the user changed -maxconnections to a value // so low that less than MAX_OUTBOUND_FULL_RELAY_CONNECTIONS are made, // to prevent interactions with otherwise protected outbound peers. 
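A short sketch of the timer pattern in ThreadOpenConnections() above, assuming rand_exp_duration(mean) returns an exponentially distributed duration with the given mean, as the hunks use it in place of GetExponentialRand(); the 2-minute mean is illustrative only.

#include <random.h>
#include <util/time.h>

#include <chrono>

void NextFeelerSketch()
{
    using namespace std::chrono_literals;
    FastRandomContext rng;
    const auto start{GetTime<std::chrono::microseconds>()};
    // Next event time = now + Exp(mean), matching the feeler/extra-peer timers.
    const auto next_feeler{start + rng.rand_exp_duration(2min)};
    (void)next_feeler;
}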
- next_extra_network_peer = GetExponentialRand(now, EXTRA_NETWORK_PEER_INTERVAL); + next_extra_network_peer = now + rng.rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL); } else { // skip to next iteration of while loop continue; @@ -3198,24 +3202,36 @@ bool CConnman::Bind(const CService& addr_, unsigned int flags, NetPermissionFlag bool CConnman::InitBinds(const Options& options) { - bool fBound = false; for (const auto& addrBind : options.vBinds) { - fBound |= Bind(addrBind, BF_REPORT_ERROR, NetPermissionFlags::None); + if (!Bind(addrBind, BF_REPORT_ERROR, NetPermissionFlags::None)) { + return false; + } } for (const auto& addrBind : options.vWhiteBinds) { - fBound |= Bind(addrBind.m_service, BF_REPORT_ERROR, addrBind.m_flags); + if (!Bind(addrBind.m_service, BF_REPORT_ERROR, addrBind.m_flags)) { + return false; + } } for (const auto& addr_bind : options.onion_binds) { - fBound |= Bind(addr_bind, BF_DONT_ADVERTISE, NetPermissionFlags::None); + if (!Bind(addr_bind, BF_REPORT_ERROR | BF_DONT_ADVERTISE, NetPermissionFlags::None)) { + return false; + } } if (options.bind_on_any) { + // Don't consider errors to bind on IPv6 "::" fatal because the host OS + // may not have IPv6 support and the user did not explicitly ask us to + // bind on that. + const CService ipv6_any{in6_addr(IN6ADDR_ANY_INIT), GetListenPort()}; // :: + Bind(ipv6_any, BF_NONE, NetPermissionFlags::None); + struct in_addr inaddr_any; inaddr_any.s_addr = htonl(INADDR_ANY); - struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT; - fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE, NetPermissionFlags::None); - fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::None); + const CService ipv4_any{inaddr_any, GetListenPort()}; // 0.0.0.0 + if (!Bind(ipv4_any, BF_REPORT_ERROR, NetPermissionFlags::None)) { + return false; + } } - return fBound; + return true; } bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) @@ -3475,7 +3491,8 @@ std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addres // nodes to be "terrible" (see IsTerrible()) if the timestamps are older than 30 days, // max. 24 hours of "penalty" due to cache shouldn't make any meaningful difference // in terms of the freshness of the response. - cache_entry.m_cache_entry_expiration = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6)); + cache_entry.m_cache_entry_expiration = current_time + + 21h + FastRandomContext().randrange<std::chrono::microseconds>(6h); } return cache_entry.m_addrs_response_cache; } @@ -250,7 +250,7 @@ public: /** The Transport converts one connection's sent messages to wire bytes, and received bytes back. 
*/ class Transport { public: - virtual ~Transport() {} + virtual ~Transport() = default; struct Info { @@ -991,8 +991,8 @@ public: /** Mutex for anything that is only accessed via the msg processing thread */ static Mutex g_msgproc_mutex; - /** Initialize a peer (setup state, queue any initial messages) */ - virtual void InitializeNode(CNode& node, ServiceFlags our_services) = 0; + /** Initialize a peer (setup state) */ + virtual void InitializeNode(const CNode& node, ServiceFlags our_services) = 0; /** Handle removal of a peer (clear state) */ virtual void FinalizeNode(const CNode& node) = 0; @@ -1625,7 +1625,7 @@ private: } } if (shuffle) { - Shuffle(m_nodes_copy.begin(), m_nodes_copy.end(), FastRandomContext{}); + std::shuffle(m_nodes_copy.begin(), m_nodes_copy.end(), FastRandomContext{}); } } diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 89b9488584..d674758abd 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -243,6 +243,9 @@ struct Peer { * Most peers use headers-first syncing, which doesn't use this mechanism */ uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {}; + /** Set to true once initial VERSION message was sent (only relevant for outbound peers). */ + bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; + /** This peer's reported block height when we connected */ std::atomic<int> m_starting_height{-1}; @@ -498,7 +501,7 @@ public: EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); /** Implement NetEventsInterface */ - void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex); bool HasAllDesirableServiceFlags(ServiceFlags services) const override; bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override @@ -936,7 +939,7 @@ private: * accurately determine when we received the transaction (and potentially * determine the transaction's origin). */ std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, - std::chrono::seconds average_interval); + std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); // All of the following cache a recent block, and are protected by m_most_recent_block_mutex @@ -1096,7 +1099,7 @@ private: bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) - EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); + EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); /** * Validation logic for compact filters request handling. @@ -1244,7 +1247,7 @@ std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::micros // If this function were called from multiple threads simultaneously // it would possible that both update the next send variable, and return a different result to their caller. // This is not possible in practice as only the net processing thread invokes this function. 
- m_next_inv_to_inbounds = GetExponentialRand(now, average_interval); + m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval); } return m_next_inv_to_inbounds; } @@ -1659,7 +1662,7 @@ void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_s if (state) state->m_last_block_announcement = time_in_seconds; } -void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services) +void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services) { NodeId nodeid = node.GetId(); { @@ -1677,9 +1680,6 @@ void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services) LOCK(m_peer_mutex); m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer); } - if (!node.IsInboundConn()) { - PushNodeVersion(node, *peer); - } } void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) @@ -1698,7 +1698,7 @@ void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) // Schedule next run for 10-15 minutes in the future. // We add randomness on every cycle to avoid the possibility of P2P fingerprinting. - const std::chrono::milliseconds delta = 10min + GetRandMillis(5min); + const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); } @@ -2050,7 +2050,7 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); // schedule next run for 10-15 minutes in the future - const std::chrono::milliseconds delta = 10min + GetRandMillis(5min); + const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); } @@ -2124,7 +2124,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo */ void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) { - auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock); + auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64()); LOCK(cs_main); @@ -2522,7 +2522,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block); } else { - CBlockHeaderAndShortTxIDs cmpctblock{*pblock}; + CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()}; MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock); } } else { @@ -3339,7 +3339,7 @@ std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPacka // Create a random permutation of the indices. 
std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size()); std::iota(tx_indices.begin(), tx_indices.end(), 0); - Shuffle(tx_indices.begin(), tx_indices.end(), m_rng); + std::shuffle(tx_indices.begin(), tx_indices.end(), m_rng); for (const auto index : tx_indices) { // If we already tried a package and failed for any reason, the combined hash was @@ -4106,7 +4106,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr); uint64_t num_proc = 0; uint64_t num_rate_limit = 0; - Shuffle(vAddr.begin(), vAddr.end(), m_rng); + std::shuffle(vAddr.begin(), vAddr.end(), m_rng); for (CAddress& addr : vAddr) { if (interruptMsgProc) @@ -5326,6 +5326,10 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt PeerRef peer = GetPeerRef(pfrom->GetId()); if (peer == nullptr) return false; + // For outbound connections, ensure that the initial VERSION message + // has been sent first before processing any incoming messages + if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false; + { LOCK(peer->m_getdata_requests_mutex); if (!peer->m_getdata_requests.empty()) { @@ -5617,7 +5621,7 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic if (pingSend) { uint64_t nonce; do { - nonce = GetRand<uint64_t>(); + nonce = FastRandomContext().rand64(); } while (nonce == 0); peer.m_ping_queued = false; peer.m_ping_start = now; @@ -5654,13 +5658,13 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()}; PushAddress(peer, local_addr); } - peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); + peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // We sent an `addr` message to this peer recently. Nothing more to do. if (current_time <= peer.m_next_addr_send) return; - peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); + peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL); if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) { // Should be impossible since we always check size before adding to @@ -5747,13 +5751,13 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); peer.m_fee_filter_sent = filterToSend; } - peer.m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL); + peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. 
else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter && (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) { - peer.m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); + peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); } } @@ -5817,6 +5821,12 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // disconnect misbehaving peers even before the version handshake is complete. if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true; + // Initiate version handshake for outbound connections + if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) { + PushNodeVersion(*pto, *peer); + peer->m_outbound_version_message_sent = true; + } + // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) return true; @@ -5984,7 +5994,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) CBlock block; const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)}; assert(ret); - CBlockHeaderAndShortTxIDs cmpctblock{block}; + CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock); } state.pindexBestHeaderSent = pBestIndex; @@ -6059,7 +6069,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (pto->IsInboundConn()) { tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); } else { - tx_relay->m_next_inv_send_time = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL); + tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); } } @@ -6244,10 +6254,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // before the background chainstate to prioritize getting to network tip. FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller); if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) { + // If the background tip is not an ancestor of the snapshot block, + // we need to start requesting blocks from their last common ancestor. + const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock()); TryDownloadingHistoricalBlocks( *peer, get_inflight_budget(), - vToDownload, m_chainman.GetBackgroundSyncTip(), + vToDownload, from_tip, Assert(m_chainman.GetSnapshotBaseBlock())); } for (const CBlockIndex *pindex : vToDownload) { diff --git a/src/net_processing.h b/src/net_processing.h index bf9698ee02..a413db98e8 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -76,7 +76,7 @@ public: static std::unique_ptr<PeerManager> make(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, node::Warnings& warnings, Options opts); - virtual ~PeerManager() { } + virtual ~PeerManager() = default; /** * Attempt to manually fetch block from a given peer. We must already have the header. 
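The assumeutxo hunk above starts historical block download from the last common ancestor of the background tip and the snapshot base block. A simplified, self-contained sketch of what that lookup involves, walking the deeper entry up via pprev until both chains meet (the real LastCommonAncestor helper may take shortcuts via ancestor pointers; this only illustrates the relationship relied on above):

// Naive sketch of a last-common-ancestor walk over a pprev-linked chain.
struct BlockIndexSketch {
    int nHeight{0};
    const BlockIndexSketch* pprev{nullptr};
};

const BlockIndexSketch* LastCommonAncestorSketch(const BlockIndexSketch* a, const BlockIndexSketch* b)
{
    while (a && b && a != b) {
        if (a->nHeight >= b->nHeight) a = a->pprev; else b = b->pprev;
    }
    return (a == b) ? a : nullptr;
}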
diff --git a/src/net_types.h b/src/net_types.h index b9e019d8fd..21ef835b4e 100644 --- a/src/net_types.h +++ b/src/net_types.h @@ -19,7 +19,7 @@ public: int64_t nCreateTime{0}; int64_t nBanUntil{0}; - CBanEntry() {} + CBanEntry() = default; explicit CBanEntry(int64_t nCreateTimeIn) : nCreateTime{nCreateTimeIn} {} diff --git a/src/netaddress.h b/src/netaddress.h index 52fecada1c..24f5c3fb96 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -567,8 +567,8 @@ class CServiceHash { public: CServiceHash() - : m_salt_k0{GetRand<uint64_t>()}, - m_salt_k1{GetRand<uint64_t>()} + : m_salt_k0{FastRandomContext().rand64()}, + m_salt_k1{FastRandomContext().rand64()} { } diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index fb62e78138..c50625f58d 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -588,18 +588,18 @@ const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data) return nullptr; } -bool BlockManager::IsBlockPruned(const CBlockIndex& block) +bool BlockManager::IsBlockPruned(const CBlockIndex& block) const { AssertLockHeld(::cs_main); return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0); } -const CBlockIndex* BlockManager::GetFirstStoredBlock(const CBlockIndex& upper_block, const CBlockIndex* lower_block) +const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const { AssertLockHeld(::cs_main); const CBlockIndex* last_block = &upper_block; - assert(last_block->nStatus & BLOCK_HAVE_DATA); // 'upper_block' must have data - while (last_block->pprev && (last_block->pprev->nStatus & BLOCK_HAVE_DATA)) { + assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask + while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) { if (lower_block) { // Return if we reached the lower_block if (last_block == lower_block) return lower_block; @@ -616,7 +616,7 @@ const CBlockIndex* BlockManager::GetFirstStoredBlock(const CBlockIndex& upper_bl bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block) { if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false; - return GetFirstStoredBlock(upper_block, &lower_block) == &lower_block; + return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block; } // If we're using -prune with -reindex, then delete block files that will be ignored by the @@ -703,15 +703,10 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in { const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())}; - if (pos.IsNull()) { - LogError("%s: no undo data available\n", __func__); - return false; - } - // Open history file to read AutoFile filein{OpenUndoFile(pos, true)}; if (filein.IsNull()) { - LogError("%s: OpenUndoFile failed\n", __func__); + LogError("%s: OpenUndoFile failed for %s\n", __func__, pos.ToString()); return false; } @@ -723,13 +718,13 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception& e) { - LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what()); + LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString()); return false; } // Verify checksum if (hashChecksum != verifier.GetHash()) { - LogError("%s: Checksum mismatch\n", __func__); + LogError("%s: Checksum mismatch at %s\n", 
__func__, pos.ToString()); return false; } @@ -986,7 +981,7 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const // Open history file to append AutoFile fileout{OpenBlockFile(pos)}; if (fileout.IsNull()) { - LogError("WriteBlockToDisk: OpenBlockFile failed\n"); + LogError("%s: OpenBlockFile failed\n", __func__); return false; } @@ -997,7 +992,7 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { - LogError("WriteBlockToDisk: ftell failed\n"); + LogError("%s: ftell failed\n", __func__); return false; } pos.nPos = (unsigned int)fileOutPos; @@ -1016,7 +1011,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid if (block.GetUndoPos().IsNull()) { FlatFilePos _pos; if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) { - LogError("ConnectBlock(): FindUndoPos failed\n"); + LogError("%s: FindUndoPos failed\n", __func__); return false; } if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) { @@ -1055,7 +1050,7 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) cons // Open history file to read AutoFile filein{OpenBlockFile(pos, true)}; if (filein.IsNull()) { - LogError("ReadBlockFromDisk: OpenBlockFile failed for %s\n", pos.ToString()); + LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString()); return false; } @@ -1069,13 +1064,13 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) cons // Check the header if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) { - LogError("ReadBlockFromDisk: Errors in block header at %s\n", pos.ToString()); + LogError("%s: Errors in block header at %s\n", __func__, pos.ToString()); return false; } // Signet only: check block solution if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) { - LogError("ReadBlockFromDisk: Errors in block solution at %s\n", pos.ToString()); + LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString()); return false; } @@ -1090,8 +1085,7 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) co return false; } if (block.GetHash() != index.GetBlockHash()) { - LogError("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s\n", - index.ToString(), block_pos.ToString()); + LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString()); return false; } return true; diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 108a08a72b..a946b4ea94 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -372,16 +372,39 @@ public: //! (part of the same chain). bool CheckBlockDataAvailability(const CBlockIndex& upper_block LIFETIMEBOUND, const CBlockIndex& lower_block LIFETIMEBOUND) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - //! Find the first stored ancestor of start_block immediately after the last - //! pruned ancestor. Return value will never be null. Caller is responsible - //! for ensuring that start_block has data is not pruned. - const CBlockIndex* GetFirstStoredBlock(const CBlockIndex& start_block LIFETIMEBOUND, const CBlockIndex* lower_block=nullptr) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + /** + * @brief Returns the earliest block with specified `status_mask` flags set after + * the latest block _not_ having those flags. 
+ * + * This function starts from `upper_block`, which must have all + * `status_mask` flags set, and iterates backwards through its ancestors. It + * continues as long as each block has all `status_mask` flags set, until + * reaching the oldest ancestor or `lower_block`. + * + * @pre `upper_block` must have all `status_mask` flags set. + * @pre `lower_block` must be null or an ancestor of `upper_block` + * + * @param upper_block The starting block for the search, which must have all + * `status_mask` flags set. + * @param status_mask Bitmask specifying required status flags. + * @param lower_block The earliest possible block to return. If null, the + * search can extend to the genesis block. + * + * @return A non-null pointer to the earliest block between `upper_block` + * and `lower_block`, inclusive, such that every block between the + * returned block and `upper_block` has `status_mask` flags set. + */ + const CBlockIndex* GetFirstBlock( + const CBlockIndex& upper_block LIFETIMEBOUND, + uint32_t status_mask, + const CBlockIndex* lower_block = nullptr + ) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** True if any block files have ever been pruned. */ bool m_have_pruned = false; //! Check whether the block associated with this index entry is pruned or not. - bool IsBlockPruned(const CBlockIndex& block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + bool IsBlockPruned(const CBlockIndex& block) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); //! Create or update a prune lock identified by its name void UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); diff --git a/src/node/chainstatemanager_args.cpp b/src/node/chainstatemanager_args.cpp index bc4a815a3e..39b5f3ad3e 100644 --- a/src/node/chainstatemanager_args.cpp +++ b/src/node/chainstatemanager_args.cpp @@ -56,6 +56,16 @@ util::Result<void> ApplyArgsManOptions(const ArgsManager& args, ChainstateManage opts.worker_threads_num = std::clamp(script_threads - 1, 0, MAX_SCRIPTCHECK_THREADS); LogPrintf("Script verification uses %d additional threads\n", opts.worker_threads_num); + if (auto max_size = args.GetIntArg("-maxsigcachesize")) { + // 1. When supplied with a max_size of 0, both the signature cache and + // script execution cache create the minimum possible cache (2 + // elements). Therefore, we can use 0 as a floor here. + // 2. Multiply first, divide after to avoid integer truncation. + size_t clamped_size_each = std::max<int64_t>(*max_size, 0) * (1 << 20) / 2; + opts.script_execution_cache_bytes = clamped_size_each; + opts.signature_cache_bytes = clamped_size_each; + } + return {}; } } // namespace node diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index e0bab6e22e..ef12ffe34b 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -278,6 +278,7 @@ public: int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; } size_t getMempoolSize() override { return m_context->mempool ? m_context->mempool->size() : 0; } size_t getMempoolDynamicUsage() override { return m_context->mempool ? m_context->mempool->DynamicMemoryUsage() : 0; } + size_t getMempoolMaxUsage() override { return m_context->mempool ? 
m_context->mempool->m_opts.max_size_bytes : 0; } bool getHeaderTip(int& height, int64_t& block_time) override { LOCK(::cs_main); @@ -870,10 +871,17 @@ public: return context()->mempool->GetTransactionsUpdated(); } - bool testBlockValidity(BlockValidationState& state, const CBlock& block, bool check_merkle_root) override + bool testBlockValidity(const CBlock& block, bool check_merkle_root, BlockValidationState& state) override { - LOCK(::cs_main); - return TestBlockValidity(state, chainman().GetParams(), chainman().ActiveChainstate(), block, chainman().ActiveChain().Tip(), /*fCheckPOW=*/false, check_merkle_root); + LOCK(cs_main); + CBlockIndex* tip{chainman().ActiveChain().Tip()}; + // Fail if the tip updated before the lock was taken + if (block.hashPrevBlock != tip->GetBlockHash()) { + state.Error("Block does not connect to current chain tip."); + return false; + } + + return TestBlockValidity(state, chainman().GetParams(), chainman().ActiveChainstate(), block, tip, /*fCheckPOW=*/false, check_merkle_root); } std::unique_ptr<CBlockTemplate> createNewBlock(const CScript& script_pub_key, bool use_mempool) override @@ -881,7 +889,6 @@ public: BlockAssembler::Options options; ApplyArgsManOptions(gArgs, options); - LOCK(::cs_main); return BlockAssembler{chainman().ActiveChainstate(), use_mempool ? context()->mempool.get() : nullptr, options}.CreateNewBlock(script_pub_key); } diff --git a/src/kernel/mempool_persist.cpp b/src/node/mempool_persist.cpp index 53028a45ae..a265c2e12d 100644 --- a/src/kernel/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -2,7 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <kernel/mempool_persist.h> +#include <node/mempool_persist.h> #include <clientversion.h> #include <consensus/amount.h> @@ -33,7 +33,7 @@ using fsbridge::FopenFn; -namespace kernel { +namespace node { static const uint64_t MEMPOOL_DUMP_VERSION_NO_XOR_KEY{1}; static const uint64_t MEMPOOL_DUMP_VERSION{2}; @@ -218,4 +218,4 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock return true; } -} // namespace kernel +} // namespace node diff --git a/src/kernel/mempool_persist.h b/src/node/mempool_persist.h index e124a8eadf..7c5754a90c 100644 --- a/src/kernel/mempool_persist.h +++ b/src/node/mempool_persist.h @@ -2,15 +2,15 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#ifndef BITCOIN_KERNEL_MEMPOOL_PERSIST_H -#define BITCOIN_KERNEL_MEMPOOL_PERSIST_H +#ifndef BITCOIN_NODE_MEMPOOL_PERSIST_H +#define BITCOIN_NODE_MEMPOOL_PERSIST_H #include <util/fs.h> class Chainstate; class CTxMemPool; -namespace kernel { +namespace node { /** Dump the mempool to a file. 
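For the -maxsigcachesize handling moved into ApplyArgsManOptions above, the "multiply first, divide after" note matters whenever the configured value is odd. A small worked example (the value 33 is purely illustrative):

#include <cassert>
#include <cstdint>

int main()
{
    const int64_t max_size_mib = 33; // hypothetical -maxsigcachesize value
    // Multiply first, divide after: nothing is lost to integer division.
    const int64_t multiply_first = max_size_mib * (1 << 20) / 2; // 17'301'504 bytes per cache
    // Divide first: the odd MiB disappears before the multiplication.
    const int64_t divide_first = max_size_mib / 2 * (1 << 20);   // 16'777'216 bytes per cache
    assert(multiply_first == 17'301'504);
    assert(divide_first == 16'777'216);
    return 0;
}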
*/ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, @@ -28,7 +28,7 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active_chainstate, ImportMempoolOptions&& opts); -} // namespace kernel +} // namespace node -#endif // BITCOIN_KERNEL_MEMPOOL_PERSIST_H +#endif // BITCOIN_NODE_MEMPOOL_PERSIST_H diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 03c6d74deb..291f1d5fc7 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -79,6 +79,7 @@ void ApplyArgsManOptions(const ArgsManager& args, BlockAssembler::Options& optio if (const auto blockmintxfee{args.GetArg("-blockmintxfee")}) { if (const auto parsed{ParseMoney(*blockmintxfee)}) options.blockMinFeeRate = CFeeRate{*parsed}; } + options.print_modified_fee = args.GetBoolArg("-printpriority", options.print_modified_fee); } void BlockAssembler::resetBlock() @@ -222,8 +223,7 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) nFees += iter->GetFee(); inBlock.insert(iter->GetSharedTx()->GetHash()); - bool fPrintPriority = gArgs.GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY); - if (fPrintPriority) { + if (m_options.print_modified_fee) { LogPrintf("fee rate %s txid %s\n", CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(), iter->GetTx().GetHash().ToString()); diff --git a/src/node/miner.h b/src/node/miner.h index c3178a7532..622ca16c8f 100644 --- a/src/node/miner.h +++ b/src/node/miner.h @@ -30,7 +30,7 @@ class ChainstateManager; namespace Consensus { struct Params; }; namespace node { -static const bool DEFAULT_PRINTPRIORITY = false; +static const bool DEFAULT_PRINT_MODIFIED_FEE = false; struct CBlockTemplate { @@ -159,6 +159,7 @@ public: CFeeRate blockMinFeeRate{DEFAULT_BLOCK_MIN_TX_FEE}; // Whether to call TestBlockValidity() at the end of CreateNewBlock(). bool test_block_validity{true}; + bool print_modified_fee{DEFAULT_PRINT_MODIFIED_FEE}; }; explicit BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool, const Options& options); diff --git a/src/node/mini_miner.cpp b/src/node/mini_miner.cpp index 58422c4439..d7d15554b3 100644 --- a/src/node/mini_miner.cpp +++ b/src/node/mini_miner.cpp @@ -174,7 +174,7 @@ MiniMiner::MiniMiner(const std::vector<MiniMinerMempoolEntry>& manual_entries, SanityCheck(); } -// Compare by min(ancestor feerate, individual feerate), then iterator +// Compare by min(ancestor feerate, individual feerate), then txid // // Under the ancestor-based mining approach, high-feerate children can pay for parents, but high-feerate // parents do not incentive inclusion of their children. Therefore the mining algorithm only considers @@ -183,21 +183,13 @@ struct AncestorFeerateComparator { template<typename I> bool operator()(const I& a, const I& b) const { - auto min_feerate = [](const MiniMinerMempoolEntry& e) -> CFeeRate { - const CAmount ancestor_fee{e.GetModFeesWithAncestors()}; - const int64_t ancestor_size{e.GetSizeWithAncestors()}; - const CAmount tx_fee{e.GetModifiedFee()}; - const int64_t tx_size{e.GetTxSize()}; - // Comparing ancestor feerate with individual feerate: - // ancestor_fee / ancestor_size <= tx_fee / tx_size - // Avoid division and possible loss of precision by - // multiplying both sides by the sizes: - return ancestor_fee * tx_size < tx_fee * ancestor_size ? 
- CFeeRate(ancestor_fee, ancestor_size) : - CFeeRate(tx_fee, tx_size); + auto min_feerate = [](const MiniMinerMempoolEntry& e) -> FeeFrac { + FeeFrac self_feerate(e.GetModifiedFee(), e.GetTxSize()); + FeeFrac ancestor_feerate(e.GetModFeesWithAncestors(), e.GetSizeWithAncestors()); + return std::min(ancestor_feerate, self_feerate); }; - CFeeRate a_feerate{min_feerate(a->second)}; - CFeeRate b_feerate{min_feerate(b->second)}; + FeeFrac a_feerate{min_feerate(a->second)}; + FeeFrac b_feerate{min_feerate(b->second)}; if (a_feerate != b_feerate) { return a_feerate > b_feerate; } diff --git a/src/node/mini_miner.h b/src/node/mini_miner.h index de62c0af75..aec2aaf6b6 100644 --- a/src/node/mini_miner.h +++ b/src/node/mini_miner.h @@ -63,7 +63,7 @@ struct IteratorComparator template<typename I> bool operator()(const I& a, const I& b) const { - return &(*a) < &(*b); + return a->first < b->first; } }; diff --git a/src/node/txreconciliation.cpp b/src/node/txreconciliation.cpp index d62046daaa..e6e19c5756 100644 --- a/src/node/txreconciliation.cpp +++ b/src/node/txreconciliation.cpp @@ -85,7 +85,7 @@ public: LOCK(m_txreconciliation_mutex); LogPrintLevel(BCLog::TXRECONCILIATION, BCLog::Level::Debug, "Pre-register peer=%d\n", peer_id); - const uint64_t local_salt{GetRand(UINT64_MAX)}; + const uint64_t local_salt{FastRandomContext().rand64()}; // We do this exactly once per peer (which are unique by NodeId, see GetNewNodeId) so it's // safe to assume we don't have this record yet. diff --git a/src/node/validation_cache_args.cpp b/src/node/validation_cache_args.cpp deleted file mode 100644 index ddf24f798d..0000000000 --- a/src/node/validation_cache_args.cpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2022 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#include <node/validation_cache_args.h> - -#include <kernel/validation_cache_sizes.h> - -#include <common/args.h> - -#include <algorithm> -#include <cstddef> -#include <cstdint> -#include <memory> -#include <optional> - -using kernel::ValidationCacheSizes; - -namespace node { -void ApplyArgsManOptions(const ArgsManager& argsman, ValidationCacheSizes& cache_sizes) -{ - if (auto max_size = argsman.GetIntArg("-maxsigcachesize")) { - // 1. When supplied with a max_size of 0, both InitSignatureCache and - // InitScriptExecutionCache create the minimum possible cache (2 - // elements). Therefore, we can use 0 as a floor here. - // 2. Multiply first, divide after to avoid integer truncation. - size_t clamped_size_each = std::max<int64_t>(*max_size, 0) * (1 << 20) / 2; - cache_sizes = { - .signature_cache_bytes = clamped_size_each, - .script_execution_cache_bytes = clamped_size_each, - }; - } -} -} // namespace node diff --git a/src/node/validation_cache_args.h b/src/node/validation_cache_args.h deleted file mode 100644 index f447c13b49..0000000000 --- a/src/node/validation_cache_args.h +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2022 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
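The comparator change in mini_miner.cpp above swaps CFeeRate construction for FeeFrac while keeping the comparison free of division. A minimal sketch of the underlying idea (an illustrative struct, not the real FeeFrac API):

#include <cstdint>

// Illustrative fee/size pair; not the real FeeFrac type.
struct FeeSizeSketch {
    int64_t fee;
    int64_t size; // must be > 0
};

// a has a strictly higher feerate than b
//   <=>  a.fee / a.size > b.fee / b.size
//   <=>  a.fee * b.size > b.fee * a.size   (cross-multiplying avoids the
//                                            division and its truncation)
bool HigherFeerate(const FeeSizeSketch& a, const FeeSizeSketch& b)
{
    return a.fee * b.size > b.fee * a.size;
}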
- -#ifndef BITCOIN_NODE_VALIDATION_CACHE_ARGS_H -#define BITCOIN_NODE_VALIDATION_CACHE_ARGS_H - -class ArgsManager; -namespace kernel { -struct ValidationCacheSizes; -}; - -namespace node { -void ApplyArgsManOptions(const ArgsManager& argsman, kernel::ValidationCacheSizes& cache_sizes); -} // namespace node - -#endif // BITCOIN_NODE_VALIDATION_CACHE_ARGS_H diff --git a/src/node/warnings.cpp b/src/node/warnings.cpp index b99c845900..87389e472b 100644 --- a/src/node/warnings.cpp +++ b/src/node/warnings.cpp @@ -28,8 +28,7 @@ Warnings::Warnings() } bool Warnings::Set(warning_type id, bilingual_str message) { - LOCK(m_mutex); - const auto& [_, inserted]{m_warnings.insert({id, std::move(message)})}; + const auto& [_, inserted]{WITH_LOCK(m_mutex, return m_warnings.insert({id, std::move(message)}))}; if (inserted) uiInterface.NotifyAlertChanged(); return inserted; } diff --git a/src/policy/fees.h b/src/policy/fees.h index f34f66d3f0..a95cc19dd4 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -283,7 +283,7 @@ private: { unsigned int blockHeight{0}; unsigned int bucketIndex{0}; - TxStatsInfo() {} + TxStatsInfo() = default; }; // map of txids to information about that transaction diff --git a/src/policy/v3_policy.cpp b/src/policy/truc_policy.cpp index 6bd043b8e3..69e8d5ed1d 100644 --- a/src/policy/v3_policy.cpp +++ b/src/policy/truc_policy.cpp @@ -2,7 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <policy/v3_policy.h> +#include <policy/truc_policy.h> #include <coins.h> #include <consensus/amount.h> @@ -14,7 +14,7 @@ #include <numeric> #include <vector> -/** Helper for PackageV3Checks: Returns a vector containing the indices of transactions (within +/** Helper for PackageTRUCChecks: Returns a vector containing the indices of transactions (within * package) that are direct parents of ptx. */ std::vector<size_t> FindInPackageParents(const Package& package, const CTransactionRef& ptx) { @@ -37,13 +37,13 @@ std::vector<size_t> FindInPackageParents(const Package& package, const CTransact return in_package_parents; } -/** Helper for PackageV3Checks, storing info for a mempool or package parent. */ +/** Helper for PackageTRUCChecks, storing info for a mempool or package parent. */ struct ParentInfo { /** Txid used to identify this parent by prevout */ const Txid& m_txid; /** Wtxid used for debug string */ const Wtxid& m_wtxid; - /** version used to check inheritance of v3 and non-v3 */ + /** version used to check inheritance of TRUC and non-TRUC */ decltype(CTransaction::version) m_version; /** If parent is in mempool, whether it has any descendants in mempool. */ bool m_has_mempool_descendant; @@ -55,36 +55,36 @@ struct ParentInfo { {} }; -std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t vsize, +std::optional<std::string> PackageTRUCChecks(const CTransactionRef& ptx, int64_t vsize, const Package& package, const CTxMemPool::setEntries& mempool_ancestors) { // This function is specialized for these limits, and must be reimplemented if they ever change. - static_assert(V3_ANCESTOR_LIMIT == 2); - static_assert(V3_DESCENDANT_LIMIT == 2); + static_assert(TRUC_ANCESTOR_LIMIT == 2); + static_assert(TRUC_DESCENDANT_LIMIT == 2); const auto in_package_parents{FindInPackageParents(package, ptx)}; - // Now we have all ancestors, so we can start checking v3 rules. + // Now we have all ancestors, so we can start checking TRUC rules. 
if (ptx->version == TRUC_VERSION) { - // SingleV3Checks should have checked this already. - if (!Assume(vsize <= V3_MAX_VSIZE)) { - return strprintf("v3 tx %s (wtxid=%s) is too big: %u > %u virtual bytes", - ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, V3_MAX_VSIZE); + // SingleTRUCChecks should have checked this already. + if (!Assume(vsize <= TRUC_MAX_VSIZE)) { + return strprintf("version=3 tx %s (wtxid=%s) is too big: %u > %u virtual bytes", + ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, TRUC_MAX_VSIZE); } - if (mempool_ancestors.size() + in_package_parents.size() + 1 > V3_ANCESTOR_LIMIT) { + if (mempool_ancestors.size() + in_package_parents.size() + 1 > TRUC_ANCESTOR_LIMIT) { return strprintf("tx %s (wtxid=%s) would have too many ancestors", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); } const bool has_parent{mempool_ancestors.size() + in_package_parents.size() > 0}; if (has_parent) { - // A v3 child cannot be too large. - if (vsize > V3_CHILD_MAX_VSIZE) { - return strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", + // A TRUC child cannot be too large. + if (vsize > TRUC_CHILD_MAX_VSIZE) { + return strprintf("version=3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), - vsize, V3_CHILD_MAX_VSIZE); + vsize, TRUC_CHILD_MAX_VSIZE); } // Exactly 1 parent exists, either in mempool or package. Find it. @@ -107,7 +107,7 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v // If there is a parent, it must have the right version. if (parent_info.m_version != TRUC_VERSION) { - return strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)", + return strprintf("version=3 tx %s (wtxid=%s) cannot spend from non-version=3 tx %s (wtxid=%s)", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), parent_info.m_txid.ToString(), parent_info.m_wtxid.ToString()); } @@ -118,7 +118,7 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v for (auto& input : package_tx->vin) { // Fail if we find another tx with the same parent. We don't check whether the - // sibling is to-be-replaced (done in SingleV3Checks) because these transactions + // sibling is to-be-replaced (done in SingleTRUCChecks) because these transactions // are within the same package. if (input.prevout.hash == parent_info.m_txid) { return strprintf("tx %s (wtxid=%s) would exceed descendant count limit", @@ -140,17 +140,17 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v } } } else { - // Non-v3 transactions cannot have v3 parents. + // Non-TRUC transactions cannot have TRUC parents. 
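The inheritance checks being renamed above reduce to a simple symmetry between an unconfirmed parent and child. A minimal sketch of that rule (stand-alone names, not the real helpers):

#include <cstdint>

constexpr uint32_t kTrucVersion{3}; // stand-in for TRUC_VERSION

// A parent/child pair passes the TRUC inheritance rule only when both sides
// agree on TRUC-ness: TRUC (version=3) txs may only have TRUC unconfirmed
// ancestors, and non-TRUC txs may only have non-TRUC unconfirmed ancestors.
bool VersionInheritanceOk(uint32_t parent_version, uint32_t child_version)
{
    return (parent_version == kTrucVersion) == (child_version == kTrucVersion);
}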
for (auto it : mempool_ancestors) { if (it->GetTx().version == TRUC_VERSION) { - return strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)", + return strprintf("non-version=3 tx %s (wtxid=%s) cannot spend from version=3 tx %s (wtxid=%s)", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), it->GetSharedTx()->GetHash().ToString(), it->GetSharedTx()->GetWitnessHash().ToString()); } } for (const auto& index: in_package_parents) { if (package.at(index)->version == TRUC_VERSION) { - return strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)", + return strprintf("non-version=3 tx %s (wtxid=%s) cannot spend from version=3 tx %s (wtxid=%s)", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), package.at(index)->GetHash().ToString(), @@ -161,20 +161,20 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v return std::nullopt; } -std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTransactionRef& ptx, +std::optional<std::pair<std::string, CTransactionRef>> SingleTRUCChecks(const CTransactionRef& ptx, const CTxMemPool::setEntries& mempool_ancestors, const std::set<Txid>& direct_conflicts, int64_t vsize) { - // Check v3 and non-v3 inheritance. + // Check TRUC and non-TRUC inheritance. for (const auto& entry : mempool_ancestors) { if (ptx->version != TRUC_VERSION && entry->GetTx().version == TRUC_VERSION) { - return std::make_pair(strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)", + return std::make_pair(strprintf("non-version=3 tx %s (wtxid=%s) cannot spend from version=3 tx %s (wtxid=%s)", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), entry->GetSharedTx()->GetHash().ToString(), entry->GetSharedTx()->GetWitnessHash().ToString()), nullptr); } else if (ptx->version == TRUC_VERSION && entry->GetTx().version != TRUC_VERSION) { - return std::make_pair(strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)", + return std::make_pair(strprintf("version=3 tx %s (wtxid=%s) cannot spend from non-version=3 tx %s (wtxid=%s)", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), entry->GetSharedTx()->GetHash().ToString(), entry->GetSharedTx()->GetWitnessHash().ToString()), nullptr); @@ -182,20 +182,20 @@ std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTra } // This function is specialized for these limits, and must be reimplemented if they ever change. - static_assert(V3_ANCESTOR_LIMIT == 2); - static_assert(V3_DESCENDANT_LIMIT == 2); + static_assert(TRUC_ANCESTOR_LIMIT == 2); + static_assert(TRUC_DESCENDANT_LIMIT == 2); // The rest of the rules only apply to transactions with version=3. if (ptx->version != TRUC_VERSION) return std::nullopt; - if (vsize > V3_MAX_VSIZE) { - return std::make_pair(strprintf("v3 tx %s (wtxid=%s) is too big: %u > %u virtual bytes", - ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, V3_MAX_VSIZE), + if (vsize > TRUC_MAX_VSIZE) { + return std::make_pair(strprintf("version=3 tx %s (wtxid=%s) is too big: %u > %u virtual bytes", + ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, TRUC_MAX_VSIZE), nullptr); } - // Check that V3_ANCESTOR_LIMIT would not be violated. - if (mempool_ancestors.size() + 1 > V3_ANCESTOR_LIMIT) { + // Check that TRUC_ANCESTOR_LIMIT would not be violated. 
+ if (mempool_ancestors.size() + 1 > TRUC_ANCESTOR_LIMIT) { return std::make_pair(strprintf("tx %s (wtxid=%s) would have too many ancestors", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()), nullptr); @@ -203,10 +203,10 @@ std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTra // Remaining checks only pertain to transactions with unconfirmed ancestors. if (mempool_ancestors.size() > 0) { - // If this transaction spends V3 parents, it cannot be too large. - if (vsize > V3_CHILD_MAX_VSIZE) { - return std::make_pair(strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", - ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, V3_CHILD_MAX_VSIZE), + // If this transaction spends TRUC parents, it cannot be too large. + if (vsize > TRUC_CHILD_MAX_VSIZE) { + return std::make_pair(strprintf("version=3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", + ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), vsize, TRUC_CHILD_MAX_VSIZE), nullptr); } @@ -217,14 +217,14 @@ std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTra // possible through a reorg. const auto& children = parent_entry->GetMemPoolChildrenConst(); // Don't double-count a transaction that is going to be replaced. This logic assumes that - // any descendant of the V3 transaction is a direct child, which makes sense because a V3 - // transaction can only have 1 descendant. + // any descendant of the TRUC transaction is a direct child, which makes sense because a + // TRUC transaction can only have 1 descendant. const bool child_will_be_replaced = !children.empty() && std::any_of(children.cbegin(), children.cend(), [&direct_conflicts](const CTxMemPoolEntry& child){return direct_conflicts.count(child.GetTx().GetHash()) > 0;}); - if (parent_entry->GetCountWithDescendants() + 1 > V3_DESCENDANT_LIMIT && !child_will_be_replaced) { - // Allow sibling eviction for v3 transaction: if another child already exists, even if - // we don't conflict inputs with it, consider evicting it under RBF rules. We rely on v3 rules + if (parent_entry->GetCountWithDescendants() + 1 > TRUC_DESCENDANT_LIMIT && !child_will_be_replaced) { + // Allow sibling eviction for TRUC transaction: if another child already exists, even if + // we don't conflict inputs with it, consider evicting it under RBF rules. We rely on TRUC rules // only permitting 1 descendant, as otherwise we would need to have logic for deciding // which descendant to evict. Skip if this isn't true, e.g. if the transaction has // multiple children or the sibling also has descendants due to a reorg. diff --git a/src/policy/truc_policy.h b/src/policy/truc_policy.h new file mode 100644 index 0000000000..dbc77696c6 --- /dev/null +++ b/src/policy/truc_policy.h @@ -0,0 +1,94 @@ +// Copyright (c) 2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_POLICY_TRUC_POLICY_H +#define BITCOIN_POLICY_TRUC_POLICY_H + +#include <consensus/amount.h> +#include <policy/packages.h> +#include <policy/policy.h> +#include <primitives/transaction.h> +#include <txmempool.h> +#include <util/result.h> + +#include <set> +#include <string> + +// This module enforces rules for BIP 431 TRUC transactions which help make +// RBF abilities more robust. A transaction with version=3 is treated as TRUC. 
+static constexpr decltype(CTransaction::version) TRUC_VERSION{3}; + +// TRUC only allows 1 parent and 1 child when unconfirmed. This translates to a descendant set size +// of 2 and ancestor set size of 2. +/** Maximum number of transactions including an unconfirmed tx and its descendants. */ +static constexpr unsigned int TRUC_DESCENDANT_LIMIT{2}; +/** Maximum number of transactions including a TRUC tx and all its mempool ancestors. */ +static constexpr unsigned int TRUC_ANCESTOR_LIMIT{2}; + +/** Maximum sigop-adjusted virtual size of all v3 transactions. */ +static constexpr int64_t TRUC_MAX_VSIZE{10000}; +/** Maximum sigop-adjusted virtual size of a tx which spends from an unconfirmed TRUC transaction. */ +static constexpr int64_t TRUC_CHILD_MAX_VSIZE{1000}; +// These limits are within the default ancestor/descendant limits. +static_assert(TRUC_MAX_VSIZE + TRUC_CHILD_MAX_VSIZE <= DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1000); +static_assert(TRUC_MAX_VSIZE + TRUC_CHILD_MAX_VSIZE <= DEFAULT_DESCENDANT_SIZE_LIMIT_KVB * 1000); + +/** Must be called for every transaction, even if not TRUC. Not strictly necessary for transactions + * accepted through AcceptMultipleTransactions. + * + * Checks the following rules: + * 1. A TRUC tx must only have TRUC unconfirmed ancestors. + * 2. A non-TRUC tx must only have non-TRUC unconfirmed ancestors. + * 3. A TRUC's ancestor set, including itself, must be within TRUC_ANCESTOR_LIMIT. + * 4. A TRUC's descendant set, including itself, must be within TRUC_DESCENDANT_LIMIT. + * 5. If a TRUC tx has any unconfirmed ancestors, the tx's sigop-adjusted vsize must be within + * TRUC_CHILD_MAX_VSIZE. + * 6. A TRUC tx must be within TRUC_MAX_VSIZE. + * + * + * @param[in] mempool_ancestors The in-mempool ancestors of ptx. + * @param[in] direct_conflicts In-mempool transactions this tx conflicts with. These conflicts + * are used to more accurately calculate the resulting descendant + * count of in-mempool ancestors. + * @param[in] vsize The sigop-adjusted virtual size of ptx. + * + * @returns 3 possibilities: + * - std::nullopt if all TRUC checks were applied successfully + * - debug string + pointer to a mempool sibling if this transaction would be the second child in a + * 1-parent-1-child cluster; the caller may consider evicting the specified sibling or return an + * error with the debug string. + * - debug string + nullptr if this transaction violates some TRUC rule and sibling eviction is not + * applicable. + */ +std::optional<std::pair<std::string, CTransactionRef>> SingleTRUCChecks(const CTransactionRef& ptx, + const CTxMemPool::setEntries& mempool_ancestors, + const std::set<Txid>& direct_conflicts, + int64_t vsize); + +/** Must be called for every transaction that is submitted within a package, even if not TRUC. + * + * For each transaction in a package: + * If it's not a TRUC transaction, verify it has no direct TRUC parents in the mempool or the package. + + * If it is a TRUC transaction, verify that any direct parents in the mempool or the package are TRUC. + * If such a parent exists, verify that parent has no other children in the package or the mempool, + * and that the transaction itself has no children in the package. 
+ * + * If any TRUC violations in the package exist, this test will fail for one of them: + * - if a TRUC transaction T has a parent in the mempool and a child in the package, then PTRUCC(T) will fail + * - if a TRUC transaction T has a parent in the package and a child in the package, then PTRUCC(T) will fail + * - if a TRUC transaction T and a TRUC (sibling) transaction U have some parent in the mempool, + * then PTRUCC(T) and PTRUCC(U) will fail + * - if a TRUC transaction T and a TRUC (sibling) transaction U have some parent in the package, + * then PTRUCC(T) and PTRUCC(U) will fail + * - if a TRUC transaction T has a parent P and a grandparent G in the package, then + * PTRUCC(P) will fail (though PTRUCC(G) and PTRUCC(T) might succeed). + * + * @returns debug string if an error occurs, std::nullopt otherwise. + * */ +std::optional<std::string> PackageTRUCChecks(const CTransactionRef& ptx, int64_t vsize, + const Package& package, + const CTxMemPool::setEntries& mempool_ancestors); + +#endif // BITCOIN_POLICY_TRUC_POLICY_H diff --git a/src/policy/v3_policy.h b/src/policy/v3_policy.h deleted file mode 100644 index 90eaeda46f..0000000000 --- a/src/policy/v3_policy.h +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2022 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_POLICY_V3_POLICY_H -#define BITCOIN_POLICY_V3_POLICY_H - -#include <consensus/amount.h> -#include <policy/packages.h> -#include <policy/policy.h> -#include <primitives/transaction.h> -#include <txmempool.h> -#include <util/result.h> - -#include <set> -#include <string> - -// This module enforces rules for BIP 431 TRUC transactions (with version=3) which help make -// RBF abilities more robust. -static constexpr decltype(CTransaction::version) TRUC_VERSION{3}; - -// v3 only allows 1 parent and 1 child when unconfirmed. -/** Maximum number of transactions including an unconfirmed tx and its descendants. */ -static constexpr unsigned int V3_DESCENDANT_LIMIT{2}; -/** Maximum number of transactions including a V3 tx and all its mempool ancestors. */ -static constexpr unsigned int V3_ANCESTOR_LIMIT{2}; - -/** Maximum sigop-adjusted virtual size of all v3 transactions. */ -static constexpr int64_t V3_MAX_VSIZE{10000}; -/** Maximum sigop-adjusted virtual size of a tx which spends from an unconfirmed v3 transaction. */ -static constexpr int64_t V3_CHILD_MAX_VSIZE{1000}; -// These limits are within the default ancestor/descendant limits. -static_assert(V3_MAX_VSIZE + V3_CHILD_MAX_VSIZE <= DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1000); -static_assert(V3_MAX_VSIZE + V3_CHILD_MAX_VSIZE <= DEFAULT_DESCENDANT_SIZE_LIMIT_KVB * 1000); - -/** Must be called for every transaction, even if not v3. Not strictly necessary for transactions - * accepted through AcceptMultipleTransactions. - * - * Checks the following rules: - * 1. A v3 tx must only have v3 unconfirmed ancestors. - * 2. A non-v3 tx must only have non-v3 unconfirmed ancestors. - * 3. A v3's ancestor set, including itself, must be within V3_ANCESTOR_LIMIT. - * 4. A v3's descendant set, including itself, must be within V3_DESCENDANT_LIMIT. - * 5. If a v3 tx has any unconfirmed ancestors, the tx's sigop-adjusted vsize must be within - * V3_CHILD_MAX_VSIZE. - * 6. A v3 tx must be within V3_MAX_VSIZE. - * - * - * @param[in] mempool_ancestors The in-mempool ancestors of ptx. - * @param[in] direct_conflicts In-mempool transactions this tx conflicts with. 
These conflicts - * are used to more accurately calculate the resulting descendant - * count of in-mempool ancestors. - * @param[in] vsize The sigop-adjusted virtual size of ptx. - * - * @returns 3 possibilities: - * - std::nullopt if all v3 checks were applied successfully - * - debug string + pointer to a mempool sibling if this transaction would be the second child in a - * 1-parent-1-child cluster; the caller may consider evicting the specified sibling or return an - * error with the debug string. - * - debug string + nullptr if this transaction violates some v3 rule and sibling eviction is not - * applicable. - */ -std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTransactionRef& ptx, - const CTxMemPool::setEntries& mempool_ancestors, - const std::set<Txid>& direct_conflicts, - int64_t vsize); - -/** Must be called for every transaction that is submitted within a package, even if not v3. - * - * For each transaction in a package: - * If it's not a v3 transaction, verify it has no direct v3 parents in the mempool or the package. - - * If it is a v3 transaction, verify that any direct parents in the mempool or the package are v3. - * If such a parent exists, verify that parent has no other children in the package or the mempool, - * and that the transaction itself has no children in the package. - * - * If any v3 violations in the package exist, this test will fail for one of them: - * - if a v3 transaction T has a parent in the mempool and a child in the package, then PV3C(T) will fail - * - if a v3 transaction T has a parent in the package and a child in the package, then PV3C(T) will fail - * - if a v3 transaction T and a v3 (sibling) transaction U have some parent in the mempool, - * then PV3C(T) and PV3C(U) will fail - * - if a v3 transaction T and a v3 (sibling) transaction U have some parent in the package, - * then PV3C(T) and PV3C(U) will fail - * - if a v3 transaction T has a parent P and a grandparent G in the package, then - * PV3C(P) will fail (though PV3C(G) and PV3C(T) might succeed). - * - * @returns debug string if an error occurs, std::nullopt otherwise. 
- * */ -std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t vsize, - const Package& package, - const CTxMemPool::setEntries& mempool_ancestors); - -#endif // BITCOIN_POLICY_V3_POLICY_H diff --git a/src/prevector.h b/src/prevector.h index 4776db789b..6dcc305268 100644 --- a/src/prevector.h +++ b/src/prevector.h @@ -242,7 +242,7 @@ public: fill(item_ptr(0), first, last); } - prevector() {} + prevector() = default; explicit prevector(size_type n) { resize(n); diff --git a/src/primitives/block.h b/src/primitives/block.h index 832f8a03f7..207d2b2980 100644 --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -133,7 +133,7 @@ struct CBlockLocator std::vector<uint256> vHave; - CBlockLocator() {} + CBlockLocator() = default; explicit CBlockLocator(std::vector<uint256>&& have) : vHave(std::move(have)) {} diff --git a/src/psbt.h b/src/psbt.h index 4607304046..6d49864b3c 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -225,7 +225,7 @@ struct PSBTInput void FillSignatureData(SignatureData& sigdata) const; void FromSignatureData(const SignatureData& sigdata); void Merge(const PSBTInput& input); - PSBTInput() {} + PSBTInput() = default; template <typename Stream> inline void Serialize(Stream& s) const { @@ -726,7 +726,7 @@ struct PSBTOutput void FillSignatureData(SignatureData& sigdata) const; void FromSignatureData(const SignatureData& sigdata); void Merge(const PSBTOutput& output); - PSBTOutput() {} + PSBTOutput() = default; template <typename Stream> inline void Serialize(Stream& s) const { @@ -967,7 +967,7 @@ struct PartiallySignedTransaction [[nodiscard]] bool Merge(const PartiallySignedTransaction& psbt); bool AddInput(const CTxIn& txin, PSBTInput& psbtin); bool AddOutput(const CTxOut& txout, const PSBTOutput& psbtout); - PartiallySignedTransaction() {} + PartiallySignedTransaction() = default; explicit PartiallySignedTransaction(const CMutableTransaction& tx); /** * Finds the UTXO for a given input index @@ -1177,8 +1177,13 @@ struct PartiallySignedTransaction inputs.push_back(input); // Make sure the non-witness utxo matches the outpoint - if (input.non_witness_utxo && input.non_witness_utxo->GetHash() != tx->vin[i].prevout.hash) { - throw std::ios_base::failure("Non-witness UTXO does not match outpoint hash"); + if (input.non_witness_utxo) { + if (input.non_witness_utxo->GetHash() != tx->vin[i].prevout.hash) { + throw std::ios_base::failure("Non-witness UTXO does not match outpoint hash"); + } + if (tx->vin[i].prevout.n >= input.non_witness_utxo->vout.size()) { + throw std::ios_base::failure("Input specifies output index that does not exist"); + } } ++i; } diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index 2f3bad37e6..0b03e3071c 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -53,7 +53,7 @@ ClientModel::ClientModel(interfaces::Node& node, OptionsModel *_optionsModel, QO connect(timer, &QTimer::timeout, [this] { // no locking required at this point // the following calls will acquire the required lock - Q_EMIT mempoolSizeChanged(m_node.getMempoolSize(), m_node.getMempoolDynamicUsage()); + Q_EMIT mempoolSizeChanged(m_node.getMempoolSize(), m_node.getMempoolDynamicUsage(), m_node.getMempoolMaxUsage()); Q_EMIT bytesChanged(m_node.getTotalBytesRecv(), m_node.getTotalBytesSent()); }); connect(m_thread, &QThread::finished, timer, &QObject::deleteLater); diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h index 624056b5df..7727359f99 100644 --- a/src/qt/clientmodel.h +++ b/src/qt/clientmodel.h @@ -113,7 +113,7 @@ private: 
Q_SIGNALS: void numConnectionsChanged(int count); void numBlocksChanged(int count, const QDateTime& blockDate, double nVerificationProgress, SyncType header, SynchronizationState sync_state); - void mempoolSizeChanged(long count, size_t mempoolSizeInBytes); + void mempoolSizeChanged(long count, size_t mempoolSizeInBytes, size_t mempoolMaxSizeInBytes); void networkActiveChanged(bool networkActive); void alertsChanged(const QString &warnings); void bytesChanged(quint64 totalBytesIn, quint64 totalBytesOut); diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp index 7bc6ccdc49..7580f6b47a 100644 --- a/src/qt/modaloverlay.cpp +++ b/src/qt/modaloverlay.cpp @@ -25,6 +25,7 @@ ModalOverlay::ModalOverlay(bool enable_wallet, QWidget* parent) parent->installEventFilter(this); raise(); } + ui->closeButton->installEventFilter(this); blockProcessTime.clear(); setVisible(false); @@ -60,6 +61,11 @@ bool ModalOverlay::eventFilter(QObject * obj, QEvent * ev) { raise(); } } + + if (obj == ui->closeButton && ev->type() == QEvent::FocusOut && layerIsVisible) { + ui->closeButton->setFocus(Qt::OtherFocusReason); + } + return QWidget::eventFilter(obj, ev); } @@ -187,6 +193,10 @@ void ModalOverlay::showHide(bool hide, bool userRequested) m_animation.setEndValue(QPoint(0, hide ? height() : 0)); m_animation.start(QAbstractAnimation::KeepWhenStopped); layerIsVisible = !hide; + + if (layerIsVisible) { + ui->closeButton->setFocus(Qt::OtherFocusReason); + } } void ModalOverlay::closeClicked() diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index ee53a59bb5..949b1b7775 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -92,6 +92,8 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) { ui->setupUi(this); + ui->verticalLayout->setStretchFactor(ui->tabWidget, 1); + /* Main elements init */ ui->databaseCache->setMinimum(nMinDbCache); ui->databaseCache->setMaximum(nMaxDbCache); diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index edf417a7cb..fb731e4e90 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -1000,15 +1000,16 @@ void RPCConsole::setNumBlocks(int count, const QDateTime& blockDate, double nVer } } -void RPCConsole::setMempoolSize(long numberOfTxs, size_t dynUsage) +void RPCConsole::setMempoolSize(long numberOfTxs, size_t dynUsage, size_t maxUsage) { ui->mempoolNumberTxs->setText(QString::number(numberOfTxs)); - if (dynUsage < 1000000) { - ui->mempoolSize->setText(QObject::tr("%1 kB").arg(dynUsage / 1000.0, 0, 'f', 2)); - } else { - ui->mempoolSize->setText(QObject::tr("%1 MB").arg(dynUsage / 1000000.0, 0, 'f', 2)); - } + const auto cur_usage_str = dynUsage < 1000000 ? + QObject::tr("%1 kB").arg(dynUsage / 1000.0, 0, 'f', 2) : + QObject::tr("%1 MB").arg(dynUsage / 1000000.0, 0, 'f', 2); + const auto max_usage_str = QObject::tr("%1 MB").arg(maxUsage / 1000000.0, 0, 'f', 2); + + ui->mempoolSize->setText(cur_usage_str + " / " + max_usage_str); } void RPCConsole::on_lineEdit_returnPressed() @@ -1400,4 +1401,4 @@ void RPCConsole::updateWindowTitle() const QString chainType = QString::fromStdString(Params().GetChainTypeString()); const QString title = tr("Node window - [%1]").arg(chainType); this->setWindowTitle(title); -}
\ No newline at end of file +} diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index d6a5035c33..4747e611d0 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -121,7 +121,7 @@ public Q_SLOTS: /** Set number of blocks and last block date shown in the UI */ void setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, SyncType synctype); /** Set size (number of transactions and memory usage) of the mempool in the UI */ - void setMempoolSize(long numberOfTxs, size_t dynUsage); + void setMempoolSize(long numberOfTxs, size_t dynUsage, size_t maxUsage); /** Go forward or back in history */ void browseHistory(int offset); /** Scroll console view to end */ diff --git a/src/qt/transactiondesc.h b/src/qt/transactiondesc.h index e64f2cace1..b92df67f41 100644 --- a/src/qt/transactiondesc.h +++ b/src/qt/transactiondesc.h @@ -29,7 +29,7 @@ public: static QString toHTML(interfaces::Node& node, interfaces::Wallet& wallet, TransactionRecord* rec, BitcoinUnit unit); private: - TransactionDesc() {} + TransactionDesc() = default; static QString FormatTxStatus(const interfaces::WalletTxStatus& status, bool inMempool); }; diff --git a/src/random.cpp b/src/random.cpp index 239d5bc6fe..7cb6098d54 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -23,6 +23,7 @@ #include <array> #include <cmath> #include <cstdlib> +#include <optional> #include <thread> #ifdef WIN32 @@ -44,13 +45,23 @@ #include <sys/auxv.h> #endif -[[noreturn]] static void RandFailure() +namespace { + +/* Number of random bytes returned by GetOSRand. + * When changing this constant make sure to change all call sites, and make + * sure that the underlying OS APIs for all platforms support the number. + * (many cap out at 256 bytes). + */ +static const int NUM_OS_RANDOM_BYTES = 32; + + +[[noreturn]] void RandFailure() { - LogPrintf("Failed to read randomness, aborting\n"); + LogError("Failed to read randomness, aborting\n"); std::abort(); } -static inline int64_t GetPerformanceCounter() noexcept +inline int64_t GetPerformanceCounter() noexcept { // Read the hardware time stamp counter when available. // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information. @@ -71,10 +82,10 @@ static inline int64_t GetPerformanceCounter() noexcept } #ifdef HAVE_GETCPUID -static bool g_rdrand_supported = false; -static bool g_rdseed_supported = false; -static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000; -static constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000; +bool g_rdrand_supported = false; +bool g_rdseed_supported = false; +constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000; +constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000; #ifdef bit_RDRND static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND"); #endif @@ -82,7 +93,7 @@ static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND" static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED"); #endif -static void InitHardwareRand() +void InitHardwareRand() { uint32_t eax, ebx, ecx, edx; GetCPUID(1, 0, eax, ebx, ecx, edx); @@ -95,7 +106,7 @@ static void InitHardwareRand() } } -static void ReportHardwareRand() +void ReportHardwareRand() { // This must be done in a separate function, as InitHardwareRand() may be indirectly called // from global constructors, before logging is initialized. @@ -111,7 +122,7 @@ static void ReportHardwareRand() * * Must only be called when RdRand is supported. 
*/ -static uint64_t GetRdRand() noexcept +uint64_t GetRdRand() noexcept { // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk. #ifdef __i386__ @@ -146,7 +157,7 @@ static uint64_t GetRdRand() noexcept * * Must only be called when RdSeed is supported. */ -static uint64_t GetRdSeed() noexcept +uint64_t GetRdSeed() noexcept { // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered, // but pause after every failure. @@ -180,16 +191,16 @@ static uint64_t GetRdSeed() noexcept #elif defined(__aarch64__) && defined(HWCAP2_RNG) -static bool g_rndr_supported = false; +bool g_rndr_supported = false; -static void InitHardwareRand() +void InitHardwareRand() { if (getauxval(AT_HWCAP2) & HWCAP2_RNG) { g_rndr_supported = true; } } -static void ReportHardwareRand() +void ReportHardwareRand() { // This must be done in a separate function, as InitHardwareRand() may be indirectly called // from global constructors, before logging is initialized. @@ -202,7 +213,7 @@ static void ReportHardwareRand() * * Must only be called when RNDR is supported. */ -static uint64_t GetRNDR() noexcept +uint64_t GetRNDR() noexcept { uint8_t ok; uint64_t r1; @@ -220,7 +231,7 @@ static uint64_t GetRNDR() noexcept * * Must only be called when RNDRRS is supported. */ -static uint64_t GetRNDRRS() noexcept +uint64_t GetRNDRRS() noexcept { uint8_t ok; uint64_t r1; @@ -240,12 +251,12 @@ static uint64_t GetRNDRRS() noexcept * Slower sources should probably be invoked separately, and/or only from * RandAddPeriodic (which is called once a minute). */ -static void InitHardwareRand() {} -static void ReportHardwareRand() {} +void InitHardwareRand() {} +void ReportHardwareRand() {} #endif /** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */ -static void SeedHardwareFast(CSHA512& hasher) noexcept { +void SeedHardwareFast(CSHA512& hasher) noexcept { #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) if (g_rdrand_supported) { uint64_t out = GetRdRand(); @@ -262,7 +273,7 @@ static void SeedHardwareFast(CSHA512& hasher) noexcept { } /** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */ -static void SeedHardwareSlow(CSHA512& hasher) noexcept { +void SeedHardwareSlow(CSHA512& hasher) noexcept { #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's // guaranteed to produce independent randomness on every call. @@ -295,7 +306,7 @@ static void SeedHardwareSlow(CSHA512& hasher) noexcept { } /** Use repeated SHA512 to strengthen the randomness in seed32, and feed into hasher. */ -static void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept +void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept { CSHA512 inner_hasher; inner_hasher.Write(seed, sizeof(seed)); @@ -326,7 +337,7 @@ static void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration du /** Fallback: get 32 bytes of system entropy from /dev/urandom. The most * compatible way to get cryptographic randomness on UNIX-ish platforms. 
*/ -[[maybe_unused]] static void GetDevURandom(unsigned char *ent32) +[[maybe_unused]] void GetDevURandom(unsigned char *ent32) { int f = open("/dev/urandom", O_RDONLY); if (f == -1) { @@ -401,8 +412,6 @@ void GetOSRand(unsigned char *ent32) #endif } -namespace { - class RNGState { Mutex m_mutex; /* The RNG state consists of 256 bits of entropy, taken from the output of @@ -417,6 +426,10 @@ class RNGState { uint64_t m_counter GUARDED_BY(m_mutex) = 0; bool m_strongly_seeded GUARDED_BY(m_mutex) = false; + /** If not nullopt, the output of this RNGState is redirected and drawn from here + * (unless always_use_real_rng is passed to MixExtract). */ + std::optional<ChaCha20> m_deterministic_prng GUARDED_BY(m_mutex); + Mutex m_events_mutex; CSHA256 m_events_hasher GUARDED_BY(m_events_mutex); @@ -457,11 +470,21 @@ public: m_events_hasher.Write(events_hash, 32); } + /** Make the output of MixExtract (unless always_use_real_rng) deterministic, with specified seed. */ + void MakeDeterministic(const uint256& seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + LOCK(m_mutex); + m_deterministic_prng.emplace(MakeByteSpan(seed)); + } + /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher. * * If this function has never been called with strong_seed = true, false is returned. + * + * If always_use_real_rng is false, and MakeDeterministic has been called before, output + * from the deterministic PRNG instead. */ - bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed, bool always_use_real_rng) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) { assert(num <= 32); unsigned char buf[64]; @@ -479,6 +502,13 @@ public: hasher.Finalize(buf); // Store the last 32 bytes of the hash output as new RNG state. memcpy(m_state, buf + 32, 32); + // Handle requests for deterministic randomness. + if (!always_use_real_rng && m_deterministic_prng.has_value()) [[unlikely]] { + // Overwrite the beginning of buf, which will be used for output. + m_deterministic_prng->Keystream(AsWritableBytes(Span{buf, num})); + // Do not require strong seeding for deterministic output. + ret = true; + } } // If desired, copy (up to) the first 32 bytes of the hash output as output. if (num) { @@ -499,20 +529,19 @@ RNGState& GetRNGState() noexcept static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1); return g_rng[0]; } -} /* A note on the use of noexcept in the seeding functions below: * * None of the RNG code should ever throw any exception. */ -static void SeedTimestamp(CSHA512& hasher) noexcept +void SeedTimestamp(CSHA512& hasher) noexcept { int64_t perfcounter = GetPerformanceCounter(); hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter)); } -static void SeedFast(CSHA512& hasher) noexcept +void SeedFast(CSHA512& hasher) noexcept { unsigned char buffer[32]; @@ -527,7 +556,7 @@ static void SeedFast(CSHA512& hasher) noexcept SeedTimestamp(hasher); } -static void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept +void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept { unsigned char buffer[32]; @@ -549,16 +578,17 @@ static void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept } /** Extract entropy from rng, strengthen it, and feed it into hasher. 
*/ -static void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept +void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept { // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher. + // Never use the deterministic PRNG for this, as the result is only used internally. unsigned char strengthen_seed[32]; - rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false); + rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false, /*always_use_real_rng=*/true); // Strengthen the seed, and feed it into hasher. Strengthen(strengthen_seed, dur, hasher); } -static void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept +void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept { // Everything that the 'fast' seeder includes SeedFast(hasher); @@ -578,7 +608,7 @@ static void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept SeedStrengthen(hasher, rng, 10ms); } -static void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept +void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept { // Gather 256 bits of hardware randomness, if available SeedHardwareSlow(hasher); @@ -604,7 +634,7 @@ enum class RNGLevel { PERIODIC, //!< Called by RandAddPeriodic() }; -static void ProcRand(unsigned char* out, int num, RNGLevel level) noexcept +void ProcRand(unsigned char* out, int num, RNGLevel level, bool always_use_real_rng) noexcept { // Make sure the RNG is initialized first (as all Seed* function possibly need hwrand to be available). RNGState& rng = GetRNGState(); @@ -625,65 +655,61 @@ static void ProcRand(unsigned char* out, int num, RNGLevel level) noexcept } // Combine with and update state - if (!rng.MixExtract(out, num, std::move(hasher), false)) { + if (!rng.MixExtract(out, num, std::move(hasher), false, always_use_real_rng)) { // On the first invocation, also seed with SeedStartup(). CSHA512 startup_hasher; SeedStartup(startup_hasher, rng); - rng.MixExtract(out, num, std::move(startup_hasher), true); + rng.MixExtract(out, num, std::move(startup_hasher), true, always_use_real_rng); } } -void GetRandBytes(Span<unsigned char> bytes) noexcept { ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST); } -void GetStrongRandBytes(Span<unsigned char> bytes) noexcept { ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW); } -void RandAddPeriodic() noexcept { ProcRand(nullptr, 0, RNGLevel::PERIODIC); } -void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); } +} // namespace -bool g_mock_deterministic_tests{false}; -uint64_t GetRandInternal(uint64_t nMax) noexcept +/** Internal function to set g_determinstic_rng. Only accessed from tests. 
*/ +void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept { - return FastRandomContext(g_mock_deterministic_tests).randrange(nMax); + GetRNGState().MakeDeterministic(seed); } -uint256 GetRandHash() noexcept +void GetRandBytes(Span<unsigned char> bytes) noexcept { - uint256 hash; - GetRandBytes(hash); - return hash; + ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST, /*always_use_real_rng=*/false); } -void FastRandomContext::RandomSeed() +void GetStrongRandBytes(Span<unsigned char> bytes) noexcept { - uint256 seed = GetRandHash(); - rng.SetKey(MakeByteSpan(seed)); - requires_seed = false; + ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW, /*always_use_real_rng=*/true); } -uint256 FastRandomContext::rand256() noexcept +void RandAddPeriodic() noexcept { - if (requires_seed) RandomSeed(); - uint256 ret; - rng.Keystream(MakeWritableByteSpan(ret)); - return ret; + ProcRand(nullptr, 0, RNGLevel::PERIODIC, /*always_use_real_rng=*/false); } -template <typename B> -std::vector<B> FastRandomContext::randbytes(size_t len) +void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); } + +void FastRandomContext::RandomSeed() noexcept { - std::vector<B> ret(len); - fillrand(MakeWritableByteSpan(ret)); - return ret; + uint256 seed = GetRandHash(); + rng.SetKey(MakeByteSpan(seed)); + requires_seed = false; } -template std::vector<unsigned char> FastRandomContext::randbytes(size_t); -template std::vector<std::byte> FastRandomContext::randbytes(size_t); -void FastRandomContext::fillrand(Span<std::byte> output) +void FastRandomContext::fillrand(Span<std::byte> output) noexcept { if (requires_seed) RandomSeed(); rng.Keystream(output); } -FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)), bitbuf_size(0) {} +FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)) {} + +void FastRandomContext::Reseed(const uint256& seed) noexcept +{ + FlushCache(); + requires_seed = false; + rng = {MakeByteSpan(seed)}; +} bool Random_SanityCheck() { @@ -726,41 +752,38 @@ bool Random_SanityCheck() CSHA512 to_add; to_add.Write((const unsigned char*)&start, sizeof(start)); to_add.Write((const unsigned char*)&stop, sizeof(stop)); - GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false); + GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false, /*always_use_real_rng=*/true); return true; } static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{}; -FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY), bitbuf_size(0) +FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY) { // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not // fDeterministic. That means the rng will be reinitialized with a secure random key upon first // use. 
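A short usage sketch (not part of the patch) for the explicit-seed constructor and the Reseed() method added above; the function name and the all-zero seed are arbitrary examples:

#include <random.h>
#include <uint256.h>

#include <cassert>
#include <cstdint>

// A FastRandomContext constructed (or reseeded) with an explicit seed never touches
// OS entropy and produces a repeatable keystream, which is what tests rely on.
void ExampleDeterministicStream()
{
    const uint256 seed{};                // all-zero seed, fine for illustration
    FastRandomContext rng{seed};
    const uint64_t first = rng.rand64();

    rng.Reseed(seed);                    // restart the stream from the same seed
    assert(first == rng.rand64());       // identical output after reseeding
}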
} -FastRandomContext& FastRandomContext::operator=(FastRandomContext&& from) noexcept -{ - requires_seed = from.requires_seed; - rng = from.rng; - bitbuf = from.bitbuf; - bitbuf_size = from.bitbuf_size; - from.requires_seed = true; - from.bitbuf_size = 0; - return *this; -} - void RandomInit() { // Invoke RNG code to trigger initialization (if not already performed) - ProcRand(nullptr, 0, RNGLevel::FAST); + ProcRand(nullptr, 0, RNGLevel::FAST, /*always_use_real_rng=*/true); ReportHardwareRand(); } -std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval) +double MakeExponentiallyDistributed(uint64_t uniform) noexcept { - double unscaled = -std::log1p(GetRand(uint64_t{1} << 48) * -0.0000000000000035527136788 /* -1/2^48 */); - return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us); + // To convert uniform into an exponentially-distributed double, we use two steps: + // - Convert uniform into a uniformly-distributed double in range [0, 1), use the expression + // ((uniform >> 11) * 0x1.0p-53), as described in https://prng.di.unimi.it/ under + // "Generating uniform doubles in the unit interval". Call this value x. + // - Given an x in uniformly distributed in [0, 1), we find an exponentially distributed value + // by applying the quantile function to it. For the exponential distribution with mean 1 this + // is F(x) = -log(1 - x). + // + // Combining the two, and using log1p(x) = log(1 + x), we obtain the following: + return -std::log1p((uniform >> 11) * -0x1.0p-53); } diff --git a/src/random.h b/src/random.h index f7c20ee4b0..536e697cca 100644 --- a/src/random.h +++ b/src/random.h @@ -10,12 +10,15 @@ #include <crypto/common.h> #include <span.h> #include <uint256.h> +#include <util/check.h> #include <bit> #include <cassert> #include <chrono> +#include <concepts> #include <cstdint> #include <limits> +#include <type_traits> #include <vector> /** @@ -25,8 +28,8 @@ * The following (classes of) functions interact with that state by mixing in new * entropy, and optionally extracting random output from it: * - * - The GetRand*() class of functions, as well as construction of FastRandomContext objects, - * perform 'fast' seeding, consisting of mixing in: + * - GetRandBytes, GetRandHash, GetRandDur, as well as construction of FastRandomContext + * objects, perform 'fast' seeding, consisting of mixing in: * - A stack pointer (indirectly committing to calling thread and call stack) * - A high-precision timestamp (rdtsc when available, c++ high_resolution_clock otherwise) * - 64 bits from the hardware RNG (rdrand) when available. @@ -35,7 +38,7 @@ * FastRandomContext on the other hand does not protect against this once created, but * is even faster (and acceptable to use inside tight loops). * - * - The GetStrongRand*() class of function perform 'slow' seeding, including everything + * - The GetStrongRandBytes() function performs 'slow' seeding, including everything * that fast seeding includes, but additionally: * - OS entropy (/dev/urandom, getrandom(), ...). The application will terminate if * this entropy source fails. @@ -50,253 +53,416 @@ * - Strengthen the entropy for 10 ms using repeated SHA512. * This is run once every minute. * - * On first use of the RNG (regardless of what function is called first), all entropy - * sources used in the 'slow' seeder are included, but also: - * - 256 bits from the hardware RNG (rdseed or rdrand) when available. 
- * - Dynamic environment data (performance monitoring, ...) - * - Static environment data - * - Strengthen the entropy for 100 ms using repeated SHA512. + * - On first use of the RNG (regardless of what function is called first), all entropy + * sources used in the 'slow' seeder are included, but also: + * - 256 bits from the hardware RNG (rdseed or rdrand) when available. + * - Dynamic environment data (performance monitoring, ...) + * - Static environment data + * - Strengthen the entropy for 100 ms using repeated SHA512. * * When mixing in new entropy, H = SHA512(entropy || old_rng_state) is computed, and * (up to) the first 32 bytes of H are produced as output, while the last 32 bytes * become the new RNG state. + * + * During tests, the RNG can be put into a special deterministic mode, in which the output + * of all RNG functions, with the exception of GetStrongRandBytes(), is replaced with the + * output of a deterministic RNG. This deterministic RNG does not gather entropy, and is + * unaffected by RandAddPeriodic() or RandAddEvent(). It produces pseudorandom data that + * only depends on the seed it was initialized with, possibly until it is reinitialized. */ + +/* ============================= INITIALIZATION AND ADDING ENTROPY ============================= */ + /** - * Generate random data via the internal PRNG. + * Initialize global RNG state and log any CPU features that are used. * - * These functions are designed to be fast (sub microsecond), but do not necessarily - * meaningfully add entropy to the PRNG state. + * Calling this function is optional. RNG state will be initialized when first + * needed if it is not called. + */ +void RandomInit(); + +/** + * Gather entropy from various expensive sources, and feed them to the PRNG state. * * Thread-safe. */ -void GetRandBytes(Span<unsigned char> bytes) noexcept; -/** Generate a uniform random integer in the range [0..range). Precondition: range > 0 */ -uint64_t GetRandInternal(uint64_t nMax) noexcept; -/** Generate a uniform random integer of type T in the range [0..nMax) - * nMax defaults to std::numeric_limits<T>::max() - * Precondition: nMax > 0, T is an integral type, no larger than uint64_t - */ -template<typename T> -T GetRand(T nMax=std::numeric_limits<T>::max()) noexcept { - static_assert(std::is_integral<T>(), "T must be integral"); - static_assert(std::numeric_limits<T>::max() <= std::numeric_limits<uint64_t>::max(), "GetRand only supports up to uint64_t"); - return T(GetRandInternal(nMax)); -} -/** Generate a uniform random duration in the range [0..max). Precondition: max.count() > 0 */ -template <typename D> -D GetRandomDuration(typename std::common_type<D>::type max) noexcept -// Having the compiler infer the template argument from the function argument -// is dangerous, because the desired return value generally has a different -// type than the function argument. So std::common_type is used to force the -// call site to specify the type of the return value. -{ - assert(max.count() > 0); - return D{GetRand(max.count())}; -}; -constexpr auto GetRandMicros = GetRandomDuration<std::chrono::microseconds>; -constexpr auto GetRandMillis = GetRandomDuration<std::chrono::milliseconds>; +void RandAddPeriodic() noexcept; /** - * Return a timestamp in the future sampled from an exponential distribution - * (https://en.wikipedia.org/wiki/Exponential_distribution). This distribution - * is memoryless and should be used for repeated network events (e.g. 
sending a - * certain type of message) to minimize leaking information to observers. + * Gathers entropy from the low bits of the time at which events occur. Should + * be called with a uint32_t describing the event at the time an event occurs. * - * The probability of an event occurring before time x is 1 - e^-(x/a) where a - * is the average interval between events. - * */ -std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval); + * Thread-safe. + */ +void RandAddEvent(const uint32_t event_info) noexcept; -uint256 GetRandHash() noexcept; + +/* =========================== BASE RANDOMNESS GENERATION FUNCTIONS =========================== + * + * All produced randomness is eventually generated by one of these functions. + */ /** - * Gather entropy from various sources, feed it into the internal PRNG, and - * generate random data using it. + * Generate random data via the internal PRNG. * - * This function will cause failure whenever the OS RNG fails. + * These functions are designed to be fast (sub microsecond), but do not necessarily + * meaningfully add entropy to the PRNG state. + * + * In test mode (see SeedRandomForTest in src/test/util/random.h), the normal PRNG state is + * bypassed, and a deterministic, seeded, PRNG is used instead. * * Thread-safe. */ -void GetStrongRandBytes(Span<unsigned char> bytes) noexcept; +void GetRandBytes(Span<unsigned char> bytes) noexcept; /** - * Gather entropy from various expensive sources, and feed them to the PRNG state. + * Gather entropy from various sources, feed it into the internal PRNG, and + * generate random data using it. + * + * This function will cause failure whenever the OS RNG fails. + * + * The normal PRNG is never bypassed here, even in test mode. * * Thread-safe. */ -void RandAddPeriodic() noexcept; +void GetStrongRandBytes(Span<unsigned char> bytes) noexcept; -/** - * Gathers entropy from the low bits of the time at which events occur. Should - * be called with a uint32_t describing the event at the time an event occurs. + +/* ============================= RANDOM NUMBER GENERATION CLASSES ============================= * - * Thread-safe. + * In this section, 3 classes are defined: + * - RandomMixin: a base class that adds functionality to all RNG classes. + * - FastRandomContext: a cryptographic RNG (seeded through GetRandBytes in its default + * constructor). + * - InsecureRandomContext: a non-cryptographic, very fast, RNG. */ -void RandAddEvent(const uint32_t event_info) noexcept; -/** - * Fast randomness source. This is seeded once with secure random data, but - * is completely deterministic and does not gather more entropy after that. +// Forward declaration of RandomMixin, used in RandomNumberGenerator concept. +template<typename T> +class RandomMixin; + +/** A concept for RandomMixin-based random number generators. */ +template<typename T> +concept RandomNumberGenerator = requires(T& rng, Span<std::byte> s) { + // A random number generator must provide rand64(). + { rng.rand64() } noexcept -> std::same_as<uint64_t>; + // A random number generator must derive from RandomMixin, which adds other rand* functions. + requires std::derived_from<std::remove_reference_t<T>, RandomMixin<std::remove_reference_t<T>>>; +}; + +/** A concept for C++ std::chrono durations. 
*/ +template<typename T> +concept StdChronoDuration = requires { + []<class Rep, class Period>(std::type_identity<std::chrono::duration<Rep, Period>>){}( + std::type_identity<T>()); +}; + +/** Given a uniformly random uint64_t, return an exponentially distributed double with mean 1. */ +double MakeExponentiallyDistributed(uint64_t uniform) noexcept; + +/** Mixin class that provides helper randomness functions. * - * This class is not thread-safe. + * Intended to be used through CRTP: https://en.cppreference.com/w/cpp/language/crtp. + * An RNG class FunkyRNG would derive publicly from RandomMixin<FunkyRNG>. This permits + * RandomMixin from accessing the derived class's rand64() function, while also allowing + * the derived class to provide more. + * + * The derived class must satisfy the RandomNumberGenerator concept. */ -class FastRandomContext +template<typename T> +class RandomMixin { private: - bool requires_seed; - ChaCha20 rng; - - uint64_t bitbuf; - int bitbuf_size; + uint64_t bitbuf{0}; + int bitbuf_size{0}; - void RandomSeed(); + /** Access the underlying generator. + * + * This also enforces the RandomNumberGenerator concept. We cannot declare that in the template + * (no template<RandomNumberGenerator T>) because the type isn't fully instantiated yet there. + */ + RandomNumberGenerator auto& Impl() noexcept { return static_cast<T&>(*this); } - void FillBitBuffer() +protected: + constexpr void FlushCache() noexcept { - bitbuf = rand64(); - bitbuf_size = 64; + bitbuf = 0; + bitbuf_size = 0; } public: - explicit FastRandomContext(bool fDeterministic = false) noexcept; - - /** Initialize with explicit seed (only for testing) */ - explicit FastRandomContext(const uint256& seed) noexcept; + constexpr RandomMixin() noexcept = default; - // Do not permit copying a FastRandomContext (move it, or create a new one to get reseeded). - FastRandomContext(const FastRandomContext&) = delete; - FastRandomContext(FastRandomContext&&) = delete; - FastRandomContext& operator=(const FastRandomContext&) = delete; - - /** Move a FastRandomContext. If the original one is used again, it will be reseeded. */ - FastRandomContext& operator=(FastRandomContext&& from) noexcept; - - /** Generate a random 64-bit integer. */ - uint64_t rand64() noexcept - { - if (requires_seed) RandomSeed(); - std::array<std::byte, 8> buf; - rng.Keystream(buf); - return ReadLE64(UCharCast(buf.data())); - } + // Do not permit copying or moving an RNG. + RandomMixin(const RandomMixin&) = delete; + RandomMixin& operator=(const RandomMixin&) = delete; + RandomMixin(RandomMixin&&) = delete; + RandomMixin& operator=(RandomMixin&&) = delete; /** Generate a random (bits)-bit integer. */ uint64_t randbits(int bits) noexcept { - if (bits == 0) { - return 0; - } else if (bits > 32) { - return rand64() >> (64 - bits); - } else { - if (bitbuf_size < bits) FillBitBuffer(); - uint64_t ret = bitbuf & (~uint64_t{0} >> (64 - bits)); + Assume(bits <= 64); + // Requests for the full 64 bits are passed through. + if (bits == 64) return Impl().rand64(); + uint64_t ret; + if (bits <= bitbuf_size) { + // If there is enough entropy left in bitbuf, return its bottom bits bits. + ret = bitbuf; bitbuf >>= bits; bitbuf_size -= bits; - return ret; + } else { + // If not, return all of bitbuf, supplemented with the (bits - bitbuf_size) bottom + // bits of a newly generated 64-bit number on top. The remainder of that generated + // number becomes the new bitbuf. 
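+                // Worked example (illustrative): with bitbuf_size = 3 and bits = 8, the low
+                // 3 bits of the result come from bitbuf and the next 5 from gen; gen then has
+                // 59 bits left over, so the new bitbuf_size is 64 + 3 - 8 = 59.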
+ uint64_t gen = Impl().rand64(); + ret = (gen << bitbuf_size) | bitbuf; + bitbuf = gen >> (bits - bitbuf_size); + bitbuf_size = 64 + bitbuf_size - bits; } + // Return the bottom bits bits of ret. + return ret & ((uint64_t{1} << bits) - 1); } - /** Generate a random integer in the range [0..range). - * Precondition: range > 0. - */ - uint64_t randrange(uint64_t range) noexcept + /** Same as above, but with compile-time fixed bits count. */ + template<int Bits> + uint64_t randbits() noexcept { - assert(range); - --range; - int bits = std::bit_width(range); + static_assert(Bits >= 0 && Bits <= 64); + if constexpr (Bits == 64) { + return Impl().rand64(); + } else { + uint64_t ret; + if (Bits <= bitbuf_size) { + ret = bitbuf; + bitbuf >>= Bits; + bitbuf_size -= Bits; + } else { + uint64_t gen = Impl().rand64(); + ret = (gen << bitbuf_size) | bitbuf; + bitbuf = gen >> (Bits - bitbuf_size); + bitbuf_size = 64 + bitbuf_size - Bits; + } + constexpr uint64_t MASK = (uint64_t{1} << Bits) - 1; + return ret & MASK; + } + } + + /** Generate a random integer in the range [0..range), with range > 0. */ + template<std::integral I> + I randrange(I range) noexcept + { + static_assert(std::numeric_limits<I>::max() <= std::numeric_limits<uint64_t>::max()); + Assume(range > 0); + uint64_t maxval = range - 1U; + int bits = std::bit_width(maxval); while (true) { - uint64_t ret = randbits(bits); - if (ret <= range) return ret; + uint64_t ret = Impl().randbits(bits); + if (ret <= maxval) return ret; } } - /** Generate random bytes. */ - template <typename B = unsigned char> - std::vector<B> randbytes(size_t len); + /** Fill a Span with random bytes. */ + void fillrand(Span<std::byte> span) noexcept + { + while (span.size() >= 8) { + uint64_t gen = Impl().rand64(); + WriteLE64(UCharCast(span.data()), gen); + span = span.subspan(8); + } + if (span.size() >= 4) { + uint32_t gen = Impl().rand32(); + WriteLE32(UCharCast(span.data()), gen); + span = span.subspan(4); + } + while (span.size()) { + span[0] = std::byte(Impl().template randbits<8>()); + span = span.subspan(1); + } + } - /** Fill a byte Span with random bytes. */ - void fillrand(Span<std::byte> output); + /** Generate a random integer in its entire (non-negative) range. */ + template<std::integral I> + I rand() noexcept + { + static_assert(std::numeric_limits<I>::max() <= std::numeric_limits<uint64_t>::max()); + static constexpr auto BITS = std::bit_width(uint64_t(std::numeric_limits<I>::max())); + static_assert(std::numeric_limits<I>::max() == std::numeric_limits<uint64_t>::max() >> (64 - BITS)); + return I(Impl().template randbits<BITS>()); + } + + /** Generate random bytes. */ + template <BasicByte B = unsigned char> + std::vector<B> randbytes(size_t len) noexcept + { + std::vector<B> ret(len); + Impl().fillrand(MakeWritableByteSpan(ret)); + return ret; + } /** Generate a random 32-bit integer. */ - uint32_t rand32() noexcept { return randbits(32); } + uint32_t rand32() noexcept { return Impl().template randbits<32>(); } /** generate a random uint256. */ - uint256 rand256() noexcept; + uint256 rand256() noexcept + { + uint256 ret; + Impl().fillrand(MakeWritableByteSpan(ret)); + return ret; + } /** Generate a random boolean. */ - bool randbool() noexcept { return randbits(1); } + bool randbool() noexcept { return Impl().template randbits<1>(); } /** Return the time point advanced by a uniform random duration. 
*/ template <typename Tp> - Tp rand_uniform_delay(const Tp& time, typename Tp::duration range) + Tp rand_uniform_delay(const Tp& time, typename Tp::duration range) noexcept { - return time + rand_uniform_duration<Tp>(range); + return time + Impl().template rand_uniform_duration<Tp>(range); } /** Generate a uniform random duration in the range from 0 (inclusive) to range (exclusive). */ - template <typename Chrono> + template <typename Chrono> requires StdChronoDuration<typename Chrono::duration> typename Chrono::duration rand_uniform_duration(typename Chrono::duration range) noexcept { using Dur = typename Chrono::duration; - return range.count() > 0 ? /* interval [0..range) */ Dur{randrange(range.count())} : - range.count() < 0 ? /* interval (range..0] */ -Dur{randrange(-range.count())} : + return range.count() > 0 ? /* interval [0..range) */ Dur{Impl().randrange(range.count())} : + range.count() < 0 ? /* interval (range..0] */ -Dur{Impl().randrange(-range.count())} : /* interval [0..0] */ Dur{0}; }; + /** Generate a uniform random duration in the range [0..max). Precondition: max.count() > 0 */ + template <StdChronoDuration Dur> + Dur randrange(typename std::common_type_t<Dur> range) noexcept + // Having the compiler infer the template argument from the function argument + // is dangerous, because the desired return value generally has a different + // type than the function argument. So std::common_type is used to force the + // call site to specify the type of the return value. + { + return Dur{Impl().randrange(range.count())}; + } + + /** + * Return a duration sampled from an exponential distribution + * (https://en.wikipedia.org/wiki/Exponential_distribution). Successive events + * whose intervals are distributed according to this form a memoryless Poisson + * process. This should be used for repeated network events (e.g. sending a + * certain type of message) to minimize leaking information to observers. + * + * The probability of an event occurring before time x is 1 - e^-(x/a) where a + * is the average interval between events. + * */ + std::chrono::microseconds rand_exp_duration(std::chrono::microseconds mean) noexcept + { + using namespace std::chrono_literals; + auto unscaled = MakeExponentiallyDistributed(Impl().rand64()); + return std::chrono::duration_cast<std::chrono::microseconds>(unscaled * mean + 0.5us); + } + // Compatibility with the UniformRandomBitGenerator concept typedef uint64_t result_type; - static constexpr uint64_t min() { return 0; } - static constexpr uint64_t max() { return std::numeric_limits<uint64_t>::max(); } - inline uint64_t operator()() noexcept { return rand64(); } + static constexpr uint64_t min() noexcept { return 0; } + static constexpr uint64_t max() noexcept { return std::numeric_limits<uint64_t>::max(); } + inline uint64_t operator()() noexcept { return Impl().rand64(); } }; -/** More efficient than using std::shuffle on a FastRandomContext. - * - * This is more efficient as std::shuffle will consume entropy in groups of - * 64 bits at the time and throw away most. +/** + * Fast randomness source. This is seeded once with secure random data, but + * is completely deterministic and does not gather more entropy after that. * - * This also works around a bug in libstdc++ std::shuffle that may cause - * type::operator=(type&&) to be invoked on itself, which the library's - * debug mode detects and panics on. 
This is a known issue, see - * https://stackoverflow.com/questions/22915325/avoiding-self-assignment-in-stdshuffle + * This class is not thread-safe. */ -template <typename I, typename R> -void Shuffle(I first, I last, R&& rng) +class FastRandomContext : public RandomMixin<FastRandomContext> { - while (first != last) { - size_t j = rng.randrange(last - first); - if (j) { - using std::swap; - swap(*first, *(first + j)); - } - ++first; +private: + bool requires_seed; + ChaCha20 rng; + + void RandomSeed() noexcept; + +public: + /** Construct a FastRandomContext with GetRandHash()-based entropy (or zero key if fDeterministic). */ + explicit FastRandomContext(bool fDeterministic = false) noexcept; + + /** Initialize with explicit seed (only for testing) */ + explicit FastRandomContext(const uint256& seed) noexcept; + + /** Reseed with explicit seed (only for testing). */ + void Reseed(const uint256& seed) noexcept; + + /** Generate a random 64-bit integer. */ + uint64_t rand64() noexcept + { + if (requires_seed) RandomSeed(); + std::array<std::byte, 8> buf; + rng.Keystream(buf); + return ReadLE64(UCharCast(buf.data())); } -} -/* Number of random bytes returned by GetOSRand. - * When changing this constant make sure to change all call sites, and make - * sure that the underlying OS APIs for all platforms support the number. - * (many cap out at 256 bytes). - */ -static const int NUM_OS_RANDOM_BYTES = 32; + /** Fill a byte Span with random bytes. This overrides the RandomMixin version. */ + void fillrand(Span<std::byte> output) noexcept; +}; -/** Get 32 bytes of system entropy. Do not use this in application code: use - * GetStrongRandBytes instead. +/** xoroshiro128++ PRNG. Extremely fast, not appropriate for cryptographic purposes. + * + * Memory footprint is very small, period is 2^128 - 1. + * This class is not thread-safe. + * + * Reference implementation available at https://prng.di.unimi.it/xoroshiro128plusplus.c + * See https://prng.di.unimi.it/ */ -void GetOSRand(unsigned char* ent32); +class InsecureRandomContext : public RandomMixin<InsecureRandomContext> +{ + uint64_t m_s0; + uint64_t m_s1; + + [[nodiscard]] constexpr static uint64_t SplitMix64(uint64_t& seedval) noexcept + { + uint64_t z = (seedval += 0x9e3779b97f4a7c15); + z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9; + z = (z ^ (z >> 27)) * 0x94d049bb133111eb; + return z ^ (z >> 31); + } + +public: + constexpr explicit InsecureRandomContext(uint64_t seedval) noexcept + : m_s0(SplitMix64(seedval)), m_s1(SplitMix64(seedval)) {} + + constexpr void Reseed(uint64_t seedval) noexcept + { + FlushCache(); + m_s0 = SplitMix64(seedval); + m_s1 = SplitMix64(seedval); + } + + constexpr uint64_t rand64() noexcept + { + uint64_t s0 = m_s0, s1 = m_s1; + const uint64_t result = std::rotl(s0 + s1, 17) + s0; + s1 ^= s0; + m_s0 = std::rotl(s0, 49) ^ s1 ^ (s1 << 21); + m_s1 = std::rotl(s1, 28); + return result; + } +}; + + +/* ==================== CONVENIENCE FUNCTIONS FOR COMMONLY USED RANDOMNESS ==================== */ + +/** Generate a random uint256. */ +inline uint256 GetRandHash() noexcept +{ + uint256 hash; + GetRandBytes(hash); + return hash; +} + +/* ============================= MISCELLANEOUS TEST-ONLY FUNCTIONS ============================= */ /** Check that OS randomness is available and returning the requested number * of bytes. */ bool Random_SanityCheck(); -/** - * Initialize global RNG state and log any CPU features that are used. - * - * Calling this function is optional. 
RNG state will be initialized when first - * needed if it is not called. - */ -void RandomInit(); - #endif // BITCOIN_RANDOM_H diff --git a/src/randomenv.cpp b/src/randomenv.cpp index 49033deef2..93d30a27fd 100644 --- a/src/randomenv.cpp +++ b/src/randomenv.cpp @@ -69,10 +69,10 @@ void RandAddSeedPerfmon(CSHA512& hasher) // This can take up to 2 seconds, so only do it every 10 minutes. // Initialize last_perfmon to 0 seconds, we don't skip the first call. - static std::atomic<std::chrono::seconds> last_perfmon{0s}; + static std::atomic<SteadyClock::time_point> last_perfmon{SteadyClock::time_point{0s}}; auto last_time = last_perfmon.load(); - auto current_time = GetTime<std::chrono::seconds>(); - if (current_time < last_time + std::chrono::minutes{10}) return; + auto current_time = SteadyClock::now(); + if (current_time < last_time + 10min) return; last_perfmon = current_time; std::vector<unsigned char> vData(250000, 0); diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index e785678614..9899a13a1e 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -62,9 +62,7 @@ using kernel::CoinStatsHashType; using node::BlockManager; using node::NodeContext; using node::SnapshotMetadata; -using util::Join; using util::MakeUnorderedList; -using util::ToString; struct CUpdatedBlock { @@ -433,6 +431,7 @@ static RPCHelpMan getblockfrompeer() "getblockfrompeer", "Attempt to fetch block from a given peer.\n\n" "We must have the header for this block, e.g. using submitheader.\n" + "The block will not have any undo data which can limit the usage of the block data in a context where the undo data is needed.\n" "Subsequent calls for the same block may cause the response from the previous peer to be ignored.\n" "Peers generally ignore requests for a stale block that they never fully verified, or one that is more than a month old.\n" "When a peer does not respond with a block, we will disconnect.\n" @@ -786,6 +785,32 @@ static RPCHelpMan getblock() }; } +//! Return height of highest block that has been pruned, or std::nullopt if no blocks have been pruned +std::optional<int> GetPruneHeight(const BlockManager& blockman, const CChain& chain) { + AssertLockHeld(::cs_main); + + // Search for the last block missing block data or undo data. Don't let the + // search consider the genesis block, because the genesis block does not + // have undo data, but should not be considered pruned. + const CBlockIndex* first_block{chain[1]}; + const CBlockIndex* chain_tip{chain.Tip()}; + + // If there are no blocks after the genesis block, or no blocks at all, nothing is pruned. + if (!first_block || !chain_tip) return std::nullopt; + + // If the chain tip is pruned, everything is pruned. + if (!((chain_tip->nStatus & BLOCK_HAVE_MASK) == BLOCK_HAVE_MASK)) return chain_tip->nHeight; + + const auto& first_unpruned{*CHECK_NONFATAL(blockman.GetFirstBlock(*chain_tip, /*status_mask=*/BLOCK_HAVE_MASK, first_block))}; + if (&first_unpruned == first_block) { + // All blocks between first_block and chain_tip have data, so nothing is pruned. + return std::nullopt; + } + + // Block before the first unpruned block is the last pruned block. + return CHECK_NONFATAL(first_unpruned.pprev)->nHeight; +} + static RPCHelpMan pruneblockchain() { return RPCHelpMan{"pruneblockchain", "", @@ -838,8 +863,7 @@ static RPCHelpMan pruneblockchain() } PruneBlockFilesManual(active_chainstate, height); - const CBlockIndex& block{*CHECK_NONFATAL(active_chain.Tip())}; - return block.nStatus & BLOCK_HAVE_DATA ? 
active_chainstate.m_blockman.GetFirstStoredBlock(block)->nHeight - 1 : block.nHeight; + return GetPruneHeight(chainman.m_blockman, active_chain).value_or(-1); }, }; } @@ -1299,8 +1323,8 @@ RPCHelpMan getblockchaininfo() obj.pushKV("size_on_disk", chainman.m_blockman.CalculateCurrentUsage()); obj.pushKV("pruned", chainman.m_blockman.IsPruneMode()); if (chainman.m_blockman.IsPruneMode()) { - bool has_tip_data = tip.nStatus & BLOCK_HAVE_DATA; - obj.pushKV("pruneheight", has_tip_data ? chainman.m_blockman.GetFirstStoredBlock(tip)->nHeight : tip.nHeight + 1); + const auto prune_height{GetPruneHeight(chainman.m_blockman, active_chainstate.m_chain)}; + obj.pushKV("pruneheight", prune_height ? prune_height.value() + 1 : 0); const bool automatic_pruning{chainman.m_blockman.GetPruneTarget() != BlockManager::PRUNE_TARGET_MANUAL}; obj.pushKV("automatic_pruning", automatic_pruning); @@ -1645,13 +1669,19 @@ static RPCHelpMan getchaintxstats() RPCResult::Type::OBJ, "", "", { {RPCResult::Type::NUM_TIME, "time", "The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME}, - {RPCResult::Type::NUM, "txcount", "The total number of transactions in the chain up to that point"}, + {RPCResult::Type::NUM, "txcount", /*optional=*/true, + "The total number of transactions in the chain up to that point, if known. " + "It may be unknown when using assumeutxo."}, {RPCResult::Type::STR_HEX, "window_final_block_hash", "The hash of the final block in the window"}, {RPCResult::Type::NUM, "window_final_block_height", "The height of the final block in the window."}, {RPCResult::Type::NUM, "window_block_count", "Size of the window in number of blocks"}, - {RPCResult::Type::NUM, "window_tx_count", /*optional=*/true, "The number of transactions in the window. Only returned if \"window_block_count\" is > 0"}, {RPCResult::Type::NUM, "window_interval", /*optional=*/true, "The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0"}, - {RPCResult::Type::NUM, "txrate", /*optional=*/true, "The average rate of transactions per second in the window. Only returned if \"window_interval\" is > 0"}, + {RPCResult::Type::NUM, "window_tx_count", /*optional=*/true, + "The number of transactions in the window. " + "Only returned if \"window_block_count\" is > 0 and if txcount exists for the start and end of the window."}, + {RPCResult::Type::NUM, "txrate", /*optional=*/true, + "The average rate of transactions per second in the window. " + "Only returned if \"window_interval\" is > 0 and if window_tx_count exists."}, }}, RPCExamples{ HelpExampleCli("getchaintxstats", "") @@ -1692,19 +1722,25 @@ static RPCHelpMan getchaintxstats() const CBlockIndex& past_block{*CHECK_NONFATAL(pindex->GetAncestor(pindex->nHeight - blockcount))}; const int64_t nTimeDiff{pindex->GetMedianTimePast() - past_block.GetMedianTimePast()}; - const int nTxDiff = pindex->nChainTx - past_block.nChainTx; + const auto window_tx_count{ + (pindex->nChainTx != 0 && past_block.nChainTx != 0) ? 
std::optional{pindex->nChainTx - past_block.nChainTx} : std::nullopt, + }; UniValue ret(UniValue::VOBJ); ret.pushKV("time", (int64_t)pindex->nTime); - ret.pushKV("txcount", (int64_t)pindex->nChainTx); + if (pindex->nChainTx) { + ret.pushKV("txcount", pindex->nChainTx); + } ret.pushKV("window_final_block_hash", pindex->GetBlockHash().GetHex()); ret.pushKV("window_final_block_height", pindex->nHeight); ret.pushKV("window_block_count", blockcount); if (blockcount > 0) { - ret.pushKV("window_tx_count", nTxDiff); ret.pushKV("window_interval", nTimeDiff); - if (nTimeDiff > 0) { - ret.pushKV("txrate", ((double)nTxDiff) / nTimeDiff); + if (window_tx_count) { + ret.pushKV("window_tx_count", *window_tx_count); + if (nTimeDiff > 0) { + ret.pushKV("txrate", double(*window_tx_count) / nTimeDiff); + } } } @@ -2804,7 +2840,7 @@ static RPCHelpMan loadtxoutset() { NodeContext& node = EnsureAnyNodeContext(request.context); ChainstateManager& chainman = EnsureChainman(node); - fs::path path{AbsPathForConfigVal(EnsureArgsman(node), fs::u8path(request.params[0].get_str()))}; + const fs::path path{AbsPathForConfigVal(EnsureArgsman(node), fs::u8path(self.Arg<std::string>("path")))}; FILE* file{fsbridge::fopen(path, "rb")}; AutoFile afile{file}; @@ -2821,34 +2857,15 @@ static RPCHelpMan loadtxoutset() throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("Unable to parse metadata: %s", e.what())); } - uint256 base_blockhash = metadata.m_base_blockhash; - int base_blockheight = metadata.m_base_blockheight; - if (!chainman.GetParams().AssumeutxoForBlockhash(base_blockhash).has_value()) { - auto available_heights = chainman.GetParams().GetAvailableSnapshotHeights(); - std::string heights_formatted = Join(available_heights, ", ", [&](const auto& i) { return ToString(i); }); - throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to load UTXO snapshot, " - "assumeutxo block hash in snapshot metadata not recognized (hash: %s, height: %s). The following snapshot heights are available: %s.", - base_blockhash.ToString(), - base_blockheight, - heights_formatted)); - } - CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, - return chainman.m_blockman.LookupBlockIndex(base_blockhash)); - - if (!snapshot_start_block) { - throw JSONRPCError( - RPC_INTERNAL_ERROR, - strprintf("The base block header (%s) must appear in the headers chain. Make sure all headers are syncing, and call this RPC again.", - base_blockhash.ToString())); - } - if (!chainman.ActivateSnapshot(afile, metadata, false)) { - throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to load UTXO snapshot " + fs::PathToString(path)); + auto activation_result{chainman.ActivateSnapshot(afile, metadata, false)}; + if (!activation_result) { + throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to load UTXO snapshot: %s. 
(%s)", util::ErrorString(activation_result).original, path.utf8string())); } UniValue result(UniValue::VOBJ); result.pushKV("coins_loaded", metadata.m_coins_count); - result.pushKV("tip_hash", snapshot_start_block->GetBlockHash().ToString()); - result.pushKV("base_height", snapshot_start_block->nHeight); + result.pushKV("tip_hash", metadata.m_base_blockhash.ToString()); + result.pushKV("base_height", metadata.m_base_blockheight); result.pushKV("path", fs::PathToString(path)); return result; }, diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h index c2021c3608..f6a7fe236c 100644 --- a/src/rpc/blockchain.h +++ b/src/rpc/blockchain.h @@ -21,6 +21,7 @@ class CBlockIndex; class Chainstate; class UniValue; namespace node { +class BlockManager; struct NodeContext; } // namespace node @@ -57,4 +58,7 @@ UniValue CreateUTXOSnapshot( const fs::path& path, const fs::path& tmppath); +//! Return height of highest block that has been pruned, or std::nullopt if no blocks have been pruned +std::optional<int> GetPruneHeight(const node::BlockManager& blockman, const CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + #endif // BITCOIN_RPC_BLOCKCHAIN_H diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index fd11f6cfeb..b67d272b65 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -5,7 +5,7 @@ #include <rpc/blockchain.h> -#include <kernel/mempool_persist.h> +#include <node/mempool_persist.h> #include <chainparams.h> #include <core_io.h> @@ -27,7 +27,7 @@ #include <utility> -using kernel::DumpMempool; +using node::DumpMempool; using node::DEFAULT_MAX_BURN_AMOUNT; using node::DEFAULT_MAX_RAW_TX_FEE_RATE; @@ -759,13 +759,13 @@ static RPCHelpMan importmempool() const UniValue& use_current_time{request.params[1]["use_current_time"]}; const UniValue& apply_fee_delta{request.params[1]["apply_fee_delta_priority"]}; const UniValue& apply_unbroadcast{request.params[1]["apply_unbroadcast_set"]}; - kernel::ImportMempoolOptions opts{ + node::ImportMempoolOptions opts{ .use_current_time = use_current_time.isNull() ? true : use_current_time.get_bool(), .apply_fee_delta_priority = apply_fee_delta.isNull() ? false : apply_fee_delta.get_bool(), .apply_unbroadcast_set = apply_unbroadcast.isNull() ? 
false : apply_unbroadcast.get_bool(), }; - if (!kernel::LoadMempool(mempool, load_path, chainstate, std::move(opts))) { + if (!node::LoadMempool(mempool, load_path, chainstate, std::move(opts))) { throw JSONRPCError(RPC_MISC_ERROR, "Unable to import mempool file, see debug.log for details."); } diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 2b93c18965..7e420dcd9b 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -371,8 +371,6 @@ static RPCHelpMan generateblock() ChainstateManager& chainman = EnsureChainman(node); { - LOCK(cs_main); - std::unique_ptr<CBlockTemplate> blocktemplate{miner.createNewBlock(coinbase_script, /*use_mempool=*/false)}; if (!blocktemplate) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); @@ -387,10 +385,8 @@ static RPCHelpMan generateblock() RegenerateCommitments(block, chainman); { - LOCK(cs_main); - BlockValidationState state; - if (!miner.testBlockValidity(state, block, /*check_merkle_root=*/false)) { + if (!miner.testBlockValidity(block, /*check_merkle_root=*/false, state)) { throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("testBlockValidity failed: %s", state.ToString())); } } @@ -667,9 +663,7 @@ static RPCHelpMan getblocktemplate() ChainstateManager& chainman = EnsureChainman(node); Mining& miner = EnsureMining(node); LOCK(cs_main); - std::optional<uint256> maybe_tip{miner.getTipHash()}; - CHECK_NONFATAL(maybe_tip); - uint256 tip{maybe_tip.value()}; + uint256 tip{CHECK_NONFATAL(miner.getTipHash()).value()}; std::string strMode = "template"; UniValue lpval = NullUniValue; @@ -713,7 +707,7 @@ static RPCHelpMan getblocktemplate() return "inconclusive-not-best-prevblk"; } BlockValidationState state; - miner.testBlockValidity(state, block); + miner.testBlockValidity(block, /*check_merkle_root=*/true, state); return BIP22ValidationResult(state); } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 75b538061d..ed9ef2c159 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1790,8 +1790,8 @@ static RPCHelpMan joinpsbts() std::iota(output_indices.begin(), output_indices.end(), 0); // Shuffle input and output indices lists - Shuffle(input_indices.begin(), input_indices.end(), FastRandomContext()); - Shuffle(output_indices.begin(), output_indices.end(), FastRandomContext()); + std::shuffle(input_indices.begin(), input_indices.end(), FastRandomContext()); + std::shuffle(output_indices.begin(), output_indices.end(), FastRandomContext()); PartiallySignedTransaction shuffled_psbt; shuffled_psbt.tx = CMutableTransaction(); diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index 87b9f18b33..083d1be44f 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -5,12 +5,11 @@ #include <rpc/request.h> -#include <util/fs.h> - #include <common/args.h> #include <logging.h> #include <random.h> #include <rpc/protocol.h> +#include <util/fs.h> #include <util/fs_helpers.h> #include <util/strencodings.h> @@ -95,7 +94,7 @@ static fs::path GetAuthCookieFile(bool temp=false) static bool g_generated_cookie = false; -bool GenerateAuthCookie(std::string *cookie_out) +bool GenerateAuthCookie(std::string* cookie_out, std::optional<fs::perms> cookie_perms) { const size_t COOKIE_SIZE = 32; unsigned char rand_pwd[COOKIE_SIZE]; @@ -109,7 +108,7 @@ bool GenerateAuthCookie(std::string *cookie_out) fs::path filepath_tmp = GetAuthCookieFile(true); file.open(filepath_tmp); if (!file.is_open()) { - LogPrintf("Unable to open cookie authentication file %s for writing\n", fs::PathToString(filepath_tmp)); + 
LogInfo("Unable to open cookie authentication file %s for writing\n", fs::PathToString(filepath_tmp)); return false; } file << cookie; @@ -117,11 +116,21 @@ bool GenerateAuthCookie(std::string *cookie_out) fs::path filepath = GetAuthCookieFile(false); if (!RenameOver(filepath_tmp, filepath)) { - LogPrintf("Unable to rename cookie authentication file %s to %s\n", fs::PathToString(filepath_tmp), fs::PathToString(filepath)); + LogInfo("Unable to rename cookie authentication file %s to %s\n", fs::PathToString(filepath_tmp), fs::PathToString(filepath)); return false; } + if (cookie_perms) { + std::error_code code; + fs::permissions(filepath, cookie_perms.value(), fs::perm_options::replace, code); + if (code) { + LogInfo("Unable to set permissions on cookie authentication file %s\n", fs::PathToString(filepath_tmp)); + return false; + } + } + g_generated_cookie = true; - LogPrintf("Generated RPC authentication cookie %s\n", fs::PathToString(filepath)); + LogInfo("Generated RPC authentication cookie %s\n", fs::PathToString(filepath)); + LogInfo("Permissions used for cookie: %s\n", PermsToSymbolicString(fs::status(filepath).permissions())); if (cookie_out) *cookie_out = cookie; diff --git a/src/rpc/request.h b/src/rpc/request.h index 9968426636..24887e8691 100644 --- a/src/rpc/request.h +++ b/src/rpc/request.h @@ -11,6 +11,7 @@ #include <string> #include <univalue.h> +#include <util/fs.h> enum class JSONRPCVersion { V1_LEGACY, @@ -23,7 +24,7 @@ UniValue JSONRPCReplyObj(UniValue result, UniValue error, std::optional<UniValue UniValue JSONRPCError(int code, const std::string& message); /** Generate a new RPC authentication cookie and write it to disk */ -bool GenerateAuthCookie(std::string *cookie_out); +bool GenerateAuthCookie(std::string* cookie_out, std::optional<fs::perms> cookie_perms=std::nullopt); /** Read the RPC authentication cookie from disk */ bool GetAuthCookie(std::string *cookie_out); /** Delete RPC authentication cookie from disk */ diff --git a/src/rpc/server.h b/src/rpc/server.h index 5735aff821..56e8a63088 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h @@ -48,7 +48,7 @@ bool RPCIsInWarmup(std::string *outStatus); class RPCTimerBase { public: - virtual ~RPCTimerBase() {} + virtual ~RPCTimerBase() = default; }; /** @@ -57,7 +57,7 @@ public: class RPCTimerInterface { public: - virtual ~RPCTimerInterface() {} + virtual ~RPCTimerInterface() = default; /** Implementation name */ virtual const char *Name() = 0; /** Factory function for timers. diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 0987db194c..ae9dba6a50 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -116,13 +116,13 @@ std::string DescriptorChecksum(const Span<const char>& span) * As a result, within-group-of-32 errors count as 1 symbol, as do cross-group errors that don't affect * the position within the groups. */ - static std::string INPUT_CHARSET = + static const std::string INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}" "IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~" "ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; /** The character set for the checksum itself (same as bech32). 
*/ - static std::string CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"; + static const std::string CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"; uint64_t c = 1; int cls = 0; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 836c2e7982..8ba0018c23 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -265,7 +265,7 @@ public: return false; } - virtual ~BaseSignatureChecker() {} + virtual ~BaseSignatureChecker() = default; }; /** Enum to specify what *TransactionSignatureChecker's behavior should be diff --git a/src/script/miniscript.h b/src/script/miniscript.h index a269709e72..97912906d1 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -305,7 +305,7 @@ struct InputStack { //! Data elements. std::vector<std::vector<unsigned char>> stack; //! Construct an empty stack (valid). - InputStack() {} + InputStack() = default; //! Construct a valid single-element stack (with an element up to 75 bytes). InputStack(std::vector<unsigned char> in) : size(in.size() + 1), stack(Vector(std::move(in))) {} //! Change availability diff --git a/src/script/script.h b/src/script/script.h index 66d63fae89..035152ee51 100644 --- a/src/script/script.h +++ b/src/script/script.h @@ -430,7 +430,7 @@ protected: return *this; } public: - CScript() { } + CScript() = default; CScript(const_iterator pbegin, const_iterator pend) : CScriptBase(pbegin, pend) { } CScript(std::vector<unsigned char>::const_iterator pbegin, std::vector<unsigned char>::const_iterator pend) : CScriptBase(pbegin, pend) { } CScript(const unsigned char* pbegin, const unsigned char* pend) : CScriptBase(pbegin, pend) { } @@ -569,7 +569,7 @@ struct CScriptWitness std::vector<std::vector<unsigned char> > stack; // Some compilers complain without a default constructor - CScriptWitness() { } + CScriptWitness() = default; bool IsNull() const { return stack.empty(); } diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp index 7c6c282cc4..33531e6bf5 100644 --- a/src/script/sigcache.cpp +++ b/src/script/sigcache.cpp @@ -5,125 +5,80 @@ #include <script/sigcache.h> -#include <common/system.h> +#include <crypto/sha256.h> #include <logging.h> #include <pubkey.h> #include <random.h> +#include <script/interpreter.h> +#include <span.h> #include <uint256.h> -#include <cuckoocache.h> - -#include <algorithm> #include <mutex> -#include <optional> #include <shared_mutex> #include <vector> -namespace { -/** - * Valid signature cache, to avoid doing expensive ECDSA signature checking - * twice for every transaction (once when accepted into memory pool, and - * again when accepted into the block chain) - */ -class CSignatureCache +SignatureCache::SignatureCache(const size_t max_size_bytes) { -private: - //! Entries are SHA256(nonce || 'E' or 'S' || 31 zero bytes || signature hash || public key || signature): - CSHA256 m_salted_hasher_ecdsa; - CSHA256 m_salted_hasher_schnorr; - typedef CuckooCache::cache<uint256, SignatureCacheHasher> map_type; - map_type setValid; - std::shared_mutex cs_sigcache; - -public: - CSignatureCache() - { - uint256 nonce = GetRandHash(); - // We want the nonce to be 64 bytes long to force the hasher to process - // this chunk, which makes later hash computations more efficient. We - // just write our 32-byte entropy, and then pad with 'E' for ECDSA and - // 'S' for Schnorr (followed by 0 bytes). 
- static constexpr unsigned char PADDING_ECDSA[32] = {'E'}; - static constexpr unsigned char PADDING_SCHNORR[32] = {'S'}; - m_salted_hasher_ecdsa.Write(nonce.begin(), 32); - m_salted_hasher_ecdsa.Write(PADDING_ECDSA, 32); - m_salted_hasher_schnorr.Write(nonce.begin(), 32); - m_salted_hasher_schnorr.Write(PADDING_SCHNORR, 32); - } - - void - ComputeEntryECDSA(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) const - { - CSHA256 hasher = m_salted_hasher_ecdsa; - hasher.Write(hash.begin(), 32).Write(pubkey.data(), pubkey.size()).Write(vchSig.data(), vchSig.size()).Finalize(entry.begin()); - } - - void - ComputeEntrySchnorr(uint256& entry, const uint256 &hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey) const - { - CSHA256 hasher = m_salted_hasher_schnorr; - hasher.Write(hash.begin(), 32).Write(pubkey.data(), pubkey.size()).Write(sig.data(), sig.size()).Finalize(entry.begin()); - } + uint256 nonce = GetRandHash(); + // We want the nonce to be 64 bytes long to force the hasher to process + // this chunk, which makes later hash computations more efficient. We + // just write our 32-byte entropy, and then pad with 'E' for ECDSA and + // 'S' for Schnorr (followed by 0 bytes). + static constexpr unsigned char PADDING_ECDSA[32] = {'E'}; + static constexpr unsigned char PADDING_SCHNORR[32] = {'S'}; + m_salted_hasher_ecdsa.Write(nonce.begin(), 32); + m_salted_hasher_ecdsa.Write(PADDING_ECDSA, 32); + m_salted_hasher_schnorr.Write(nonce.begin(), 32); + m_salted_hasher_schnorr.Write(PADDING_SCHNORR, 32); - bool - Get(const uint256& entry, const bool erase) - { - std::shared_lock<std::shared_mutex> lock(cs_sigcache); - return setValid.contains(entry, erase); - } + const auto [num_elems, approx_size_bytes] = setValid.setup_bytes(max_size_bytes); + LogPrintf("Using %zu MiB out of %zu MiB requested for signature cache, able to store %zu elements\n", + approx_size_bytes >> 20, max_size_bytes >> 20, num_elems); +} - void Set(const uint256& entry) - { - std::unique_lock<std::shared_mutex> lock(cs_sigcache); - setValid.insert(entry); - } - std::optional<std::pair<uint32_t, size_t>> setup_bytes(size_t n) - { - return setValid.setup_bytes(n); - } -}; +void SignatureCache::ComputeEntryECDSA(uint256& entry, const uint256& hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) const +{ + CSHA256 hasher = m_salted_hasher_ecdsa; + hasher.Write(hash.begin(), 32).Write(pubkey.data(), pubkey.size()).Write(vchSig.data(), vchSig.size()).Finalize(entry.begin()); +} -/* In previous versions of this code, signatureCache was a local static variable - * in CachingTransactionSignatureChecker::VerifySignature. We initialize - * signatureCache outside of VerifySignature to avoid the atomic operation per - * call overhead associated with local static variables even though - * signatureCache could be made local to VerifySignature. -*/ -static CSignatureCache signatureCache; -} // namespace +void SignatureCache::ComputeEntrySchnorr(uint256& entry, const uint256& hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey) const +{ + CSHA256 hasher = m_salted_hasher_schnorr; + hasher.Write(hash.begin(), 32).Write(pubkey.data(), pubkey.size()).Write(sig.data(), sig.size()).Finalize(entry.begin()); +} -// To be called once in AppInitMain/BasicTestingSetup to initialize the -// signatureCache. 
-bool InitSignatureCache(size_t max_size_bytes) +bool SignatureCache::Get(const uint256& entry, const bool erase) { - auto setup_results = signatureCache.setup_bytes(max_size_bytes); - if (!setup_results) return false; + std::shared_lock<std::shared_mutex> lock(cs_sigcache); + return setValid.contains(entry, erase); +} - const auto [num_elems, approx_size_bytes] = *setup_results; - LogPrintf("Using %zu MiB out of %zu MiB requested for signature cache, able to store %zu elements\n", - approx_size_bytes >> 20, max_size_bytes >> 20, num_elems); - return true; +void SignatureCache::Set(const uint256& entry) +{ + std::unique_lock<std::shared_mutex> lock(cs_sigcache); + setValid.insert(entry); } bool CachingTransactionSignatureChecker::VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const { uint256 entry; - signatureCache.ComputeEntryECDSA(entry, sighash, vchSig, pubkey); - if (signatureCache.Get(entry, !store)) + m_signature_cache.ComputeEntryECDSA(entry, sighash, vchSig, pubkey); + if (m_signature_cache.Get(entry, !store)) return true; if (!TransactionSignatureChecker::VerifyECDSASignature(vchSig, pubkey, sighash)) return false; if (store) - signatureCache.Set(entry); + m_signature_cache.Set(entry); return true; } bool CachingTransactionSignatureChecker::VerifySchnorrSignature(Span<const unsigned char> sig, const XOnlyPubKey& pubkey, const uint256& sighash) const { uint256 entry; - signatureCache.ComputeEntrySchnorr(entry, sighash, sig, pubkey); - if (signatureCache.Get(entry, !store)) return true; + m_signature_cache.ComputeEntrySchnorr(entry, sighash, sig, pubkey); + if (m_signature_cache.Get(entry, !store)) return true; if (!TransactionSignatureChecker::VerifySchnorrSignature(sig, pubkey, sighash)) return false; - if (store) signatureCache.Set(entry); + if (store) m_signature_cache.Set(entry); return true; } diff --git a/src/script/sigcache.h b/src/script/sigcache.h index d33d60d5bc..76802e6a7c 100644 --- a/src/script/sigcache.h +++ b/src/script/sigcache.h @@ -6,32 +6,71 @@ #ifndef BITCOIN_SCRIPT_SIGCACHE_H #define BITCOIN_SCRIPT_SIGCACHE_H +#include <consensus/amount.h> +#include <crypto/sha256.h> +#include <cuckoocache.h> #include <script/interpreter.h> #include <span.h> +#include <uint256.h> #include <util/hasher.h> -#include <optional> +#include <cstddef> +#include <shared_mutex> #include <vector> +class CPubKey; +class CTransaction; +class XOnlyPubKey; + // DoS prevention: limit cache size to 32MiB (over 1000000 entries on 64-bit // systems). Due to how we count cache size, actual memory usage is slightly // more (~32.25 MiB) -static constexpr size_t DEFAULT_MAX_SIG_CACHE_BYTES{32 << 20}; +static constexpr size_t DEFAULT_VALIDATION_CACHE_BYTES{32 << 20}; +static constexpr size_t DEFAULT_SIGNATURE_CACHE_BYTES{DEFAULT_VALIDATION_CACHE_BYTES / 2}; +static constexpr size_t DEFAULT_SCRIPT_EXECUTION_CACHE_BYTES{DEFAULT_VALIDATION_CACHE_BYTES / 2}; +static_assert(DEFAULT_VALIDATION_CACHE_BYTES == DEFAULT_SIGNATURE_CACHE_BYTES + DEFAULT_SCRIPT_EXECUTION_CACHE_BYTES); -class CPubKey; +/** + * Valid signature cache, to avoid doing expensive ECDSA signature checking + * twice for every transaction (once when accepted into memory pool, and + * again when accepted into the block chain) + */ +class SignatureCache +{ +private: + //! 
Entries are SHA256(nonce || 'E' or 'S' || 31 zero bytes || signature hash || public key || signature): + CSHA256 m_salted_hasher_ecdsa; + CSHA256 m_salted_hasher_schnorr; + typedef CuckooCache::cache<uint256, SignatureCacheHasher> map_type; + map_type setValid; + std::shared_mutex cs_sigcache; + +public: + SignatureCache(size_t max_size_bytes); + + SignatureCache(const SignatureCache&) = delete; + SignatureCache& operator=(const SignatureCache&) = delete; + + void ComputeEntryECDSA(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) const; + + void ComputeEntrySchnorr(uint256& entry, const uint256 &hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey) const; + + bool Get(const uint256& entry, const bool erase); + + void Set(const uint256& entry); +}; class CachingTransactionSignatureChecker : public TransactionSignatureChecker { private: bool store; + SignatureCache& m_signature_cache; public: - CachingTransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, bool storeIn, PrecomputedTransactionData& txdataIn) : TransactionSignatureChecker(txToIn, nInIn, amountIn, txdataIn, MissingDataBehavior::ASSERT_FAIL), store(storeIn) {} + CachingTransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, bool storeIn, SignatureCache& signature_cache, PrecomputedTransactionData& txdataIn) : TransactionSignatureChecker(txToIn, nInIn, amountIn, txdataIn, MissingDataBehavior::ASSERT_FAIL), store(storeIn), m_signature_cache(signature_cache) {} bool VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const override; bool VerifySchnorrSignature(Span<const unsigned char> sig, const XOnlyPubKey& pubkey, const uint256& sighash) const override; }; -[[nodiscard]] bool InitSignatureCache(size_t max_size_bytes); - #endif // BITCOIN_SCRIPT_SIGCACHE_H diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 22ac062a63..6e26ec11e0 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -831,7 +831,7 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, } ScriptError serror = SCRIPT_ERR_OK; - if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount, txdata, MissingDataBehavior::FAIL), &serror)) { + if (!sigdata.complete && !VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount, txdata, MissingDataBehavior::FAIL), &serror)) { if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) { // Unable to sign input and verification failed (possible attempt to partially sign). input_errors[i] = Untranslated("Unable to sign input, invalid stack size (possibly missing key)"); diff --git a/src/script/sign.h b/src/script/sign.h index ace2ba7856..4edd5bf326 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -27,7 +27,7 @@ struct CMutableTransaction; /** Interface for signature creators. */ class BaseSignatureCreator { public: - virtual ~BaseSignatureCreator() {} + virtual ~BaseSignatureCreator() = default; virtual const BaseSignatureChecker& Checker() const =0; /** Create a singular (non-script) signature. 
*/ @@ -89,7 +89,7 @@ struct SignatureData { std::map<std::vector<uint8_t>, std::vector<uint8_t>> ripemd160_preimages; ///< Mapping from a RIPEMD160 hash to its preimage provided to solve a Script std::map<std::vector<uint8_t>, std::vector<uint8_t>> hash160_preimages; ///< Mapping from a HASH160 hash to its preimage provided to solve a Script - SignatureData() {} + SignatureData() = default; explicit SignatureData(const CScript& script) : scriptSig(script) {} void MergeSignatureData(SignatureData sigdata); }; diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h index 3298376389..efdfd9ee56 100644 --- a/src/script/signingprovider.h +++ b/src/script/signingprovider.h @@ -150,7 +150,7 @@ std::optional<std::vector<std::tuple<int, std::vector<unsigned char>, int>>> Inf class SigningProvider { public: - virtual ~SigningProvider() {} + virtual ~SigningProvider() = default; virtual bool GetCScript(const CScriptID &scriptid, CScript& script) const { return false; } virtual bool HaveCScript(const CScriptID &scriptid) const { return false; } virtual bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const { return false; } diff --git a/src/serialize.h b/src/serialize.h index 35519056a5..2af998f3c5 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1061,7 +1061,7 @@ protected: size_t nSize{0}; public: - SizeComputer() {} + SizeComputer() = default; void write(Span<const std::byte> src) { diff --git a/src/streams.h b/src/streams.h index 57fc600646..c2a9dea287 100644 --- a/src/streams.h +++ b/src/streams.h @@ -161,7 +161,7 @@ public: typedef vector_type::const_iterator const_iterator; typedef vector_type::reverse_iterator reverse_iterator; - explicit DataStream() {} + explicit DataStream() = default; explicit DataStream(Span<const uint8_t> sp) : DataStream{AsBytes(sp)} {} explicit DataStream(Span<const value_type> sp) : vch(sp.data(), sp.data() + sp.size()) {} diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index fe3ba38cde..01eef2b93d 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -46,9 +46,7 @@ Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): chunks_free_end.emplace(static_cast<char*>(base) + size_in, it); } -Arena::~Arena() -{ -} +Arena::~Arena() = default; void* Arena::alloc(size_t size) { diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h index 81e0df513a..2363b1e4ef 100644 --- a/src/support/lockedpool.h +++ b/src/support/lockedpool.h @@ -19,7 +19,7 @@ class LockedPageAllocator { public: - virtual ~LockedPageAllocator() {} + virtual ~LockedPageAllocator() = default; /** Allocate and lock memory pages. * If len is not a multiple of the system page size, it is rounded up. * Returns nullptr in case of allocation failure. diff --git a/src/sync.h b/src/sync.h index dc63e3f2d0..b22956ef1a 100644 --- a/src/sync.h +++ b/src/sync.h @@ -206,7 +206,7 @@ public: protected: // needed for reverse_lock - UniqueLock() { } + UniqueLock() = default; public: /** diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp index 9b8f419290..bc509d73ac 100644 --- a/src/test/blockchain_tests.cpp +++ b/src/test/blockchain_tests.cpp @@ -5,7 +5,9 @@ #include <boost/test/unit_test.hpp> #include <chain.h> +#include <node/blockstorage.h> #include <rpc/blockchain.h> +#include <sync.h> #include <test/util/setup_common.h> #include <util/string.h> @@ -76,4 +78,36 @@ BOOST_AUTO_TEST_CASE(get_difficulty_for_very_high_target) TestDifficulty(0x12345678, 5913134931067755359633408.0); } +//! 
Prune chain from height down to genesis block and check that +//! GetPruneHeight returns the correct value +static void CheckGetPruneHeight(node::BlockManager& blockman, CChain& chain, int height) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) +{ + AssertLockHeld(::cs_main); + + // Emulate pruning all blocks from `height` down to the genesis block + // by unsetting the `BLOCK_HAVE_DATA` flag from `nStatus` + for (CBlockIndex* it{chain[height]}; it != nullptr && it->nHeight > 0; it = it->pprev) { + it->nStatus &= ~BLOCK_HAVE_DATA; + } + + const auto prune_height{GetPruneHeight(blockman, chain)}; + BOOST_REQUIRE(prune_height.has_value()); + BOOST_CHECK_EQUAL(*prune_height, height); +} + +BOOST_FIXTURE_TEST_CASE(get_prune_height, TestChain100Setup) +{ + LOCK(::cs_main); + auto& chain = m_node.chainman->ActiveChain(); + auto& blockman = m_node.chainman->m_blockman; + + // Fresh chain of 100 blocks without any pruned blocks, so std::nullopt should be returned + BOOST_CHECK(!GetPruneHeight(blockman, chain).has_value()); + + // Start pruning + CheckGetPruneHeight(blockman, chain, 1); + CheckGetPruneHeight(blockman, chain, 99); + CheckGetPruneHeight(blockman, chain, 100); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index 05355fb21d..b0749c851c 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -14,31 +14,36 @@ #include <boost/test/unit_test.hpp> -std::vector<CTransactionRef> extra_txn; +const std::vector<CTransactionRef> empty_extra_txn; BOOST_FIXTURE_TEST_SUITE(blockencodings_tests, RegTestingSetup) -static CBlock BuildBlockTestCase() { - CBlock block; +static CMutableTransaction BuildTransactionTestCase() { CMutableTransaction tx; tx.vin.resize(1); tx.vin[0].scriptSig.resize(10); tx.vout.resize(1); tx.vout[0].nValue = 42; + return tx; +} + +static CBlock BuildBlockTestCase(FastRandomContext& ctx) { + CBlock block; + CMutableTransaction tx = BuildTransactionTestCase(); block.vtx.resize(3); block.vtx[0] = MakeTransactionRef(tx); block.nVersion = 42; - block.hashPrevBlock = InsecureRand256(); + block.hashPrevBlock = ctx.rand256(); block.nBits = 0x207fffff; - tx.vin[0].prevout.hash = Txid::FromUint256(InsecureRand256()); + tx.vin[0].prevout.hash = Txid::FromUint256(ctx.rand256()); tx.vin[0].prevout.n = 0; block.vtx[1] = MakeTransactionRef(tx); tx.vin.resize(10); for (size_t i = 0; i < tx.vin.size(); i++) { - tx.vin[i].prevout.hash = Txid::FromUint256(InsecureRand256()); + tx.vin[i].prevout.hash = Txid::FromUint256(ctx.rand256()); tx.vin[i].prevout.n = 0; } block.vtx[2] = MakeTransactionRef(tx); @@ -58,7 +63,8 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) { CTxMemPool& pool = *Assert(m_node.mempool); TestMemPoolEntryHelper entry; - CBlock block(BuildBlockTestCase()); + auto rand_ctx(FastRandomContext(uint256{42})); + CBlock block(BuildBlockTestCase(rand_ctx)); LOCK2(cs_main, pool.cs); pool.addUnchecked(entry.FromTx(block.vtx[2])); @@ -66,7 +72,7 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) // Do a simple ShortTxIDs RT { - CBlockHeaderAndShortTxIDs shortIDs{block}; + CBlockHeaderAndShortTxIDs shortIDs{block, rand_ctx.rand64()}; DataStream stream{}; stream << shortIDs; @@ -75,7 +81,7 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(&pool); - BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.InitData(shortIDs2, empty_extra_txn) == READ_STATUS_OK); BOOST_CHECK( partialBlock.IsTxAvailable(0)); 
BOOST_CHECK(!partialBlock.IsTxAvailable(1)); BOOST_CHECK( partialBlock.IsTxAvailable(2)); @@ -123,8 +129,8 @@ public: stream << orig; stream >> *this; } - explicit TestHeaderAndShortIDs(const CBlock& block) : - TestHeaderAndShortIDs(CBlockHeaderAndShortTxIDs{block}) {} + explicit TestHeaderAndShortIDs(const CBlock& block, FastRandomContext& ctx) : + TestHeaderAndShortIDs(CBlockHeaderAndShortTxIDs{block, ctx.rand64()}) {} uint64_t GetShortID(const Wtxid& txhash) const { DataStream stream{}; @@ -141,7 +147,8 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) { CTxMemPool& pool = *Assert(m_node.mempool); TestMemPoolEntryHelper entry; - CBlock block(BuildBlockTestCase()); + auto rand_ctx(FastRandomContext(uint256{42})); + CBlock block(BuildBlockTestCase(rand_ctx)); LOCK2(cs_main, pool.cs); pool.addUnchecked(entry.FromTx(block.vtx[2])); @@ -151,7 +158,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) // Test with pre-forwarding tx 1, but not coinbase { - TestHeaderAndShortIDs shortIDs(block); + TestHeaderAndShortIDs shortIDs(block, rand_ctx); shortIDs.prefilledtxn.resize(1); shortIDs.prefilledtxn[0] = {1, block.vtx[1]}; shortIDs.shorttxids.resize(2); @@ -165,7 +172,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(&pool); - BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.InitData(shortIDs2, empty_extra_txn) == READ_STATUS_OK); BOOST_CHECK(!partialBlock.IsTxAvailable(0)); BOOST_CHECK( partialBlock.IsTxAvailable(1)); BOOST_CHECK( partialBlock.IsTxAvailable(2)); @@ -211,7 +218,8 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) { CTxMemPool& pool = *Assert(m_node.mempool); TestMemPoolEntryHelper entry; - CBlock block(BuildBlockTestCase()); + auto rand_ctx(FastRandomContext(uint256{42})); + CBlock block(BuildBlockTestCase(rand_ctx)); LOCK2(cs_main, pool.cs); pool.addUnchecked(entry.FromTx(block.vtx[1])); @@ -221,7 +229,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) // Test with pre-forwarding coinbase + tx 2 with tx 1 in mempool { - TestHeaderAndShortIDs shortIDs(block); + TestHeaderAndShortIDs shortIDs(block, rand_ctx); shortIDs.prefilledtxn.resize(2); shortIDs.prefilledtxn[0] = {0, block.vtx[0]}; shortIDs.prefilledtxn[1] = {1, block.vtx[2]}; // id == 1 as it is 1 after index 1 @@ -235,7 +243,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(&pool); - BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.InitData(shortIDs2, empty_extra_txn) == READ_STATUS_OK); BOOST_CHECK( partialBlock.IsTxAvailable(0)); BOOST_CHECK( partialBlock.IsTxAvailable(1)); BOOST_CHECK( partialBlock.IsTxAvailable(2)); @@ -261,17 +269,14 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) { CTxMemPool& pool = *Assert(m_node.mempool); - CMutableTransaction coinbase; - coinbase.vin.resize(1); - coinbase.vin[0].scriptSig.resize(10); - coinbase.vout.resize(1); - coinbase.vout[0].nValue = 42; + CMutableTransaction coinbase = BuildTransactionTestCase(); CBlock block; + auto rand_ctx(FastRandomContext(uint256{42})); block.vtx.resize(1); block.vtx[0] = MakeTransactionRef(std::move(coinbase)); block.nVersion = 42; - block.hashPrevBlock = InsecureRand256(); + block.hashPrevBlock = rand_ctx.rand256(); block.nBits = 0x207fffff; bool mutated; @@ -281,7 +286,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) // Test simple header round-trip with only 
coinbase { - CBlockHeaderAndShortTxIDs shortIDs{block}; + CBlockHeaderAndShortTxIDs shortIDs{block, rand_ctx.rand64()}; DataStream stream{}; stream << shortIDs; @@ -290,7 +295,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) stream >> shortIDs2; PartiallyDownloadedBlock partialBlock(&pool); - BOOST_CHECK(partialBlock.InitData(shortIDs2, extra_txn) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.InitData(shortIDs2, empty_extra_txn) == READ_STATUS_OK); BOOST_CHECK(partialBlock.IsTxAvailable(0)); CBlock block2; @@ -302,6 +307,53 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) } } +BOOST_AUTO_TEST_CASE(ReceiveWithExtraTransactions) { + CTxMemPool& pool = *Assert(m_node.mempool); + TestMemPoolEntryHelper entry; + auto rand_ctx(FastRandomContext(uint256{42})); + + CMutableTransaction mtx = BuildTransactionTestCase(); + mtx.vin[0].prevout.hash = Txid::FromUint256(rand_ctx.rand256()); + mtx.vin[0].prevout.n = 0; + const CTransactionRef non_block_tx = MakeTransactionRef(std::move(mtx)); + + CBlock block(BuildBlockTestCase(rand_ctx)); + std::vector<CTransactionRef> extra_txn; + extra_txn.resize(10); + + LOCK2(cs_main, pool.cs); + pool.addUnchecked(entry.FromTx(block.vtx[2])); + BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 0); + // Ensure the non_block_tx is actually not in the block + for (const auto &block_tx : block.vtx) { + BOOST_CHECK_NE(block_tx->GetHash(), non_block_tx->GetHash()); + } + // Ensure block.vtx[1] is not in pool + BOOST_CHECK_EQUAL(pool.get(block.vtx[1]->GetHash()), nullptr); + + { + const CBlockHeaderAndShortTxIDs cmpctblock{block, rand_ctx.rand64()}; + PartiallyDownloadedBlock partial_block(&pool); + PartiallyDownloadedBlock partial_block_with_extra(&pool); + + BOOST_CHECK(partial_block.InitData(cmpctblock, extra_txn) == READ_STATUS_OK); + BOOST_CHECK( partial_block.IsTxAvailable(0)); + BOOST_CHECK(!partial_block.IsTxAvailable(1)); + BOOST_CHECK( partial_block.IsTxAvailable(2)); + + // Add an unrelated tx to extra_txn: + extra_txn[0] = non_block_tx; + // and a tx from the block that's not in the mempool: + extra_txn[1] = block.vtx[1]; + + BOOST_CHECK(partial_block_with_extra.InitData(cmpctblock, extra_txn) == READ_STATUS_OK); + BOOST_CHECK(partial_block_with_extra.IsTxAvailable(0)); + // This transaction is now available via extra_txn: + BOOST_CHECK(partial_block_with_extra.IsTxAvailable(1)); + BOOST_CHECK(partial_block_with_extra.IsTxAvailable(2)); + } +} + BOOST_AUTO_TEST_CASE(TransactionsRequestSerializationTest) { BlockTransactionsRequest req1; req1.blockhash = InsecureRand256(); diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp index 9eb7acc3ca..121f00bd25 100644 --- a/src/test/blockmanager_tests.cpp +++ b/src/test/blockmanager_tests.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
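Illustrative sketch (not from the diff): the blockencodings test changes above share one pattern, shown here assuming `block`, `pool` and `extra_txn` are set up as in those tests and the usual mempool locks are held; `RelayCompact` is a hypothetical helper name.

    #include <blockencodings.h>
    #include <random.h>
    #include <txmempool.h>

    ReadStatus RelayCompact(const CBlock& block, CTxMemPool& pool,
                            const std::vector<CTransactionRef>& extra_txn)
    {
        // A fixed seed keeps the short IDs reproducible across test runs.
        FastRandomContext rand_ctx{uint256{42}};
        // The short-ID nonce is now passed in explicitly by the caller.
        CBlockHeaderAndShortTxIDs short_ids{block, rand_ctx.rand64()};
        PartiallyDownloadedBlock partial{&pool};
        // extra_txn can supply transactions that are missing from the mempool.
        return partial.InitData(short_ids, extra_txn);
    }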
+#include <chain.h> #include <chainparams.h> #include <clientversion.h> #include <node/blockstorage.h> @@ -113,7 +114,7 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup) }; // 1) Return genesis block when all blocks are available - BOOST_CHECK_EQUAL(blockman.GetFirstStoredBlock(tip), chainman->ActiveChain()[0]); + BOOST_CHECK_EQUAL(blockman.GetFirstBlock(tip, BLOCK_HAVE_DATA), chainman->ActiveChain()[0]); BOOST_CHECK(blockman.CheckBlockDataAvailability(tip, *chainman->ActiveChain()[0])); // 2) Check lower_block when all blocks are available @@ -127,7 +128,7 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup) func_prune_blocks(last_pruned_block); // 3) The last block not pruned is in-between upper-block and the genesis block - BOOST_CHECK_EQUAL(blockman.GetFirstStoredBlock(tip), first_available_block); + BOOST_CHECK_EQUAL(blockman.GetFirstBlock(tip, BLOCK_HAVE_DATA), first_available_block); BOOST_CHECK(blockman.CheckBlockDataAvailability(tip, *first_available_block)); BOOST_CHECK(!blockman.CheckBlockDataAvailability(tip, *last_pruned_block)); } diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp index cbf85277a8..6699afdbfa 100644 --- a/src/test/bloom_tests.cpp +++ b/src/test/bloom_tests.cpp @@ -463,8 +463,7 @@ static std::vector<unsigned char> RandomData() BOOST_AUTO_TEST_CASE(rolling_bloom) { - SeedInsecureRand(SeedRand::ZEROS); - g_mock_deterministic_tests = true; + SeedRandomForTest(SeedRand::ZEROS); // last-100-entry, 1% false positive: CRollingBloomFilter rb1(100, 0.01); @@ -491,7 +490,7 @@ BOOST_AUTO_TEST_CASE(rolling_bloom) ++nHits; } // Expect about 100 hits - BOOST_CHECK_EQUAL(nHits, 75U); + BOOST_CHECK_EQUAL(nHits, 71U); BOOST_CHECK(rb1.contains(data[DATASIZE-1])); rb1.reset(); @@ -519,7 +518,7 @@ BOOST_AUTO_TEST_CASE(rolling_bloom) ++nHits; } // Expect about 5 false positives - BOOST_CHECK_EQUAL(nHits, 6U); + BOOST_CHECK_EQUAL(nHits, 3U); // last-1000-entry, 0.01% false positive: CRollingBloomFilter rb2(1000, 0.001); @@ -530,7 +529,6 @@ BOOST_AUTO_TEST_CASE(rolling_bloom) for (int i = 0; i < DATASIZE; i++) { BOOST_CHECK(rb2.contains(data[i])); } - g_mock_deterministic_tests = false; } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp index 023a5e8e70..7810d91a77 100644 --- a/src/test/checkqueue_tests.cpp +++ b/src/test/checkqueue_tests.cpp @@ -28,7 +28,7 @@ struct NoLockLoggingTestingSetup : public TestingSetup { NoLockLoggingTestingSetup() #ifdef DEBUG_LOCKCONTENTION - : TestingSetup{ChainType::MAIN, /*extra_args=*/{"-debugexclude=lock"}} {} + : TestingSetup{ChainType::MAIN, {.extra_args = { "-debugexclude=lock" } }} {} #else : TestingSetup{ChainType::MAIN} {} #endif diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index b6d3e7d567..a992e2fa03 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -307,8 +307,7 @@ UtxoData::iterator FindRandomFrom(const std::set<COutPoint> &utxoSet) { // has the expected effect (the other duplicate is overwritten at all cache levels) BOOST_AUTO_TEST_CASE(updatecoins_simulation_test) { - SeedInsecureRand(SeedRand::ZEROS); - g_mock_deterministic_tests = true; + SeedRandomForTest(SeedRand::ZEROS); bool spent_a_duplicate_coinbase = false; // A simple map to track what we expect the cache stack to represent. @@ -496,8 +495,6 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test) // Verify coverage. 
BOOST_CHECK(spent_a_duplicate_coinbase); - - g_mock_deterministic_tests = false; } BOOST_AUTO_TEST_CASE(ccoins_serialization) diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp index 46acc6fc9f..d78957e35a 100644 --- a/src/test/crypto_tests.cpp +++ b/src/test/crypto_tests.cpp @@ -1195,7 +1195,7 @@ BOOST_AUTO_TEST_CASE(muhash_tests) uint256 res; int table[4]; for (int i = 0; i < 4; ++i) { - table[i] = g_insecure_rand_ctx.randbits(3); + table[i] = g_insecure_rand_ctx.randbits<3>(); } for (int order = 0; order < 4; ++order) { MuHash3072 acc; @@ -1215,8 +1215,8 @@ BOOST_AUTO_TEST_CASE(muhash_tests) } } - MuHash3072 x = FromInt(g_insecure_rand_ctx.randbits(4)); // x=X - MuHash3072 y = FromInt(g_insecure_rand_ctx.randbits(4)); // x=X, y=Y + MuHash3072 x = FromInt(g_insecure_rand_ctx.randbits<4>()); // x=X + MuHash3072 y = FromInt(g_insecure_rand_ctx.randbits<4>()); // x=X, y=Y MuHash3072 z; // x=X, y=Y, z=1 z *= x; // x=X, y=Y, z=X z *= y; // x=X, y=Y, z=X*Y diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp index eafbcf5681..fc22daeb57 100644 --- a/src/test/cuckoocache_tests.cpp +++ b/src/test/cuckoocache_tests.cpp @@ -33,11 +33,11 @@ BOOST_AUTO_TEST_SUITE(cuckoocache_tests); /* Test that no values not inserted into the cache are read out of it. * - * There are no repeats in the first 200000 insecure_GetRandHash calls + * There are no repeats in the first 200000 InsecureRand256() calls */ BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes) { - SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); CuckooCache::cache<uint256, SignatureCacheHasher> cc{}; size_t megabytes = 4; cc.setup_bytes(megabytes << 20); @@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes) template <typename Cache> static double test_cache(size_t megabytes, double load) { - SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); std::vector<uint256> hashes; Cache set{}; size_t bytes = megabytes * (1 << 20); @@ -126,7 +126,7 @@ template <typename Cache> static void test_cache_erase(size_t megabytes) { double load = 1; - SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); std::vector<uint256> hashes; Cache set{}; size_t bytes = megabytes * (1 << 20); @@ -189,7 +189,7 @@ template <typename Cache> static void test_cache_erase_parallel(size_t megabytes) { double load = 1; - SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); std::vector<uint256> hashes; Cache set{}; size_t bytes = megabytes * (1 << 20); @@ -293,7 +293,7 @@ static void test_cache_generations() // iterations with non-deterministic values, so it isn't "overfit" to the // specific entropy in FastRandomContext(true) and implementation of the // cache. - SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); // block_activity models a chunk of network activity. n_insert elements are // added to the cache. 
The first and last n/4 are stored for removal later diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 8a54cc656d..dbec2bc858 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -124,7 +124,7 @@ public: explicit AddrManDeterministic(const NetGroupManager& netgroupman, FuzzedDataProvider& fuzzed_data_provider) : AddrMan(netgroupman, /*deterministic=*/true, GetCheckRatio()) { - WITH_LOCK(m_impl->cs, m_impl->insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)}); + WITH_LOCK(m_impl->cs, m_impl->insecure_rand.Reseed(ConsumeUInt256(fuzzed_data_provider))); } /** diff --git a/src/test/fuzz/bip324.cpp b/src/test/fuzz/bip324.cpp index 8210e75cee..9892e7a81c 100644 --- a/src/test/fuzz/bip324.cpp +++ b/src/test/fuzz/bip324.cpp @@ -4,11 +4,11 @@ #include <bip324.h> #include <chainparams.h> +#include <random.h> #include <span.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> -#include <test/util/xoroshiro128plusplus.h> #include <cstdint> #include <vector> @@ -56,7 +56,7 @@ FUZZ_TARGET(bip324_cipher_roundtrip, .init=Initialize) // (potentially buggy) edge cases triggered by specific values of contents/AAD, so we can avoid // reading the actual data for those from the fuzzer input (which would need large amounts of // data). - XoRoShiRo128PlusPlus rng(provider.ConsumeIntegral<uint64_t>()); + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>()); // Compare session IDs and garbage terminators. assert(initiator.GetSessionID() == responder.GetSessionID()); @@ -79,10 +79,8 @@ FUZZ_TARGET(bip324_cipher_roundtrip, .init=Initialize) unsigned length_bits = 2 * ((mode >> 5) & 7); unsigned length = provider.ConsumeIntegralInRange<unsigned>(0, (1 << length_bits) - 1); // Generate aad and content. - std::vector<std::byte> aad(aad_length); - for (auto& val : aad) val = std::byte{(uint8_t)rng()}; - std::vector<std::byte> contents(length); - for (auto& val : contents) val = std::byte{(uint8_t)rng()}; + auto aad = rng.randbytes<std::byte>(aad_length); + auto contents = rng.randbytes<std::byte>(length); // Pick sides. auto& sender{from_init ? initiator : responder}; diff --git a/src/test/fuzz/bitset.cpp b/src/test/fuzz/bitset.cpp index 7684337729..ce6be0499c 100644 --- a/src/test/fuzz/bitset.cpp +++ b/src/test/fuzz/bitset.cpp @@ -2,9 +2,9 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include <random.h> #include <span.h> #include <test/fuzz/util.h> -#include <test/util/xoroshiro128plusplus.h> #include <util/bitset.h> #include <bitset> @@ -29,7 +29,7 @@ void TestType(FuzzBufferType buffer) * bitsets and their simulations do not matter for the purpose of detecting edge cases, thus * these are taken from a deterministically-seeded RNG instead. To provide some level of * variation however, pick the seed based on the buffer size and size of the chosen bitset. */ - XoRoShiRo128PlusPlus rng(buffer.size() + 0x10000 * S::Size()); + InsecureRandomContext rng(buffer.size() + 0x10000 * S::Size()); using Sim = std::bitset<S::Size()>; // Up to 4 real BitSets (initially 2). 
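Illustrative sketch (not from the diff): several fuzz targets in this section replace the local XoRoShiRo128PlusPlus helper with InsecureRandomContext from <random.h>; the snippet below only collects the convenience calls used above, with `seed` and `len` as placeholder inputs and `ExampleDraws` a hypothetical function name.

    #include <random.h>

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<std::byte> ExampleDraws(uint64_t seed, size_t len)
    {
        InsecureRandomContext rng(seed);              // deterministic for a given fuzz input
        const bool coin{rng.randbool()};              // replaces `rng() & 1`
        const unsigned pick{rng.randrange(100U)};     // replaces `rng() % 100`
        auto buf{rng.randbytes<std::byte>(len)};      // replaces hand-rolled fill loops
        rng.fillrand(buf);                            // or fill an existing buffer in place
        (void)coin; (void)pick;
        return buf;
    }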
@@ -124,7 +124,7 @@ void TestType(FuzzBufferType buffer) sim[dest].reset(); real[dest] = S{}; for (unsigned i = 0; i < S::Size(); ++i) { - if (rng() & 1) { + if (rng.randbool()) { sim[dest][i] = true; real[dest].Set(i); } @@ -132,9 +132,9 @@ void TestType(FuzzBufferType buffer) break; } else if (dest < sim.size() && command-- == 0) { /* Assign initializer list. */ - unsigned r1 = rng() % S::Size(); - unsigned r2 = rng() % S::Size(); - unsigned r3 = rng() % S::Size(); + unsigned r1 = rng.randrange(S::Size()); + unsigned r2 = rng.randrange(S::Size()); + unsigned r3 = rng.randrange(S::Size()); compare_fn(dest); sim[dest].reset(); real[dest] = {r1, r2, r3}; @@ -166,8 +166,8 @@ void TestType(FuzzBufferType buffer) break; } else if (sim.size() < 4 && command-- == 0) { /* Construct with initializer list. */ - unsigned r1 = rng() % S::Size(); - unsigned r2 = rng() % S::Size(); + unsigned r1 = rng.randrange(S::Size()); + unsigned r2 = rng.randrange(S::Size()); sim.emplace_back(); sim.back().set(r1); sim.back().set(r2); diff --git a/src/test/fuzz/crypto_chacha20.cpp b/src/test/fuzz/crypto_chacha20.cpp index 50c77bf699..d115a2b7e1 100644 --- a/src/test/fuzz/crypto_chacha20.cpp +++ b/src/test/fuzz/crypto_chacha20.cpp @@ -3,10 +3,10 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <crypto/chacha20.h> +#include <random.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> -#include <test/util/xoroshiro128plusplus.h> #include <array> #include <cstddef> @@ -53,7 +53,7 @@ namespace once for a large block at once, and then the same data in chunks, comparing the outcome. - If UseCrypt, seeded Xoroshiro128++ output is used as input to Crypt(). + If UseCrypt, seeded InsecureRandomContext output is used as input to Crypt(). If not, Keystream() is used directly, or sequences of 0x00 are encrypted. */ template<bool UseCrypt> @@ -78,25 +78,11 @@ void ChaCha20SplitFuzz(FuzzedDataProvider& provider) data1.resize(total_bytes); data2.resize(total_bytes); - // If using Crypt(), initialize data1 and data2 with the same Xoroshiro128++ based + // If using Crypt(), initialize data1 and data2 with the same InsecureRandomContext based // stream. if constexpr (UseCrypt) { - uint64_t seed = provider.ConsumeIntegral<uint64_t>(); - XoRoShiRo128PlusPlus rng(seed); - uint64_t bytes = 0; - while (bytes < (total_bytes & ~uint64_t{7})) { - uint64_t val = rng(); - WriteLE64(UCharCast(data1.data() + bytes), val); - WriteLE64(UCharCast(data2.data() + bytes), val); - bytes += 8; - } - if (bytes < total_bytes) { - std::byte valbytes[8]; - uint64_t val = rng(); - WriteLE64(UCharCast(valbytes), val); - std::copy(valbytes, valbytes + (total_bytes - bytes), data1.data() + bytes); - std::copy(valbytes, valbytes + (total_bytes - bytes), data2.data() + bytes); - } + InsecureRandomContext(provider.ConsumeIntegral<uint64_t>()).fillrand(data1); + std::copy(data1.begin(), data1.end(), data2.begin()); } // Whether UseCrypt is used or not, the two byte arrays must match. diff --git a/src/test/fuzz/crypto_chacha20poly1305.cpp b/src/test/fuzz/crypto_chacha20poly1305.cpp new file mode 100644 index 0000000000..2b39a06094 --- /dev/null +++ b/src/test/fuzz/crypto_chacha20poly1305.cpp @@ -0,0 +1,200 @@ +// Copyright (c) 2020-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <crypto/chacha20poly1305.h> +#include <random.h> +#include <span.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cstddef> +#include <cstdint> +#include <vector> + +constexpr static inline void crypt_till_rekey(FSChaCha20Poly1305& aead, int rekey_interval, bool encrypt) +{ + for (int i = 0; i < rekey_interval; ++i) { + std::byte dummy_tag[FSChaCha20Poly1305::EXPANSION] = {{}}; + if (encrypt) { + aead.Encrypt(Span{dummy_tag}.first(0), Span{dummy_tag}.first(0), dummy_tag); + } else { + aead.Decrypt(dummy_tag, Span{dummy_tag}.first(0), Span{dummy_tag}.first(0)); + } + } +} + +FUZZ_TARGET(crypto_aeadchacha20poly1305) +{ + FuzzedDataProvider provider{buffer.data(), buffer.size()}; + + auto key = provider.ConsumeBytes<std::byte>(32); + key.resize(32); + AEADChaCha20Poly1305 aead(key); + + // Initialize RNG deterministically, to generate contents and AAD. We assume that there are no + // (potentially buggy) edge cases triggered by specific values of contents/AAD, so we can avoid + // reading the actual data for those from the fuzzer input (which would need large amounts of + // data). + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>()); + + LIMITED_WHILE(provider.ConsumeBool(), 10000) + { + // Mode: + // - Bit 0: whether to use single-plain Encrypt/Decrypt; otherwise use a split at prefix. + // - Bit 2: whether this ciphertext will be corrupted (making it the last sent one) + // - Bit 3-4: controls the maximum aad length (max 511 bytes) + // - Bit 5-7: controls the maximum content length (max 16383 bytes, for performance reasons) + unsigned mode = provider.ConsumeIntegral<uint8_t>(); + bool use_splits = mode & 1; + bool damage = mode & 4; + unsigned aad_length_bits = 3 * ((mode >> 3) & 3); + unsigned aad_length = provider.ConsumeIntegralInRange<unsigned>(0, (1 << aad_length_bits) - 1); + unsigned length_bits = 2 * ((mode >> 5) & 7); + unsigned length = provider.ConsumeIntegralInRange<unsigned>(0, (1 << length_bits) - 1); + // Generate aad and content. + auto aad = rng.randbytes<std::byte>(aad_length); + auto plain = rng.randbytes<std::byte>(length); + std::vector<std::byte> cipher(length + AEADChaCha20Poly1305::EXPANSION); + // Generate nonce + AEADChaCha20Poly1305::Nonce96 nonce = {(uint32_t)rng(), rng()}; + + if (use_splits && length > 0) { + size_t split_index = provider.ConsumeIntegralInRange<size_t>(1, length); + aead.Encrypt(Span{plain}.first(split_index), Span{plain}.subspan(split_index), aad, nonce, cipher); + } else { + aead.Encrypt(plain, aad, nonce, cipher); + } + + // Test Keystream output + std::vector<std::byte> keystream(length); + aead.Keystream(nonce, keystream); + for (size_t i = 0; i < length; ++i) { + assert((plain[i] ^ keystream[i]) == cipher[i]); + } + + std::vector<std::byte> decrypted_contents(length); + bool ok{false}; + + // damage the key + unsigned key_position = provider.ConsumeIntegralInRange<unsigned>(0, 31); + std::byte damage_val{(uint8_t)(1U << (key_position & 7))}; + std::vector<std::byte> bad_key = key; + bad_key[key_position] ^= damage_val; + + AEADChaCha20Poly1305 bad_aead(bad_key); + ok = bad_aead.Decrypt(cipher, aad, nonce, decrypted_contents); + assert(!ok); + + // Optionally damage 1 bit in either the cipher (corresponding to a change in transit) + // or the aad (to make sure that decryption will fail if the AAD mismatches). 
+ if (damage) { + unsigned damage_bit = provider.ConsumeIntegralInRange<unsigned>(0, (cipher.size() + aad.size()) * 8U - 1U); + unsigned damage_pos = damage_bit >> 3; + std::byte damage_val{(uint8_t)(1U << (damage_bit & 7))}; + if (damage_pos >= cipher.size()) { + aad[damage_pos - cipher.size()] ^= damage_val; + } else { + cipher[damage_pos] ^= damage_val; + } + } + + if (use_splits && length > 0) { + size_t split_index = provider.ConsumeIntegralInRange<size_t>(1, length); + ok = aead.Decrypt(cipher, aad, nonce, Span{decrypted_contents}.first(split_index), Span{decrypted_contents}.subspan(split_index)); + } else { + ok = aead.Decrypt(cipher, aad, nonce, decrypted_contents); + } + + // Decryption *must* fail if the packet was damaged, and succeed if it wasn't. + assert(!ok == damage); + if (!ok) break; + assert(decrypted_contents == plain); + } +} + +FUZZ_TARGET(crypto_fschacha20poly1305) +{ + FuzzedDataProvider provider{buffer.data(), buffer.size()}; + + uint32_t rekey_interval = provider.ConsumeIntegralInRange<size_t>(32, 512); + auto key = provider.ConsumeBytes<std::byte>(32); + key.resize(32); + FSChaCha20Poly1305 enc_aead(key, rekey_interval); + FSChaCha20Poly1305 dec_aead(key, rekey_interval); + + // Initialize RNG deterministically, to generate contents and AAD. We assume that there are no + // (potentially buggy) edge cases triggered by specific values of contents/AAD, so we can avoid + // reading the actual data for those from the fuzzer input (which would need large amounts of + // data). + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>()); + + LIMITED_WHILE(provider.ConsumeBool(), 10000) + { + // Mode: + // - Bit 0: whether to use single-plain Encrypt/Decrypt; otherwise use a split at prefix. + // - Bit 2: whether this ciphertext will be corrupted (making it the last sent one) + // - Bit 3-4: controls the maximum aad length (max 511 bytes) + // - Bit 5-7: controls the maximum content length (max 16383 bytes, for performance reasons) + unsigned mode = provider.ConsumeIntegral<uint8_t>(); + bool use_splits = mode & 1; + bool damage = mode & 4; + unsigned aad_length_bits = 3 * ((mode >> 3) & 3); + unsigned aad_length = provider.ConsumeIntegralInRange<unsigned>(0, (1 << aad_length_bits) - 1); + unsigned length_bits = 2 * ((mode >> 5) & 7); + unsigned length = provider.ConsumeIntegralInRange<unsigned>(0, (1 << length_bits) - 1); + // Generate aad and content. 
+ auto aad = rng.randbytes<std::byte>(aad_length); + auto plain = rng.randbytes<std::byte>(length); + std::vector<std::byte> cipher(length + FSChaCha20Poly1305::EXPANSION); + + crypt_till_rekey(enc_aead, rekey_interval, true); + if (use_splits && length > 0) { + size_t split_index = provider.ConsumeIntegralInRange<size_t>(1, length); + enc_aead.Encrypt(Span{plain}.first(split_index), Span{plain}.subspan(split_index), aad, cipher); + } else { + enc_aead.Encrypt(plain, aad, cipher); + } + + std::vector<std::byte> decrypted_contents(length); + bool ok{false}; + + // damage the key + unsigned key_position = provider.ConsumeIntegralInRange<unsigned>(0, 31); + std::byte damage_val{(uint8_t)(1U << (key_position & 7))}; + std::vector<std::byte> bad_key = key; + bad_key[key_position] ^= damage_val; + + FSChaCha20Poly1305 bad_fs_aead(bad_key, rekey_interval); + crypt_till_rekey(bad_fs_aead, rekey_interval, false); + ok = bad_fs_aead.Decrypt(cipher, aad, decrypted_contents); + assert(!ok); + + // Optionally damage 1 bit in either the cipher (corresponding to a change in transit) + // or the aad (to make sure that decryption will fail if the AAD mismatches). + if (damage) { + unsigned damage_bit = provider.ConsumeIntegralInRange<unsigned>(0, (cipher.size() + aad.size()) * 8U - 1U); + unsigned damage_pos = damage_bit >> 3; + std::byte damage_val{(uint8_t)(1U << (damage_bit & 7))}; + if (damage_pos >= cipher.size()) { + aad[damage_pos - cipher.size()] ^= damage_val; + } else { + cipher[damage_pos] ^= damage_val; + } + } + + crypt_till_rekey(dec_aead, rekey_interval, false); + if (use_splits && length > 0) { + size_t split_index = provider.ConsumeIntegralInRange<size_t>(1, length); + ok = dec_aead.Decrypt(cipher, aad, Span{decrypted_contents}.first(split_index), Span{decrypted_contents}.subspan(split_index)); + } else { + ok = dec_aead.Decrypt(cipher, aad, decrypted_contents); + } + + // Decryption *must* fail if the packet was damaged, and succeed if it wasn't. + assert(!ok == damage); + if (!ok) break; + assert(decrypted_contents == plain); + } +} diff --git a/src/test/fuzz/descriptor_parse.cpp b/src/test/fuzz/descriptor_parse.cpp index b9a5560ffb..6a3f4d6dfe 100644 --- a/src/test/fuzz/descriptor_parse.cpp +++ b/src/test/fuzz/descriptor_parse.cpp @@ -72,6 +72,14 @@ FUZZ_TARGET(mocked_descriptor_parse, .init = initialize_mocked_descriptor_parse) // out strings which could correspond to a descriptor containing a too large derivation path. if (HasDeepDerivPath(buffer)) return; + // Some fragments can take a virtually unlimited number of sub-fragments (thresh, multi_a) but + // may perform quadratic operations on them. Limit the number of sub-fragments per fragment. + if (HasTooManySubFrag(buffer)) return; + + // The script building logic performs quadratic copies in the number of nested wrappers. Limit + // the number of nested wrappers per fragment. + if (HasTooManyWrappers(buffer)) return; + const std::string mocked_descriptor{buffer.begin(), buffer.end()}; if (const auto descriptor = MOCKED_DESC_CONVERTER.GetDescriptor(mocked_descriptor)) { FlatSigningProvider signing_provider; @@ -83,8 +91,10 @@ FUZZ_TARGET(mocked_descriptor_parse, .init = initialize_mocked_descriptor_parse) FUZZ_TARGET(descriptor_parse, .init = initialize_descriptor_parse) { - // See comment above for rationale. + // See comments above for rationales. 
if (HasDeepDerivPath(buffer)) return; + if (HasTooManySubFrag(buffer)) return; + if (HasTooManyWrappers(buffer)) return; const std::string descriptor(buffer.begin(), buffer.end()); FlatSigningProvider signing_provider; diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index c1c9945a04..96283a3e15 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -6,6 +6,7 @@ #include <netaddress.h> #include <netbase.h> +#include <test/util/random.h> #include <test/util/setup_common.h> #include <util/check.h> #include <util/fs.h> @@ -72,8 +73,8 @@ auto& FuzzTargets() void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, FuzzTargetOptions opts) { - const auto it_ins{FuzzTargets().try_emplace(name, FuzzTarget /* temporary can be dropped after clang-16 */ {std::move(target), std::move(opts)})}; - Assert(it_ins.second); + const auto [it, ins]{FuzzTargets().try_emplace(name, FuzzTarget /* temporary can be dropped after Apple-Clang-16 ? */ {std::move(target), std::move(opts)})}; + Assert(ins); } static std::string_view g_fuzz_target; @@ -101,6 +102,12 @@ void ResetCoverageCounters() {} void initialize() { + // By default, make the RNG deterministic with a fixed seed. This will affect all + // randomness during the fuzz test, except: + // - GetStrongRandBytes(), which is used for the creation of private key material. + // - Creating a BasicTestingSetup or derived class will switch to a random seed. + SeedRandomForTest(SeedRand::ZEROS); + // Terminate immediately if a fuzzing harness ever tries to create a socket. // Individual tests can override this by pointing CreateSock to a mocked alternative. CreateSock = [](int, int, int) -> std::unique_ptr<Sock> { std::terminate(); }; diff --git a/src/test/fuzz/mini_miner.cpp b/src/test/fuzz/mini_miner.cpp index 3a1663364f..51de4d0166 100644 --- a/src/test/fuzz/mini_miner.cpp +++ b/src/test/fuzz/mini_miner.cpp @@ -188,9 +188,9 @@ FUZZ_TARGET(mini_miner_selection, .init = initialize_miner) auto mock_template_txids = mini_miner.GetMockTemplateTxids(); // MiniMiner doesn't add a coinbase tx. 
assert(mock_template_txids.count(blocktemplate->block.vtx[0]->GetHash()) == 0); - mock_template_txids.emplace(blocktemplate->block.vtx[0]->GetHash()); - assert(mock_template_txids.size() <= blocktemplate->block.vtx.size()); - assert(mock_template_txids.size() >= blocktemplate->block.vtx.size()); + auto [iter, new_entry] = mock_template_txids.emplace(blocktemplate->block.vtx[0]->GetHash()); + assert(new_entry); + assert(mock_template_txids.size() == blocktemplate->block.vtx.size()); for (const auto& tx : blocktemplate->block.vtx) { assert(mock_template_txids.count(tx->GetHash())); diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp index 767238d103..93f77b6e5b 100644 --- a/src/test/fuzz/p2p_transport_serialization.cpp +++ b/src/test/fuzz/p2p_transport_serialization.cpp @@ -10,7 +10,6 @@ #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> -#include <test/util/xoroshiro128plusplus.h> #include <util/chaintype.h> #include <cassert> @@ -104,7 +103,7 @@ FUZZ_TARGET(p2p_transport_serialization, .init = initialize_p2p_transport_serial namespace { -template<typename R> +template<RandomNumberGenerator R> void SimulationTest(Transport& initiator, Transport& responder, R& rng, FuzzedDataProvider& provider) { // Simulation test with two Transport objects, which send messages to each other, with @@ -165,8 +164,7 @@ void SimulationTest(Transport& initiator, Transport& responder, R& rng, FuzzedDa // Determine size of message to send (limited to 75 kB for performance reasons). size_t size = provider.ConsumeIntegralInRange<uint32_t>(0, 75000); // Get payload of message from RNG. - msg.data.resize(size); - for (auto& v : msg.data) v = uint8_t(rng()); + msg.data = rng.randbytes(size); // Return. return msg; }; @@ -337,7 +335,7 @@ std::unique_ptr<Transport> MakeV1Transport(NodeId nodeid) noexcept return std::make_unique<V1Transport>(nodeid); } -template<typename RNG> +template<RandomNumberGenerator RNG> std::unique_ptr<Transport> MakeV2Transport(NodeId nodeid, bool initiator, RNG& rng, FuzzedDataProvider& provider) { // Retrieve key @@ -353,8 +351,7 @@ std::unique_ptr<Transport> MakeV2Transport(NodeId nodeid, bool initiator, RNG& r } else { // If it's longer, generate it from the RNG. This avoids having large amounts of // (hopefully) irrelevant data needing to be stored in the fuzzer data. - garb.resize(garb_len); - for (auto& v : garb) v = uint8_t(rng()); + garb = rng.randbytes(garb_len); } // Retrieve entropy auto ent = provider.ConsumeBytes<std::byte>(32); @@ -378,7 +375,7 @@ FUZZ_TARGET(p2p_transport_bidirectional, .init = initialize_p2p_transport_serial { // Test with two V1 transports talking to each other. FuzzedDataProvider provider{buffer.data(), buffer.size()}; - XoRoShiRo128PlusPlus rng(provider.ConsumeIntegral<uint64_t>()); + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>()); auto t1 = MakeV1Transport(NodeId{0}); auto t2 = MakeV1Transport(NodeId{1}); if (!t1 || !t2) return; @@ -389,7 +386,7 @@ FUZZ_TARGET(p2p_transport_bidirectional_v2, .init = initialize_p2p_transport_ser { // Test with two V2 transports talking to each other. 
FuzzedDataProvider provider{buffer.data(), buffer.size()}; - XoRoShiRo128PlusPlus rng(provider.ConsumeIntegral<uint64_t>()); + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>()); auto t1 = MakeV2Transport(NodeId{0}, true, rng, provider); auto t2 = MakeV2Transport(NodeId{1}, false, rng, provider); if (!t1 || !t2) return; @@ -400,7 +397,7 @@ FUZZ_TARGET(p2p_transport_bidirectional_v1v2, .init = initialize_p2p_transport_s { // Test with a V1 initiator talking to a V2 responder. FuzzedDataProvider provider{buffer.data(), buffer.size()}; - XoRoShiRo128PlusPlus rng(provider.ConsumeIntegral<uint64_t>()); + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>()); auto t1 = MakeV1Transport(NodeId{0}); auto t2 = MakeV2Transport(NodeId{1}, false, rng, provider); if (!t1 || !t2) return; diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp index 53aedf23ea..652c7a7609 100644 --- a/src/test/fuzz/package_eval.cpp +++ b/src/test/fuzz/package_eval.cpp @@ -6,7 +6,7 @@ #include <node/context.h> #include <node/mempool_args.h> #include <node/miner.h> -#include <policy/v3_policy.h> +#include <policy/truc_policy.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> @@ -225,7 +225,7 @@ FUZZ_TARGET(tx_package_eval, .init = initialize_tx_pool) tx_mut.vin.emplace_back(); } - // Make a p2pk output to make sigops adjusted vsize to violate v3, potentially, which is never spent + // Make a p2pk output to make sigops adjusted vsize to violate TRUC rules, potentially, which is never spent if (last_tx && amount_in > 1000 && fuzzed_data_provider.ConsumeBool()) { tx_mut.vout.emplace_back(1000, CScript() << std::vector<unsigned char>(33, 0x02) << OP_CHECKSIG); // Don't add any other outputs. @@ -320,7 +320,7 @@ FUZZ_TARGET(tx_package_eval, .init = initialize_tx_pool) Assert(result_package.m_tx_results.size() == txs.size() || result_package.m_tx_results.empty()); } - CheckMempoolV3Invariants(tx_pool); + CheckMempoolTRUCInvariants(tx_pool); } node.validation_signals->UnregisterSharedValidationInterface(outpoints_updater); diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index 791d457710..77952cab9e 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -52,7 +52,7 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) return; } - CBlockHeaderAndShortTxIDs cmpctblock{*block}; + CBlockHeaderAndShortTxIDs cmpctblock{*block, fuzzed_data_provider.ConsumeIntegral<uint64_t>()}; bilingual_str error; CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node), error}; diff --git a/src/test/fuzz/poolresource.cpp b/src/test/fuzz/poolresource.cpp index f764d9f8db..28bf7175c0 100644 --- a/src/test/fuzz/poolresource.cpp +++ b/src/test/fuzz/poolresource.cpp @@ -2,13 +2,13 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#include <random.h> #include <span.h> #include <support/allocators/pool.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> #include <test/util/poolresourcetester.h> -#include <test/util/xoroshiro128plusplus.h> #include <cstdint> #include <tuple> @@ -71,41 +71,14 @@ public: void RandomContentFill(Entry& entry) { - XoRoShiRo128PlusPlus rng(entry.seed); - auto ptr = entry.span.data(); - auto size = entry.span.size(); - - while (size >= 8) { - auto r = rng(); - std::memcpy(ptr, &r, 8); - size -= 8; - ptr += 8; - } - if (size > 0) { - auto r = rng(); - std::memcpy(ptr, &r, size); - } + InsecureRandomContext(entry.seed).fillrand(entry.span); } void RandomContentCheck(const Entry& entry) { - XoRoShiRo128PlusPlus rng(entry.seed); - auto ptr = entry.span.data(); - auto size = entry.span.size(); - - std::byte buf[8]; - while (size >= 8) { - auto r = rng(); - std::memcpy(buf, &r, 8); - assert(std::memcmp(buf, ptr, 8) == 0); - size -= 8; - ptr += 8; - } - if (size > 0) { - auto r = rng(); - std::memcpy(buf, &r, size); - assert(std::memcmp(buf, ptr, size) == 0); - } + std::vector<std::byte> expect(entry.span.size()); + InsecureRandomContext(entry.seed).fillrand(expect); + assert(entry.span == expect); } void Deallocate(const Entry& entry) diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index d10d9dafe8..6373eac1c3 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -42,7 +42,7 @@ void initialize_process_message() static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>( /*chain_type=*/ChainType::REGTEST, - /*extra_args=*/{"-txreconciliation"}); + {.extra_args = {"-txreconciliation"}}); g_setup = testing_setup.get(); for (int i = 0; i < 2 * COINBASE_MATURITY; i++) { MineBlock(g_setup->m_node, CScript() << OP_TRUE); diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 38acd432fa..62f38967a3 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -32,7 +32,7 @@ void initialize_process_messages() { static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>( /*chain_type=*/ChainType::REGTEST, - /*extra_args=*/{"-txreconciliation"}); + {.extra_args = {"-txreconciliation"}}); g_setup = testing_setup.get(); for (int i = 0; i < 2 * COINBASE_MATURITY; i++) { MineBlock(g_setup->m_node, CScript() << OP_TRUE); diff --git a/src/test/fuzz/random.cpp b/src/test/fuzz/random.cpp index 96668734fd..6b2d42738b 100644 --- a/src/test/fuzz/random.cpp +++ b/src/test/fuzz/random.cpp @@ -26,6 +26,5 @@ FUZZ_TARGET(random) (void)fast_random_context(); std::vector<int64_t> integrals = ConsumeRandomLengthIntegralVector<int64_t>(fuzzed_data_provider); - Shuffle(integrals.begin(), integrals.end(), fast_random_context); std::shuffle(integrals.begin(), integrals.end(), fast_random_context); } diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index 4e52c1c091..9122617e46 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -41,7 +41,7 @@ using util::ToString; namespace { struct RPCFuzzTestingSetup : public TestingSetup { - RPCFuzzTestingSetup(const ChainType chain_type, const std::vector<const char*>& extra_args) : TestingSetup{chain_type, extra_args} + RPCFuzzTestingSetup(const ChainType chain_type, TestOpts opts) : TestingSetup{chain_type, opts} { } diff --git a/src/test/fuzz/script_sigcache.cpp b/src/test/fuzz/script_sigcache.cpp index 5fdbc9e106..3248ebc4af 100644 --- 
a/src/test/fuzz/script_sigcache.cpp +++ b/src/test/fuzz/script_sigcache.cpp @@ -18,12 +18,15 @@ namespace { const BasicTestingSetup* g_setup; +SignatureCache* g_signature_cache; } // namespace void initialize_script_sigcache() { static const auto testing_setup = MakeNoLogFileContext<>(); + static SignatureCache signature_cache{DEFAULT_SIGNATURE_CACHE_BYTES}; g_setup = testing_setup.get(); + g_signature_cache = &signature_cache; } FUZZ_TARGET(script_sigcache, .init = initialize_script_sigcache) @@ -36,7 +39,7 @@ FUZZ_TARGET(script_sigcache, .init = initialize_script_sigcache) const CAmount amount = ConsumeMoney(fuzzed_data_provider); const bool store = fuzzed_data_provider.ConsumeBool(); PrecomputedTransactionData tx_data; - CachingTransactionSignatureChecker caching_transaction_signature_checker{mutable_transaction ? &tx : nullptr, n_in, amount, store, tx_data}; + CachingTransactionSignatureChecker caching_transaction_signature_checker{mutable_transaction ? &tx : nullptr, n_in, amount, store, *g_signature_cache, tx_data}; if (fuzzed_data_provider.ConsumeBool()) { const auto random_bytes = fuzzed_data_provider.ConsumeBytes<unsigned char>(64); const XOnlyPubKey pub_key(ConsumeUInt256(fuzzed_data_provider)); diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp index 5b822b03f6..443d7241b5 100644 --- a/src/test/fuzz/string.cpp +++ b/src/test/fuzz/string.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2020-2022 The Bitcoin Core developers +// Copyright (c) 2020-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -101,7 +101,6 @@ FUZZ_TARGET(string) (void)TrimString(random_string_1, random_string_2); (void)UrlDecode(random_string_1); (void)ContainsNoNUL(random_string_1); - (void)_(random_string_1.c_str()); try { throw scriptnum_error{random_string_1}; } catch (const std::runtime_error&) { diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp index b6b91445f9..64861311db 100644 --- a/src/test/fuzz/tx_pool.cpp +++ b/src/test/fuzz/tx_pool.cpp @@ -6,7 +6,7 @@ #include <node/context.h> #include <node/mempool_args.h> #include <node/miner.h> -#include <policy/v3_policy.h> +#include <policy/truc_policy.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> @@ -320,7 +320,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) if (accepted) { Assert(added.size() == 1); // For now, no package acceptance Assert(tx == *added.begin()); - CheckMempoolV3Invariants(tx_pool); + CheckMempoolTRUCInvariants(tx_pool); } else { // Do not consider rejected transaction removed removed.erase(tx); @@ -413,7 +413,7 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; if (accepted) { txids.push_back(tx->GetHash()); - CheckMempoolV3Invariants(tx_pool); + CheckMempoolTRUCInvariants(tx_pool); } } Finish(fuzzed_data_provider, tx_pool, chainstate); diff --git a/src/test/fuzz/util/descriptor.cpp b/src/test/fuzz/util/descriptor.cpp index 0fed2bc5e1..9e52e990a2 100644 --- a/src/test/fuzz/util/descriptor.cpp +++ b/src/test/fuzz/util/descriptor.cpp @@ -4,6 +4,9 @@ #include <test/fuzz/util/descriptor.h> +#include <ranges> +#include <stack> + void MockedDescriptorConverter::Init() { // The data to use as a private key or a seed for an xprv. 
std::array<std::byte, 32> key_data{std::byte{1}}; @@ -84,3 +87,59 @@ bool HasDeepDerivPath(const FuzzBufferType& buff, const int max_depth) } return false; } + +bool HasTooManySubFrag(const FuzzBufferType& buff, const int max_subs, const size_t max_nested_subs) +{ + // We use a stack because there may be many nested sub-frags. + std::stack<int> counts; + for (const auto& ch: buff) { + // The fuzzer may generate an input with a ton of parentheses. Rule out pathological cases. + if (counts.size() > max_nested_subs) return true; + + if (ch == '(') { + // A new fragment was opened, create a new sub-count for it and start as one since any fragment with + // parentheses has at least one sub. + counts.push(1); + } else if (ch == ',' && !counts.empty()) { + // When encountering a comma, account for an additional sub in the last opened fragment. If it exceeds the + // limit, bail. + if (++counts.top() > max_subs) return true; + } else if (ch == ')' && !counts.empty()) { + // Fragment closed! Drop its sub count and resume to counting the number of subs for its parent. + counts.pop(); + } + } + return false; +} + +bool HasTooManyWrappers(const FuzzBufferType& buff, const int max_wrappers) +{ + // The number of nested wrappers. Nested wrappers are always characters which follow each other so we don't have to + // use a stack as we do above when counting the number of sub-fragments. + std::optional<int> count; + + // We want to detect nested wrappers. A wrapper is a character prepended to a fragment, separated by a colon. There + // may be more than one wrapper, in which case the colon is not repeated. For instance `jjjjj:pk()`. To count + // wrappers we iterate in reverse and use the colon to detect the end of a wrapper expression and count how many + // characters there are since the beginning of the expression. We stop counting when we encounter a character + // indicating the beginning of a new expression. + for (const auto ch: buff | std::views::reverse) { + // A colon, start counting. + if (ch == ':') { + // The colon itself is not a wrapper so we start at 0. + count = 0; + } else if (count) { + // If we are counting wrappers, stop when we crossed the beginning of the wrapper expression. Otherwise keep + // counting and bail if we reached the limit. + // A wrapper may only ever occur as the first sub of a descriptor/miniscript expression ('('), as the + // first Taproot leaf in a pair ('{') or as the nth sub in each case (','). + if (ch == ',' || ch == '(' || ch == '{') { + count.reset(); + } else if (++*count > max_wrappers) { + return true; + } + } + } + + return false; +} diff --git a/src/test/fuzz/util/descriptor.h b/src/test/fuzz/util/descriptor.h index cd41dbafa3..ea928c39f0 100644 --- a/src/test/fuzz/util/descriptor.h +++ b/src/test/fuzz/util/descriptor.h @@ -55,4 +55,25 @@ constexpr int MAX_DEPTH{2}; */ bool HasDeepDerivPath(const FuzzBufferType& buff, const int max_depth = MAX_DEPTH); +//! Default maximum number of sub-fragments. +constexpr int MAX_SUBS{1'000}; +//! Maximum number of nested sub-fragments we'll allow in a descriptor. +constexpr size_t MAX_NESTED_SUBS{10'000}; + +/** + * Whether the buffer, if it represents a valid descriptor, contains a fragment with more + * sub-fragments than the given maximum. + */ +bool HasTooManySubFrag(const FuzzBufferType& buff, const int max_subs = MAX_SUBS, + const size_t max_nested_subs = MAX_NESTED_SUBS); + +//! Default maximum number of wrappers per fragment. 
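Both helpers added above scan the raw descriptor text without parsing it: HasTooManySubFrag keeps a stack of per-fragment sub counts driven by '(' / ',' / ')', and HasTooManyWrappers walks the buffer in reverse, counting characters from a ':' back to the start of the wrapper expression. A simplified, self-contained restatement for illustration only (the real functions operate on the fuzz buffer type and take the MAX_SUBS, MAX_NESTED_SUBS and MAX_WRAPPERS limits declared in this header; the inputs below are made up):

#include <cassert>
#include <optional>
#include <stack>
#include <string_view>

bool TooManySubs(std::string_view desc, int max_subs)
{
    std::stack<int> counts;
    for (char ch : desc) {
        if (ch == '(') counts.push(1);                        // opening a fragment: it has at least one sub
        else if (ch == ',' && !counts.empty() && ++counts.top() > max_subs) return true;
        else if (ch == ')' && !counts.empty()) counts.pop();  // back to counting the parent's subs
    }
    return false;
}

bool TooManyWrappers(std::string_view desc, int max_wrappers)
{
    std::optional<int> count;
    for (auto it = desc.rbegin(); it != desc.rend(); ++it) {  // scan in reverse
        if (*it == ':') count = 0;                            // end of a wrapper expression, start counting
        else if (count) {
            if (*it == ',' || *it == '(' || *it == '{') count.reset();
            else if (++*count > max_wrappers) return true;
        }
    }
    return false;
}

int main()
{
    assert(TooManySubs("multi(2,A,B,C)", /*max_subs=*/3));      // one '(' plus three ',' gives 4 subs
    assert(!TooManySubs("multi(2,A,B,C)", /*max_subs=*/4));
    assert(TooManyWrappers("jjjjj:pk(K)", /*max_wrappers=*/4)); // five 'j' wrappers before the ':'
    assert(!TooManyWrappers("jjjjj:pk(K)", /*max_wrappers=*/5));
}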
+constexpr int MAX_WRAPPERS{100}; + +/** + * Whether the buffer, if it represents a valid descriptor, contains a fragment with more + * wrappers than the given maximum. + */ +bool HasTooManyWrappers(const FuzzBufferType& buff, const int max_wrappers = MAX_WRAPPERS); + #endif // BITCOIN_TEST_FUZZ_UTIL_DESCRIPTOR_H diff --git a/src/test/fuzz/util/net.cpp b/src/test/fuzz/util/net.cpp index 5e7aae670e..ad69c29d12 100644 --- a/src/test/fuzz/util/net.cpp +++ b/src/test/fuzz/util/net.cpp @@ -182,6 +182,12 @@ ssize_t FuzzedSock::Recv(void* buf, size_t len, int flags) const EWOULDBLOCK, }; assert(buf != nullptr || len == 0); + + // Do the latency before any of the "return" statements. + if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) { + std::this_thread::sleep_for(std::chrono::milliseconds{2}); + } + if (len == 0 || m_fuzzed_data_provider.ConsumeBool()) { const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1; if (r == -1) { @@ -189,47 +195,41 @@ ssize_t FuzzedSock::Recv(void* buf, size_t len, int flags) const } return r; } - std::vector<uint8_t> random_bytes; - bool pad_to_len_bytes{m_fuzzed_data_provider.ConsumeBool()}; - if (m_peek_data.has_value()) { - // `MSG_PEEK` was used in the preceding `Recv()` call, return `m_peek_data`. - random_bytes = m_peek_data.value(); + + size_t copied_so_far{0}; + + if (!m_peek_data.empty()) { + // `MSG_PEEK` was used in the preceding `Recv()` call, copy the first bytes from `m_peek_data`. + const size_t copy_len{std::min(len, m_peek_data.size())}; + std::memcpy(buf, m_peek_data.data(), copy_len); + copied_so_far += copy_len; if ((flags & MSG_PEEK) == 0) { - m_peek_data.reset(); + m_peek_data.erase(m_peek_data.begin(), m_peek_data.begin() + copy_len); } - pad_to_len_bytes = false; - } else if ((flags & MSG_PEEK) != 0) { - // New call with `MSG_PEEK`. - random_bytes = ConsumeRandomLengthByteVector(m_fuzzed_data_provider, len); - if (!random_bytes.empty()) { - m_peek_data = random_bytes; - pad_to_len_bytes = false; - } - } else { - random_bytes = ConsumeRandomLengthByteVector(m_fuzzed_data_provider, len); } - if (random_bytes.empty()) { - const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1; - if (r == -1) { - SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos); - } - return r; + + if (copied_so_far == len) { + return copied_so_far; } - // `random_bytes` might exceed the size of `buf` if e.g. Recv is called with - // len=N and MSG_PEEK first and afterwards called with len=M (M < N) and - // without MSG_PEEK. - size_t recv_len{std::min(random_bytes.size(), len)}; - std::memcpy(buf, random_bytes.data(), recv_len); - if (pad_to_len_bytes) { - if (len > random_bytes.size()) { - std::memset((char*)buf + random_bytes.size(), 0, len - random_bytes.size()); - } - return len; + + auto new_data = ConsumeRandomLengthByteVector(m_fuzzed_data_provider, len - copied_so_far); + if (new_data.empty()) return copied_so_far; + + std::memcpy(reinterpret_cast<uint8_t*>(buf) + copied_so_far, new_data.data(), new_data.size()); + copied_so_far += new_data.size(); + + if ((flags & MSG_PEEK) != 0) { + m_peek_data.insert(m_peek_data.end(), new_data.begin(), new_data.end()); } - if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) { - std::this_thread::sleep_for(std::chrono::milliseconds{2}); + + if (copied_so_far == len || m_fuzzed_data_provider.ConsumeBool()) { + return copied_so_far; } - return recv_len; + + // Pad to len bytes. 
+ std::memset(reinterpret_cast<uint8_t*>(buf) + copied_so_far, 0x0, len - copied_so_far); + + return len; } int FuzzedSock::Connect(const sockaddr*, socklen_t) const diff --git a/src/test/fuzz/util/net.h b/src/test/fuzz/util/net.h index ed02680676..1a5902329e 100644 --- a/src/test/fuzz/util/net.h +++ b/src/test/fuzz/util/net.h @@ -43,7 +43,7 @@ class FuzzedSock : public Sock * If `MSG_PEEK` is used, then our `Recv()` returns some random data as usual, but on the next * `Recv()` call we must return the same data, thus we remember it here. */ - mutable std::optional<std::vector<uint8_t>> m_peek_data; + mutable std::vector<uint8_t> m_peek_data; /** * Whether to pretend that the socket is select(2)-able. This is randomly set in the diff --git a/src/test/fuzz/utxo_snapshot.cpp b/src/test/fuzz/utxo_snapshot.cpp index 8c9c67a91c..522c9c54ee 100644 --- a/src/test/fuzz/utxo_snapshot.cpp +++ b/src/test/fuzz/utxo_snapshot.cpp @@ -41,20 +41,46 @@ FUZZ_TARGET(utxo_snapshot, .init = initialize_chain) { AutoFile outfile{fsbridge::fopen(snapshot_path, "wb")}; - const auto file_data{ConsumeRandomLengthByteVector(fuzzed_data_provider)}; - outfile << Span{file_data}; + // Metadata + if (fuzzed_data_provider.ConsumeBool()) { + std::vector<uint8_t> metadata{ConsumeRandomLengthByteVector(fuzzed_data_provider)}; + outfile << Span{metadata}; + } else { + DataStream data_stream{}; + auto msg_start = chainman.GetParams().MessageStart(); + int base_blockheight{fuzzed_data_provider.ConsumeIntegralInRange<int>(1, 2 * COINBASE_MATURITY)}; + uint256 base_blockhash{g_chain->at(base_blockheight - 1)->GetHash()}; + uint64_t m_coins_count{fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(1, 3 * COINBASE_MATURITY)}; + SnapshotMetadata metadata{msg_start, base_blockhash, base_blockheight, m_coins_count}; + outfile << metadata; + } + // Coins + if (fuzzed_data_provider.ConsumeBool()) { + std::vector<uint8_t> file_data{ConsumeRandomLengthByteVector(fuzzed_data_provider)}; + outfile << Span{file_data}; + } else { + int height{0}; + for (const auto& block : *g_chain) { + auto coinbase{block->vtx.at(0)}; + outfile << coinbase->GetHash(); + WriteCompactSize(outfile, 1); // number of coins for the hash + WriteCompactSize(outfile, 0); // index of coin + outfile << Coin(coinbase->vout[0], height, /*fCoinBaseIn=*/1); + height++; + } + } } const auto ActivateFuzzedSnapshot{[&] { AutoFile infile{fsbridge::fopen(snapshot_path, "rb")}; - auto msg_start = Params().MessageStart(); + auto msg_start = chainman.GetParams().MessageStart(); SnapshotMetadata metadata{msg_start}; try { infile >> metadata; } catch (const std::ios_base::failure&) { return false; } - return chainman.ActivateSnapshot(infile, metadata, /*in_memory=*/true); + return !!chainman.ActivateSnapshot(infile, metadata, /*in_memory=*/true); }}; if (fuzzed_data_provider.ConsumeBool()) { @@ -73,16 +99,20 @@ FUZZ_TARGET(utxo_snapshot, .init = initialize_chain) Assert(*chainman.ActiveChainstate().m_from_snapshot_blockhash == *chainman.SnapshotBlockhash()); const auto& coinscache{chainman.ActiveChainstate().CoinsTip()}; - int64_t chain_tx{}; for (const auto& block : *g_chain) { Assert(coinscache.HaveCoin(COutPoint{block->vtx.at(0)->GetHash(), 0})); const auto* index{chainman.m_blockman.LookupBlockIndex(block->GetHash())}; - const auto num_tx{Assert(index)->nTx}; - Assert(num_tx == 1); - chain_tx += num_tx; + Assert(index); + Assert(index->nTx == 0); + if (index->nHeight == chainman.GetSnapshotBaseHeight()) { + auto params{chainman.GetParams().AssumeutxoForHeight(index->nHeight)}; 
+ Assert(params.has_value()); + Assert(params.value().nChainTx == index->nChainTx); + } else { + Assert(index->nChainTx == 0); + } } Assert(g_chain->size() == coinscache.GetCacheSize()); - Assert(chain_tx == chainman.ActiveTip()->nChainTx); } else { Assert(!chainman.SnapshotBlockhash()); Assert(!chainman.ActiveChainstate().m_from_snapshot_blockhash); diff --git a/src/test/fuzz/utxo_total_supply.cpp b/src/test/fuzz/utxo_total_supply.cpp index 48ed266abe..b0f1a1251a 100644 --- a/src/test/fuzz/utxo_total_supply.cpp +++ b/src/test/fuzz/utxo_total_supply.cpp @@ -23,7 +23,7 @@ FUZZ_TARGET(utxo_total_supply) ChainTestingSetup test_setup{ ChainType::REGTEST, { - "-testactivationheight=bip34@2", + .extra_args = {"-testactivationheight=bip34@2"}, }, }; // Create chainstate diff --git a/src/test/fuzz/validation_load_mempool.cpp b/src/test/fuzz/validation_load_mempool.cpp index 51140ae039..c70d9ddf1e 100644 --- a/src/test/fuzz/validation_load_mempool.cpp +++ b/src/test/fuzz/validation_load_mempool.cpp @@ -2,7 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <kernel/mempool_persist.h> +#include <node/mempool_persist.h> #include <node/mempool_args.h> #include <node/mempool_persist_args.h> @@ -21,8 +21,8 @@ #include <cstdint> #include <vector> -using kernel::DumpMempool; -using kernel::LoadMempool; +using node::DumpMempool; +using node::LoadMempool; using node::MempoolPath; diff --git a/src/test/fuzz/vecdeque.cpp b/src/test/fuzz/vecdeque.cpp index 1d9a98931f..3bb858ee8a 100644 --- a/src/test/fuzz/vecdeque.cpp +++ b/src/test/fuzz/vecdeque.cpp @@ -2,9 +2,9 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include <random.h> #include <span.h> #include <test/fuzz/util.h> -#include <test/util/xoroshiro128plusplus.h> #include <util/vecdeque.h> #include <deque> @@ -28,7 +28,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) { FuzzedDataProvider provider(buffer.data(), buffer.size()); // Local RNG, only used for the seeds to initialize T objects with. - XoRoShiRo128PlusPlus rng(provider.ConsumeIntegral<uint64_t>() ^ rng_tweak); + InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>() ^ rng_tweak); // Real circular buffers. 
std::vector<VecDeque<T>> real; @@ -175,7 +175,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_full && command-- == 0) { /* push_back() (copying) */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t old_size = real[idx].size(); size_t old_cap = real[idx].capacity(); real[idx].push_back(*tmp); @@ -191,7 +191,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_full && command-- == 0) { /* push_back() (moving) */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t old_size = real[idx].size(); size_t old_cap = real[idx].capacity(); sim[idx].push_back(*tmp); @@ -207,7 +207,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_full && command-- == 0) { /* emplace_back() */ - uint64_t seed{rng()}; + uint64_t seed{rng.rand64()}; size_t old_size = real[idx].size(); size_t old_cap = real[idx].capacity(); sim[idx].emplace_back(seed); @@ -223,7 +223,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_full && command-- == 0) { /* push_front() (copying) */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t old_size = real[idx].size(); size_t old_cap = real[idx].capacity(); real[idx].push_front(*tmp); @@ -239,7 +239,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_full && command-- == 0) { /* push_front() (moving) */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t old_size = real[idx].size(); size_t old_cap = real[idx].capacity(); sim[idx].push_front(*tmp); @@ -255,7 +255,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_full && command-- == 0) { /* emplace_front() */ - uint64_t seed{rng()}; + uint64_t seed{rng.rand64()}; size_t old_size = real[idx].size(); size_t old_cap = real[idx].capacity(); sim[idx].emplace_front(seed); @@ -271,7 +271,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_empty && command-- == 0) { /* front() [modifying] */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t old_size = real[idx].size(); assert(sim[idx].front() == real[idx].front()); sim[idx].front() = *tmp; @@ -281,7 +281,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_empty && command-- == 0) { /* back() [modifying] */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t old_size = real[idx].size(); assert(sim[idx].back() == real[idx].back()); sim[idx].back() = *tmp; @@ -291,7 +291,7 @@ void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak) } if (existing_buffer_non_empty && command-- == 0) { /* operator[] [modifying] */ - tmp = T(rng()); + tmp = T(rng.rand64()); size_t pos = provider.ConsumeIntegralInRange<size_t>(0, sim[idx].size() - 1); size_t old_size = real[idx].size(); assert(sim[idx][pos] == real[idx][pos]); diff --git a/src/test/i2p_tests.cpp b/src/test/i2p_tests.cpp index 0512c6134f..bb9ca88019 100644 --- a/src/test/i2p_tests.cpp +++ b/src/test/i2p_tests.cpp @@ -23,8 +23,8 @@ class EnvTestingSetup : public BasicTestingSetup { public: explicit EnvTestingSetup(const ChainType chainType = ChainType::MAIN, - const std::vector<const char*>& extra_args = {}) - : BasicTestingSetup{chainType, extra_args}, + TestOpts opts = {}) + : BasicTestingSetup{chainType, opts}, m_prev_log_level{LogInstance().LogLevel()}, m_create_sock_orig{CreateSock} { diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index 7e39e9e4de..c99a4594ce 100644 --- 
a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -346,7 +346,7 @@ void TestSatisfy(const KeyConverter& converter, const std::string& testcase, con auto challenges = FindChallenges(node); // Find all challenges in the generated miniscript. std::vector<Challenge> challist(challenges.begin(), challenges.end()); for (int iter = 0; iter < 3; ++iter) { - Shuffle(challist.begin(), challist.end(), g_insecure_rand_ctx); + std::shuffle(challist.begin(), challist.end(), g_insecure_rand_ctx); Satisfier satisfier(converter.MsContext()); TestSignatureChecker checker(satisfier); bool prev_mal_success = false, prev_nonmal_success = false; diff --git a/src/test/net_peer_connection_tests.cpp b/src/test/net_peer_connection_tests.cpp index 5f38ce112c..2dde6daee5 100644 --- a/src/test/net_peer_connection_tests.cpp +++ b/src/test/net_peer_connection_tests.cpp @@ -31,7 +31,7 @@ struct LogIPsTestingSetup : public TestingSetup { LogIPsTestingSetup() - : TestingSetup{ChainType::MAIN, /*extra_args=*/{"-logips"}} {} + : TestingSetup{ChainType::MAIN, {.extra_args = {"-logips"}}} {} }; BOOST_FIXTURE_TEST_SUITE(net_peer_connection_tests, LogIPsTestingSetup) diff --git a/src/test/net_peer_eviction_tests.cpp b/src/test/net_peer_eviction_tests.cpp index 51d6c4384a..d9e1c2332e 100644 --- a/src/test/net_peer_eviction_tests.cpp +++ b/src/test/net_peer_eviction_tests.cpp @@ -31,7 +31,7 @@ bool IsProtected(int num_peers, for (NodeEvictionCandidate& candidate : candidates) { candidate_setup_fn(candidate); } - Shuffle(candidates.begin(), candidates.end(), random_context); + std::shuffle(candidates.begin(), candidates.end(), random_context); const size_t size{candidates.size()}; const size_t expected{size - size / 2}; // Expect half the candidates will be protected. @@ -572,7 +572,7 @@ BOOST_AUTO_TEST_CASE(peer_protection_test) // Returns true if any of the node ids in node_ids are selected for eviction. bool IsEvicted(std::vector<NodeEvictionCandidate> candidates, const std::unordered_set<NodeId>& node_ids, FastRandomContext& random_context) { - Shuffle(candidates.begin(), candidates.end(), random_context); + std::shuffle(candidates.begin(), candidates.end(), random_context); const std::optional<NodeId> evicted_node_id = SelectNodeToEvict(std::move(candidates)); if (!evicted_node_id) { return false; diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp index 450bf6a4fc..3459aa9f0e 100644 --- a/src/test/orphanage_tests.cpp +++ b/src/test/orphanage_tests.cpp @@ -106,7 +106,7 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) // ecdsa_signature_parse_der_lax are executed during this test. // Specifically branches that run only when an ECDSA // signature's R and S values have leading zeros. 
- g_insecure_rand_ctx = FastRandomContext{uint256{33}}; + g_insecure_rand_ctx.Reseed(uint256{33}); TxOrphanageTest orphanage; CKey key; diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp index 1559011fcd..1ac7abf492 100644 --- a/src/test/prevector_tests.cpp +++ b/src/test/prevector_tests.cpp @@ -210,9 +210,9 @@ public: } prevector_tester() { - SeedInsecureRand(); + SeedRandomForTest(); rand_seed = InsecureRand256(); - rand_cache = FastRandomContext(rand_seed); + rand_cache.Reseed(rand_seed); } }; diff --git a/src/test/random_tests.cpp b/src/test/random_tests.cpp index 43d887b5c9..3d8b543e64 100644 --- a/src/test/random_tests.cpp +++ b/src/test/random_tests.cpp @@ -20,28 +20,39 @@ BOOST_AUTO_TEST_CASE(osrandom_tests) BOOST_CHECK(Random_SanityCheck()); } -BOOST_AUTO_TEST_CASE(fastrandom_tests) +BOOST_AUTO_TEST_CASE(fastrandom_tests_deterministic) { // Check that deterministic FastRandomContexts are deterministic - g_mock_deterministic_tests = true; - FastRandomContext ctx1(true); - FastRandomContext ctx2(true); - - for (int i = 10; i > 0; --i) { - BOOST_CHECK_EQUAL(GetRand<uint64_t>(), uint64_t{10393729187455219830U}); - BOOST_CHECK_EQUAL(GetRand<int>(), int{769702006}); - BOOST_CHECK_EQUAL(GetRandMicros(std::chrono::hours{1}).count(), 2917185654); - BOOST_CHECK_EQUAL(GetRandMillis(std::chrono::hours{1}).count(), 2144374); + SeedRandomForTest(SeedRand::ZEROS); + FastRandomContext ctx1{true}; + FastRandomContext ctx2{true}; + + { + BOOST_CHECK_EQUAL(FastRandomContext().rand<uint64_t>(), uint64_t{9330418229102544152u}); + BOOST_CHECK_EQUAL(FastRandomContext().rand<int>(), int{618925161}); + BOOST_CHECK_EQUAL(FastRandomContext().randrange<std::chrono::microseconds>(1h).count(), 1271170921); + BOOST_CHECK_EQUAL(FastRandomContext().randrange<std::chrono::milliseconds>(1h).count(), 2803534); + + BOOST_CHECK_EQUAL(FastRandomContext().rand<uint64_t>(), uint64_t{10170981140880778086u}); + BOOST_CHECK_EQUAL(FastRandomContext().rand<int>(), int{1689082725}); + BOOST_CHECK_EQUAL(FastRandomContext().randrange<std::chrono::microseconds>(1h).count(), 2464643716); + BOOST_CHECK_EQUAL(FastRandomContext().randrange<std::chrono::milliseconds>(1h).count(), 2312205); + + BOOST_CHECK_EQUAL(FastRandomContext().rand<uint64_t>(), uint64_t{5689404004456455543u}); + BOOST_CHECK_EQUAL(FastRandomContext().rand<int>(), int{785839937}); + BOOST_CHECK_EQUAL(FastRandomContext().randrange<std::chrono::microseconds>(1h).count(), 93558804); + BOOST_CHECK_EQUAL(FastRandomContext().randrange<std::chrono::milliseconds>(1h).count(), 507022); } + { constexpr SteadySeconds time_point{1s}; FastRandomContext ctx{true}; BOOST_CHECK_EQUAL(7, ctx.rand_uniform_delay(time_point, 9s).time_since_epoch().count()); BOOST_CHECK_EQUAL(-6, ctx.rand_uniform_delay(time_point, -9s).time_since_epoch().count()); BOOST_CHECK_EQUAL(1, ctx.rand_uniform_delay(time_point, 0s).time_since_epoch().count()); - BOOST_CHECK_EQUAL(1467825113502396065, ctx.rand_uniform_delay(time_point, 9223372036854775807s).time_since_epoch().count()); - BOOST_CHECK_EQUAL(-970181367944767837, ctx.rand_uniform_delay(time_point, -9223372036854775807s).time_since_epoch().count()); - BOOST_CHECK_EQUAL(24761, ctx.rand_uniform_delay(time_point, 9h).time_since_epoch().count()); + BOOST_CHECK_EQUAL(4652286523065884857, ctx.rand_uniform_delay(time_point, 9223372036854775807s).time_since_epoch().count()); + BOOST_CHECK_EQUAL(-8813961240025683129, ctx.rand_uniform_delay(time_point, -9223372036854775807s).time_since_epoch().count()); + BOOST_CHECK_EQUAL(26443, 
ctx.rand_uniform_delay(time_point, 9h).time_since_epoch().count()); } BOOST_CHECK_EQUAL(ctx1.rand32(), ctx2.rand32()); BOOST_CHECK_EQUAL(ctx1.rand32(), ctx2.rand32()); @@ -65,15 +76,28 @@ BOOST_AUTO_TEST_CASE(fastrandom_tests) // Check with time-point type BOOST_CHECK_EQUAL(2782, ctx.rand_uniform_duration<SteadySeconds>(9h).count()); } +} +BOOST_AUTO_TEST_CASE(fastrandom_tests_nondeterministic) +{ // Check that a nondeterministic ones are not - g_mock_deterministic_tests = false; - for (int i = 10; i > 0; --i) { - BOOST_CHECK(GetRand<uint64_t>() != uint64_t{10393729187455219830U}); - BOOST_CHECK(GetRand<int>() != int{769702006}); - BOOST_CHECK(GetRandMicros(std::chrono::hours{1}) != std::chrono::microseconds{2917185654}); - BOOST_CHECK(GetRandMillis(std::chrono::hours{1}) != std::chrono::milliseconds{2144374}); + { + BOOST_CHECK(FastRandomContext().rand<uint64_t>() != uint64_t{9330418229102544152u}); + BOOST_CHECK(FastRandomContext().rand<int>() != int{618925161}); + BOOST_CHECK(FastRandomContext().randrange<std::chrono::microseconds>(1h).count() != 1271170921); + BOOST_CHECK(FastRandomContext().randrange<std::chrono::milliseconds>(1h).count() != 2803534); + + BOOST_CHECK(FastRandomContext().rand<uint64_t>() != uint64_t{10170981140880778086u}); + BOOST_CHECK(FastRandomContext().rand<int>() != int{1689082725}); + BOOST_CHECK(FastRandomContext().randrange<std::chrono::microseconds>(1h).count() != 2464643716); + BOOST_CHECK(FastRandomContext().randrange<std::chrono::milliseconds>(1h).count() != 2312205); + + BOOST_CHECK(FastRandomContext().rand<uint64_t>() != uint64_t{5689404004456455543u}); + BOOST_CHECK(FastRandomContext().rand<int>() != int{785839937}); + BOOST_CHECK(FastRandomContext().randrange<std::chrono::microseconds>(1h).count() != 93558804); + BOOST_CHECK(FastRandomContext().randrange<std::chrono::milliseconds>(1h).count() != 507022); } + { FastRandomContext ctx3, ctx4; BOOST_CHECK(ctx3.rand64() != ctx4.rand64()); // extremely unlikely to be equal @@ -103,6 +127,70 @@ BOOST_AUTO_TEST_CASE(fastrandom_randbits) } } +/** Verify that RandomMixin::randbits returns 0 and 1 for every requested bit. */ +BOOST_AUTO_TEST_CASE(randbits_test) +{ + FastRandomContext ctx_lens; //!< RNG for producing the lengths requested from ctx_test. + FastRandomContext ctx_test1(true), ctx_test2(true); //!< The RNGs being tested. + int ctx_test_bitsleft{0}; //!< (Assumed value of) ctx_test::bitbuf_len + + // Run the entire test 5 times. + for (int i = 0; i < 5; ++i) { + // count (first) how often it has occurred, and (second) how often it was true: + // - for every bit position, in every requested bits count (0 + 1 + 2 + ... + 64 = 2080) + // - for every value of ctx_test_bitsleft (0..63 = 64) + std::vector<std::pair<uint64_t, uint64_t>> seen(2080 * 64); + while (true) { + // Loop 1000 times, just to not continuously check std::all_of. + for (int j = 0; j < 1000; ++j) { + // Decide on a number of bits to request (0 through 64, inclusive; don't use randbits/randrange). + int bits = ctx_lens.rand64() % 65; + // Generate that many bits. + uint64_t gen = ctx_test1.randbits(bits); + // For certain bits counts, also test randbits<Bits> and compare. 
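A note on the constants in this new test, as a reading of the arithmetic rather than anything in the patch itself: for a request of `bits` bits, the slot used for bit position `bit` is `bit + bits*(bits-1)/2`, so the blocks for the 65 possible request sizes occupy 0 + 1 + 2 + ... + 64 = 64*65/2 = 2080 distinct slots, and combining that with the 64 possible values of ctx_test_bitsleft gives the `seen(2080 * 64)` sizing above. Each bucket then behaves as a fair coin sampled at least 242 times, and the final assert allows a deviation of up to 7.78 standard deviations from the 50% mean, which (per the in-code comment) keeps the per-bucket false-failure probability below 1/(2080 * 64 * 10^9).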
+ uint64_t gen2; + if (bits == 0) { + gen2 = ctx_test2.randbits<0>(); + } else if (bits == 1) { + gen2 = ctx_test2.randbits<1>(); + } else if (bits == 7) { + gen2 = ctx_test2.randbits<7>(); + } else if (bits == 32) { + gen2 = ctx_test2.randbits<32>(); + } else if (bits == 51) { + gen2 = ctx_test2.randbits<51>(); + } else if (bits == 64) { + gen2 = ctx_test2.randbits<64>(); + } else { + gen2 = ctx_test2.randbits(bits); + } + BOOST_CHECK_EQUAL(gen, gen2); + // Make sure the result is in range. + if (bits < 64) BOOST_CHECK_EQUAL(gen >> bits, 0); + // Mark all the seen bits in the output. + for (int bit = 0; bit < bits; ++bit) { + int idx = bit + (bits * (bits - 1)) / 2 + 2080 * ctx_test_bitsleft; + seen[idx].first += 1; + seen[idx].second += (gen >> bit) & 1; + } + // Update ctx_test_bitself. + if (bits > ctx_test_bitsleft) { + ctx_test_bitsleft = ctx_test_bitsleft + 64 - bits; + } else { + ctx_test_bitsleft -= bits; + } + } + // Loop until every bit position/combination is seen 242 times. + if (std::all_of(seen.begin(), seen.end(), [](const auto& x) { return x.first >= 242; })) break; + } + // Check that each bit appears within 7.78 standard deviations of 50% + // (each will fail with P < 1/(2080 * 64 * 10^9)). + for (const auto& val : seen) { + assert(fabs(val.first * 0.5 - val.second) < sqrt(val.first * 0.25) * 7.78); + } + } +} + /** Does-it-compile test for compatibility with standard library RNG interface. */ BOOST_AUTO_TEST_CASE(stdrandom_test) { @@ -118,10 +206,6 @@ BOOST_AUTO_TEST_CASE(stdrandom_test) for (int j = 1; j <= 10; ++j) { BOOST_CHECK(std::find(test.begin(), test.end(), j) != test.end()); } - Shuffle(test.begin(), test.end(), ctx); - for (int j = 1; j <= 10; ++j) { - BOOST_CHECK(std::find(test.begin(), test.end(), j) != test.end()); - } } } @@ -132,7 +216,7 @@ BOOST_AUTO_TEST_CASE(shuffle_stat_test) uint32_t counts[5 * 5 * 5 * 5 * 5] = {0}; for (int i = 0; i < 12000; ++i) { int data[5] = {0, 1, 2, 3, 4}; - Shuffle(std::begin(data), std::end(data), ctx); + std::shuffle(std::begin(data), std::end(data), ctx); int pos = data[0] + data[1] * 5 + data[2] * 25 + data[3] * 125 + data[4] * 625; ++counts[pos]; } @@ -155,4 +239,21 @@ BOOST_AUTO_TEST_CASE(shuffle_stat_test) BOOST_CHECK_EQUAL(sum, 12000U); } +BOOST_AUTO_TEST_CASE(xoroshiro128plusplus_reference_values) +{ + // numbers generated from reference implementation + InsecureRandomContext rng(0); + BOOST_TEST(0x6f68e1e7e2646ee1 == rng()); + BOOST_TEST(0xbf971b7f454094ad == rng()); + BOOST_TEST(0x48f2de556f30de38 == rng()); + BOOST_TEST(0x6ea7c59f89bbfc75 == rng()); + + // seed with a random number + rng.Reseed(0x1a26f3fa8546b47a); + BOOST_TEST(0xc8dc5e08d844ac7d == rng()); + BOOST_TEST(0x5b5f1f6d499dad1b == rng()); + BOOST_TEST(0xbeb0031f93313d6f == rng()); + BOOST_TEST(0xbfbcf4f43a264497 == rng()); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/script_p2sh_tests.cpp b/src/test/script_p2sh_tests.cpp index 54dcc218b9..f91203cc48 100644 --- a/src/test/script_p2sh_tests.cpp +++ b/src/test/script_p2sh_tests.cpp @@ -113,13 +113,14 @@ BOOST_AUTO_TEST_CASE(sign) } // All of the above should be OK, and the txTos have valid signatures // Check to make sure signature verification fails if we use the wrong ScriptSig: + SignatureCache signature_cache{DEFAULT_SIGNATURE_CACHE_BYTES}; for (int i = 0; i < 8; i++) { PrecomputedTransactionData txdata(txTo[i]); for (int j = 0; j < 8; j++) { CScript sigSave = txTo[i].vin[0].scriptSig; txTo[i].vin[0].scriptSig = txTo[j].vin[0].scriptSig; - bool sigOK = 
CScriptCheck(txFrom.vout[txTo[i].vin[0].prevout.n], CTransaction(txTo[i]), 0, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC, false, &txdata)(); + bool sigOK = CScriptCheck(txFrom.vout[txTo[i].vin[0].prevout.n], CTransaction(txTo[i]), signature_cache, 0, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC, false, &txdata)(); if (i == j) BOOST_CHECK_MESSAGE(sigOK, strprintf("VerifySignature %d %d", i, j)); else diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index 39b53295e7..0d309469ef 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1526,7 +1526,7 @@ static std::vector<unsigned int> AllConsensusFlags() /** Precomputed list of all valid combinations of consensus-relevant script validation flags. */ static const std::vector<unsigned int> ALL_CONSENSUS_FLAGS = AllConsensusFlags(); -static void AssetTest(const UniValue& test) +static void AssetTest(const UniValue& test, SignatureCache& signature_cache) { BOOST_CHECK(test.isObject()); @@ -1543,7 +1543,7 @@ static void AssetTest(const UniValue& test) CTransaction tx(mtx); PrecomputedTransactionData txdata; txdata.Init(tx, std::vector<CTxOut>(prevouts)); - CachingTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, true, txdata); + CachingTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, true, signature_cache, txdata); for (const auto flags : ALL_CONSENSUS_FLAGS) { // "final": true tests are valid for all flags. Others are only valid with flags that are @@ -1561,7 +1561,7 @@ static void AssetTest(const UniValue& test) CTransaction tx(mtx); PrecomputedTransactionData txdata; txdata.Init(tx, std::vector<CTxOut>(prevouts)); - CachingTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, true, txdata); + CachingTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, true, signature_cache, txdata); for (const auto flags : ALL_CONSENSUS_FLAGS) { // If a test is supposed to fail with test_flags, it should also fail with any superset thereof. @@ -1577,6 +1577,7 @@ BOOST_AUTO_TEST_CASE(script_assets_test) { // See src/test/fuzz/script_assets_test_minimizer.cpp for information on how to generate // the script_assets_test.json file used by this test. + SignatureCache signature_cache{DEFAULT_SIGNATURE_CACHE_BYTES}; const char* dir = std::getenv("DIR_UNIT_TEST_DATA"); BOOST_WARN_MESSAGE(dir != nullptr, "Variable DIR_UNIT_TEST_DATA unset, skipping script_assets_test"); @@ -1597,7 +1598,7 @@ BOOST_AUTO_TEST_CASE(script_assets_test) BOOST_CHECK(tests.size() > 0); for (size_t i = 0; i < tests.size(); i++) { - AssetTest(tests[i]); + AssetTest(tests[i], signature_cache); } file.close(); } diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index e666e11758..eed932b6d2 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -436,7 +436,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_skip) BOOST_AUTO_TEST_CASE(streams_buffered_file_rand) { // Make this test deterministic. 
- SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); fs::path streams_test_filename = m_args.GetDataDirBase() / "streams_test_tmp"; for (int rep = 0; rep < 50; ++rep) { diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 34176626f0..a7fda5865c 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -17,6 +17,7 @@ #include <policy/settings.h> #include <script/script.h> #include <script/script_error.h> +#include <script/sigcache.h> #include <script/sign.h> #include <script/signingprovider.h> #include <script/solver.h> @@ -578,9 +579,11 @@ BOOST_AUTO_TEST_CASE(test_big_witness_transaction) coins.emplace_back(std::move(coin)); } + SignatureCache signature_cache{DEFAULT_SIGNATURE_CACHE_BYTES}; + for(uint32_t i = 0; i < mtx.vin.size(); i++) { std::vector<CScriptCheck> vChecks; - vChecks.emplace_back(coins[tx.vin[i].prevout.n].out, tx, i, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false, &txdata); + vChecks.emplace_back(coins[tx.vin[i].prevout.n].out, tx, signature_cache, i, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false, &txdata); control.Add(std::move(vChecks)); } diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 478121cc6f..8c873c85a3 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -303,7 +303,7 @@ BOOST_FIXTURE_TEST_CASE(noncontextual_package_tests, TestChain100Setup) // The parents can be in any order. FastRandomContext rng; - Shuffle(package.begin(), package.end(), rng); + std::shuffle(package.begin(), package.end(), rng); package.push_back(MakeTransactionRef(child)); PackageValidationState state; diff --git a/src/test/txrequest_tests.cpp b/src/test/txrequest_tests.cpp index dc257a0d51..0ca70d2c7a 100644 --- a/src/test/txrequest_tests.cpp +++ b/src/test/txrequest_tests.cpp @@ -392,7 +392,7 @@ void BuildBigPriorityTest(Scenario& scenario, int peers) // Determine the announcement order randomly. std::vector<NodeId> announce_order = request_order; - Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx); + std::shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx); // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and // within npref_peers. @@ -697,7 +697,7 @@ void TestInterleavedScenarios() builders.emplace_back([](Scenario& scenario){ BuildWeirdRequestsTest(scenario); }); } // Randomly shuffle all those functions. 
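The change just below is one of several in this section (miniscript_tests, net_peer_eviction_tests, txpackage_tests, txrequest_tests) that drop the project's own Shuffle helper in favour of std::shuffle. The swap is possible because FastRandomContext exposes the standard UniformRandomBitGenerator surface, which the stdrandom_test case above already exercises. A minimal sketch of the pattern with a stand-in generator (the real call sites pass FastRandomContext or g_insecure_rand_ctx, as shown in the surrounding hunks):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in URBG with the same shape FastRandomContext presents to std::shuffle:
// a result_type, min()/max(), and operator() producing uniform 64-bit values.
class DemoRng
{
    uint64_t m_state;
public:
    using result_type = uint64_t;
    explicit DemoRng(uint64_t seed) : m_state{seed} {}
    static constexpr result_type min() { return 0; }
    static constexpr result_type max() { return ~uint64_t{0}; }
    result_type operator()()
    {
        // xorshift64, purely for illustration
        m_state ^= m_state << 13;
        m_state ^= m_state >> 7;
        m_state ^= m_state << 17;
        return m_state;
    }
};

int main()
{
    std::vector<int> order{0, 1, 2, 3, 4};
    DemoRng rng{42};
    std::shuffle(order.begin(), order.end(), rng); // same call shape as the patched tests
    for (int v : order) std::cout << v << ' ';
    std::cout << '\n';
}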
- Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx); + std::shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx); Runner runner; auto starttime = RandomTime1y(); diff --git a/src/test/txvalidation_tests.cpp b/src/test/txvalidation_tests.cpp index f429f94a2f..97b27ef370 100644 --- a/src/test/txvalidation_tests.cpp +++ b/src/test/txvalidation_tests.cpp @@ -4,9 +4,9 @@ #include <consensus/validation.h> #include <key_io.h> -#include <policy/v3_policy.h> #include <policy/packages.h> #include <policy/policy.h> +#include <policy/truc_policy.h> #include <primitives/transaction.h> #include <random.h> #include <script/script.h> @@ -91,7 +91,7 @@ static inline CTransactionRef make_tx(const std::vector<COutPoint>& inputs, int3 BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) { - // Test V3 policy helper functions + // Test TRUC policy helper functions CTxMemPool& pool = *Assert(m_node.mempool); LOCK2(cs_main, pool.cs); TestMemPoolEntryHelper entry; @@ -105,77 +105,77 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) // Default values. CTxMemPool::Limits m_limits{}; - // Cannot spend from an unconfirmed v3 transaction unless this tx is also v3. + // Cannot spend from an unconfirmed TRUC transaction unless this tx is also TRUC. { // mempool_tx_v3 // ^ // tx_v2_from_v3 auto tx_v2_from_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}}, /*version=*/2); auto ancestors_v2_from_v3{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v3), m_limits)}; - const auto expected_error_str{strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)", + const auto expected_error_str{strprintf("non-version=3 tx %s (wtxid=%s) cannot spend from version=3 tx %s (wtxid=%s)", tx_v2_from_v3->GetHash().ToString(), tx_v2_from_v3->GetWitnessHash().ToString(), mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())}; - auto result_v2_from_v3{SingleV3Checks(tx_v2_from_v3, *ancestors_v2_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v3))}; + auto result_v2_from_v3{SingleTRUCChecks(tx_v2_from_v3, *ancestors_v2_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v3))}; BOOST_CHECK_EQUAL(result_v2_from_v3->first, expected_error_str); BOOST_CHECK_EQUAL(result_v2_from_v3->second, nullptr); Package package_v3_v2{mempool_tx_v3, tx_v2_from_v3}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), package_v3_v2, empty_ancestors), expected_error_str); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), package_v3_v2, empty_ancestors), expected_error_str); CTxMemPool::setEntries entries_mempool_v3{pool.GetIter(mempool_tx_v3->GetHash().ToUint256()).value()}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), {tx_v2_from_v3}, entries_mempool_v3), expected_error_str); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), {tx_v2_from_v3}, entries_mempool_v3), expected_error_str); // mempool_tx_v3 mempool_tx_v2 // ^ ^ // tx_v2_from_v2_and_v3 auto tx_v2_from_v2_and_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}, COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/2); auto ancestors_v2_from_both{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v2_and_v3), m_limits)}; - const auto expected_error_str_2{strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)", + const auto expected_error_str_2{strprintf("non-version=3 tx %s 
(wtxid=%s) cannot spend from version=3 tx %s (wtxid=%s)", tx_v2_from_v2_and_v3->GetHash().ToString(), tx_v2_from_v2_and_v3->GetWitnessHash().ToString(), mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())}; - auto result_v2_from_both{SingleV3Checks(tx_v2_from_v2_and_v3, *ancestors_v2_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3))}; + auto result_v2_from_both{SingleTRUCChecks(tx_v2_from_v2_and_v3, *ancestors_v2_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3))}; BOOST_CHECK_EQUAL(result_v2_from_both->first, expected_error_str_2); BOOST_CHECK_EQUAL(result_v2_from_both->second, nullptr); Package package_v3_v2_v2{mempool_tx_v3, mempool_tx_v2, tx_v2_from_v2_and_v3}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v2_and_v3, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3), package_v3_v2_v2, empty_ancestors), expected_error_str_2); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v2_from_v2_and_v3, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3), package_v3_v2_v2, empty_ancestors), expected_error_str_2); } - // V3 cannot spend from an unconfirmed non-v3 transaction. + // TRUC cannot spend from an unconfirmed non-TRUC transaction. { // mempool_tx_v2 // ^ // tx_v3_from_v2 auto tx_v3_from_v2 = make_tx({COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/3); auto ancestors_v3_from_v2{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v2), m_limits)}; - const auto expected_error_str{strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)", + const auto expected_error_str{strprintf("version=3 tx %s (wtxid=%s) cannot spend from non-version=3 tx %s (wtxid=%s)", tx_v3_from_v2->GetHash().ToString(), tx_v3_from_v2->GetWitnessHash().ToString(), mempool_tx_v2->GetHash().ToString(), mempool_tx_v2->GetWitnessHash().ToString())}; - auto result_v3_from_v2{SingleV3Checks(tx_v3_from_v2, *ancestors_v3_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2))}; + auto result_v3_from_v2{SingleTRUCChecks(tx_v3_from_v2, *ancestors_v3_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2))}; BOOST_CHECK_EQUAL(result_v3_from_v2->first, expected_error_str); BOOST_CHECK_EQUAL(result_v3_from_v2->second, nullptr); Package package_v2_v3{mempool_tx_v2, tx_v3_from_v2}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), package_v2_v3, empty_ancestors), expected_error_str); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), package_v2_v3, empty_ancestors), expected_error_str); CTxMemPool::setEntries entries_mempool_v2{pool.GetIter(mempool_tx_v2->GetHash().ToUint256()).value()}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), {tx_v3_from_v2}, entries_mempool_v2), expected_error_str); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), {tx_v3_from_v2}, entries_mempool_v2), expected_error_str); // mempool_tx_v3 mempool_tx_v2 // ^ ^ // tx_v3_from_v2_and_v3 auto tx_v3_from_v2_and_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}, COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/3); auto ancestors_v3_from_both{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v2_and_v3), m_limits)}; - const auto expected_error_str_2{strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)", + const auto expected_error_str_2{strprintf("version=3 tx %s (wtxid=%s) cannot spend from non-version=3 tx %s 
(wtxid=%s)", tx_v3_from_v2_and_v3->GetHash().ToString(), tx_v3_from_v2_and_v3->GetWitnessHash().ToString(), mempool_tx_v2->GetHash().ToString(), mempool_tx_v2->GetWitnessHash().ToString())}; - auto result_v3_from_both{SingleV3Checks(tx_v3_from_v2_and_v3, *ancestors_v3_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3))}; + auto result_v3_from_both{SingleTRUCChecks(tx_v3_from_v2_and_v3, *ancestors_v3_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3))}; BOOST_CHECK_EQUAL(result_v3_from_both->first, expected_error_str_2); BOOST_CHECK_EQUAL(result_v3_from_both->second, nullptr); - // tx_v3_from_v2_and_v3 also violates V3_ANCESTOR_LIMIT. + // tx_v3_from_v2_and_v3 also violates TRUC_ANCESTOR_LIMIT. const auto expected_error_str_3{strprintf("tx %s (wtxid=%s) would have too many ancestors", tx_v3_from_v2_and_v3->GetHash().ToString(), tx_v3_from_v2_and_v3->GetWitnessHash().ToString())}; Package package_v3_v2_v3{mempool_tx_v3, mempool_tx_v2, tx_v3_from_v2_and_v3}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2_and_v3, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3), package_v3_v2_v3, empty_ancestors), expected_error_str_3); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v3_from_v2_and_v3, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3), package_v3_v2_v3, empty_ancestors), expected_error_str_3); } // V3 from V3 is ok, and non-V3 from non-V3 is ok. { @@ -184,25 +184,25 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) // tx_v3_from_v3 auto tx_v3_from_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}}, /*version=*/3); auto ancestors_v3{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v3), m_limits)}; - BOOST_CHECK(SingleV3Checks(tx_v3_from_v3, *ancestors_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v3)) + BOOST_CHECK(SingleTRUCChecks(tx_v3_from_v3, *ancestors_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v3)) == std::nullopt); Package package_v3_v3{mempool_tx_v3, tx_v3_from_v3}; - BOOST_CHECK(PackageV3Checks(tx_v3_from_v3, GetVirtualTransactionSize(*tx_v3_from_v3), package_v3_v3, empty_ancestors) == std::nullopt); + BOOST_CHECK(PackageTRUCChecks(tx_v3_from_v3, GetVirtualTransactionSize(*tx_v3_from_v3), package_v3_v3, empty_ancestors) == std::nullopt); // mempool_tx_v2 // ^ // tx_v2_from_v2 auto tx_v2_from_v2 = make_tx({COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/2); auto ancestors_v2{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v2), m_limits)}; - BOOST_CHECK(SingleV3Checks(tx_v2_from_v2, *ancestors_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2)) + BOOST_CHECK(SingleTRUCChecks(tx_v2_from_v2, *ancestors_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2)) == std::nullopt); Package package_v2_v2{mempool_tx_v2, tx_v2_from_v2}; - BOOST_CHECK(PackageV3Checks(tx_v2_from_v2, GetVirtualTransactionSize(*tx_v2_from_v2), package_v2_v2, empty_ancestors) == std::nullopt); + BOOST_CHECK(PackageTRUCChecks(tx_v2_from_v2, GetVirtualTransactionSize(*tx_v2_from_v2), package_v2_v2, empty_ancestors) == std::nullopt); } - // Tx spending v3 cannot have too many mempool ancestors + // Tx spending TRUC cannot have too many mempool ancestors // Configuration where the tx has multiple direct parents. 
{ Package package_multi_parents; @@ -221,11 +221,11 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) BOOST_CHECK_EQUAL(ancestors->size(), 3); const auto expected_error_str{strprintf("tx %s (wtxid=%s) would have too many ancestors", tx_v3_multi_parent->GetHash().ToString(), tx_v3_multi_parent->GetWitnessHash().ToString())}; - auto result{SingleV3Checks(tx_v3_multi_parent, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_parent))}; + auto result{SingleTRUCChecks(tx_v3_multi_parent, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_parent))}; BOOST_CHECK_EQUAL(result->first, expected_error_str); BOOST_CHECK_EQUAL(result->second, nullptr); - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_multi_parent, GetVirtualTransactionSize(*tx_v3_multi_parent), package_multi_parents, empty_ancestors), + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v3_multi_parent, GetVirtualTransactionSize(*tx_v3_multi_parent), package_multi_parents, empty_ancestors), expected_error_str); } @@ -246,34 +246,34 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_multi_gen), m_limits)}; const auto expected_error_str{strprintf("tx %s (wtxid=%s) would have too many ancestors", tx_v3_multi_gen->GetHash().ToString(), tx_v3_multi_gen->GetWitnessHash().ToString())}; - auto result{SingleV3Checks(tx_v3_multi_gen, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_gen))}; + auto result{SingleTRUCChecks(tx_v3_multi_gen, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_gen))}; BOOST_CHECK_EQUAL(result->first, expected_error_str); BOOST_CHECK_EQUAL(result->second, nullptr); // Middle tx is what triggers a failure for the grandchild: - BOOST_CHECK_EQUAL(*PackageV3Checks(middle_tx, GetVirtualTransactionSize(*middle_tx), package_multi_gen, empty_ancestors), expected_error_str); - BOOST_CHECK(PackageV3Checks(tx_v3_multi_gen, GetVirtualTransactionSize(*tx_v3_multi_gen), package_multi_gen, empty_ancestors) == std::nullopt); + BOOST_CHECK_EQUAL(*PackageTRUCChecks(middle_tx, GetVirtualTransactionSize(*middle_tx), package_multi_gen, empty_ancestors), expected_error_str); + BOOST_CHECK(PackageTRUCChecks(tx_v3_multi_gen, GetVirtualTransactionSize(*tx_v3_multi_gen), package_multi_gen, empty_ancestors) == std::nullopt); } - // Tx spending v3 cannot be too large in virtual size. + // Tx spending TRUC cannot be too large in virtual size. 
auto many_inputs{random_outpoints(100)}; many_inputs.emplace_back(mempool_tx_v3->GetHash(), 0); { auto tx_v3_child_big = make_tx(many_inputs, /*version=*/3); const auto vsize{GetVirtualTransactionSize(*tx_v3_child_big)}; auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child_big), m_limits)}; - const auto expected_error_str{strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", - tx_v3_child_big->GetHash().ToString(), tx_v3_child_big->GetWitnessHash().ToString(), vsize, V3_CHILD_MAX_VSIZE)}; - auto result{SingleV3Checks(tx_v3_child_big, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child_big))}; + const auto expected_error_str{strprintf("version=3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", + tx_v3_child_big->GetHash().ToString(), tx_v3_child_big->GetWitnessHash().ToString(), vsize, TRUC_CHILD_MAX_VSIZE)}; + auto result{SingleTRUCChecks(tx_v3_child_big, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child_big))}; BOOST_CHECK_EQUAL(result->first, expected_error_str); BOOST_CHECK_EQUAL(result->second, nullptr); Package package_child_big{mempool_tx_v3, tx_v3_child_big}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_child_big, GetVirtualTransactionSize(*tx_v3_child_big), package_child_big, empty_ancestors), + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v3_child_big, GetVirtualTransactionSize(*tx_v3_child_big), package_child_big, empty_ancestors), expected_error_str); } - // Tx spending v3 cannot have too many sigops. + // Tx spending TRUC cannot have too many sigops. // This child has 10 P2WSH multisig inputs. auto multisig_outpoints{random_outpoints(10)}; multisig_outpoints.emplace_back(mempool_tx_v3->GetHash(), 0); @@ -302,34 +302,34 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) BOOST_CHECK_EQUAL(total_sigops, tx_many_sigops->vin.size() * MAX_PUBKEYS_PER_MULTISIG); const int64_t bip141_vsize{GetVirtualTransactionSize(*tx_many_sigops)}; // Weight limit is not reached... - BOOST_CHECK(SingleV3Checks(tx_many_sigops, *ancestors, empty_conflicts_set, bip141_vsize) == std::nullopt); + BOOST_CHECK(SingleTRUCChecks(tx_many_sigops, *ancestors, empty_conflicts_set, bip141_vsize) == std::nullopt); // ...but sigop limit is. - const auto expected_error_str{strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", + const auto expected_error_str{strprintf("version=3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes", tx_many_sigops->GetHash().ToString(), tx_many_sigops->GetWitnessHash().ToString(), - total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, V3_CHILD_MAX_VSIZE)}; - auto result{SingleV3Checks(tx_many_sigops, *ancestors, empty_conflicts_set, + total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, TRUC_CHILD_MAX_VSIZE)}; + auto result{SingleTRUCChecks(tx_many_sigops, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_many_sigops, /*nSigOpCost=*/total_sigops, /*bytes_per_sigop=*/ DEFAULT_BYTES_PER_SIGOP))}; BOOST_CHECK_EQUAL(result->first, expected_error_str); BOOST_CHECK_EQUAL(result->second, nullptr); Package package_child_sigops{mempool_tx_v3, tx_many_sigops}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_many_sigops, total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, package_child_sigops, empty_ancestors), + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_many_sigops, total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, package_child_sigops, empty_ancestors), expected_error_str); } - // Parent + child with v3 in the mempool. 
Child is allowed as long as it is under V3_CHILD_MAX_VSIZE. + // Parent + child with TRUC in the mempool. Child is allowed as long as it is under TRUC_CHILD_MAX_VSIZE. auto tx_mempool_v3_child = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}}, /*version=*/3); { - BOOST_CHECK(GetTransactionWeight(*tx_mempool_v3_child) <= V3_CHILD_MAX_VSIZE * WITNESS_SCALE_FACTOR); + BOOST_CHECK(GetTransactionWeight(*tx_mempool_v3_child) <= TRUC_CHILD_MAX_VSIZE * WITNESS_SCALE_FACTOR); auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_mempool_v3_child), m_limits)}; - BOOST_CHECK(SingleV3Checks(tx_mempool_v3_child, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_mempool_v3_child)) == std::nullopt); + BOOST_CHECK(SingleTRUCChecks(tx_mempool_v3_child, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_mempool_v3_child)) == std::nullopt); pool.addUnchecked(entry.FromTx(tx_mempool_v3_child)); Package package_v3_1p1c{mempool_tx_v3, tx_mempool_v3_child}; - BOOST_CHECK(PackageV3Checks(tx_mempool_v3_child, GetVirtualTransactionSize(*tx_mempool_v3_child), package_v3_1p1c, empty_ancestors) == std::nullopt); + BOOST_CHECK(PackageTRUCChecks(tx_mempool_v3_child, GetVirtualTransactionSize(*tx_mempool_v3_child), package_v3_1p1c, empty_ancestors) == std::nullopt); } - // A v3 transaction cannot have more than 1 descendant. Sibling is returned when exactly 1 exists. + // A TRUC transaction cannot have more than 1 descendant. Sibling is returned when exactly 1 exists. { auto tx_v3_child2 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 1}}, /*version=*/3); @@ -337,17 +337,17 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) auto ancestors_1sibling{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child2), m_limits)}; const auto expected_error_str{strprintf("tx %s (wtxid=%s) would exceed descendant count limit", mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())}; - auto result_with_sibling_eviction{SingleV3Checks(tx_v3_child2, *ancestors_1sibling, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child2))}; + auto result_with_sibling_eviction{SingleTRUCChecks(tx_v3_child2, *ancestors_1sibling, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child2))}; BOOST_CHECK_EQUAL(result_with_sibling_eviction->first, expected_error_str); // The other mempool child is returned to allow for sibling eviction. BOOST_CHECK_EQUAL(result_with_sibling_eviction->second, tx_mempool_v3_child); // If directly replacing the child, make sure there is no double-counting. - BOOST_CHECK(SingleV3Checks(tx_v3_child2, *ancestors_1sibling, {tx_mempool_v3_child->GetHash()}, GetVirtualTransactionSize(*tx_v3_child2)) + BOOST_CHECK(SingleTRUCChecks(tx_v3_child2, *ancestors_1sibling, {tx_mempool_v3_child->GetHash()}, GetVirtualTransactionSize(*tx_v3_child2)) == std::nullopt); Package package_v3_1p2c{mempool_tx_v3, tx_mempool_v3_child, tx_v3_child2}; - BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_child2, GetVirtualTransactionSize(*tx_v3_child2), package_v3_1p2c, empty_ancestors), + BOOST_CHECK_EQUAL(*PackageTRUCChecks(tx_v3_child2, GetVirtualTransactionSize(*tx_v3_child2), package_v3_1p2c, empty_ancestors), expected_error_str); // Configuration where parent already has 2 other children in mempool (no sibling eviction allowed). This may happen as the result of a reorg. 
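The assertions in these tests read SingleTRUCChecks as returning no value when the topology rules pass, and otherwise a pair of a reason string plus, when exactly one existing child occupies the single allowed descendant slot, a reference to that child so the caller can consider sibling eviction. A minimal sketch of consuming that shape; everything here is a made-up stand-in except the pass/fail-with-optional-sibling behaviour visible in the assertions above:

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>

struct Tx { std::string name; };
using TxRef = std::shared_ptr<const Tx>;

// Illustrative only: the real check takes the tx, its in-mempool ancestors,
// the set of direct conflicts and the tx's virtual size.
std::optional<std::pair<std::string, TxRef>> CheckTrucSketch(bool ok, TxRef sibling)
{
    if (ok) return std::nullopt;
    return std::make_pair(std::string{"would exceed descendant count limit"}, std::move(sibling));
}

int main()
{
    const auto existing_child = std::make_shared<const Tx>(Tx{"child1"});
    if (const auto res = CheckTrucSketch(/*ok=*/false, existing_child)) {
        std::cout << "reject: " << res->first << '\n';
        if (res->second) {
            // Exactly one sibling exists, so sibling eviction may be attempted.
            std::cout << "candidate for sibling eviction: " << res->second->name << '\n';
        }
    }
}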
@@ -357,7 +357,7 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) BOOST_CHECK_EQUAL(entry_mempool_parent->GetCountWithDescendants(), 3); auto ancestors_2siblings{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child3), m_limits)}; - auto result_2children{SingleV3Checks(tx_v3_child3, *ancestors_2siblings, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child3))}; + auto result_2children{SingleTRUCChecks(tx_v3_child3, *ancestors_2siblings, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child3))}; BOOST_CHECK_EQUAL(result_2children->first, expected_error_str); // The other mempool child is not returned because sibling eviction is not allowed. BOOST_CHECK_EQUAL(result_2children->second, nullptr); @@ -377,7 +377,7 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup) auto ancestors_3gen{pool.CalculateMemPoolAncestors(entry.FromTx(tx_to_submit), m_limits)}; const auto expected_error_str{strprintf("tx %s (wtxid=%s) would exceed descendant count limit", tx_mempool_grandparent->GetHash().ToString(), tx_mempool_grandparent->GetWitnessHash().ToString())}; - auto result_3gen{SingleV3Checks(tx_to_submit, *ancestors_3gen, empty_conflicts_set, GetVirtualTransactionSize(*tx_to_submit))}; + auto result_3gen{SingleTRUCChecks(tx_to_submit, *ancestors_3gen, empty_conflicts_set, GetVirtualTransactionSize(*tx_to_submit))}; BOOST_CHECK_EQUAL(result_3gen->first, expected_error_str); // The other mempool child is not returned because sibling eviction is not allowed. BOOST_CHECK_EQUAL(result_3gen->second, nullptr); diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 78ef96a15d..af36a95693 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -5,6 +5,7 @@ #include <consensus/validation.h> #include <key.h> #include <random.h> +#include <script/sigcache.h> #include <script/sign.h> #include <script/signingprovider.h> #include <test/util/setup_common.h> @@ -16,12 +17,13 @@ struct Dersig100Setup : public TestChain100Setup { Dersig100Setup() - : TestChain100Setup{ChainType::REGTEST, {"-testactivationheight=dersig@102"}} {} + : TestChain100Setup{ChainType::REGTEST, {.extra_args = {"-testactivationheight=dersig@102"}}} {} }; bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, + ValidationCache& validation_cache, std::vector<CScriptCheck>* pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main); BOOST_AUTO_TEST_SUITE(txvalidationcache_tests) @@ -118,7 +120,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, Dersig100Setup) // should fail. // Capture this interaction with the upgraded_nop argument: set it when evaluating // any script flag that is implemented as an upgraded NOP code. 
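The Dersig100Setup change reflects the wider move (visible later in setup_common.h) from positional constructor arguments to a TestOpts aggregate initialized with C++20 designated initializers. A small self-contained sketch of the pattern, with hypothetical Opts/Setup names:

    // Sketch of the options-struct pattern; Opts mirrors TestOpts, Setup is hypothetical.
    #include <utility>
    #include <vector>

    struct Opts {
        std::vector<const char*> extra_args{};
        bool coins_db_in_memory{true};
        bool block_tree_db_in_memory{true};
    };

    struct Setup {
        explicit Setup(Opts opts = {}) : m_opts{std::move(opts)} {}
        Opts m_opts;
    };

    int main()
    {
        // Callers name only the fields they care about; the defaults cover the rest.
        Setup with_args{{.extra_args = {"-testactivationheight=dersig@102"}}};
        Setup on_disk{{.coins_db_in_memory = false, .block_tree_db_in_memory = false}};
        return with_args.m_opts.coins_db_in_memory && !on_disk.m_opts.coins_db_in_memory ? 0 : 1;
    }

The payoff shows up in the SnapshotTestSetup hunk further down: call sites no longer need to pass placeholder {} arguments just to reach the boolean they want to override.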
-static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache, CCoinsViewCache& active_coins_tip) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) +static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache, CCoinsViewCache& active_coins_tip, ValidationCache& validation_cache) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { PrecomputedTransactionData txdata; @@ -140,7 +142,7 @@ static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t fail // WITNESS requires P2SH test_flags |= SCRIPT_VERIFY_P2SH; } - bool ret = CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, nullptr); + bool ret = CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, validation_cache, nullptr); // CheckInputScripts should succeed iff test_flags doesn't intersect with // failing_flags bool expected_return_value = !(test_flags & failing_flags); @@ -150,13 +152,13 @@ static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t fail if (ret && add_to_cache) { // Check that we get a cache hit if the tx was valid std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, validation_cache, &scriptchecks)); BOOST_CHECK(scriptchecks.empty()); } else { // Check that we get script executions to check, if the transaction // was invalid, or we didn't add to cache. std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, validation_cache, &scriptchecks)); BOOST_CHECK_EQUAL(scriptchecks.size(), tx.vin.size()); } } @@ -214,20 +216,20 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) TxValidationState state; PrecomputedTransactionData ptd_spend_tx; - BOOST_CHECK(!CheckInputScripts(CTransaction(spend_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, nullptr)); + BOOST_CHECK(!CheckInputScripts(CTransaction(spend_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, m_node.chainman->m_validation_cache, nullptr)); // If we call again asking for scriptchecks (as happens in // ConnectBlock), we should add a script check object for this -- we're // not caching invalidity (if that changes, delete this test case). std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputScripts(CTransaction(spend_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(CTransaction(spend_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, m_node.chainman->m_validation_cache, &scriptchecks)); BOOST_CHECK_EQUAL(scriptchecks.size(), 1U); // Test that CheckInputScripts returns true iff DERSIG-enforcing flags are // not present. Don't add these checks to the cache, so that we can // test later that block validation works fine in the absence of cached // successes. 
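Throughout this hunk, CheckInputScripts and the ValidateCheckInputsForAllFlags helper gain an explicit ValidationCache& parameter, so the signature and script-execution caches travel with the ChainstateManager instead of living in file-scope statics. A hedged sketch of the dependency-injection shape (the cache and manager types below are stand-ins, not the real ValidationCache API):

    // Sketch only: state formerly held in globals is owned by a manager object and
    // passed by reference to the code that needs it.
    #include <cstdint>
    #include <unordered_set>

    struct ScriptCacheStub {
        std::unordered_set<uint64_t> hits; // stand-in for the cuckoo-cache entries
    };

    struct ManagerStub {
        ScriptCacheStub m_validation_cache; // owned per manager, not per process
    };

    bool CheckScriptsStub(uint64_t cache_key, bool cache_result, ScriptCacheStub& cache)
    {
        if (cache.hits.count(cache_key)) return true;   // prior success: skip re-verification
        // ... actual script verification would run here ...
        if (cache_result) cache.hits.insert(cache_key);
        return true;
    }

Tests then reach the cache through m_node.chainman->m_validation_cache rather than relying on InitSignatureCache/InitScriptExecutionCache globals, which also explains the initialization removed from setup_common.cpp further down.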
- ValidateCheckInputsForAllFlags(CTransaction(spend_tx), SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(spend_tx), SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); } // And if we produce a block with this tx, it should be valid (DERSIG not @@ -253,7 +255,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) std::vector<unsigned char> vchSig2(p2pk_scriptPubKey.begin(), p2pk_scriptPubKey.end()); invalid_under_p2sh_tx.vin[0].scriptSig << vchSig2; - ValidateCheckInputsForAllFlags(CTransaction(invalid_under_p2sh_tx), SCRIPT_VERIFY_P2SH, true, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(invalid_under_p2sh_tx), SCRIPT_VERIFY_P2SH, true, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); } // Test CHECKLOCKTIMEVERIFY @@ -276,13 +278,13 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 101; - ValidateCheckInputsForAllFlags(CTransaction(invalid_with_cltv_tx), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(invalid_with_cltv_tx), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); // Make it valid, and check again invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 100; TxValidationState state; PrecomputedTransactionData txdata; - BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_cltv_tx), state, m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, nullptr)); + BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_cltv_tx), state, m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, m_node.chainman->m_validation_cache, nullptr)); } // TEST CHECKSEQUENCEVERIFY @@ -304,13 +306,13 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 101; - ValidateCheckInputsForAllFlags(CTransaction(invalid_with_csv_tx), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(invalid_with_csv_tx), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); // Make it valid, and check again invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 100; TxValidationState state; PrecomputedTransactionData txdata; - BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_csv_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, nullptr)); + BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_csv_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, m_node.chainman->m_validation_cache, nullptr)); } // TODO: add tests for remaining script flags @@ -333,11 +335,11 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) UpdateInput(valid_with_witness_tx.vin[0], sigdata); // This should be valid under all script 
flags. - ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), 0, true, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), 0, true, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); // Remove the witness, and check that it is now invalid. valid_with_witness_tx.vin[0].scriptWitness.SetNull(); - ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), SCRIPT_VERIFY_WITNESS, true, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), SCRIPT_VERIFY_WITNESS, true, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); } { @@ -362,7 +364,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) } // This should be valid under all script flags - ValidateCheckInputsForAllFlags(CTransaction(tx), 0, true, m_node.chainman->ActiveChainstate().CoinsTip()); + ValidateCheckInputsForAllFlags(CTransaction(tx), 0, true, m_node.chainman->ActiveChainstate().CoinsTip(), m_node.chainman->m_validation_cache); // Check that if the second input is invalid, but the first input is // valid, the transaction is not cached. @@ -372,12 +374,12 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, Dersig100Setup) TxValidationState state; PrecomputedTransactionData txdata; // This transaction is now invalid under segwit, because of the second input. - BOOST_CHECK(!CheckInputScripts(CTransaction(tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, nullptr)); + BOOST_CHECK(!CheckInputScripts(CTransaction(tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, m_node.chainman->m_validation_cache, nullptr)); std::vector<CScriptCheck> scriptchecks; // Make sure this transaction was not cached (ie because the first // input was valid) - BOOST_CHECK(CheckInputScripts(CTransaction(tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(CTransaction(tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, m_node.chainman->m_validation_cache, &scriptchecks)); // Should get 2 script checks back -- caching is on a whole-transaction basis. BOOST_CHECK_EQUAL(scriptchecks.size(), 2U); } diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h index 03b44fc894..a4636365ca 100644 --- a/src/test/util/chainstate.h +++ b/src/test/util/chainstate.h @@ -124,11 +124,11 @@ CreateAndActivateUTXOSnapshot( new_active.m_chain.SetTip(*(tip->pprev)); } - bool res = node.chainman->ActivateSnapshot(auto_infile, metadata, in_memory_chainstate); + auto res = node.chainman->ActivateSnapshot(auto_infile, metadata, in_memory_chainstate); // Restore the old tip. new_active.m_chain.SetTip(*tip); - return res; + return !!res; } diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp index 9257a4964a..beefc32bee 100644 --- a/src/test/util/net.cpp +++ b/src/test/util/net.cpp @@ -28,7 +28,8 @@ void ConnmanTestMsg::Handshake(CNode& node, auto& connman{*this}; peerman.InitializeNode(node, local_services); - FlushSendBuffer(node); // Drop the version message added by InitializeNode. + peerman.SendMessages(&node); + FlushSendBuffer(node); // Drop the version message added by SendMessages. 
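In the chainstate test helper, ActivateSnapshot no longer returns a plain bool, so the helper stores the richer result in auto res and collapses it with !!res to keep its own boolean return type unchanged. A small sketch of that conversion pattern, using an illustrative result type rather than the actual return type of ActivateSnapshot:

    // Sketch only: a result-like type that is contextually convertible to bool.
    #include <optional>
    #include <string>

    template <typename T>
    struct ResultStub {
        std::optional<T> value;
        std::string error;
        explicit operator bool() const { return value.has_value(); }
    };

    ResultStub<int> ActivateStub(bool ok)
    {
        ResultStub<int> res;
        if (ok) res.value = 42; else res.error = "snapshot rejected";
        return res;
    }

    bool HelperStub()
    {
        auto res = ActivateStub(true);
        // The caller only cares about success/failure, so !! forces the explicit
        // bool conversion and discards the payload.
        return !!res;
    }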
CSerializedNetMsg msg_version{ NetMsg::Make(NetMsgType::VERSION, @@ -118,20 +119,20 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candida candidates.reserve(n_candidates); for (int id = 0; id < n_candidates; ++id) { candidates.push_back({ - /*id=*/id, - /*m_connected=*/std::chrono::seconds{random_context.randrange(100)}, - /*m_min_ping_time=*/std::chrono::microseconds{random_context.randrange(100)}, - /*m_last_block_time=*/std::chrono::seconds{random_context.randrange(100)}, - /*m_last_tx_time=*/std::chrono::seconds{random_context.randrange(100)}, - /*fRelevantServices=*/random_context.randbool(), - /*m_relay_txs=*/random_context.randbool(), - /*fBloomFilter=*/random_context.randbool(), - /*nKeyedNetGroup=*/random_context.randrange(100), - /*prefer_evict=*/random_context.randbool(), - /*m_is_local=*/random_context.randbool(), - /*m_network=*/ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())], - /*m_noban=*/false, - /*m_conn_type=*/ConnectionType::INBOUND, + .id=id, + .m_connected=std::chrono::seconds{random_context.randrange(100)}, + .m_min_ping_time=std::chrono::microseconds{random_context.randrange(100)}, + .m_last_block_time=std::chrono::seconds{random_context.randrange(100)}, + .m_last_tx_time=std::chrono::seconds{random_context.randrange(100)}, + .fRelevantServices=random_context.randbool(), + .m_relay_txs=random_context.randbool(), + .fBloomFilter=random_context.randbool(), + .nKeyedNetGroup=random_context.randrange(100u), + .prefer_evict=random_context.randbool(), + .m_is_local=random_context.randbool(), + .m_network=ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())], + .m_noban=false, + .m_conn_type=ConnectionType::INBOUND, }); } return candidates; diff --git a/src/test/util/random.cpp b/src/test/util/random.cpp index 4c87ab8df8..47d03055e2 100644 --- a/src/test/util/random.cpp +++ b/src/test/util/random.cpp @@ -13,21 +13,26 @@ FastRandomContext g_insecure_rand_ctx; -/** Return the unsigned from the environment var if available, otherwise 0 */ -static uint256 GetUintFromEnv(const std::string& env_name) -{ - const char* num = std::getenv(env_name.c_str()); - if (!num) return {}; - return uint256S(num); -} +extern void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept; -void Seed(FastRandomContext& ctx) +void SeedRandomForTest(SeedRand seedtype) { - // Should be enough to get the seed once for the process - static uint256 seed{}; static const std::string RANDOM_CTX_SEED{"RANDOM_CTX_SEED"}; - if (seed.IsNull()) seed = GetUintFromEnv(RANDOM_CTX_SEED); - if (seed.IsNull()) seed = GetRandHash(); + + // Do this once, on the first call, regardless of seedtype, because once + // MakeRandDeterministicDANGEROUS is called, the output of GetRandHash is + // no longer truly random. It should be enough to get the seed once for the + // process. + static const uint256 ctx_seed = []() { + // If RANDOM_CTX_SEED is set, use that as seed. + const char* num = std::getenv(RANDOM_CTX_SEED.c_str()); + if (num) return uint256S(num); + // Otherwise use a (truly) random value. + return GetRandHash(); + }(); + + const uint256& seed{seedtype == SeedRand::SEED ? 
ctx_seed : uint256::ZERO}; LogPrintf("%s: Setting random seed for current tests to %s=%s\n", __func__, RANDOM_CTX_SEED, seed.GetHex()); - ctx = FastRandomContext(seed); + MakeRandDeterministicDANGEROUS(seed); + g_insecure_rand_ctx.Reseed(GetRandHash()); } diff --git a/src/test/util/random.h b/src/test/util/random.h index 18ab425e48..09a475f8b3 100644 --- a/src/test/util/random.h +++ b/src/test/util/random.h @@ -19,27 +19,13 @@ */ extern FastRandomContext g_insecure_rand_ctx; -/** - * Flag to make GetRand in random.h return the same number - */ -extern bool g_mock_deterministic_tests; - enum class SeedRand { ZEROS, //!< Seed with a compile time constant of zeros - SEED, //!< Call the Seed() helper + SEED, //!< Use (and report) random seed from environment, or a (truly) random one. }; -/** Seed the given random ctx or use the seed passed in via an environment var */ -void Seed(FastRandomContext& ctx); - -static inline void SeedInsecureRand(SeedRand seed = SeedRand::SEED) -{ - if (seed == SeedRand::ZEROS) { - g_insecure_rand_ctx = FastRandomContext(/*fDeterministic=*/true); - } else { - Seed(g_insecure_rand_ctx); - } -} +/** Seed the RNG for testing. This affects all randomness, except GetStrongRandBytes(). */ +void SeedRandomForTest(SeedRand seed = SeedRand::SEED); static inline uint32_t InsecureRand32() { diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index cc7b2d6546..60f9261e7e 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -6,8 +6,6 @@ #include <test/util/setup_common.h> -#include <kernel/validation_cache_sizes.h> - #include <addrman.h> #include <banman.h> #include <chainparams.h> @@ -30,7 +28,6 @@ #include <node/mempool_args.h> #include <node/miner.h> #include <node/peerman_args.h> -#include <node/validation_cache_args.h> #include <node/warnings.h> #include <noui.h> #include <policy/fees.h> @@ -68,7 +65,6 @@ #include <stdexcept> using kernel::BlockTreeDB; -using kernel::ValidationCacheSizes; using node::ApplyArgsManOptions; using node::BlockAssembler; using node::BlockManager; @@ -112,7 +108,7 @@ static void ExitFailure(std::string_view str_err) exit(EXIT_FAILURE); } -BasicTestingSetup::BasicTestingSetup(const ChainType chainType, const std::vector<const char*>& extra_args) +BasicTestingSetup::BasicTestingSetup(const ChainType chainType, TestOpts opts) : m_args{} { m_node.shutdown = &m_interrupt; @@ -129,7 +125,7 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, const std::vecto "-debugexclude=libevent", "-debugexclude=leveldb", }, - extra_args); + opts.extra_args); if (G_TEST_COMMAND_LINE_ARGUMENTS) { arguments = Cat(arguments, G_TEST_COMMAND_LINE_ARGUMENTS()); } @@ -145,6 +141,10 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, const std::vecto } } + // Use randomly chosen seed for deterministic PRNG, so that (by default) test + // data directories use a random name that doesn't overlap with other tests. 
+ SeedRandomForTest(SeedRand::SEED); + if (!m_node.args->IsArgSet("-testdatadir")) { // By default, the data directory has a random name const auto rand_str{g_insecure_rand_ctx_temp_path.rand256().ToString()}; @@ -178,7 +178,6 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, const std::vecto gArgs.ForceSetArg("-datadir", fs::PathToString(m_path_root)); SelectParams(chainType); - SeedInsecureRand(); if (G_TEST_LOG_FUN) LogInstance().PushBackCallback(G_TEST_LOG_FUN); InitLogging(*m_node.args); AppInitParameterInteraction(*m_node.args); @@ -188,11 +187,6 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, const std::vecto m_node.ecc_context = std::make_unique<ECC_Context>(); SetupEnvironment(); - ValidationCacheSizes validation_cache_sizes{}; - ApplyArgsManOptions(*m_node.args, validation_cache_sizes); - Assert(InitSignatureCache(validation_cache_sizes.signature_cache_bytes)); - Assert(InitScriptExecutionCache(validation_cache_sizes.script_execution_cache_bytes)); - m_node.chain = interfaces::MakeChain(m_node); static bool noui_connected = false; if (!noui_connected) { @@ -217,8 +211,8 @@ BasicTestingSetup::~BasicTestingSetup() gArgs.ClearArgs(); } -ChainTestingSetup::ChainTestingSetup(const ChainType chainType, const std::vector<const char*>& extra_args) - : BasicTestingSetup(chainType, extra_args) +ChainTestingSetup::ChainTestingSetup(const ChainType chainType, TestOpts opts) + : BasicTestingSetup(chainType, opts) { const CChainParams& chainparams = Params(); @@ -301,13 +295,11 @@ void ChainTestingSetup::LoadVerifyActivateChainstate() TestingSetup::TestingSetup( const ChainType chainType, - const std::vector<const char*>& extra_args, - const bool coins_db_in_memory, - const bool block_tree_db_in_memory) - : ChainTestingSetup(chainType, extra_args) + TestOpts opts) + : ChainTestingSetup(chainType, opts) { - m_coins_db_in_memory = coins_db_in_memory; - m_block_tree_db_in_memory = block_tree_db_in_memory; + m_coins_db_in_memory = opts.coins_db_in_memory; + m_block_tree_db_in_memory = opts.block_tree_db_in_memory; // Ideally we'd move all the RPC tests to the functional testing framework // instead of unit tests, but for now we need these here. RegisterAllCoreRPCCommands(tableRPC); @@ -336,11 +328,9 @@ TestingSetup::TestingSetup( } TestChain100Setup::TestChain100Setup( - const ChainType chain_type, - const std::vector<const char*>& extra_args, - const bool coins_db_in_memory, - const bool block_tree_db_in_memory) - : TestingSetup{ChainType::REGTEST, extra_args, coins_db_in_memory, block_tree_db_in_memory} + const ChainType chain_type, + TestOpts opts) + : TestingSetup{ChainType::REGTEST, opts} { SetMockTime(1598887952); constexpr std::array<unsigned char, 32> vchKey = { diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index dbd66e3585..e8b630af94 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -48,6 +48,12 @@ std::ostream& operator<<(typename std::enable_if<std::is_enum<T>::value, std::os static constexpr CAmount CENT{1000000}; +struct TestOpts { + std::vector<const char*> extra_args{}; + bool coins_db_in_memory{true}; + bool block_tree_db_in_memory{true}; +}; + /** Basic testing setup. * This just configures logging, data dir and chain parameters. 
*/ @@ -55,7 +61,7 @@ struct BasicTestingSetup { util::SignalInterrupt m_interrupt; node::NodeContext m_node; // keep as first member to be destructed last - explicit BasicTestingSetup(const ChainType chainType = ChainType::MAIN, const std::vector<const char*>& extra_args = {}); + explicit BasicTestingSetup(const ChainType chainType = ChainType::MAIN, TestOpts = {}); ~BasicTestingSetup(); fs::path m_path_root; @@ -73,7 +79,7 @@ struct ChainTestingSetup : public BasicTestingSetup { bool m_coins_db_in_memory{true}; bool m_block_tree_db_in_memory{true}; - explicit ChainTestingSetup(const ChainType chainType = ChainType::MAIN, const std::vector<const char*>& extra_args = {}); + explicit ChainTestingSetup(const ChainType chainType = ChainType::MAIN, TestOpts = {}); ~ChainTestingSetup(); // Supplies a chainstate, if one is needed @@ -85,9 +91,7 @@ struct ChainTestingSetup : public BasicTestingSetup { struct TestingSetup : public ChainTestingSetup { explicit TestingSetup( const ChainType chainType = ChainType::MAIN, - const std::vector<const char*>& extra_args = {}, - const bool coins_db_in_memory = true, - const bool block_tree_db_in_memory = true); + TestOpts = {}); }; /** Identical to TestingSetup, but chain set to regtest */ @@ -106,9 +110,7 @@ class CScript; struct TestChain100Setup : public TestingSetup { TestChain100Setup( const ChainType chain_type = ChainType::REGTEST, - const std::vector<const char*>& extra_args = {}, - const bool coins_db_in_memory = true, - const bool block_tree_db_in_memory = true); + TestOpts = {}); /** * Create a new block with just given transactions, coinbase paying to @@ -220,16 +222,16 @@ struct TestChain100Setup : public TestingSetup { * be used in "hot loops", for example fuzzing or benchmarking. */ template <class T = const BasicTestingSetup> -std::unique_ptr<T> MakeNoLogFileContext(const ChainType chain_type = ChainType::REGTEST, const std::vector<const char*>& extra_args = {}) +std::unique_ptr<T> MakeNoLogFileContext(const ChainType chain_type = ChainType::REGTEST, TestOpts opts = {}) { - const std::vector<const char*> arguments = Cat( + opts.extra_args = Cat( { "-nodebuglogfile", "-nodebug", }, - extra_args); + opts.extra_args); - return std::make_unique<T>(chain_type, arguments); + return std::make_unique<T>(chain_type, opts); } CBlock getBlock13b8a(); diff --git a/src/test/util/txmempool.cpp b/src/test/util/txmempool.cpp index 94d50bba50..9d6b4810d0 100644 --- a/src/test/util/txmempool.cpp +++ b/src/test/util/txmempool.cpp @@ -8,7 +8,7 @@ #include <node/context.h> #include <node/mempool_args.h> #include <policy/rbf.h> -#include <policy/v3_policy.h> +#include <policy/truc_policy.h> #include <txmempool.h> #include <util/check.h> #include <util/time.h> @@ -141,30 +141,30 @@ std::optional<std::string> CheckPackageMempoolAcceptResult(const Package& txns, return std::nullopt; } -void CheckMempoolV3Invariants(const CTxMemPool& tx_pool) +void CheckMempoolTRUCInvariants(const CTxMemPool& tx_pool) { LOCK(tx_pool.cs); for (const auto& tx_info : tx_pool.infoAll()) { const auto& entry = *Assert(tx_pool.GetEntry(tx_info.tx->GetHash())); if (tx_info.tx->version == TRUC_VERSION) { // Check that special maximum virtual size is respected - Assert(entry.GetTxSize() <= V3_MAX_VSIZE); + Assert(entry.GetTxSize() <= TRUC_MAX_VSIZE); - // Check that special v3 ancestor/descendant limits and rules are always respected - Assert(entry.GetCountWithDescendants() <= V3_DESCENDANT_LIMIT); - Assert(entry.GetCountWithAncestors() <= V3_ANCESTOR_LIMIT); - 
Assert(entry.GetSizeWithDescendants() <= V3_MAX_VSIZE + V3_CHILD_MAX_VSIZE); - Assert(entry.GetSizeWithAncestors() <= V3_MAX_VSIZE + V3_CHILD_MAX_VSIZE); + // Check that special TRUC ancestor/descendant limits and rules are always respected + Assert(entry.GetCountWithDescendants() <= TRUC_DESCENDANT_LIMIT); + Assert(entry.GetCountWithAncestors() <= TRUC_ANCESTOR_LIMIT); + Assert(entry.GetSizeWithDescendants() <= TRUC_MAX_VSIZE + TRUC_CHILD_MAX_VSIZE); + Assert(entry.GetSizeWithAncestors() <= TRUC_MAX_VSIZE + TRUC_CHILD_MAX_VSIZE); // If this transaction has at least 1 ancestor, it's a "child" and has restricted weight. if (entry.GetCountWithAncestors() > 1) { - Assert(entry.GetTxSize() <= V3_CHILD_MAX_VSIZE); - // All v3 transactions must only have v3 unconfirmed parents. + Assert(entry.GetTxSize() <= TRUC_CHILD_MAX_VSIZE); + // All TRUC transactions must only have TRUC unconfirmed parents. const auto& parents = entry.GetMemPoolParentsConst(); Assert(parents.begin()->get().GetSharedTx()->version == TRUC_VERSION); } } else if (entry.GetCountWithAncestors() > 1) { - // All non-v3 transactions must only have non-v3 unconfirmed parents. + // All non-TRUC transactions must only have non-TRUC unconfirmed parents. for (const auto& parent : entry.GetMemPoolParentsConst()) { Assert(parent.get().GetSharedTx()->version != TRUC_VERSION); } diff --git a/src/test/util/txmempool.h b/src/test/util/txmempool.h index b3022af7df..6d41fdf87f 100644 --- a/src/test/util/txmempool.h +++ b/src/test/util/txmempool.h @@ -47,13 +47,13 @@ std::optional<std::string> CheckPackageMempoolAcceptResult(const Package& txns, bool expect_valid, const CTxMemPool* mempool); -/** For every transaction in tx_pool, check v3 invariants: - * - a v3 tx's ancestor count must be within V3_ANCESTOR_LIMIT - * - a v3 tx's descendant count must be within V3_DESCENDANT_LIMIT - * - if a v3 tx has ancestors, its sigop-adjusted vsize must be within V3_CHILD_MAX_VSIZE - * - any non-v3 tx must only have non-v3 parents - * - any v3 tx must only have v3 parents +/** For every transaction in tx_pool, check TRUC invariants: + * - a TRUC tx's ancestor count must be within TRUC_ANCESTOR_LIMIT + * - a TRUC tx's descendant count must be within TRUC_DESCENDANT_LIMIT + * - if a TRUC tx has ancestors, its sigop-adjusted vsize must be within TRUC_CHILD_MAX_VSIZE + * - any non-TRUC tx must only have non-TRUC parents + * - any TRUC tx must only have TRUC parents * */ -void CheckMempoolV3Invariants(const CTxMemPool& tx_pool); +void CheckMempoolTRUCInvariants(const CTxMemPool& tx_pool); #endif // BITCOIN_TEST_UTIL_TXMEMPOOL_H diff --git a/src/test/util/xoroshiro128plusplus.h b/src/test/util/xoroshiro128plusplus.h deleted file mode 100644 index ac9f59b3f5..0000000000 --- a/src/test/util/xoroshiro128plusplus.h +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2022 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H -#define BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H - -#include <cstdint> -#include <limits> - -/** xoroshiro128++ PRNG. Extremely fast, not appropriate for cryptographic purposes. - * - * Memory footprint is 128bit, period is 2^128 - 1. - * This class is not thread-safe. 
- * - * Reference implementation available at https://prng.di.unimi.it/xoroshiro128plusplus.c - * See https://prng.di.unimi.it/ - */ -class XoRoShiRo128PlusPlus -{ - uint64_t m_s0; - uint64_t m_s1; - - [[nodiscard]] constexpr static uint64_t rotl(uint64_t x, int n) - { - return (x << n) | (x >> (64 - n)); - } - - [[nodiscard]] constexpr static uint64_t SplitMix64(uint64_t& seedval) noexcept - { - uint64_t z = (seedval += UINT64_C(0x9e3779b97f4a7c15)); - z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9); - z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb); - return z ^ (z >> 31U); - } - -public: - using result_type = uint64_t; - - constexpr explicit XoRoShiRo128PlusPlus(uint64_t seedval) noexcept - : m_s0(SplitMix64(seedval)), m_s1(SplitMix64(seedval)) - { - } - - // no copy - that is dangerous, we don't want accidentally copy the RNG and then have two streams - // with exactly the same results. If you need a copy, call copy(). - XoRoShiRo128PlusPlus(const XoRoShiRo128PlusPlus&) = delete; - XoRoShiRo128PlusPlus& operator=(const XoRoShiRo128PlusPlus&) = delete; - - // allow moves - XoRoShiRo128PlusPlus(XoRoShiRo128PlusPlus&&) = default; - XoRoShiRo128PlusPlus& operator=(XoRoShiRo128PlusPlus&&) = default; - - ~XoRoShiRo128PlusPlus() = default; - - constexpr result_type operator()() noexcept - { - uint64_t s0 = m_s0, s1 = m_s1; - const uint64_t result = rotl(s0 + s1, 17) + s0; - s1 ^= s0; - m_s0 = rotl(s0, 49) ^ s1 ^ (s1 << 21); - m_s1 = rotl(s1, 28); - return result; - } - - static constexpr result_type min() noexcept { return std::numeric_limits<result_type>::min(); } - static constexpr result_type max() noexcept { return std::numeric_limits<result_type>::max(); } - static constexpr double entropy() noexcept { return 0.0; } -}; - -#endif // BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index a371753adf..5654c8b0a8 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -459,7 +459,7 @@ BOOST_AUTO_TEST_CASE(util_IsHexNumber) BOOST_AUTO_TEST_CASE(util_seed_insecure_rand) { - SeedInsecureRand(SeedRand::ZEROS); + SeedRandomForTest(SeedRand::ZEROS); for (int mod=2;mod<11;mod++) { int mask = 1; @@ -1508,8 +1508,10 @@ struct Tracker Tracker(Tracker&& t) noexcept : origin(t.origin), copies(t.copies) {} Tracker& operator=(const Tracker& t) noexcept { - origin = t.origin; - copies = t.copies + 1; + if (this != &t) { + origin = t.origin; + copies = t.copies + 1; + } return *this; } }; diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 1641c4cd22..f93e3cdfb1 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -167,9 +167,10 @@ struct SnapshotTestSetup : TestChain100Setup { // destructive filesystem operations. SnapshotTestSetup() : TestChain100Setup{ {}, - {}, - /*coins_db_in_memory=*/false, - /*block_tree_db_in_memory=*/false, + { + .coins_db_in_memory = false, + .block_tree_db_in_memory = false, + }, } { } diff --git a/src/test/xoroshiro128plusplus_tests.cpp b/src/test/xoroshiro128plusplus_tests.cpp deleted file mode 100644 index ea1b3e355f..0000000000 --- a/src/test/xoroshiro128plusplus_tests.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2022 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -#include <test/util/setup_common.h> -#include <test/util/xoroshiro128plusplus.h> - -#include <boost/test/unit_test.hpp> - -BOOST_FIXTURE_TEST_SUITE(xoroshiro128plusplus_tests, BasicTestingSetup) - -BOOST_AUTO_TEST_CASE(reference_values) -{ - // numbers generated from reference implementation - XoRoShiRo128PlusPlus rng(0); - BOOST_TEST(0x6f68e1e7e2646ee1 == rng()); - BOOST_TEST(0xbf971b7f454094ad == rng()); - BOOST_TEST(0x48f2de556f30de38 == rng()); - BOOST_TEST(0x6ea7c59f89bbfc75 == rng()); - - // seed with a random number - rng = XoRoShiRo128PlusPlus(0x1a26f3fa8546b47a); - BOOST_TEST(0xc8dc5e08d844ac7d == rng()); - BOOST_TEST(0x5b5f1f6d499dad1b == rng()); - BOOST_TEST(0xbeb0031f93313d6f == rng()); - BOOST_TEST(0xbfbcf4f43a264497 == rng()); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/threadsafety.h b/src/threadsafety.h index 28b6177927..2e9a39bfc9 100644 --- a/src/threadsafety.h +++ b/src/threadsafety.h @@ -71,7 +71,7 @@ class SCOPED_LOCKABLE StdLockGuard : public std::lock_guard<StdMutex> { public: explicit StdLockGuard(StdMutex& cs) EXCLUSIVE_LOCK_FUNCTION(cs) : std::lock_guard<StdMutex>(cs) {} - ~StdLockGuard() UNLOCK_FUNCTION() {} + ~StdLockGuard() UNLOCK_FUNCTION() = default; }; #endif // BITCOIN_THREADSAFETY_H diff --git a/src/tinyformat.h b/src/tinyformat.h index 3ec385bc95..f536306375 100644 --- a/src/tinyformat.h +++ b/src/tinyformat.h @@ -507,8 +507,7 @@ namespace detail { class FormatArg { public: - FormatArg() - { } + FormatArg() = default; template<typename T> explicit FormatArg(const T& value) diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 10674c07ac..f56da08e5f 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -657,7 +657,7 @@ void CTxMemPool::check(const CCoinsViewCache& active_coins_tip, int64_t spendhei { if (m_opts.check_ratio == 0) return; - if (GetRand(m_opts.check_ratio) >= 1) return; + if (FastRandomContext().randrange(m_opts.check_ratio) >= 1) return; AssertLockHeld(::cs_main); LOCK(cs); diff --git a/src/txorphanage.h b/src/txorphanage.h index 3054396b2d..3083c8467f 100644 --- a/src/txorphanage.h +++ b/src/txorphanage.h @@ -92,7 +92,7 @@ protected: template<typename I> bool operator()(const I& a, const I& b) const { - return &(*a) < &(*b); + return a->first < b->first; } }; diff --git a/src/txrequest.cpp b/src/txrequest.cpp index ce5fbd9a7f..6338ccb118 100644 --- a/src/txrequest.cpp +++ b/src/txrequest.cpp @@ -113,8 +113,8 @@ class PriorityComputer { const uint64_t m_k0, m_k1; public: explicit PriorityComputer(bool deterministic) : - m_k0{deterministic ? 0 : GetRand(0xFFFFFFFFFFFFFFFF)}, - m_k1{deterministic ? 0 : GetRand(0xFFFFFFFFFFFFFFFF)} {} + m_k0{deterministic ? 0 : FastRandomContext().rand64()}, + m_k1{deterministic ? 
0 : FastRandomContext().rand64()} {} Priority operator()(const uint256& txhash, NodeId peer, bool preferred) const { diff --git a/src/util/bytevectorhash.cpp b/src/util/bytevectorhash.cpp index 92f1dbd5d8..79e4a21fe9 100644 --- a/src/util/bytevectorhash.cpp +++ b/src/util/bytevectorhash.cpp @@ -9,8 +9,8 @@ #include <vector> ByteVectorHash::ByteVectorHash() : - m_k0(GetRand<uint64_t>()), - m_k1(GetRand<uint64_t>()) + m_k0(FastRandomContext().rand64()), + m_k1(FastRandomContext().rand64()) { } diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp index 8952f20f79..41c8fe3b8f 100644 --- a/src/util/fs_helpers.cpp +++ b/src/util/fs_helpers.cpp @@ -16,6 +16,7 @@ #include <fstream> #include <map> #include <memory> +#include <optional> #include <string> #include <system_error> #include <utility> @@ -269,3 +270,42 @@ bool TryCreateDirectories(const fs::path& p) // create_directories didn't create the directory, it had to have existed already return false; } + +std::string PermsToSymbolicString(fs::perms p) +{ + std::string perm_str(9, '-'); + + auto set_perm = [&](size_t pos, fs::perms required_perm, char letter) { + if ((p & required_perm) != fs::perms::none) { + perm_str[pos] = letter; + } + }; + + set_perm(0, fs::perms::owner_read, 'r'); + set_perm(1, fs::perms::owner_write, 'w'); + set_perm(2, fs::perms::owner_exec, 'x'); + set_perm(3, fs::perms::group_read, 'r'); + set_perm(4, fs::perms::group_write, 'w'); + set_perm(5, fs::perms::group_exec, 'x'); + set_perm(6, fs::perms::others_read, 'r'); + set_perm(7, fs::perms::others_write, 'w'); + set_perm(8, fs::perms::others_exec, 'x'); + + return perm_str; +} + +std::optional<fs::perms> InterpretPermString(const std::string& s) +{ + if (s == "owner") { + return fs::perms::owner_read | fs::perms::owner_write; + } else if (s == "group") { + return fs::perms::owner_read | fs::perms::owner_write | + fs::perms::group_read; + } else if (s == "all") { + return fs::perms::owner_read | fs::perms::owner_write | + fs::perms::group_read | + fs::perms::others_read; + } else { + return std::nullopt; + } +} diff --git a/src/util/fs_helpers.h b/src/util/fs_helpers.h index ea3778eac3..28dd6d979d 100644 --- a/src/util/fs_helpers.h +++ b/src/util/fs_helpers.h @@ -12,6 +12,7 @@ #include <cstdio> #include <iosfwd> #include <limits> +#include <optional> /** * Ensure file contents are fully committed to disk, using a platform-specific @@ -62,6 +63,19 @@ void ReleaseDirectoryLocks(); bool TryCreateDirectories(const fs::path& p); fs::path GetDefaultDataDir(); +/** Convert fs::perms to symbolic string of the form 'rwxrwxrwx' + * + * @param[in] p the perms to be converted + * @return Symbolic permissions string + */ +std::string PermsToSymbolicString(fs::perms p); +/** Interpret a custom permissions level string as fs::perms + * + * @param[in] s Permission level string + * @return Permissions as fs::perms + */ +std::optional<fs::perms> InterpretPermString(const std::string& s); + #ifdef WIN32 fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true); #endif diff --git a/src/util/hasher.cpp b/src/util/hasher.cpp index f571725786..3109ba02a8 100644 --- a/src/util/hasher.cpp +++ b/src/util/hasher.cpp @@ -7,14 +7,18 @@ #include <span.h> #include <util/hasher.h> -SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand<uint64_t>()), k1(GetRand<uint64_t>()) {} +SaltedTxidHasher::SaltedTxidHasher() : + k0{FastRandomContext().rand64()}, + k1{FastRandomContext().rand64()} {} SaltedOutpointHasher::SaltedOutpointHasher(bool deterministic) : - k0(deterministic ? 
0x8e819f2607a18de6 : GetRand<uint64_t>()), - k1(deterministic ? 0xf4020d2e3983b0eb : GetRand<uint64_t>()) + k0{deterministic ? 0x8e819f2607a18de6 : FastRandomContext().rand64()}, + k1{deterministic ? 0xf4020d2e3983b0eb : FastRandomContext().rand64()} {} -SaltedSipHasher::SaltedSipHasher() : m_k0(GetRand<uint64_t>()), m_k1(GetRand<uint64_t>()) {} +SaltedSipHasher::SaltedSipHasher() : + m_k0{FastRandomContext().rand64()}, + m_k1{FastRandomContext().rand64()} {} size_t SaltedSipHasher::operator()(const Span<const unsigned char>& script) const { diff --git a/src/util/subprocess.h b/src/util/subprocess.h index e76ced687c..3449fa3b1b 100644 --- a/src/util/subprocess.h +++ b/src/util/subprocess.h @@ -678,7 +678,7 @@ struct error class Buffer { public: - Buffer() {} + Buffer() = default; explicit Buffer(size_t cap) { buf.resize(cap); } void add_cap(size_t cap) { buf.resize(cap); } diff --git a/src/util/task_runner.h b/src/util/task_runner.h index d3cd8007de..951381823b 100644 --- a/src/util/task_runner.h +++ b/src/util/task_runner.h @@ -19,7 +19,7 @@ namespace util { class TaskRunnerInterface { public: - virtual ~TaskRunnerInterface() {} + virtual ~TaskRunnerInterface() = default; /** * The callback can either be queued for later/asynchronous/threaded diff --git a/src/util/translation.h b/src/util/translation.h index d33fd2d0a0..6effe102f9 100644 --- a/src/util/translation.h +++ b/src/util/translation.h @@ -1,4 +1,4 @@ -// Copyright (c) 2019-2022 The Bitcoin Core developers +// Copyright (c) 2019-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -67,13 +67,19 @@ bilingual_str format(const bilingual_str& fmt, const Args&... args) /** Translate a message to the native language of the user. */ const extern std::function<std::string(const char*)> G_TRANSLATION_FUN; +struct ConstevalStringLiteral { + const char* const lit; + consteval ConstevalStringLiteral(const char* str) : lit{str} {} + consteval ConstevalStringLiteral(std::nullptr_t) = delete; +}; + /** * Translation function. * If no translation function is set, simply return the input. */ -inline bilingual_str _(const char* psz) +inline bilingual_str _(ConstevalStringLiteral str) { - return bilingual_str{psz, G_TRANSLATION_FUN ? (G_TRANSLATION_FUN)(psz) : psz}; + return bilingual_str{str.lit, G_TRANSLATION_FUN ? (G_TRANSLATION_FUN)(str.lit) : str.lit}; } #endif // BITCOIN_UTIL_TRANSLATION_H diff --git a/src/validation.cpp b/src/validation.cpp index 3e9ba08bb1..c49ec404ca 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -35,7 +35,7 @@ #include <policy/policy.h> #include <policy/rbf.h> #include <policy/settings.h> -#include <policy/v3_policy.h> +#include <policy/truc_policy.h> #include <pow.h> #include <primitives/block.h> #include <primitives/transaction.h> @@ -134,6 +134,7 @@ const CBlockIndex* Chainstate::FindForkInGlobalIndex(const CBlockLocator& locato bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, + ValidationCache& validation_cache, std::vector<CScriptCheck>* pvChecks = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -336,7 +337,7 @@ void Chainstate::MaybeUpdateMempoolForReorg( // Also updates valid entries' cached LockPoints if needed. // If false, the tx is still valid and its lockpoints are updated. 
// If true, the tx would be invalid in the next block; remove this entry and all of its descendants. - // Note that v3 rules are not applied here, so reorgs may cause violations of v3 inheritance or + // Note that TRUC rules are not applied here, so reorgs may cause violations of TRUC inheritance or // topology restrictions. const auto filter_final_and_mature = [&](CTxMemPool::txiter it) EXCLUSIVE_LOCKS_REQUIRED(m_mempool->cs, ::cs_main) { @@ -394,7 +395,8 @@ void Chainstate::MaybeUpdateMempoolForReorg( * */ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool, - unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip) + unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip, + ValidationCache& validation_cache) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { AssertLockHeld(cs_main); @@ -426,7 +428,7 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationS } // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules. - return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata); + return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata, validation_cache); } namespace { @@ -716,6 +718,11 @@ private: return true; } + ValidationCache& GetValidationCache() + { + return m_active_chainstate.m_chainman.m_validation_cache; + } + private: CTxMemPool& m_pool; CCoinsViewCache m_view; @@ -829,7 +836,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // check all unconfirmed ancestors; otherwise an opt-in ancestor // might be replaced, causing removal of this descendant. // - // All V3 transactions are considered replaceable. + // All TRUC transactions are considered replaceable. // // Replaceability signaling of the original transactions may be // ignored due to node setting. @@ -936,7 +943,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // while a tx could be package CPFP'd when entering the mempool, we do not have a DoS-resistant // method of ensuring the tx remains bumped. For example, the fee-bumping child could disappear // due to a replacement. - // The only exception is v3 transactions. + // The only exception is TRUC transactions. if (!bypass_limits && ws.m_ptx->version != TRUC_VERSION && ws.m_modified_fees < m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize)) { // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not // TX_RECONSIDERABLE, because it cannot be bypassed using package validation. @@ -1005,7 +1012,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // If the new transaction is relatively small (up to 40k weight) // and has at most one ancestor (ie ancestor limit of 2, including // the new transaction), allow it if its parent has exactly the - // descendant limit descendants. The transaction also cannot be v3, + // descendant limit descendants. The transaction also cannot be TRUC, // as its topology restrictions do not allow a second child. 
// // This allows protocols which rely on distrusting counterparties @@ -1032,7 +1039,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Even though just checking direct mempool parents for inheritance would be sufficient, we // check using the full ancestor set here because it's more convenient to use what we have // already calculated. - if (const auto err{SingleV3Checks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { + if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { // Single transaction contexts only. if (args.m_allow_sibling_eviction && err->second != nullptr) { // We should only be considering where replacement is considered valid as well. @@ -1043,15 +1050,15 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) ws.m_conflicts.insert(err->second->GetHash()); // Adding the sibling to m_iters_conflicting here means that it doesn't count towards // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from - // the descendant count is done separately in SingleV3Checks for v3 transactions. + // the descendant count is done separately in SingleTRUCChecks for TRUC transactions. ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value()); ws.m_sibling_eviction = true; // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks. - // Note that we are not checking whether it opts in to replaceability via BIP125 or v3 - // (which is normally done in PreChecks). However, the only way a v3 transaction can - // have a non-v3 and non-BIP125 descendant is due to a reorg. + // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC + // (which is normally done in PreChecks). However, the only way a TRUC transaction can + // have a non-TRUC and non-BIP125 descendant is due to a reorg. } else { - return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "v3-rule-violation", err->first); + return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first); } } @@ -1103,7 +1110,7 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws) } // Enforce Rule #2. if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, m_subpackage.m_all_conflicts)}) { - // Sibling eviction is only done for v3 transactions, which cannot have multiple ancestors. + // Sibling eviction is only done for TRUC transactions, which cannot have multiple ancestors. Assume(!ws.m_sibling_eviction); return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, strprintf("replacement-adds-unconfirmed%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string); @@ -1201,7 +1208,7 @@ bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txn const CFeeRate package_feerate(m_subpackage.m_total_modified_fees, m_subpackage.m_total_vsize); if (package_feerate <= parent_feerate) { return package_state.Invalid(PackageValidationResult::PCKG_POLICY, - "package RBF failed: package feerate is less than parent feerate", + "package RBF failed: package feerate is less than or equal to parent feerate", strprintf("package feerate %s <= parent feerate is %s", package_feerate.ToString(), parent_feerate.ToString())); } @@ -1231,13 +1238,13 @@ bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) // Check input scripts and signatures. // This is done last to help prevent CPU exhaustion denial-of-service attacks. 
- if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata)) { + if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) { // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we // need to turn both off, and compare against just turning off CLEANSTACK // to see if the failure is specifically due to witness validation. TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts - if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata) && - !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata)) { + if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata, GetValidationCache()) && + !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata, GetValidationCache())) { // Only the witness is missing, so the transaction itself may be fine. state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, state.GetRejectReason(), state.GetDebugMessage()); @@ -1273,7 +1280,7 @@ bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) // transactions into the mempool can be exploited as a DoS attack. unsigned int currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), m_active_chainstate.m_chainman)}; if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, - ws.m_precomputed_txdata, m_active_chainstate.CoinsTip())) { + ws.m_precomputed_txdata, m_active_chainstate.CoinsTip(), GetValidationCache())) { LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString()); return Assume(false); } @@ -1545,10 +1552,10 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: } // At this point we have all in-mempool ancestors, and we know every transaction's vsize. - // Run the v3 checks on the package. + // Run the TRUC checks on the package. 
for (Workspace& ws : workspaces) { - if (auto err{PackageV3Checks(ws.m_ptx, ws.m_vsize, txns, ws.m_ancestors)}) { - package_state.Invalid(PackageValidationResult::PCKG_POLICY, "v3-violation", err.value()); + if (auto err{PackageTRUCChecks(ws.m_ptx, ws.m_vsize, txns, ws.m_ancestors)}) { + package_state.Invalid(PackageValidationResult::PCKG_POLICY, "TRUC-violation", err.value()); return PackageMempoolAcceptResult(package_state, {}); } } @@ -2084,29 +2091,23 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txund bool CScriptCheck::operator()() { const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness; - return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error); + return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *m_signature_cache, *txdata), &error); } -static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache; -static CSHA256 g_scriptExecutionCacheHasher; - -bool InitScriptExecutionCache(size_t max_size_bytes) +ValidationCache::ValidationCache(const size_t script_execution_cache_bytes, const size_t signature_cache_bytes) + : m_signature_cache{signature_cache_bytes} { // Setup the salted hasher uint256 nonce = GetRandHash(); // We want the nonce to be 64 bytes long to force the hasher to process // this chunk, which makes later hash computations more efficient. We // just write our 32-byte entropy twice to fill the 64 bytes. - g_scriptExecutionCacheHasher.Write(nonce.begin(), 32); - g_scriptExecutionCacheHasher.Write(nonce.begin(), 32); - - auto setup_results = g_scriptExecutionCache.setup_bytes(max_size_bytes); - if (!setup_results) return false; + m_script_execution_cache_hasher.Write(nonce.begin(), 32); + m_script_execution_cache_hasher.Write(nonce.begin(), 32); - const auto [num_elems, approx_size_bytes] = *setup_results; + const auto [num_elems, approx_size_bytes] = m_script_execution_cache.setup_bytes(script_execution_cache_bytes); LogPrintf("Using %zu MiB out of %zu MiB requested for script execution cache, able to store %zu elements\n", - approx_size_bytes >> 20, max_size_bytes >> 20, num_elems); - return true; + approx_size_bytes >> 20, script_execution_cache_bytes >> 20, num_elems); } /** @@ -2131,6 +2132,7 @@ bool InitScriptExecutionCache(size_t max_size_bytes) bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, + ValidationCache& validation_cache, std::vector<CScriptCheck>* pvChecks) { if (tx.IsCoinBase()) return true; @@ -2145,10 +2147,10 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, // properly commits to the scriptPubKey in the inputs view of that // transaction). 
uint256 hashCacheEntry; - CSHA256 hasher = g_scriptExecutionCacheHasher; + CSHA256 hasher = validation_cache.ScriptExecutionCacheHasher(); hasher.Write(UCharCast(tx.GetWitnessHash().begin()), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin()); AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks - if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) { + if (validation_cache.m_script_execution_cache.contains(hashCacheEntry, !cacheFullScriptStore)) { return true; } @@ -2175,7 +2177,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, // spent being checked as a part of CScriptCheck. // Verify signature - CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata); + CScriptCheck check(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i, flags, cacheSigStore, &txdata); if (pvChecks) { pvChecks->emplace_back(std::move(check)); } else if (!check()) { @@ -2188,7 +2190,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, // splitting the network between upgraded and // non-upgraded nodes by banning CONSENSUS-failing // data providers. - CScriptCheck check2(txdata.m_spent_outputs[i], tx, i, + CScriptCheck check2(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); if (check2()) return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); @@ -2209,7 +2211,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, if (cacheFullScriptStore && !pvChecks) { // We executed all of the provided scripts, and were told to // cache the result. Do so now. - g_scriptExecutionCache.insert(hashCacheEntry); + validation_cache.m_script_execution_cache.insert(hashCacheEntry); } return true; @@ -2397,15 +2399,6 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Ch } -static SteadyClock::duration time_check{}; -static SteadyClock::duration time_forks{}; -static SteadyClock::duration time_connect{}; -static SteadyClock::duration time_verify{}; -static SteadyClock::duration time_undo{}; -static SteadyClock::duration time_index{}; -static SteadyClock::duration time_total{}; -static int64_t num_blocks_total = 0; - /** Apply the effects of this block (with given index) on the UTXO set represented by coins. * Validity checks that depend on the UTXO set are also done; ConnectBlock() * can fail if those validity checks fail (among other reasons). */ @@ -2450,7 +2443,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, uint256 hashPrevBlock = pindex->pprev == nullptr ? 
uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); - num_blocks_total++; + m_chainman.num_blocks_total++; // Special case for the genesis block, skipping connection of its transactions // (its coinbase is unspendable) @@ -2492,11 +2485,11 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, } const auto time_1{SteadyClock::now()}; - time_check += time_1 - time_start; + m_chainman.time_check += time_1 - time_start; LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_1 - time_start), - Ticks<SecondsDouble>(time_check), - Ticks<MillisecondsDouble>(time_check) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_check), + Ticks<MillisecondsDouble>(m_chainman.time_check) / m_chainman.num_blocks_total); // Do not allow blocks that contain transactions which 'overwrite' older transactions, // unless those are already completely spent. @@ -2594,11 +2587,11 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, unsigned int flags{GetBlockScriptFlags(*pindex, m_chainman)}; const auto time_2{SteadyClock::now()}; - time_forks += time_2 - time_1; + m_chainman.time_forks += time_2 - time_1; LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_2 - time_1), - Ticks<SecondsDouble>(time_forks), - Ticks<MillisecondsDouble>(time_forks) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_forks), + Ticks<MillisecondsDouble>(m_chainman.time_forks) / m_chainman.num_blocks_total); CBlockUndo blockundo; @@ -2667,7 +2660,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, std::vector<CScriptCheck> vChecks; bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ TxValidationState tx_state; - if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], parallel_script_checks ? &vChecks : nullptr)) { + if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache, parallel_script_checks ? &vChecks : nullptr)) { // Any transaction validation failure in ConnectBlock is a block consensus failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); @@ -2685,12 +2678,12 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight); } const auto time_3{SteadyClock::now()}; - time_connect += time_3 - time_2; + m_chainman.time_connect += time_3 - time_2; LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), Ticks<MillisecondsDouble>(time_3 - time_2), Ticks<MillisecondsDouble>(time_3 - time_2) / block.vtx.size(), nInputs <= 1 ? 
0 : Ticks<MillisecondsDouble>(time_3 - time_2) / (nInputs - 1), - Ticks<SecondsDouble>(time_connect), - Ticks<MillisecondsDouble>(time_connect) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_connect), + Ticks<MillisecondsDouble>(m_chainman.time_connect) / m_chainman.num_blocks_total); CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, params.GetConsensus()); if (block.vtx[0]->GetValueOut() > blockReward) { @@ -2703,12 +2696,12 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed"); } const auto time_4{SteadyClock::now()}; - time_verify += time_4 - time_2; + m_chainman.time_verify += time_4 - time_2; LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, Ticks<MillisecondsDouble>(time_4 - time_2), nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_4 - time_2) / (nInputs - 1), - Ticks<SecondsDouble>(time_verify), - Ticks<MillisecondsDouble>(time_verify) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_verify), + Ticks<MillisecondsDouble>(m_chainman.time_verify) / m_chainman.num_blocks_total); if (fJustCheck) return true; @@ -2718,11 +2711,11 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, } const auto time_5{SteadyClock::now()}; - time_undo += time_5 - time_4; + m_chainman.time_undo += time_5 - time_4; LogPrint(BCLog::BENCH, " - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_5 - time_4), - Ticks<SecondsDouble>(time_undo), - Ticks<MillisecondsDouble>(time_undo) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_undo), + Ticks<MillisecondsDouble>(m_chainman.time_undo) / m_chainman.num_blocks_total); if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) { pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); @@ -2733,11 +2726,11 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, view.SetBestBlock(pindex->GetBlockHash()); const auto time_6{SteadyClock::now()}; - time_index += time_6 - time_5; + m_chainman.time_index += time_6 - time_5; LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_6 - time_5), - Ticks<SecondsDouble>(time_index), - Ticks<MillisecondsDouble>(time_index) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_index), + Ticks<MillisecondsDouble>(m_chainman.time_index) / m_chainman.num_blocks_total); TRACE6(validation, block_connected, block_hash.data(), @@ -3095,11 +3088,6 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra return true; } -static SteadyClock::duration time_connect_total{}; -static SteadyClock::duration time_flush{}; -static SteadyClock::duration time_chainstate{}; -static SteadyClock::duration time_post_connect{}; - struct PerBlockConnectTrace { CBlockIndex* pindex = nullptr; std::shared_ptr<const CBlock> pblock; @@ -3186,31 +3174,31 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, return false; } time_3 = SteadyClock::now(); - time_connect_total += time_3 - time_2; - assert(num_blocks_total > 0); + m_chainman.time_connect_total += time_3 - time_2; + assert(m_chainman.num_blocks_total > 0); LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_3 - time_2), - Ticks<SecondsDouble>(time_connect_total), - Ticks<MillisecondsDouble>(time_connect_total) / num_blocks_total); + 
Ticks<SecondsDouble>(m_chainman.time_connect_total), + Ticks<MillisecondsDouble>(m_chainman.time_connect_total) / m_chainman.num_blocks_total); bool flushed = view.Flush(); assert(flushed); } const auto time_4{SteadyClock::now()}; - time_flush += time_4 - time_3; + m_chainman.time_flush += time_4 - time_3; LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_4 - time_3), - Ticks<SecondsDouble>(time_flush), - Ticks<MillisecondsDouble>(time_flush) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_flush), + Ticks<MillisecondsDouble>(m_chainman.time_flush) / m_chainman.num_blocks_total); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) { return false; } const auto time_5{SteadyClock::now()}; - time_chainstate += time_5 - time_4; + m_chainman.time_chainstate += time_5 - time_4; LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_5 - time_4), - Ticks<SecondsDouble>(time_chainstate), - Ticks<MillisecondsDouble>(time_chainstate) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_chainstate), + Ticks<MillisecondsDouble>(m_chainman.time_chainstate) / m_chainman.num_blocks_total); // Remove conflicting transactions from the mempool.; if (m_mempool) { m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight); @@ -3221,16 +3209,16 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, UpdateTip(pindexNew); const auto time_6{SteadyClock::now()}; - time_post_connect += time_6 - time_5; - time_total += time_6 - time_1; + m_chainman.time_post_connect += time_6 - time_5; + m_chainman.time_total += time_6 - time_1; LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_6 - time_5), - Ticks<SecondsDouble>(time_post_connect), - Ticks<MillisecondsDouble>(time_post_connect) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_post_connect), + Ticks<MillisecondsDouble>(m_chainman.time_post_connect) / m_chainman.num_blocks_total); LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", Ticks<MillisecondsDouble>(time_6 - time_1), - Ticks<SecondsDouble>(time_total), - Ticks<MillisecondsDouble>(time_total) / num_blocks_total); + Ticks<SecondsDouble>(m_chainman.time_total), + Ticks<MillisecondsDouble>(m_chainman.time_total) / m_chainman.num_blocks_total); // If we are the background validation chainstate, check to see if we are done // validating the snapshot (i.e. our tip has reached the snapshot's base block). 
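The ConnectBlock/ConnectTip hunks above replace the file-scope static benchmark counters (time_check, time_connect, time_connect_total, num_blocks_total, ...) with cs_main-guarded members on ChainstateManager, so the running totals belong to one manager instead of being process-global. Below is a minimal standalone sketch of the cumulative-timer pattern those members implement; the names are illustrative only, with plain std::chrono and printf standing in for SteadyClock and LogPrint(BCLog::BENCH, ...):

    #include <chrono>
    #include <cstdio>

    // Per-instance counters instead of globals, so two managers (e.g. in unit
    // tests) no longer share or race on the same accumulated totals.
    struct BenchTimers {
        std::chrono::steady_clock::duration time_check{};
        long long num_blocks_total{0};

        // Accumulate one phase and report it plus the per-block running average.
        void AccountCheckPhase(std::chrono::steady_clock::duration elapsed) {
            ++num_blocks_total;
            time_check += elapsed;
            using ms = std::chrono::duration<double, std::milli>;
            std::printf("Sanity checks: %.2fms (%.2fms/blk over %lld blocks)\n",
                        ms(elapsed).count(),
                        ms(time_check).count() / num_blocks_total,
                        num_blocks_total);
        }
    };

    int main() {
        BenchTimers t;
        const auto t0 = std::chrono::steady_clock::now();
        // ... block validation work would run here ...
        t.AccountCheckPhase(std::chrono::steady_clock::now() - t0);
    }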
@@ -3417,25 +3405,24 @@ static SynchronizationState GetSynchronizationState(bool init, bool blockfiles_i return SynchronizationState::INIT_DOWNLOAD; } -static bool NotifyHeaderTip(ChainstateManager& chainman) LOCKS_EXCLUDED(cs_main) +bool ChainstateManager::NotifyHeaderTip() { bool fNotify = false; bool fInitialBlockDownload = false; - static CBlockIndex* pindexHeaderOld = nullptr; CBlockIndex* pindexHeader = nullptr; { - LOCK(cs_main); - pindexHeader = chainman.m_best_header; + LOCK(GetMutex()); + pindexHeader = m_best_header; - if (pindexHeader != pindexHeaderOld) { + if (pindexHeader != m_last_notified_header) { fNotify = true; - fInitialBlockDownload = chainman.IsInitialBlockDownload(); - pindexHeaderOld = pindexHeader; + fInitialBlockDownload = IsInitialBlockDownload(); + m_last_notified_header = pindexHeader; } } - // Send block tip changed notifications without cs_main + // Send block tip changed notifications without the lock held if (fNotify) { - chainman.GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload, chainman.m_blockman.m_blockfiles_indexed), pindexHeader->nHeight, pindexHeader->nTime, false); + GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload, m_blockman.m_blockfiles_indexed), pindexHeader->nHeight, pindexHeader->nTime, false); } return fNotify; } @@ -4391,7 +4378,7 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& } } } - if (NotifyHeaderTip(*this)) { + if (NotifyHeaderTip()) { if (IsInitialBlockDownload() && ppindex && *ppindex) { const CBlockIndex& last_accepted{**ppindex}; int64_t blocks_left{(NodeClock::now() - last_accepted.Time()) / GetConsensus().PowTargetSpacing()}; @@ -4562,7 +4549,7 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo } } - NotifyHeaderTip(*this); + NotifyHeaderTip(); BlockValidationState state; // Only used to report errors, not invalidity - ignore it if (!ActiveChainstate().ActivateBestChain(state, block)) { @@ -5139,7 +5126,7 @@ void ChainstateManager::LoadExternalBlockFile( } } - NotifyHeaderTip(*this); + NotifyHeaderTip(); if (!blocks_with_unknown_parent) continue; @@ -5165,7 +5152,7 @@ void ChainstateManager::LoadExternalBlockFile( } range.first++; blocks_with_unknown_parent->erase(it); - NotifyHeaderTip(*this); + NotifyHeaderTip(); } } } catch (const std::exception& e) { @@ -5193,7 +5180,7 @@ bool ChainstateManager::ShouldCheckBlockIndex() const { // Assert to verify Flatten() has been called. 
if (!*Assert(m_options.check_block_index)) return false; - if (GetRand(*m_options.check_block_index) >= 1) return false; + if (FastRandomContext().randrange(*m_options.check_block_index) >= 1) return false; return true; } @@ -5646,23 +5633,44 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool) return destroyed && !fs::exists(db_path); } -bool ChainstateManager::ActivateSnapshot( +util::Result<void> ChainstateManager::ActivateSnapshot( AutoFile& coins_file, const SnapshotMetadata& metadata, bool in_memory) { uint256 base_blockhash = metadata.m_base_blockhash; + int base_blockheight = metadata.m_base_blockheight; if (this->SnapshotBlockhash()) { - LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n"); - return false; + return util::Error{Untranslated("Can't activate a snapshot-based chainstate more than once")}; } { LOCK(::cs_main); - if (Assert(m_active_chainstate->GetMempool())->size() > 0) { - LogPrintf("[snapshot] can't activate a snapshot when mempool not empty\n"); - return false; + + if (!GetParams().AssumeutxoForBlockhash(base_blockhash).has_value()) { + auto available_heights = GetParams().GetAvailableSnapshotHeights(); + std::string heights_formatted = util::Join(available_heights, ", ", [&](const auto& i) { return util::ToString(i); }); + return util::Error{strprintf(Untranslated("assumeutxo block hash in snapshot metadata not recognized (hash: %s, height: %s). The following snapshot heights are available: %s"), + base_blockhash.ToString(), + base_blockheight, + heights_formatted)}; + } + + CBlockIndex* snapshot_start_block = m_blockman.LookupBlockIndex(base_blockhash); + if (!snapshot_start_block) { + return util::Error{strprintf(Untranslated("The base block header (%s) must appear in the headers chain. Make sure all headers are syncing, and call loadtxoutset again"), + base_blockhash.ToString())}; + } + + bool start_block_invalid = snapshot_start_block->nStatus & BLOCK_FAILED_MASK; + if (start_block_invalid) { + return util::Error{strprintf(Untranslated("The base block header (%s) is part of an invalid chain"), base_blockhash.ToString())}; + } + + auto mempool{m_active_chainstate->GetMempool()}; + if (mempool && mempool->size() > 0) { + return util::Error{Untranslated("Can't activate a snapshot when mempool not empty")}; } } @@ -5711,8 +5719,7 @@ bool ChainstateManager::ActivateSnapshot( static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC)); } - auto cleanup_bad_snapshot = [&](const char* reason) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { - LogPrintf("[snapshot] activation failed - %s\n", reason); + auto cleanup_bad_snapshot = [&](bilingual_str&& reason) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { this->MaybeRebalanceCaches(); // PopulateAndValidateSnapshot can return (in error) before the leveldb datadir @@ -5728,12 +5735,12 @@ bool ChainstateManager::ActivateSnapshot( "Manually remove it before restarting.\n"), fs::PathToString(*snapshot_datadir))); } } - return false; + return util::Error{std::move(reason)}; }; if (!this->PopulateAndValidateSnapshot(*snapshot_chainstate, coins_file, metadata)) { LOCK(::cs_main); - return cleanup_bad_snapshot("population failed"); + return cleanup_bad_snapshot(Untranslated("population failed")); } LOCK(::cs_main); // cs_main required for rest of snapshot activation. 
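In the ActivateSnapshot() hunks above, the function stops returning a bare bool (with the reason only LogPrintf()'d) and instead returns util::Result<void>, so the precise failure -- unrecognized assumeutxo hash, base block header missing or on an invalid chain, non-empty mempool, failed population -- propagates back to the caller. The following standalone sketch shows that shape only; std::optional<std::string> is a stand-in for util::Result<void>/util::Error, and the real code additionally wraps its messages in Untranslated()/bilingual_str:

    #include <cstdio>
    #include <optional>
    #include <string>

    // Stand-in for util::Result<void>: empty optional = success, otherwise the
    // error text that previously was only logged before `return false`.
    using VoidResult = std::optional<std::string>;

    VoidResult ActivateSnapshotSketch(bool already_activated, bool mempool_empty) {
        if (already_activated) {
            return "Can't activate a snapshot-based chainstate more than once";
        }
        if (!mempool_empty) {
            return "Can't activate a snapshot when mempool not empty";
        }
        return std::nullopt; // success, analogous to `return {}` on util::Result<void>
    }

    int main() {
        if (const auto err = ActivateSnapshotSketch(/*already_activated=*/false,
                                                    /*mempool_empty=*/false)) {
            // A caller such as an RPC handler can now surface the reason directly.
            std::printf("snapshot activation failed: %s\n", err->c_str());
        }
    }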
@@ -5742,13 +5749,13 @@ bool ChainstateManager::ActivateSnapshot( // work chain than the active chainstate; a user could have loaded a snapshot // very late in the IBD process, and we wouldn't want to load a useless chainstate. if (!CBlockIndexWorkComparator()(ActiveTip(), snapshot_chainstate->m_chain.Tip())) { - return cleanup_bad_snapshot("work does not exceed active chainstate"); + return cleanup_bad_snapshot(Untranslated("work does not exceed active chainstate")); } // If not in-memory, persist the base blockhash for use during subsequent // initialization. if (!in_memory) { if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) { - return cleanup_bad_snapshot("could not write base blockhash"); + return cleanup_bad_snapshot(Untranslated("could not write base blockhash")); } } @@ -5771,7 +5778,7 @@ bool ChainstateManager::ActivateSnapshot( m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); this->MaybeRebalanceCaches(); - return true; + return {}; } static void FlushSnapshotToDisk(CCoinsViewCache& coins_cache, bool snapshot_loaded) @@ -6229,7 +6236,8 @@ ChainstateManager::ChainstateManager(const util::SignalInterrupt& interrupt, Opt : m_script_check_queue{/*batch_size=*/128, options.worker_threads_num}, m_interrupt{interrupt}, m_options{Flatten(std::move(options))}, - m_blockman{interrupt, std::move(blockman_options)} + m_blockman{interrupt, std::move(blockman_options)}, + m_validation_cache{m_options.script_execution_cache_bytes, m_options.signature_cache_bytes} { } diff --git a/src/validation.h b/src/validation.h index ab7891539a..08e672c620 100644 --- a/src/validation.h +++ b/src/validation.h @@ -10,9 +10,10 @@ #include <attributes.h> #include <chain.h> #include <checkqueue.h> -#include <kernel/chain.h> #include <consensus/amount.h> +#include <cuckoocache.h> #include <deploymentstatus.h> +#include <kernel/chain.h> #include <kernel/chainparams.h> #include <kernel/chainstatemanager_opts.h> #include <kernel/cs_main.h> // IWYU pragma: export @@ -21,6 +22,7 @@ #include <policy/packages.h> #include <policy/policy.h> #include <script/script_error.h> +#include <script/sigcache.h> #include <sync.h> #include <txdb.h> #include <txmempool.h> // For CTxMemPool::cs @@ -340,10 +342,11 @@ private: bool cacheStore; ScriptError error{SCRIPT_ERR_UNKNOWN_ERROR}; PrecomputedTransactionData *txdata; + SignatureCache* m_signature_cache; public: - CScriptCheck(const CTxOut& outIn, const CTransaction& txToIn, unsigned int nInIn, unsigned int nFlagsIn, bool cacheIn, PrecomputedTransactionData* txdataIn) : - m_tx_out(outIn), ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn), cacheStore(cacheIn), txdata(txdataIn) { } + CScriptCheck(const CTxOut& outIn, const CTransaction& txToIn, SignatureCache& signature_cache, unsigned int nInIn, unsigned int nFlagsIn, bool cacheIn, PrecomputedTransactionData* txdataIn) : + m_tx_out(outIn), ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn), cacheStore(cacheIn), txdata(txdataIn), m_signature_cache(&signature_cache) { } CScriptCheck(const CScriptCheck&) = delete; CScriptCheck& operator=(const CScriptCheck&) = delete; @@ -360,8 +363,28 @@ static_assert(std::is_nothrow_move_assignable_v<CScriptCheck>); static_assert(std::is_nothrow_move_constructible_v<CScriptCheck>); static_assert(std::is_nothrow_destructible_v<CScriptCheck>); -/** Initializes the script-execution cache */ -[[nodiscard]] bool InitScriptExecutionCache(size_t max_size_bytes); +/** + * Convenience class for initializing and passing the script execution cache + * and signature cache. 
+ */ +class ValidationCache +{ +private: + //! Pre-initialized hasher to avoid having to recreate it for every hash calculation. + CSHA256 m_script_execution_cache_hasher; + +public: + CuckooCache::cache<uint256, SignatureCacheHasher> m_script_execution_cache; + SignatureCache m_signature_cache; + + ValidationCache(size_t script_execution_cache_bytes, size_t signature_cache_bytes); + + ValidationCache(const ValidationCache&) = delete; + ValidationCache& operator=(const ValidationCache&) = delete; + + //! Return a copy of the pre-initialized hasher. + CSHA256 ScriptExecutionCacheHasher() const { return m_script_execution_cache_hasher; } +}; /** Functions for validating blocks and updating the block tree */ @@ -796,7 +819,6 @@ private: friend ChainstateManager; }; - enum class SnapshotCompletionResult { SUCCESS, SKIPPED, @@ -884,6 +906,11 @@ private: CBlockIndex* m_best_invalid GUARDED_BY(::cs_main){nullptr}; + /** The last header for which a headerTip notification was issued. */ + CBlockIndex* m_last_notified_header GUARDED_BY(GetMutex()){nullptr}; + + bool NotifyHeaderTip() LOCKS_EXCLUDED(GetMutex()); + //! Internal helper for ActivateSnapshot(). //! //! De-serialization of a snapshot that is created with @@ -927,6 +954,21 @@ private: //! A queue for script verifications that have to be performed by worker threads. CCheckQueue<CScriptCheck> m_script_check_queue; + //! Timers and counters used for benchmarking validation in both background + //! and active chainstates. + SteadyClock::duration GUARDED_BY(::cs_main) time_check{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_forks{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_connect{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_verify{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_undo{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_index{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_total{}; + int64_t GUARDED_BY(::cs_main) num_blocks_total{0}; + SteadyClock::duration GUARDED_BY(::cs_main) time_connect_total{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_flush{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_chainstate{}; + SteadyClock::duration GUARDED_BY(::cs_main) time_post_connect{}; + public: using Options = kernel::ChainstateManagerOpts; @@ -970,6 +1012,8 @@ public: //! chainstate to avoid duplicating block metadata. node::BlockManager m_blockman; + ValidationCache m_validation_cache; + /** * Whether initial block download has ended and IsInitialBlockDownload * should return false from now on. @@ -1054,7 +1098,7 @@ public: //! faking nTx* block index data along the way. //! - Move the new chainstate to `m_snapshot_chainstate` and make it our //! ChainstateActive(). - [[nodiscard]] bool ActivateSnapshot( + [[nodiscard]] util::Result<void> ActivateSnapshot( AutoFile& coins_file, const node::SnapshotMetadata& metadata, bool in_memory); //! 
Once the background validation chainstate has reached the height which diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp index 813fde109c..579444a065 100644 --- a/src/validationinterface.cpp +++ b/src/validationinterface.cpp @@ -95,7 +95,7 @@ public: ValidationSignals::ValidationSignals(std::unique_ptr<util::TaskRunnerInterface> task_runner) : m_internals{std::make_unique<ValidationSignalsImpl>(std::move(task_runner))} {} -ValidationSignals::~ValidationSignals() {} +ValidationSignals::~ValidationSignals() = default; void ValidationSignals::FlushBackgroundCallbacks() { diff --git a/src/wallet/coinselection.cpp b/src/wallet/coinselection.cpp index a54d1a4668..b6fee37c95 100644 --- a/src/wallet/coinselection.cpp +++ b/src/wallet/coinselection.cpp @@ -549,7 +549,7 @@ util::Result<SelectionResult> SelectCoinsSRD(const std::vector<OutputGroup>& utx std::vector<size_t> indexes; indexes.resize(utxo_pool.size()); std::iota(indexes.begin(), indexes.end(), 0); - Shuffle(indexes.begin(), indexes.end(), rng); + std::shuffle(indexes.begin(), indexes.end(), rng); CAmount selected_eff_value = 0; int weight = 0; @@ -663,7 +663,7 @@ util::Result<SelectionResult> KnapsackSolver(std::vector<OutputGroup>& groups, c std::vector<OutputGroup> applicable_groups; CAmount nTotalLower = 0; - Shuffle(groups.begin(), groups.end(), rng); + std::shuffle(groups.begin(), groups.end(), rng); for (const OutputGroup& group : groups) { if (group.m_weight > max_selection_weight) { @@ -942,7 +942,7 @@ const std::set<std::shared_ptr<COutput>>& SelectionResult::GetInputSet() const std::vector<std::shared_ptr<COutput>> SelectionResult::GetShuffledInputVector() const { std::vector<std::shared_ptr<COutput>> coins(m_selected_inputs.begin(), m_selected_inputs.end()); - Shuffle(coins.begin(), coins.end(), FastRandomContext()); + std::shuffle(coins.begin(), coins.end(), FastRandomContext()); return coins; } diff --git a/src/wallet/coinselection.h b/src/wallet/coinselection.h index 8a81cfc268..08889c8e06 100644 --- a/src/wallet/coinselection.h +++ b/src/wallet/coinselection.h @@ -259,7 +259,7 @@ struct OutputGroup /** Total weight of the UTXOs in this group. */ int m_weight{0}; - OutputGroup() {} + OutputGroup() = default; OutputGroup(const CoinSelectionParams& params) : m_long_term_feerate(params.m_long_term_feerate), m_subtract_fee_outputs(params.m_subtract_fee_outputs) diff --git a/src/wallet/db.h b/src/wallet/db.h index b45076d10c..2045d51376 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -30,8 +30,8 @@ bool operator<(Span<const std::byte> a, BytePrefix b); class DatabaseCursor { public: - explicit DatabaseCursor() {} - virtual ~DatabaseCursor() {} + explicit DatabaseCursor() = default; + virtual ~DatabaseCursor() = default; DatabaseCursor(const DatabaseCursor&) = delete; DatabaseCursor& operator=(const DatabaseCursor&) = delete; @@ -56,8 +56,8 @@ private: virtual bool HasKey(DataStream&& key) = 0; public: - explicit DatabaseBatch() {} - virtual ~DatabaseBatch() {} + explicit DatabaseBatch() = default; + virtual ~DatabaseBatch() = default; DatabaseBatch(const DatabaseBatch&) = delete; DatabaseBatch& operator=(const DatabaseBatch&) = delete; @@ -131,7 +131,7 @@ class WalletDatabase public: /** Create dummy DB handle */ WalletDatabase() : nUpdateCounter(0) {} - virtual ~WalletDatabase() {}; + virtual ~WalletDatabase() = default; /** Open the database if it is not already opened. 
*/ virtual void Open() = 0; diff --git a/src/wallet/migrate.h b/src/wallet/migrate.h index e4826450af..58c8c0adf4 100644 --- a/src/wallet/migrate.h +++ b/src/wallet/migrate.h @@ -28,7 +28,7 @@ public: { if (open) Open(); } - ~BerkeleyRODatabase(){}; + ~BerkeleyRODatabase() = default; BerkeleyROData m_records; @@ -81,7 +81,7 @@ private: public: explicit BerkeleyROCursor(const BerkeleyRODatabase& database, Span<const std::byte> prefix = {}); - ~BerkeleyROCursor() {} + ~BerkeleyROCursor() = default; Status Next(DataStream& key, DataStream& value) override; }; @@ -102,7 +102,7 @@ private: public: explicit BerkeleyROBatch(const BerkeleyRODatabase& database) : m_database(database) {} - ~BerkeleyROBatch() {} + ~BerkeleyROBatch() = default; BerkeleyROBatch(const BerkeleyROBatch&) = delete; BerkeleyROBatch& operator=(const BerkeleyROBatch&) = delete; diff --git a/src/wallet/rpc/addresses.cpp b/src/wallet/rpc/addresses.cpp index 0c2ad06eea..35c93337c1 100644 --- a/src/wallet/rpc/addresses.cpp +++ b/src/wallet/rpc/addresses.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2011-2022 The Bitcoin Core developers +// Copyright (c) 2011-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -534,7 +534,7 @@ RPCHelpMan getaddressinfo() {RPCResult::Type::BOOL, "solvable", "If we know how to spend coins sent to this address, ignoring the possible lack of private keys."}, {RPCResult::Type::STR, "desc", /*optional=*/true, "A descriptor for spending coins sent to this address (only when solvable)."}, {RPCResult::Type::STR, "parent_desc", /*optional=*/true, "The descriptor used to derive this address if this is a descriptor wallet"}, - {RPCResult::Type::BOOL, "isscript", "If the key is a script."}, + {RPCResult::Type::BOOL, "isscript", /*optional=*/true, "If the key is a script."}, {RPCResult::Type::BOOL, "ischange", "If the address was used for change output."}, {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address."}, {RPCResult::Type::NUM, "witness_version", /*optional=*/true, "The version number of the witness program."}, diff --git a/src/wallet/rpc/transactions.cpp b/src/wallet/rpc/transactions.cpp index 5abc983701..0dacfa808b 100644 --- a/src/wallet/rpc/transactions.cpp +++ b/src/wallet/rpc/transactions.cpp @@ -415,13 +415,13 @@ static std::vector<RPCResult> TransactionDescriptionString() {RPCResult::Type::NUM_TIME, "blocktime", /*optional=*/true, "The block time expressed in " + UNIX_EPOCH_TIME + "."}, {RPCResult::Type::STR_HEX, "txid", "The transaction id."}, {RPCResult::Type::STR_HEX, "wtxid", "The hash of serialized transaction, including witness data."}, - {RPCResult::Type::ARR, "walletconflicts", "Conflicting transaction ids.", + {RPCResult::Type::ARR, "walletconflicts", "Confirmed transactions that have been detected by the wallet to conflict with this transaction.", { {RPCResult::Type::STR_HEX, "txid", "The transaction id."}, }}, {RPCResult::Type::STR_HEX, "replaced_by_txid", /*optional=*/true, "Only if 'category' is 'send'. The txid if this tx was replaced."}, {RPCResult::Type::STR_HEX, "replaces_txid", /*optional=*/true, "Only if 'category' is 'send'. 
The txid if this tx replaces another."}, - {RPCResult::Type::ARR, "mempoolconflicts", "Transactions that directly conflict with either this transaction or an ancestor transaction", + {RPCResult::Type::ARR, "mempoolconflicts", "Transactions in the mempool that directly conflict with either this transaction or an ancestor transaction", { {RPCResult::Type::STR_HEX, "txid", "The transaction id."}, }}, diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index c64aff5fe2..e4632777cc 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -84,7 +84,7 @@ bool PermitsUncompressed(IsMineSigVersion sigversion) return sigversion == IsMineSigVersion::TOP || sigversion == IsMineSigVersion::P2SH; } -bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyScriptPubKeyMan& keystore) +bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyDataSPKM& keystore) { for (const valtype& pubkey : pubkeys) { CKeyID keyID = CPubKey(pubkey).GetID(); @@ -102,7 +102,7 @@ bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyScriptPubKeyMan& //! scripts or simply treat any script that has been //! stored in the keystore as spendable // NOLINTNEXTLINE(misc-no-recursion) -IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& scriptPubKey, IsMineSigVersion sigversion, bool recurse_scripthash=true) +IsMineResult IsMineInner(const LegacyDataSPKM& keystore, const CScript& scriptPubKey, IsMineSigVersion sigversion, bool recurse_scripthash=true) { IsMineResult ret = IsMineResult::NO; @@ -217,7 +217,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s } // namespace -isminetype LegacyScriptPubKeyMan::IsMine(const CScript& script) const +isminetype LegacyDataSPKM::IsMine(const CScript& script) const { switch (IsMineInner(*this, script, IsMineSigVersion::TOP)) { case IsMineResult::INVALID: @@ -231,7 +231,7 @@ isminetype LegacyScriptPubKeyMan::IsMine(const CScript& script) const assert(false); } -bool LegacyScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master_key) +bool LegacyDataSPKM::CheckDecryptionKey(const CKeyingMaterial& master_key) { { LOCK(cs_KeyStore); @@ -585,7 +585,7 @@ int64_t LegacyScriptPubKeyMan::GetTimeFirstKey() const return nTimeFirstKey; } -std::unique_ptr<SigningProvider> LegacyScriptPubKeyMan::GetSolvingProvider(const CScript& script) const +std::unique_ptr<SigningProvider> LegacyDataSPKM::GetSolvingProvider(const CScript& script) const { return std::make_unique<LegacySigningProvider>(*this); } @@ -721,7 +721,7 @@ void LegacyScriptPubKeyMan::UpdateTimeFirstKey(int64_t nCreateTime) NotifyFirstKeyTimeChanged(this, nTimeFirstKey); } -bool LegacyScriptPubKeyMan::LoadKey(const CKey& key, const CPubKey &pubkey) +bool LegacyDataSPKM::LoadKey(const CKey& key, const CPubKey &pubkey) { return AddKeyPubKeyInner(key, pubkey); } @@ -773,7 +773,7 @@ bool LegacyScriptPubKeyMan::AddKeyPubKeyWithDB(WalletBatch& batch, const CKey& s return true; } -bool LegacyScriptPubKeyMan::LoadCScript(const CScript& redeemScript) +bool LegacyDataSPKM::LoadCScript(const CScript& redeemScript) { /* A sanity check was added in pull #3843 to avoid adding redeemScripts * that never can be redeemed. 
However, old wallets may still contain @@ -788,18 +788,36 @@ bool LegacyScriptPubKeyMan::LoadCScript(const CScript& redeemScript) return FillableSigningProvider::AddCScript(redeemScript); } +void LegacyDataSPKM::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta) +{ + LOCK(cs_KeyStore); + mapKeyMetadata[keyID] = meta; +} + void LegacyScriptPubKeyMan::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta) { LOCK(cs_KeyStore); + LegacyDataSPKM::LoadKeyMetadata(keyID, meta); UpdateTimeFirstKey(meta.nCreateTime); - mapKeyMetadata[keyID] = meta; +} + +void LegacyDataSPKM::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta) +{ + LOCK(cs_KeyStore); + m_script_metadata[script_id] = meta; } void LegacyScriptPubKeyMan::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta) { LOCK(cs_KeyStore); + LegacyDataSPKM::LoadScriptMetadata(script_id, meta); UpdateTimeFirstKey(meta.nCreateTime); - m_script_metadata[script_id] = meta; +} + +bool LegacyDataSPKM::AddKeyPubKeyInner(const CKey& key, const CPubKey& pubkey) +{ + LOCK(cs_KeyStore); + return FillableSigningProvider::AddKeyPubKey(key, pubkey); } bool LegacyScriptPubKeyMan::AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey) @@ -827,7 +845,7 @@ bool LegacyScriptPubKeyMan::AddKeyPubKeyInner(const CKey& key, const CPubKey &pu return true; } -bool LegacyScriptPubKeyMan::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid) +bool LegacyDataSPKM::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid) { // Set fDecryptionThoroughlyChecked to false when the checksum is invalid if (!checksum_valid) { @@ -837,7 +855,7 @@ bool LegacyScriptPubKeyMan::LoadCryptedKey(const CPubKey &vchPubKey, const std:: return AddCryptedKeyInner(vchPubKey, vchCryptedSecret); } -bool LegacyScriptPubKeyMan::AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret) +bool LegacyDataSPKM::AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret) { LOCK(cs_KeyStore); assert(mapKeys.empty()); @@ -865,13 +883,13 @@ bool LegacyScriptPubKeyMan::AddCryptedKey(const CPubKey &vchPubKey, } } -bool LegacyScriptPubKeyMan::HaveWatchOnly(const CScript &dest) const +bool LegacyDataSPKM::HaveWatchOnly(const CScript &dest) const { LOCK(cs_KeyStore); return setWatchOnly.count(dest) > 0; } -bool LegacyScriptPubKeyMan::HaveWatchOnly() const +bool LegacyDataSPKM::HaveWatchOnly() const { LOCK(cs_KeyStore); return (!setWatchOnly.empty()); @@ -905,12 +923,12 @@ bool LegacyScriptPubKeyMan::RemoveWatchOnly(const CScript &dest) return true; } -bool LegacyScriptPubKeyMan::LoadWatchOnly(const CScript &dest) +bool LegacyDataSPKM::LoadWatchOnly(const CScript &dest) { return AddWatchOnlyInMem(dest); } -bool LegacyScriptPubKeyMan::AddWatchOnlyInMem(const CScript &dest) +bool LegacyDataSPKM::AddWatchOnlyInMem(const CScript &dest) { LOCK(cs_KeyStore); setWatchOnly.insert(dest); @@ -954,7 +972,7 @@ bool LegacyScriptPubKeyMan::AddWatchOnly(const CScript& dest, int64_t nCreateTim return AddWatchOnly(dest); } -void LegacyScriptPubKeyMan::LoadHDChain(const CHDChain& chain) +void LegacyDataSPKM::LoadHDChain(const CHDChain& chain) { LOCK(cs_KeyStore); m_hd_chain = chain; @@ -975,14 +993,14 @@ void LegacyScriptPubKeyMan::AddHDChain(const CHDChain& chain) m_hd_chain = chain; } -void LegacyScriptPubKeyMan::AddInactiveHDChain(const CHDChain& chain) +void 
LegacyDataSPKM::AddInactiveHDChain(const CHDChain& chain) { LOCK(cs_KeyStore); assert(!chain.seed_id.IsNull()); m_inactive_hd_chains[chain.seed_id] = chain; } -bool LegacyScriptPubKeyMan::HaveKey(const CKeyID &address) const +bool LegacyDataSPKM::HaveKey(const CKeyID &address) const { LOCK(cs_KeyStore); if (!m_storage.HasEncryptionKeys()) { @@ -991,7 +1009,7 @@ bool LegacyScriptPubKeyMan::HaveKey(const CKeyID &address) const return mapCryptedKeys.count(address) > 0; } -bool LegacyScriptPubKeyMan::GetKey(const CKeyID &address, CKey& keyOut) const +bool LegacyDataSPKM::GetKey(const CKeyID &address, CKey& keyOut) const { LOCK(cs_KeyStore); if (!m_storage.HasEncryptionKeys()) { @@ -1010,7 +1028,7 @@ bool LegacyScriptPubKeyMan::GetKey(const CKeyID &address, CKey& keyOut) const return false; } -bool LegacyScriptPubKeyMan::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& info) const +bool LegacyDataSPKM::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& info) const { CKeyMetadata meta; { @@ -1030,7 +1048,7 @@ bool LegacyScriptPubKeyMan::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& inf return true; } -bool LegacyScriptPubKeyMan::GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const +bool LegacyDataSPKM::GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const { LOCK(cs_KeyStore); WatchKeyMap::const_iterator it = mapWatchKeys.find(address); @@ -1041,7 +1059,7 @@ bool LegacyScriptPubKeyMan::GetWatchPubKey(const CKeyID &address, CPubKey &pubke return false; } -bool LegacyScriptPubKeyMan::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const +bool LegacyDataSPKM::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const { LOCK(cs_KeyStore); if (!m_storage.HasEncryptionKeys()) { @@ -1160,7 +1178,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata& throw std::runtime_error(std::string(__func__) + ": writing HD chain model failed"); } -void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) +void LegacyDataSPKM::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) { LOCK(cs_KeyStore); if (keypool.m_pre_split) { @@ -1681,7 +1699,7 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const return set_address; } -std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPubKeys() const +std::unordered_set<CScript, SaltedSipHasher> LegacyDataSPKM::GetScriptPubKeys() const { LOCK(cs_KeyStore); std::unordered_set<CScript, SaltedSipHasher> spks; @@ -1739,7 +1757,7 @@ std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPub return spks; } -std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetNotMineScriptPubKeys() const +std::unordered_set<CScript, SaltedSipHasher> LegacyDataSPKM::GetNotMineScriptPubKeys() const { LOCK(cs_KeyStore); std::unordered_set<CScript, SaltedSipHasher> spks; @@ -1749,7 +1767,7 @@ std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetNotMineSc return spks; } -std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor() +std::optional<MigrationData> LegacyDataSPKM::MigrateToDescriptor() { LOCK(cs_KeyStore); if (m_storage.IsLocked()) { @@ -1816,7 +1834,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor() WalletDescriptor w_desc(std::move(desc), creation_time, 0, 0, 0); // Make the DescriptorScriptPubKeyMan and get the scriptPubKeys - auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size)); + auto desc_spk_man = 
std::make_unique<DescriptorScriptPubKeyMan>(m_storage, w_desc, /*keypool_size=*/0); desc_spk_man->AddDescriptorKey(key, key.GetPubKey()); desc_spk_man->TopUp(); auto desc_spks = desc_spk_man->GetScriptPubKeys(); @@ -1861,7 +1879,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor() WalletDescriptor w_desc(std::move(desc), 0, 0, chain_counter, 0); // Make the DescriptorScriptPubKeyMan and get the scriptPubKeys - auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size)); + auto desc_spk_man = std::make_unique<DescriptorScriptPubKeyMan>(m_storage, w_desc, /*keypool_size=*/0); desc_spk_man->AddDescriptorKey(master_key.key, master_key.key.GetPubKey()); desc_spk_man->TopUp(); auto desc_spks = desc_spk_man->GetScriptPubKeys(); @@ -1923,7 +1941,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor() } else { // Make the DescriptorScriptPubKeyMan and get the scriptPubKeys WalletDescriptor w_desc(std::move(desc), creation_time, 0, 0, 0); - auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size)); + auto desc_spk_man = std::make_unique<DescriptorScriptPubKeyMan>(m_storage, w_desc, /*keypool_size=*/0); for (const auto& keyid : privkeyids) { CKey key; if (!GetKey(keyid, key)) { @@ -2001,7 +2019,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor() return out; } -bool LegacyScriptPubKeyMan::DeleteRecords() +bool LegacyDataSPKM::DeleteRecords() { LOCK(cs_KeyStore); WalletBatch batch(m_storage.GetDatabase()); diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 4d9f7bb1fa..6659cbf52b 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -178,7 +178,7 @@ protected: public: explicit ScriptPubKeyMan(WalletStorage& storage) : m_storage(storage) {} - virtual ~ScriptPubKeyMan() {}; + virtual ~ScriptPubKeyMan() = default; virtual util::Result<CTxDestination> GetNewDestination(const OutputType type) { return util::Error{Untranslated("Not supported")}; } virtual isminetype IsMine(const CScript& script) const { return ISMINE_NO; } @@ -278,31 +278,111 @@ static const std::unordered_set<OutputType> LEGACY_OUTPUT_TYPES { class DescriptorScriptPubKeyMan; -class LegacyScriptPubKeyMan : public ScriptPubKeyMan, public FillableSigningProvider +// Manages the data for a LegacyScriptPubKeyMan. +// This is the minimum necessary to load a legacy wallet so that it can be migrated. +class LegacyDataSPKM : public ScriptPubKeyMan, public FillableSigningProvider { -private: - //! keeps track of whether Unlock has run a thorough check before - bool fDecryptionThoroughlyChecked = true; - +protected: using WatchOnlySet = std::set<CScript>; using WatchKeyMap = std::map<CKeyID, CPubKey>; - - WalletBatch *encrypted_batch GUARDED_BY(cs_KeyStore) = nullptr; - using CryptedKeyMap = std::map<CKeyID, std::pair<CPubKey, std::vector<unsigned char>>>; CryptedKeyMap mapCryptedKeys GUARDED_BY(cs_KeyStore); WatchOnlySet setWatchOnly GUARDED_BY(cs_KeyStore); WatchKeyMap mapWatchKeys GUARDED_BY(cs_KeyStore); + /* the HD chain data model (external chain counters) */ + CHDChain m_hd_chain; + std::unordered_map<CKeyID, CHDChain, SaltedSipHasher> m_inactive_hd_chains; + + //! 
keeps track of whether Unlock has run a thorough check before + bool fDecryptionThoroughlyChecked = true; + + bool AddWatchOnlyInMem(const CScript &dest); + virtual bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey); + bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); + +public: + using ScriptPubKeyMan::ScriptPubKeyMan; + + // Map from Key ID to key metadata. + std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore); + + // Map from Script ID to key metadata (for watch-only keys). + std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_KeyStore); + + // ScriptPubKeyMan overrides + bool CheckDecryptionKey(const CKeyingMaterial& master_key) override; + std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override; + std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const override; + uint256 GetID() const override { return uint256::ONE; } + // TODO: Remove IsMine when deleting LegacyScriptPubKeyMan + isminetype IsMine(const CScript& script) const override; + + // FillableSigningProvider overrides + bool HaveKey(const CKeyID &address) const override; + bool GetKey(const CKeyID &address, CKey& keyOut) const override; + bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override; + bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override; + + std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore); + std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore); + std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_KeyStore); + int64_t m_max_keypool_index GUARDED_BY(cs_KeyStore) = 0; + std::map<CKeyID, int64_t> m_pool_key_to_index; + + //! Load metadata (used by LoadWallet) + virtual void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata); + virtual void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata); + + //! Adds a watch-only address to the store, without saving it to disk (used by LoadWallet) + bool LoadWatchOnly(const CScript &dest); + //! Returns whether the watch-only script is in the wallet + bool HaveWatchOnly(const CScript &dest) const; + //! Returns whether there are any watch-only things in the wallet + bool HaveWatchOnly() const; + //! Adds a key to the store, without saving it to disk (used by LoadWallet) + bool LoadKey(const CKey& key, const CPubKey &pubkey); + //! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet) + bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid); + //! Adds a CScript to the store + bool LoadCScript(const CScript& redeemScript); + //! Load a HD chain model (used by LoadWallet) + void LoadHDChain(const CHDChain& chain); + void AddInactiveHDChain(const CHDChain& chain); + const CHDChain& GetHDChain() const { return m_hd_chain; } + //! Load a keypool entry + void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool); + + //! Fetches a pubkey from mapWatchKeys if it exists there + bool GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const; + + /** + * Retrieves scripts that were imported by bugs into the legacy spkm and are + * simply invalid, such as a sh(sh(pkh())) script, or not watched. + */ + std::unordered_set<CScript, SaltedSipHasher> GetNotMineScriptPubKeys() const; + + /** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan. + * Does not modify this ScriptPubKeyMan. 
*/ + std::optional<MigrationData> MigrateToDescriptor(); + /** Delete all the records ofthis LegacyScriptPubKeyMan from disk*/ + bool DeleteRecords(); +}; + +// Implements the full legacy wallet behavior +class LegacyScriptPubKeyMan : public LegacyDataSPKM +{ +private: + WalletBatch *encrypted_batch GUARDED_BY(cs_KeyStore) = nullptr; + // By default, do not scan any block until keys/scripts are generated/imported int64_t nTimeFirstKey GUARDED_BY(cs_KeyStore) = UNKNOWN_TIME; //! Number of pre-generated keys/scripts (part of the look-ahead process, used to detect payments) int64_t m_keypool_size GUARDED_BY(cs_KeyStore){DEFAULT_KEYPOOL_SIZE}; - bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey); - bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); + bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey) override; /** * Private version of AddWatchOnly method which does not accept a @@ -315,7 +395,6 @@ private: */ bool AddWatchOnly(const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - bool AddWatchOnlyInMem(const CScript &dest); //! Adds a watch-only address to the store, and saves it to disk. bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest, int64_t create_time) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); @@ -330,18 +409,9 @@ private: /** Add a KeyOriginInfo to the wallet */ bool AddKeyOriginWithDB(WalletBatch& batch, const CPubKey& pubkey, const KeyOriginInfo& info); - /* the HD chain data model (external chain counters) */ - CHDChain m_hd_chain; - std::unordered_map<CKeyID, CHDChain, SaltedSipHasher> m_inactive_hd_chains; - /* HD derive new child key (on internal or external chain) */ void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore); - std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore); - std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_KeyStore); - int64_t m_max_keypool_index GUARDED_BY(cs_KeyStore) = 0; - std::map<CKeyID, int64_t> m_pool_key_to_index; // Tracks keypool indexes to CKeyIDs of keys that have been taken out of the keypool but may be returned to it std::map<int64_t, CKeyID> m_index_to_reserved_key; @@ -378,12 +448,10 @@ private: bool TopUpChain(WalletBatch& batch, CHDChain& chain, unsigned int size); public: - LegacyScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size) : ScriptPubKeyMan(storage), m_keypool_size(keypool_size) {} + LegacyScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size) : LegacyDataSPKM(storage), m_keypool_size(keypool_size) {} util::Result<CTxDestination> GetNewDestination(const OutputType type) override; - isminetype IsMine(const CScript& script) const override; - bool CheckDecryptionKey(const CKeyingMaterial& master_key) override; bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) override; util::Result<CTxDestination> GetReservedDestination(const OutputType type, bool internal, int64_t& index, CKeyPool& keypool) override; @@ -417,8 +485,6 @@ public: bool CanGetAddresses(bool internal = false) const override; - std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const override; - bool CanProvide(const CScript& script, SignatureData& sigdata) override; bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, 
Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const override; @@ -427,58 +493,27 @@ public: uint256 GetID() const override; - // Map from Key ID to key metadata. - std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore); - - // Map from Script ID to key metadata (for watch-only keys). - std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_KeyStore); - //! Adds a key to the store, and saves it to disk. bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override; - //! Adds a key to the store, without saving it to disk (used by LoadWallet) - bool LoadKey(const CKey& key, const CPubKey &pubkey); //! Adds an encrypted key to the store, and saves it to disk. bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); - //! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet) - bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid); void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - //! Adds a CScript to the store - bool LoadCScript(const CScript& redeemScript); //! Load metadata (used by LoadWallet) - void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata); - void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata); + void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata) override; + void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata) override; //! Generate a new key CPubKey GenerateNewKey(WalletBatch& batch, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); /* Set the HD chain model (chain child index counters) and writes it to the database */ void AddHDChain(const CHDChain& chain); - //! Load a HD chain model (used by LoadWallet) - void LoadHDChain(const CHDChain& chain); - const CHDChain& GetHDChain() const { return m_hd_chain; } - void AddInactiveHDChain(const CHDChain& chain); - //! Adds a watch-only address to the store, without saving it to disk (used by LoadWallet) - bool LoadWatchOnly(const CScript &dest); - //! Returns whether the watch-only script is in the wallet - bool HaveWatchOnly(const CScript &dest) const; - //! Returns whether there are any watch-only things in the wallet - bool HaveWatchOnly() const; //! Remove a watch only script from the keystore bool RemoveWatchOnly(const CScript &dest); bool AddWatchOnly(const CScript& dest, int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - //! Fetches a pubkey from mapWatchKeys if it exists there - bool GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const; - /* SigningProvider overrides */ - bool HaveKey(const CKeyID &address) const override; - bool GetKey(const CKeyID &address, CKey& keyOut) const override; - bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override; bool AddCScript(const CScript& redeemScript) override; - bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override; - //! 
Load a keypool entry - void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool); bool NewKeyPool(); void MarkPreSplitKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); @@ -527,28 +562,15 @@ public: const std::map<CKeyID, int64_t>& GetAllReserveKeys() const { return m_pool_key_to_index; } std::set<CKeyID> GetKeys() const override; - std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override; - - /** - * Retrieves scripts that were imported by bugs into the legacy spkm and are - * simply invalid, such as a sh(sh(pkh())) script, or not watched. - */ - std::unordered_set<CScript, SaltedSipHasher> GetNotMineScriptPubKeys() const; - - /** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan. - * Does not modify this ScriptPubKeyMan. */ - std::optional<MigrationData> MigrateToDescriptor(); - /** Delete all the records ofthis LegacyScriptPubKeyMan from disk*/ - bool DeleteRecords(); }; /** Wraps a LegacyScriptPubKeyMan so that it can be returned in a new unique_ptr. Does not provide privkeys */ class LegacySigningProvider : public SigningProvider { private: - const LegacyScriptPubKeyMan& m_spk_man; + const LegacyDataSPKM& m_spk_man; public: - explicit LegacySigningProvider(const LegacyScriptPubKeyMan& spk_man) : m_spk_man(spk_man) {} + explicit LegacySigningProvider(const LegacyDataSPKM& spk_man) : m_spk_man(spk_man) {} bool GetCScript(const CScriptID &scriptid, CScript& script) const override { return m_spk_man.GetCScript(scriptid, script); } bool HaveCScript(const CScriptID &scriptid) const override { return m_spk_man.HaveCScript(scriptid); } diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp index 1ad570442e..7abf7f59c0 100644 --- a/src/wallet/spend.cpp +++ b/src/wallet/spend.cpp @@ -226,7 +226,7 @@ void CoinsResult::Erase(const std::unordered_set<COutPoint, SaltedOutpointHasher void CoinsResult::Shuffle(FastRandomContext& rng_fast) { for (auto& it : coins) { - ::Shuffle(it.second.begin(), it.second.end(), rng_fast); + std::shuffle(it.second.begin(), it.second.end(), rng_fast); } } @@ -992,6 +992,16 @@ static void DiscourageFeeSniping(CMutableTransaction& tx, FastRandomContext& rng } } +size_t GetSerializeSizeForRecipient(const CRecipient& recipient) +{ + return ::GetSerializeSize(CTxOut(recipient.nAmount, GetScriptForDestination(recipient.dest))); +} + +bool IsDust(const CRecipient& recipient, const CFeeRate& dustRelayFee) +{ + return ::IsDust(CTxOut(recipient.nAmount, GetScriptForDestination(recipient.dest)), dustRelayFee); +} + static util::Result<CreatedTransactionResult> CreateTransactionInternal( CWallet& wallet, const std::vector<CRecipient>& vecSend, @@ -1018,12 +1028,20 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal( } // Set the long term feerate estimate to the wallet's consolidate feerate coin_selection_params.m_long_term_feerate = wallet.m_consolidate_feerate; + // Static vsize overhead + outputs vsize. 4 nVersion, 4 nLocktime, 1 input count, 1 witness overhead (dummy, flag, stack size) + coin_selection_params.tx_noinputs_size = 10 + GetSizeOfCompactSize(vecSend.size()); // bytes for output count CAmount recipients_sum = 0; const OutputType change_type = wallet.TransactionChangeType(coin_control.m_change_type ? 
*coin_control.m_change_type : wallet.m_default_change_type, vecSend); ReserveDestination reservedest(&wallet, change_type); unsigned int outputs_to_subtract_fee_from = 0; // The number of outputs which we are subtracting the fee from for (const auto& recipient : vecSend) { + if (IsDust(recipient, wallet.chain().relayDustFee())) { + return util::Error{_("Transaction amount too small")}; + } + + // Include the fee cost for outputs. + coin_selection_params.tx_noinputs_size += GetSerializeSizeForRecipient(recipient); recipients_sum += recipient.nAmount; if (recipient.fSubtractFeeFromAmount) { @@ -1108,23 +1126,6 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal( const auto change_spend_fee = coin_selection_params.m_discard_feerate.GetFee(coin_selection_params.change_spend_size); coin_selection_params.min_viable_change = std::max(change_spend_fee + 1, dust); - // Static vsize overhead + outputs vsize. 4 version, 4 nLocktime, 1 input count, 1 witness overhead (dummy, flag, stack size) - coin_selection_params.tx_noinputs_size = 10 + GetSizeOfCompactSize(vecSend.size()); // bytes for output count - - // vouts to the payees - for (const auto& recipient : vecSend) - { - CTxOut txout(recipient.nAmount, GetScriptForDestination(recipient.dest)); - - // Include the fee cost for outputs. - coin_selection_params.tx_noinputs_size += ::GetSerializeSize(txout); - - if (IsDust(txout, wallet.chain().relayDustFee())) { - return util::Error{_("Transaction amount too small")}; - } - txNew.vout.push_back(txout); - } - // Include the fees for things that aren't inputs, excluding the change output const CAmount not_input_fees = coin_selection_params.m_effective_feerate.GetFee(coin_selection_params.m_subtract_fee_outputs ? 0 : coin_selection_params.tx_noinputs_size); CAmount selection_target = recipients_sum + not_input_fees; @@ -1165,6 +1166,11 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal( result.GetWaste(), result.GetSelectedValue()); + // vouts to the payees + for (const auto& recipient : vecSend) + { + txNew.vout.emplace_back(recipient.nAmount, GetScriptForDestination(recipient.dest)); + } const CAmount change_amount = result.GetChange(coin_selection_params.min_viable_change, coin_selection_params.m_change_fee); if (change_amount > 0) { CTxOut newTxOut(change_amount, scriptChange); diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index 5e3a8179a2..f2110ea3f7 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -52,7 +52,7 @@ static int TraceSqlCallback(unsigned code, void* context, void* param1, void* pa // in the log file, only expand statements that query the database, not // statements that update the database. char* expanded{sqlite3_stmt_readonly(stmt) ? sqlite3_expanded_sql(stmt) : nullptr}; - LogPrintf("[%s] SQLite Statement: %s\n", db->Filename(), expanded ? expanded : sqlite3_sql(stmt)); + LogTrace(BCLog::WALLETDB, "[%s] SQLite Statement: %s\n", db->Filename(), expanded ? 
expanded : sqlite3_sql(stmt)); if (expanded) sqlite3_free(expanded); } return SQLITE_OK; diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index 0a3243fe19..6b84f34366 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -26,7 +26,7 @@ public: std::vector<std::byte> m_prefix_range_start; std::vector<std::byte> m_prefix_range_end; - explicit SQLiteCursor() {} + explicit SQLiteCursor() = default; explicit SQLiteCursor(std::vector<std::byte> start_range, std::vector<std::byte> end_range) : m_prefix_range_start(std::move(start_range)), m_prefix_range_end(std::move(end_range)) @@ -41,7 +41,7 @@ public: class SQliteExecHandler { public: - virtual ~SQliteExecHandler() {} + virtual ~SQliteExecHandler() = default; virtual int Exec(SQLiteDatabase& database, const std::string& statement); }; diff --git a/src/wallet/test/fuzz/crypter.cpp b/src/wallet/test/fuzz/crypter.cpp index 62dd1bfde0..814136476b 100644 --- a/src/wallet/test/fuzz/crypter.cpp +++ b/src/wallet/test/fuzz/crypter.cpp @@ -27,36 +27,36 @@ FUZZ_TARGET(crypter, .init = initialize_crypter) // These values are regularly updated within `CallOneOf` std::vector<unsigned char> cipher_text_ed; CKeyingMaterial plain_text_ed; - const std::vector<unsigned char> random_key = ConsumeRandomLengthByteVector(fuzzed_data_provider); + const std::vector<unsigned char> random_key = ConsumeFixedLengthByteVector(fuzzed_data_provider, WALLET_CRYPTO_KEY_SIZE); LIMITED_WHILE(good_data && fuzzed_data_provider.ConsumeBool(), 10000) { CallOneOf( fuzzed_data_provider, [&] { - const std::string random_string = fuzzed_data_provider.ConsumeRandomLengthString(); + const std::string random_string = fuzzed_data_provider.ConsumeRandomLengthString(100); SecureString secure_string(random_string.begin(), random_string.end()); const unsigned int derivation_method = fuzzed_data_provider.ConsumeBool() ? 0 : fuzzed_data_provider.ConsumeIntegral<unsigned int>(); // Limiting the value of nRounds since it is otherwise uselessly expensive and causes a timeout when fuzzing. 
crypt.SetKeyFromPassphrase(/*strKeyData=*/secure_string, - /*chSalt=*/ConsumeRandomLengthByteVector(fuzzed_data_provider), + /*chSalt=*/ConsumeFixedLengthByteVector(fuzzed_data_provider, WALLET_CRYPTO_SALT_SIZE), /*nRounds=*/fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(0, 25000), /*nDerivationMethod=*/derivation_method); }, [&] { - const std::vector<unsigned char> random_vector = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32); + const std::vector<unsigned char> random_vector = ConsumeFixedLengthByteVector(fuzzed_data_provider, WALLET_CRYPTO_KEY_SIZE); const CKeyingMaterial new_key(random_vector.begin(), random_vector.end()); - const std::vector<unsigned char>& new_IV = ConsumeFixedLengthByteVector(fuzzed_data_provider, 16); + const std::vector<unsigned char>& new_IV = ConsumeFixedLengthByteVector(fuzzed_data_provider, WALLET_CRYPTO_IV_SIZE); crypt.SetKey(new_key, new_IV); }, [&] { - const std::vector<unsigned char> random_vector = ConsumeRandomLengthByteVector(fuzzed_data_provider); + const std::vector<unsigned char> random_vector = ConsumeFixedLengthByteVector(fuzzed_data_provider, WALLET_CRYPTO_KEY_SIZE); plain_text_ed = CKeyingMaterial(random_vector.begin(), random_vector.end()); }, [&] { - cipher_text_ed = ConsumeRandomLengthByteVector(fuzzed_data_provider); + cipher_text_ed = ConsumeRandomLengthByteVector(fuzzed_data_provider, 64); }, [&] { (void)crypt.Encrypt(plain_text_ed, cipher_text_ed); @@ -82,7 +82,7 @@ FUZZ_TARGET(crypter, .init = initialize_crypter) } const CPubKey pub_key = *random_pub_key; const CKeyingMaterial master_key(random_key.begin(), random_key.end()); - const std::vector<unsigned char> crypted_secret = ConsumeRandomLengthByteVector(fuzzed_data_provider); + const std::vector<unsigned char> crypted_secret = ConsumeRandomLengthByteVector(fuzzed_data_provider, 64); CKey key; DecryptKey(master_key, crypted_secret, pub_key, key); }); diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h index a3e6ede81e..fc7674e961 100644 --- a/src/wallet/test/util.h +++ b/src/wallet/test/util.h @@ -61,7 +61,7 @@ public: explicit MockableCursor(const MockableData& records, bool pass) : m_cursor(records.begin()), m_cursor_end(records.end()), m_pass(pass) {} MockableCursor(const MockableData& records, bool pass, Span<const std::byte> prefix); - ~MockableCursor() {} + ~MockableCursor() = default; Status Next(DataStream& key, DataStream& value) override; }; @@ -80,7 +80,7 @@ private: public: explicit MockableBatch(MockableData& records, bool pass) : m_records(records), m_pass(pass) {} - ~MockableBatch() {} + ~MockableBatch() = default; void Flush() override {} void Close() override {} @@ -106,7 +106,7 @@ public: bool m_pass{true}; MockableDatabase(MockableData records = {}) : WalletDatabase(), m_records(records) {} - ~MockableDatabase() {}; + ~MockableDatabase() = default; void Open() override {} void AddRef() override {} diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index d569c64b43..bb1789f109 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -2225,8 +2225,8 @@ std::optional<PSBTError> CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bo // Complete if every input is now signed complete = true; - for (const auto& input : psbtx.inputs) { - complete &= PSBTInputSigned(input); + for (size_t i = 0; i < psbtx.inputs.size(); ++i) { + complete &= PSBTInputSignedAndVerified(psbtx, i, &txdata); } return {}; @@ -2929,7 +2929,7 @@ bool CWallet::EraseAddressReceiveRequest(WalletBatch& batch, const CTxDestinatio return true; } 
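The spend.cpp hunks earlier in this section move the per-recipient dust check and size accounting ahead of coin selection: tx_noinputs_size starts from a 10-byte static overhead (4-byte nVersion, 4-byte nLockTime, 1-byte input count, 1 byte of witness overhead) plus the compact-size encoding of the output count, and each recipient then adds the serialized size of its would-be CTxOut (8-byte amount plus a compact-size-prefixed scriptPubKey). A standalone sketch of that arithmetic, with helper names of my own rather than the Bitcoin Core functions:

// Standalone sketch of the "no-inputs" size accounting used when building a
// transaction: static overhead + compact-size output count + serialized outputs.
// Helper names are illustrative, not the Bitcoin Core API.
#include <cstdint>
#include <cstdio>
#include <vector>

// Length of Bitcoin's CompactSize encoding for a value n.
size_t CompactSizeLen(uint64_t n)
{
    if (n < 253) return 1;
    if (n <= 0xFFFF) return 3;
    if (n <= 0xFFFFFFFF) return 5;
    return 9;
}

// Serialized size of one output: 8-byte amount + compact-size script length + script bytes.
size_t OutputSerializedSize(size_t script_len)
{
    return 8 + CompactSizeLen(script_len) + script_len;
}

int main()
{
    // Example: two P2WPKH outputs (22-byte scriptPubKeys).
    std::vector<size_t> script_lens{22, 22};

    // 4 nVersion + 4 nLockTime + 1 input count + 1 witness overhead (dummy, flag, stack size).
    size_t tx_noinputs_size = 10 + CompactSizeLen(script_lens.size());
    for (size_t len : script_lens) tx_noinputs_size += OutputSerializedSize(len);

    std::printf("no-input size: %zu bytes\n", tx_noinputs_size); // 10 + 1 + 2*31 = 73
    return 0;
}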
-std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error_string) +static util::Result<fs::path> GetWalletPath(const std::string& name) { // Do some checking on wallet path. It should be either a: // @@ -2942,15 +2942,24 @@ std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, cons if (!(path_type == fs::file_type::not_found || path_type == fs::file_type::directory || (path_type == fs::file_type::symlink && fs::is_directory(wallet_path)) || (path_type == fs::file_type::regular && fs::PathFromString(name).filename() == fs::PathFromString(name)))) { - error_string = Untranslated(strprintf( + return util::Error{Untranslated(strprintf( "Invalid -wallet path '%s'. -wallet path should point to a directory where wallet.dat and " "database/log.?????????? files can be stored, a location where such a directory could be created, " "or (for backwards compatibility) the name of an existing data file in -walletdir (%s)", - name, fs::quoted(fs::PathToString(GetWalletDir())))); + name, fs::quoted(fs::PathToString(GetWalletDir()))))}; + } + return wallet_path; +} + +std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error_string) +{ + const auto& wallet_path = GetWalletPath(name); + if (!wallet_path) { + error_string = util::ErrorString(wallet_path); status = DatabaseStatus::FAILED_BAD_PATH; return nullptr; } - return MakeDatabase(wallet_path, options, status, error_string); + return MakeDatabase(*wallet_path, options, status, error_string); } std::shared_ptr<CWallet> CWallet::Create(WalletContext& context, const std::string& name, std::unique_ptr<WalletDatabase> database, uint64_t wallet_creation_flags, bilingual_str& error, std::vector<bilingual_str>& warnings) @@ -3608,6 +3617,16 @@ LegacyScriptPubKeyMan* CWallet::GetLegacyScriptPubKeyMan() const return dynamic_cast<LegacyScriptPubKeyMan*>(it->second); } +LegacyDataSPKM* CWallet::GetLegacyDataSPKM() const +{ + if (IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) { + return nullptr; + } + auto it = m_internal_spk_managers.find(OutputType::LEGACY); + if (it == m_internal_spk_managers.end()) return nullptr; + return dynamic_cast<LegacyDataSPKM*>(it->second); +} + LegacyScriptPubKeyMan* CWallet::GetOrCreateLegacyScriptPubKeyMan() { SetupLegacyScriptPubKeyMan(); @@ -3624,13 +3643,22 @@ void CWallet::AddScriptPubKeyMan(const uint256& id, std::unique_ptr<ScriptPubKey MaybeUpdateBirthTime(spkm->GetTimeFirstKey()); } +LegacyDataSPKM* CWallet::GetOrCreateLegacyDataSPKM() +{ + SetupLegacyScriptPubKeyMan(); + return GetLegacyDataSPKM(); +} + void CWallet::SetupLegacyScriptPubKeyMan() { if (!m_internal_spk_managers.empty() || !m_external_spk_managers.empty() || !m_spk_managers.empty() || IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) { return; } - auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this, m_keypool_size)); + std::unique_ptr<ScriptPubKeyMan> spk_manager = m_database->Format() == "bdb_ro" ? 
+ std::make_unique<LegacyDataSPKM>(*this) : + std::make_unique<LegacyScriptPubKeyMan>(*this, m_keypool_size); + for (const auto& type : LEGACY_OUTPUT_TYPES) { m_internal_spk_managers[type] = spk_manager.get(); m_external_spk_managers[type] = spk_manager.get(); @@ -3998,7 +4026,7 @@ std::optional<MigrationData> CWallet::GetDescriptorsForLegacy(bilingual_str& err { AssertLockHeld(cs_wallet); - LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan(); + LegacyDataSPKM* legacy_spkm = GetLegacyDataSPKM(); if (!Assume(legacy_spkm)) { // This shouldn't happen error = Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing")); @@ -4017,7 +4045,7 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error) { AssertLockHeld(cs_wallet); - LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan(); + LegacyDataSPKM* legacy_spkm = GetLegacyDataSPKM(); if (!Assume(legacy_spkm)) { // This shouldn't happen error = Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing")); @@ -4352,11 +4380,24 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle // If the wallet is still loaded, unload it so that nothing else tries to use it while we're changing it bool was_loaded = false; if (auto wallet = GetWallet(context, wallet_name)) { + if (wallet->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) { + return util::Error{_("Error: This wallet is already a descriptor wallet")}; + } + if (!RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings)) { return util::Error{_("Unable to unload the wallet before migrating")}; } UnloadWallet(std::move(wallet)); was_loaded = true; + } else { + // Check if the wallet is BDB + const auto& wallet_path = GetWalletPath(wallet_name); + if (!wallet_path) { + return util::Error{util::ErrorString(wallet_path)}; + } + if (!IsBDBFile(BDBDataFile(*wallet_path))) { + return util::Error{_("Error: This wallet is already a descriptor wallet")}; + } } // Load the wallet but only in the context of this function. @@ -4365,6 +4406,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle empty_context.args = context.args; DatabaseOptions options; options.require_existing = true; + options.require_format = DatabaseFormat::BERKELEY_RO; DatabaseStatus status; std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error); if (!database) { @@ -4379,6 +4421,8 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle // Helper to reload as normal for some of our exit scenarios const auto& reload_wallet = [&](std::shared_ptr<CWallet>& to_reload) { + // Reset options.require_format as wallets of any format may be reloaded. + options.require_format = std::nullopt; assert(to_reload.use_count() == 1); std::string name = to_reload->GetName(); to_reload.reset(); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 5bc888462f..984a2d9c48 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -963,8 +963,10 @@ public: //! Get the LegacyScriptPubKeyMan which is used for all types, internal, and external. LegacyScriptPubKeyMan* GetLegacyScriptPubKeyMan() const; LegacyScriptPubKeyMan* GetOrCreateLegacyScriptPubKeyMan(); + LegacyDataSPKM* GetLegacyDataSPKM() const; + LegacyDataSPKM* GetOrCreateLegacyDataSPKM(); - //! Make a LegacyScriptPubKeyMan and set it for all types, internal, and external. + //! Make a Legacy(Data)SPKM and set it for all types, internal, and external. 
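In the wallet.cpp hunk above, the path check is factored out of MakeWalletDatabase into a GetWalletPath helper that yields either a usable path or an error, so the migration code can run the same validation before probing whether the file is a BDB wallet. A simplified, self-contained sketch of that value-or-error pattern (a hand-rolled variant-based result, not util::Result, and a reduced set of accepted path types):

// Sketch of factoring path validation into a reusable "value or error" helper,
// loosely mirroring the GetWalletPath()/MakeWalletDatabase() split above.
// The Result type here is a simplified stand-in, not util::Result.
#include <filesystem>
#include <iostream>
#include <string>
#include <system_error>
#include <variant>

namespace fs = std::filesystem;

struct Error { std::string message; };
using PathResult = std::variant<fs::path, Error>;

// Accept an existing directory, a location where one could be created,
// or (for compatibility) a plain file name; reject anything else.
PathResult GetWalletPathSketch(const fs::path& wallet_dir, const std::string& name)
{
    fs::path wallet_path = fs::absolute(wallet_dir / name);
    std::error_code ec;
    auto type = fs::status(wallet_path, ec).type();
    if (type == fs::file_type::not_found || type == fs::file_type::directory ||
        (type == fs::file_type::regular && fs::path(name).filename() == fs::path(name))) {
        return wallet_path;
    }
    return Error{"Invalid -wallet path '" + name + "'"};
}

int main()
{
    // Both the database factory and the migration code can share the same check.
    auto res = GetWalletPathSketch(fs::temp_directory_path(), "wallet.dat");
    if (const auto* err = std::get_if<Error>(&res)) {
        std::cerr << err->message << '\n';
        return 1;
    }
    std::cout << "wallet path: " << std::get<fs::path>(res) << '\n';
    return 0;
}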
void SetupLegacyScriptPubKeyMan(); bool WithEncryptionKey(std::function<bool (const CKeyingMaterial&)> cb) const override; diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index f34fcfc3fd..61cc9dbc78 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -354,9 +354,9 @@ bool LoadKey(CWallet* pwallet, DataStream& ssKey, DataStream& ssValue, std::stri strErr = "Error reading wallet database: CPrivKey corrupt"; return false; } - if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKey(key, vchPubKey)) + if (!pwallet->GetOrCreateLegacyDataSPKM()->LoadKey(key, vchPubKey)) { - strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadKey failed"; + strErr = "Error reading wallet database: LegacyDataSPKM::LoadKey failed"; return false; } } catch (const std::exception& e) { @@ -393,9 +393,9 @@ bool LoadCryptedKey(CWallet* pwallet, DataStream& ssKey, DataStream& ssValue, st } } - if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey, checksum_valid)) + if (!pwallet->GetOrCreateLegacyDataSPKM()->LoadCryptedKey(vchPubKey, vchPrivKey, checksum_valid)) { - strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCryptedKey failed"; + strErr = "Error reading wallet database: LegacyDataSPKM::LoadCryptedKey failed"; return false; } } catch (const std::exception& e) { @@ -440,7 +440,7 @@ bool LoadHDChain(CWallet* pwallet, DataStream& ssValue, std::string& strErr) try { CHDChain chain; ssValue >> chain; - pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadHDChain(chain); + pwallet->GetOrCreateLegacyDataSPKM()->LoadHDChain(chain); } catch (const std::exception& e) { if (strErr.empty()) { strErr = e.what(); @@ -584,9 +584,9 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch, key >> hash; CScript script; value >> script; - if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCScript(script)) + if (!pwallet->GetOrCreateLegacyDataSPKM()->LoadCScript(script)) { - strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCScript failed"; + strErr = "Error reading wallet database: LegacyDataSPKM::LoadCScript failed"; return DBErrors::NONCRITICAL_ERROR; } return DBErrors::LOAD_OK; @@ -607,7 +607,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch, key >> vchPubKey; CKeyMetadata keyMeta; value >> keyMeta; - pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta); + pwallet->GetOrCreateLegacyDataSPKM()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta); // Extract some CHDChain info from this metadata if it has any if (keyMeta.nVersion >= CKeyMetadata::VERSION_WITH_HDDATA && !keyMeta.hd_seed_id.IsNull() && keyMeta.hdKeypath.size() > 0) { @@ -674,7 +674,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch, // Set inactive chains if (!hd_chains.empty()) { - LegacyScriptPubKeyMan* legacy_spkm = pwallet->GetLegacyScriptPubKeyMan(); + LegacyDataSPKM* legacy_spkm = pwallet->GetLegacyDataSPKM(); if (legacy_spkm) { for (const auto& [hd_seed_id, chain] : hd_chains) { if (hd_seed_id != legacy_spkm->GetHDChain().seed_id) { @@ -695,7 +695,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch, uint8_t fYes; value >> fYes; if (fYes == '1') { - pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadWatchOnly(script); + pwallet->GetOrCreateLegacyDataSPKM()->LoadWatchOnly(script); } return DBErrors::LOAD_OK; }); @@ -708,7 +708,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, 
DatabaseBatch& batch, key >> script; CKeyMetadata keyMeta; value >> keyMeta; - pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadScriptMetadata(CScriptID(script), keyMeta); + pwallet->GetOrCreateLegacyDataSPKM()->LoadScriptMetadata(CScriptID(script), keyMeta); return DBErrors::LOAD_OK; }); result = std::max(result, watch_meta_res.m_result); @@ -720,7 +720,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch, key >> nIndex; CKeyPool keypool; value >> keypool; - pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyPool(nIndex, keypool); + pwallet->GetOrCreateLegacyDataSPKM()->LoadKeyPool(nIndex, keypool); return DBErrors::LOAD_OK; }); result = std::max(result, pool_res.m_result); @@ -763,7 +763,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch, // nTimeFirstKey is only reliable if all keys have metadata if (pwallet->IsLegacy() && (key_res.m_records + ckey_res.m_records + watch_script_res.m_records) != (keymeta_res.m_records + watch_meta_res.m_records)) { - auto spk_man = pwallet->GetOrCreateLegacyScriptPubKeyMan(); + auto spk_man = pwallet->GetLegacyScriptPubKeyMan(); if (spk_man) { LOCK(spk_man->cs_KeyStore); spk_man->UpdateTimeFirstKey(1); diff --git a/src/wallet/walletutil.h b/src/wallet/walletutil.h index 38926c1eb8..96cb35b926 100644 --- a/src/wallet/walletutil.h +++ b/src/wallet/walletutil.h @@ -111,7 +111,7 @@ public: SER_READ(obj, obj.DeserializeDescriptor(descriptor_str)); } - WalletDescriptor() {} + WalletDescriptor() = default; WalletDescriptor(std::shared_ptr<Descriptor> descriptor, uint64_t creation_time, int32_t range_start, int32_t range_end, int32_t next_index) : descriptor(descriptor), id(DescriptorID(*descriptor)), creation_time(creation_time), range_start(range_start), range_end(range_end), next_index(next_index) { } }; diff --git a/src/walletinitinterface.h b/src/walletinitinterface.h index ce8b6cfd6e..ecee2e93a9 100644 --- a/src/walletinitinterface.h +++ b/src/walletinitinterface.h @@ -22,7 +22,7 @@ public: /** Add wallets that should be opened to list of chain clients. 
*/ virtual void Construct(node::NodeContext& node) const = 0; - virtual ~WalletInitInterface() {} + virtual ~WalletInitInterface() = default; }; extern const WalletInitInterface& g_wallet_init_interface; diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp index 536e471053..44cbacda64 100644 --- a/src/zmq/zmqnotificationinterface.cpp +++ b/src/zmq/zmqnotificationinterface.cpp @@ -24,9 +24,7 @@ #include <utility> #include <vector> -CZMQNotificationInterface::CZMQNotificationInterface() -{ -} +CZMQNotificationInterface::CZMQNotificationInterface() = default; CZMQNotificationInterface::~CZMQNotificationInterface() { diff --git a/test/functional/data/rpc_psbt.json b/test/functional/data/rpc_psbt.json index 3127350872..1ccc5e0ba0 100644 --- a/test/functional/data/rpc_psbt.json +++ b/test/functional/data/rpc_psbt.json @@ -38,7 +38,9 @@ "cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgAw2k/OT32yjCyylRYx4ANxOFZZf+ljiCy1AOaBEsymMAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJBFCyxOsaCSN6AaqajZZzzwD62gh0JyBFKToaP696GW7bSzZcOFfU/wMgvlQ/VYP+pGbdhcr4Bc2iomROvB09ACwlCiXVqo3OczGiewPzzo2C+MswLWbFuk6Hou0YFcmssp6P/cGxBdmSWMrLMaOH5ErileONxnOdxCIXHqWb0m81DywEBAAA=", "cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgAw2k/OT32yjCyylRYx4ANxOFZZf+ljiCy1AOaBEsymMAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJBFCyxOsaCSN6AaqajZZzzwD62gh0JyBFKToaP696GW7bSzZcOFfU/wMgvlQ/VYP+pGbdhcr4Bc2iomROvB09ACwk5iXVqo3OczGiewPzzo2C+MswLWbFuk6Hou0YFcmssp6P/cGxBdmSWMrLMaOH5ErileONxnOdxCIXHqWb0m81DywAA", "cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgAw2k/OT32yjCyylRYx4ANxOFZZf+ljiCy1AOaBEsymMAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJjFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wG99YgWelJehpKJnVp2YdtpgEBr/OONSm5uTnOf5GulwEV8uSQr3zEXE94UR82BXzlxaXFYyWin7RN/CA/NW4fgAIyAssTrGgkjegGqmo2Wc88A+toIdCcgRSk6Gj+vehlu20qzAAAA=", - "cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgAw2k/OT32yjCyylRYx4ANxOFZZf+ljiCy1AOaBEsymMAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJhFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wG99YgWelJehpKJnVp2YdtpgEBr/OONSm5uTnOf5GulwEV8uSQr3zEXE94UR82BXzlxaXFYyWin7RN/CA/NW4SMgLLE6xoJI3oBqpqNlnPPAPraCHQnIEUpOho/r3oZbttKswAAA" + "cHNidP8BAF4CAAAAAZvUh2UjC/mnLmYgAflyVW5U8Mb5f+tWvLVgDYF/aZUmAQAAAAD/////AUjmBSoBAAAAIlEgAw2k/OT32yjCyylRYx4ANxOFZZf+ljiCy1AOaBEsymMAAAAAAAEBKwDyBSoBAAAAIlEgwiR++/2SrEf29AuNQtFpF1oZ+p+hDkol1/NetN2FtpJhFcFQkpt0waBJVLeLS2A16XpeB4paDyjsltVHv+6azoA6wG99YgWelJehpKJnVp2YdtpgEBr/OONSm5uTnOf5GulwEV8uSQr3zEXE94UR82BXzlxaXFYyWin7RN/CA/NW4SMgLLE6xoJI3oBqpqNlnPPAPraCHQnIEUpOho/r3oZbttKswAAA", + "cHNidP8BAHUCAAAAAQCBcTce3/KF6Tet7qSze3gADAVmy7OtZGQXE8pCFxv2AAAAAAD+////AtPf9QUAAAAAGXapFNDFmQPFusKGh2DpD9UhpGZap2UgiKwA4fUFAAAAABepFDVF5uM7gyxHBQ8k0+65PJwDlIvHh7MuEwAAAQD9pQEBAAAAAAECiaPHHqtNIOA3G7ukzGmPopXJRjr6Ljl/hTPMti+VZ+UBAAAAFxYAFL4Y0VKpsBIDna89p95PUzSe7LmF/////4b4qkOnHf8USIk6UwpyN+9rRgi7st0tAXHmOuxqSJC0AQAAABcWABT+Pp7xp0XpdNkCxDVZQ6vLNL1TU/////8CAMLrCwAAAAAZdqkUhc/xCX/Z4Ai7NK9wnGIZeziXikiIrHL++E4sAAAAF6kUM5cluiHv1irHU6m80GfWx6ajnQWHAkcwRAIgJxK+IuAnDzlPVoMR3HyppolwuAJf3TskAinwf4pfOiQCIAGLONfc0xTnNMkna9b7QPZzMlvEuqFEyADS8vAtsnZcASED0uFWdJQbrUqZY3LLh+GFbTZSYG2YVi/jnF6efkE/IQUCSDBFAiEA0SuFLYXc2WHS9fSrZgZU327tzHlMDDPOXMMJ/7X85Y0CIGczio4OFyXBl/saiK9Z9R5E5CVbIBZ8hoQDHAXR8lkqASECI7cr7vCWXRC+B3jv7NYfysb3mk6haTkzgHNEZPhPKrMAAAAAAAAA", + 
"cHNidP8BAHUCAAAAASaBcTce3/KF6Tet7qSze3gADAVmy7OtZGQXE8pCFxv2AAAAAgD+////AtPf9QUAAAAAGXapFNDFmQPFusKGh2DpD9UhpGZap2UgiKwA4fUFAAAAABepFDVF5uM7gyxHBQ8k0+65PJwDlIvHh7MuEwAAAQD9pQEBAAAAAAECiaPHHqtNIOA3G7ukzGmPopXJRjr6Ljl/hTPMti+VZ+UBAAAAFxYAFL4Y0VKpsBIDna89p95PUzSe7LmF/////4b4qkOnHf8USIk6UwpyN+9rRgi7st0tAXHmOuxqSJC0AQAAABcWABT+Pp7xp0XpdNkCxDVZQ6vLNL1TU/////8CAMLrCwAAAAAZdqkUhc/xCX/Z4Ai7NK9wnGIZeziXikiIrHL++E4sAAAAF6kUM5cluiHv1irHU6m80GfWx6ajnQWHAkcwRAIgJxK+IuAnDzlPVoMR3HyppolwuAJf3TskAinwf4pfOiQCIAGLONfc0xTnNMkna9b7QPZzMlvEuqFEyADS8vAtsnZcASED0uFWdJQbrUqZY3LLh+GFbTZSYG2YVi/jnF6efkE/IQUCSDBFAiEA0SuFLYXc2WHS9fSrZgZU327tzHlMDDPOXMMJ/7X85Y0CIGczio4OFyXBl/saiK9Z9R5E5CVbIBZ8hoQDHAXR8lkqASECI7cr7vCWXRC+B3jv7NYfysb3mk6haTkzgHNEZPhPKrMAAAAAAAAA" ], "invalid_with_msg": [ [ diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index 658eea0a0e..688e2866b2 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -21,7 +21,6 @@ Interesting test cases could be loading an assumeutxo snapshot file with: Interesting starting states could be loading a snapshot when the current chain tip is: - TODO: An ancestor of snapshot block -- TODO: Not an ancestor of the snapshot block but has less work - TODO: The snapshot block - TODO: A descendant of the snapshot block - TODO: Not an ancestor or a descendant of the snapshot block and has more work @@ -33,6 +32,7 @@ from dataclasses import dataclass from test_framework.messages import tx_from_hex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( + assert_approx, assert_equal, assert_raises_rpc_error, ) @@ -51,18 +51,19 @@ class AssumeutxoTest(BitcoinTestFramework): def set_test_params(self): """Use the pregenerated, deterministic chain up to height 199.""" - self.num_nodes = 3 + self.num_nodes = 4 self.rpc_timeout = 120 self.extra_args = [ [], ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"], ["-persistmempool=0","-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"], + [] ] def setup_network(self): """Start with the nodes disconnected so that one can generate a snapshot including blocks the other hasn't yet seen.""" - self.add_nodes(3) + self.add_nodes(4) self.start_nodes(extra_args=self.extra_args) def test_invalid_snapshot_scenarios(self, valid_snapshot_path): @@ -70,23 +71,24 @@ class AssumeutxoTest(BitcoinTestFramework): with open(valid_snapshot_path, 'rb') as f: valid_snapshot_contents = f.read() bad_snapshot_path = valid_snapshot_path + '.mod' + node = self.nodes[1] def expected_error(log_msg="", rpc_details=""): - with self.nodes[1].assert_debug_log([log_msg]): - assert_raises_rpc_error(-32603, f"Unable to load UTXO snapshot{rpc_details}", self.nodes[1].loadtxoutset, bad_snapshot_path) + with node.assert_debug_log([log_msg]): + assert_raises_rpc_error(-32603, f"Unable to load UTXO snapshot{rpc_details}", node.loadtxoutset, bad_snapshot_path) self.log.info(" - snapshot file with invalid file magic") parsing_error_code = -22 bad_magic = 0xf00f00f000 with open(bad_snapshot_path, 'wb') as f: f.write(bad_magic.to_bytes(5, "big") + valid_snapshot_contents[5:]) - assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: Invalid UTXO set snapshot magic bytes. Please check if this is indeed a snapshot file or if you are using an outdated snapshot format.", self.nodes[1].loadtxoutset, bad_snapshot_path) + assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: Invalid UTXO set snapshot magic bytes. 
Please check if this is indeed a snapshot file or if you are using an outdated snapshot format.", node.loadtxoutset, bad_snapshot_path) self.log.info(" - snapshot file with unsupported version") for version in [0, 2]: with open(bad_snapshot_path, 'wb') as f: f.write(valid_snapshot_contents[:5] + version.to_bytes(2, "little") + valid_snapshot_contents[7:]) - assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: Version of snapshot {version} does not match any of the supported versions.", self.nodes[1].loadtxoutset, bad_snapshot_path) + assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: Version of snapshot {version} does not match any of the supported versions.", node.loadtxoutset, bad_snapshot_path) self.log.info(" - snapshot file with mismatching network magic") invalid_magics = [ @@ -101,9 +103,9 @@ class AssumeutxoTest(BitcoinTestFramework): with open(bad_snapshot_path, 'wb') as f: f.write(valid_snapshot_contents[:7] + magic.to_bytes(4, 'big') + valid_snapshot_contents[11:]) if real: - assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: The network of the snapshot ({name}) does not match the network of this node (regtest).", self.nodes[1].loadtxoutset, bad_snapshot_path) + assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: The network of the snapshot ({name}) does not match the network of this node (regtest).", node.loadtxoutset, bad_snapshot_path) else: - assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: This snapshot has been created for an unrecognized network. This could be a custom signet, a new testnet or possibly caused by data corruption.", self.nodes[1].loadtxoutset, bad_snapshot_path) + assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: This snapshot has been created for an unrecognized network. This could be a custom signet, a new testnet or possibly caused by data corruption.", node.loadtxoutset, bad_snapshot_path) self.log.info(" - snapshot file referring to a block that is not in the assumeutxo parameters") prev_block_hash = self.nodes[0].getblockhash(SNAPSHOT_BASE_HEIGHT - 1) @@ -114,8 +116,9 @@ class AssumeutxoTest(BitcoinTestFramework): for bad_block_hash in [bogus_block_hash, prev_block_hash]: with open(bad_snapshot_path, 'wb') as f: f.write(valid_snapshot_contents[:11] + bogus_height.to_bytes(4, "little") + bytes.fromhex(bad_block_hash)[::-1] + valid_snapshot_contents[47:]) - error_details = f", assumeutxo block hash in snapshot metadata not recognized (hash: {bad_block_hash}, height: {bogus_height}). The following snapshot heights are available: 110, 299." - expected_error(rpc_details=error_details) + + msg = f"Unable to load UTXO snapshot: assumeutxo block hash in snapshot metadata not recognized (hash: {bad_block_hash}, height: {bogus_height}). The following snapshot heights are available: 110, 200, 299." + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, bad_snapshot_path) self.log.info(" - snapshot file with wrong number of coins") valid_num_coins = int.from_bytes(valid_snapshot_contents[47:47 + 8], "little") @@ -151,9 +154,8 @@ class AssumeutxoTest(BitcoinTestFramework): def test_headers_not_synced(self, valid_snapshot_path): for node in self.nodes[1:]: - assert_raises_rpc_error(-32603, "The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) must appear in the headers chain. 
Make sure all headers are syncing, and call this RPC again.", - node.loadtxoutset, - valid_snapshot_path) + msg = "Unable to load UTXO snapshot: The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) must appear in the headers chain. Make sure all headers are syncing, and call loadtxoutset again." + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, valid_snapshot_path) def test_invalid_chainstate_scenarios(self): self.log.info("Test different scenarios of invalid snapshot chainstate in datadir") @@ -185,8 +187,8 @@ class AssumeutxoTest(BitcoinTestFramework): assert tx['txid'] in node.getrawmempool() # Attempt to load the snapshot on Node 2 and expect it to fail - with node.assert_debug_log(expected_msgs=["[snapshot] can't activate a snapshot when mempool not empty"]): - assert_raises_rpc_error(-32603, "Unable to load UTXO snapshot", node.loadtxoutset, dump_output_path) + msg = "Unable to load UTXO snapshot: Can't activate a snapshot when mempool not empty" + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path) self.restart_node(2, extra_args=self.extra_args[2]) @@ -202,7 +204,42 @@ class AssumeutxoTest(BitcoinTestFramework): assert_equal(node.getblockcount(), FINAL_HEIGHT) with node.assert_debug_log(expected_msgs=["[snapshot] activation failed - work does not exceed active chainstate"]): assert_raises_rpc_error(-32603, "Unable to load UTXO snapshot", node.loadtxoutset, dump_output_path) - self.restart_node(0, extra_args=self.extra_args[0]) + + def test_snapshot_block_invalidated(self, dump_output_path): + self.log.info("Test snapshot is not loaded when base block is invalid.") + node = self.nodes[0] + # We are testing the case where the base block is invalidated itself + # and also the case where one of its parents is invalidated. + for height in [SNAPSHOT_BASE_HEIGHT, SNAPSHOT_BASE_HEIGHT - 1]: + block_hash = node.getblockhash(height) + node.invalidateblock(block_hash) + assert_equal(node.getblockcount(), height - 1) + msg = "Unable to load UTXO snapshot: The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) is part of an invalid chain." 
+ assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path) + node.reconsiderblock(block_hash) + + def test_snapshot_in_a_divergent_chain(self, dump_output_path): + n0 = self.nodes[0] + n3 = self.nodes[3] + assert_equal(n0.getblockcount(), FINAL_HEIGHT) + assert_equal(n3.getblockcount(), START_HEIGHT) + + self.log.info("Check importing a snapshot where current chain-tip is not an ancestor of the snapshot block but has less work") + # Generate a divergent chain in n3 up to 298 + self.generate(n3, nblocks=99, sync_fun=self.no_op) + assert_equal(n3.getblockcount(), SNAPSHOT_BASE_HEIGHT - 1) + + # Try importing the snapshot and assert its success + loaded = n3.loadtxoutset(dump_output_path) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + normal, snapshot = n3.getchainstates()["chainstates"] + assert_equal(normal['blocks'], START_HEIGHT + 99) + assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT) + + # Now lets sync the nodes and wait for the background validation to finish + self.connect_nodes(0, 3) + self.sync_blocks(nodes=(n0, n3)) + self.wait_until(lambda: len(n3.getchainstates()['chainstates']) == 1) def run_test(self): """ @@ -215,6 +252,7 @@ class AssumeutxoTest(BitcoinTestFramework): n0 = self.nodes[0] n1 = self.nodes[1] n2 = self.nodes[2] + n3 = self.nodes[3] self.mini_wallet = MiniWallet(n0) @@ -265,6 +303,7 @@ class AssumeutxoTest(BitcoinTestFramework): # block. n1.submitheader(block) n2.submitheader(block) + n3.submitheader(block) # Ensure everyone is seeing the same headers. for n in self.nodes: @@ -290,6 +329,7 @@ class AssumeutxoTest(BitcoinTestFramework): self.test_invalid_snapshot_scenarios(dump_output['path']) self.test_invalid_chainstate_scenarios() self.test_invalid_file_path() + self.test_snapshot_block_invalidated(dump_output['path']) self.log.info(f"Loading snapshot into second node from {dump_output['path']}") loaded = n1.loadtxoutset(dump_output['path']) @@ -301,21 +341,35 @@ class AssumeutxoTest(BitcoinTestFramework): the snapshot, and final values after the snapshot is validated.""" for height, block in blocks.items(): tx = n1.getblockheader(block.hash)["nTx"] - chain_tx = n1.getchaintxstats(nblocks=1, blockhash=block.hash)["txcount"] + stats = n1.getchaintxstats(nblocks=1, blockhash=block.hash) + chain_tx = stats.get("txcount", None) + window_tx_count = stats.get("window_tx_count", None) + tx_rate = stats.get("txrate", None) + window_interval = stats.get("window_interval") # Intermediate nTx of the starting block should be set, but nTx of # later blocks should be 0 before they are downloaded. + # The window_tx_count of one block is equal to the blocks tx count. + # If the window tx count is unknown, the value is missing. + # The tx_rate is calculated from window_tx_count and window_interval + # when possible. if final or height == START_HEIGHT: assert_equal(tx, block.tx) + assert_equal(window_tx_count, tx) + if window_interval > 0: + assert_approx(tx_rate, window_tx_count / window_interval, vspan=0.1) + else: + assert_equal(tx_rate, None) else: assert_equal(tx, 0) + assert_equal(window_tx_count, None) # Intermediate nChainTx of the starting block and snapshot block - # should be set, but others should be 0 until they are downloaded. + # should be set, but others should be None until they are downloaded. 
if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT): assert_equal(chain_tx, block.chain_tx) else: - assert_equal(chain_tx, 0) + assert_equal(chain_tx, None) check_tx_counts(final=False) @@ -450,12 +504,12 @@ class AssumeutxoTest(BitcoinTestFramework): assert_equal(snapshot['validated'], False) self.log.info("Check that loading the snapshot again will fail because there is already an active snapshot.") - with n2.assert_debug_log(expected_msgs=["[snapshot] can't activate a snapshot-based chainstate more than once"]): - assert_raises_rpc_error(-32603, "Unable to load UTXO snapshot", n2.loadtxoutset, dump_output['path']) + msg = "Unable to load UTXO snapshot: Can't activate a snapshot-based chainstate more than once" + assert_raises_rpc_error(-32603, msg, n2.loadtxoutset, dump_output['path']) self.connect_nodes(0, 2) self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT) - self.sync_blocks() + self.sync_blocks(nodes=(n0, n2)) self.log.info("Ensuring background validation completes") self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1) @@ -492,6 +546,8 @@ class AssumeutxoTest(BitcoinTestFramework): self.connect_nodes(0, 2) self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT) + self.test_snapshot_in_a_divergent_chain(dump_output['path']) + @dataclass class Block: hash: str diff --git a/test/functional/feature_bind_extra.py b/test/functional/feature_bind_extra.py index 5cd031f852..ed2328b76f 100755 --- a/test/functional/feature_bind_extra.py +++ b/test/functional/feature_bind_extra.py @@ -27,7 +27,7 @@ class BindExtraTest(BitcoinTestFramework): # Avoid any -bind= on the command line. Force the framework to avoid # adding -bind=127.0.0.1. self.bind_to_localhost_only = False - self.num_nodes = 2 + self.num_nodes = 3 def skip_test_if_missing_module(self): # Due to OS-specific network stats queries, we only run on Linux. @@ -60,14 +60,21 @@ class BindExtraTest(BitcoinTestFramework): ) port += 2 + # Node2, no -bind=...=onion, thus no extra port for Tor target. + self.expected.append( + [ + [f"-bind=127.0.0.1:{port}"], + [(loopback_ipv4, port)] + ], + ) + port += 1 + self.extra_args = list(map(lambda e: e[0], self.expected)) - self.add_nodes(self.num_nodes, self.extra_args) - # Don't start the nodes, as some of them would collide trying to bind on the same port. + self.setup_nodes() def run_test(self): - for i in range(len(self.expected)): - self.log.info(f"Starting node {i} with {self.expected[i][0]}") - self.start_node(i) + for i, (args, expected_services) in enumerate(self.expected): + self.log.info(f"Checking listening ports of node {i} with {args}") pid = self.nodes[i].process.pid binds = set(get_bind_addrs(pid)) # Remove IPv6 addresses because on some CI environments "::1" is not configured @@ -78,9 +85,7 @@ class BindExtraTest(BitcoinTestFramework): binds = set(filter(lambda e: len(e[0]) != ipv6_addr_len_bytes, binds)) # Remove RPC ports. They are not relevant for this test. 
binds = set(filter(lambda e: e[1] != rpc_port(i), binds)) - assert_equal(binds, set(self.expected[i][1])) - self.stop_node(i) - self.log.info(f"Stopped node {i}") + assert_equal(binds, set(expected_services)) if __name__ == '__main__': BindExtraTest().main() diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py index d6c1567e64..691163d053 100755 --- a/test/functional/feature_coinstatsindex.py +++ b/test/functional/feature_coinstatsindex.py @@ -242,6 +242,9 @@ class CoinStatsIndexTest(BitcoinTestFramework): res12 = index_node.gettxoutsetinfo('muhash') assert_equal(res12, res10) + self.log.info("Test obtaining info for a non-existent block hash") + assert_raises_rpc_error(-5, "Block not found", index_node.gettxoutsetinfo, hash_type="none", hash_or_height="ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", use_index=True) + def _test_use_index_option(self): self.log.info("Test use_index option for nodes running the index") diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 4b548ef0f3..5f99b8dee8 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -25,6 +25,7 @@ from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, + try_rpc, ) # Rescans start at the earliest block up to 2 hours before a key timestamp, so @@ -479,8 +480,12 @@ class PruneTest(BitcoinTestFramework): self.log.info("Test invalid pruning command line options") self.test_invalid_command_line_options() + self.log.info("Test scanblocks can not return pruned data") self.test_scanblocks_pruned() + self.log.info("Test pruneheight reflects the presence of block and undo data") + self.test_pruneheight_undo_presence() + self.log.info("Done") def test_scanblocks_pruned(self): @@ -494,5 +499,18 @@ class PruneTest(BitcoinTestFramework): assert_raises_rpc_error(-1, "Block not available (pruned data)", node.scanblocks, "start", [{"desc": f"raw({false_positive_spk.hex()})"}], 0, 0, "basic", {"filter_false_positives": True}) + def test_pruneheight_undo_presence(self): + node = self.nodes[2] + pruneheight = node.getblockchaininfo()["pruneheight"] + fetch_block = node.getblockhash(pruneheight - 1) + + self.connect_nodes(1, 2) + peers = node.getpeerinfo() + node.getblockfrompeer(fetch_block, peers[0]["id"]) + self.wait_until(lambda: not try_rpc(-1, "Block not available (pruned data)", node.getblock, fetch_block), timeout=5) + + new_pruneheight = node.getblockchaininfo()["pruneheight"] + assert_equal(pruneheight, new_pruneheight) + if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/mempool_package_rbf.py b/test/functional/mempool_package_rbf.py index ceb9530394..e9658aa8d0 100755 --- a/test/functional/mempool_package_rbf.py +++ b/test/functional/mempool_package_rbf.py @@ -168,11 +168,20 @@ class PackageRBFTest(BitcoinTestFramework): self.assert_mempool_contents(expected=package_txns1) self.log.info("Check replacement pays for incremental bandwidth") - package_hex3, package_txns3 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE) - pkg_results3 = node.submitpackage(package_hex3) - assert_equal(f"package RBF failed: insufficient anti-DoS fees, rejecting replacement {package_txns3[1].rehash()}, not enough additional fees to relay; 0.00 < 0.00000{sum([tx.get_vsize() for tx in package_txns3])}", pkg_results3["package_msg"]) - + _, placeholder_txns3 = self.create_simple_package(coin) + package_3_size = 
sum([tx.get_vsize() for tx in placeholder_txns3]) + incremental_sats_required = Decimal(package_3_size) / COIN + incremental_sats_short = incremental_sats_required - Decimal("0.00000001") + # Recreate the package with slightly higher fee once we know the size of the new package, but still short of required fee + failure_package_hex3, failure_package_txns3 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE + incremental_sats_short) + assert_equal(package_3_size, sum([tx.get_vsize() for tx in failure_package_txns3])) + pkg_results3 = node.submitpackage(failure_package_hex3) + assert_equal(f"package RBF failed: insufficient anti-DoS fees, rejecting replacement {failure_package_txns3[1].rehash()}, not enough additional fees to relay; {incremental_sats_short} < {incremental_sats_required}", pkg_results3["package_msg"]) self.assert_mempool_contents(expected=package_txns1) + + success_package_hex3, success_package_txns3 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE + incremental_sats_required) + node.submitpackage(success_package_hex3) + self.assert_mempool_contents(expected=success_package_txns3) self.generate(node, 1) self.log.info("Check Package RBF must have strict cpfp structure") @@ -180,11 +189,14 @@ class PackageRBFTest(BitcoinTestFramework): package_hex4, package_txns4 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE) node.submitpackage(package_hex4) self.assert_mempool_contents(expected=package_txns4) - package_hex5, package_txns5 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_CHILD_FEE - Decimal("0.00000001")) + package_hex5, package_txns5 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_CHILD_FEE) pkg_results5 = node.submitpackage(package_hex5) - assert 'package RBF failed: package feerate is less than parent feerate' in pkg_results5["package_msg"] - + assert 'package RBF failed: package feerate is less than or equal to parent feerate' in pkg_results5["package_msg"] self.assert_mempool_contents(expected=package_txns4) + + package_hex5_1, package_txns5_1 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_CHILD_FEE + Decimal("0.00000001")) + node.submitpackage(package_hex5_1) + self.assert_mempool_contents(expected=package_txns5_1) self.generate(node, 1) def test_package_rbf_max_conflicts(self): diff --git a/test/functional/mempool_accept_v3.py b/test/functional/mempool_truc.py index d4a33c232e..e1f3d77201 100755 --- a/test/functional/mempool_accept_v3.py +++ b/test/functional/mempool_truc.py @@ -22,7 +22,7 @@ from test_framework.wallet import ( ) MAX_REPLACEMENT_CANDIDATES = 100 -V3_MAX_VSIZE = 10000 +TRUC_MAX_VSIZE = 10000 def cleanup(extra_args=None): def decorator(func): @@ -39,7 +39,7 @@ def cleanup(extra_args=None): return wrapper return decorator -class MempoolAcceptV3(BitcoinTestFramework): +class MempoolTRUC(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [[]] @@ -52,23 +52,23 @@ class MempoolAcceptV3(BitcoinTestFramework): assert all([txid in txids for txid in mempool_contents]) @cleanup(extra_args=["-datacarriersize=20000"]) - def test_v3_max_vsize(self): + def test_truc_max_vsize(self): node = self.nodes[0] - self.log.info("Test v3-specific maximum transaction vsize") - tx_v3_heavy = self.wallet.create_self_transfer(target_weight=(V3_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR, version=3) - 
assert_greater_than_or_equal(tx_v3_heavy["tx"].get_vsize(), V3_MAX_VSIZE) - expected_error_heavy = f"v3-rule-violation, v3 tx {tx_v3_heavy['txid']} (wtxid={tx_v3_heavy['wtxid']}) is too big" + self.log.info("Test TRUC-specific maximum transaction vsize") + tx_v3_heavy = self.wallet.create_self_transfer(target_weight=(TRUC_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR, version=3) + assert_greater_than_or_equal(tx_v3_heavy["tx"].get_vsize(), TRUC_MAX_VSIZE) + expected_error_heavy = f"TRUC-violation, version=3 tx {tx_v3_heavy['txid']} (wtxid={tx_v3_heavy['wtxid']}) is too big" assert_raises_rpc_error(-26, expected_error_heavy, node.sendrawtransaction, tx_v3_heavy["hex"]) self.check_mempool([]) - # Ensure we are hitting the v3-specific limit and not something else - tx_v2_heavy = self.wallet.send_self_transfer(from_node=node, target_weight=(V3_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR, version=2) + # Ensure we are hitting the TRUC-specific limit and not something else + tx_v2_heavy = self.wallet.send_self_transfer(from_node=node, target_weight=(TRUC_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR, version=2) self.check_mempool([tx_v2_heavy["txid"]]) @cleanup(extra_args=["-datacarriersize=1000"]) - def test_v3_acceptance(self): + def test_truc_acceptance(self): node = self.nodes[0] - self.log.info("Test a child of a v3 transaction cannot be more than 1000vB") + self.log.info("Test a child of a TRUC transaction cannot be more than 1000vB") tx_v3_parent_normal = self.wallet.send_self_transfer(from_node=node, version=3) self.check_mempool([tx_v3_parent_normal["txid"]]) tx_v3_child_heavy = self.wallet.create_self_transfer( @@ -77,13 +77,13 @@ class MempoolAcceptV3(BitcoinTestFramework): version=3 ) assert_greater_than_or_equal(tx_v3_child_heavy["tx"].get_vsize(), 1000) - expected_error_child_heavy = f"v3-rule-violation, v3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big" + expected_error_child_heavy = f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big" assert_raises_rpc_error(-26, expected_error_child_heavy, node.sendrawtransaction, tx_v3_child_heavy["hex"]) self.check_mempool([tx_v3_parent_normal["txid"]]) # tx has no descendants assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 1) - self.log.info("Test that, during replacements, only the new transaction counts for v3 descendant limit") + self.log.info("Test that, during replacements, only the new transaction counts for TRUC descendant limit") tx_v3_child_almost_heavy = self.wallet.send_self_transfer( from_node=node, fee_rate=DEFAULT_FEE, @@ -106,9 +106,9 @@ class MempoolAcceptV3(BitcoinTestFramework): assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2) @cleanup(extra_args=None) - def test_v3_replacement(self): + def test_truc_replacement(self): node = self.nodes[0] - self.log.info("Test v3 transactions may be replaced by v3 transactions") + self.log.info("Test TRUC transactions may be replaced by TRUC transactions") utxo_v3_bip125 = self.wallet.get_utxo() tx_v3_bip125 = self.wallet.send_self_transfer( from_node=node, @@ -127,7 +127,7 @@ class MempoolAcceptV3(BitcoinTestFramework): ) self.check_mempool([tx_v3_bip125_rbf["txid"]]) - self.log.info("Test v3 transactions may be replaced by V2 transactions") + self.log.info("Test TRUC transactions may be replaced by non-TRUC (BIP125) transactions") tx_v3_bip125_rbf_v2 = self.wallet.send_self_transfer( from_node=node, fee_rate=DEFAULT_FEE * 3, @@ -136,7 +136,7 
@@ class MempoolAcceptV3(BitcoinTestFramework): ) self.check_mempool([tx_v3_bip125_rbf_v2["txid"]]) - self.log.info("Test that replacements cannot cause violation of inherited v3") + self.log.info("Test that replacements cannot cause violation of inherited TRUC") utxo_v3_parent = self.wallet.get_utxo() tx_v3_parent = self.wallet.send_self_transfer( from_node=node, @@ -157,15 +157,15 @@ class MempoolAcceptV3(BitcoinTestFramework): utxo_to_spend=tx_v3_parent["new_utxo"], version=2 ) - expected_error_v2_v3 = f"v3-rule-violation, non-v3 tx {tx_v3_child_rbf_v2['txid']} (wtxid={tx_v3_child_rbf_v2['wtxid']}) cannot spend from v3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})" + expected_error_v2_v3 = f"TRUC-violation, non-version=3 tx {tx_v3_child_rbf_v2['txid']} (wtxid={tx_v3_child_rbf_v2['wtxid']}) cannot spend from version=3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})" assert_raises_rpc_error(-26, expected_error_v2_v3, node.sendrawtransaction, tx_v3_child_rbf_v2["hex"]) self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]]) @cleanup(extra_args=None) - def test_v3_bip125(self): + def test_truc_bip125(self): node = self.nodes[0] - self.log.info("Test v3 transactions that don't signal BIP125 are replaceable") + self.log.info("Test TRUC transactions that don't signal BIP125 are replaceable") assert_equal(node.getmempoolinfo()["fullrbf"], False) utxo_v3_no_bip125 = self.wallet.get_utxo() tx_v3_no_bip125 = self.wallet.send_self_transfer( @@ -187,9 +187,9 @@ class MempoolAcceptV3(BitcoinTestFramework): self.check_mempool([tx_v3_no_bip125_rbf["txid"]]) @cleanup(extra_args=["-datacarriersize=40000"]) - def test_v3_reorg(self): + def test_truc_reorg(self): node = self.nodes[0] - self.log.info("Test that, during a reorg, v3 rules are not enforced") + self.log.info("Test that, during a reorg, TRUC rules are not enforced") tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2) tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3) tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3) @@ -211,12 +211,12 @@ class MempoolAcceptV3(BitcoinTestFramework): @cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000"]) def test_nondefault_package_limits(self): """ - Max standard tx size + v3 rules imply the ancestor/descendant rules (at their default + Max standard tx size + TRUC rules imply the ancestor/descendant rules (at their default values), but those checks must not be skipped. Ensure both sets of checks are done by changing the ancestor/descendant limit configurations. 
""" node = self.nodes[0] - self.log.info("Test that a decreased limitdescendantsize also applies to v3 child") + self.log.info("Test that a decreased limitdescendantsize also applies to TRUC child") parent_target_weight = 9990 * WITNESS_SCALE_FACTOR child_target_weight = 500 * WITNESS_SCALE_FACTOR tx_v3_parent_large1 = self.wallet.send_self_transfer( @@ -231,7 +231,7 @@ class MempoolAcceptV3(BitcoinTestFramework): ) # Parent and child are within v3 limits, but parent's 10kvB descendant limit is exceeded - assert_greater_than_or_equal(V3_MAX_VSIZE, tx_v3_parent_large1["tx"].get_vsize()) + assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large1["tx"].get_vsize()) assert_greater_than_or_equal(1000, tx_v3_child_large1["tx"].get_vsize()) assert_greater_than(tx_v3_parent_large1["tx"].get_vsize() + tx_v3_child_large1["tx"].get_vsize(), 10000) @@ -253,8 +253,8 @@ class MempoolAcceptV3(BitcoinTestFramework): version=3 ) - # Parent and child are within v3 limits - assert_greater_than_or_equal(V3_MAX_VSIZE, tx_v3_parent_large2["tx"].get_vsize()) + # Parent and child are within TRUC limits + assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large2["tx"].get_vsize()) assert_greater_than_or_equal(1000, tx_v3_child_large2["tx"].get_vsize()) assert_greater_than(tx_v3_parent_large2["tx"].get_vsize() + tx_v3_child_large2["tx"].get_vsize(), 10000) @@ -262,8 +262,8 @@ class MempoolAcceptV3(BitcoinTestFramework): self.check_mempool([tx_v3_parent_large2["txid"]]) @cleanup(extra_args=["-datacarriersize=1000"]) - def test_v3_ancestors_package(self): - self.log.info("Test that v3 ancestor limits are checked within the package") + def test_truc_ancestors_package(self): + self.log.info("Test that TRUC ancestor limits are checked within the package") node = self.nodes[0] tx_v3_parent_normal = self.wallet.create_self_transfer( fee_rate=0, @@ -289,34 +289,34 @@ class MempoolAcceptV3(BitcoinTestFramework): self.check_mempool([]) result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_parent_2_normal["hex"], tx_v3_child_multiparent["hex"]]) - assert_equal(result['package_msg'], f"v3-violation, tx {tx_v3_child_multiparent['txid']} (wtxid={tx_v3_child_multiparent['wtxid']}) would have too many ancestors") + assert_equal(result['package_msg'], f"TRUC-violation, tx {tx_v3_child_multiparent['txid']} (wtxid={tx_v3_child_multiparent['wtxid']}) would have too many ancestors") self.check_mempool([]) self.check_mempool([]) result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_child_heavy["hex"]]) # tx_v3_child_heavy is heavy based on weight, not sigops. 
- assert_equal(result['package_msg'], f"v3-violation, v3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big: {tx_v3_child_heavy['tx'].get_vsize()} > 1000 virtual bytes") + assert_equal(result['package_msg'], f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big: {tx_v3_child_heavy['tx'].get_vsize()} > 1000 virtual bytes") self.check_mempool([]) tx_v3_parent = self.wallet.create_self_transfer(version=3) tx_v3_child = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxo"], version=3) tx_v3_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_child["new_utxo"], version=3) result = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child["hex"], tx_v3_grandchild["hex"]]) - assert all([txresult["package-error"] == f"v3-violation, tx {tx_v3_grandchild['txid']} (wtxid={tx_v3_grandchild['wtxid']}) would have too many ancestors" for txresult in result]) + assert all([txresult["package-error"] == f"TRUC-violation, tx {tx_v3_grandchild['txid']} (wtxid={tx_v3_grandchild['wtxid']}) would have too many ancestors" for txresult in result]) @cleanup(extra_args=None) - def test_v3_ancestors_package_and_mempool(self): + def test_truc_ancestors_package_and_mempool(self): """ - A v3 transaction in a package cannot have 2 v3 parents. + A TRUC transaction in a package cannot have 2 TRUC parents. Test that if we have a transaction graph A -> B -> C, where A, B, C are - all v3 transactions, that we cannot use submitpackage to get the + all TRUC transactions, that we cannot use submitpackage to get the transactions all into the mempool. Verify, in particular, that if A is already in the mempool, then submitpackage(B, C) will fail. """ node = self.nodes[0] - self.log.info("Test that v3 ancestor limits include transactions within the package and all in-mempool ancestors") + self.log.info("Test that TRUC ancestor limits include transactions within the package and all in-mempool ancestors") # This is our transaction "A": tx_in_mempool = self.wallet.send_self_transfer(from_node=node, version=3) @@ -331,7 +331,7 @@ class MempoolAcceptV3(BitcoinTestFramework): # submitpackage(B, C) should fail result = node.submitpackage([tx_0fee_parent["hex"], tx_child_violator["hex"]]) - assert_equal(result['package_msg'], f"v3-violation, tx {tx_child_violator['txid']} (wtxid={tx_child_violator['wtxid']}) would have too many ancestors") + assert_equal(result['package_msg'], f"TRUC-violation, tx {tx_child_violator['txid']} (wtxid={tx_child_violator['wtxid']}) would have too many ancestors") self.check_mempool([tx_in_mempool["txid"]]) @cleanup(extra_args=None) @@ -341,7 +341,7 @@ class MempoolAcceptV3(BitcoinTestFramework): However, this option is only available in single transaction acceptance. It doesn't work in a multi-testmempoolaccept (where RBF is disabled) or when doing package CPFP. 
""" - self.log.info("Test v3 sibling eviction in submitpackage and multi-testmempoolaccept") + self.log.info("Test TRUC sibling eviction in submitpackage and multi-testmempoolaccept") node = self.nodes[0] # Add a parent + child to mempool tx_mempool_parent = self.wallet.send_self_transfer_multi( @@ -384,17 +384,17 @@ class MempoolAcceptV3(BitcoinTestFramework): # Fails with another non-related transaction via testmempoolaccept tx_unrelated = self.wallet.create_self_transfer(version=3) result_test_unrelated = node.testmempoolaccept([tx_sibling_1["hex"], tx_unrelated["hex"]]) - assert_equal(result_test_unrelated[0]["reject-reason"], "v3-rule-violation") + assert_equal(result_test_unrelated[0]["reject-reason"], "TRUC-violation") # Fails in a package via testmempoolaccept result_test_1p1c = node.testmempoolaccept([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]]) - assert_equal(result_test_1p1c[0]["reject-reason"], "v3-rule-violation") + assert_equal(result_test_1p1c[0]["reject-reason"], "TRUC-violation") # Allowed when tx is submitted in a package and evaluated individually. # Note that the child failed since it would be the 3rd generation. result_package_indiv = node.submitpackage([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]]) self.check_mempool([tx_mempool_parent["txid"], tx_sibling_1["txid"]]) - expected_error_gen3 = f"v3-rule-violation, tx {tx_has_mempool_uncle['txid']} (wtxid={tx_has_mempool_uncle['wtxid']}) would have too many ancestors" + expected_error_gen3 = f"TRUC-violation, tx {tx_has_mempool_uncle['txid']} (wtxid={tx_has_mempool_uncle['wtxid']}) would have too many ancestors" assert_equal(result_package_indiv["tx-results"][tx_has_mempool_uncle['wtxid']]['error'], expected_error_gen3) @@ -402,17 +402,17 @@ class MempoolAcceptV3(BitcoinTestFramework): node.submitpackage([tx_mempool_parent["hex"], tx_sibling_2["hex"]]) self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]]) - # Child cannot pay for sibling eviction for parent, as it violates v3 topology limits + # Child cannot pay for sibling eviction for parent, as it violates TRUC topology limits result_package_cpfp = node.submitpackage([tx_sibling_3["hex"], tx_bumps_parent_with_sibling["hex"]]) self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]]) - expected_error_cpfp = f"v3-rule-violation, tx {tx_mempool_parent['txid']} (wtxid={tx_mempool_parent['wtxid']}) would exceed descendant count limit" + expected_error_cpfp = f"TRUC-violation, tx {tx_mempool_parent['txid']} (wtxid={tx_mempool_parent['wtxid']}) would exceed descendant count limit" assert_equal(result_package_cpfp["tx-results"][tx_sibling_3['wtxid']]['error'], expected_error_cpfp) @cleanup(extra_args=["-datacarriersize=1000"]) - def test_v3_package_inheritance(self): - self.log.info("Test that v3 inheritance is checked within package") + def test_truc_package_inheritance(self): + self.log.info("Test that TRUC inheritance is checked within package") node = self.nodes[0] tx_v3_parent = self.wallet.create_self_transfer( fee_rate=0, @@ -426,14 +426,14 @@ class MempoolAcceptV3(BitcoinTestFramework): ) self.check_mempool([]) result = node.submitpackage([tx_v3_parent["hex"], tx_v2_child["hex"]]) - assert_equal(result['package_msg'], f"v3-violation, non-v3 tx {tx_v2_child['txid']} (wtxid={tx_v2_child['wtxid']}) cannot spend from v3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})") + assert_equal(result['package_msg'], f"TRUC-violation, non-version=3 tx {tx_v2_child['txid']} (wtxid={tx_v2_child['wtxid']}) cannot spend from version=3 tx 
{tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})") self.check_mempool([]) @cleanup(extra_args=None) - def test_v3_in_testmempoolaccept(self): + def test_truc_in_testmempoolaccept(self): node = self.nodes[0] - self.log.info("Test that v3 inheritance is accurately assessed in testmempoolaccept") + self.log.info("Test that TRUC inheritance is accurately assessed in testmempoolaccept") tx_v2 = self.wallet.create_self_transfer(version=2) tx_v2_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=2) tx_v3_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=3) @@ -447,11 +447,11 @@ class MempoolAcceptV3(BitcoinTestFramework): assert all([result["allowed"] for result in test_accept_v2_and_v3]) test_accept_v3_from_v2 = node.testmempoolaccept([tx_v2["hex"], tx_v3_from_v2["hex"]]) - expected_error_v3_from_v2 = f"v3-violation, v3 tx {tx_v3_from_v2['txid']} (wtxid={tx_v3_from_v2['wtxid']}) cannot spend from non-v3 tx {tx_v2['txid']} (wtxid={tx_v2['wtxid']})" + expected_error_v3_from_v2 = f"TRUC-violation, version=3 tx {tx_v3_from_v2['txid']} (wtxid={tx_v3_from_v2['wtxid']}) cannot spend from non-version=3 tx {tx_v2['txid']} (wtxid={tx_v2['wtxid']})" assert all([result["package-error"] == expected_error_v3_from_v2 for result in test_accept_v3_from_v2]) test_accept_v2_from_v3 = node.testmempoolaccept([tx_v3["hex"], tx_v2_from_v3["hex"]]) - expected_error_v2_from_v3 = f"v3-violation, non-v3 tx {tx_v2_from_v3['txid']} (wtxid={tx_v2_from_v3['wtxid']}) cannot spend from v3 tx {tx_v3['txid']} (wtxid={tx_v3['wtxid']})" + expected_error_v2_from_v3 = f"TRUC-violation, non-version=3 tx {tx_v2_from_v3['txid']} (wtxid={tx_v2_from_v3['wtxid']}) cannot spend from version=3 tx {tx_v3['txid']} (wtxid={tx_v3['wtxid']})" assert all([result["package-error"] == expected_error_v2_from_v3 for result in test_accept_v2_from_v3]) test_accept_pairs = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"], tx_v2_from_v2["hex"], tx_v3_from_v3["hex"]]) @@ -463,16 +463,16 @@ class MempoolAcceptV3(BitcoinTestFramework): tx_v3_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][0], version=3) tx_v3_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][1], version=3) test_accept_2children = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"]]) - expected_error_2children = f"v3-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit" + expected_error_2children = f"TRUC-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit" assert all([result["package-error"] == expected_error_2children for result in test_accept_2children]) - # Extra v3 transaction does not get incorrectly marked as extra descendant + # Extra TRUC transaction does not get incorrectly marked as extra descendant test_accept_1child_with_exra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_independent["hex"]]) assert all([result["allowed"] for result in test_accept_1child_with_exra]) - # Extra v3 transaction does not make us ignore the extra descendant + # Extra TRUC transaction does not make us ignore the extra descendant test_accept_2children_with_exra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"], tx_v3_independent["hex"]]) - expected_error_extra = f"v3-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant 
count limit" + expected_error_extra = f"TRUC-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit" assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_exra]) # Same result if the parent is already in mempool node.sendrawtransaction(tx_v3_parent["hex"]) @@ -482,7 +482,7 @@ class MempoolAcceptV3(BitcoinTestFramework): @cleanup(extra_args=None) def test_reorg_2child_rbf(self): node = self.nodes[0] - self.log.info("Test that children of a v3 transaction can be replaced individually, even if there are multiple due to reorg") + self.log.info("Test that children of a TRUC transaction can be replaced individually, even if there are multiple due to reorg") ancestor_tx = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3) self.check_mempool([ancestor_tx["txid"]]) @@ -511,8 +511,8 @@ class MempoolAcceptV3(BitcoinTestFramework): assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3) @cleanup(extra_args=None) - def test_v3_sibling_eviction(self): - self.log.info("Test sibling eviction for v3") + def test_truc_sibling_eviction(self): + self.log.info("Test sibling eviction for TRUC") node = self.nodes[0] tx_v3_parent = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3) # This is the sibling to replace @@ -609,7 +609,7 @@ class MempoolAcceptV3(BitcoinTestFramework): utxo_to_spend=tx_with_multi_children["new_utxos"][2], fee_rate=DEFAULT_FEE*50 ) - expected_error_2siblings = f"v3-rule-violation, tx {tx_with_multi_children['txid']} (wtxid={tx_with_multi_children['wtxid']}) would exceed descendant count limit" + expected_error_2siblings = f"TRUC-violation, tx {tx_with_multi_children['txid']} (wtxid={tx_with_multi_children['wtxid']}) would exceed descendant count limit" assert_raises_rpc_error(-26, expected_error_2siblings, node.sendrawtransaction, tx_with_sibling3["hex"]) # However, an RBF (with conflicting inputs) is possible even if the resulting cluster size exceeds 2 @@ -627,21 +627,21 @@ class MempoolAcceptV3(BitcoinTestFramework): node = self.nodes[0] self.wallet = MiniWallet(node) self.generate(self.wallet, 120) - self.test_v3_max_vsize() - self.test_v3_acceptance() - self.test_v3_replacement() - self.test_v3_bip125() - self.test_v3_reorg() + self.test_truc_max_vsize() + self.test_truc_acceptance() + self.test_truc_replacement() + self.test_truc_bip125() + self.test_truc_reorg() self.test_nondefault_package_limits() - self.test_v3_ancestors_package() - self.test_v3_ancestors_package_and_mempool() + self.test_truc_ancestors_package() + self.test_truc_ancestors_package_and_mempool() self.test_sibling_eviction_package() - self.test_v3_package_inheritance() - self.test_v3_in_testmempoolaccept() + self.test_truc_package_inheritance() + self.test_truc_in_testmempoolaccept() self.test_reorg_2child_rbf() - self.test_v3_sibling_eviction() + self.test_truc_sibling_eviction() self.test_reorg_sibling_eviction_1p2c() if __name__ == "__main__": - MempoolAcceptV3().main() + MempoolTRUC().main() diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index dd19fe9333..9536e74893 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -17,6 +17,7 @@ from test_framework.messages import ( NODE_WITNESS, ) from test_framework.p2p import P2PInterface +from test_framework.util import p2p_port # Desirable service flags for outbound non-pruned and pruned peers. 
Note that @@ -88,6 +89,12 @@ class P2PHandshakeTest(BitcoinTestFramework): with node.assert_debug_log([f"feeler connection completed"]): self.add_outbound_connection(node, "feeler", NODE_NONE, wait_for_disconnect=True) + self.log.info("Check that connecting to ourself leads to immediate disconnect") + with node.assert_debug_log(["connected to self", "disconnecting"]): + node_listen_addr = f"127.0.0.1:{p2p_port(0)}" + node.addconnection(node_listen_addr, "outbound-full-relay", self.options.v2transport) + self.wait_until(lambda: len(node.getpeerinfo()) == 0) + if __name__ == '__main__': P2PHandshakeTest().main() diff --git a/test/functional/p2p_v2_earlykeyresponse.py b/test/functional/p2p_v2_earlykeyresponse.py deleted file mode 100755 index 32d2e1148a..0000000000 --- a/test/functional/p2p_v2_earlykeyresponse.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -import random - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.crypto.ellswift import ellswift_create -from test_framework.p2p import P2PInterface -from test_framework.v2_p2p import EncryptedP2PState - - -class TestEncryptedP2PState(EncryptedP2PState): - """ Modify v2 P2P protocol functions for testing that "The responder waits until one byte is received which does - not match the 16 bytes consisting of the network magic followed by "version\x00\x00\x00\x00\x00"." (see BIP 324) - - - if `send_net_magic` is True, send first 4 bytes of ellswift (match network magic) else send remaining 60 bytes - - `can_data_be_received` is a variable used to assert if data is received on recvbuf. - - v2 TestNode shouldn't respond back if we send V1_PREFIX and data shouldn't be received on recvbuf. - This state is represented using `can_data_be_received` = False. - - v2 TestNode responds back when mismatch from V1_PREFIX happens and data can be received on recvbuf. - This state is represented using `can_data_be_received` = True. - """ - - def __init__(self): - super().__init__(initiating=True, net='regtest') - self.send_net_magic = True - self.can_data_be_received = False - - def initiate_v2_handshake(self, garbage_len=random.randrange(4096)): - """Initiator begins the v2 handshake by sending its ellswift bytes and garbage. - Here, the 64 bytes ellswift is assumed to have it's 4 bytes match network magic bytes. It is sent in 2 phases: - 1. when `send_network_magic` = True, send first 4 bytes of ellswift (matches network magic bytes) - 2. 
when `send_network_magic` = False, send remaining 60 bytes of ellswift - """ - if self.send_net_magic: - self.privkey_ours, self.ellswift_ours = ellswift_create() - self.sent_garbage = random.randbytes(garbage_len) - self.send_net_magic = False - return b"\xfa\xbf\xb5\xda" - else: - self.can_data_be_received = True - return self.ellswift_ours[4:] + self.sent_garbage - - -class PeerEarlyKey(P2PInterface): - """Custom implementation of P2PInterface which uses modified v2 P2P protocol functions for testing purposes.""" - def __init__(self): - super().__init__() - self.v2_state = None - self.connection_opened = False - - def connection_made(self, transport): - """64 bytes ellswift is sent in 2 parts during `initial_v2_handshake()`""" - self.v2_state = TestEncryptedP2PState() - super().connection_made(transport) - - def data_received(self, t): - # check that data can be received on recvbuf only when mismatch from V1_PREFIX happens (send_net_magic = False) - assert self.v2_state.can_data_be_received and not self.v2_state.send_net_magic - - def on_open(self): - self.connection_opened = True - -class P2PEarlyKey(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [["-v2transport=1", "-peertimeout=3"]] - - def run_test(self): - self.log.info('Sending ellswift bytes in parts to ensure that response from responder is received only when') - self.log.info('ellswift bytes have a mismatch from the 16 bytes(network magic followed by "version\\x00\\x00\\x00\\x00\\x00")') - node0 = self.nodes[0] - self.log.info('Sending first 4 bytes of ellswift which match network magic') - self.log.info('If a response is received, assertion failure would happen in our custom data_received() function') - # send happens in `initiate_v2_handshake()` in `connection_made()` - peer1 = node0.add_p2p_connection(PeerEarlyKey(), wait_for_verack=False, send_version=False, supports_v2_p2p=True, wait_for_v2_handshake=False) - self.wait_until(lambda: peer1.connection_opened) - self.log.info('Sending remaining ellswift and garbage which are different from V1_PREFIX. Since a response is') - self.log.info('expected now, our custom data_received() function wouldn\'t result in assertion failure') - ellswift_and_garbage_data = peer1.v2_state.initiate_v2_handshake() - peer1.send_raw_message(ellswift_and_garbage_data) - peer1.wait_for_disconnect(timeout=5) - self.log.info('successful disconnection when MITM happens in the key exchange phase') - - -if __name__ == '__main__': - P2PEarlyKey().main() diff --git a/test/functional/p2p_v2_misbehaving.py b/test/functional/p2p_v2_misbehaving.py new file mode 100755 index 0000000000..0789425bcb --- /dev/null +++ b/test/functional/p2p_v2_misbehaving.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import random +import time +from enum import Enum + +from test_framework.messages import MAGIC_BYTES +from test_framework.p2p import P2PInterface +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import random_bitflip +from test_framework.v2_p2p import ( + EncryptedP2PState, + MAX_GARBAGE_LEN, +) + + +class TestType(Enum): + """ Scenarios to be tested: + + 1. 
EARLY_KEY_RESPONSE - The responder needs to wait until one byte is received which does not match the 16 bytes + consisting of network magic followed by "version\x00\x00\x00\x00\x00" before sending out its ellswift + garbage bytes + 2. EXCESS_GARBAGE - Disconnection happens when > MAX_GARBAGE_LEN bytes garbage is sent + 3. WRONG_GARBAGE_TERMINATOR - Disconnection happens when incorrect garbage terminator is sent + 4. WRONG_GARBAGE - Disconnection happens when garbage bytes that is sent is different from what the peer receives + 5. SEND_NO_AAD - Disconnection happens when AAD of first encrypted packet after the garbage terminator is not filled + 6. SEND_NON_EMPTY_VERSION_PACKET - non-empty version packet is simply ignored + """ + EARLY_KEY_RESPONSE = 0 + EXCESS_GARBAGE = 1 + WRONG_GARBAGE_TERMINATOR = 2 + WRONG_GARBAGE = 3 + SEND_NO_AAD = 4 + SEND_NON_EMPTY_VERSION_PACKET = 5 + + +class EarlyKeyResponseState(EncryptedP2PState): + """ Modify v2 P2P protocol functions for testing EARLY_KEY_RESPONSE scenario""" + def __init__(self, initiating, net): + super().__init__(initiating=initiating, net=net) + self.can_data_be_received = False # variable used to assert if data is received on recvbuf. + + def initiate_v2_handshake(self): + """Send ellswift and garbage bytes in 2 parts when TestType = (EARLY_KEY_RESPONSE)""" + self.generate_keypair_and_garbage() + return b"" + + +class ExcessGarbageState(EncryptedP2PState): + """Generate > MAX_GARBAGE_LEN garbage bytes""" + def generate_keypair_and_garbage(self): + garbage_len = MAX_GARBAGE_LEN + random.randrange(1, MAX_GARBAGE_LEN + 1) + return super().generate_keypair_and_garbage(garbage_len) + + +class WrongGarbageTerminatorState(EncryptedP2PState): + """Add option for sending wrong garbage terminator""" + def generate_keypair_and_garbage(self): + garbage_len = random.randrange(MAX_GARBAGE_LEN//2) + return super().generate_keypair_and_garbage(garbage_len) + + def complete_handshake(self, response): + length, handshake_bytes = super().complete_handshake(response) + # first 16 bytes returned by complete_handshake() is the garbage terminator + wrong_garbage_terminator = random_bitflip(handshake_bytes[:16]) + return length, wrong_garbage_terminator + handshake_bytes[16:] + + +class WrongGarbageState(EncryptedP2PState): + """Generate tampered garbage bytes""" + def generate_keypair_and_garbage(self): + garbage_len = random.randrange(1, MAX_GARBAGE_LEN) + ellswift_garbage_bytes = super().generate_keypair_and_garbage(garbage_len) + # assume that garbage bytes sent to TestNode were tampered with + return ellswift_garbage_bytes[:64] + random_bitflip(ellswift_garbage_bytes[64:]) + + +class NoAADState(EncryptedP2PState): + """Add option for not filling first encrypted packet after garbage terminator with AAD""" + def generate_keypair_and_garbage(self): + garbage_len = random.randrange(1, MAX_GARBAGE_LEN) + return super().generate_keypair_and_garbage(garbage_len) + + def complete_handshake(self, response): + self.sent_garbage = b'' # do not authenticate the garbage which is sent + return super().complete_handshake(response) + + +class NonEmptyVersionPacketState(EncryptedP2PState): + """"Add option for sending non-empty transport version packet.""" + def complete_handshake(self, response): + self.transport_version = random.randbytes(5) + return super().complete_handshake(response) + + +class MisbehavingV2Peer(P2PInterface): + """Custom implementation of P2PInterface which uses modified v2 P2P protocol functions for testing purposes.""" + def __init__(self, 
test_type): + super().__init__() + self.test_type = test_type + + def connection_made(self, transport): + if self.test_type == TestType.EARLY_KEY_RESPONSE: + self.v2_state = EarlyKeyResponseState(initiating=True, net='regtest') + elif self.test_type == TestType.EXCESS_GARBAGE: + self.v2_state = ExcessGarbageState(initiating=True, net='regtest') + elif self.test_type == TestType.WRONG_GARBAGE_TERMINATOR: + self.v2_state = WrongGarbageTerminatorState(initiating=True, net='regtest') + elif self.test_type == TestType.WRONG_GARBAGE: + self.v2_state = WrongGarbageState(initiating=True, net='regtest') + elif self.test_type == TestType.SEND_NO_AAD: + self.v2_state = NoAADState(initiating=True, net='regtest') + elif TestType.SEND_NON_EMPTY_VERSION_PACKET: + self.v2_state = NonEmptyVersionPacketState(initiating=True, net='regtest') + super().connection_made(transport) + + def data_received(self, t): + if self.test_type == TestType.EARLY_KEY_RESPONSE: + # check that data can be received on recvbuf only when mismatch from V1_PREFIX happens + assert self.v2_state.can_data_be_received + else: + super().data_received(t) + + +class EncryptedP2PMisbehaving(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [["-v2transport=1", "-peertimeout=3"]] + + def run_test(self): + self.test_earlykeyresponse() + self.test_v2disconnection() + + def test_earlykeyresponse(self): + self.log.info('Sending ellswift bytes in parts to ensure that response from responder is received only when') + self.log.info('ellswift bytes have a mismatch from the 16 bytes(network magic followed by "version\\x00\\x00\\x00\\x00\\x00")') + node0 = self.nodes[0] + node0.setmocktime(int(time.time())) + self.log.info('Sending first 4 bytes of ellswift which match network magic') + self.log.info('If a response is received, assertion failure would happen in our custom data_received() function') + peer1 = node0.add_p2p_connection(MisbehavingV2Peer(TestType.EARLY_KEY_RESPONSE), wait_for_verack=False, send_version=False, supports_v2_p2p=True, wait_for_v2_handshake=False) + peer1.send_raw_message(MAGIC_BYTES['regtest']) + self.log.info('Sending remaining ellswift and garbage which are different from V1_PREFIX. Since a response is') + self.log.info('expected now, our custom data_received() function wouldn\'t result in assertion failure') + peer1.v2_state.can_data_be_received = True + self.wait_until(lambda: peer1.v2_state.ellswift_ours) + peer1.send_raw_message(peer1.v2_state.ellswift_ours[4:] + peer1.v2_state.sent_garbage) + # Ensure that the bytes sent after 4 bytes network magic are actually received. 
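For orientation, a minimal standalone sketch of the 16-byte prefix that the EARLY_KEY_RESPONSE scenario above is probing: the responder must not reply until it has seen a byte that rules this prefix out, and the test first sends only its opening four bytes (the regtest network magic). The constant names below are illustrative and not taken from the framework; this is not part of the diff.

```python
# Hypothetical illustration of the v1 prefix the responder waits to rule out,
# per the BIP 324 note quoted in the docstrings above.
REGTEST_MAGIC = b"\xfa\xbf\xb5\xda"              # same four bytes the test sends first
V1_PREFIX = REGTEST_MAGIC + b"version" + b"\x00" * 5
assert len(V1_PREFIX) == 16                      # 4 magic bytes + 12-byte zero-padded "version" command
```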
+ self.wait_until(lambda: node0.getpeerinfo()[-1]["bytesrecv"] > 4) + self.wait_until(lambda: node0.getpeerinfo()[-1]["bytessent"] > 0) + with node0.assert_debug_log(['V2 handshake timeout peer=0']): + node0.bumpmocktime(4) # `InactivityCheck()` triggers now + peer1.wait_for_disconnect(timeout=1) + self.log.info('successful disconnection since modified ellswift was sent as response') + + def test_v2disconnection(self): + # test v2 disconnection scenarios + node0 = self.nodes[0] + expected_debug_message = [ + [], # EARLY_KEY_RESPONSE + ["V2 transport error: missing garbage terminator, peer=1"], # EXCESS_GARBAGE + ["V2 handshake timeout peer=3"], # WRONG_GARBAGE_TERMINATOR + ["V2 transport error: packet decryption failure"], # WRONG_GARBAGE + ["V2 transport error: packet decryption failure"], # SEND_NO_AAD + [], # SEND_NON_EMPTY_VERSION_PACKET + ] + for test_type in TestType: + if test_type == TestType.EARLY_KEY_RESPONSE: + continue + elif test_type == TestType.SEND_NON_EMPTY_VERSION_PACKET: + node0.add_p2p_connection(MisbehavingV2Peer(test_type), wait_for_verack=True, send_version=True, supports_v2_p2p=True) + self.log.info(f"No disconnection for {test_type.name}") + else: + with node0.assert_debug_log(expected_debug_message[test_type.value], timeout=5): + node0.setmocktime(int(time.time())) + peer1 = node0.add_p2p_connection(MisbehavingV2Peer(test_type), wait_for_verack=False, send_version=False, supports_v2_p2p=True, expect_success=False) + # Make a passing connection for more robust disconnection checking. + peer2 = node0.add_p2p_connection(P2PInterface()) + assert peer2.is_connected + node0.bumpmocktime(4) # `InactivityCheck()` triggers now + peer1.wait_for_disconnect() + self.log.info(f"Expected disconnection for {test_type.name}") + + +if __name__ == '__main__': + EncryptedP2PMisbehaving().main() diff --git a/test/functional/rpc_invalid_address_message.py b/test/functional/rpc_invalid_address_message.py index 6759b69dd1..33f12484ad 100755 --- a/test/functional/rpc_invalid_address_message.py +++ b/test/functional/rpc_invalid_address_message.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2020-2022 The Bitcoin Core developers +# Copyright (c) 2020-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test error messages for 'getaddressinfo' and 'validateaddress' RPC commands.""" @@ -12,6 +12,7 @@ from test_framework.util import ( ) BECH32_VALID = 'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv' +BECH32_VALID_UNKNOWN_WITNESS = 'bcrt1p424qxxyd0r' BECH32_VALID_CAPITALS = 'BCRT1QPLMTZKC2XHARPPZDLNPAQL78RSHJ68U33RAH7R' BECH32_VALID_MULTISIG = 'bcrt1qdg3myrgvzw7ml9q0ejxhlkyxm7vl9r56yzkfgvzclrf4hkpx9yfqhpsuks' @@ -80,6 +81,7 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework): # Valid Bech32 self.check_valid(BECH32_VALID) + self.check_valid(BECH32_VALID_UNKNOWN_WITNESS) self.check_valid(BECH32_VALID_CAPITALS) self.check_valid(BECH32_VALID_MULTISIG) @@ -109,6 +111,7 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework): assert_raises_rpc_error(-5, "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", node.getaddressinfo, BECH32_INVALID_PREFIX) assert_raises_rpc_error(-5, "Invalid or unsupported Base58-encoded address.", node.getaddressinfo, BASE58_INVALID_PREFIX) assert_raises_rpc_error(-5, "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", node.getaddressinfo, INVALID_ADDRESS) + assert "isscript" not in node.getaddressinfo(BECH32_VALID_UNKNOWN_WITNESS) def run_test(self): self.test_validateaddress() diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 2701d2471d..37e2c1fb71 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -237,28 +237,35 @@ class NetTest(BitcoinTestFramework): def test_addnode_getaddednodeinfo(self): self.log.info("Test addnode and getaddednodeinfo") assert_equal(self.nodes[0].getaddednodeinfo(), []) - # add a node (node2) to node0 + self.log.info("Add a node (node2) to node0") ip_port = "127.0.0.1:{}".format(p2p_port(2)) self.nodes[0].addnode(node=ip_port, command='add') - # try to add an equivalent ip - # (note that OpenBSD doesn't support the IPv4 shorthand notation with omitted zero-bytes) + self.log.info("Try to add an equivalent ip and check it fails") + self.log.debug("(note that OpenBSD doesn't support the IPv4 shorthand notation with omitted zero-bytes)") if platform.system() != "OpenBSD": ip_port2 = "127.1:{}".format(p2p_port(2)) assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port2, command='add') - # check that the node has indeed been added + self.log.info("Check that the node has indeed been added") added_nodes = self.nodes[0].getaddednodeinfo() assert_equal(len(added_nodes), 1) assert_equal(added_nodes[0]['addednode'], ip_port) - # check that node cannot be added again + self.log.info("Check that filtering by node works") + self.nodes[0].addnode(node="11.22.33.44", command='add') + first_added_node = self.nodes[0].getaddednodeinfo(node=ip_port) + assert_equal(added_nodes, first_added_node) + assert_equal(len(self.nodes[0].getaddednodeinfo()), 2) + self.log.info("Check that node cannot be added again") assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add') - # check that node can be removed + self.log.info("Check that node can be removed") self.nodes[0].addnode(node=ip_port, command='remove') - assert_equal(self.nodes[0].getaddednodeinfo(), []) - # check that an invalid command returns an error + added_nodes = self.nodes[0].getaddednodeinfo() + assert_equal(len(added_nodes), 1) + assert_equal(added_nodes[0]['addednode'], "11.22.33.44") + self.log.info("Check that an invalid command returns an error") assert_raises_rpc_error(-1, 'addnode "node" "command"', self.nodes[0].addnode, node=ip_port, 
command='abc') - # check that trying to remove the node again returns an error + self.log.info("Check that trying to remove the node again returns an error") assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove') - # check that a non-existent node returns an error + self.log.info("Check that a non-existent node returns an error") assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1') def test_service_flags(self): diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 111ca63618..8c3adff3cf 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -73,6 +73,28 @@ class PSBTTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() + def test_psbt_incomplete_after_invalid_modification(self): + self.log.info("Check that PSBT is correctly marked as incomplete after invalid modification") + node = self.nodes[2] + wallet = node.get_wallet_rpc(self.default_wallet_name) + address = wallet.getnewaddress() + wallet.sendtoaddress(address=address, amount=1.0) + self.generate(node, nblocks=1, sync_fun=lambda: self.sync_all(self.nodes[:2])) + + utxos = wallet.listunspent(addresses=[address]) + psbt = wallet.createpsbt([{"txid": utxos[0]["txid"], "vout": utxos[0]["vout"]}], [{wallet.getnewaddress(): 0.9999}]) + signed_psbt = wallet.walletprocesspsbt(psbt)["psbt"] + + # Modify the raw transaction by changing the output address, so the signature is no longer valid + signed_psbt_obj = PSBT.from_base64(signed_psbt) + substitute_addr = wallet.getnewaddress() + raw = wallet.createrawtransaction([{"txid": utxos[0]["txid"], "vout": utxos[0]["vout"]}], [{substitute_addr: 0.9999}]) + signed_psbt_obj.g.map[PSBT_GLOBAL_UNSIGNED_TX] = bytes.fromhex(raw) + + # Check that the walletprocesspsbt call succeeds but also recognizes that the transaction is not complete + signed_psbt_incomplete = wallet.walletprocesspsbt(signed_psbt_obj.to_base64(), finalize=False) + assert signed_psbt_incomplete["complete"] is False + def test_utxo_conversion(self): self.log.info("Check that non-witness UTXOs are removed for segwit v1+ inputs") mining_node = self.nodes[2] @@ -634,6 +656,7 @@ class PSBTTest(BitcoinTestFramework): if self.options.descriptors: self.test_utxo_conversion() + self.test_psbt_incomplete_after_invalid_modification() self.test_input_confs_control() @@ -745,11 +768,9 @@ class PSBTTest(BitcoinTestFramework): assert_equal(analysis['next'], 'creator') assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid') - analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==') - assert_equal(analysis['next'], 'creator') - assert_equal(analysis['error'], 'PSBT is not valid. 
Input 0 specifies invalid prevout') + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].analyzepsbt, "cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==") - assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==') + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].walletprocesspsbt, "cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==") self.log.info("Test that we can fund psbts with external inputs specified") diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py index 66cdd7cf9a..153493fbab 100755 --- a/test/functional/rpc_users.py +++ b/test/functional/rpc_users.py @@ -11,12 +11,15 @@ from test_framework.util import ( ) import http.client +import os +import platform import urllib.parse import subprocess from random import SystemRandom import string import configparser import sys +from typing import Optional def call_with_auth(node, user, password): @@ -84,6 +87,40 @@ class HTTPBasicsTest(BitcoinTestFramework): self.log.info('Wrong...') assert_equal(401, call_with_auth(node, user + 'wrong', password + 'wrong').status) + def test_rpccookieperms(self): + p = {"owner": 0o600, "group": 0o640, "all": 0o644} + + if platform.system() == 'Windows': + self.log.info(f"Skip cookie file permissions checks as OS detected as: {platform.system()=}") + return + + self.log.info('Check cookie file permissions can be set using -rpccookieperms') + + cookie_file_path = self.nodes[1].chain_path / '.cookie' + PERM_BITS_UMASK = 0o777 + + def test_perm(perm: Optional[str]): + if not perm: + perm = 'owner' + self.restart_node(1) + else: + self.restart_node(1, extra_args=[f"-rpccookieperms={perm}"]) + + file_stat = os.stat(cookie_file_path) + actual_perms = file_stat.st_mode & PERM_BITS_UMASK + expected_perms = p[perm] + assert_equal(expected_perms, actual_perms) + + # Remove any leftover rpc{user|password} config options from previous tests + self.nodes[1].replace_in_config([("rpcuser", "#rpcuser"), ("rpcpassword", "#rpcpassword")]) + + self.log.info('Check default cookie permission') + test_perm(None) + + self.log.info('Check custom cookie permissions') + for perm in ["owner", "group", "all"]: + 
test_perm(perm) + def run_test(self): self.conf_setup() self.log.info('Check correctness of the rpcauth config option') @@ -115,6 +152,8 @@ class HTTPBasicsTest(BitcoinTestFramework): (self.nodes[0].chain_path / ".cookie.tmp").mkdir() self.nodes[0].assert_start_raises_init_error(expected_msg=init_error) + self.test_rpccookieperms() + if __name__ == '__main__': HTTPBasicsTest().main() diff --git a/test/functional/test-shell.md b/test/functional/test-shell.md index 4cd62c4ef3..d79c4a0ab6 100644 --- a/test/functional/test-shell.md +++ b/test/functional/test-shell.md @@ -169,7 +169,7 @@ can be called after the TestShell is shut down. | Test parameter key | Default Value | Description | |---|---|---| -| `bind_to_localhost_only` | `True` | Binds bitcoind RPC services to `127.0.0.1` if set to `True`.| +| `bind_to_localhost_only` | `True` | Binds bitcoind P2P services to `127.0.0.1` if set to `True`.| | `cachedir` | `"/path/to/bitcoin/test/cache"` | Sets the bitcoind datadir directory. | | `chain` | `"regtest"` | Sets the chain-type for the underlying test bitcoind processes. | | `configfile` | `"/path/to/bitcoin/test/config.ini"` | Sets the location of the test framework config file. | diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index 06252f8996..939c7cbef6 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -14,6 +14,7 @@ import random import unittest from test_framework.crypto import secp256k1 +from test_framework.util import random_bitflip # Point with no known discrete log. H_POINT = "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0" @@ -292,11 +293,6 @@ def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): class TestFrameworkKey(unittest.TestCase): def test_ecdsa_and_schnorr(self): """Test the Python ECDSA and Schnorr implementations.""" - def random_bitflip(sig): - sig = list(sig) - sig[random.randrange(len(sig))] ^= (1 << (random.randrange(8))) - return bytes(sig) - byte_arrays = [generate_privkey() for _ in range(3)] + [v.to_bytes(32, 'big') for v in [0, ORDER - 1, ORDER, 2**256 - 1]] keys = {} for privkey_bytes in byte_arrays: # build array of key/pubkey pairs diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 4b846df94a..4f1265eb54 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -223,6 +223,7 @@ class P2PConnection(asyncio.Protocol): # send the initial handshake immediately if self.supports_v2_p2p and self.v2_state.initiating and not self.v2_state.tried_v2_handshake: send_handshake_bytes = self.v2_state.initiate_v2_handshake() + logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data") self.send_raw_message(send_handshake_bytes) # for v1 outbound connections, send version message immediately after opening # (for v2 outbound connections, send it after the initial v2 handshake) @@ -262,6 +263,7 @@ class P2PConnection(asyncio.Protocol): self.v2_state = None return elif send_handshake_bytes: + logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data") self.send_raw_message(send_handshake_bytes) elif send_handshake_bytes == b"": return # only after send_handshake_bytes are sent can `complete_handshake()` be done diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 0ac0af27d5..b73566b0e9 100755 --- a/test/functional/test_framework/test_node.py +++ 
b/test/functional/test_framework/test_node.py @@ -39,6 +39,7 @@ from .util import ( rpc_url, wait_until_helper_internal, p2p_port, + tor_port, ) BITCOIND_PROC_WAIT_TIMEOUT = 60 @@ -88,8 +89,11 @@ class TestNode(): self.coverage_dir = coverage_dir self.cwd = cwd self.descriptors = descriptors + self.has_explicit_bind = False if extra_conf is not None: append_config(self.datadir_path, extra_conf) + # Remember if there is bind=... in the config file. + self.has_explicit_bind = any(e.startswith("bind=") for e in extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. # Note that common args are set in the config file (see initialize_datadir) @@ -210,6 +214,17 @@ class TestNode(): if extra_args is None: extra_args = self.extra_args + # If listening and no -bind is given, then bitcoind would bind P2P ports on + # 0.0.0.0:P and 127.0.0.1:18445 (for incoming Tor connections), where P is + # a unique port chosen by the test framework and configured as port=P in + # bitcoin.conf. To avoid collisions on 127.0.0.1:18445, change it to + # 127.0.0.1:tor_port(). + will_listen = all(e != "-nolisten" and e != "-listen=0" for e in extra_args) + has_explicit_bind = self.has_explicit_bind or any(e.startswith("-bind=") for e in extra_args) + if will_listen and not has_explicit_bind: + extra_args.append(f"-bind=0.0.0.0:{p2p_port(self.index)}") + extra_args.append(f"-bind=127.0.0.1:{tor_port(self.index)}=onion") + self.use_v2transport = "-v2transport=1" in extra_args or (self.default_to_v2 and "-v2transport=0" not in extra_args) # Add a new stdout and stderr file each time bitcoind is started @@ -666,7 +681,7 @@ class TestNode(): assert_msg += "with expected error " + expected_msg self._raise_assertion_error(assert_msg) - def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, **kwargs): + def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, expect_success=True, **kwargs): """Add an inbound p2p connection to the node. 
This method adds the p2p connection to the self.p2ps list and also @@ -686,7 +701,6 @@ class TestNode(): if supports_v2_p2p is None: supports_v2_p2p = self.use_v2transport - p2p_conn.p2p_connected_to_node = True if self.use_v2transport: kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2 @@ -694,6 +708,8 @@ class TestNode(): p2p_conn.peer_connect(**kwargs, send_version=send_version, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p)() self.p2ps.append(p2p_conn) + if not expect_success: + return p2p_conn p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) if supports_v2_p2p and wait_for_v2_handshake: p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index c5b69a3954..de756691e0 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -14,6 +14,7 @@ import logging import os import pathlib import platform +import random import re import time @@ -247,6 +248,12 @@ def ceildiv(a, b): return -(-a // b) +def random_bitflip(data): + data = list(data) + data[random.randrange(len(data))] ^= (1 << (random.randrange(8))) + return bytes(data) + + def get_fee(tx_size, feerate_btc_kvb): """Calculate the fee in BTC given a feerate is BTC/kvB. Reflects CFeeRate::GetFee""" feerate_sat_kvb = int(feerate_btc_kvb * Decimal(1e8)) # Fee in sat/kvb as an int to avoid float precision errors @@ -309,9 +316,9 @@ def sha256sum_file(filename): # The maximum number of nodes a single test can spawn MAX_NODES = 12 -# Don't assign rpc or p2p ports lower than this +# Don't assign p2p, rpc or tor ports lower than this PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000)) -# The number of ports to "reserve" for p2p and rpc, each +# The number of ports to "reserve" for p2p, rpc and tor, each PORT_RANGE = 5000 @@ -351,7 +358,11 @@ def p2p_port(n): def rpc_port(n): - return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + return p2p_port(n) + PORT_RANGE + + +def tor_port(n): + return p2p_port(n) + PORT_RANGE * 2 def rpc_url(datadir, i, chain, rpchost): diff --git a/test/functional/test_framework/v2_p2p.py b/test/functional/test_framework/v2_p2p.py index 8f79623bd8..87600c36de 100644 --- a/test/functional/test_framework/v2_p2p.py +++ b/test/functional/test_framework/v2_p2p.py @@ -4,7 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for v2 P2P protocol (see BIP 324)""" -import logging import random from .crypto.bip324_cipher import FSChaCha20Poly1305 @@ -14,14 +13,12 @@ from .crypto.hkdf import hkdf_sha256 from .key import TaggedHash from .messages import MAGIC_BYTES -logger = logging.getLogger("TestFramework.v2_p2p") CHACHA20POLY1305_EXPANSION = 16 HEADER_LEN = 1 IGNORE_BIT_POS = 7 LENGTH_FIELD_LEN = 3 MAX_GARBAGE_LEN = 4095 -TRANSPORT_VERSION = b'' SHORTID = { 1: b"addr", @@ -95,6 +92,7 @@ class EncryptedP2PState: # has been decrypted. set to -1 if decryption hasn't been done yet. self.contents_len = -1 self.found_garbage_terminator = False + self.transport_version = b'' @staticmethod def v2_ecdh(priv, ellswift_theirs, ellswift_ours, initiating): @@ -111,12 +109,12 @@ class EncryptedP2PState: # Responding, place their public key encoding first. 
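As a brief aside, a small usage sketch of the `random_bitflip` helper that the util.py hunk above introduces and that the WrongGarbageTerminatorState / WrongGarbageState peers rely on; the demo values are made up, only the helper body mirrors the diff.

```python
import random

def random_bitflip(data):
    """Flip one random bit in one random byte (mirrors the helper added to util.py above)."""
    data = list(data)
    data[random.randrange(len(data))] ^= (1 << (random.randrange(8)))
    return bytes(data)

original = bytes(16)               # e.g. a 16-byte garbage terminator
corrupted = random_bitflip(original)
# Exactly one bit differs, which is enough to make garbage-terminator detection
# or packet authentication fail in the misbehaving-peer scenarios above.
assert sum(bin(a ^ b).count("1") for a, b in zip(original, corrupted)) == 1
```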
return TaggedHash("bip324_ellswift_xonly_ecdh", ellswift_theirs + ellswift_ours + ecdh_point_x32) - def generate_keypair_and_garbage(self): + def generate_keypair_and_garbage(self, garbage_len=None): """Generates ellswift keypair and 4095 bytes garbage at max""" self.privkey_ours, self.ellswift_ours = ellswift_create() - garbage_len = random.randrange(MAX_GARBAGE_LEN + 1) + if garbage_len is None: + garbage_len = random.randrange(MAX_GARBAGE_LEN + 1) self.sent_garbage = random.randbytes(garbage_len) - logger.debug(f"sending {garbage_len} bytes of garbage data") return self.ellswift_ours + self.sent_garbage def initiate_v2_handshake(self): @@ -172,7 +170,7 @@ class EncryptedP2PState: msg_to_send += self.v2_enc_packet(decoy_content_len * b'\x00', aad=aad, ignore=True) aad = b'' # Send version packet. - msg_to_send += self.v2_enc_packet(TRANSPORT_VERSION, aad=aad) + msg_to_send += self.v2_enc_packet(self.transport_version, aad=aad) return 64 - len(self.received_prefix), msg_to_send def authenticate_handshake(self, response): diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 8475dc5faa..67693259d3 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -264,9 +264,9 @@ BASE_SCRIPTS = [ 'p2p_invalid_tx.py --v2transport', 'p2p_v2_transport.py', 'p2p_v2_encrypted.py', - 'p2p_v2_earlykeyresponse.py', + 'p2p_v2_misbehaving.py', 'example_test.py', - 'mempool_accept_v3.py', + 'mempool_truc.py', 'wallet_txn_doublespend.py --legacy-wallet', 'wallet_multisig_descriptor_psbt.py --descriptors', 'wallet_txn_doublespend.py --descriptors', diff --git a/test/functional/wallet_conflicts.py b/test/functional/wallet_conflicts.py index e5739a6a59..25a95aa954 100755 --- a/test/functional/wallet_conflicts.py +++ b/test/functional/wallet_conflicts.py @@ -9,7 +9,6 @@ Test that wallet correctly tracks transactions that have been conflicted by bloc from decimal import Decimal -from test_framework.blocktools import COINBASE_MATURITY from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -37,7 +36,6 @@ class TxConflicts(BitcoinTestFramework): """ self.test_block_conflicts() - self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 7, self.nodes[2].getnewaddress()) self.test_mempool_conflict() self.test_mempool_and_block_conflicts() self.test_descendants_with_mempool_conflicts() diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py index fa3e920c25..41ddb2bc69 100755 --- a/test/functional/wallet_create_tx.py +++ b/test/functional/wallet_create_tx.py @@ -114,16 +114,16 @@ class CreateTxWalletTest(BitcoinTestFramework): self.log.info('Check wallet does not create transactions with version=3 yet') wallet_rpc = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - self.nodes[0].createwallet("v3") - wallet_v3 = self.nodes[0].get_wallet_rpc("v3") + self.nodes[0].createwallet("version3") + wallet_v3 = self.nodes[0].get_wallet_rpc("version3") tx_data = wallet_rpc.send(outputs=[{wallet_v3.getnewaddress(): 25}], options={"change_position": 0}) wallet_tx_data = wallet_rpc.gettransaction(tx_data["txid"]) tx_current_version = tx_from_hex(wallet_tx_data["hex"]) - # While v3 transactions are standard, the CURRENT_VERSION is 2. + # While version=3 transactions are standard, the CURRENT_VERSION is 2. # This test can be removed if CURRENT_VERSION is changed, and replaced with tests that the - # wallet handles v3 rules properly. + # wallet handles TRUC rules properly. 
assert_equal(tx_current_version.version, 2) wallet_v3.unloadwallet() diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py index 3c1b2deb1d..07737c273d 100755 --- a/test/functional/wallet_fundrawtransaction.py +++ b/test/functional/wallet_fundrawtransaction.py @@ -1322,15 +1322,15 @@ class RawTransactionsTest(BitcoinTestFramework): outputs = [] for _ in range(1472): outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1}) - txid = self.nodes[0].send(outputs=outputs)["txid"] + txid = self.nodes[0].send(outputs=outputs, change_position=0)["txid"] self.generate(self.nodes[0], 1) # 272 WU per input (273 when high-s); picking 1471 inputs will exceed the max standard tx weight. rawtx = wallet.createrawtransaction([], [{wallet.getnewaddress(): 0.1 * 1471}]) - # 1) Try to fund transaction only using the preset inputs + # 1) Try to fund transaction only using the preset inputs (pick all 1472 inputs to cover the fee) input_weights = [] - for i in range(1471): + for i in range(1, 1473): # skip first output as it is the parent tx change output input_weights.append({"txid": txid, "vout": i, "weight": 273}) assert_raises_rpc_error(-4, "Transaction too large", wallet.fundrawtransaction, hexstring=rawtx, input_weights=input_weights) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 890b6a5c1b..8fdc284d24 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -205,9 +205,13 @@ class WalletMigrationTest(BitcoinTestFramework): self.assert_list_txs_equal(basic2.listtransactions(), basic2_txs) # Now test migration on a descriptor wallet - self.log.info("Test \"nothing to migrate\" when the user tries to migrate a wallet with no legacy data") + self.log.info("Test \"nothing to migrate\" when the user tries to migrate a loaded wallet with no legacy data") assert_raises_rpc_error(-4, "Error: This wallet is already a descriptor wallet", basic2.migratewallet) + self.log.info("Test \"nothing to migrate\" when the user tries to migrate an unloaded wallet with no legacy data") + basic2.unloadwallet() + assert_raises_rpc_error(-4, "Error: This wallet is already a descriptor wallet", self.nodes[0].migratewallet, "basic2") + def test_multisig(self): default = self.nodes[0].get_wallet_rpc(self.default_wallet_name) @@ -467,6 +471,12 @@ class WalletMigrationTest(BitcoinTestFramework): assert_raises_rpc_error(-4, "Error: Wallet decryption failed, the wallet passphrase was not provided or was incorrect", wallet.migratewallet, None, "badpass") assert_raises_rpc_error(-4, "The passphrase contains a null character", wallet.migratewallet, None, "pass\0with\0null") + # Check the wallet is still active post-migration failure. + # If not, it will throw an exception and abort the test. + wallet.walletpassphrase("pass", 99999) + wallet.getnewaddress() + + # Verify we can properly migrate the encrypted wallet self.migrate_wallet(wallet, passphrase="pass") info = wallet.getwalletinfo() diff --git a/test/functional/wallet_multisig_descriptor_psbt.py b/test/functional/wallet_multisig_descriptor_psbt.py index 68bf45f7e3..145912025f 100755 --- a/test/functional/wallet_multisig_descriptor_psbt.py +++ b/test/functional/wallet_multisig_descriptor_psbt.py @@ -7,7 +7,6 @@ This is meant to be documentation as much as functional tests, so it is kept as simple and readable as possible. 
""" -from test_framework.address import base58_to_byte from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_approx, @@ -30,10 +29,12 @@ class WalletMultisigDescriptorPSBTTest(BitcoinTestFramework): self.skip_if_no_sqlite() @staticmethod - def _get_xpub(wallet): + def _get_xpub(wallet, internal): """Extract the wallet's xpubs using `listdescriptors` and pick the one from the `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses).""" - descriptor = next(filter(lambda d: d["desc"].startswith("pkh"), wallet.listdescriptors()["descriptors"])) - return descriptor["desc"].split("]")[-1].split("/")[0] + pkh_descriptor = next(filter(lambda d: d["desc"].startswith("pkh(") and d["internal"] == internal, wallet.listdescriptors()["descriptors"])) + # Keep all key origin information (master key fingerprint and all derivation steps) for proper support of hardware devices + # See section 'Key origin identification' in 'doc/descriptors.md' for more details... + return pkh_descriptor["desc"].split("pkh(")[1].split(")")[0] @staticmethod def _check_psbt(psbt, to, value, multisig): @@ -47,19 +48,13 @@ class WalletMultisigDescriptorPSBTTest(BitcoinTestFramework): amount += vout["value"] assert_approx(amount, float(value), vspan=0.001) - def participants_create_multisigs(self, xpubs): + def participants_create_multisigs(self, external_xpubs, internal_xpubs): """The multisig is created by importing the following descriptors. The resulting wallet is watch-only and every participant can do this.""" - # some simple validation - assert_equal(len(xpubs), self.N) - # a sanity-check/assertion, this will throw if the base58 checksum of any of the provided xpubs are invalid - for xpub in xpubs: - base58_to_byte(xpub) - for i, node in enumerate(self.nodes): node.createwallet(wallet_name=f"{self.name}_{i}", blank=True, descriptors=True, disable_private_keys=True) multisig = node.get_wallet_rpc(f"{self.name}_{i}") - external = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f'/0/*,'.join(xpubs)}/0/*))") - internal = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f'/1/*,'.join(xpubs)}/1/*))") + external = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f','.join(external_xpubs)}))") + internal = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f','.join(internal_xpubs)}))") result = multisig.importdescriptors([ { # receiving addresses (internal: False) "desc": external["descriptor"], @@ -93,10 +88,10 @@ class WalletMultisigDescriptorPSBTTest(BitcoinTestFramework): } self.log.info("Generate and exchange xpubs...") - xpubs = [self._get_xpub(signer) for signer in participants["signers"]] + external_xpubs, internal_xpubs = [[self._get_xpub(signer, internal) for signer in participants["signers"]] for internal in [False, True]] self.log.info("Every participant imports the following descriptors to create the watch-only multisig...") - participants["multisigs"] = list(self.participants_create_multisigs(xpubs)) + participants["multisigs"] = list(self.participants_create_multisigs(external_xpubs, internal_xpubs)) self.log.info("Check that every participant's multisig generates the same addresses...") for _ in range(10): # we check that the first 10 generated addresses are the same for all participant's multisigs diff --git a/test/lint/lint-assertions.py b/test/lint/lint-assertions.py index d9f86b22b8..5d01b13fd4 100755 --- a/test/lint/lint-assertions.py +++ b/test/lint/lint-assertions.py @@ -27,8 +27,9 @@ def main(): # checks 
should be used over assert. See: src/util/check.h # src/rpc/server.cpp is excluded from this check since it's mostly meta-code. exit_code = git_grep([ - "-nE", - r"\<(A|a)ss(ume|ert) *\(.*\);", + "--line-number", + "--extended-regexp", + r"\<(A|a)ss(ume|ert)\(", "--", "src/rpc/", "src/wallet/rpc*", @@ -38,8 +39,9 @@ def main(): # The `BOOST_ASSERT` macro requires to `#include boost/assert.hpp`, # which is an unnecessary Boost dependency. exit_code |= git_grep([ - "-E", - r"BOOST_ASSERT *\(.*\);", + "--line-number", + "--extended-regexp", + r"BOOST_ASSERT\(", "--", "*.cpp", "*.h", diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs index 9c35898c1f..2ba58a6da2 100644 --- a/test/lint/test_runner/src/main.rs +++ b/test/lint/test_runner/src/main.rs @@ -410,6 +410,7 @@ fn lint_markdown() -> LintResult { "--offline", "--ignore-path", md_ignore_path_str.as_str(), + "--gitignore", "--root-dir", ".", ]) @@ -419,11 +420,6 @@ fn lint_markdown() -> LintResult { Ok(output) if output.status.success() => Ok(()), Ok(output) => { let stderr = String::from_utf8_lossy(&output.stderr); - let filtered_stderr: String = stderr // Filter out this annoying trailing line - .lines() - .filter(|&line| line != "The following links could not be resolved:") - .collect::<Vec<&str>>() - .join("\n"); Err(format!( r#" One or more markdown links are broken. @@ -433,7 +429,7 @@ Relative links are preferred (but not required) as jumping to file works nativel Markdown link errors found: {} "#, - filtered_stderr + stderr )) } Err(e) if e.kind() == ErrorKind::NotFound => { diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 9818d73fdf..94bd14e6c3 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -51,19 +51,18 @@ unsigned-integer-overflow:CCoinsViewCache::Uncache unsigned-integer-overflow:CompressAmount unsigned-integer-overflow:DecompressAmount unsigned-integer-overflow:crypto/ -unsigned-integer-overflow:getchaintxstats* unsigned-integer-overflow:MurmurHash3 unsigned-integer-overflow:CBlockPolicyEstimator::processBlockTx unsigned-integer-overflow:TxConfirmStats::EstimateMedianVal unsigned-integer-overflow:prevector.h unsigned-integer-overflow:EvalScript -unsigned-integer-overflow:xoroshiro128plusplus.h +unsigned-integer-overflow:InsecureRandomContext::rand64 +unsigned-integer-overflow:InsecureRandomContext::SplitMix64 unsigned-integer-overflow:bitset_detail::PopCount implicit-integer-sign-change:CBlockPolicyEstimator::processBlockTx implicit-integer-sign-change:SetStdinEcho implicit-integer-sign-change:compressor.h implicit-integer-sign-change:crypto/ -implicit-integer-sign-change:getchaintxstats* implicit-integer-sign-change:TxConfirmStats::removeTx implicit-integer-sign-change:prevector.h implicit-integer-sign-change:verify_flags @@ -75,4 +74,6 @@ shift-base:arith_uint256.cpp shift-base:crypto/ shift-base:streams.h shift-base:FormatHDKeypath -shift-base:xoroshiro128plusplus.h +shift-base:InsecureRandomContext::rand64 +shift-base:RandomMixin<*>::randbits +shift-base:RandomMixin<*>::randbits<*> |
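To summarize the port-allocation change in test_framework/util.py above, here is an illustrative sketch of the three per-node port bands that result once `tor_port()` is added and `rpc_port()` is rebased on `p2p_port()`. The `PortSeed` modulo term is elided for brevity, and the concrete values assume the defaults shown in the diff (PORT_MIN=11000, PORT_RANGE=5000) with a seed term of zero; this is a sketch, not the framework code.

```python
PORT_MIN = 11000
PORT_RANGE = 5000

def p2p_port(n):
    return PORT_MIN + n                 # PortSeed/modulo term omitted in this sketch

def rpc_port(n):
    return p2p_port(n) + PORT_RANGE

def tor_port(n):
    return p2p_port(n) + PORT_RANGE * 2

# Node 0 then gets three non-overlapping bands, so the extra
# -bind=127.0.0.1:tor_port() added in test_node.py no longer collides
# with the fixed onion listen port 18445.
assert (p2p_port(0), rpc_port(0), tor_port(0)) == (11000, 16000, 21000)
```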