428 files changed, 11030 insertions, 4278 deletions
diff --git a/.appveyor.yml b/.appveyor.yml index eeb2d6590b..7dcf9388b9 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -5,8 +5,6 @@ configuration: Release platform: x64 clone_depth: 5 environment: - APPVEYOR_SAVE_CACHE_ON_ERROR: true - CLCACHE_SERVER: 1 PATH: 'C:\Python37-x64;C:\Python37-x64\Scripts;%PATH%' PYTHONUTF8: 1 QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/v1.6/Qt5.9.8_x64_static_vs2019.zip' @@ -14,59 +12,41 @@ environment: QT_LOCAL_PATH: 'C:\Qt5.9.8_x64_static_vs2019' VCPKG_INSTALL_PATH: 'C:\tools\vcpkg\installed' VCPKG_COMMIT_ID: 'ed0df8ecc4ed7e755ea03e18aaf285fd9b4b4a74' -cache: -- C:\tools\vcpkg\installed -> build_msvc\vcpkg-packages.txt -- C:\Qt5.9.8_x64_static_vs2019 install: # Disable zmq test for now since python zmq library on Windows would cause Access violation sometimes. # - cmd: pip install zmq # Powershell block below is to install the c++ dependencies via vcpkg. The pseudo code is: -# 1. Check whether the vcpkg install directory exists (note that updating the vcpkg-packages.txt file -# will cause the appveyor cache rules to invalidate the directory) -# 2. If the directory is missing: # a. Checkout the vcpkg source (including port files) for the specific checkout and build the vcpkg binary, # b. Install the missing packages. - ps: | $env:PACKAGES = Get-Content -Path build_msvc\vcpkg-packages.txt - Write-Host "vcpkg list: $env:PACKAGES" - if(!(Test-Path -Path ($env:VCPKG_INSTALL_PATH))) { - cd c:\tools\vcpkg - $env:GIT_REDIRECT_STDERR = '2>&1' # git is writing non-errors to STDERR when doing git pull. Send to STDOUT instead. - git pull origin master - git checkout $env:VCPKG_COMMIT_ID - .\bootstrap-vcpkg.bat - Add-Content "C:\tools\vcpkg\triplets\$env:PLATFORM-windows-static.cmake" "set(VCPKG_BUILD_TYPE release)" - .\vcpkg install --triplet $env:PLATFORM-windows-static $env:PACKAGES.split() > $null - cd "$env:APPVEYOR_BUILD_FOLDER" - } - else { - Write-Host "required vcpkg packages already installed." - } - c:\tools\vcpkg\vcpkg integrate install + Write-Host "vcpkg installing packages: $env:PACKAGES" + cd c:\tools\vcpkg + $env:GIT_REDIRECT_STDERR = '2>&1' # git is writing non-errors to STDERR when doing git pull. Send to STDOUT instead. + git pull origin master > $null + git -c advice.detachedHead=false checkout $env:VCPKG_COMMIT_ID + .\bootstrap-vcpkg.bat > $null + Add-Content "C:\tools\vcpkg\triplets\$env:PLATFORM-windows-static.cmake" "set(VCPKG_BUILD_TYPE release)" + .\vcpkg install --triplet $env:PLATFORM-windows-static $env:PACKAGES.split() > $null + Write-Host "vcpkg packages installed successfully." + .\vcpkg integrate install + cd "$env:APPVEYOR_BUILD_FOLDER" before_build: # Powershell block below is to download and extract the Qt static libraries. The pseudo code is: -# 1. If the Qt destination directory exists assume it is correct and do nothing. To -# force a fresh install of the packages delete the job's appveyor cache. -# 2. Otherwise: # a. Download the zip file with the prebuilt Qt static libraries. # b. Check that the downloaded file matches the expected hash. # c. Extract the zip file to the specific destination path expected by the msbuild projects. 
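The PowerShell step in the next hunk implements steps a–c above against $env:QT_DOWNLOAD_URL, $env:QT_DOWNLOAD_HASH and $env:QT_LOCAL_PATH. For readers following the pseudo code rather than the PowerShell, a minimal shell sketch of the same download-then-verify-then-extract pattern (assuming curl, sha256sum and unzip are available; these are not the commands the CI actually runs) is:

    curl -L -o qtdownload.zip "$QT_DOWNLOAD_URL"               # a. download the prebuilt Qt archive
    echo "$QT_DOWNLOAD_HASH  qtdownload.zip" | sha256sum -c -  # b. fails unless the hash matches (Get-FileHash also defaults to SHA256)
    unzip -q qtdownload.zip -d "$QT_LOCAL_PATH"                # c. extract to the path the msbuild projects expect

The design point, in either language, is that extraction only happens after the hash check succeeds, so a tampered or truncated download fails the build rather than being linked in.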
- ps: | - if(!(Test-Path -Path ($env:QT_LOCAL_PATH))) { - Write-Host "Downloading Qt binaries."; - Invoke-WebRequest -Uri $env:QT_DOWNLOAD_URL -Out qtdownload.zip; - Write-Host "Qt binaries successfully downloaded, checking hash against $env:QT_DOWNLOAD_HASH..."; - if((Get-FileHash qtdownload.zip).Hash -eq $env:QT_DOWNLOAD_HASH) { - Expand-Archive qtdownload.zip -DestinationPath $env:QT_LOCAL_PATH; - Write-Host "Qt binary download matched the expected hash."; - } - else { - Write-Host "ERROR: Qt binary download did not match the expected hash."; - Exit-AppveyorBuild; - } + Write-Host "Downloading Qt binaries."; + Invoke-WebRequest -Uri $env:QT_DOWNLOAD_URL -Out qtdownload.zip; + Write-Host "Qt binaries successfully downloaded, checking hash against $env:QT_DOWNLOAD_HASH..."; + if((Get-FileHash qtdownload.zip).Hash -eq $env:QT_DOWNLOAD_HASH) { + Expand-Archive qtdownload.zip -DestinationPath $env:QT_LOCAL_PATH; + Write-Host "Qt binary download matched the expected hash."; } else { - Write-Host "Qt binaries already present."; + Write-Host "ERROR: Qt binary download did not match the expected hash."; + Exit-AppveyorBuild; } - cmd: python build_msvc\msvc-autogen.py build_script: @@ -75,7 +55,7 @@ after_build: #- 7z a bitcoin-%APPVEYOR_BUILD_VERSION%.zip %APPVEYOR_BUILD_FOLDER%\build_msvc\%platform%\%configuration%\*.exe test_script: - cmd: src\test_bitcoin.exe -l test_suite -- cmd: src\bench_bitcoin.exe -evals=1 -scaling=0 > NUL +- cmd: src\bench_bitcoin.exe > NUL - ps: python test\util\bitcoin-util-test.py - cmd: python test\util\rpcauth-test.py # Fee estimation test failing on appveyor with: WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted. diff --git a/.cirrus.yml b/.cirrus.yml index 9c2598d606..446d3e35a9 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,4 +1,5 @@ -# Global defaults +### Global defaults + timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out container: # https://cirrus-ci.org/faq/#are-there-any-limits @@ -12,7 +13,9 @@ env: TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. 
See https://cirrus-ci.org/guide/writing-tasks/#http-cache CCACHE_SIZE: "200M" CCACHE_DIR: "/tmp/ccache_dir" -# Global task template + +### Global task template + # https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks global_task_template: &GLOBAL_TASK_TEMPLATE ccache_cache: @@ -31,6 +34,7 @@ global_task_template: &GLOBAL_TASK_TEMPLATE - git merge FETCH_HEAD # Merge base to detect silent merge conflicts ci_script: - ./ci/test_run_all.sh + #task: # name: "Windows" # windows_container: @@ -48,6 +52,14 @@ global_task_template: &GLOBAL_TASK_TEMPLATE # - choco install python --version=3.7.7 -y task: + name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: thread (TSan), no gui]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:focal + env: + FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" + +task: name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: address/leak (ASan + LSan) + undefined (UBSan) + integer]' << : *GLOBAL_TASK_TEMPLATE container: diff --git a/.gitignore b/.gitignore index 23b6090265..1173edfaa7 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,7 @@ src/bitcoin-gui src/bitcoin-node src/bitcoin-tx src/bitcoin-wallet -src/test/fuzz +src/test/fuzz/* !src/test/fuzz/*.* src/test/test_bitcoin src/qt/test/test_bitcoin-qt @@ -119,7 +119,9 @@ releases /*.info test_bitcoin.coverage/ total.coverage/ +fuzz.coverage/ coverage_percent.txt +/cov_tool_wrapper.sh #build tests linux-coverage-build diff --git a/.travis.yml b/.travis.yml index edec60afba..f1cee7133f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,14 +18,11 @@ os: linux language: minimal arch: amd64 cache: - ccache: true directories: - $TRAVIS_BUILD_DIR/depends/built - $TRAVIS_BUILD_DIR/depends/sdk-sources - $TRAVIS_BUILD_DIR/ci/scratch/.ccache - $TRAVIS_BUILD_DIR/releases/$HOST -before_cache: - - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then brew cleanup; fi stages: - lint - test @@ -47,7 +44,7 @@ script: - if [ $SECONDS -gt 1200 ]; then export CONTINUE=0; fi # Likely the depends build took very long - if [ $TRAVIS_REPO_SLUG = "bitcoin/bitcoin" ]; then export CONTINUE=1; fi # continue on repos with extended build time (90 minutes) - if [ $CONTINUE = "1" ]; then set -o errexit; source ./ci/test/06_script_a.sh; else set +o errexit; echo "$CACHE_ERR_MSG"; false; fi - - if [ $SECONDS -gt 2000 ]; then export CONTINUE=0; fi # Likely the build took very long; The tests take about 1000s, so we should abort if we have less than 50*60-1000=2000s left + - if [[ $SECONDS -gt 50*60-$EXPECTED_TESTS_DURATION_IN_SECONDS ]]; then export CONTINUE=0; fi - if [ $TRAVIS_REPO_SLUG = "bitcoin/bitcoin" ]; then export CONTINUE=1; fi # continue on repos with extended build time (90 minutes) - if [ $CONTINUE = "1" ]; then set -o errexit; source ./ci/test/06_script_b.sh; else set +o errexit; echo "$CACHE_ERR_MSG"; false; fi after_script: @@ -75,17 +72,15 @@ jobs: FILE_ENV="./ci/test/00_setup_env_arm.sh" QEMU_USER_CMD="" -# s390 build was disabled temporarily because of disk space issues on the Travis VM -# -# - stage: test -# name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]' -# arch: s390x # Can disable QEMU_USER_CMD and run the tests natively without qemu -# env: >- -# FILE_ENV="./ci/test/00_setup_env_s390x.sh" -# QEMU_USER_CMD="" + - stage: test + name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]' + arch: s390x # Can disable QEMU_USER_CMD and run the tests natively without qemu + env: >- + FILE_ENV="./ci/test/00_setup_env_s390x.sh" + 
QEMU_USER_CMD="" - stage: test - name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]' + name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]' env: >- FILE_ENV="./ci/test/00_setup_env_win64.sh" @@ -100,11 +95,9 @@ jobs: FILE_ENV="./ci/test/00_setup_env_native_qt5.sh" - stage: test - name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: thread (TSan), no gui]' - # Not enough memory on travis machines, so feature_block is excluded for now + name: 'x86_64 Linux [GOAL: install] [focal] [depends, sanitizers: memory (MSan)]' env: >- - TEST_RUNNER_EXTRA="--exclude feature_block" - FILE_ENV="./ci/test/00_setup_env_native_tsan.sh" + FILE_ENV="./ci/test/00_setup_env_native_msan.sh" - stage: test name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, sanitizers: fuzzer,address,undefined]' @@ -138,22 +131,12 @@ jobs: # Xcode 11.3.1, macOS 10.14, SDK 10.15 # https://docs.travis-ci.com/user/reference/osx/#macos-version osx_image: xcode11.3 - cache: - directories: - - $TRAVIS_BUILD_DIR/ci/scratch/.ccache - - $TRAVIS_BUILD_DIR/releases/$HOST - - $HOME/Library/Caches/Homebrew - - /usr/local/Homebrew addons: homebrew: packages: - - libtool - berkeley-db4 - - boost - miniupnpc - - qt - qrencode - - python3 - ccache - zeromq env: >- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 65bc45a00b..2e11474382 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,27 +6,28 @@ welcome to contribute towards development in the form of peer review, testing and patches. This document explains the practical process and guidelines for contributing. -Firstly in terms of structure, there is no particular concept of "Core +First, in terms of structure, there is no particular concept of "Bitcoin Core developers" in the sense of privileged people. Open source often naturally -revolves around meritocracy where longer term contributors gain more trust from -the developer community. However, some hierarchy is necessary for practical -purposes. As such there are repository "maintainers" who are responsible for -merging pull requests as well as a "lead maintainer" who is responsible for the -release cycle, overall merging, moderation and appointment of maintainers. +revolves around a meritocracy where contributors earn trust from the developer +community over time. Nevertheless, some hierarchy is necessary for practical +purposes. As such, there are repository "maintainers" who are responsible for +merging pull requests, as well as a "lead maintainer" who is responsible for the +release cycle as well as overall merging, moderation and appointment of +maintainers. Getting Started --------------- New contributors are very welcome and needed. -Reviewing and testing is the most effective way you can contribute as a new -contributor, and it also will teach you much more about the code and process -than opening PRs. Please refer to the section [peer review](#peer-review) later -in this document. +Reviewing and testing is highly valued and the most effective way you can contribute +as a new contributor. It also will teach you much more about the code and +process than opening pull requests. Please refer to the [peer review](#peer-review) +section below. Before you start contributing, familiarize yourself with the Bitcoin Core build system and tests. Refer to the documentation in the repository on how to build -Bitcoin Core and how to run the unit and functional tests. +Bitcoin Core and how to run the unit tests, functional tests, and fuzz tests. 
There are many open issues of varying difficulty waiting to be fixed. If you're looking for somewhere to start contributing, check out the @@ -62,7 +63,7 @@ history logs can be found on [http://www.erisian.com.au/bitcoin-core-dev/](http://www.erisian.com.au/bitcoin-core-dev/) and [http://gnusha.org/bitcoin-core-dev/](http://gnusha.org/bitcoin-core-dev/). -Discussion about code base improvements happens in GitHub issues and on pull +Discussion about codebase improvements happens in GitHub issues and pull requests. The developer @@ -75,7 +76,7 @@ Contributor Workflow -------------------- The codebase is maintained using the "contributor workflow" where everyone -without exception contributes patch proposals using "pull requests". This +without exception contributes patch proposals using "pull requests" (PRs). This facilitates social contribution, easy testing and peer review. To contribute a patch, the workflow is as follows: @@ -113,6 +114,9 @@ In general, [commits should be atomic](https://en.wikipedia.org/wiki/Atomic_comm and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. +Make sure each individual commit is hygienic: that it builds successfully on its +own without warnings, errors, regressions, or test failures. + Commit messages should be verbose by default consisting of a short subject line (50 chars max), a blank line and detailed explanatory text as separate paragraph(s), unless the title alone is self-explanatory (like "Corrected typo @@ -124,7 +128,7 @@ If a particular commit references another issue, please add the reference. For example: `refs #1234` or `fixes #4321`. Using the `fixes` or `closes` keywords will cause the corresponding issue to be closed when the pull request is merged. -Commit messages should never contain any `@` mentions. +Commit messages should never contain any `@` mentions (usernames prefixed with "@"). Please refer to the [Git manual](https://git-scm.com/doc) for more information about Git. @@ -154,14 +158,20 @@ the pull request affects. Valid areas as: Examples: consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG - net: Automatically create hidden service, listen on Tor + net: Automatically create onion service, listen on Tor qt: Add feed bump button log: Fix typo in log message -The body of the pull request should contain enough description about what the -patch does together with any justification/reasoning. You should include -references to any discussions (for example other tickets or mailing list -discussions). +The body of the pull request should contain sufficient description of *what* the +patch does, and even more importantly, *why*, with justification and reasoning. +You should include references to any discussions (for example, other issues or +mailing list discussions). + +The description for a new pull request should not contain any `@` mentions. The +PR description will be included in the commit message when the PR is merged and +any users mentioned in the description will be annoyingly notified each time a +fork of Bitcoin Core copies the merge. Instead, make any username mentions in a +subsequent comment to the PR. ### Translation changes @@ -197,13 +207,13 @@ before it will be merged. The basic squashing workflow is shown below. # Save and quit. git push -f # (force push to GitHub) -Please update the resulting commit message if needed. It should read as a -coherent message. In most cases, this means that you should not just list the -interim commits. 
+Please update the resulting commit message, if needed. It should read as a +coherent message. In most cases, this means not just listing the interim +commits. -If you have problems with squashing (or other workflows with `git`), you can -alternatively enable "Allow edits from maintainers" in the right GitHub -sidebar and ask for help in the pull request. +If you have problems with squashing or other git workflows, you can enable +"Allow edits from maintainers" in the right-hand sidebar of the GitHub web +interface and ask for help in the pull request. Please refrain from creating several pull requests for the same change. Use the pull request that is already open (or was created earlier) to amend @@ -287,8 +297,8 @@ In general, all pull requests must: - Have a clear use case, fix a demonstrable bug or serve the greater good of the project (for example refactoring for modularisation); - - Be well peer reviewed; - - Have unit tests and functional tests where appropriate; + - Be well peer-reviewed; + - Have unit tests, functional tests, and fuzz tests, where appropriate; - Follow code style guidelines ([C++](doc/developer-notes.md), [functional tests](test/functional/README.md)); - Not break the existing test suite; - Where bugs are fixed, where possible, there should be unit tests @@ -315,7 +325,7 @@ spread out over GitHub, mailing list and IRC discussions). #### Conceptual Review A review can be a conceptual review, where the reviewer leaves a comment - * `Concept (N)ACK`, meaning "I do (not) agree in the general goal of this pull + * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull request", * `Approach (N)ACK`, meaning `Concept ACK`, but "I do (not) agree with the approach of this change". @@ -325,30 +335,28 @@ NACKs without accompanying reasoning may be disregarded. #### Code Review -After conceptual agreement on the change, code review can be provided. It is -starting with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the -topic branch. The review is followed by a description of how the reviewer did -the review. The following -language is used within pull-request comments: +After conceptual agreement on the change, code review can be provided. A review +begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR +branch, followed by a description of how the reviewer did the review. The +following language is used within pull request comments: - - "I have tested the code", involving - change-specific manual testing in addition to running the unit and functional - tests, and in case it is not obvious how the manual testing was done, it should - be described; + - "I have tested the code", involving change-specific manual testing in + addition to running the unit, functional, or fuzz tests, and in case it is + not obvious how the manual testing was done, it should be described; - "I have not tested the code, but I have reviewed it and it looks OK, I agree it can be merged"; - - Nit refers to trivial, often non-blocking issues. + - A "nit" refers to a trivial, often non-blocking issue. Project maintainers reserve the right to weigh the opinions of peer reviewers -using common sense judgement and also may weight based on meritocracy: Those -that have demonstrated a deeper commitment and understanding towards the project -(over time) or have clear domain expertise may naturally have more weight, as -one would expect in all walks of life. +using common sense judgement and may also weigh based on merit. 
Reviewers that +have demonstrated a deeper commitment and understanding of the project over time +or who have clear domain expertise may naturally have more weight, as one would +expect in all walks of life. -Where a patch set affects consensus critical code, the bar will be set much +Where a patch set affects consensus-critical code, the bar will be much higher in terms of discussion and peer review requirements, keeping in mind that mistakes could be very costly to the wider community. This includes refactoring -of consensus critical code. +of consensus-critical code. Where a patch set proposes to change the Bitcoin consensus, it must have been discussed extensively on the mailing list and IRC, be accompanied by a widely @@ -365,7 +373,7 @@ about: - It may be because of a feature freeze due to an upcoming release. During this time, only bug fixes are taken into consideration. If your pull request is a new feature, - it will not be prioritized until the release is over. Wait for release. + it will not be prioritized until after the release. Wait for the release. - It may be because the changes you are suggesting do not appeal to people. Rather than nits and critique, which require effort and means they care enough to spend time on your contribution, thundering silence is a good sign of widespread (mild) dislike of a given change @@ -375,16 +383,18 @@ about: [developer notes](doc/developer-notes.md), is dangerous or insecure, is messily written, etc. Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give their opinion on the concept itself. - - It may be because your code is too complex for all but a few people. And those people + - It may be because your code is too complex for all but a few people, and those people may not have realized your pull request even exists. A great way to find people who are qualified and care about the code you are touching is the [Git Blame feature](https://help.github.com/articles/tracing-changes-in-a-file/). Simply - find the person touching the code you are touching before you and see if you can find - them and give them a nudge. Don't be incessant about the nudging though. + look up who last modified the code you are changing and see if you can find + them and give them a nudge. Don't be incessant about the nudging, though. - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request - a look. If you think you've been waiting an unreasonably long amount of time (month+) for - no particular reason (few lines changed, etc), this is totally fine. Try to return the favor - when someone else is asking for feedback on their code, and universe balances out. + a look. If you think you've been waiting for an unreasonably long time (say, + more than a month) for no particular reason (a few lines changed, etc.), + this is totally fine. Try to return the favor when someone else is asking + for feedback on their code, and the universe balances out. + - Remember that the best thing you can do while waiting is give review to others! Backporting @@ -393,11 +403,11 @@ Backporting Security and bug fixes can be backported from `master` to release branches. If the backport is non-trivial, it may be appropriate to open an -additional PR, to backport the change, only after the original PR +additional PR to backport the change, but only after the original PR has been merged. 
Otherwise, backports will be done in batches and the maintainers will use the proper `Needs backport (...)` labels -when needed (the original author does not need to worry). +when needed (the original author does not need to worry about it). A backport should contain the following metadata in the commit body: diff --git a/Makefile.am b/Makefile.am index 75a164f49e..1d6358b1d5 100644 --- a/Makefile.am +++ b/Makefile.am @@ -65,10 +65,10 @@ OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \ $(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \ $(top_srcdir)/contrib/macdeploy/detached-sig-create.sh -COVERAGE_INFO = baseline.info \ +COVERAGE_INFO = $(COV_TOOL_WRAPPER) baseline.info \ test_bitcoin_filtered.info total_coverage.info \ baseline_filtered.info functional_test.info functional_test_filtered.info \ - test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_coverage.info + test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_filtered.info fuzz_coverage.info dist-hook: -$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf - @@ -192,7 +192,11 @@ LCOV_FILTER_PATTERN = \ -p "src/secp256k1" \ -p "depends" -baseline.info: +$(COV_TOOL_WRAPPER): + @echo 'exec $(COV_TOOL) "$$@"' > $(COV_TOOL_WRAPPER) + @chmod +x $(COV_TOOL_WRAPPER) + +baseline.info: $(COV_TOOL_WRAPPER) $(LCOV) -c -i -d $(abs_builddir)/src -o $@ baseline_filtered.info: baseline.info diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4 new file mode 100644 index 0000000000..5d20e67464 --- /dev/null +++ b/build-aux/m4/ax_boost_process.m4 @@ -0,0 +1,121 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_PROCESS +# +# DESCRIPTION +# +# Test for Process library from the Boost C++ libraries. The macro +# requires a preceding call to AX_BOOST_BASE. Further documentation is +# available at <http://randspringer.de/boost/index.html>. +# +# This macro calls: +# +# AC_SUBST(BOOST_PROCESS_LIB) +# +# And sets: +# +# HAVE_BOOST_PROCESS +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de> +# Copyright (c) 2008 Michael Tindal +# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com> +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_BOOST_PROCESS], +[ + AC_ARG_WITH([boost-process], + AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@], + [use the Process library from boost - it is possible to specify a certain library for the linker + e.g. 
--with-boost-process=boost_process-gcc-mt ]), + [ + if test "$withval" = "no"; then + want_boost_process="no" + elif test "$withval" = "yes"; then + want_boost_process="yes" + ax_boost_user_process_lib="" + else + want_boost_process="yes" + ax_boost_user_process_lib="$withval" + fi + ], + [want_boost_process="yes"] + ) + + if test "x$want_boost_process" = "xyes"; then + AC_REQUIRE([AC_PROG_CC]) + AC_REQUIRE([AC_CANONICAL_BUILD]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_CACHE_CHECK(whether the Boost::Process library is available, + ax_cv_boost_process, + [AC_LANG_PUSH([C++]) + CXXFLAGS_SAVE=$CXXFLAGS + CXXFLAGS= + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]], + [[boost::process::child* child = new boost::process::child; delete child;]])], + ax_cv_boost_process=yes, ax_cv_boost_process=no) + CXXFLAGS=$CXXFLAGS_SAVE + AC_LANG_POP([C++]) + ]) + if test "x$ax_cv_boost_process" = "xyes"; then + AC_SUBST(BOOST_CPPFLAGS) + + AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available]) + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` + + LDFLAGS_SAVE=$LDFLAGS + if test "x$ax_boost_user_process_lib" = "x"; then + for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], + [link_process="no"]) + done + if test "x$link_process" != "xyes"; then + for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], + [link_process="no"]) + done + fi + + else + for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do + AC_CHECK_LIB($ax_lib, exit, + [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], + [link_process="no"]) + done + + fi + if test "x$ax_lib" = "x"; then + AC_MSG_ERROR(Could not find a version of the Boost::Process library!) + fi + if test "x$link_process" = "xno"; then + AC_MSG_ERROR(Could not link against $ax_lib !) + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi +]) diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h index 35ba8425b3..9d0b50a0b4 100644 --- a/build_msvc/bitcoin_config.h +++ b/build_msvc/bitcoin_config.h @@ -47,6 +47,9 @@ /* define if the Boost::Filesystem library is available */ #define HAVE_BOOST_FILESYSTEM /**/ +/* define if the Boost::Process library is available */ +#define HAVE_BOOST_PROCESS /**/ + /* define if the Boost::System library is available */ #define HAVE_BOOST_SYSTEM /**/ @@ -137,18 +140,6 @@ don't. */ #define HAVE_DECL_STRNLEN 1 -/* Define to 1 if you have the declaration of `__builtin_clz', and to 0 if you - don't. */ -//#define HAVE_DECL___BUILTIN_CLZ 1 - -/* Define to 1 if you have the declaration of `__builtin_clzl', and to 0 if - you don't. */ -//#define HAVE_DECL___BUILTIN_CLZL 1 - -/* Define to 1 if you have the declaration of `__builtin_clzll', and to 0 if - you don't. */ -//#define HAVE_DECL___BUILTIN_CLZLL 1 - /* Define to 1 if you have the <dlfcn.h> header file. 
*/ /* #undef HAVE_DLFCN_H */ diff --git a/build_msvc/common.init.vcxproj b/build_msvc/common.init.vcxproj index c09997d39d..4fd516fff5 100644 --- a/build_msvc/common.init.vcxproj +++ b/build_msvc/common.init.vcxproj @@ -107,10 +107,10 @@ <ClCompile> <WarningLevel>Level3</WarningLevel> <PrecompiledHeader>NotUsing</PrecompiledHeader> - <AdditionalOptions>/utf-8 %(AdditionalOptions)</AdditionalOptions> + <AdditionalOptions>/utf-8 /std:c++17 %(AdditionalOptions)</AdditionalOptions> <DisableSpecificWarnings>4018;4221;4244;4267;4334;4715;4805;4834</DisableSpecificWarnings> <TreatWarningAsError>true</TreatWarningAsError> - <PreprocessorDefinitions>ZMQ_STATIC;NOMINMAX;WIN32;HAVE_CONFIG_H;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_CONSOLE;_WIN32_WINNT=0x0601;%(PreprocessorDefinitions)</PreprocessorDefinitions> + <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;_SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING;ZMQ_STATIC;NOMINMAX;WIN32;HAVE_CONFIG_H;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_CONSOLE;_WIN32_WINNT=0x0601;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\src;..\..\src\univalue\include;..\..\src\secp256k1\include;..\..\src\leveldb\include;..\..\src\leveldb\helpers\memenv;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> </ClCompile> <Link> diff --git a/build_msvc/vcpkg-packages.txt b/build_msvc/vcpkg-packages.txt index 307f295f08..edce8576c3 100644 --- a/build_msvc/vcpkg-packages.txt +++ b/build_msvc/vcpkg-packages.txt @@ -1 +1 @@ -berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
\ No newline at end of file +berkeleydb boost-filesystem boost-multi-index boost-process boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index 80d0df4a78..fae424051d 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -6,6 +6,10 @@ export LC_ALL=C +travis_retry sudo apt update && sudo apt install -y clang-format-9 +sudo update-alternatives --install /usr/bin/clang-format clang-format $(which clang-format-9 ) 100 +sudo update-alternatives --install /usr/bin/clang-format-diff clang-format-diff $(which clang-format-diff-9) 100 + travis_retry pip3 install codespell==1.17.1 travis_retry pip3 install flake8==3.8.3 travis_retry pip3 install yq diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index 711f436630..2413cfca9f 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -31,11 +31,14 @@ export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch} export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")} # Whether to prefer BusyBox over GNU utilities export USE_BUSY_BOX=${USE_BUSY_BOX:-false} + export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true} export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true} export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false} export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-} export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false} +export EXPECTED_TESTS_DURATION_IN_SECONDS=${EXPECTED_TESTS_DURATION_IN_SECONDS:-1000} + export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed} export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:18.04} # Randomize test order. diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh index b70a581532..2e445c126d 100644 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=true export GOAL="install" # -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... 
changed in GCC 7.1" # This could be removed once the ABI change warning does not show up by default -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror" +export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror --with-boost-process" diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh index 5688799f9e..e58003ab19 100644 --- a/ci/test/00_setup_env_i686_centos.sh +++ b/ci/test/00_setup_env_i686_centos.sh @@ -11,5 +11,5 @@ export CONTAINER_NAME=ci_i686_centos_7 export DOCKER_NAME_TAG=centos:7 export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports" +export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports --with-boost-process" export CONFIG_SHELL="/bin/dash" diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index 45a29928cb..b62f1603f4 100644 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -14,4 +14,4 @@ export XCODE_BUILD_ID=11C505 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" -export BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process" diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh index f50efcc33a..5fb127b762 100644 --- a/ci/test/00_setup_env_mac_host.sh +++ b/ci/test/00_setup_env_mac_host.sh @@ -10,9 +10,13 @@ export HOST=x86_64-apple-darwin16 export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well) export PIP_PACKAGES="zmq" export GOAL="install" -export BITCOIN_CONFIG="--enable-gui --enable-reduce-exports --enable-werror" -export TEST_RUNNER_EXTRA="wallet_disable" # Only run wallet_disable as a smoke test, see https://github.com/bitcoin/bitcoin/pull/17240#issuecomment-546022121 why the other tests are disabled -export RUN_SECURITY_TESTS="true" -# Run without depends +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process" export NO_DEPENDS=1 export OSX_SDK="" +export CCACHE_SIZE=300M + +export RUN_SECURITY_TESTS="true" +if [ "$TRAVIS_REPO_SLUG" != "bitcoin/bitcoin" ]; then + export RUN_FUNCTIONAL_TESTS="false" + export EXPECTED_TESTS_DURATION_IN_SECONDS=200 +fi diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 225f77df92..5995964f17 100644 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -10,5 +10,6 @@ export CONTAINER_NAME=ci_native_asan export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev" export DOCKER_NAME_TAG=ubuntu:20.04 export NO_DEPENDS=1 +export TEST_RUNNER_EXTRA="--timeout-factor=4" # Increase timeout because sanitizers slow down export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++" +export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb 
--with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++ --with-boost-process" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index 43ee219ef9..a32de4a6b5 100644 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -14,4 +14,5 @@ export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++ --with-boost-process" +export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh index c27d525003..e06a40eb23 100644 --- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -16,3 +16,4 @@ export RUN_FUZZ_TESTS=true export FUZZ_TESTS_CONFIG="--valgrind" export GOAL="install" export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang CXX=clang++" +export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh new file mode 100644 index 0000000000..6a4979990b --- /dev/null +++ b/ci/test/00_setup_env_native_msan.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export DOCKER_NAME_TAG="ubuntu:20.04" +LIBCXX_DIR="${BASE_ROOT_DIR}/ci/scratch/msan/build/" +export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" +LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument" +export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" +export BDB_PREFIX="${BASE_ROOT_DIR}/db4" + +export CONTAINER_NAME="ci_native_msan" +export PACKAGES="clang-9 llvm-9 cmake" +export DEP_OPTS="NO_WALLET=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' boost_cxxflags='-std=c++11 -fvisibility=hidden -fPIC ${MSAN_AND_LIBCXX_FLAGS}' zeromq_cxxflags='-std=c++11 ${MSAN_AND_LIBCXX_FLAGS}'" +export GOAL="install" +export BITCOIN_CONFIG="--enable-wallet --with-sanitizers=memory --with-asm=no --prefix=${BASE_ROOT_DIR}/depends/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' BDB_LIBS='-L${BDB_PREFIX}/lib -ldb_cxx-4.8' BDB_CFLAGS='-I${BDB_PREFIX}/include'" +export USE_MEMORY_SANITIZER="true" +export RUN_FUNCTIONAL_TESTS="false" +export CCACHE_SIZE=250M diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh index 786f0f927f..522a5d9fc2 100644 --- a/ci/test/00_setup_env_native_multiprocess.sh +++ b/ci/test/00_setup_env_native_multiprocess.sh @@ -11,5 +11,5 @@ export DOCKER_NAME_TAG=ubuntu:20.04 export PACKAGES="cmake python3" export DEP_OPTS="MULTIPROCESS=1" export GOAL="install" -export BITCOIN_CONFIG="" +export BITCOIN_CONFIG="--with-boost-process" export TEST_RUNNER_ENV="BITCOIND=bitcoin-node" diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh index 
553dab1491..0a09bfe230 100644 --- a/ci/test/00_setup_env_native_nowallet.sh +++ b/ci/test/00_setup_env_native_nowallet.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_nowallet export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tests in python3.5, see doc/dependencies.md -export PACKAGES="python3-zmq" +export PACKAGES="python3-zmq clang-3.8 llvm-3.8" # Use clang-3.8 to test C++11 compatibility, see doc/dependencies.md export DEP_OPTS="NO_WALLET=1" export GOAL="install" -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports" +export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8 --with-boost-process" diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 6e2ff729a2..f9d869b4fd 100644 --- a/ci/test/00_setup_env_native_qt5.sh +++ b/ci/test/00_setup_env_native_qt5.sh @@ -16,4 +16,4 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\"" +export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 87b9d9da95..fc18483425 100644 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -10,5 +10,6 @@ export CONTAINER_NAME=ci_native_tsan export DOCKER_NAME_TAG=ubuntu:20.04 export PACKAGES="clang llvm libc++abi-dev libc++-dev python3-zmq" export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'" +export TEST_RUNNER_EXTRA="--exclude feature_block --timeout-factor=4" # Increase timeout because sanitizers slow down. Low memory on Travis machines, exclude feature_block. 
export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++'" +export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++' --with-boost-process" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh index ff7c5fe913..0041122f1e 100644 --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -10,6 +10,6 @@ export CONTAINER_NAME=ci_native_valgrind export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev" export USE_VALGRIND=1 export NO_DEPENDS=1 -export TEST_RUNNER_EXTRA="--exclude rpc_bind --factor=2" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 +export TEST_RUNNER_EXTRA="--exclude rpc_bind --timeout-factor=4" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export GOAL="install" export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++" # TODO enable GUI diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh index c180d023de..fe330920d0 100644 --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -22,4 +22,4 @@ export DOCKER_NAME_TAG="debian:buster" export RUN_UNIT_TESTS=true export RUN_FUNCTIONAL_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb" +export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --with-boost-process" diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index eb8b870dd6..2b351dff6d 100644 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -13,4 +13,4 @@ export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64" export RUN_FUNCTIONAL_TESTS=false export RUN_SECURITY_TESTS="true" export GOAL="deploy" -export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests" +export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process" diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh index 165983d906..d3566914ac 100755 --- a/ci/test/04_install.sh +++ b/ci/test/04_install.sh @@ -14,7 +14,6 @@ if [[ $QEMU_USER_CMD == qemu-s390* ]]; then fi if [ "$TRAVIS_OS_NAME" == "osx" ]; then - export PATH="/usr/local/opt/ccache/libexec:$PATH" ${CI_RETRY_EXE} pip3 install $PIP_PACKAGES fi @@ -90,6 +89,15 @@ export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/ DOCKER_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/" +if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then + DOCKER_EXEC "update-alternatives --install /usr/bin/clang++ clang++ \$(which clang++-9) 100" + DOCKER_EXEC "update-alternatives --install /usr/bin/clang clang \$(which clang-9) 100" + DOCKER_EXEC "mkdir -p ${BASE_SCRATCH_DIR}/msan/build/" + DOCKER_EXEC "git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-10.0.0 ${BASE_SCRATCH_DIR}/msan/llvm-project" + DOCKER_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && cmake -DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi' -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=Memory -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DLLVM_TARGETS_TO_BUILD=X86 ../llvm-project/llvm/" + DOCKER_EXEC "cd 
${BASE_SCRATCH_DIR}/msan/build/ && make $MAKEJOBS cxx" +fi + if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then echo "Create $BASE_ROOT_DIR" DOCKER_EXEC rsync -a /ro_base/ $BASE_ROOT_DIR diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh index de33881419..131ea21677 100755 --- a/ci/test/05_before_script.sh +++ b/ci/test/05_before_script.sh @@ -21,6 +21,14 @@ OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_BASENAME}" if [ -n "$XCODE_VERSION" ] && [ ! -f "$OSX_SDK_PATH" ]; then curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH" fi + +if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then + # Use BDB compiled using install_db4.sh script to work around linking issue when using BDB + # from depends. See https://github.com/bitcoin/bitcoin/pull/18288#discussion_r433189350 for + # details. + DOCKER_EXEC "contrib/install_db4.sh \$(pwd) --enable-umrw CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +fi + if [ -n "$XCODE_VERSION" ] && [ -f "$OSX_SDK_PATH" ]; then DOCKER_EXEC tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH" fi @@ -40,6 +48,6 @@ if [ -z "$NO_DEPENDS" ]; then fi if [ -n "$PREVIOUS_RELEASES_TO_DOWNLOAD" ]; then BEGIN_FOLD previous-versions - DOCKER_EXEC contrib/devtools/previous_release.sh -b -t "$PREVIOUS_RELEASES_DIR" "${PREVIOUS_RELEASES_TO_DOWNLOAD}" + DOCKER_EXEC contrib/devtools/previous_release.py -b -t "$PREVIOUS_RELEASES_DIR" "${PREVIOUS_RELEASES_TO_DOWNLOAD}" END_FOLD fi diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh index b68cd9d3f8..17d765b862 100755 --- a/ci/test/06_script_a.sh +++ b/ci/test/06_script_a.sh @@ -37,6 +37,14 @@ END_FOLD set -o errtrace trap 'DOCKER_EXEC "cat ${BASE_SCRATCH_DIR}/sanitizer-output/* 2> /dev/null"' ERR +if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then + # MemorySanitizer (MSAN) does not support tracking memory initialization done by + # using the Linux getrandom syscall. Avoid using getrandom by undefining + # HAVE_SYS_GETRANDOM. See https://github.com/google/sanitizers/issues/852 for + # details. + DOCKER_EXEC 'grep -v HAVE_SYS_GETRANDOM src/config/bitcoin-config.h > src/config/bitcoin-config.h.tmp && mv src/config/bitcoin-config.h.tmp src/config/bitcoin-config.h' +fi + BEGIN_FOLD build DOCKER_EXEC make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && DOCKER_EXEC make $GOAL V=1 ; false ) END_FOLD diff --git a/configure.ac b/configure.ac index 12bece6903..2acd702600 100644 --- a/configure.ac +++ b/configure.ac @@ -14,6 +14,12 @@ AC_CONFIG_HEADERS([src/config/bitcoin-config.h]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([build-aux/m4]) +m4_ifndef([PKG_PROG_PKG_CONFIG], [AC_MSG_ERROR([PKG_PROG_PKG_CONFIG macro not found. 
Please install pkg-config and re-run autogen.sh])]) +PKG_PROG_PKG_CONFIG +if test "x$PKG_CONFIG" = x; then + AC_MSG_ERROR([pkg-config not found]) +fi + BITCOIN_DAEMON_NAME=bitcoind BITCOIN_GUI_NAME=bitcoin-qt BITCOIN_CLI_NAME=bitcoin-cli @@ -99,6 +105,7 @@ AC_PATH_TOOL(AR, ar) AC_PATH_TOOL(RANLIB, ranlib) AC_PATH_TOOL(STRIP, strip) AC_PATH_TOOL(GCOV, gcov) +AC_PATH_TOOL(LLVM_COV, llvm-cov) AC_PATH_PROG(LCOV, lcov) dnl Python 3.5 is specified in .python-version and should be used if available, see doc/dependencies.md AC_PATH_PROGS([PYTHON], [python3.5 python3.6 python3.7 python3.8 python3 python]) @@ -229,16 +236,6 @@ AC_ARG_ENABLE([zmq], [use_zmq=$enableval], [use_zmq=yes]) -AC_ARG_ENABLE([bip70], - [AS_HELP_STRING([--enable-bip70], - [BIP70 (payment protocol) support in the GUI (no longer supported)])], - [enable_bip70=$enableval], - [enable_bip70=no]) - -if test x$enable_bip70 != xno; then - AC_MSG_ERROR([BIP70 is no longer supported!]) -fi - AC_ARG_WITH([libmultiprocess], [AS_HELP_STRING([--with-libmultiprocess=yes|no|auto], [Build with libmultiprocess library. (default: auto, i.e. detect with pkg-config)])], @@ -396,11 +393,11 @@ if test "x$CXXFLAGS_overridden" = "xno"; then AX_CHECK_COMPILE_FLAG([-Wall],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wall"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wextra],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wextra"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wgnu],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wgnu"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wformat],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat"],,[[$CXXFLAG_WERROR]]) + dnl some compilers will ignore -Wformat-security without -Wformat, so just combine the two here. + AX_CHECK_COMPILE_FLAG([-Wformat -Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat -Wformat-security"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wvla],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wshadow-field],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wshadow-field"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wswitch],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wswitch"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat-security"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wthread-safety],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wrange-loop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wredundant-decls],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"],,[[$CXXFLAG_WERROR]]) @@ -570,13 +567,8 @@ AC_ARG_WITH([daemon], [build_bitcoind=$withval], [build_bitcoind=yes]) -use_pkgconfig=yes case $host in *mingw*) - - dnl pkgconfig does more harm than good with MinGW - use_pkgconfig=no - TARGET_OS=windows AC_CHECK_LIB([kernel32], [GetModuleFileNameA],, AC_MSG_ERROR(libkernel32 missing)) AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(libuser32 missing)) @@ -635,9 +627,10 @@ case $host in bdb_prefix=$($BREW --prefix berkeley-db4 2>/dev/null) qt5_prefix=$($BREW --prefix qt5 2>/dev/null) - if test x$bdb_prefix != x; then - CPPFLAGS="$CPPFLAGS -I$bdb_prefix/include" - LIBS="$LIBS -L$bdb_prefix/lib" + if test x$bdb_prefix != x && test "x$BDB_CFLAGS" = "x" && test "x$BDB_LIBS" = "x"; then + dnl This must precede the call to BITCOIN_FIND_BDB48 below. 
+ BDB_CFLAGS="-I$bdb_prefix/include" + BDB_LIBS="-L$bdb_prefix/lib -ldb_cxx-4.8" fi if test x$qt5_prefix != x; then PKG_CONFIG_PATH="$qt5_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" @@ -650,6 +643,7 @@ case $host in BUILD_OS=darwin ;; *) + AC_PATH_TOOL([DSYMUTIL], [dsymutil], dsymutil) AC_PATH_TOOL([INSTALLNAMETOOL], [install_name_tool], install_name_tool) AC_PATH_TOOL([OTOOL], [otool], otool) AC_PATH_PROGS([GENISOIMAGE], [genisoimage mkisofs],genisoimage) @@ -679,16 +673,6 @@ case $host in ;; esac -if test x$use_pkgconfig = xyes; then - m4_ifndef([PKG_PROG_PKG_CONFIG], [AC_MSG_ERROR(PKG_PROG_PKG_CONFIG macro not found. Please install pkg-config and re-run autogen.sh.)]) - m4_ifdef([PKG_PROG_PKG_CONFIG], [ - PKG_PROG_PKG_CONFIG - if test x"$PKG_CONFIG" = "x"; then - AC_MSG_ERROR(pkg-config not found.) - fi - ]) -fi - if test x$use_extended_functional_tests != xno; then AC_SUBST(EXTENDED_FUNCTIONAL_TESTS, --extended) fi @@ -697,16 +681,37 @@ if test x$use_lcov = xyes; then if test x$LCOV = x; then AC_MSG_ERROR("lcov testing requested but lcov not found") fi - if test x$GCOV = x; then - AC_MSG_ERROR("lcov testing requested but gcov not found") - fi if test x$PYTHON = x; then AC_MSG_ERROR("lcov testing requested but python not found") fi if test x$GENHTML = x; then AC_MSG_ERROR("lcov testing requested but genhtml not found") fi - LCOV="$LCOV --gcov-tool=$GCOV" + + AC_MSG_CHECKING([whether compiler is Clang]) + AC_PREPROC_IFELSE([AC_LANG_SOURCE([[ + #if defined(__clang__) && defined(__llvm__) + // Compiler is Clang + #else + # error Compiler is not Clang + #endif + ]])],[ + AC_MSG_RESULT([yes]) + if test x$LLVM_COV = x; then + AC_MSG_ERROR([lcov testing requested but llvm-cov not found]) + fi + COV_TOOL="$LLVM_COV gcov" + ],[ + AC_MSG_RESULT([no]) + if test x$GCOV = x; then + AC_MSG_ERROR([lcov testing requested but gcov not found]) + fi + COV_TOOL="$GCOV" + ]) + AC_SUBST(COV_TOOL) + AC_SUBST(COV_TOOL_WRAPPER, "cov_tool_wrapper.sh") + LCOV="$LCOV --gcov-tool $(pwd)/$COV_TOOL_WRAPPER" + AX_CHECK_LINK_FLAG([[--coverage]], [LDFLAGS="$LDFLAGS --coverage"], [AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")]) AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"], @@ -802,6 +807,7 @@ if test x$use_hardening != xno; then AX_CHECK_LINK_FLAG([[-Wl,--high-entropy-va]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--high-entropy-va"],, [[$LDFLAG_WERROR]]) AX_CHECK_LINK_FLAG([[-Wl,-z,relro]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,relro"],, [[$LDFLAG_WERROR]]) AX_CHECK_LINK_FLAG([[-Wl,-z,now]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,now"],, [[$LDFLAG_WERROR]]) + AX_CHECK_LINK_FLAG([[-Wl,-z,separate-code]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,separate-code"],, [[$LDFLAG_WERROR]]) AX_CHECK_LINK_FLAG([[-fPIE -pie]], [PIE_FLAGS="-fPIE"; HARDENED_LDFLAGS="$HARDENED_LDFLAGS -pie"],, [[$CXXFLAG_WERROR]]) case $host in @@ -849,7 +855,21 @@ AC_CHECK_DECLS([bswap_16, bswap_32, bswap_64],,, #include <byteswap.h> #endif]) -AC_CHECK_DECLS([__builtin_clz, __builtin_clzl, __builtin_clzll]) +AC_MSG_CHECKING(for __builtin_clzl) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ + (void) __builtin_clzl(0); + ]])], + [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_BUILTIN_CLZL, 1, [Define this symbol if you have __builtin_clzl])], + [ AC_MSG_RESULT(no)] +) + +AC_MSG_CHECKING(for __builtin_clzll) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ + (void) __builtin_clzll(0); + ]])], + [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_BUILTIN_CLZLL, 1, [Define this symbol if you have __builtin_clzll])], + 
[ AC_MSG_RESULT(no)] +) dnl Check for malloc_info (for memory statistics information in getmemoryinfo) AC_MSG_CHECKING(for getmemoryinfo) @@ -1182,9 +1202,9 @@ fi if test x$use_boost = xyes; then dnl Minimum required Boost version -define(MINIMUM_REQUIRED_BOOST, 1.47.0) +define(MINIMUM_REQUIRED_BOOST, 1.58.0) -dnl Check for boost libs +dnl Check for Boost libs AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST]) if test x$want_boost = xno; then AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]]) @@ -1193,30 +1213,15 @@ AX_BOOST_SYSTEM AX_BOOST_FILESYSTEM AX_BOOST_THREAD +dnl Opt-in to boost-process +AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] ) + dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic dnl counter implementations. In 1.63 and later the std::atomic approach is default. m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS" -if test x$use_reduce_exports = xyes; then - AC_MSG_CHECKING([for working boost reduced exports]) - TEMP_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS" - AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ - @%:@include <boost/version.hpp> - ]], [[ - #if BOOST_VERSION >= 104900 - // Everything is okay - #else - # error Boost version is too old - #endif - ]])],[ - AC_MSG_RESULT(yes) - ],[ - AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduce-exports.]) - ]) - CPPFLAGS="$TEMP_CPPFLAGS" -fi +BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" fi if test x$use_reduce_exports = xyes; then @@ -1230,7 +1235,6 @@ if test x$use_tests = xyes; then AC_MSG_ERROR(hexdump is required for tests) fi - if test x$use_boost = xyes; then AX_BOOST_UNIT_TEST_FRAMEWORK @@ -1256,157 +1260,66 @@ if test x$use_tests = xyes; then fi fi -if test x$use_boost = xyes; then - -BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" +dnl libevent check +if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then + PKG_CHECK_MODULES([EVENT], [libevent >= 2.0.21], [use_libevent=yes], [AC_MSG_ERROR([libevent version 2.0.21 or greater not found.])]) + if test x$TARGET_OS != xwindows; then + PKG_CHECK_MODULES([EVENT_PTHREADS], [libevent_pthreads >= 2.0.21],, [AC_MSG_ERROR([libevent_pthreads version 2.0.21 or greater not found.])]) + fi +fi -dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums -dnl using c++98 constructs. Unfortunately, this implementation detail leaked into -dnl the abi. This was fixed in 1.57. - -dnl When building against that installed version using c++11, the headers pick up -dnl on the native c++11 scoped enum support and enable it, however it will fail to -dnl link. This can be worked around by disabling c++11 scoped enums if linking will -dnl fail. -dnl BOOST_NO_SCOPED_ENUMS was changed to BOOST_NO_CXX11_SCOPED_ENUMS in 1.51. 
- -TEMP_LIBS="$LIBS" -LIBS="$BOOST_LIBS $LIBS" -TEMP_CPPFLAGS="$CPPFLAGS" -CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" -AC_MSG_CHECKING([for mismatched boost c++11 scoped enums]) -AC_LINK_IFELSE([AC_LANG_PROGRAM([[ - #include <boost/config.hpp> - #include <boost/version.hpp> - #if !defined(BOOST_NO_SCOPED_ENUMS) && !defined(BOOST_NO_CXX11_SCOPED_ENUMS) && BOOST_VERSION < 105700 - #define BOOST_NO_SCOPED_ENUMS - #define BOOST_NO_CXX11_SCOPED_ENUMS - #define CHECK - #endif - #include <boost/filesystem.hpp> - ]],[[ - #if defined(CHECK) - boost::filesystem::copy_file("foo", "bar"); - #else - choke; - #endif - ]])], - [AC_MSG_RESULT(mismatched); BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_SCOPED_ENUMS -DBOOST_NO_CXX11_SCOPED_ENUMS"], [AC_MSG_RESULT(ok)]) -LIBS="$TEMP_LIBS" -CPPFLAGS="$TEMP_CPPFLAGS" +dnl QR Code encoding library check +if test "x$use_qr" != xno; then + BITCOIN_QT_CHECK([PKG_CHECK_MODULES([QR], [libqrencode], [have_qrencode=yes], [have_qrencode=no])]) fi -if test x$use_pkgconfig = xyes; then - : dnl - m4_ifdef( - [PKG_CHECK_MODULES], - [ - if test x$use_qr != xno; then - BITCOIN_QT_CHECK([PKG_CHECK_MODULES([QR], [libqrencode], [have_qrencode=yes], [have_qrencode=no])]) - fi - if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then - PKG_CHECK_MODULES([EVENT], [libevent >= 2.0.21], [use_libevent=yes], [AC_MSG_ERROR(libevent version 2.0.21 or greater not found.)]) - if test x$TARGET_OS != xwindows; then - PKG_CHECK_MODULES([EVENT_PTHREADS], [libevent_pthreads >= 2.0.21],, [AC_MSG_ERROR(libevent_pthreads version 2.0.21 or greater not found.)]) - fi - fi +dnl ZMQ check - if test "x$use_zmq" = "xyes"; then - PKG_CHECK_MODULES([ZMQ],[libzmq >= 4], - [AC_DEFINE([ENABLE_ZMQ],[1],[Define to 1 to enable ZMQ functions])], - [AC_DEFINE([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions]) - AC_MSG_WARN([libzmq version 4.x or greater not found, disabling]) - use_zmq=no]) - else - AC_DEFINE_UNQUOTED([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions]) - fi - ] - ) +if test "x$use_zmq" = xyes; then + PKG_CHECK_MODULES([ZMQ], [libzmq >= 4], + AC_DEFINE([ENABLE_ZMQ], [1], [Define to 1 to enable ZMQ functions]), + [AC_DEFINE([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions]) + AC_MSG_WARN([libzmq version 4.x or greater not found, disabling]) + use_zmq=no]) else + AC_DEFINE_UNQUOTED([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions]) +fi - if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then - AC_CHECK_HEADER([event2/event.h], [use_libevent=yes], AC_MSG_ERROR(libevent headers missing),) - AC_CHECK_LIB([event],[main],EVENT_LIBS=-levent,AC_MSG_ERROR(libevent missing)) - if test x$TARGET_OS != xwindows; then - AC_CHECK_LIB([event_pthreads],[main],EVENT_PTHREADS_LIBS=-levent_pthreads,AC_MSG_ERROR(libevent_pthreads missing)) - fi - fi - - if test "x$use_zmq" = "xyes"; then - AC_CHECK_HEADER([zmq.h], - [AC_DEFINE([ENABLE_ZMQ],[1],[Define to 1 to enable ZMQ functions])], - [AC_MSG_WARN([zmq.h not found, disabling zmq support]) - use_zmq=no - AC_DEFINE([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions])]) - AC_CHECK_LIB([zmq],[zmq_ctx_shutdown],ZMQ_LIBS=-lzmq, - [AC_MSG_WARN([libzmq >= 4.0 not found, disabling zmq support]) - use_zmq=no - AC_DEFINE([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions])]) - else - AC_DEFINE_UNQUOTED([ENABLE_ZMQ],[0],[Define to 1 to enable ZMQ functions]) - fi - - if test "x$use_zmq" = "xyes"; then - dnl Assume libzmq was built for static linking - case $host in - 
*mingw*) - ZMQ_CFLAGS="$ZMQ_CFLAGS -DZMQ_STATIC" - ;; - esac - fi - - if test x$use_qr != xno; then - BITCOIN_QT_CHECK([AC_CHECK_LIB([qrencode], [main],[QR_LIBS=-lqrencode], [have_qrencode=no])]) - BITCOIN_QT_CHECK([AC_CHECK_HEADER([qrencode.h],, have_qrencode=no)]) - fi +if test "x$use_zmq" = xyes; then + dnl Assume libzmq was built for static linking + case $host in + *mingw*) + ZMQ_CFLAGS="$ZMQ_CFLAGS -DZMQ_STATIC" + ;; + esac fi dnl univalue check need_bundled_univalue=yes - if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench = xnonononononono; then need_bundled_univalue=no else - -if test x$system_univalue != xno ; then - found_univalue=no - if test x$use_pkgconfig = xyes; then - : #NOP - m4_ifdef( - [PKG_CHECK_MODULES], - [ - PKG_CHECK_MODULES([UNIVALUE],[libunivalue >= 1.0.4],[found_univalue=yes],[true]) - ] - ) - else - AC_CHECK_HEADER([univalue.h],[ - AC_CHECK_LIB([univalue], [main],[ - UNIVALUE_LIBS=-lunivalue - found_univalue=yes - ],[true]) - ],[true]) + if test x$system_univalue != xno; then + PKG_CHECK_MODULES([UNIVALUE], [libunivalue >= 1.0.4], [found_univalue=yes], [found_univalue=no]) + if test x$found_univalue = xyes; then + system_univalue=yes + need_bundled_univalue=no + elif test x$system_univalue = xyes; then + AC_MSG_ERROR([univalue not found]) + else + system_univalue=no + fi fi - if test x$found_univalue = xyes ; then - system_univalue=yes - need_bundled_univalue=no - elif test x$system_univalue = xyes ; then - AC_MSG_ERROR([univalue not found]) - else - system_univalue=no + if test x$need_bundled_univalue = xyes; then + UNIVALUE_CFLAGS='-I$(srcdir)/univalue/include' + UNIVALUE_LIBS='univalue/libunivalue.la' fi fi -if test x$need_bundled_univalue = xyes ; then - UNIVALUE_CFLAGS='-I$(srcdir)/univalue/include' - UNIVALUE_LIBS='univalue/libunivalue.la' -fi - -fi - AM_CONDITIONAL([EMBEDDED_UNIVALUE],[test x$need_bundled_univalue = xyes]) AC_SUBST(UNIVALUE_CFLAGS) AC_SUBST(UNIVALUE_LIBS) @@ -1415,12 +1328,10 @@ dnl libmultiprocess library check libmultiprocess_found=no if test "x$with_libmultiprocess" = xyes || test "x$with_libmultiprocess" = xauto; then - if test "x$use_pkgconfig" = xyes; then - m4_ifdef([PKG_CHECK_MODULES], [PKG_CHECK_MODULES([LIBMULTIPROCESS], [libmultiprocess], [ - libmultiprocess_found=yes; - libmultiprocess_prefix=`$PKG_CONFIG --variable=prefix libmultiprocess`; - ], [true])]) - fi + m4_ifdef([PKG_CHECK_MODULES], [PKG_CHECK_MODULES([LIBMULTIPROCESS], [libmultiprocess], [ + libmultiprocess_found=yes; + libmultiprocess_prefix=`$PKG_CONFIG --variable=prefix libmultiprocess`; + ], [true])]) elif test "x$with_libmultiprocess" != xno; then AC_MSG_ERROR([--with-libmultiprocess=$with_libmultiprocess value is not yes, auto, or no]) fi @@ -1736,6 +1647,7 @@ esac echo echo "Options used to compile and link:" +echo " boost process = $ax_cv_boost_process" echo " multiprocess = $build_multiprocess" echo " with wallet = $enable_wallet" echo " with gui / qt = $bitcoin_enable_qt" diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py index 084914f11a..9a555c70bb 100755 --- a/contrib/devtools/copyright_header.py +++ b/contrib/devtools/copyright_header.py @@ -22,6 +22,7 @@ EXCLUDE = [ 'src/reverse_iterator.h', 'src/test/fuzz/FuzzedDataProvider.h', 'src/tinyformat.h', + 'src/bench/nanobench.h', 'test/functional/test_framework/bignum.py', # python init: '*__init__.py', diff --git a/contrib/devtools/previous_release.py b/contrib/devtools/previous_release.py new file 
mode 100755 index 0000000000..5599051cf3 --- /dev/null +++ b/contrib/devtools/previous_release.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2018-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +# Download or build previous releases. +# Needs curl and tar to download a release, or the build dependencies when +# building a release. + +import argparse +import contextlib +from fnmatch import fnmatch +import os +from pathlib import Path +import re +import shutil +import subprocess +import sys +import hashlib + + +@contextlib.contextmanager +def pushd(new_dir) -> None: + previous_dir = os.getcwd() + os.chdir(new_dir) + try: + yield + finally: + os.chdir(previous_dir) + + +def download_binary(tag, args) -> int: + if Path(tag).is_dir(): + if not args.remove_dir: + print('Using cached {}'.format(tag)) + return 0 + shutil.rmtree(tag) + Path(tag).mkdir() + bin_path = 'bin/bitcoin-core-{}'.format(tag[1:]) + match = re.compile('v(.*)(rc[0-9]+)$').search(tag) + if match: + bin_path = 'bin/bitcoin-core-{}/test.{}'.format( + match.group(1), match.group(2)) + tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format( + tag=tag[1:], platform=args.platform) + sha256Sums = "SHA256SUMS-{tag}.asc".format(tag=tag[1:]) + tarballUrl = 'https://bitcoincore.org/{bin_path}/{tarball}'.format( + bin_path=bin_path, tarball=tarball) + sha256SumsUrl = 'https://bitcoincore.org/{bin_path}/SHA256SUMS.asc'.format( + bin_path=bin_path) + + print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl)) + print('Fetching: {sha256SumsUrl}'.format(sha256SumsUrl=sha256SumsUrl)) + + header, status = subprocess.Popen( + ['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate() + if re.search("404 Not Found", header.decode("utf-8")): + print("Binary tag was not found") + return 1 + + curlCmds = [ + ['curl', '--remote-name', tarballUrl], + ['curl', '--output', sha256Sums, sha256SumsUrl], + ] + + for cmd in curlCmds: + ret = subprocess.run(cmd).returncode + if ret: + return ret + + hasher = hashlib.sha256() + with open(tarball, "rb") as afile: + hasher.update(afile.read()) + tarballHash = hasher.hexdigest() + tarballHash = '{} {}\n'.format(tarballHash, tarball) + with open(sha256Sums, 'r', encoding="utf-8") as afile: + shasums = afile.readlines() + + if tarballHash not in shasums: + print("Checksum did not match") + Path(tarball).unlink() + return 1 + print("Checksum matched") + + # Bitcoin Core Release Signing Keys v0.11.0+ + signingKey = "01EA5486DE18A882D4C2684590C8019E36C2E964" + + isKeyPresent = subprocess.run( + ["gpg", "--list-keys", signingKey]).returncode + if isKeyPresent: + return isKeyPresent + + isVerified = subprocess.run( + ["gpg", "--verify", sha256Sums]).returncode + if isVerified: + return isVerified + + # Extract tarball + ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag, + '--strip-components=1', + 'bitcoin-{tag}'.format(tag=tag[1:])]).returncode + if ret: + return ret + + Path(tarball).unlink() + Path(sha256Sums).unlink() + return 0 + + +def build_release(tag, args) -> int: + githubUrl = "https://github.com/bitcoin/bitcoin" + if args.remove_dir: + if Path(tag).is_dir(): + shutil.rmtree(tag) + if not Path(tag).is_dir(): + # fetch new tags + subprocess.run( + ["git", "fetch", githubUrl, "--tags"]) + output = subprocess.check_output(['git', 'tag', '-l', tag]) + if not output: + print('Tag {} not found'.format(tag)) + return 1 + ret = subprocess.run([ + 'git', 
'clone', githubUrl, tag + ]).returncode + if ret: + return ret + with pushd(tag): + ret = subprocess.run(['git', 'checkout', tag]).returncode + if ret: + return ret + host = args.host + if args.depends: + with pushd('depends'): + ret = subprocess.run(['make', 'NO_QT=1']).returncode + if ret: + return ret + host = os.environ.get( + 'HOST', subprocess.check_output(['./config.guess'])) + config_flags = '--prefix={pwd}/depends/{host} '.format( + pwd=os.getcwd(), + host=host) + args.config_flags + cmds = [ + './autogen.sh', + './configure {}'.format(config_flags), + 'make', + ] + for cmd in cmds: + ret = subprocess.run(cmd.split()).returncode + if ret: + return ret + # Move binaries, so they're in the same place as in the + # release download + Path('bin').mkdir(exist_ok=True) + files = ['bitcoind', 'bitcoin-cli', 'bitcoin-tx'] + for f in files: + Path('src/'+f).rename('bin/'+f) + return 0 + + +def check_host(args) -> int: + args.host = os.environ.get('HOST', subprocess.check_output( + './depends/config.guess').decode()) + if args.download_binary: + platforms = { + 'x86_64-*-linux*': 'x86_64-linux-gnu', + 'x86_64-apple-darwin*': 'osx64', + } + args.platform = '' + for pattern, target in platforms.items(): + if fnmatch(args.host, pattern): + args.platform = target + if not args.platform: + print('Not sure which binary to download for {}'.format(args.host)) + return 1 + return 0 + + +def main(args) -> int: + Path(args.target_dir).mkdir(exist_ok=True, parents=True) + print("Releases directory: {}".format(args.target_dir)) + ret = check_host(args) + if ret: + return ret + if args.download_binary: + with pushd(args.target_dir): + for tag in args.tags: + ret = download_binary(tag, args) + if ret: + return ret + return 0 + args.config_flags = os.environ.get('CONFIG_FLAGS', '') + args.config_flags += ' --without-gui --disable-tests --disable-bench' + with pushd(args.target_dir): + for tag in args.tags: + ret = build_release(tag, args) + if ret: + return ret + return 0 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-r', '--remove-dir', action='store_true', + help='remove existing directory.') + parser.add_argument('-d', '--depends', action='store_true', + help='use depends.') + parser.add_argument('-b', '--download-binary', action='store_true', + help='download release binary.') + parser.add_argument('-t', '--target-dir', action='store', + help='target directory.', default='releases') + parser.add_argument('tags', nargs='+', + help="release tags. e.g.: v0.18.1 v0.20.0rc2") + args = parser.parse_args() + sys.exit(main(args)) diff --git a/contrib/devtools/previous_release.sh b/contrib/devtools/previous_release.sh deleted file mode 100755 index d375291f47..0000000000 --- a/contrib/devtools/previous_release.sh +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Build previous releases. 
- -export LC_ALL=C - -CONFIG_FLAGS="" -FUNCTIONAL_TESTS=0 -DELETE_EXISTING=0 -USE_DEPENDS=0 -DOWNLOAD_BINARY=0 -CONFIG_FLAGS="" -TARGET="releases" - -while getopts ":hfrdbt:" opt; do - case $opt in - h) - echo "Usage: .previous_release.sh [options] tag1 tag2" - echo " options:" - echo " -h Print this message" - echo " -f Configure for functional tests" - echo " -r Remove existing directory" - echo " -d Use depends" - echo " -b Download release binary" - echo " -t Target directory (default: releases)" - exit 0 - ;; - f) - FUNCTIONAL_TESTS=1 - CONFIG_FLAGS="$CONFIG_FLAGS --without-gui --disable-tests --disable-bench" - ;; - r) - DELETE_EXISTING=1 - ;; - d) - USE_DEPENDS=1 - ;; - b) - DOWNLOAD_BINARY=1 - ;; - t) - TARGET=$OPTARG - ;; - \?) - echo "Invalid option: -$OPTARG" >&2 - exit 1 - ;; - esac -done - -shift $((OPTIND-1)) - -if [ -z "$1" ]; then - echo "Specify release tag(s), e.g.: .previous_release v0.15.1" - exit 1 -fi - -if [ ! -d "$TARGET" ]; then - mkdir -p $TARGET -fi - -if [ "$DOWNLOAD_BINARY" -eq "1" ]; then - HOST="${HOST:-$(./depends/config.guess)}" - case "$HOST" in - x86_64-*-linux*) - PLATFORM=x86_64-linux-gnu - ;; - x86_64-apple-darwin*) - PLATFORM=osx64 - ;; - *) - echo "Not sure which binary to download for $HOST." - exit 1 - ;; - esac -fi - -echo "Releases directory: $TARGET" -pushd "$TARGET" || exit 1 -{ - for tag in "$@" - do - if [ "$DELETE_EXISTING" -eq "1" ]; then - if [ -d "$tag" ]; then - rm -r "$tag" - fi - fi - - if [ "$DOWNLOAD_BINARY" -eq "0" ]; then - - if [ ! -d "$tag" ]; then - if [ -z $(git tag -l "$tag") ]; then - echo "Tag $tag not found" - exit 1 - fi - - git clone https://github.com/bitcoin/bitcoin "$tag" - pushd "$tag" || exit 1 - { - git checkout "$tag" - if [ "$USE_DEPENDS" -eq "1" ]; then - pushd depends || exit 1 - { - if [ "$FUNCTIONAL_TESTS" -eq "1" ]; then - make NO_QT=1 - else - make - fi - HOST="${HOST:-$(./config.guess)}" - } - popd || exit 1 - CONFIG_FLAGS="--prefix=$PWD/depends/$HOST $CONFIG_FLAGS" - fi - ./autogen.sh - ./configure $CONFIG_FLAGS - make - # Move binaries, so they're in the same place as in the release download: - mkdir bin - mv src/bitcoind src/bitcoin-cli src/bitcoin-tx bin - if [ "$FUNCTIONAL_TESTS" -eq "0" ]; then - mv src/qt/bitcoin-qt bin - fi - } - popd || exit 1 - fi - else - if [ -d "$tag" ]; then - echo "Using cached $tag" - else - mkdir "$tag" - if [[ "$tag" =~ v(.*)(rc[0-9]+)$ ]]; then - BIN_PATH="bin/bitcoin-core-${BASH_REMATCH[1]}/test.${BASH_REMATCH[2]}" - else - BIN_PATH="bin/bitcoin-core-${tag:1}" - fi - URL="https://bitcoincore.org/$BIN_PATH/bitcoin-${tag:1}-$PLATFORM.tar.gz" - echo "Fetching: $URL" - if ! curl -O -f $URL; then - echo "Download failed." 
- exit 1 - fi - tar -zxf "bitcoin-${tag:1}-$PLATFORM.tar.gz" -C "$tag" --strip-components=1 "bitcoin-${tag:1}" - rm "bitcoin-${tag:1}-$PLATFORM.tar.gz" - fi - fi - done -} -popd || exit 1 diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index ca587ca9e5..dc74de9198 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -40,25 +40,48 @@ def get_ELF_program_headers(executable): stdout = run_command([READELF_CMD, '-l', '-W', executable]) in_headers = False - count = 0 headers = [] for line in stdout.splitlines(): if line.startswith('Program Headers:'): in_headers = True + count = 0 if line == '': in_headers = False if in_headers: if count == 1: # header line - ofs_typ = line.find('Type') - ofs_offset = line.find('Offset') - ofs_flags = line.find('Flg') - ofs_align = line.find('Align') - if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1: + header = [x.strip() for x in line.split()] + ofs_typ = header.index('Type') + ofs_flags = header.index('Flg') + # assert readelf output is what we expect + if ofs_typ == -1 or ofs_flags == -1: raise ValueError('Cannot parse elfread -lW output') elif count > 1: - typ = line[ofs_typ:ofs_offset].rstrip() - flags = line[ofs_flags:ofs_align].rstrip() - headers.append((typ, flags)) + splitline = [x.strip() for x in line.split()] + typ = splitline[ofs_typ] + if not typ.startswith('[R'): # skip [Requesting ...] + splitline = [x.strip() for x in line.split()] + flags = splitline[ofs_flags] + # check for 'R', ' E' + if splitline[ofs_flags + 1] is 'E': + flags += ' E' + headers.append((typ, flags, [])) + count += 1 + + if line.startswith(' Section to Segment mapping:'): + in_mapping = True + count = 0 + if line == '': + in_mapping = False + if in_mapping: + if count == 1: # header line + ofs_segment = line.find('Segment') + ofs_sections = line.find('Sections...') + if ofs_segment == -1 or ofs_sections == -1: + raise ValueError('Cannot parse elfread -lW output') + elif count > 1: + segment = int(line[ofs_segment:ofs_sections].strip()) + sections = line[ofs_sections:].strip().split() + headers[segment][2].extend(sections) count += 1 return headers @@ -68,7 +91,7 @@ def check_ELF_NX(executable) -> bool: ''' have_wx = False have_gnu_stack = False - for (typ, flags) in get_ELF_program_headers(executable): + for (typ, flags, _) in get_ELF_program_headers(executable): if typ == 'GNU_STACK': have_gnu_stack = True if 'W' in flags and 'E' in flags: # section is both writable and executable @@ -82,7 +105,7 @@ def check_ELF_RELRO(executable) -> bool: Dynamic section must have BIND_NOW flag ''' have_gnu_relro = False - for (typ, flags) in get_ELF_program_headers(executable): + for (typ, flags, _) in get_ELF_program_headers(executable): # Note: not checking flags == 'R': here as linkers set the permission differently # This does not affect security: the permission flags of the GNU_RELRO program # header are ignored, the PT_LOAD header determines the effective permissions. @@ -113,6 +136,62 @@ def check_ELF_Canary(executable) -> bool: ok = True return ok +def check_ELF_separate_code(executable): + ''' + Check that sections are appropriately separated in virtual memory, + based on their permissions. This checks for missing -Wl,-z,separate-code + and potentially other problems. 
+ ''' + EXPECTED_FLAGS = { + # Read + execute + '.init': 'R E', + '.plt': 'R E', + '.plt.got': 'R E', + '.plt.sec': 'R E', + '.text': 'R E', + '.fini': 'R E', + # Read-only data + '.interp': 'R', + '.note.gnu.property': 'R', + '.note.gnu.build-id': 'R', + '.note.ABI-tag': 'R', + '.gnu.hash': 'R', + '.dynsym': 'R', + '.dynstr': 'R', + '.gnu.version': 'R', + '.gnu.version_r': 'R', + '.rela.dyn': 'R', + '.rela.plt': 'R', + '.rodata': 'R', + '.eh_frame_hdr': 'R', + '.eh_frame': 'R', + '.qtmetadata': 'R', + '.gcc_except_table': 'R', + '.stapsdt.base': 'R', + # Writable data + '.init_array': 'RW', + '.fini_array': 'RW', + '.dynamic': 'RW', + '.got': 'RW', + '.data': 'RW', + '.bss': 'RW', + } + # For all LOAD program headers get mapping to the list of sections, + # and for each section, remember the flags of the associated program header. + flags_per_section = {} + for (typ, flags, sections) in get_ELF_program_headers(executable): + if typ == 'LOAD': + for section in sections: + assert(section not in flags_per_section) + flags_per_section[section] = flags + # Spot-check ELF LOAD program header flags per section + # If these sections exist, check them against the expected R/W/E flags + for (section, flags) in flags_per_section.items(): + if section in EXPECTED_FLAGS: + if EXPECTED_FLAGS[section] != flags: + return False + return True + def get_PE_dll_characteristics(executable) -> int: '''Get PE DllCharacteristics bits''' stdout = run_command([OBJDUMP_CMD, '-x', executable]) @@ -225,7 +304,8 @@ CHECKS = { ('PIE', check_ELF_PIE), ('NX', check_ELF_NX), ('RELRO', check_ELF_RELRO), - ('Canary', check_ELF_Canary) + ('Canary', check_ELF_Canary), + ('separate_code', check_ELF_separate_code), ], 'PE': [ ('DYNAMIC_BASE', check_PE_DYNAMIC_BASE), diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index 629eba4f28..ec2d886653 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -31,15 +31,17 @@ class TestSecurityChecks(unittest.TestCase): cc = 'gcc' write_testcode(source) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE NX RELRO Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE RELRO Canary')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']), (1, executable+': failed PIE RELRO')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']), (1, executable+': failed RELRO')) - self.assertEqual(call_security_check(cc, 
source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']), + (1, executable+': failed separate_code')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']), (0, '')) def test_PE(self): diff --git a/contrib/devtools/test_deterministic_coverage.sh b/contrib/devtools/test_deterministic_coverage.sh index 95b1553215..8501c72f04 100755 --- a/contrib/devtools/test_deterministic_coverage.sh +++ b/contrib/devtools/test_deterministic_coverage.sh @@ -16,7 +16,6 @@ GCOV_EXECUTABLE="gcov" NON_DETERMINISTIC_TESTS=( "blockfilter_index_tests/blockfilter_index_initial_sync" # src/checkqueue.h: In CCheckQueue::Loop(): while (queue.empty()) { ... } "coinselector_tests/knapsack_solver_test" # coinselector_tests.cpp: if (equal_sets(setCoinsRet, setCoinsRet2)) - "denialofservice_tests/DoS_mapOrphans" # denialofservice_tests.cpp: it = mapOrphanTransactions.lower_bound(InsecureRand256()); "fs_tests/fsbridge_fstream" # deterministic test failure? "miner_tests/CreateNewBlock_validity" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10) "scheduler_tests/manythreads" # scheduler.cpp: CScheduler::serviceQueue() diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index 0ed1e16f7e..e86ff83798 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -7,31 +7,29 @@ suites: architectures: - "amd64" packages: -- "curl" -- "g++-aarch64-linux-gnu" -- "g++-8-aarch64-linux-gnu" -- "gcc-8-aarch64-linux-gnu" -- "binutils-aarch64-linux-gnu" -- "g++-arm-linux-gnueabihf" -- "g++-8-arm-linux-gnueabihf" -- "gcc-8-arm-linux-gnueabihf" -- "binutils-arm-linux-gnueabihf" -- "g++-riscv64-linux-gnu" -- "g++-8-riscv64-linux-gnu" -- "gcc-8-riscv64-linux-gnu" -- "binutils-riscv64-linux-gnu" -- "g++-8-multilib" -- "gcc-8-multilib" -- "binutils-gold" -- "git" -- "pkg-config" +# Common dependencies. - "autoconf" -- "libtool" - "automake" -- "faketime" +- "binutils" - "bsdmainutils" - "ca-certificates" +- "curl" +- "faketime" +- "git" +- "libtool" +- "patch" +- "pkg-config" - "python3" +# Cross compilation HOSTS: +# - arm-linux-gnueabihf +- "binutils-arm-linux-gnueabihf" +- "g++-8-arm-linux-gnueabihf" +# - aarch64-linux-gnu +- "binutils-aarch64-linux-gnu" +- "g++-8-aarch64-linux-gnu" +# - riscv64-linux-gnu +- "binutils-riscv64-linux-gnu" +- "g++-8-riscv64-linux-gnu" remotes: - "url": "https://github.com/bitcoin/bitcoin.git" "dir": "bitcoin" @@ -93,45 +91,11 @@ script: | create_per-host_faketime_wrappers "2000-01-01 12:00:00" export PATH=${WRAP_DIR}:${PATH} - EXTRA_INCLUDES_BASE=$WRAP_DIR/extra_includes - mkdir -p $EXTRA_INCLUDES_BASE - - # x86 needs /usr/include/i386-linux-gnu/asm pointed to /usr/include/x86_64-linux-gnu/asm, - # but we can't write there. Instead, create a link here and force it to be included in the - # search paths by wrapping gcc/g++. 
- - mkdir -p $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu - rm -f $WRAP_DIR/extra_includes/i686-pc-linux-gnu/asm - ln -s /usr/include/x86_64-linux-gnu/asm $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu/asm - - for prog in gcc g++; do - rm -f ${WRAP_DIR}/${prog} - cat << EOF > ${WRAP_DIR}/${prog} - #!/usr/bin/env bash - REAL="$(which -a ${prog}-8 | grep -v ${WRAP_DIR}/${prog} | head -1)" - for var in "\$@" - do - if [ "\$var" = "-m32" ]; then - export C_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu" - export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu" - break - fi - done - \$REAL \$@ - EOF - chmod +x ${WRAP_DIR}/${prog} - done - cd bitcoin BASEPREFIX="${PWD}/depends" # Build dependencies for each host for i in $HOSTS; do - EXTRA_INCLUDES="$EXTRA_INCLUDES_BASE/$i" - if [ -d "$EXTRA_INCLUDES" ]; then - export HOST_ID_SALT="$EXTRA_INCLUDES" - fi make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - unset HOST_ID_SALT done # Faketime for binaries diff --git a/contrib/testgen/README.md b/contrib/testgen/README.md index 573a71a675..eaca473b40 100644 --- a/contrib/testgen/README.md +++ b/contrib/testgen/README.md @@ -4,5 +4,5 @@ Utilities to generate test vectors for the data-driven Bitcoin tests. Usage: - PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_keys_valid.json - PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_keys_invalid.json + PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json + PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp index d2652119b4..ece02dc24e 100644 --- a/contrib/valgrind.supp +++ b/contrib/valgrind.supp @@ -123,7 +123,6 @@ Memcheck:Cond ... 
fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE - fun:unique_path } { Suppress boost warning diff --git a/depends/Makefile b/depends/Makefile index 3d0784cb6b..2bc5df974a 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -125,6 +125,11 @@ $(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null) $(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) $(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) +ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) +build_id_string+=system_clang +$(host_arch)_$(host_os)_id_string+=system_clang +endif + qrencode_packages_$(NO_QR) = $(qrencode_packages) qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_) @@ -150,11 +155,17 @@ all_packages = $(packages) $(native_packages) meta_depends = Makefile funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk +$(host_arch)_$(host_os)_native_binutils?=$($(host_os)_native_binutils) $(host_arch)_$(host_os)_native_toolchain?=$($(host_os)_native_toolchain) include funcs.mk +binutils_path=$($($(host_arch)_$(host_os)_native_binutils)_prefixbin) +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) toolchain_path=$($($(host_arch)_$(host_os)_native_toolchain)_prefixbin) +else +toolchain_path= +endif final_build_id_long+=$(shell $(build_SHA256SUM) config.site.in) final_build_id+=$(shell echo -n "$(final_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH)) $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages) @@ -170,10 +181,10 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_ $(AT)sed -e 's|@HOST@|$(host)|' \ -e 's|@CC@|$(toolchain_path)$(host_CC)|' \ -e 's|@CXX@|$(toolchain_path)$(host_CXX)|' \ - -e 's|@AR@|$(toolchain_path)$(host_AR)|' \ - -e 's|@RANLIB@|$(toolchain_path)$(host_RANLIB)|' \ - -e 's|@NM@|$(toolchain_path)$(host_NM)|' \ - -e 's|@STRIP@|$(toolchain_path)$(host_STRIP)|' \ + -e 's|@AR@|$(binutils_path)$(host_AR)|' \ + -e 's|@RANLIB@|$(binutils_path)$(host_RANLIB)|' \ + -e 's|@NM@|$(binutils_path)$(host_NM)|' \ + -e 's|@STRIP@|$(binutils_path)$(host_STRIP)|' \ -e 's|@build_os@|$(build_os)|' \ -e 's|@host_os@|$(host_os)|' \ -e 's|@CFLAGS@|$(strip $(host_CFLAGS) $(host_$(release_type)_CFLAGS))|' \ diff --git a/depends/README.md b/depends/README.md index c12ea8bcb3..2356e8be59 100644 --- a/depends/README.md +++ b/depends/README.md @@ -44,7 +44,7 @@ The paths are automatically configured and no other options are needed unless ta #### For macOS cross compilation - sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python3-setuptools + sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python3-setuptools libtinfo5 #### For Win64 cross compilation @@ -80,21 +80,40 @@ For linux S390X cross compilation: sudo apt-get install g++-s390x-linux-gnu binutils-s390x-linux-gnu ### Dependency Options -The following can be set when running make: make FOO=bar - - SOURCES_PATH: downloaded sources will be placed here - BASE_CACHE: built packages will be placed here - SDK_PATH: Path where sdk's can be found (used by macOS) - FALLBACK_DOWNLOAD_PATH: If a source file can't be fetched, try here before giving up - NO_QT: Don't download/build/cache qt and its dependencies - NO_QR: Don't download/build/cache packages needed for enabling qrencode - NO_ZMQ: 
Don't download/build/cache packages needed for enabling zeromq - NO_WALLET: Don't download/build/cache libs needed to enable the wallet - NO_UPNP: Don't download/build/cache packages needed for enabling upnp - MULTIPROCESS: build libmultiprocess (experimental, requires cmake) - DEBUG: disable some optimizations and enable more runtime checking - HOST_ID_SALT: Optional salt to use when generating host package ids - BUILD_ID_SALT: Optional salt to use when generating build package ids +The following can be set when running make: `make FOO=bar` + +<dl> +<dt>SOURCES_PATH</dt> +<dd>downloaded sources will be placed here</dd> +<dt>BASE_CACHE</dt> +<dd>built packages will be placed here</dd> +<dt>SDK_PATH</dt> +<dd>Path where sdk's can be found (used by macOS)</dd> +<dt>FALLBACK_DOWNLOAD_PATH</dt> +<dd>If a source file can't be fetched, try here before giving up</dd> +<dt>NO_QT</dt> +<dd>Don't download/build/cache qt and its dependencies</dd> +<dt>NO_QR</dt> +<dd>Don't download/build/cache packages needed for enabling qrencode</dd> +<dt>NO_ZMQ</dt> +<dd>Don't download/build/cache packages needed for enabling zeromq</dd> +<dt>NO_WALLET</dt> +<dd>Don't download/build/cache libs needed to enable the wallet</dd> +<dt>NO_UPNP</dt> +<dd>Don't download/build/cache packages needed for enabling upnp</dd> +<dt>MULTIPROCESS</dt> +<dd>build libmultiprocess (experimental, requires cmake)</dd> +<dt>DEBUG</dt> +<dd>disable some optimizations and enable more runtime checking</dd> +<dt>HOST_ID_SALT</dt> +<dd>Optional salt to use when generating host package ids</dd> +<dt>BUILD_ID_SALT</dt> +<dd>Optional salt to use when generating build package ids</dd> +<dt>FORCE_USE_SYSTEM_CLANG</dt> +<dd>(EXPERTS ONLY) When cross-compiling for macOS, use Clang found in the +system's <code>$PATH</code> rather than the default prebuilt release of Clang +from llvm.org. Clang 8 or later is required.</dd> +</dl> If some packages are not built, for example `make NO_WALLET=1`, the appropriate options will be passed to bitcoin's configure. In this case, `--disable-wallet`. 
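For illustration, a minimal sketch of how several of the depends variables listed above can be combined on a single `make` invocation; the host triplet and cache paths are assumed examples, not values taken from this change:

```bash
# Illustrative only: build the depends tree for a Linux host without Qt,
# keeping downloaded sources and built packages in custom cache directories.
make -C depends HOST=x86_64-pc-linux-gnu NO_QT=1 \
    SOURCES_PATH=/tmp/depends-sources BASE_CACHE=/tmp/depends-cache
```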
diff --git a/depends/builders/darwin.mk b/depends/builders/darwin.mk index 69c394ec1d..f4103fc1f2 100644 --- a/depends/builders/darwin.mk +++ b/depends/builders/darwin.mk @@ -19,4 +19,5 @@ darwin_LIBTOOL:=$(shell xcrun -f libtool) darwin_OTOOL:=$(shell xcrun -f otool) darwin_NM:=$(shell xcrun -f nm) darwin_INSTALL_NAME_TOOL:=$(shell xcrun -f install_name_tool) +darwin_native_binutils= darwin_native_toolchain= diff --git a/depends/funcs.mk b/depends/funcs.mk index 135ebba9f8..6fc20543bb 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -41,7 +41,7 @@ endef define int_get_build_id $(eval $(1)_dependencies += $($(1)_$(host_arch)_$(host_os)_dependencies) $($(1)_$(host_os)_dependencies)) -$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($(1)_dependencies))) +$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($($(1)_type)_native_binutils) $($(1)_dependencies))) $(foreach dep,$($(1)_all_dependencies),$(eval $(1)_build_id_deps+=$(dep)-$($(dep)_version)-$($(dep)_recipe_hash))) $(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id_string)) $(eval $(1)_build_id:=$(shell echo -n "$($(1)_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH))) @@ -260,4 +260,4 @@ $(foreach package,$(all_packages),$(eval $(call int_config_attach_build_config,$ $(foreach package,$(all_packages),$(eval $(call int_add_cmds,$(package)))) #special exception: if a toolchain package exists, all non-native packages depend on it -$(foreach package,$(packages),$(eval $($(package)_unpacked): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) )) +$(foreach package,$(packages),$(eval $($(package)_unpacked): |$($($(host_arch)_$(host_os)_native_toolchain)_cached) $($($(host_arch)_$(host_os)_native_binutils)_cached) )) diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk index 5f0bffa5cb..6099fd4c71 100644 --- a/depends/hosts/darwin.mk +++ b/depends/hosts/darwin.mk @@ -5,8 +5,31 @@ XCODE_BUILD_ID=11C505 LD64_VERSION=530 OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers -darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION) -darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -stdlib=libc++ -mlinker-version=$(LD64_VERSION) + +# Flag explanations: +# +# -mlinker-version +# +# Ensures that modern linker features are enabled. See here for more +# details: https://github.com/bitcoin/bitcoin/pull/19407. +# +# -B$(build_prefix)/bin +# +# Explicitly point to our binaries (e.g. cctools) so that they are +# ensured to be found and preferred over other possibilities. +# +# -nostdinc++ -isystem $(OSX_SDK)/usr/include/c++/v1 +# +# Forces clang to use the libc++ headers from our SDK and completely +# forget about the libc++ headers from the standard directories +# +# TODO: Once we start requiring a clang version that has the +# -stdlib++-isystem<directory> flag first introduced here: +# https://reviews.llvm.org/D64089, we should use that instead. Read the +# differential summary there for more details. 
+# +darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION) -B$(build_prefix)/bin +darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -stdlib=libc++ -mlinker-version=$(LD64_VERSION) -B$(build_prefix)/bin -nostdinc++ -isystem $(OSX_SDK)/usr/include/c++/v1 darwin_CFLAGS=-pipe darwin_CXXFLAGS=$(darwin_CFLAGS) @@ -17,5 +40,11 @@ darwin_release_CXXFLAGS=$(darwin_release_CFLAGS) darwin_debug_CFLAGS=-O1 darwin_debug_CXXFLAGS=$(darwin_debug_CFLAGS) +darwin_native_binutils=native_cctools +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) darwin_native_toolchain=native_cctools +else +darwin_native_toolchain= +endif + darwin_cmake_system=Darwin diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index 3a7e605b4f..4f6b543aff 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -31,7 +31,9 @@ $(package)_cxxflags_linux=-fPIC $(package)_cxxflags_android=-fPIC endef +# Fix unused variable in boost_process, can be removed after upgrading to 1.72 define $(package)_preprocess_cmds + sed -i.old "s/int ret_sig = 0;//" boost/process/detail/posix/wait_group.hpp && \ echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$($(package)_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam endef diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index eb45e14f6f..1cd5a1749a 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -3,17 +3,23 @@ $(package)_version=2.1.11-stable $(package)_download_path=https://github.com/libevent/libevent/archive/ $(package)_file_name=release-$($(package)_version).tar.gz $(package)_sha256_hash=229393ab2bf0dc94694f21836846b424f3532585bac3468738b7bf752c03901e +$(package)_patches=0001-fix-windows-getaddrinfo.patch define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/0001-fix-windows-getaddrinfo.patch && \ ./autogen.sh endef +# When building for Windows, we set _WIN32_WINNT to target the same Windows +# version as we do in configure. Due to quirks in libevents build system, this +# is also required to enable support for ipv6. See #19375. 
define $(package)_set_vars $(package)_config_opts=--disable-shared --disable-openssl --disable-libevent-regress --disable-samples $(package)_config_opts += --disable-dependency-tracking --enable-option-checking $(package)_config_opts_release=--disable-debug-mode $(package)_config_opts_linux=--with-pic $(package)_config_opts_android=--with-pic + $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601 endef define $(package)_config_cmds diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk index bdebd11862..5022ed980f 100644 --- a/depends/packages/native_cctools.mk +++ b/depends/packages/native_cctools.mk @@ -1,14 +1,16 @@ package=native_cctools -$(package)_version=4da2f3b485bcf4cef526f30c0b8c0bcda99cdbb4 +$(package)_version=55562e4073dea0fbfd0b20e0bf69ffe6390c7f97 $(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive $(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=a2d491c0981cef72fee2b833598f20f42a6c44a7614a61c439bda93d56446fec +$(package)_sha256_hash=e51995a843533a3dac155dd0c71362dd471597a2d23f13dff194c6285362f875 $(package)_build_subdir=cctools +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) $(package)_clang_version=8.0.0 $(package)_clang_download_path=https://releases.llvm.org/$($(package)_clang_version) $(package)_clang_download_file=clang+llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz $(package)_clang_file_name=clang-llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz $(package)_clang_sha256_hash=9ef854b71949f825362a119bf2597f744836cb571131ae6b721cd102ffea8cd0 +endif $(package)_libtapi_version=3efb201881e7a76a21e0554906cf306432539cef $(package)_libtapi_download_path=https://github.com/tpoechtrager/apple-libtapi/archive @@ -16,15 +18,25 @@ $(package)_libtapi_download_file=$($(package)_libtapi_version).tar.gz $(package)_libtapi_file_name=$($(package)_libtapi_version).tar.gz $(package)_libtapi_sha256_hash=380c1ca37cfa04a8699d0887a8d3ee1ad27f3d08baba78887c73b09485c0fbd3 -$(package)_extra_sources=$($(package)_clang_file_name) -$(package)_extra_sources += $($(package)_libtapi_file_name) +$(package)_extra_sources=$($(package)_libtapi_file_name) +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) +$(package)_extra_sources += $($(package)_clang_file_name) +endif +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) define $(package)_fetch_cmds $(call fetch_file,$(package),$($(package)_download_path),$($(package)_download_file),$($(package)_file_name),$($(package)_sha256_hash)) && \ $(call fetch_file,$(package),$($(package)_clang_download_path),$($(package)_clang_download_file),$($(package)_clang_file_name),$($(package)_clang_sha256_hash)) && \ $(call fetch_file,$(package),$($(package)_libtapi_download_path),$($(package)_libtapi_download_file),$($(package)_libtapi_file_name),$($(package)_libtapi_sha256_hash)) endef +else +define $(package)_fetch_cmds +$(call fetch_file,$(package),$($(package)_download_path),$($(package)_download_file),$($(package)_file_name),$($(package)_sha256_hash)) && \ +$(call fetch_file,$(package),$($(package)_libtapi_download_path),$($(package)_libtapi_download_file),$($(package)_libtapi_file_name),$($(package)_libtapi_sha256_hash)) +endef +endif +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) define $(package)_extract_cmds mkdir -p $($(package)_extract_dir) && \ echo "$($(package)_sha256_hash) $($(package)_source)" > $($(package)_extract_dir)/.$($(package)_file_name).hash && \ @@ -38,12 +50,29 @@ define $(package)_extract_cmds rm -f toolchain/lib/libc++abi.so* && \ tar 
--no-same-owner --strip-components=1 -xf $($(package)_source) endef +else +define $(package)_extract_cmds + mkdir -p $($(package)_extract_dir) && \ + echo "$($(package)_sha256_hash) $($(package)_source)" > $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + echo "$($(package)_libtapi_sha256_hash) $($(package)_source_dir)/$($(package)_libtapi_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + $(build_SHA256SUM) -c $($(package)_extract_dir)/.$($(package)_file_name).hash && \ + mkdir -p libtapi && \ + tar --no-same-owner --strip-components=1 -C libtapi -xf $($(package)_source_dir)/$($(package)_libtapi_file_name) && \ + tar --no-same-owner --strip-components=1 -xf $($(package)_source) +endef +endif define $(package)_set_vars - $(package)_config_opts=--target=$(host) --disable-lto-support --with-libtapi=$($(package)_extract_dir) + $(package)_config_opts=--target=$(host) --with-libtapi=$($(package)_extract_dir) $(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib + ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) + $(package)_config_opts+=--enable-lto-support --with-llvm-config=$($(package)_extract_dir)/toolchain/bin/llvm-config $(package)_cc=$($(package)_extract_dir)/toolchain/bin/clang $(package)_cxx=$($(package)_extract_dir)/toolchain/bin/clang++ + else + $(package)_cc=clang + $(package)_cxx=clang++ + endif endef define $(package)_preprocess_cmds @@ -60,6 +89,7 @@ define $(package)_build_cmds $(MAKE) endef +ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install && \ mkdir -p $($(package)_staging_prefix_dir)/lib/ && \ @@ -74,3 +104,11 @@ define $(package)_stage_cmds cp -rf lib/clang/$($(package)_clang_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_clang_version)/include/ && \ cp bin/dsymutil $($(package)_staging_prefix_dir)/bin/$(host)-dsymutil endef +else +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install && \ + mkdir -p $($(package)_staging_prefix_dir)/lib/ && \ + cd $($(package)_extract_dir) && \ + cp lib/libtapi.so.6 $($(package)_staging_prefix_dir)/lib/ +endef +endif diff --git a/depends/packages/native_cdrkit.mk b/depends/packages/native_cdrkit.mk index 8243458ec8..14c37a0fc7 100644 --- a/depends/packages/native_cdrkit.mk +++ b/depends/packages/native_cdrkit.mk @@ -9,8 +9,10 @@ define $(package)_preprocess_cmds patch -p1 < $($(package)_patch_dir)/cdrkit-deterministic.patch endef +# Starting with 10.1, GCC defaults to -fno-common, resulting in linking errors. +# Pass -fcommon to retain the legacy behaviour. 
define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) + cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) -DCMAKE_C_FLAGS="-fcommon" endef define $(package)_build_cmds diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 631851855a..500881e442 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -80,7 +80,6 @@ $(package)_config_opts += -no-feature-colordialog $(package)_config_opts += -no-feature-commandlineparser $(package)_config_opts += -no-feature-concurrent $(package)_config_opts += -no-feature-dial -$(package)_config_opts += -no-feature-filesystemwatcher $(package)_config_opts += -no-feature-fontcombobox $(package)_config_opts += -no-feature-ftp $(package)_config_opts += -no-feature-http diff --git a/depends/patches/libevent/0001-fix-windows-getaddrinfo.patch b/depends/patches/libevent/0001-fix-windows-getaddrinfo.patch new file mode 100644 index 0000000000..a98cd90bd5 --- /dev/null +++ b/depends/patches/libevent/0001-fix-windows-getaddrinfo.patch @@ -0,0 +1,15 @@ +diff -ur libevent-2.1.8-stable.orig/configure.ac libevent-2.1.8-stable/configure.ac +--- libevent-2.1.8-stable.orig/configure.ac 2017-01-29 17:51:00.000000000 +0000 ++++ libevent-2.1.8-stable/configure.ac 2020-03-07 01:11:16.311335005 +0000 +@@ -389,6 +389,10 @@ + #ifdef HAVE_NETDB_H + #include <netdb.h> + #endif ++#ifdef _WIN32 ++#include <winsock2.h> ++#include <ws2tcpip.h> ++#endif + ]], + [[ + getaddrinfo; +Only in libevent-2.1.8-stable: configure.ac~ diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 7e307ab7c8..2f79168212 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -2073,7 +2073,7 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -PREDEFINED = +PREDEFINED = HAVE_BOOST_PROCESS # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md index a0cfe84a3e..40d8e330e2 100644 --- a/doc/JSON-RPC-interface.md +++ b/doc/JSON-RPC-interface.md @@ -60,7 +60,7 @@ RPC interface will be abused. are sent as clear text that can be read by anyone on your network path. Additionally, the RPC interface has not been hardened to withstand arbitrary Internet traffic, so changing the above settings - to expose it to the Internet (even using something like a Tor hidden + to expose it to the Internet (even using something like a Tor onion service) could expose you to unconsidered vulnerabilities. See `bitcoind -help` for more information about these settings and other settings described in this document. diff --git a/doc/REST-interface.md b/doc/REST-interface.md index 11e9c90f49..842a3964df 100644 --- a/doc/REST-interface.md +++ b/doc/REST-interface.md @@ -61,7 +61,6 @@ Only supports JSON as output format. 
* pruned : (boolean) if the blocks are subject to pruning * pruneheight : (numeric) highest block available * softforks : (array) status of softforks in progress -* bip9_softforks : (object) status of BIP9 softforks in progress #### Query UTXO set `GET /rest/getutxos/<checkmempool>/<txid>-<n>/<txid>-<n>/.../<txid>-<n>.<bin|hex|json>` @@ -79,7 +78,6 @@ $ curl localhost:18332/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76 "bitmap": "1", "utxos" : [ { - "txvers" : 1 "height" : 2147483647, "value" : 8.8687, "scriptPubKey" : { diff --git a/doc/benchmarking.md b/doc/benchmarking.md index b1a06009b5..b6cd86eafe 100644 --- a/doc/benchmarking.md +++ b/doc/benchmarking.md @@ -19,8 +19,10 @@ After compiling bitcoin-core, the benchmarks can be run with: The output will look similar to: ``` -# Benchmark, evals, iterations, total, min, max, median -AssembleBlock, 5, 700, 1.79954, 0.000510913, 0.000517018, 0.000514497 +| ns/byte | byte/s | error % | benchmark +|--------------------:|--------------------:|--------:|:---------------------------------------------- +| 64.13 | 15,592,356.01 | 0.1% | `Base58CheckEncode` +| 24.56 | 40,722,672.68 | 0.2% | `Base58Decode` ... ``` diff --git a/doc/bips.md b/doc/bips.md index b96862297f..456fea7a5a 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -1,4 +1,4 @@ -BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.19.0**): +BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): * [`BIP 9`](https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki): The changes allowing multiple soft-forks to be deployed in parallel have been implemented since **v0.12.1** ([PR #7575](https://github.com/bitcoin/bitcoin/pull/7575)) * [`BIP 11`](https://github.com/bitcoin/bips/blob/master/bip-0011.mediawiki): Multisig outputs are standard since **v0.6.0** ([PR #669](https://github.com/bitcoin/bitcoin/pull/669)). @@ -42,3 +42,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.19.0**): * [`BIP 173`](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki): Bech32 addresses for native Segregated Witness outputs are supported as of **v0.16.0** ([PR 11167](https://github.com/bitcoin/bitcoin/pull/11167)). Bech32 addresses are generated by default as of **v0.20.0** ([PR 16884](https://github.com/bitcoin/bitcoin/pull/16884)). * [`BIP 174`](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki): RPCs to operate on Partially Signed Bitcoin Transactions (PSBT) are present as of **v0.17.0** ([PR 13557](https://github.com/bitcoin/bitcoin/pull/13557)). * [`BIP 176`](https://github.com/bitcoin/bips/blob/master/bip-0176.mediawiki): Bits Denomination [QT only] is supported as of **v0.16.0** ([PR 12035](https://github.com/bitcoin/bitcoin/pull/12035)). +* [`BIP 339`](https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki): Relay of transactions by wtxid is supported as of **v0.21.0** ([PR 18044](https://github.com/bitcoin/bitcoin/pull/18044)). diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md index 53c647ae34..584ee43d48 100644 --- a/doc/build-openbsd.md +++ b/doc/build-openbsd.md @@ -1,6 +1,6 @@ OpenBSD build guide ====================== -(updated for OpenBSD 6.4) +(updated for OpenBSD 6.7) This guide describes how to build bitcoind and command-line utilities on OpenBSD. @@ -15,7 +15,7 @@ Run the following as root to install the base dependencies for building: pkg_add git gmake libevent libtool boost pkg_add autoconf # (select highest version, e.g. 2.69) pkg_add automake # (select highest version, e.g. 
1.16) -pkg_add python # (select highest version, e.g. 3.6) +pkg_add python # (select highest version, e.g. 3.8) git clone https://github.com/bitcoin/bitcoin.git ``` @@ -23,10 +23,10 @@ git clone https://github.com/bitcoin/bitcoin.git See [dependencies.md](dependencies.md) for a complete overview. **Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is -part of the base image, and while building it is necessary to make sure that this -compiler is used and not ancient g++ 4.2.1. This is done by appending -`CC=cc CXX=c++` to configuration commands. Mixing different compilers -within the same executable will result in linker errors. +part of the base image, and while building it is necessary to make sure that +this compiler is used and not ancient g++ 4.2.1. This is done by appending +`CC=cc CC_FOR_BUILD=cc CXX=c++` to configuration commands. Mixing different +compilers within the same executable will result in errors. ### Building BerkeleyDB @@ -77,7 +77,7 @@ To configure with wallet: To configure without wallet: ```bash -./configure --disable-wallet --with-gui=no CC=cc CXX=c++ MAKE=gmake +./configure --disable-wallet --with-gui=no CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake ``` Build and run the tests: diff --git a/doc/build-windows.md b/doc/build-windows.md index d3dc467f19..28b6aceb3c 100644 --- a/doc/build-windows.md +++ b/doc/build-windows.md @@ -91,15 +91,22 @@ Note that for WSL the Bitcoin Core source path MUST be somewhere in the default example /usr/src/bitcoin, AND not under /mnt/d/. If this is not the case the dependency autoconf scripts will fail. This means you cannot use a directory that is located directly on the host Windows file system to perform the build. +Additional WSL Note: WSL support for [launching Win32 applications](https://docs.microsoft.com/en-us/archive/blogs/wsl/windows-and-ubuntu-interoperability#launching-win32-applications-from-within-wsl) +results in `Autoconf` configure scripts being able to execute Windows Portable Executable files. This can cause +unexpected behaviour during the build, such as Win32 error dialogs for missing libraries. The recommended approach +is to temporarily disable WSL support for Win32 applications. + Build using: PATH=$(echo "$PATH" | sed -e 's/:\/mnt.*//g') # strip out problematic Windows %PATH% imported var + sudo bash -c "echo 0 > /proc/sys/fs/binfmt_misc/status" # Disable WSL support for Win32 applications. cd depends make HOST=x86_64-w64-mingw32 cd .. ./autogen.sh # not required when building from tarball CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure --prefix=/ make + sudo bash -c "echo 1 > /proc/sys/fs/binfmt_misc/status" # Enable WSL support for Win32 applications. ## Depends system diff --git a/doc/dependencies.md b/doc/dependencies.md index 0cb5311e8b..92dea65309 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -6,7 +6,7 @@ These are the dependencies currently used by Bitcoin Core. 
You can find instruct | Dependency | Version used | Minimum required | CVEs | Shared | [Bundled Qt library](https://doc.qt.io/qt-5/configure-options.html#third-party-libraries) | | --- | --- | --- | --- | --- | --- | | Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | | -| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.47.0](https://github.com/bitcoin/bitcoin/pull/8920) | No | | | +| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.58.0](https://github.com/bitcoin/bitcoin/pull/19667) | No | | | | Clang | | [3.3+](https://releases.llvm.org/download.html) (C++11 support) | | | | | Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | | | fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | | diff --git a/doc/developer-notes.md b/doc/developer-notes.md index b33b3ad18a..6ae7e770e8 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -620,6 +620,19 @@ class A - *Rationale*: Easier to understand what is happening, thus easier to spot mistakes, even for those that are not language lawyers. +- Use `Span` as function argument when it can operate on any range-like container. + + - *Rationale*: Compared to `Foo(const vector<int>&)` this avoids the need for a (potentially expensive) + conversion to vector if the caller happens to have the input stored in another type of container. + However, be aware of the pitfalls documented in [span.h](../src/span.h). + +```cpp +void Foo(Span<const int> data); + +std::vector<int> vec{1,2,3}; +Foo(vec); +``` + - Prefer `enum class` (scoped enumerations) over `enum` (traditional enumerations) where possible. - *Rationale*: Scoped enumerations avoid two potential pitfalls/problems with traditional C++ enumerations: implicit conversions to `int`, and name clashes due to enumerators being exported to the surrounding scope. @@ -874,7 +887,7 @@ Others are external projects without a tight relationship with our project. Chan be sent upstream, but bugfixes may also be prudent to PR against Bitcoin Core so that they can be integrated quickly. Cosmetic changes should be purely taken upstream. -There is a tool in `test/lint/git-subtree-check.sh` to check a subtree directory for consistency with +There is a tool in `test/lint/git-subtree-check.sh` ([instructions](../test/lint#git-subtree-checksh)) to check a subtree directory for consistency with its upstream repository. Current subtrees include: diff --git a/doc/files.md b/doc/files.md index cd23d547bb..52e094a60b 100644 --- a/doc/files.md +++ b/doc/files.md @@ -50,14 +50,15 @@ Subdirectory | File(s) | Description `indexes/blockfilter/basic/` | `fltrNNNNN.dat`<sup>[\[2\]](#note2)</sup> | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, a wallet resides in the data directory `./` | `banlist.dat` | Stores the IPs/subnets of banned nodes -`./` | `bitcoin.conf` | Contains [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`; can be specified by `-conf` option +`./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. 
Path can be specified by `-conf` option `./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option `./` | `debug.log` | Contains debug information and general logging generated by `bitcoind` or `bitcoin-qt`; can be specified by `-debuglogfile` option `./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation `./` | `guisettings.ini.bak` | Backup of former [GUI settings](#gui-settings) after `-resetguisettings` option is used `./` | `mempool.dat` | Dump of the mempool's transactions -`./` | `onion_private_key` | Cached Tor hidden service private key for `-listenonion` option +`./` | `onion_private_key` | Cached Tor onion service private key for `-listenonion` option `./` | `peers.dat` | Peer IP address database (custom format) +`./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [bitcoin.conf](bitcoin-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. Path can be specified with `-settings` option `./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option `./` | `.lock` | Data directory lock file diff --git a/doc/fuzzing.md b/doc/fuzzing.md index 419b1db44e..c97b8d4d50 100644 --- a/doc/fuzzing.md +++ b/doc/fuzzing.md @@ -121,6 +121,8 @@ $ git clone https://github.com/google/afl $ make -C afl/ $ make -C afl/llvm_mode/ $ ./autogen.sh +# It is possible to compile with afl-gcc and afl-g++ instead of afl-clang. However, running afl-fuzz +# may require more memory via the -m flag. $ CC=$(pwd)/afl/afl-clang-fast CXX=$(pwd)/afl/afl-clang-fast++ ./configure --enable-fuzz $ make # For macOS you may need to ignore x86 compilation checks when running "make". If so, diff --git a/doc/reduce-traffic.md b/doc/reduce-traffic.md index ce77a00dd5..86943b1f72 100644 --- a/doc/reduce-traffic.md +++ b/doc/reduce-traffic.md @@ -23,7 +23,7 @@ longer serving historic blocks (blocks older than one week). Keep in mind that new nodes require other nodes that are willing to serve historic blocks. -Peers with the `noban` permission will never be disconnected, although their traffic counts for +Peers with the `download` permission will never be disconnected, although their traffic counts for calculating the target. ## 2. Disable "listening" (`-listen=0`) @@ -50,7 +50,7 @@ Be reminded of the effects of this setting. Doing so disables the automatic broadcasting of transactions from wallet. Not relaying other's transactions could hurt your privacy if used while a wallet is loaded or if you use the node to broadcast transactions. -- If a peer is whitelisted and "-whitelistforcerelay" is set to "1" (which will - also set "whitelistrelay" to "1"), we will still receive and relay their transactions. +- If a peer has the forcerelay permission, we will still receive and relay + their transactions. - It makes block propagation slower because compact block relay can only be used when transaction relay is enabled. 
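The reduce-traffic.md hunk above describes the new `download` and `forcerelay` permissions only in prose. As an illustrative aside (not part of the diff), the sketch below shows one way such permissions can be granted on the command line; the upload target, addresses, and netmask are arbitrary placeholder values.

```bash
# Hypothetical example: a bandwidth-limited, blocks-only node that still serves
# historic blocks to one trusted subnet and still relays transactions from one
# trusted peer. All numbers and addresses are placeholders.
bitcoind -maxuploadtarget=500 -blocksonly \
         -whitelist=download@192.168.1.0/24 \
         -whitelist=forcerelay@10.0.0.5
```

The same `permission@address` form applies to `-whitebind`, as shown in the share/examples/bitcoin.conf hunk further down.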
diff --git a/doc/release-notes-11413.md b/doc/release-notes-11413.md deleted file mode 100644 index 32735e37f6..0000000000 --- a/doc/release-notes-11413.md +++ /dev/null @@ -1,11 +0,0 @@ -Updated or changed RPC ----------------------- - -The `bumpfee`, `fundrawtransaction`, `sendmany`, `sendtoaddress`, and `walletcreatefundedpsbt` -RPC commands have been updated to include two new fee estimation methods "BTC/kB" and "sat/B". -The target is the fee expressed explicitly in the given form. Note that use of this feature -will trigger BIP 125 (replace-by-fee) opt-in. - -In addition, the `estimate_mode` parameter is now case insensitive for all of the above RPC commands. - -The `bumpfee` command now uses `conf_target` rather than `confTarget` in the options. diff --git a/doc/release-notes-16377.md b/doc/release-notes-16377.md deleted file mode 100644 index 3442fa451b..0000000000 --- a/doc/release-notes-16377.md +++ /dev/null @@ -1,9 +0,0 @@ -RPC changes ------------ -- The `walletcreatefundedpsbt` RPC call will now fail with - `Insufficient funds` when inputs are manually selected but are not enough to cover - the outputs and fee. Additional inputs can automatically be added through the - new `add_inputs` option. - -- The `fundrawtransaction` RPC now supports `add_inputs` option that when `false` - prevents adding more inputs if necessary and consequently the RPC fails. diff --git a/doc/release-notes-16525.md b/doc/release-notes-16525.md new file mode 100644 index 0000000000..220cb78de4 --- /dev/null +++ b/doc/release-notes-16525.md @@ -0,0 +1,9 @@ +RPC changes +----------- + +Exposed transaction version numbers are now treated as unsigned 32-bit integers +instead of signed 32-bit integers. This matches their treatment in consensus +logic. Versions greater than 2 continue to be non-standard (matching previous +behavior of smaller than 1 or greater than 2 being non-standard). Note that +this includes the joinpsbt command, which combines partially-signed +transactions by selecting the highest version number. diff --git a/doc/release-notes-16528.md b/doc/release-notes-16528.md deleted file mode 100644 index e69de29bb2..0000000000 --- a/doc/release-notes-16528.md +++ /dev/null diff --git a/doc/release-notes-18918.md b/doc/release-notes-18918.md deleted file mode 100644 index e69de29bb2..0000000000 --- a/doc/release-notes-18918.md +++ /dev/null diff --git a/doc/release-notes-19133.md b/doc/release-notes-19133.md deleted file mode 100644 index 5150fbe1c7..0000000000 --- a/doc/release-notes-19133.md +++ /dev/null @@ -1,7 +0,0 @@ -## CLI - -A new `bitcoin-cli -generate` command, equivalent to RPC `generatenewaddress` -followed by `generatetoaddress`, can generate blocks for command line testing -purposes. This is a client-side version of the -[former](https://github.com/bitcoin/bitcoin/issues/14299) `generate` RPC. See -the help for details. (#19133) diff --git a/doc/release-notes-19200.md b/doc/release-notes-19200.md deleted file mode 100644 index 4670cb2e75..0000000000 --- a/doc/release-notes-19200.md +++ /dev/null @@ -1,7 +0,0 @@ -## Wallet - -- Backwards compatibility has been dropped for two `getaddressinfo` RPC - deprecations, as notified in the 0.20 release notes. The deprecated `label` - field has been removed as well as the deprecated `labels` behavior of - returning a JSON object containing `name` and `purpose` key-value pairs. Since - 0.20, the `labels` field returns a JSON array of label names. 
(#19200) diff --git a/doc/release-notes.md b/doc/release-notes.md index e73bedfb10..23983dcd7b 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -87,6 +87,28 @@ Updated RPCs whether initial broadcast of the transaction has been acknowledged by a peer. `getmempoolancestors` and `getmempooldescendants` are also updated. +- The `bumpfee`, `fundrawtransaction`, `sendmany`, `sendtoaddress`, and `walletcreatefundedpsbt` +RPC commands have been updated to include two new fee estimation methods "BTC/kB" and "sat/B". +The target is the fee expressed explicitly in the given form. Note that use of this feature +will trigger BIP 125 (replace-by-fee) opt-in. (#11413) + +- In addition, the `estimate_mode` parameter is now case insensitive for all of + the above RPC commands. (#11413) + +- The `bumpfee` command now uses `conf_target` rather than `confTarget` in the + options. (#11413) + +- The `getpeerinfo` RPC no longer returns the `banscore` field unless the configuration + option `-deprecatedrpc=banscore` is used. The `banscore` field will be fully + removed in the next major release. (#19469) + +- The `walletcreatefundedpsbt` RPC call will now fail with + `Insufficient funds` when inputs are manually selected but are not enough to cover + the outputs and fee. Additional inputs can automatically be added through the + new `add_inputs` option. (#16377) + +- The `fundrawtransaction` RPC now supports `add_inputs` option that when `false` + prevents adding more inputs if necessary and consequently the RPC fails. Changes to Wallet or GUI related RPCs can be found in the GUI or Wallet section below. @@ -99,17 +121,46 @@ Build System Updated settings ---------------- +- The `-banscore` configuration option, which modified the default threshold for + disconnecting and discouraging misbehaving peers, has been removed as part of + changes in 0.20.1 and in this release to the handling of misbehaving peers. + Refer to "Changes regarding misbehaving peers" in the 0.20.1 release notes for + details. (#19464) + - The `-debug=db` logging category, which was deprecated in 0.20 and replaced by `-debug=walletdb` to distinguish it from `coindb`, has been removed. (#19202) +- A `download` permission has been extracted from the `noban` permission. For + compatibility, `noban` implies the `download` permission, but this may change + in future releases. Refer to the help of the affected settings `-whitebind` + and `-whitelist` for more details. (#19191) + Changes to Wallet or GUI related settings can be found in the GUI or Wallet section below. +Tools and Utilities +------------------- + +- A new `bitcoin-cli -generate` command, equivalent to RPC `generatenewaddress` + followed by `generatetoaddress`, can generate blocks for command line testing + purposes. This is a client-side version of the + former `generate` RPC. See the help for details. (#19133) + +- The `bitcoin-cli -getinfo` command now displays the wallet name and balance for + each of the loaded wallets when more than one is loaded (e.g. in multiwallet + mode) and a wallet is not specified with `-rpcwallet`. (#18594) + New settings ------------ Wallet ------ +- Backwards compatibility has been dropped for two `getaddressinfo` RPC + deprecations, as notified in the 0.20 release notes. The deprecated `label` + field has been removed as well as the deprecated `labels` behavior of + returning a JSON object containing `name` and `purpose` key-value pairs. Since + 0.20, the `labels` field returns a JSON array of label names. 
(#19200) + - To improve wallet privacy, the frequency of wallet rebroadcast attempts is reduced from approximately once every 15 minutes to once every 12-36 hours. To maintain a similar level of guarantee for initial broadcast of wallet @@ -253,9 +304,23 @@ issue. GUI changes ----------- +- The GUI Peers window no longer displays a "Ban Score" field. This is part of + changes in 0.20.1 and in this release to the handling of misbehaving + peers. Refer to "Changes regarding misbehaving peers" in the 0.20.1 release + notes for details. (#19512) + Low-level changes ================= +RPC +--- + +- To make RPC `sendtoaddress` more consistent with `sendmany` the following error + `sendtoaddress` codes were changed from `-4` to `-6`: + - Insufficient funds + - Fee estimation failed + - Transaction has too long of a mempool chain + Tests ----- diff --git a/doc/release-notes/release-notes-0.20.1.md b/doc/release-notes/release-notes-0.20.1.md new file mode 100644 index 0000000000..9fbb29cb82 --- /dev/null +++ b/doc/release-notes/release-notes-0.20.1.md @@ -0,0 +1,158 @@ +0.20.1 Release Notes +==================== + +Bitcoin Core version 0.20.1 is now available from: + + <https://bitcoincore.org/bin/bitcoin-core-0.20.1/> + +This minor release includes various bug fixes and performance +improvements, as well as updated translations. + +Please report bugs using the issue tracker at GitHub: + + <https://github.com/bitcoin/bitcoin/issues> + +To receive security and update notifications, please subscribe to: + + <https://bitcoincore.org/en/list/announcements/join/> + +How to Upgrade +============== + +If you are running an older version, shut it down. Wait until it has completely +shut down (which might take a few minutes in some cases), then run the +installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac) +or `bitcoind`/`bitcoin-qt` (on Linux). + +Upgrading directly from a version of Bitcoin Core that has reached its EOL is +possible, but it might take some time if the data directory needs to be migrated. Old +wallet versions of Bitcoin Core are generally supported. + +Compatibility +============== + +Bitcoin Core is supported and extensively tested on operating systems +using the Linux kernel, macOS 10.12+, and Windows 7 and newer. Bitcoin +Core should also work on most other Unix-like systems but is not as +frequently tested on them. It is not recommended to use Bitcoin Core on +unsupported systems. + +From Bitcoin Core 0.20.0 onwards, macOS versions earlier than 10.12 are no +longer supported. Additionally, Bitcoin Core does not yet change appearance +when macOS "dark mode" is activated. + +Known Bugs +========== + +The process for generating the source code release ("tarball") has changed in an +effort to make it more complete, however, there are a few regressions in +this release: + +- The generated `configure` script is currently missing, and you will need to + install autotools and run `./autogen.sh` before you can run + `./configure`. This is the same as when checking out from git. + +- Instead of running `make` simply, you should instead run + `BITCOIN_GENBUILD_NO_GIT=1 make`. + +Notable changes +=============== + +Changes regarding misbehaving peers +----------------------------------- + +Peers that misbehave (e.g. send us invalid blocks) are now referred to as +discouraged nodes in log output, as they're not (and weren't) strictly banned: +incoming connections are still allowed from them, but they're preferred for +eviction. 
+ +Furthermore, a few additional changes are introduced to how discouraged +addresses are treated: + +- Discouraging an address does not time out automatically after 24 hours + (or the `-bantime` setting). Depending on traffic from other peers, + discouragement may time out at an indeterminate time. + +- Discouragement is not persisted over restarts. + +- There is no method to list discouraged addresses. They are not returned by + the `listbanned` RPC. That RPC also no longer reports the `ban_reason` + field, as `"manually added"` is the only remaining option. + +- Discouragement cannot be removed with the `setban remove` RPC command. + If you need to remove a discouragement, you can remove all discouragements by + stop-starting your node. + +Notification changes +-------------------- + +`-walletnotify` notifications are now sent for wallet transactions that are +removed from the mempool because they conflict with a new block. These +notifications were sent previously before the v0.19 release, but had been +broken since that release (bug +[#18325](https://github.com/bitcoin/bitcoin/issues/18325)). + +PSBT changes +------------ + +PSBTs will contain both the non-witness utxo and the witness utxo for segwit +inputs in order to restore compatibility with wallet software that are now +requiring the full previous transaction for segwit inputs. The witness utxo +is still provided to maintain compatibility with software which relied on its +existence to determine whether an input was segwit. + +0.20.1 change log +================= + +### Mining +- #19019 Fix GBT: Restore "!segwit" and "csv" to "rules" key (luke-jr) + +### P2P protocol and network code +- #19219 Replace automatic bans with discouragement filter (sipa) + +### Wallet +- #19300 Handle concurrent wallet loading (promag) +- #18982 Minimal fix to restore conflicted transaction notifications (ryanofsky) + +### RPC and other APIs +- #19524 Increment input value sum only once per UTXO in decodepsbt (fanquake) +- #19517 psbt: Increment input value sum only once per UTXO in decodepsbt (achow101) +- #19215 psbt: Include and allow both non_witness_utxo and witness_utxo for segwit inputs (achow101) + +### GUI +- #19097 Add missing QPainterPath include (achow101) +- #19059 update Qt base translations for macOS release (fanquake) + +### Build system +- #19152 improve build OS configure output (skmcontrib) +- #19536 qt, build: Fix QFileDialog for static builds (hebasto) + +### Tests and QA +- #19444 Remove cached directories and associated script blocks from appveyor config (sipsorcery) +- #18640 appveyor: Remove clcache (MarcoFalke) + +### Miscellaneous +- #19194 util: Don't reference errno when pthread fails (miztake) +- #18700 Fix locking on WSL using flock instead of fcntl (meshcollider) + +Credits +======= + +Thanks to everyone who directly contributed to this release: + +- Aaron Clauson +- Andrew Chow +- fanquake +- Hennadii Stepanov +- João Barbosa +- Luke Dashjr +- MarcoFalke +- MIZUTA Takeshi +- Pieter Wuille +- Russell Yanofsky +- sachinkm77 +- Samuel Dobson +- Wladimir J. van der Laan + +As well as to everyone that helped with translations on +[Transifex](https://www.transifex.com/bitcoin/bitcoin/). diff --git a/doc/tor.md b/doc/tor.md index 2c54e32f84..17807856e5 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -1,6 +1,6 @@ # TOR SUPPORT IN BITCOIN -It is possible to run Bitcoin Core as a Tor hidden service, and connect to such services. +It is possible to run Bitcoin Core as a Tor onion service, and connect to such services. 
The following directions assume you have a Tor proxy running on port 9050. Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150. See [Tor Project FAQ:TBBSocksPort](https://www.torproject.org/docs/faq.html.en#TBBSocksPort) for how to properly configure Tor. @@ -14,12 +14,12 @@ outgoing connections, but more is possible. -proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy server will be used to try to reach .onion addresses as well. - -onion=ip:port Set the proxy server to use for Tor hidden services. You do not + -onion=ip:port Set the proxy server to use for Tor onion services. You do not need to set this if it's the same as -proxy. You can use -noonion - to explicitly disable access to hidden services. + to explicitly disable access to onion services. -listen When using -proxy, listening is disabled by default. If you want - to run a hidden service (see next section), you'll need to enable + to run an onion service (see next section), you'll need to enable it explicitly. -connect=X When behind a Tor proxy, you can specify .onion addresses instead @@ -94,11 +94,11 @@ for normal IPv4/IPv6 communication, use: ## 3. Automatically listen on Tor Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket -API, to create and destroy 'ephemeral' hidden services programmatically. +API, to create and destroy 'ephemeral' onion services programmatically. Bitcoin Core has been updated to make use of this. This means that if Tor is running (and proper authentication has been configured), -Bitcoin Core automatically creates a hidden service to listen on. This will positively +Bitcoin Core automatically creates an onion service to listen on. This will positively affect the number of available .onion nodes. This new feature is enabled by default if Bitcoin Core is listening (`-listen`), and @@ -110,7 +110,7 @@ Connecting to Tor's control socket API requires one of two authentication method configured. It also requires the control socket to be enabled, e.g. put `ControlPort 9051` in `torrc` config file. For cookie authentication the user running bitcoind must have read access to the `CookieAuthFile` specified in Tor configuration. In some cases this is -preconfigured and the creation of a hidden service is automatic. If permission problems +preconfigured and the creation of an onion service is automatic. If permission problems are seen with `-debug=tor` they can be resolved by adding both the user running Tor and the user running bitcoind to the same group and setting permissions appropriately. On Debian-based systems the user running bitcoind can be added to the debian-tor group, @@ -127,8 +127,8 @@ in the tor configuration file. The hashed password can be obtained with the comm ## 4. Privacy recommendations -- Do not add anything but Bitcoin Core ports to the hidden service created in section 2. - If you run a web service too, create a new hidden service for that. +- Do not add anything but Bitcoin Core ports to the onion service created in section 2. + If you run a web service too, create a new onion service for that. Otherwise it is trivial to link them, which may reduce privacy. Hidden services created automatically (as in section 3) always have only one port open. 
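The tor.md hunk above renames "hidden service" to "onion service" but leaves the described setup unchanged. As an illustrative aside (not part of the diff), the sketch below gathers the steps that document mentions into one sequence for a Debian-based system; the paths, port numbers, and group name are assumptions taken from the surrounding text.

```bash
# Hypothetical walk-through of section 3 ("Automatically listen on Tor"):
# enable Tor's control socket with cookie authentication, let the user running
# bitcoind read the cookie, then start bitcoind so it creates an ephemeral
# onion service on its own.
echo "ControlPort 9051"       | sudo tee -a /etc/tor/torrc
echo "CookieAuthentication 1" | sudo tee -a /etc/tor/torrc
sudo systemctl restart tor
sudo usermod -a -G debian-tor "$USER"   # group name as in the Debian note above;
                                        # log in again for the change to take effect
bitcoind -proxy=127.0.0.1:9050 -listen=1 -listenonion=1 -debug=tor
```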
diff --git a/share/examples/bitcoin.conf b/share/examples/bitcoin.conf index 96fb6658a0..90a592cc63 100644 --- a/share/examples/bitcoin.conf +++ b/share/examples/bitcoin.conf @@ -20,8 +20,8 @@ # Bind to given address and always listen on it. Use [host]:port notation for IPv6 #bind=<addr> -# Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6 -#whitebind=<addr> +# Bind to given address and add permission flags to peers connecting to it. Use [host]:port notation for IPv6 +#whitebind=perm@<addr> ############################################################## ## Quick Primer on addnode vs connect ## diff --git a/src/.clang-format b/src/.clang-format index a8f8565f80..ef7a0ef5c7 100644 --- a/src/.clang-format +++ b/src/.clang-format @@ -3,7 +3,6 @@ AccessModifierOffset: -4 AlignAfterOpenBracket: true AlignEscapedNewlinesLeft: true AlignTrailingComments: true -AllowAllArgumentsOnNextLine : true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: true diff --git a/src/Makefile.am b/src/Makefile.am index 632ed3e31f..cd3cc95707 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -110,9 +110,9 @@ BITCOIN_CORE_H = \ banman.h \ base58.h \ bech32.h \ - bloom.h \ blockencodings.h \ blockfilter.h \ + bloom.h \ chain.h \ chainparams.h \ chainparamsbase.h \ @@ -133,6 +133,7 @@ BITCOIN_CORE_H = \ core_io.h \ core_memusage.h \ cuckoocache.h \ + dbwrapper.h \ flatfile.h \ fs.h \ httprpc.h \ @@ -148,7 +149,6 @@ BITCOIN_CORE_H = \ interfaces/wallet.h \ key.h \ key_io.h \ - dbwrapper.h \ limitedmap.h \ logging.h \ logging/timer.h \ @@ -167,6 +167,7 @@ BITCOIN_CORE_H = \ node/context.h \ node/psbt.h \ node/transaction.h \ + node/ui_interface.h \ node/utxo_snapshot.h \ noui.h \ optional.h \ @@ -206,13 +207,12 @@ BITCOIN_CORE_H = \ support/events.h \ support/lockedpool.h \ sync.h \ - threadsafety.h \ threadinterrupt.h \ + threadsafety.h \ timedata.h \ torcontrol.h \ txdb.h \ txmempool.h \ - ui_interface.h \ undo.h \ util/asmap.h \ util/bip32.h \ @@ -221,8 +221,6 @@ BITCOIN_CORE_H = \ util/error.h \ util/fees.h \ util/golombrice.h \ - util/spanparsing.h \ - util/system.h \ util/macros.h \ util/memory.h \ util/message.h \ @@ -230,19 +228,22 @@ BITCOIN_CORE_H = \ util/rbf.h \ util/ref.h \ util/settings.h \ + util/spanparsing.h \ util/string.h \ + util/system.h \ util/threadnames.h \ util/time.h \ util/translation.h \ + util/ui_change_type.h \ util/url.h \ util/vector.h \ validation.h \ validationinterface.h \ versionbits.h \ versionbitsinfo.h \ - walletinitinterface.h \ wallet/bdb.h \ wallet/coincontrol.h \ + wallet/coinselection.h \ wallet/context.h \ wallet/crypter.h \ wallet/db.h \ @@ -257,7 +258,7 @@ BITCOIN_CORE_H = \ wallet/walletdb.h \ wallet/wallettool.h \ wallet/walletutil.h \ - wallet/coinselection.h \ + walletinitinterface.h \ warnings.h \ zmq/zmqabstractnotifier.h \ zmq/zmqconfig.h\ @@ -286,16 +287,16 @@ libbitcoin_server_a_SOURCES = \ blockfilter.cpp \ chain.cpp \ consensus/tx_verify.cpp \ + dbwrapper.cpp \ flatfile.cpp \ httprpc.cpp \ httpserver.cpp \ index/base.cpp \ index/blockfilterindex.cpp \ index/txindex.cpp \ + init.cpp \ interfaces/chain.cpp \ interfaces/node.cpp \ - init.cpp \ - dbwrapper.cpp \ miner.cpp \ net.cpp \ net_processing.cpp \ @@ -304,6 +305,7 @@ libbitcoin_server_a_SOURCES = \ node/context.cpp \ node/psbt.cpp \ node/transaction.cpp \ + node/ui_interface.cpp \ noui.cpp \ policy/fees.cpp \ policy/rbf.cpp \ @@ -322,7 +324,6 @@ libbitcoin_server_a_SOURCES = \ torcontrol.cpp \ 
txdb.cpp \ txmempool.cpp \ - ui_interface.cpp \ validation.cpp \ validationinterface.cpp \ versionbits.cpp \ diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 93b5156af3..c224ca7bf6 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -33,6 +33,8 @@ bench_bench_bitcoin_SOURCES = \ bench/merkle_root.cpp \ bench/mempool_eviction.cpp \ bench/mempool_stress.cpp \ + bench/nanobench.h \ + bench/nanobench.cpp \ bench/rpc_blockchain.cpp \ bench/rpc_mempool.cpp \ bench/util_time.cpp \ diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 1f66516172..848053e841 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -276,9 +276,7 @@ if ENABLE_WALLET BITCOIN_QT_CPP += $(BITCOIN_QT_WALLET_CPP) endif # ENABLE_WALLET -RES_IMAGES = - -RES_MOVIES = $(wildcard $(srcdir)/qt/res/movies/spinner-*.png) +RES_ANIMATION = $(wildcard $(srcdir)/qt/res/animation/spinner-*.png) BITCOIN_RC = qt/res/bitcoin-qt-res.rc @@ -290,7 +288,7 @@ qt_libbitcoinqt_a_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS) qt_libbitcoinqt_a_OBJCXXFLAGS = $(AM_OBJCXXFLAGS) $(QT_PIE_FLAGS) qt_libbitcoinqt_a_SOURCES = $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(QT_FORMS_UI) \ - $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(RES_ICONS) $(RES_IMAGES) $(RES_MOVIES) + $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(RES_ICONS) $(RES_ANIMATION) if TARGET_DARWIN qt_libbitcoinqt_a_SOURCES += $(BITCOIN_MM) endif @@ -361,7 +359,7 @@ $(QT_QRC_LOCALE_CPP): $(QT_QRC_LOCALE) $(QT_QM) $(SED) -e '/^\*\*.*Created:/d' -e '/^\*\*.*by:/d' > $@ @rm $(@D)/temp_$(<F) -$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_ICONS) $(RES_IMAGES) $(RES_MOVIES) +$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_ICONS) $(RES_ANIMATION) @test -f $(RCC) $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin $< | \ $(SED) -e '/^\*\*.*Created:/d' -e '/^\*\*.*by:/d' > $@ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 472382c7d2..c3e46c0def 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -10,7 +10,9 @@ FUZZ_TARGETS = \ test/fuzz/addrman_deserialize \ test/fuzz/asmap \ test/fuzz/asmap_direct \ + test/fuzz/autofile \ test/fuzz/banentry_deserialize \ + test/fuzz/banman \ test/fuzz/base_encode_decode \ test/fuzz/bech32 \ test/fuzz/block \ @@ -28,12 +30,19 @@ FUZZ_TARGETS = \ test/fuzz/blockundo_deserialize \ test/fuzz/bloom_filter \ test/fuzz/bloomfilter_deserialize \ + test/fuzz/buffered_file \ test/fuzz/chain \ test/fuzz/checkqueue \ test/fuzz/coins_deserialize \ test/fuzz/coins_view \ test/fuzz/crypto \ + test/fuzz/crypto_aes256 \ + test/fuzz/crypto_aes256cbc \ + test/fuzz/crypto_chacha20 \ + test/fuzz/crypto_chacha20_poly1305_aead \ test/fuzz/crypto_common \ + test/fuzz/crypto_hkdf_hmac_sha256_l32 \ + test/fuzz/crypto_poly1305 \ test/fuzz/cuckoocache \ test/fuzz/decode_tx \ test/fuzz/descriptor_parse \ @@ -54,6 +63,7 @@ FUZZ_TARGETS = \ test/fuzz/key_io \ test/fuzz/key_origin_info_deserialize \ test/fuzz/kitchen_sink \ + test/fuzz/load_external_block_file \ test/fuzz/locale \ test/fuzz/merkle_block_deserialize \ test/fuzz/merkleblock \ @@ -73,6 +83,7 @@ FUZZ_TARGETS = \ test/fuzz/partial_merkle_tree_deserialize \ test/fuzz/partially_signed_transaction_deserialize \ test/fuzz/policy_estimator \ + test/fuzz/policy_estimator_io \ test/fuzz/pow \ test/fuzz/prefilled_transaction_deserialize \ test/fuzz/prevector \ @@ -232,6 +243,7 @@ BITCOIN_TESTS =\ test/net_tests.cpp \ test/netbase_tests.cpp \ test/pmt_tests.cpp \ + test/policy_fee_tests.cpp \ test/policyestimator_tests.cpp \ test/pow_tests.cpp \ 
test/prevector_tests.cpp \ @@ -253,6 +265,7 @@ BITCOIN_TESTS =\ test/skiplist_tests.cpp \ test/streams_tests.cpp \ test/sync_tests.cpp \ + test/system_tests.cpp \ test/util_threadnames_tests.cpp \ test/timedata_tests.cpp \ test/torcontrol_tests.cpp \ @@ -263,6 +276,7 @@ BITCOIN_TESTS =\ test/uint256_tests.cpp \ test/util_tests.cpp \ test/validation_block_tests.cpp \ + test/validation_chainstate_tests.cpp \ test/validation_chainstatemanager_tests.cpp \ test/validation_flush_tests.cpp \ test/validationinterface_tests.cpp \ @@ -348,12 +362,24 @@ test_fuzz_asmap_direct_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_asmap_direct_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_asmap_direct_SOURCES = test/fuzz/asmap_direct.cpp +test_fuzz_autofile_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_autofile_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_autofile_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_autofile_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_autofile_SOURCES = test/fuzz/autofile.cpp + test_fuzz_banentry_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBANENTRY_DESERIALIZE=1 test_fuzz_banentry_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_banentry_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_banentry_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_banentry_deserialize_SOURCES = test/fuzz/deserialize.cpp +test_fuzz_banman_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_banman_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_banman_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_banman_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_banman_SOURCES = test/fuzz/banman.cpp + test_fuzz_base_encode_decode_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_base_encode_decode_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_base_encode_decode_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -456,6 +482,12 @@ test_fuzz_bloomfilter_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_bloomfilter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_bloomfilter_deserialize_SOURCES = test/fuzz/deserialize.cpp +test_fuzz_buffered_file_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_buffered_file_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_buffered_file_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_buffered_file_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_buffered_file_SOURCES = test/fuzz/buffered_file.cpp + test_fuzz_chain_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_chain_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_chain_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -486,12 +518,48 @@ test_fuzz_crypto_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_crypto_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_crypto_SOURCES = test/fuzz/crypto.cpp +test_fuzz_crypto_aes256_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_crypto_aes256_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_crypto_aes256_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_crypto_aes256_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_crypto_aes256_SOURCES = test/fuzz/crypto_aes256.cpp + +test_fuzz_crypto_aes256cbc_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_crypto_aes256cbc_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_crypto_aes256cbc_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_crypto_aes256cbc_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) 
+test_fuzz_crypto_aes256cbc_SOURCES = test/fuzz/crypto_aes256cbc.cpp + +test_fuzz_crypto_chacha20_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_crypto_chacha20_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_crypto_chacha20_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_crypto_chacha20_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_crypto_chacha20_SOURCES = test/fuzz/crypto_chacha20.cpp + +test_fuzz_crypto_chacha20_poly1305_aead_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_crypto_chacha20_poly1305_aead_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_crypto_chacha20_poly1305_aead_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_crypto_chacha20_poly1305_aead_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_crypto_chacha20_poly1305_aead_SOURCES = test/fuzz/crypto_chacha20_poly1305_aead.cpp + test_fuzz_crypto_common_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_crypto_common_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_crypto_common_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_crypto_common_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_crypto_common_SOURCES = test/fuzz/crypto_common.cpp +test_fuzz_crypto_hkdf_hmac_sha256_l32_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_crypto_hkdf_hmac_sha256_l32_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_crypto_hkdf_hmac_sha256_l32_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_crypto_hkdf_hmac_sha256_l32_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_crypto_hkdf_hmac_sha256_l32_SOURCES = test/fuzz/crypto_hkdf_hmac_sha256_l32.cpp + +test_fuzz_crypto_poly1305_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_crypto_poly1305_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_crypto_poly1305_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_crypto_poly1305_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_crypto_poly1305_SOURCES = test/fuzz/crypto_poly1305.cpp + test_fuzz_cuckoocache_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_cuckoocache_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_cuckoocache_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -612,6 +680,12 @@ test_fuzz_kitchen_sink_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_kitchen_sink_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_kitchen_sink_SOURCES = test/fuzz/kitchen_sink.cpp +test_fuzz_load_external_block_file_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_load_external_block_file_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_load_external_block_file_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_load_external_block_file_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_load_external_block_file_SOURCES = test/fuzz/load_external_block_file.cpp + test_fuzz_locale_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_locale_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_locale_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -732,6 +806,12 @@ test_fuzz_policy_estimator_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_policy_estimator_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_policy_estimator_SOURCES = test/fuzz/policy_estimator.cpp +test_fuzz_policy_estimator_io_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_policy_estimator_io_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_policy_estimator_io_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_policy_estimator_io_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_policy_estimator_io_SOURCES = 
test/fuzz/policy_estimator_io.cpp + test_fuzz_pow_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_pow_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_pow_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -1128,7 +1208,7 @@ nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES) $(BITCOIN_TESTS): $(GENERATED_TEST_FILES) -CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log) +CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno test/fuzz/*.gcda test/fuzz/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log) CLEANFILES += $(CLEAN_BITCOIN_TEST) @@ -1158,8 +1238,8 @@ endif if TARGET_WINDOWS else if ENABLE_BENCH - @echo "Running bench/bench_bitcoin -evals=1 -scaling=0..." - $(BENCH_BINARY) -evals=1 -scaling=0 > /dev/null + @echo "Running bench/bench_bitcoin ..." + $(BENCH_BINARY) > /dev/null endif endif $(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C secp256k1 check diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 835c5d6c65..f3e8a19de2 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -8,6 +8,7 @@ #include <addrman.h> #include <chainparams.h> #include <clientversion.h> +#include <cstdint> #include <hash.h> #include <random.h> #include <streams.h> @@ -36,7 +37,7 @@ template <typename Data> bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data& data) { // Generate random temporary filename - unsigned short randv = 0; + uint16_t randv = 0; GetRandBytes((unsigned char*)&randv, sizeof(randv)); std::string tmpfn = strprintf("%s.%04x", prefix, randv); diff --git a/src/addrdb.h b/src/addrdb.h index c6d4307d69..8410c3776c 100644 --- a/src/addrdb.h +++ b/src/addrdb.h @@ -17,13 +17,6 @@ class CSubNet; class CAddrMan; class CDataStream; -typedef enum BanReason -{ - BanReasonUnknown = 0, - BanReasonNodeMisbehaving = 1, - BanReasonManuallyAdded = 2 -} BanReason; - class CBanEntry { public: @@ -31,7 +24,6 @@ public: int nVersion; int64_t nCreateTime; int64_t nBanUntil; - uint8_t banReason; CBanEntry() { @@ -44,31 +36,17 @@ public: nCreateTime = nCreateTimeIn; } - explicit CBanEntry(int64_t n_create_time_in, BanReason ban_reason_in) : CBanEntry(n_create_time_in) + SERIALIZE_METHODS(CBanEntry, obj) { - banReason = ban_reason_in; + uint8_t ban_reason = 2; //! 
For backward compatibility + READWRITE(obj.nVersion, obj.nCreateTime, obj.nBanUntil, ban_reason); } - SERIALIZE_METHODS(CBanEntry, obj) { READWRITE(obj.nVersion, obj.nCreateTime, obj.nBanUntil, obj.banReason); } - void SetNull() { nVersion = CBanEntry::CURRENT_VERSION; nCreateTime = 0; nBanUntil = 0; - banReason = BanReasonUnknown; - } - - std::string banReasonToString() const - { - switch (banReason) { - case BanReasonNodeMisbehaving: - return "node misbehaving"; - case BanReasonManuallyAdded: - return "manually added"; - default: - return "unknown"; - } } }; diff --git a/src/addrman.cpp b/src/addrman.cpp index 7aba340d9d..7636c6bad2 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -479,11 +479,15 @@ int CAddrMan::Check_() } #endif -void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr) +void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct) { - unsigned int nNodes = ADDRMAN_GETADDR_MAX_PCT * vRandom.size() / 100; - if (nNodes > ADDRMAN_GETADDR_MAX) - nNodes = ADDRMAN_GETADDR_MAX; + size_t nNodes = vRandom.size(); + if (max_pct != 0) { + nNodes = max_pct * nNodes / 100; + } + if (max_addresses != 0) { + nNodes = std::min(nNodes, max_addresses); + } // gather a list of random nodes, skipping those of low quality for (unsigned int n = 0; n < vRandom.size(); n++) { diff --git a/src/addrman.h b/src/addrman.h index 8e82020df0..ca045b91cd 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -153,12 +153,6 @@ public: //! how recent a successful connection should be before we allow an address to be evicted from tried #define ADDRMAN_REPLACEMENT_HOURS 4 -//! the maximum percentage of nodes to return in a getaddr call -#define ADDRMAN_GETADDR_MAX_PCT 23 - -//! the maximum number of nodes to return in a getaddr call -#define ADDRMAN_GETADDR_MAX 2500 - //! Convenience #define ADDRMAN_TRIED_BUCKET_COUNT (1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2) #define ADDRMAN_NEW_BUCKET_COUNT (1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2) @@ -261,7 +255,7 @@ protected: #endif //! Select several addresses at once. - void GetAddr_(std::vector<CAddress> &vAddr) EXCLUSIVE_LOCKS_REQUIRED(cs); + void GetAddr_(std::vector<CAddress> &vAddr, size_t max_addresses, size_t max_pct) EXCLUSIVE_LOCKS_REQUIRED(cs); //! Mark an entry as currently-connected-to. void Connected_(const CService &addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -638,13 +632,13 @@ public: } //! Return a bunch of addresses, selected at random. 
- std::vector<CAddress> GetAddr() + std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct) { Check(); std::vector<CAddress> vAddr; { LOCK(cs); - GetAddr_(vAddr); + GetAddr_(vAddr, max_addresses, max_pct); } Check(); return vAddr; diff --git a/src/banman.cpp b/src/banman.cpp index 9cc584f0e4..8752185a60 100644 --- a/src/banman.cpp +++ b/src/banman.cpp @@ -6,7 +6,7 @@ #include <banman.h> #include <netaddress.h> -#include <ui_interface.h> +#include <node/ui_interface.h> #include <util/system.h> #include <util/time.h> #include <util/translation.h> @@ -26,7 +26,7 @@ BanMan::BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t SweepBanned(); // sweep out unused entries LogPrint(BCLog::NET, "Loaded %d banned node ips/subnets from banlist.dat %dms\n", - banmap.size(), GetTimeMillis() - n_start); + m_banned.size(), GetTimeMillis() - n_start); } else { LogPrintf("Invalid or missing banlist.dat; recreating\n"); SetBannedSetDirty(true); // force write @@ -68,28 +68,13 @@ void BanMan::ClearBanned() if (m_client_interface) m_client_interface->BannedListChanged(); } -int BanMan::IsBannedLevel(CNetAddr net_addr) +bool BanMan::IsDiscouraged(const CNetAddr& net_addr) { - // Returns the most severe level of banning that applies to this address. - // 0 - Not banned - // 1 - Automatic misbehavior ban - // 2 - Any other ban - int level = 0; - auto current_time = GetTime(); LOCK(m_cs_banned); - for (const auto& it : m_banned) { - CSubNet sub_net = it.first; - CBanEntry ban_entry = it.second; - - if (current_time < ban_entry.nBanUntil && sub_net.Match(net_addr)) { - if (ban_entry.banReason != BanReasonNodeMisbehaving) return 2; - level = 1; - } - } - return level; + return m_discouraged.contains(net_addr.GetAddrBytes()); } -bool BanMan::IsBanned(CNetAddr net_addr) +bool BanMan::IsBanned(const CNetAddr& net_addr) { auto current_time = GetTime(); LOCK(m_cs_banned); @@ -104,7 +89,7 @@ bool BanMan::IsBanned(CNetAddr net_addr) return false; } -bool BanMan::IsBanned(CSubNet sub_net) +bool BanMan::IsBanned(const CSubNet& sub_net) { auto current_time = GetTime(); LOCK(m_cs_banned); @@ -118,15 +103,21 @@ bool BanMan::IsBanned(CSubNet sub_net) return false; } -void BanMan::Ban(const CNetAddr& net_addr, const BanReason& ban_reason, int64_t ban_time_offset, bool since_unix_epoch) +void BanMan::Ban(const CNetAddr& net_addr, int64_t ban_time_offset, bool since_unix_epoch) { CSubNet sub_net(net_addr); - Ban(sub_net, ban_reason, ban_time_offset, since_unix_epoch); + Ban(sub_net, ban_time_offset, since_unix_epoch); +} + +void BanMan::Discourage(const CNetAddr& net_addr) +{ + LOCK(m_cs_banned); + m_discouraged.insert(net_addr.GetAddrBytes()); } -void BanMan::Ban(const CSubNet& sub_net, const BanReason& ban_reason, int64_t ban_time_offset, bool since_unix_epoch) +void BanMan::Ban(const CSubNet& sub_net, int64_t ban_time_offset, bool since_unix_epoch) { - CBanEntry ban_entry(GetTime(), ban_reason); + CBanEntry ban_entry(GetTime()); int64_t normalized_ban_time_offset = ban_time_offset; bool normalized_since_unix_epoch = since_unix_epoch; @@ -146,8 +137,8 @@ void BanMan::Ban(const CSubNet& sub_net, const BanReason& ban_reason, int64_t ba } if (m_client_interface) m_client_interface->BannedListChanged(); - //store banlist to disk immediately if user requested ban - if (ban_reason == BanReasonManuallyAdded) DumpBanlist(); + //store banlist to disk immediately + DumpBanlist(); } bool BanMan::Unban(const CNetAddr& net_addr) diff --git a/src/banman.h b/src/banman.h index 6bea2e75e9..f6bfbd1e49 100644 --- 
a/src/banman.h +++ b/src/banman.h @@ -6,6 +6,7 @@ #define BITCOIN_BANMAN_H #include <addrdb.h> +#include <bloom.h> #include <fs.h> #include <net_types.h> // For banmap_t #include <sync.h> @@ -23,32 +24,55 @@ class CClientUIInterface; class CNetAddr; class CSubNet; -// Denial-of-service detection/prevention -// The idea is to detect peers that are behaving -// badly and disconnect/ban them, but do it in a -// one-coding-mistake-won't-shatter-the-entire-network -// way. -// IMPORTANT: There should be nothing I can give a -// node that it will forward on that will make that -// node's peers drop it. If there is, an attacker -// can isolate a node and/or try to split the network. -// Dropping a node for sending stuff that is invalid -// now but might be valid in a later version is also -// dangerous, because it can cause a network split -// between nodes running old code and nodes running -// new code. +// Banman manages two related but distinct concepts: +// +// 1. Banning. This is configured manually by the user, through the setban RPC. +// If an address or subnet is banned, we never accept incoming connections from +// it and never create outgoing connections to it. We won't gossip its address +// to other peers in addr messages. Banned addresses and subnets are stored to +// banlist.dat on shutdown and reloaded on startup. Banning can be used to +// prevent connections with spy nodes or other griefers. +// +// 2. Discouragement. If a peer misbehaves enough (see Misbehaving() in +// net_processing.cpp), we'll mark that address as discouraged. We still allow +// incoming connections from them, but they're preferred for eviction when +// we receive new incoming connections. We never make outgoing connections to +// them, and do not gossip their address to other peers. This is implemented as +// a bloom filter. We can (probabilistically) test for membership, but can't +// list all discouraged addresses or unmark them as discouraged. Discouragement +// can prevent our limited connection slots being used up by incompatible +// or broken peers. +// +// Neither banning nor discouragement are protections against denial-of-service +// attacks, since if an attacker has a way to waste our resources and we +// disconnect from them and ban that address, it's trivial for them to +// reconnect from another IP address. +// +// Attempting to automatically disconnect or ban any class of peer carries the +// risk of splitting the network. For example, if we banned/disconnected for a +// transaction that fails a policy check and a future version changes the +// policy check so the transaction is accepted, then that transaction could +// cause the network to split between old nodes and new nodes. class BanMan { public: ~BanMan(); BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t default_ban_time); - void Ban(const CNetAddr& net_addr, const BanReason& ban_reason, int64_t ban_time_offset = 0, bool since_unix_epoch = false); - void Ban(const CSubNet& sub_net, const BanReason& ban_reason, int64_t ban_time_offset = 0, bool since_unix_epoch = false); + void Ban(const CNetAddr& net_addr, int64_t ban_time_offset = 0, bool since_unix_epoch = false); + void Ban(const CSubNet& sub_net, int64_t ban_time_offset = 0, bool since_unix_epoch = false); + void Discourage(const CNetAddr& net_addr); void ClearBanned(); - int IsBannedLevel(CNetAddr net_addr); - bool IsBanned(CNetAddr net_addr); - bool IsBanned(CSubNet sub_net); + + //! 
Return whether net_addr is banned + bool IsBanned(const CNetAddr& net_addr); + + //! Return whether sub_net is exactly banned + bool IsBanned(const CSubNet& sub_net); + + //! Return whether net_addr is discouraged. + bool IsDiscouraged(const CNetAddr& net_addr); + bool Unban(const CNetAddr& net_addr); bool Unban(const CSubNet& sub_net); void GetBanned(banmap_t& banmap); @@ -68,6 +92,7 @@ private: CClientUIInterface* m_client_interface = nullptr; CBanDB m_ban_db; const int64_t m_default_ban_time; + CRollingBloomFilter m_discouraged GUARDED_BY(m_cs_banned) {50000, 0.000001}; }; #endif diff --git a/src/base58.cpp b/src/base58.cpp index 6a9e21ffc2..9b2946e7a9 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -141,7 +141,7 @@ std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn) { // add 4-byte hash check to the end std::vector<unsigned char> vch(vchIn); - uint256 hash = Hash(vch.begin(), vch.end()); + uint256 hash = Hash(vch); vch.insert(vch.end(), (unsigned char*)&hash, (unsigned char*)&hash + 4); return EncodeBase58(vch); } @@ -154,7 +154,7 @@ bool DecodeBase58Check(const char* psz, std::vector<unsigned char>& vchRet, int return false; } // re-calculate the checksum, ensure it matches the included 4-byte checksum - uint256 hash = Hash(vchRet.begin(), vchRet.end() - 4); + uint256 hash = Hash(MakeSpan(vchRet).first(vchRet.size() - 4)); if (memcmp(&hash, &vchRet[vchRet.size() - 4], 4) != 0) { vchRet.clear(); return false; diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp index cc260df2b8..ebdad5a4b8 100644 --- a/src/bench/addrman.cpp +++ b/src/bench/addrman.cpp @@ -67,52 +67,52 @@ static void FillAddrMan(CAddrMan& addrman) /* Benchmarks */ -static void AddrManAdd(benchmark::State& state) +static void AddrManAdd(benchmark::Bench& bench) { CreateAddresses(); CAddrMan addrman; - while (state.KeepRunning()) { + bench.run([&] { AddAddressesToAddrMan(addrman); addrman.Clear(); - } + }); } -static void AddrManSelect(benchmark::State& state) +static void AddrManSelect(benchmark::Bench& bench) { CAddrMan addrman; FillAddrMan(addrman); - while (state.KeepRunning()) { + bench.run([&] { const auto& address = addrman.Select(); assert(address.GetPort() > 0); - } + }); } -static void AddrManGetAddr(benchmark::State& state) +static void AddrManGetAddr(benchmark::Bench& bench) { CAddrMan addrman; FillAddrMan(addrman); - while (state.KeepRunning()) { - const auto& addresses = addrman.GetAddr(); + bench.run([&] { + const auto& addresses = addrman.GetAddr(2500, 23); assert(addresses.size() > 0); - } + }); } -static void AddrManGood(benchmark::State& state) +static void AddrManGood(benchmark::Bench& bench) { /* Create many CAddrMan objects - one to be modified at each loop iteration. * This is necessary because the CAddrMan::Good() method modifies the * object, affecting the timing of subsequent calls to the same method and * we want to do the same amount of work in every loop iteration. 
*/ - const uint64_t numLoops = state.m_num_iters * state.m_num_evals; + bench.epochs(5).epochIterations(1); - std::vector<CAddrMan> addrmans(numLoops); + std::vector<CAddrMan> addrmans(bench.epochs() * bench.epochIterations()); for (auto& addrman : addrmans) { FillAddrMan(addrman); } @@ -128,13 +128,13 @@ static void AddrManGood(benchmark::State& state) }; uint64_t i = 0; - while (state.KeepRunning()) { + bench.run([&] { markSomeAsGood(addrmans.at(i)); ++i; - } + }); } -BENCHMARK(AddrManAdd, 5); -BENCHMARK(AddrManSelect, 1000000); -BENCHMARK(AddrManGetAddr, 500); -BENCHMARK(AddrManGood, 2); +BENCHMARK(AddrManAdd); +BENCHMARK(AddrManSelect); +BENCHMARK(AddrManGetAddr); +BENCHMARK(AddrManGood); diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp index 0690483d50..00544cba31 100644 --- a/src/bench/base58.cpp +++ b/src/bench/base58.cpp @@ -10,7 +10,7 @@ #include <vector> -static void Base58Encode(benchmark::State& state) +static void Base58Encode(benchmark::Bench& bench) { static const std::array<unsigned char, 32> buff = { { @@ -19,13 +19,13 @@ static void Base58Encode(benchmark::State& state) 200, 24 } }; - while (state.KeepRunning()) { + bench.batch(buff.size()).unit("byte").run([&] { EncodeBase58(buff.data(), buff.data() + buff.size()); - } + }); } -static void Base58CheckEncode(benchmark::State& state) +static void Base58CheckEncode(benchmark::Bench& bench) { static const std::array<unsigned char, 32> buff = { { @@ -36,22 +36,22 @@ static void Base58CheckEncode(benchmark::State& state) }; std::vector<unsigned char> vch; vch.assign(buff.begin(), buff.end()); - while (state.KeepRunning()) { + bench.batch(buff.size()).unit("byte").run([&] { EncodeBase58Check(vch); - } + }); } -static void Base58Decode(benchmark::State& state) +static void Base58Decode(benchmark::Bench& bench) { const char* addr = "17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"; std::vector<unsigned char> vch; - while (state.KeepRunning()) { + bench.batch(strlen(addr)).unit("byte").run([&] { (void) DecodeBase58(addr, vch, 64); - } + }); } -BENCHMARK(Base58Encode, 470 * 1000); -BENCHMARK(Base58CheckEncode, 320 * 1000); -BENCHMARK(Base58Decode, 800 * 1000); +BENCHMARK(Base58Encode); +BENCHMARK(Base58CheckEncode); +BENCHMARK(Base58Decode); diff --git a/src/bench/bech32.cpp b/src/bench/bech32.cpp index 2107840a3a..c74d8d51b3 100644 --- a/src/bench/bech32.cpp +++ b/src/bench/bech32.cpp @@ -3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include <bench/bench.h> +#include <bench/nanobench.h> #include <bech32.h> #include <util/strencodings.h> @@ -11,26 +12,26 @@ #include <vector> -static void Bech32Encode(benchmark::State& state) +static void Bech32Encode(benchmark::Bench& bench) { std::vector<uint8_t> v = ParseHex("c97f5a67ec381b760aeaf67573bc164845ff39a3bb26a1cee401ac67243b48db"); std::vector<unsigned char> tmp = {0}; tmp.reserve(1 + 32 * 8 / 5); ConvertBits<8, 5, true>([&](unsigned char c) { tmp.push_back(c); }, v.begin(), v.end()); - while (state.KeepRunning()) { + bench.batch(v.size()).unit("byte").run([&] { bech32::Encode("bc", tmp); - } + }); } -static void Bech32Decode(benchmark::State& state) +static void Bech32Decode(benchmark::Bench& bench) { std::string addr = "bc1qkallence7tjawwvy0dwt4twc62qjgaw8f4vlhyd006d99f09"; - while (state.KeepRunning()) { + bench.batch(addr.size()).unit("byte").run([&] { bech32::Decode(addr); - } + }); } -BENCHMARK(Bech32Encode, 800 * 1000); -BENCHMARK(Bech32Decode, 800 * 1000); +BENCHMARK(Bech32Encode); +BENCHMARK(Bech32Decode); diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp index 7b93ef688d..01466d0b6f 100644 --- a/src/bench/bench.cpp +++ b/src/bench/bench.cpp @@ -8,141 +8,73 @@ #include <test/util/setup_common.h> #include <validation.h> -#include <algorithm> -#include <assert.h> -#include <iomanip> -#include <iostream> -#include <numeric> #include <regex> const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; -void benchmark::ConsolePrinter::header() -{ - std::cout << "# Benchmark, evals, iterations, total, min, max, median" << std::endl; -} +namespace { -void benchmark::ConsolePrinter::result(const State& state) +void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& benchmarkResults, const std::string& filename, const char* tpl) { - auto results = state.m_elapsed_results; - std::sort(results.begin(), results.end()); - - double total = state.m_num_iters * std::accumulate(results.begin(), results.end(), 0.0); - - double front = 0; - double back = 0; - double median = 0; - - if (!results.empty()) { - front = results.front(); - back = results.back(); - - size_t mid = results.size() / 2; - median = results[mid]; - if (0 == results.size() % 2) { - median = (results[mid] + results[mid + 1]) / 2; - } + if (benchmarkResults.empty() || filename.empty()) { + // nothing to write, bail out + return; } - - std::cout << std::setprecision(6); - std::cout << state.m_name << ", " << state.m_num_evals << ", " << state.m_num_iters << ", " << total << ", " << front << ", " << back << ", " << median << std::endl; -} - -void benchmark::ConsolePrinter::footer() {} -benchmark::PlotlyPrinter::PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height) - : m_plotly_url(plotly_url), m_width(width), m_height(height) -{ -} - -void benchmark::PlotlyPrinter::header() -{ - std::cout << "<html><head>" - << "<script src=\"" << m_plotly_url << "\"></script>" - << "</head><body><div id=\"myDiv\" style=\"width:" << m_width << "px; height:" << m_height << "px\"></div>" - << "<script> var data = [" - << std::endl; -} - -void benchmark::PlotlyPrinter::result(const State& state) -{ - std::cout << "{ " << std::endl - << " name: '" << state.m_name << "', " << std::endl - << " y: ["; - - const char* prefix = ""; - for (const auto& e : state.m_elapsed_results) { - std::cout << prefix << std::setprecision(6) << e; - prefix = ", "; + std::ofstream fout(filename); + if (fout.is_open()) { + ankerl::nanobench::render(tpl, benchmarkResults, fout); + } else { + std::cout << "Could write to 
file '" << filename << "'" << std::endl; } - std::cout << "]," << std::endl - << " boxpoints: 'all', jitter: 0.3, pointpos: 0, type: 'box'," - << std::endl - << "}," << std::endl; -} -void benchmark::PlotlyPrinter::footer() -{ - std::cout << "]; var layout = { showlegend: false, yaxis: { rangemode: 'tozero', autorange: true } };" - << "Plotly.newPlot('myDiv', data, layout);" - << "</script></body></html>"; + std::cout << "Created '" << filename << "'" << std::endl; } +} // namespace benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks() { - static std::map<std::string, Bench> benchmarks_map; + static std::map<std::string, BenchFunction> benchmarks_map; return benchmarks_map; } -benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func, uint64_t num_iters_for_one_second) +benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func) { - benchmarks().insert(std::make_pair(name, Bench{func, num_iters_for_one_second})); + benchmarks().insert(std::make_pair(name, func)); } -void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only) +void benchmark::BenchRunner::RunAll(const Args& args) { - if (!std::ratio_less_equal<benchmark::clock::period, std::micro>::value) { - std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n"; - } -#ifdef DEBUG - std::cerr << "WARNING: This is a debug build - may result in slower benchmarks.\n"; -#endif - - std::regex reFilter(filter); + std::regex reFilter(args.regex_filter); std::smatch baseMatch; - printer.header(); - + std::vector<ankerl::nanobench::Result> benchmarkResults; for (const auto& p : benchmarks()) { if (!std::regex_match(p.first, baseMatch, reFilter)) { continue; } - uint64_t num_iters = static_cast<uint64_t>(p.second.num_iters_for_one_second * scaling); - if (0 == num_iters) { - num_iters = 1; - } - State state(p.first, num_evals, num_iters, printer); - if (!is_list_only) { - p.second.func(state); + if (args.is_list_only) { + std::cout << p.first << std::endl; + continue; } - printer.result(state); - } - - printer.footer(); -} - -bool benchmark::State::UpdateTimer(const benchmark::time_point current_time) -{ - if (m_start_time != time_point()) { - std::chrono::duration<double> diff = current_time - m_start_time; - m_elapsed_results.push_back(diff.count() / m_num_iters); - if (m_elapsed_results.size() == m_num_evals) { - return false; + Bench bench; + bench.name(p.first); + if (args.asymptote.empty()) { + p.second(bench); + } else { + for (auto n : args.asymptote) { + bench.complexityN(n); + p.second(bench); + } + std::cout << bench.complexityBigO() << std::endl; } + benchmarkResults.push_back(bench.results().back()); } - m_num_iters_left = m_num_iters - 1; - return true; + GenerateTemplateResults(benchmarkResults, args.output_csv, "# Benchmark, evals, iterations, total, min, max, median\n" + "{{#result}}{{name}}, {{epochs}}, {{average(iterations)}}, {{sumProduct(iterations, elapsed)}}, {{minimum(elapsed)}}, {{maximum(elapsed)}}, {{median(elapsed)}}\n" + "{{/result}}"); + GenerateTemplateResults(benchmarkResults, args.output_json, ankerl::nanobench::templates::json()); } diff --git a/src/bench/bench.h b/src/bench/bench.h index 629bca9a73..bafc7f8716 100644 --- a/src/bench/bench.h +++ b/src/bench/bench.h @@ -11,131 +11,53 @@ #include <string> #include <vector> +#include <bench/nanobench.h> #include <boost/preprocessor/cat.hpp> #include 
<boost/preprocessor/stringize.hpp> -// Simple micro-benchmarking framework; API mostly matches a subset of the Google Benchmark -// framework (see https://github.com/google/benchmark) -// Why not use the Google Benchmark framework? Because adding Yet Another Dependency -// (that uses cmake as its build system and has lots of features we don't need) isn't -// worth it. - /* * Usage: -static void CODE_TO_TIME(benchmark::State& state) +static void CODE_TO_TIME(benchmark::Bench& bench) { ... do any setup needed... - while (state.KeepRunning()) { + bench.run([&] { ... do stuff you want to time... - } + }); ... do any cleanup needed... } -// default to running benchmark for 5000 iterations -BENCHMARK(CODE_TO_TIME, 5000); +BENCHMARK(CODE_TO_TIME); */ namespace benchmark { -// In case high_resolution_clock is steady, prefer that, otherwise use steady_clock. -struct best_clock { - using hi_res_clock = std::chrono::high_resolution_clock; - using steady_clock = std::chrono::steady_clock; - using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type; -}; -using clock = best_clock::type; -using time_point = clock::time_point; -using duration = clock::duration; - -class Printer; - -class State -{ -public: - std::string m_name; - uint64_t m_num_iters_left; - const uint64_t m_num_iters; - const uint64_t m_num_evals; - std::vector<double> m_elapsed_results; - time_point m_start_time; - bool UpdateTimer(time_point finish_time); +using ankerl::nanobench::Bench; - State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals) - { - } +typedef std::function<void(Bench&)> BenchFunction; - inline bool KeepRunning() - { - if (m_num_iters_left--) { - return true; - } - - bool result = UpdateTimer(clock::now()); - // measure again so runtime of UpdateTimer is not included - m_start_time = clock::now(); - return result; - } +struct Args { + std::string regex_filter; + bool is_list_only; + std::vector<double> asymptote; + std::string output_csv; + std::string output_json; }; -typedef std::function<void(State&)> BenchFunction; - class BenchRunner { - struct Bench { - BenchFunction func; - uint64_t num_iters_for_one_second; - }; - typedef std::map<std::string, Bench> BenchmarkMap; + typedef std::map<std::string, BenchFunction> BenchmarkMap; static BenchmarkMap& benchmarks(); public: - BenchRunner(std::string name, BenchFunction func, uint64_t num_iters_for_one_second); - - static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only); -}; + BenchRunner(std::string name, BenchFunction func); -// interface to output benchmark results. -class Printer -{ -public: - virtual ~Printer() {} - virtual void header() = 0; - virtual void result(const State& state) = 0; - virtual void footer() = 0; -}; - -// default printer to console, shows min, max, median.
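// Illustrative sketch, not part of the patch above: a complete benchmark written against the new interface
// described in the Usage comment. Only benchmark::Bench, Bench::run(), ankerl::nanobench::doNotOptimizeAway()
// and the BENCHMARK macro come from the code in this diff; the function name and workload are made up.
static void ExampleSum(benchmark::Bench& bench)
{
    std::vector<int> data(1000, 1); // setup runs once, outside the timed lambda
    bench.run([&] {
        int sum = 0;
        for (int v : data) sum += v;
        ankerl::nanobench::doNotOptimizeAway(sum); // keep the compiler from dropping the loop
    });
}
BENCHMARK(ExampleSum);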
-class ConsolePrinter : public Printer -{ -public: - void header() override; - void result(const State& state) override; - void footer() override; -}; - -// creates box plot with plotly.js -class PlotlyPrinter : public Printer -{ -public: - PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height); - void header() override; - void result(const State& state) override; - void footer() override; - -private: - std::string m_plotly_url; - int64_t m_width; - int64_t m_height; + static void RunAll(const Args& args); }; } - - -// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", num_iterations); -// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately -// the same time, and scaling factor can be used that the total time is appropriate for your system. -#define BENCHMARK(n, num_iters_for_one_second) \ - benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second)); +// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo"); +#define BENCHMARK(n) \ + benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n); #endif // BITCOIN_BENCH_BENCH_H diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp index 1b75854210..135659f87f 100644 --- a/src/bench/bench_bitcoin.cpp +++ b/src/bench/bench_bitcoin.cpp @@ -4,37 +4,43 @@ #include <bench/bench.h> +#include <crypto/sha256.h> #include <util/strencodings.h> #include <util/system.h> #include <memory> -static const int64_t DEFAULT_BENCH_EVALUATIONS = 5; static const char* DEFAULT_BENCH_FILTER = ".*"; -static const char* DEFAULT_BENCH_SCALING = "1.0"; -static const char* DEFAULT_BENCH_PRINTER = "console"; -static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js"; -static const int64_t DEFAULT_PLOT_WIDTH = 1024; -static const int64_t DEFAULT_PLOT_HEIGHT = 768; static void SetupBenchArgs(ArgsManager& argsman) { SetupHelpOptions(argsman); - argsman.AddArg("-list", "List benchmarks without executing them. Can be combined with -scaling and -filter", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-evals=<n>", strprintf("Number of measurement evaluations to perform. (default: %u)", DEFAULT_BENCH_EVALUATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-scaling=<n>", strprintf("Scaling factor for benchmark's runtime (default: %u)", DEFAULT_BENCH_SCALING), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-printer=(console|plot)", strprintf("Choose printer format. console: print data to console. 
plot: Print results as HTML graph (default: %s)", DEFAULT_BENCH_PRINTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-plot-plotlyurl=<uri>", strprintf("URL to use for plotly.js (default: %s)", DEFAULT_PLOT_PLOTLYURL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-plot-width=<x>", strprintf("Plot width in pixel (default: %u)", DEFAULT_PLOT_WIDTH), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - argsman.AddArg("-plot-height=<x>", strprintf("Plot height in pixel (default: %u)", DEFAULT_PLOT_HEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-asymptote=n1,n2,n3,...", strprintf("Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); +} + +// parses a comma separated list like "10,20,30,50" +static std::vector<double> parseAsymptote(const std::string& str) { + std::stringstream ss(str); + std::vector<double> numbers; + double d; + char c; + while (ss >> d) { + numbers.push_back(d); + ss >> c; + } + return numbers; } int main(int argc, char** argv) { ArgsManager argsman; SetupBenchArgs(argsman); + SHA256AutoDetect(); std::string error; if (!argsman.ParseParameters(argc, argv, error)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error); @@ -47,34 +53,14 @@ int main(int argc, char** argv) return EXIT_SUCCESS; } - int64_t evaluations = argsman.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS); - std::string regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER); - std::string scaling_str = argsman.GetArg("-scaling", DEFAULT_BENCH_SCALING); - bool is_list_only = argsman.GetBoolArg("-list", false); - - if (evaluations == 0) { - return EXIT_SUCCESS; - } else if (evaluations < 0) { - tfm::format(std::cerr, "Error parsing evaluations argument: %d\n", evaluations); - return EXIT_FAILURE; - } - - double scaling_factor; - if (!ParseDouble(scaling_str, &scaling_factor)) { - tfm::format(std::cerr, "Error parsing scaling factor as double: %s\n", scaling_str); - return EXIT_FAILURE; - } - - std::unique_ptr<benchmark::Printer> printer = MakeUnique<benchmark::ConsolePrinter>(); - std::string printer_arg = argsman.GetArg("-printer", DEFAULT_BENCH_PRINTER); - if ("plot" == printer_arg) { - printer.reset(new benchmark::PlotlyPrinter( - argsman.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL), - argsman.GetArg("-plot-width", DEFAULT_PLOT_WIDTH), - argsman.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT))); - } + benchmark::Args args; + args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER); + args.is_list_only = argsman.GetBoolArg("-list", false); + args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", "")); + args.output_csv = argsman.GetArg("-output_csv", ""); + args.output_json = argsman.GetArg("-output_json", ""); - benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only); + benchmark::BenchRunner::RunAll(args); return EXIT_SUCCESS; } diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp index 268f67cada..3f15f3f856 100644 --- a/src/bench/block_assemble.cpp +++ b/src/bench/block_assemble.cpp @@ -14,7 +14,7 @@ #include <vector> -static void 
AssembleBlock(benchmark::State& state) +static void AssembleBlock(benchmark::Bench& bench) { TestingSetup test_setup{ CBaseChainParams::REGTEST, @@ -54,9 +54,9 @@ static void AssembleBlock(benchmark::State& state) } } - while (state.KeepRunning()) { + bench.run([&] { PrepareBlock(test_setup.m_node, SCRIPT_PUB); - } + }); } -BENCHMARK(AssembleBlock, 700); +BENCHMARK(AssembleBlock); diff --git a/src/bench/ccoins_caching.cpp b/src/bench/ccoins_caching.cpp index 86f9a0bf67..116de98b14 100644 --- a/src/bench/ccoins_caching.cpp +++ b/src/bench/ccoins_caching.cpp @@ -16,7 +16,7 @@ // characteristics than e.g. reindex timings. But that's not a requirement of // every benchmark." // (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484) -static void CCoinsCaching(benchmark::State& state) +static void CCoinsCaching(benchmark::Bench& bench) { const ECCVerifyHandle verify_handle; ECC_Start(); @@ -44,11 +44,11 @@ static void CCoinsCaching(benchmark::State& state) // Benchmark. const CTransaction tx_1(t1); - while (state.KeepRunning()) { + bench.run([&] { bool success = AreInputsStandard(tx_1, coins); assert(success); - } + }); ECC_Stop(); } -BENCHMARK(CCoinsCaching, 170 * 1000); +BENCHMARK(CCoinsCaching); diff --git a/src/bench/chacha20.cpp b/src/bench/chacha20.cpp index f1b0a9a989..913e0f8d57 100644 --- a/src/bench/chacha20.cpp +++ b/src/bench/chacha20.cpp @@ -11,7 +11,7 @@ static const uint64_t BUFFER_SIZE_TINY = 64; static const uint64_t BUFFER_SIZE_SMALL = 256; static const uint64_t BUFFER_SIZE_LARGE = 1024*1024; -static void CHACHA20(benchmark::State& state, size_t buffersize) +static void CHACHA20(benchmark::Bench& bench, size_t buffersize) { std::vector<uint8_t> key(32,0); ChaCha20 ctx(key.data(), key.size()); @@ -19,26 +19,26 @@ static void CHACHA20(benchmark::State& state, size_t buffersize) ctx.Seek(0); std::vector<uint8_t> in(buffersize,0); std::vector<uint8_t> out(buffersize,0); - while (state.KeepRunning()) { + bench.batch(in.size()).unit("byte").run([&] { ctx.Crypt(in.data(), out.data(), in.size()); - } + }); } -static void CHACHA20_64BYTES(benchmark::State& state) +static void CHACHA20_64BYTES(benchmark::Bench& bench) { - CHACHA20(state, BUFFER_SIZE_TINY); + CHACHA20(bench, BUFFER_SIZE_TINY); } -static void CHACHA20_256BYTES(benchmark::State& state) +static void CHACHA20_256BYTES(benchmark::Bench& bench) { - CHACHA20(state, BUFFER_SIZE_SMALL); + CHACHA20(bench, BUFFER_SIZE_SMALL); } -static void CHACHA20_1MB(benchmark::State& state) +static void CHACHA20_1MB(benchmark::Bench& bench) { - CHACHA20(state, BUFFER_SIZE_LARGE); + CHACHA20(bench, BUFFER_SIZE_LARGE); } -BENCHMARK(CHACHA20_64BYTES, 500000); -BENCHMARK(CHACHA20_256BYTES, 250000); -BENCHMARK(CHACHA20_1MB, 340); +BENCHMARK(CHACHA20_64BYTES); +BENCHMARK(CHACHA20_256BYTES); +BENCHMARK(CHACHA20_1MB); diff --git a/src/bench/chacha_poly_aead.cpp b/src/bench/chacha_poly_aead.cpp index df10f27d03..3b1d3e697a 100644 --- a/src/bench/chacha_poly_aead.cpp +++ b/src/bench/chacha_poly_aead.cpp @@ -21,7 +21,7 @@ static const unsigned char k2[32] = {0}; static ChaCha20Poly1305AEAD aead(k1, 32, k2, 32); -static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, bool include_decryption) +static void CHACHA20_POLY1305_AEAD(benchmark::Bench& bench, size_t buffersize, bool include_decryption) { std::vector<unsigned char> in(buffersize + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0); std::vector<unsigned char> out(buffersize + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0); @@ -29,7 +29,7 @@ static void 
CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, b uint64_t seqnr_aad = 0; int aad_pos = 0; uint32_t len = 0; - while (state.KeepRunning()) { + bench.batch(buffersize).unit("byte").run([&] { // encrypt or decrypt the buffer with a static key assert(aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, out.data(), out.size(), in.data(), buffersize, true)); @@ -53,70 +53,71 @@ static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, b seqnr_aad = 0; aad_pos = 0; } - } + }); } -static void CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT(benchmark::State& state) +static void CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT(benchmark::Bench& bench) { - CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_TINY, false); + CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_TINY, false); } -static void CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT(benchmark::State& state) +static void CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT(benchmark::Bench& bench) { - CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_SMALL, false); + CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_SMALL, false); } -static void CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT(benchmark::State& state) +static void CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT(benchmark::Bench& bench) { - CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_LARGE, false); + CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_LARGE, false); } -static void CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT(benchmark::State& state) +static void CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT(benchmark::Bench& bench) { - CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_TINY, true); + CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_TINY, true); } -static void CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT(benchmark::State& state) +static void CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT(benchmark::Bench& bench) { - CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_SMALL, true); + CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_SMALL, true); } -static void CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT(benchmark::State& state) +static void CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT(benchmark::Bench& bench) { - CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_LARGE, true); + CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_LARGE, true); } // Add Hash() (dbl-sha256) bench for comparison -static void HASH(benchmark::State& state, size_t buffersize) +static void HASH(benchmark::Bench& bench, size_t buffersize) { uint8_t hash[CHash256::OUTPUT_SIZE]; std::vector<uint8_t> in(buffersize,0); - while (state.KeepRunning()) - CHash256().Write(in.data(), in.size()).Finalize(hash); + bench.batch(in.size()).unit("byte").run([&] { + CHash256().Write(in).Finalize(hash); + }); } -static void HASH_64BYTES(benchmark::State& state) +static void HASH_64BYTES(benchmark::Bench& bench) { - HASH(state, BUFFER_SIZE_TINY); + HASH(bench, BUFFER_SIZE_TINY); } -static void HASH_256BYTES(benchmark::State& state) +static void HASH_256BYTES(benchmark::Bench& bench) { - HASH(state, BUFFER_SIZE_SMALL); + HASH(bench, BUFFER_SIZE_SMALL); } -static void HASH_1MB(benchmark::State& state) +static void HASH_1MB(benchmark::Bench& bench) { - HASH(state, BUFFER_SIZE_LARGE); + HASH(bench, BUFFER_SIZE_LARGE); } -BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT, 500000); -BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT, 250000); -BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT, 340); -BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT, 500000); -BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT, 250000); -BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT, 
340); -BENCHMARK(HASH_64BYTES, 500000); -BENCHMARK(HASH_256BYTES, 250000); -BENCHMARK(HASH_1MB, 340); +BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT); +BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT); +BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT); +BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT); +BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT); +BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT); +BENCHMARK(HASH_64BYTES); +BENCHMARK(HASH_256BYTES); +BENCHMARK(HASH_1MB); diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp index 2b2c78905e..dc0aa4031c 100644 --- a/src/bench/checkblock.cpp +++ b/src/bench/checkblock.cpp @@ -14,21 +14,21 @@ // a block off the wire, but before we can relay the block on to peers using // compact block relay. -static void DeserializeBlockTest(benchmark::State& state) +static void DeserializeBlockTest(benchmark::Bench& bench) { CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION); char a = '\0'; stream.write(&a, 1); // Prevent compaction - while (state.KeepRunning()) { + bench.unit("block").run([&] { CBlock block; stream >> block; bool rewound = stream.Rewind(benchmark::data::block413567.size()); assert(rewound); - } + }); } -static void DeserializeAndCheckBlockTest(benchmark::State& state) +static void DeserializeAndCheckBlockTest(benchmark::Bench& bench) { CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION); char a = '\0'; @@ -36,7 +36,7 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state) const auto chainParams = CreateChainParams(CBaseChainParams::MAIN); - while (state.KeepRunning()) { + bench.unit("block").run([&] { CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here stream >> block; bool rewound = stream.Rewind(benchmark::data::block413567.size()); @@ -45,8 +45,8 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state) BlockValidationState validationState; bool checked = CheckBlock(block, validationState, chainParams->GetConsensus()); assert(checked); - } + }); } -BENCHMARK(DeserializeBlockTest, 130); -BENCHMARK(DeserializeAndCheckBlockTest, 160); +BENCHMARK(DeserializeBlockTest); +BENCHMARK(DeserializeAndCheckBlockTest); diff --git a/src/bench/checkqueue.cpp b/src/bench/checkqueue.cpp index e052681181..19d7bc0dbc 100644 --- a/src/bench/checkqueue.cpp +++ b/src/bench/checkqueue.cpp @@ -24,7 +24,7 @@ static const unsigned int QUEUE_BATCH_SIZE = 128; // This Benchmark tests the CheckQueue with a slightly realistic workload, // where checks all contain a prevector that is indirect 50% of the time // and there is a little bit of work done between calls to Add. -static void CCheckQueueSpeedPrevectorJob(benchmark::State& state) +static void CCheckQueueSpeedPrevectorJob(benchmark::Bench& bench) { const ECCVerifyHandle verify_handle; ECC_Start(); @@ -47,23 +47,28 @@ static void CCheckQueueSpeedPrevectorJob(benchmark::State& state) for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) { tg.create_thread([&]{queue.Thread();}); } - while (state.KeepRunning()) { + + // create all the data once, then submit copies in the benchmark. 
+ FastRandomContext insecure_rand(true); + std::vector<std::vector<PrevectorJob>> vBatches(BATCHES); + for (auto& vChecks : vBatches) { + vChecks.reserve(BATCH_SIZE); + for (size_t x = 0; x < BATCH_SIZE; ++x) + vChecks.emplace_back(insecure_rand); + } + + bench.minEpochIterations(10).batch(BATCH_SIZE * BATCHES).unit("job").run([&] { // Make insecure_rand here so that each iteration is identical. - FastRandomContext insecure_rand(true); CCheckQueueControl<PrevectorJob> control(&queue); - std::vector<std::vector<PrevectorJob>> vBatches(BATCHES); - for (auto& vChecks : vBatches) { - vChecks.reserve(BATCH_SIZE); - for (size_t x = 0; x < BATCH_SIZE; ++x) - vChecks.emplace_back(insecure_rand); + for (auto vChecks : vBatches) { control.Add(vChecks); } // control waits for completion by RAII, but // it is done explicitly here for clarity control.Wait(); - } + }); tg.interrupt_all(); tg.join_all(); ECC_Stop(); } -BENCHMARK(CCheckQueueSpeedPrevectorJob, 1400); +BENCHMARK(CCheckQueueSpeedPrevectorJob); diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp index f2d12531d7..3a71a6ca03 100644 --- a/src/bench/coin_selection.cpp +++ b/src/bench/coin_selection.cpp @@ -27,7 +27,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, std::vector<st // same one over and over isn't too useful. Generating random isn't useful // either for measurements." // (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484) -static void CoinSelection(benchmark::State& state) +static void CoinSelection(benchmark::Bench& bench) { NodeContext node; auto chain = interfaces::MakeChain(node); @@ -51,7 +51,7 @@ static void CoinSelection(benchmark::State& state) const CoinEligibilityFilter filter_standard(1, 6, 0); const CoinSelectionParams coin_selection_params(true, 34, 148, CFeeRate(0), 0); - while (state.KeepRunning()) { + bench.run([&] { std::set<CInputCoin> setCoinsRet; CAmount nValueRet; bool bnb_used; @@ -59,7 +59,7 @@ static void CoinSelection(benchmark::State& state) assert(success); assert(nValueRet == 1003 * COIN); assert(setCoinsRet.size() == 2); - } + }); } typedef std::set<CInputCoin> CoinSet; @@ -91,7 +91,7 @@ static CAmount make_hard_case(int utxos, std::vector<OutputGroup>& utxo_pool) return target; } -static void BnBExhaustion(benchmark::State& state) +static void BnBExhaustion(benchmark::Bench& bench) { // Setup testWallet.SetupLegacyScriptPubKeyMan(); @@ -100,7 +100,7 @@ static void BnBExhaustion(benchmark::State& state) CAmount value_ret = 0; CAmount not_input_fees = 0; - while (state.KeepRunning()) { + bench.run([&] { // Benchmark CAmount target = make_hard_case(17, utxo_pool); SelectCoinsBnB(utxo_pool, target, 0, selection, value_ret, not_input_fees); // Should exhaust @@ -108,8 +108,8 @@ static void BnBExhaustion(benchmark::State& state) // Cleanup utxo_pool.clear(); selection.clear(); - } + }); } -BENCHMARK(CoinSelection, 650); -BENCHMARK(BnBExhaustion, 650); +BENCHMARK(CoinSelection); +BENCHMARK(BnBExhaustion); diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp index ddcef5121e..36be86bcc8 100644 --- a/src/bench/crypto_hash.cpp +++ b/src/bench/crypto_hash.cpp @@ -16,88 +16,92 @@ /* Number of bytes to hash per iteration */ static const uint64_t BUFFER_SIZE = 1000*1000; -static void RIPEMD160(benchmark::State& state) +static void RIPEMD160(benchmark::Bench& bench) { uint8_t hash[CRIPEMD160::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); - while (state.KeepRunning()) + bench.batch(in.size()).unit("byte").run([&] { 
CRIPEMD160().Write(in.data(), in.size()).Finalize(hash); + }); } -static void SHA1(benchmark::State& state) +static void SHA1(benchmark::Bench& bench) { uint8_t hash[CSHA1::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); - while (state.KeepRunning()) + bench.batch(in.size()).unit("byte").run([&] { CSHA1().Write(in.data(), in.size()).Finalize(hash); + }); } -static void SHA256(benchmark::State& state) +static void SHA256(benchmark::Bench& bench) { uint8_t hash[CSHA256::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); - while (state.KeepRunning()) + bench.batch(in.size()).unit("byte").run([&] { CSHA256().Write(in.data(), in.size()).Finalize(hash); + }); } -static void SHA256_32b(benchmark::State& state) +static void SHA256_32b(benchmark::Bench& bench) { std::vector<uint8_t> in(32,0); - while (state.KeepRunning()) { + bench.batch(in.size()).unit("byte").run([&] { CSHA256() .Write(in.data(), in.size()) .Finalize(in.data()); - } + }); } -static void SHA256D64_1024(benchmark::State& state) +static void SHA256D64_1024(benchmark::Bench& bench) { std::vector<uint8_t> in(64 * 1024, 0); - while (state.KeepRunning()) { + bench.batch(in.size()).unit("byte").run([&] { SHA256D64(in.data(), in.data(), 1024); - } + }); } -static void SHA512(benchmark::State& state) +static void SHA512(benchmark::Bench& bench) { uint8_t hash[CSHA512::OUTPUT_SIZE]; std::vector<uint8_t> in(BUFFER_SIZE,0); - while (state.KeepRunning()) + bench.batch(in.size()).unit("byte").run([&] { CSHA512().Write(in.data(), in.size()).Finalize(hash); + }); } -static void SipHash_32b(benchmark::State& state) +static void SipHash_32b(benchmark::Bench& bench) { uint256 x; uint64_t k1 = 0; - while (state.KeepRunning()) { + bench.run([&] { *((uint64_t*)x.begin()) = SipHashUint256(0, ++k1, x); - } + }); } -static void FastRandom_32bit(benchmark::State& state) +static void FastRandom_32bit(benchmark::Bench& bench) { FastRandomContext rng(true); - while (state.KeepRunning()) { + bench.run([&] { rng.rand32(); - } + }); } -static void FastRandom_1bit(benchmark::State& state) +static void FastRandom_1bit(benchmark::Bench& bench) { FastRandomContext rng(true); - while (state.KeepRunning()) { + bench.run([&] { rng.randbool(); - } + }); } -BENCHMARK(RIPEMD160, 440); -BENCHMARK(SHA1, 570); -BENCHMARK(SHA256, 340); -BENCHMARK(SHA512, 330); +BENCHMARK(RIPEMD160); +BENCHMARK(SHA1); +BENCHMARK(SHA256); +BENCHMARK(SHA512); -BENCHMARK(SHA256_32b, 4700 * 1000); -BENCHMARK(SipHash_32b, 40 * 1000 * 1000); -BENCHMARK(SHA256D64_1024, 7400); -BENCHMARK(FastRandom_32bit, 110 * 1000 * 1000); -BENCHMARK(FastRandom_1bit, 440 * 1000 * 1000); +BENCHMARK(SHA256_32b); +BENCHMARK(SipHash_32b); +BENCHMARK(SHA256D64_1024); +BENCHMARK(FastRandom_32bit); +BENCHMARK(FastRandom_1bit); diff --git a/src/bench/duplicate_inputs.cpp b/src/bench/duplicate_inputs.cpp index e87f15042b..5745e4276c 100644 --- a/src/bench/duplicate_inputs.cpp +++ b/src/bench/duplicate_inputs.cpp @@ -12,7 +12,7 @@ #include <validation.h> -static void DuplicateInputs(benchmark::State& state) +static void DuplicateInputs(benchmark::Bench& bench) { TestingSetup test_setup{ CBaseChainParams::REGTEST, @@ -61,11 +61,11 @@ static void DuplicateInputs(benchmark::State& state) block.hashMerkleRoot = BlockMerkleRoot(block); - while (state.KeepRunning()) { + bench.run([&] { BlockValidationState cvstate{}; assert(!CheckBlock(block, cvstate, chainparams.GetConsensus(), false, false)); assert(cvstate.GetRejectReason() == "bad-txns-inputs-duplicate"); - } + }); } -BENCHMARK(DuplicateInputs, 10); 
+BENCHMARK(DuplicateInputs); diff --git a/src/bench/examples.cpp b/src/bench/examples.cpp index f88150200a..dcd615b9da 100644 --- a/src/bench/examples.cpp +++ b/src/bench/examples.cpp @@ -3,31 +3,19 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> -#include <util/time.h> - -// Sanity test: this should loop ten times, and -// min/max/average should be close to 100ms. -static void Sleep100ms(benchmark::State& state) -{ - while (state.KeepRunning()) { - UninterruptibleSleep(std::chrono::milliseconds{100}); - } -} - -BENCHMARK(Sleep100ms, 10); // Extremely fast-running benchmark: #include <math.h> volatile double sum = 0.0; // volatile, global so not optimized away -static void Trig(benchmark::State& state) +static void Trig(benchmark::Bench& bench) { double d = 0.01; - while (state.KeepRunning()) { + bench.run([&] { sum += sin(d); d += 0.000001; - } + }); } -BENCHMARK(Trig, 12 * 1000 * 1000); +BENCHMARK(Trig); diff --git a/src/bench/gcs_filter.cpp b/src/bench/gcs_filter.cpp index 535ad35571..ef83242e41 100644 --- a/src/bench/gcs_filter.cpp +++ b/src/bench/gcs_filter.cpp @@ -5,7 +5,7 @@ #include <bench/bench.h> #include <blockfilter.h> -static void ConstructGCSFilter(benchmark::State& state) +static void ConstructGCSFilter(benchmark::Bench& bench) { GCSFilter::ElementSet elements; for (int i = 0; i < 10000; ++i) { @@ -16,14 +16,14 @@ static void ConstructGCSFilter(benchmark::State& state) } uint64_t siphash_k0 = 0; - while (state.KeepRunning()) { + bench.batch(elements.size()).unit("elem").run([&] { GCSFilter filter({siphash_k0, 0, 20, 1 << 20}, elements); siphash_k0++; - } + }); } -static void MatchGCSFilter(benchmark::State& state) +static void MatchGCSFilter(benchmark::Bench& bench) { GCSFilter::ElementSet elements; for (int i = 0; i < 10000; ++i) { @@ -34,10 +34,10 @@ static void MatchGCSFilter(benchmark::State& state) } GCSFilter filter({0, 0, 20, 1 << 20}, elements); - while (state.KeepRunning()) { + bench.unit("elem").run([&] { filter.Match(GCSFilter::Element()); - } + }); } -BENCHMARK(ConstructGCSFilter, 1000); -BENCHMARK(MatchGCSFilter, 50 * 1000); +BENCHMARK(ConstructGCSFilter); +BENCHMARK(MatchGCSFilter); diff --git a/src/bench/hashpadding.cpp b/src/bench/hashpadding.cpp index 985be8bdba..309cae3723 100644 --- a/src/bench/hashpadding.cpp +++ b/src/bench/hashpadding.cpp @@ -8,7 +8,7 @@ #include <uint256.h> -static void PrePadded(benchmark::State& state) +static void PrePadded(benchmark::Bench& bench) { CSHA256 hasher; @@ -18,30 +18,30 @@ static void PrePadded(benchmark::State& state) hasher.Write(nonce.begin(), 32); hasher.Write(nonce.begin(), 32); uint256 data = GetRandHash(); - while (state.KeepRunning()) { + bench.run([&] { unsigned char out[32]; CSHA256 h = hasher; h.Write(data.begin(), 32); h.Finalize(out); - } + }); } -BENCHMARK(PrePadded, 10000); +BENCHMARK(PrePadded); -static void RegularPadded(benchmark::State& state) +static void RegularPadded(benchmark::Bench& bench) { CSHA256 hasher; // Setup the salted hasher uint256 nonce = GetRandHash(); uint256 data = GetRandHash(); - while (state.KeepRunning()) { + bench.run([&] { unsigned char out[32]; CSHA256 h = hasher; h.Write(nonce.begin(), 32); h.Write(data.begin(), 32); h.Finalize(out); - } + }); } -BENCHMARK(RegularPadded, 10000); +BENCHMARK(RegularPadded); diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp index 5d943810df..32b060a15a 100644 --- a/src/bench/lockedpool.cpp +++ b/src/bench/lockedpool.cpp @@ -9,10 +9,9 @@ #include <vector> #define ASIZE 2048 
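// Illustrative sketch, not taken from the patch: the batch()/unit() pattern used in the filter and hashing
// benchmarks above. batch(n) declares that one call of the lambda processes n units, so nanobench reports
// time per "elem" rather than per call; the element count and workload here are made up.
static void ExamplePerElement(benchmark::Bench& bench)
{
    std::vector<uint64_t> elems(10000, 7);
    bench.batch(elems.size()).unit("elem").run([&] {
        uint64_t acc = 0;
        for (uint64_t e : elems) acc += e * 0x9e3779b97f4a7c15ULL;
        ankerl::nanobench::doNotOptimizeAway(acc);
    });
}
BENCHMARK(ExamplePerElement);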
-#define BITER 5000 #define MSIZE 2048 -static void BenchLockedPool(benchmark::State& state) +static void BenchLockedPool(benchmark::Bench& bench) { void *synth_base = reinterpret_cast<void*>(0x08000000); const size_t synth_size = 1024*1024; @@ -22,24 +21,22 @@ static void BenchLockedPool(benchmark::State& state) for (int x=0; x<ASIZE; ++x) addr.push_back(nullptr); uint32_t s = 0x12345678; - while (state.KeepRunning()) { - for (int x=0; x<BITER; ++x) { - int idx = s & (addr.size()-1); - if (s & 0x80000000) { - b.free(addr[idx]); - addr[idx] = nullptr; - } else if(!addr[idx]) { - addr[idx] = b.alloc((s >> 16) & (MSIZE-1)); - } - bool lsb = s & 1; - s >>= 1; - if (lsb) - s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 + bench.run([&] { + int idx = s & (addr.size() - 1); + if (s & 0x80000000) { + b.free(addr[idx]); + addr[idx] = nullptr; + } else if (!addr[idx]) { + addr[idx] = b.alloc((s >> 16) & (MSIZE - 1)); } - } + bool lsb = s & 1; + s >>= 1; + if (lsb) + s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 + }); for (void *ptr: addr) b.free(ptr); addr.clear(); } -BENCHMARK(BenchLockedPool, 1300); +BENCHMARK(BenchLockedPool); diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp index 69483f2914..1b9e428c9d 100644 --- a/src/bench/mempool_eviction.cpp +++ b/src/bench/mempool_eviction.cpp @@ -23,7 +23,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& nFee, CTxMemPool& po // Right now this is only testing eviction performance in an extremely small // mempool. Code needs to be written to generate a much wider variety of // unique transactions for a more meaningful performance measurement. -static void MempoolEviction(benchmark::State& state) +static void MempoolEviction(benchmark::Bench& bench) { TestingSetup test_setup{ CBaseChainParams::REGTEST, @@ -125,7 +125,7 @@ static void MempoolEviction(benchmark::State& state) const CTransactionRef tx6_r{MakeTransactionRef(tx6)}; const CTransactionRef tx7_r{MakeTransactionRef(tx7)}; - while (state.KeepRunning()) { + bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { AddTx(tx1_r, 10000LL, pool); AddTx(tx2_r, 5000LL, pool); AddTx(tx3_r, 20000LL, pool); @@ -135,7 +135,7 @@ static void MempoolEviction(benchmark::State& state) AddTx(tx7_r, 9000LL, pool); pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); pool.TrimToSize(GetVirtualTransactionSize(*tx1_r)); - } + }); } -BENCHMARK(MempoolEviction, 41000); +BENCHMARK(MempoolEviction); diff --git a/src/bench/mempool_stress.cpp b/src/bench/mempool_stress.cpp index 38d8632318..89233e390c 100644 --- a/src/bench/mempool_stress.cpp +++ b/src/bench/mempool_stress.cpp @@ -26,8 +26,13 @@ struct Available { Available(CTransactionRef& ref, size_t tx_count) : ref(ref), tx_count(tx_count){} }; -static void ComplexMemPool(benchmark::State& state) +static void ComplexMemPool(benchmark::Bench& bench) { + int childTxs = 800; + if (bench.complexityN() > 1) { + childTxs = static_cast<int>(bench.complexityN()); + } + FastRandomContext det_rand{true}; std::vector<Available> available_coins; std::vector<CTransactionRef> ordered_coins; @@ -46,7 +51,7 @@ static void ComplexMemPool(benchmark::State& state) ordered_coins.emplace_back(MakeTransactionRef(tx)); available_coins.emplace_back(ordered_coins.back(), tx_counter++); } - for (auto x = 0; x < 800 && !available_coins.empty(); ++x) { + for (auto x = 0; x < childTxs && !available_coins.empty(); ++x) { CMutableTransaction tx = CMutableTransaction(); size_t n_ancestors = det_rand.randrange(10)+1; for (size_t ancestor = 0; ancestor < n_ancestors && 
!available_coins.empty(); ++ancestor){ @@ -77,13 +82,13 @@ static void ComplexMemPool(benchmark::State& state) TestingSetup test_setup; CTxMemPool pool; LOCK2(cs_main, pool.cs); - while (state.KeepRunning()) { + bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { for (auto& tx : ordered_coins) { AddTx(tx, pool); } pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); pool.TrimToSize(GetVirtualTransactionSize(*ordered_coins.front())); - } + }); } -BENCHMARK(ComplexMemPool, 1); +BENCHMARK(ComplexMemPool); diff --git a/src/bench/merkle_root.cpp b/src/bench/merkle_root.cpp index e84f92feae..ba6629b9f0 100644 --- a/src/bench/merkle_root.cpp +++ b/src/bench/merkle_root.cpp @@ -8,7 +8,7 @@ #include <random.h> #include <uint256.h> -static void MerkleRoot(benchmark::State& state) +static void MerkleRoot(benchmark::Bench& bench) { FastRandomContext rng(true); std::vector<uint256> leaves; @@ -16,11 +16,11 @@ static void MerkleRoot(benchmark::State& state) for (auto& item : leaves) { item = rng.rand256(); } - while (state.KeepRunning()) { + bench.batch(leaves.size()).unit("leaf").run([&] { bool mutation = false; uint256 hash = ComputeMerkleRoot(std::vector<uint256>(leaves), &mutation); leaves[mutation] = hash; - } + }); } -BENCHMARK(MerkleRoot, 800); +BENCHMARK(MerkleRoot); diff --git a/src/bench/nanobench.cpp b/src/bench/nanobench.cpp new file mode 100644 index 0000000000..fcdd86495a --- /dev/null +++ b/src/bench/nanobench.cpp @@ -0,0 +1,6 @@ +// Copyright (c) 2019-2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#define ANKERL_NANOBENCH_IMPLEMENT +#include <bench/nanobench.h> diff --git a/src/bench/nanobench.h b/src/bench/nanobench.h new file mode 100644 index 0000000000..c5379e7fd4 --- /dev/null +++ b/src/bench/nanobench.h @@ -0,0 +1,3225 @@ +// __ _ _______ __ _ _____ ______ _______ __ _ _______ _ _ +// | \ | |_____| | \ | | | |_____] |______ | \ | | |_____| +// | \_| | | | \_| |_____| |_____] |______ | \_| |_____ | | +// +// Microbenchmark framework for C++11/14/17/20 +// https://github.com/martinus/nanobench +// +// Licensed under the MIT License <http://opensource.org/licenses/MIT>. +// SPDX-License-Identifier: MIT +// Copyright (c) 2019-2020 Martin Ankerl <martin.ankerl@gmail.com> +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
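// Illustrative sketch, not part of the vendored header that follows: nanobench ships as a single header, so
// exactly one translation unit defines ANKERL_NANOBENCH_IMPLEMENT before including it -- src/bench/nanobench.cpp
// above plays that role in this tree. Any other file simply includes the header and can use
// ankerl::nanobench::Bench directly; the function and workload below are made up.
#include <bench/nanobench.h>

void RunStandaloneNanobenchExample()
{
    // With no output() override, Bench prints a markdown result table to std::cout.
    ankerl::nanobench::Bench().name("standalone example").run([] {
        volatile double x = 1.0; // volatile side effect so the loop body is not optimized away
        x *= 1.000001;
    });
}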
+ +#ifndef ANKERL_NANOBENCH_H_INCLUDED +#define ANKERL_NANOBENCH_H_INCLUDED + +// see https://semver.org/ +#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes +#define ANKERL_NANOBENCH_VERSION_MINOR 0 // backwards-compatible changes +#define ANKERL_NANOBENCH_VERSION_PATCH 0 // backwards-compatible bug fixes + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// public facing api - as minimal as possible +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#include <chrono> // high_resolution_clock +#include <cstring> // memcpy +#include <iosfwd> // for std::ostream* custom output target in Config +#include <string> // all names +#include <vector> // holds all results + +#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x() + +#define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus +#define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L +#define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L +#define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L +#define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L + +#if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17) +# define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]] +#else +# define ANKERL_NANOBENCH_PRIVATE_NODISCARD() +#endif + +#if defined(__clang__) +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \ + _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"") +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop") +#else +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() +#endif + +#if defined(__GNUC__) +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"") +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop") +#else +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() +# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() +#endif + +#if defined(ANKERL_NANOBENCH_LOG_ENABLED) +# include <iostream> +# define ANKERL_NANOBENCH_LOG(x) std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl +#else +# define ANKERL_NANOBENCH_LOG(x) +#endif + +#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS) +# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1 +#else +# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0 +#endif + +#if defined(__clang__) +# define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__))) +#else +# define ANKERL_NANOBENCH_NO_SANITIZE(...) +#endif + +#if defined(_MSC_VER) +# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline) +#else +# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline)) +#endif + +// workaround missing "is_trivially_copyable" in g++ < 5.0 +// See https://stackoverflow.com/a/31798726/48181 +#if defined(__GNUC__) && __GNUC__ < 5 +# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) +#else +# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) 
std::is_trivially_copyable<__VA_ARGS__>::value +#endif + +// declarations /////////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { + +using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, + std::chrono::steady_clock>::type; +class Bench; +struct Config; +class Result; +class Rng; +class BigO; + +/** + * @brief Renders output from a mustache-like template and benchmark results. + * + * The templating facility here is heavily inspired by [mustache - logic-less templates](https://mustache.github.io/). + * It adds a few more features that are necessary to get all of the captured data out of nanobench. Please read the + * excellent [mustache manual](https://mustache.github.io/mustache.5.html) to see what this is all about. + * + * nanobench output has two nested layers, *result* and *measurement*. Here is a hierarchy of the allowed tags: + * + * * `{{#result}}` Marks the begin of the result layer. Whatever comes after this will be instantiated as often as + * a benchmark result is available. Within it, you can use these tags: + * + * * `{{title}}` See Bench::title(). + * + * * `{{name}}` Benchmark name, usually directly provided with Bench::run(), but can also be set with Bench::name(). + * + * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::title(). + * + * * `{{batch}}` Batch size, see Bench::batch(). + * + * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN(). + * + * * `{{epochs}}` Number of epochs, see Bench::epochs(). + * + * * `{{clockResolution}}` Accuracy of the clock, i.e. what's the smallest time possible to measure with the clock. + * For modern systems, this can be around 20 ns. This value is automatically determined by nanobench at the first + * benchmark that is run, and used as a static variable throughout the application's runtime. + * + * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple(). + * This is the target runtime for each measurement (epoch). That means the more accurate your clock is, the faster + * will be the benchmark. Basing the measurement's runtime on the clock resolution is the main reason why nanobench is so fast. + * + * * `{{maxEpochTime}}` Configuration for a maximum time each measurement (epoch) is allowed to take. Note that at least + * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime(). + * + * * `{{minEpochTime}}` Minimum epoch time, usually not set. See Bench::minEpochTime(). + * + * * `{{minEpochIterations}}` See Bench::minEpochIterations(). + * + * * `{{epochIterations}}` See Bench::epochIterations(). + * + * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup(). + * + * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative(). + * + * Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations + * are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters + * are available (currently only on current Linux systems), you also have `pagefaults`, `cpucycles`, + * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. 
All the measures (except `iterations`) are + * provided for a single iteration (so `elapsed` is the time a single iteration took). The following tags are available: + * + * * `{{median(<name>)}}` Calculate median of a measurement data set, e.g. `{{median(elapsed)}}`. + * + * * `{{average(<name>)}}` Average (mean) calculation. + * + * * `{{medianAbsolutePercentError(<name>)}}` Calculates MdAPE, the Median Absolute Percentage Error. The MdAPE is an excellent + * metric for the variation of measurements. It is more robust to outliers than the + * [Mean absolute percentage error (M-APE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error). + * @f[ + * \mathrm{medianAbsolutePercentError}(e) = \mathrm{median}\{| \frac{e_i - \mathrm{median}\{e\}}{e_i}| \} + * @f] + * E.g. for *elapsed*: First, @f$ \mathrm{median}\{elapsed\} @f$ is calculated. This is used to calculate the absolute percentage + * error to this median for each measurement, as in @f$ | \frac{e_i - \mathrm{median}\{e\}}{e_i}| @f$. All these results + * are sorted, and the middle value is chosen as the median absolute percent error. + * + * This measurement is a bit hard to interpret, but it is very robust against outliers. E.g. a value of 5% means that half of the + * measurements deviate less than 5% from the median, and the other half deviate more than 5% from the median. + * + * * `{{sum(<name>)}}` Sum of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations + * measured in this benchmark. + * + * * `{{minimum(<name>)}}` Minimum of all measurements. + * + * * `{{maximum(<name>)}}` Maximum of all measurements. + * + * * `{{sumProduct(<first>, <second>)}}` Calculates the sum of the products of corresponding measures: + * @f[ + * \mathrm{sumProduct}(a,b) = \sum_{i=1}^{n}a_i\cdot b_i + * @f] + * E.g. to calculate total runtime of the benchmark, you multiply iterations with elapsed time for each measurement, and + * sum these results up: + * `{{sumProduct(iterations, elapsed)}}`. + * + * * `{{#measurement}}` To access individual measurement results, open the begin tag for measurements. + * + * * `{{elapsed}}` Average elapsed time per iteration, in seconds. + * + * * `{{iterations}}` Number of iterations in the measurement. The number of iterations will fluctuate due + * to some applied randomness, to enhance accuracy. + * + * * `{{pagefaults}}` Average number of pagefaults per iteration. + * + * * `{{cpucycles}}` Average number of CPU cycles processed per iteration. + * + * * `{{contextswitches}}` Average number of context switches per iteration. + * + * * `{{instructions}}` Average number of retired instructions per iteration. + * + * * `{{branchinstructions}}` Average number of branches executed per iteration. + * + * * `{{branchmisses}}` Average number of branches that were missed per iteration. + * + * * `{{/measurement}}` Ends the measurement tag. + * + * * `{{/result}}` Marks the end of the result layer. This is the end marker for the template part that will be instantiated + * for each benchmark result. + * + * + * For the layer tags *result* and *measurement* you additionally can use these special markers: + * + * * ``{{#-first}}`` - Begin marker of a template that will be instantiated *only for the first* entry in the layer. Use is only + * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between + * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-first}}``.
+ * + * * ``{{^-first}}`` - Begin marker of a template that will be instantiated *for each except the first* entry in the layer. This + * is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer. + * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``. + * + * * ``{{/-first}}`` - End marker for either ``{{#-first}}`` or ``{{^-first}}``. + * + * * ``{{#-last}}`` - Begin marker of a template that will be instantiated *only for the last* entry in the layer. Use is only + * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between + * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-last}}``. + * + * * ``{{^-last}}`` - Begin marker of a template that will be instantiated *for each except the last* entry in the layer. This + * is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer. + * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``. + * + * * ``{{/-last}}`` - End marker for either ``{{#-last}}`` or ``{{^-last}}``. + * + @verbatim embed:rst + + For an overview of all the possible data you can get out of nanobench, please see the tutorial at :ref:`tutorial-template-json`. + + The templates that ship with nanobench are: + + * :cpp:func:`templates::csv() <ankerl::nanobench::templates::csv()>` + * :cpp:func:`templates::json() <ankerl::nanobench::templates::json()>` + * :cpp:func:`templates::htmlBoxplot() <ankerl::nanobench::templates::htmlBoxplot()>` + + @endverbatim + * + * @param mustacheTemplate The template. + * @param bench Benchmark, containing all the results. + * @param out Output for the generated output. + */ +void render(char const* mustacheTemplate, Bench const& bench, std::ostream& out); + +/** + * Same as render(char const* mustacheTemplate, Bench const& bench, std::ostream& out), but for when + * you only have results available. + * + * @param mustacheTemplate The template. + * @param results All the results to be used for rendering. + * @param out Output for the generated output. + */ +void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out); + +// Contains mustache-like templates +namespace templates { + +/*! + @brief CSV data for the benchmark results. + + Generates a comma-separated values dataset. The first line is the header; each following line is a summary of a single benchmark run. + + @verbatim embed:rst + See the tutorial at :ref:`tutorial-template-csv` for an example. + @endverbatim + */ +char const* csv() noexcept; + +/*! + @brief HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an example output. + + The output uses only the elapsed time, and displays each epoch as a single dot. + @verbatim embed:rst + See the tutorial at :ref:`tutorial-template-html` for an example. + @endverbatim + + @see ankerl::nanobench::render() + */ +char const* htmlBoxplot() noexcept; + +/*! + @brief Template to generate JSON data. + + The generated JSON data contains *all* data that has been generated. All times are as double values, in seconds. The output can get + quite large. + @verbatim embed:rst + See the tutorial at :ref:`tutorial-template-json` for an example.
+ @endverbatim + */ +char const* json() noexcept; + +} // namespace templates + +namespace detail { + +template <typename T> +struct PerfCountSet; + +class IterationLogic; +class PerformanceCounters; + +#if ANKERL_NANOBENCH(PERF_COUNTERS) +class LinuxPerformanceCounters; +#endif + +} // namespace detail +} // namespace nanobench +} // namespace ankerl + +// definitions //////////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { +namespace detail { + +template <typename T> +struct PerfCountSet { + T pageFaults{}; + T cpuCycles{}; + T contextSwitches{}; + T instructions{}; + T branchInstructions{}; + T branchMisses{}; +}; + +} // namespace detail + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +struct Config { + // actual benchmark config + std::string mBenchmarkTitle = "benchmark"; + std::string mBenchmarkName = "noname"; + std::string mUnit = "op"; + double mBatch = 1.0; + double mComplexityN = -1.0; + size_t mNumEpochs = 11; + size_t mClockResolutionMultiple = static_cast<size_t>(1000); + std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100); + std::chrono::nanoseconds mMinEpochTime{}; + uint64_t mMinEpochIterations{1}; + uint64_t mEpochIterations{0}; // If not 0, run *exactly* these number of iterations per epoch. + uint64_t mWarmup = 0; + std::ostream* mOut = nullptr; + bool mShowPerformanceCounters = true; + bool mIsRelative = false; + + Config(); + ~Config(); + Config& operator=(Config const&); + Config& operator=(Config&&); + Config(Config const&); + Config(Config&&) noexcept; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// Result returned after a benchmark has finished. Can be used as a baseline for relative(). +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class Result { +public: + enum class Measure : size_t { + elapsed, + iterations, + pagefaults, + cpucycles, + contextswitches, + instructions, + branchinstructions, + branchmisses, + _size + }; + + explicit Result(Config const& benchmarkConfig); + + ~Result(); + Result& operator=(Result const&); + Result& operator=(Result&&); + Result(Result const&); + Result(Result&&) noexcept; + + // adds new measurement results + // all values are scaled by iters (except iters...) + void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc); + + ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept; + + ANKERL_NANOBENCH(NODISCARD) double median(Measure m) const; + ANKERL_NANOBENCH(NODISCARD) double medianAbsolutePercentError(Measure m) const; + ANKERL_NANOBENCH(NODISCARD) double average(Measure m) const; + ANKERL_NANOBENCH(NODISCARD) double sum(Measure m) const noexcept; + ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept; + ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept; + ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept; + + ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept; + ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const; + ANKERL_NANOBENCH(NODISCARD) bool empty() const noexcept; + ANKERL_NANOBENCH(NODISCARD) size_t size() const noexcept; + + // Finds string, if not found, returns _size. + static Measure fromString(std::string const& str); + +private: + Config mConfig{}; + std::vector<std::vector<double>> mNameToMeasurements{}; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +/** + * An extremely fast random generator. Currently, this implements *RomuDuoJr*, developed by Mark Overton. 
Source: + * http://www.romu-random.org/ + * + * RomuDuoJr is extremely fast and provides reasonably good randomness. Not enough for large jobs, but definitely + * good enough for a benchmarking framework. + * + * * Estimated capacity: @f$ 2^{51} @f$ bytes + * * Register pressure: 4 + * * State size: 128 bits + * + * This random generator is a drop-in replacement for the generators supplied by ``<random>``. It is not + * cryptographically secure. Its intended purpose is to be very fast so that benchmarks that make use + * of randomness are not distorted too much by the random generator. + * + * Rng also provides a few non-standard helpers, optimized for speed. + */ +class Rng final { +public: + /** + * @brief This RNG provides 64bit randomness. + */ + using result_type = uint64_t; + + static constexpr uint64_t(min)(); + static constexpr uint64_t(max)(); + + /** + * As a safety precaution, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the + * same sequence, which is generally not what one wants. Instead create a new rng with the default constructor Rng(), which is + * automatically seeded from `std::random_device`. If you really need a copy, use copy(). + */ + Rng(Rng const&) = delete; + + /** + * Same as Rng(Rng const&), we don't allow assignment. If you need a new Rng, create one with the default constructor Rng(). + */ + Rng& operator=(Rng const&) = delete; + + // moving is ok + Rng(Rng&&) noexcept = default; + Rng& operator=(Rng&&) noexcept = default; + ~Rng() noexcept = default; + + /** + * @brief Creates a new random generator with a random seed. + * + * Instead of a default seed (as the random generators from the STD), this properly seeds the random generator from + * `std::random_device`. It guarantees correct seeding. Note that seeding can be relatively slow, depending on the source of + * randomness used. So it is best to create a Rng once and use it for all your randomness purposes. + */ + Rng(); + + /*! + Creates a new Rng that is seeded with a specific seed. Each Rng created from the same seed will produce the same randomness + sequence. This can be useful for deterministic behavior. + + @verbatim embed:rst + .. note:: + + The random algorithm might change between nanobench releases. Whenever a faster and/or better random + generator becomes available, I will switch the implementation. + @endverbatim + + As per the Romu paper, this seeds the Rng with the splitMix64 algorithm and performs 10 initial rounds for further mixing up of the + internal state. + + @param seed The 64bit seed. All values are allowed, even 0. + */ + explicit Rng(uint64_t seed) noexcept; + Rng(uint64_t x, uint64_t y) noexcept; + + /** + * Creates a copy of the Rng, thus the copy provides exactly the same random sequence as the original. + */ + ANKERL_NANOBENCH(NODISCARD) Rng copy() const noexcept; + + /** + * @brief Produces a 64bit random value. This should be very fast, thus it is marked as inline. In my benchmark, this is ~46 times + * faster than `std::default_random_engine` for producing 64bit random values. It seems that the fastest std contender is + * `std::mt19937_64`. Still, this RNG is 2-3 times as fast. + * + * @return uint64_t The next 64 bit random value. + */ + inline uint64_t operator()() noexcept; + + // This is slightly biased. + + /** + * Generates a random number between 0 and range (excluding range). + * + * The algorithm only produces 32bit numbers, and is slightly biased.
The effect is quite small unless your range is close to the + * maximum value of an integer. It is possible to correct the bias with rejection sampling (see + * [here](https://lemire.me/blog/2016/06/30/fast-random-shuffling/), but this is most likely irrelevant in practices for the + * purposes of this Rng. + * + * See Daniel Lemire's blog post [A fast alternative to the modulo + * reduction](https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/) + * + * @param range Upper exclusive range. E.g a value of 3 will generate random numbers 0, 1, 2. + * @return uint32_t Generated random values in range [0, range(. + */ + inline uint32_t bounded(uint32_t range) noexcept; + + // random double in range [0, 1( + // see http://prng.di.unimi.it/ + + /** + * Provides a random uniform double value between 0 and 1. This uses the method described in [Generating uniform doubles in the + * unit interval](http://prng.di.unimi.it/), and is extremely fast. + * + * @return double Uniformly distributed double value in range [0,1(, excluding 1. + */ + inline double uniform01() noexcept; + + /** + * Shuffles all entries in the given container. Although this has a slight bias due to the implementation of bounded(), this is + * preferable to `std::shuffle` because it is over 5 times faster. See Daniel Lemire's blog post [Fast random + * shuffling](https://lemire.me/blog/2016/06/30/fast-random-shuffling/). + * + * @param container The whole container will be shuffled. + */ + template <typename Container> + void shuffle(Container& container) noexcept; + +private: + static constexpr uint64_t rotl(uint64_t x, unsigned k) noexcept; + + uint64_t mX; + uint64_t mY; +}; + +/** + * @brief Main entry point to nanobench's benchmarking facility. + * + * It holds configuration and results from one or more benchmark runs. Usually it is used in a single line, where the object is + * constructed, configured, and then a benchmark is run. E.g. like this: + * + * ankerl::nanobench::Bench().unit("byte").batch(1000).run("random fluctuations", [&] { + * // here be the benchmark code + * }); + * + * In that example Bench() constructs the benchmark, it is then configured with unit() and batch(), and after configuration a + * benchmark is executed with run(). Once run() has finished, it prints the result to `std::cout`. It would also store the results + * in the Bench instance, but in this case the object is immediately destroyed so it's not available any more. + */ +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class Bench { +public: + /** + * @brief Creates a new benchmark for configuration and running of benchmarks. + */ + Bench(); + + Bench(Bench&& other); + Bench& operator=(Bench&& other); + Bench(Bench const& other); + Bench& operator=(Bench const& other); + ~Bench() noexcept; + + /*! + @brief Repeatedly calls `op()` based on the configuration, and performs measurements. + + This call is marked with `noinline` to prevent the compiler to optimize beyond different benchmarks. This can have quite a big + effect on benchmark accuracy. + + @verbatim embed:rst + .. note:: + + Each call to your lambda must have a side effect that the compiler can't possibly optimize it away. E.g. add a result to an + externally defined number (like `x` in the above example), and finally call `doNotOptimizeAway` on the variables the compiler + must not remove. You can also use :cpp:func:`ankerl::nanobench::doNotOptimizeAway` directly in the lambda, but be aware that + this has a small overhead. 
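+
+       A minimal sketch of such a side effect, using the doNotOptimizeAway() member shortcut that is
+       declared further down (the benchmark name and variable are purely illustrative)::
+
+           uint64_t x = 1;
+           ankerl::nanobench::Bench().run("shift", [&] {
+               x += x;
+           }).doNotOptimizeAway(x);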
+ + @endverbatim + + @tparam Op The code to benchmark. + */ + template <typename Op> + ANKERL_NANOBENCH(NOINLINE) + Bench& run(char const* benchmarkName, Op&& op); + + template <typename Op> + ANKERL_NANOBENCH(NOINLINE) + Bench& run(std::string const& benchmarkName, Op&& op); + + /** + * @brief Same as run(char const* benchmarkName, Op op), but instead uses the previously set name. + * @tparam Op The code to benchmark. + */ + template <typename Op> + ANKERL_NANOBENCH(NOINLINE) + Bench& run(Op&& op); + + /** + * @brief Title of the benchmark, will be shown in the table header. Changing the title will start a new markdown table. + * + * @param benchmarkTitle The title of the benchmark. + */ + Bench& title(char const* benchmarkTitle); + Bench& title(std::string const& benchmarkTitle); + ANKERL_NANOBENCH(NODISCARD) std::string const& title() const noexcept; + + /// Name of the benchmark, will be shown in the table row. + Bench& name(char const* benchmarkName); + Bench& name(std::string const& benchmarkName); + ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept; + + /** + * @brief Sets the batch size. + * + * E.g. number of processed byte, or some other metric for the size of the processed data in each iteration. If you benchmark + * hashing of a 1000 byte long string and want byte/sec as a result, you can specify 1000 as the batch size. + * + * @tparam T Any input type is internally cast to `double`. + * @param b batch size + */ + template <typename T> + Bench& batch(T b) noexcept; + ANKERL_NANOBENCH(NODISCARD) double batch() const noexcept; + + /** + * @brief Sets the operation unit. + * + * Defaults to "op". Could be e.g. "byte" for string processing. This is used for the table header, e.g. to show `ns/byte`. Use + * singular (*byte*, not *bytes*). A change clears the currently collected results. + * + * @param unit The unit name. + */ + Bench& unit(char const* unit); + Bench& unit(std::string const& unit); + ANKERL_NANOBENCH(NODISCARD) std::string const& unit() const noexcept; + + /** + * @brief Set the output stream where the resulting markdown table will be printed to. + * + * The default is `&std::cout`. You can disable all output by setting `nullptr`. + * + * @param outstream Pointer to output stream, can be `nullptr`. + */ + Bench& output(std::ostream* outstream) noexcept; + ANKERL_NANOBENCH(NODISCARD) std::ostream* output() const noexcept; + + /** + * Modern processors have a very accurate clock, being able to measure as low as 20 nanoseconds. This is the main trick nanobech to + * be so fast: we find out how accurate the clock is, then run the benchmark only so often that the clock's accuracy is good enough + * for accurate measurements. + * + * The default is to run one epoch for 1000 times the clock resolution. So for 20ns resolution and 11 epochs, this gives a total + * runtime of + * + * @f[ + * 20ns * 1000 * 11 \approx 0.2ms + * @f] + * + * To be precise, nanobench adds a 0-20% random noise to each evaluation. This is to prevent any aliasing effects, and further + * improves accuracy. + * + * Total runtime will be higher though: Some initial time is needed to find out the target number of iterations for each epoch, and + * there is some overhead involved to start & stop timers and calculate resulting statistics and writing the output. + * + * @param multiple Target number of times of clock resolution. Usually 1000 is a good compromise between runtime and accuracy. 
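+ *
+ * Note that the resulting target runtime per epoch (clock resolution times this multiple) is additionally
+ * clamped into [minEpochTime(), maxEpochTime()], see the iteration logic in the implementation part. A
+ * small configuration sketch (the numbers and the lambda are only an example):
+ *
+ *     uint64_t n = 0;
+ *     ankerl::nanobench::Bench().clockResolutionMultiple(2000).run("count", [&] { ++n; });
+ *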
+ */ + Bench& clockResolutionMultiple(size_t multiple) noexcept; + ANKERL_NANOBENCH(NODISCARD) size_t clockResolutionMultiple() const noexcept; + + /** + * @brief Controls number of epochs, the number of measurements to perform. + * + * The reported result will be the median of evaluation of each epoch. The higher you choose this, the more + * deterministic the result be and outliers will be more easily removed. Also the `err%` will be more accurate the higher this + * number is. Note that the `err%` will not necessarily decrease when number of epochs is increased. But it will be a more accurate + * representation of the benchmarked code's runtime stability. + * + * Choose the value wisely. In practice, 11 has been shown to be a reasonable choice between runtime performance and accuracy. + * This setting goes hand in hand with minEpocIterations() (or minEpochTime()). If you are more interested in *median* runtime, you + * might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase minEpochIterations() + * instead. + * + * @param numEpochs Number of epochs. + */ + Bench& epochs(size_t numEpochs) noexcept; + ANKERL_NANOBENCH(NODISCARD) size_t epochs() const noexcept; + + /** + * @brief Upper limit for the runtime of each epoch. + * + * As a safety precausion if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per + * epoch. Default is 100ms. At least a single evaluation of the benchmark is performed. + * + * @see minEpochTime(), minEpochIterations() + * + * @param t Maximum target runtime for a single epoch. + */ + Bench& maxEpochTime(std::chrono::nanoseconds t) noexcept; + ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds maxEpochTime() const noexcept; + + /** + * @brief Minimum time each epoch should take. + * + * Default is zero, so we are fully relying on clockResolutionMultiple(). In most cases this is exactly what you want. If you see + * that the evaluation is unreliable with a high `err%`, you can increase either minEpochTime() or minEpochIterations(). + * + * @see maxEpochTime(), minEpochIterations() + * + * @param t Minimum time each epoch should take. + */ + Bench& minEpochTime(std::chrono::nanoseconds t) noexcept; + ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds minEpochTime() const noexcept; + + /** + * @brief Sets the minimum number of iterations each epoch should take. + * + * Default is 1, and we rely on clockResolutionMultiple(). If the `err%` is high and you want a more smooth result, you might want + * to increase the minimum number or iterations, or increase the minEpochTime(). + * + * @see minEpochTime(), maxEpochTime(), minEpochIterations() + * + * @param numIters Minimum number of iterations per epoch. + */ + Bench& minEpochIterations(uint64_t numIters) noexcept; + ANKERL_NANOBENCH(NODISCARD) uint64_t minEpochIterations() const noexcept; + + /** + * Sets exactly the number of iterations for each epoch. Ignores all other epoch limits. This forces nanobench to use exactly + * the given number of iterations for each epoch, not more and not less. Default is 0 (disabled). + * + * @param numIters Exact number of iterations to use. Set to 0 to disable. + */ + Bench& epochIterations(uint64_t numIters) noexcept; + ANKERL_NANOBENCH(NODISCARD) uint64_t epochIterations() const noexcept; + + /** + * @brief Sets a number of iterations that are initially performed without any measurements. + * + * Some benchmarks need a few evaluations to warm up caches / database / whatever access. 
Normally this should not be needed, since + * we show the median result so initial outliers will be filtered away automatically. If the warmup effect is large though, you + * might want to set it. Default is 0. + * + * @param numWarmupIters Number of warmup iterations. + */ + Bench& warmup(uint64_t numWarmupIters) noexcept; + ANKERL_NANOBENCH(NODISCARD) uint64_t warmup() const noexcept; + + /** + * @brief Marks the next run as the baseline. + * + * Call `relative(true)` to mark the run as the baseline. Successive runs will be compared to this run. It is calculated by + * + * @f[ + * 100\% * \frac{baseline}{runtime} + * @f] + * + * * 100% means it is exactly as fast as the baseline + * * >100% means it is faster than the baseline. E.g. 200% means the current run is twice as fast as the baseline. + * * <100% means it is slower than the baseline. E.g. 50% means it is twice as slow as the baseline. + * + * See the tutorial section "Comparing Results" for example usage. + * + * @param isRelativeEnabled True to enable processing + */ + Bench& relative(bool isRelativeEnabled) noexcept; + ANKERL_NANOBENCH(NODISCARD) bool relative() const noexcept; + + /** + * @brief Enables/disables performance counters. + * + * On Linux nanobench has a powerful feature to use performance counters. This enables counting of retired instructions, count + * number of branches, missed branches, etc. On default this is enabled, but you can disable it if you don't need that feature. + * + * @param showPerformanceCounters True to enable, false to disable. + */ + Bench& performanceCounters(bool showPerformanceCounters) noexcept; + ANKERL_NANOBENCH(NODISCARD) bool performanceCounters() const noexcept; + + /** + * @brief Retrieves all benchmark results collected by the bench object so far. + * + * Each call to run() generates a Result that is stored within the Bench instance. This is mostly for advanced users who want to + * see all the nitty gritty detials. + * + * @return All results collected so far. + */ + ANKERL_NANOBENCH(NODISCARD) std::vector<Result> const& results() const noexcept; + + /*! + @verbatim embed:rst + + Convenience shortcut to :cpp:func:`ankerl::nanobench::doNotOptimizeAway`. + + @endverbatim + */ + template <typename Arg> + Bench& doNotOptimizeAway(Arg&& arg); + + /*! + @verbatim embed:rst + + Sets N for asymptotic complexity calculation, so it becomes possible to calculate `Big O + <https://en.wikipedia.org/wiki/Big_O_notation>`_ from multiple benchmark evaluations. + + Use :cpp:func:`ankerl::nanobench::Bench::complexityBigO` when the evaluation has finished. See the tutorial + :ref:`asymptotic-complexity` for details. + + @endverbatim + + @tparam T Any type is cast to `double`. + @param b Length of N for the next benchmark run, so it is possible to calculate `bigO`. + */ + template <typename T> + Bench& complexityN(T b) noexcept; + ANKERL_NANOBENCH(NODISCARD) double complexityN() const noexcept; + + /*! + Calculates [Big O](https://en.wikipedia.org/wiki/Big_O_notation>) of the results with all preconfigured complexity functions. + Currently these complexity functions are fitted into the benchmark results: + + @f$ \mathcal{O}(1) @f$, + @f$ \mathcal{O}(n) @f$, + @f$ \mathcal{O}(\log{}n) @f$, + @f$ \mathcal{O}(n\log{}n) @f$, + @f$ \mathcal{O}(n^2) @f$, + @f$ \mathcal{O}(n^3) @f$. + + If we e.g. 
evaluate the complexity of `std::sort`, this is the result of `std::cout << bench.complexityBigO()`: + + ``` + | coefficient | err% | complexity + |--------------:|-------:|------------ + | 5.08935e-09 | 2.6% | O(n log n) + | 6.10608e-08 | 8.0% | O(n) + | 1.29307e-11 | 47.2% | O(n^2) + | 2.48677e-15 | 69.6% | O(n^3) + | 9.88133e-06 | 132.3% | O(log n) + | 5.98793e-05 | 162.5% | O(1) + ``` + + So in this case @f$ \mathcal{O}(n\log{}n) @f$ provides the best approximation. + + @verbatim embed:rst + See the tutorial :ref:`asymptotic-complexity` for details. + @endverbatim + @return Evaluation results, which can be printed or otherwise inspected. + */ + std::vector<BigO> complexityBigO() const; + + /** + * @brief Calculates bigO for a custom function. + * + * E.g. to calculate the mean squared error for @f$ \mathcal{O}(\log{}\log{}n) @f$, which is not part of the default set of + * complexityBigO(), you can do this: + * + * ``` + * auto logLogN = bench.complexityBigO("O(log log n)", [](double n) { + * return std::log2(std::log2(n)); + * }); + * ``` + * + * The resulting mean squared error can be printed with `std::cout << logLogN`. E.g. it prints something like this: + * + * ```text + * 2.46985e-05 * O(log log n), rms=1.48121 + * ``` + * + * @tparam Op Type of mapping operation. + * @param name Name for the function, e.g. "O(log log n)" + * @param op Op's operator() maps a `double` with the desired complexity function, e.g. `log2(log2(n))`. + * @return BigO Error calculation, which is streamable to std::cout. + */ + template <typename Op> + BigO complexityBigO(char const* name, Op op) const; + + template <typename Op> + BigO complexityBigO(std::string const& name, Op op) const; + + /*! + @verbatim embed:rst + + Convenience shortcut to :cpp:func:`ankerl::nanobench::render`. + + @endverbatim + */ + Bench& render(char const* templateContent, std::ostream& os); + + Bench& config(Config const& benchmarkConfig); + ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept; + +private: + Config mConfig{}; + std::vector<Result> mResults{}; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +/** + * @brief Makes sure none of the given arguments are optimized away by the compiler. + * + * @tparam Arg Type of the argument that shouldn't be optimized away. + * @param arg The input that we mark as being used, even though we don't do anything with it. + */ +template <typename Arg> +void doNotOptimizeAway(Arg&& arg); + +namespace detail { + +#if defined(_MSC_VER) +void doNotOptimizeAwaySink(void const*); + +template <typename T> +void doNotOptimizeAway(T const& val); + +#else + +// see folly's Benchmark.h +template <typename T> +constexpr bool doNotOptimizeNeedsIndirect() { + using Decayed = typename std::decay<T>::type; + return !ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(Decayed) || sizeof(Decayed) > sizeof(long) || std::is_pointer<Decayed>::value; +} + +template <typename T> +typename std::enable_if<!doNotOptimizeNeedsIndirect<T>()>::type doNotOptimizeAway(T const& val) { + // NOLINTNEXTLINE(hicpp-no-assembler) + asm volatile("" ::"r"(val)); +} + +template <typename T> +typename std::enable_if<doNotOptimizeNeedsIndirect<T>()>::type doNotOptimizeAway(T const& val) { + // NOLINTNEXTLINE(hicpp-no-assembler) + asm volatile("" ::"m"(val) : "memory"); +} +#endif + +// internally used, but visible because run() is templated. +// Not movable/copy-able, so we simply use a pointer instead of unique_ptr. 
This saves us from +// having to include <memory>, and the template instantiation overhead of unique_ptr which is unfortunately quite significant. +ANKERL_NANOBENCH(IGNORE_EFFCPP_PUSH) +class IterationLogic { +public: + explicit IterationLogic(Bench const& config) noexcept; + ~IterationLogic(); + + ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept; + void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept; + void moveResultTo(std::vector<Result>& results) noexcept; + +private: + struct Impl; + Impl* mPimpl; +}; +ANKERL_NANOBENCH(IGNORE_EFFCPP_POP) + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class PerformanceCounters { +public: + PerformanceCounters(PerformanceCounters const&) = delete; + PerformanceCounters& operator=(PerformanceCounters const&) = delete; + + PerformanceCounters(); + ~PerformanceCounters(); + + void beginMeasure(); + void endMeasure(); + void updateResults(uint64_t numIters); + + ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& val() const noexcept; + ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& has() const noexcept; + +private: +#if ANKERL_NANOBENCH(PERF_COUNTERS) + LinuxPerformanceCounters* mPc = nullptr; +#endif + PerfCountSet<uint64_t> mVal{}; + PerfCountSet<bool> mHas{}; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// Gets the singleton +PerformanceCounters& performanceCounters(); + +} // namespace detail + +class BigO { +public: + using RangeMeasure = std::vector<std::pair<double, double>>; + + template <typename Op> + static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op) { + for (auto& rangeMeasure : data) { + rangeMeasure.first = op(rangeMeasure.first); + } + return data; + } + + static RangeMeasure collectRangeMeasure(std::vector<Result> const& results); + + template <typename Op> + BigO(char const* bigOName, RangeMeasure const& rangeMeasure, Op rangeToN) + : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {} + + template <typename Op> + BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure, Op rangeToN) + : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {} + + BigO(char const* bigOName, RangeMeasure const& scaledRangeMeasure); + BigO(std::string const& bigOName, RangeMeasure const& scaledRangeMeasure); + ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept; + ANKERL_NANOBENCH(NODISCARD) double constant() const noexcept; + ANKERL_NANOBENCH(NODISCARD) double normalizedRootMeanSquare() const noexcept; + ANKERL_NANOBENCH(NODISCARD) bool operator<(BigO const& other) const noexcept; + +private: + std::string mName{}; + double mConstant{}; + double mNormalizedRootMeanSquare{}; +}; +std::ostream& operator<<(std::ostream& os, BigO const& bigO); +std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs); + +} // namespace nanobench +} // namespace ankerl + +// implementation ///////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { + +constexpr uint64_t(Rng::min)() { + return 0; +} + +constexpr uint64_t(Rng::max)() { + return (std::numeric_limits<uint64_t>::max)(); +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer") +uint64_t Rng::operator()() noexcept { + auto x = mX; + + mX = UINT64_C(15241094284759029579) * mY; + mY = rotl(mY - x, 27); + + return x; +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer") +uint32_t Rng::bounded(uint32_t range) noexcept { + uint64_t r32 = static_cast<uint32_t>(operator()()); + auto multiresult = r32 * range; + return 
static_cast<uint32_t>(multiresult >> 32U); +} + +double Rng::uniform01() noexcept { + auto i = (UINT64_C(0x3ff) << 52U) | (operator()() >> 12U); + // can't use union in c++ here for type puning, it's undefined behavior. + // std::memcpy is optimized anyways. + double d; + std::memcpy(&d, &i, sizeof(double)); + return d - 1.0; +} + +template <typename Container> +void Rng::shuffle(Container& container) noexcept { + auto size = static_cast<uint32_t>(container.size()); + for (auto i = size; i > 1U; --i) { + using std::swap; + auto p = bounded(i); // number in [0, i) + swap(container[i - 1], container[p]); + } +} + +constexpr uint64_t Rng::rotl(uint64_t x, unsigned k) noexcept { + return (x << k) | (x >> (64U - k)); +} + +template <typename Op> +ANKERL_NANOBENCH_NO_SANITIZE("integer") +Bench& Bench::run(Op&& op) { + // It is important that this method is kept short so the compiler can do better optimizations/ inlining of op() + detail::IterationLogic iterationLogic(*this); + auto& pc = detail::performanceCounters(); + + while (auto n = iterationLogic.numIters()) { + pc.beginMeasure(); + Clock::time_point before = Clock::now(); + while (n-- > 0) { + op(); + } + Clock::time_point after = Clock::now(); + pc.endMeasure(); + pc.updateResults(iterationLogic.numIters()); + iterationLogic.add(after - before, pc); + } + iterationLogic.moveResultTo(mResults); + return *this; +} + +// Performs all evaluations. +template <typename Op> +Bench& Bench::run(char const* benchmarkName, Op&& op) { + name(benchmarkName); + return run(std::forward<Op>(op)); +} + +template <typename Op> +Bench& Bench::run(std::string const& benchmarkName, Op&& op) { + name(benchmarkName); + return run(std::forward<Op>(op)); +} + +template <typename Op> +BigO Bench::complexityBigO(char const* benchmarkName, Op op) const { + return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op); +} + +template <typename Op> +BigO Bench::complexityBigO(std::string const& benchmarkName, Op op) const { + return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op); +} + +// Set the batch size, e.g. number of processed bytes, or some other metric for the size of the processed data in each iteration. +// Any argument is cast to double. +template <typename T> +Bench& Bench::batch(T b) noexcept { + mConfig.mBatch = static_cast<double>(b); + return *this; +} + +// Sets the computation complexity of the next run. Any argument is cast to double. +template <typename T> +Bench& Bench::complexityN(T n) noexcept { + mConfig.mComplexityN = static_cast<double>(n); + return *this; +} + +// Convenience: makes sure none of the given arguments are optimized away by the compiler. +template <typename Arg> +Bench& Bench::doNotOptimizeAway(Arg&& arg) { + detail::doNotOptimizeAway(std::forward<Arg>(arg)); + return *this; +} + +// Makes sure none of the given arguments are optimized away by the compiler. 
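+//
+// A minimal usage sketch (the variable and benchmark object are only illustrative):
+//
+//     uint64_t sum = 0;
+//     ankerl::nanobench::Bench bench;
+//     bench.run("accumulate", [&] { sum += 1; });
+//     ankerl::nanobench::doNotOptimizeAway(sum);
+//
+// On gcc/clang this ends up in one of the empty `asm volatile` statements in detail::doNotOptimizeAway
+// above, which merely claim to read the value; on MSVC it calls the non-inlined doNotOptimizeAwaySink()
+// defined in the implementation part.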
+template <typename Arg> +void doNotOptimizeAway(Arg&& arg) { + detail::doNotOptimizeAway(std::forward<Arg>(arg)); +} + +namespace detail { + +#if defined(_MSC_VER) +template <typename T> +void doNotOptimizeAway(T const& val) { + doNotOptimizeAwaySink(&val); +} + +#endif + +} // namespace detail +} // namespace nanobench +} // namespace ankerl + +#if defined(ANKERL_NANOBENCH_IMPLEMENT) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// implementation part - only visible in .cpp +/////////////////////////////////////////////////////////////////////////////////////////////////// + +# include <algorithm> // sort, reverse +# include <atomic> // compare_exchange_strong in loop overhead +# include <cstdlib> // getenv +# include <cstring> // strstr, strncmp +# include <fstream> // ifstream to parse proc files +# include <iomanip> // setw, setprecision +# include <iostream> // cout +# include <numeric> // accumulate +# include <random> // random_device +# include <sstream> // to_s in Number +# include <stdexcept> // throw for rendering templates +# include <tuple> // std::tie +# if defined(__linux__) +# include <unistd.h> //sysconf +# endif +# if ANKERL_NANOBENCH(PERF_COUNTERS) +# include <map> // map + +# include <linux/perf_event.h> +# include <sys/ioctl.h> +# include <sys/syscall.h> +# include <unistd.h> +# endif + +// declarations /////////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { + +// helper stuff that is only intended to be used internally +namespace detail { + +struct TableInfo; + +// formatting utilities +namespace fmt { + +class NumSep; +class StreamStateRestorer; +class Number; +class MarkDownColumn; +class MarkDownCode; + +} // namespace fmt +} // namespace detail +} // namespace nanobench +} // namespace ankerl + +// definitions //////////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { + +uint64_t splitMix64(uint64_t& state) noexcept; + +namespace detail { + +// helpers to get double values +template <typename T> +inline double d(T t) noexcept { + return static_cast<double>(t); +} +inline double d(Clock::duration duration) noexcept { + return std::chrono::duration_cast<std::chrono::duration<double>>(duration).count(); +} + +// Calculates clock resolution once, and remembers the result +inline Clock::duration clockResolution() noexcept; + +} // namespace detail + +namespace templates { + +char const* csv() noexcept { + return R"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total" +{{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}} +{{/result}})DELIM"; +} + +char const* htmlBoxplot() noexcept { + return R"DELIM(<html> + +<head> + <script src="https://cdn.plot.ly/plotly-latest.min.js"></script> +</head> + +<body> + <div id="myDiv"></div> + <script> + var data = [ + {{#result}}{ + name: '{{name}}', + y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/last}}{{/measurement}}], + }, + {{/result}} + ]; + var title = '{{title}}'; + + data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' })); + var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; 
Plotly.newPlot('myDiv', data, layout, {responsive: true}); + </script> +</body> + +</html>)DELIM"; +} + +char const* json() noexcept { + return R"DELIM({ + "results": [ +{{#result}} { + "title": "{{title}}", + "name": "{{name}}", + "unit": "{{unit}}", + "batch": {{batch}}, + "complexityN": {{complexityN}}, + "epochs": {{epochs}}, + "clockResolution": {{clockResolution}}, + "clockResolutionMultiple": {{clockResolutionMultiple}}, + "maxEpochTime": {{maxEpochTime}}, + "minEpochTime": {{minEpochTime}}, + "minEpochIterations": {{minEpochIterations}}, + "epochIterations": {{epochIterations}}, + "warmup": {{warmup}}, + "relative": {{relative}}, + "median(elapsed)": {{median(elapsed)}}, + "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}}, + "median(instructions)": {{median(instructions)}}, + "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}}, + "median(cpucycles)": {{median(cpucycles)}}, + "median(contextswitches)": {{median(contextswitches)}}, + "median(pagefaults)": {{median(pagefaults)}}, + "median(branchinstructions)": {{median(branchinstructions)}}, + "median(branchmisses)": {{median(branchmisses)}}, + "totalTime": {{sumProduct(iterations, elapsed)}}, + "measurements": [ +{{#measurement}} { + "iterations": {{iterations}}, + "elapsed": {{elapsed}}, + "pagefaults": {{pagefaults}}, + "cpucycles": {{cpucycles}}, + "contextswitches": {{contextswitches}}, + "instructions": {{instructions}}, + "branchinstructions": {{branchinstructions}}, + "branchmisses": {{branchmisses}} + }{{^-last}},{{/-last}} +{{/measurement}} ] + }{{^-last}},{{/-last}} +{{/result}} ] +})DELIM"; +} + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +struct Node { + enum class Type { tag, content, section, inverted_section }; + + char const* begin; + char const* end; + std::vector<Node> children; + Type type; + + template <size_t N> + // NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) + bool operator==(char const (&str)[N]) const noexcept { + return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1); + } +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +static std::vector<Node> parseMustacheTemplate(char const** tpl) { + std::vector<Node> nodes; + + while (true) { + auto begin = std::strstr(*tpl, "{{"); + auto end = begin; + if (begin != nullptr) { + begin += 2; + end = std::strstr(begin, "}}"); + } + + if (begin == nullptr || end == nullptr) { + // nothing found, finish node + nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content}); + return nodes; + } + + nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content}); + + // we found a tag + *tpl = end + 2; + switch (*begin) { + case '/': + // finished! 
bail out + return nodes; + + case '#': + nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section}); + break; + + case '^': + nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section}); + break; + + default: + nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag}); + break; + } + } +} + +static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) { + bool matchFirst = n == "-first"; + bool matchLast = n == "-last"; + if (!matchFirst && !matchLast) { + return false; + } + + bool doWrite = false; + if (n.type == Node::Type::section) { + doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1); + } else if (n.type == Node::Type::inverted_section) { + doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1); + } + + if (doWrite) { + for (auto const& child : n.children) { + if (child.type == Node::Type::content) { + out.write(child.begin, std::distance(child.begin, child.end)); + } + } + } + return true; +} + +static bool matchCmdArgs(std::string const& str, std::vector<std::string>& matchResult) { + matchResult.clear(); + auto idxOpen = str.find('('); + auto idxClose = str.find(')', idxOpen); + if (idxClose == std::string::npos) { + return false; + } + + matchResult.emplace_back(str.substr(0, idxOpen)); + + // split by comma + matchResult.emplace_back(std::string{}); + for (size_t i = idxOpen + 1; i != idxClose; ++i) { + if (str[i] == ' ' || str[i] == '\t') { + // skip whitespace + continue; + } + if (str[i] == ',') { + // got a comma => new string + matchResult.emplace_back(std::string{}); + continue; + } + // no whitespace no comma, append + matchResult.back() += str[i]; + } + return true; +} + +static bool generateConfigTag(Node const& n, Config const& config, std::ostream& out) { + using detail::d; + + if (n == "title") { + out << config.mBenchmarkTitle; + return true; + } else if (n == "name") { + out << config.mBenchmarkName; + return true; + } else if (n == "unit") { + out << config.mUnit; + return true; + } else if (n == "batch") { + out << config.mBatch; + return true; + } else if (n == "complexityN") { + out << config.mComplexityN; + return true; + } else if (n == "epochs") { + out << config.mNumEpochs; + return true; + } else if (n == "clockResolution") { + out << d(detail::clockResolution()); + return true; + } else if (n == "clockResolutionMultiple") { + out << config.mClockResolutionMultiple; + return true; + } else if (n == "maxEpochTime") { + out << d(config.mMaxEpochTime); + return true; + } else if (n == "minEpochTime") { + out << d(config.mMinEpochTime); + return true; + } else if (n == "minEpochIterations") { + out << config.mMinEpochIterations; + return true; + } else if (n == "epochIterations") { + out << config.mEpochIterations; + return true; + } else if (n == "warmup") { + out << config.mWarmup; + return true; + } else if (n == "relative") { + out << config.mIsRelative; + return true; + } + return false; +} + +static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) { + if (generateConfigTag(n, r.config(), out)) { + return out; + } + // match e.g. 
"median(elapsed)" + // g++ 4.8 doesn't implement std::regex :( + // static std::regex const regOpArg1("^([a-zA-Z]+)\\(([a-zA-Z]*)\\)$"); + // std::cmatch matchResult; + // if (std::regex_match(n.begin, n.end, matchResult, regOpArg1)) { + std::vector<std::string> matchResult; + if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) { + if (matchResult.size() == 2) { + auto m = Result::fromString(matchResult[1]); + if (m == Result::Measure::_size) { + return out << 0.0; + } + + if (matchResult[0] == "median") { + return out << r.median(m); + } + if (matchResult[0] == "average") { + return out << r.average(m); + } + if (matchResult[0] == "medianAbsolutePercentError") { + return out << r.medianAbsolutePercentError(m); + } + if (matchResult[0] == "sum") { + return out << r.sum(m); + } + if (matchResult[0] == "minimum") { + return out << r.minimum(m); + } + if (matchResult[0] == "maximum") { + return out << r.maximum(m); + } + } else if (matchResult.size() == 3) { + auto m1 = Result::fromString(matchResult[1]); + auto m2 = Result::fromString(matchResult[2]); + if (m1 == Result::Measure::_size || m2 == Result::Measure::_size) { + return out << 0.0; + } + + if (matchResult[0] == "sumProduct") { + return out << r.sumProduct(m1, m2); + } + } + } + + // match e.g. "sumProduct(elapsed, iterations)" + // static std::regex const regOpArg2("^([a-zA-Z]+)\\(([a-zA-Z]*)\\s*,\\s+([a-zA-Z]*)\\)$"); + + // nothing matches :( + throw std::runtime_error("command '" + std::string(n.begin, n.end) + "' not understood"); +} + +static void generateResultMeasurement(std::vector<Node> const& nodes, size_t idx, Result const& r, std::ostream& out) { + for (auto const& n : nodes) { + if (!generateFirstLast(n, idx, r.size(), out)) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); + switch (n.type) { + case Node::Type::content: + out.write(n.begin, std::distance(n.begin, n.end)); + break; + + case Node::Type::inverted_section: + throw std::runtime_error("got a inverted section inside measurement"); + + case Node::Type::section: + throw std::runtime_error("got a section inside measurement"); + + case Node::Type::tag: { + auto m = Result::fromString(std::string(n.begin, n.end)); + if (m == Result::Measure::_size || !r.has(m)) { + out << 0.0; + } else { + out << r.get(idx, m); + } + break; + } + } + } + } +} + +static void generateResult(std::vector<Node> const& nodes, size_t idx, std::vector<Result> const& results, std::ostream& out) { + auto const& r = results[idx]; + for (auto const& n : nodes) { + if (!generateFirstLast(n, idx, results.size(), out)) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); + switch (n.type) { + case Node::Type::content: + out.write(n.begin, std::distance(n.begin, n.end)); + break; + + case Node::Type::inverted_section: + throw std::runtime_error("got a inverted section inside result"); + + case Node::Type::section: + if (n == "measurement") { + for (size_t i = 0; i < r.size(); ++i) { + generateResultMeasurement(n.children, i, r, out); + } + } else { + throw std::runtime_error("got a section inside result"); + } + break; + + case Node::Type::tag: + generateResultTag(n, r, out); + break; + } + } + } +} + +} // namespace templates + +// helper stuff that only intended to be used internally +namespace detail { + +char const* getEnv(char const* name); +bool isEndlessRunning(std::string const& name); + +template <typename T> +T parseFile(std::string const& filename); + +void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& 
recommendations); +void printStabilityInformationOnce(std::ostream* os); + +// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry. +uint64_t& singletonHeaderHash() noexcept; + +// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference. +Clock::duration calcClockResolution(size_t numEvaluations) noexcept; + +// formatting utilities +namespace fmt { + +// adds thousands separator to numbers +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class NumSep : public std::numpunct<char> { +public: + explicit NumSep(char sep); + char do_thousands_sep() const override; + std::string do_grouping() const override; + +private: + char mSep; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// RAII to save & restore a stream's state +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class StreamStateRestorer { +public: + explicit StreamStateRestorer(std::ostream& s); + ~StreamStateRestorer(); + + // sets back all stream info that we remembered at construction + void restore(); + + // don't allow copying / moving + StreamStateRestorer(StreamStateRestorer const&) = delete; + StreamStateRestorer& operator=(StreamStateRestorer const&) = delete; + StreamStateRestorer(StreamStateRestorer&&) = delete; + StreamStateRestorer& operator=(StreamStateRestorer&&) = delete; + +private: + std::ostream& mStream; + std::locale mLocale; + std::streamsize const mPrecision; + std::streamsize const mWidth; + std::ostream::char_type const mFill; + std::ostream::fmtflags const mFmtFlags; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +// Number formatter +class Number { +public: + Number(int width, int precision, double value); + Number(int width, int precision, int64_t value); + std::string to_s() const; + +private: + friend std::ostream& operator<<(std::ostream& os, Number const& n); + std::ostream& write(std::ostream& os) const; + + int mWidth; + int mPrecision; + double mValue; +}; + +// helper replacement for std::to_string of signed/unsigned numbers so we are locale independent +std::string to_s(uint64_t s); + +std::ostream& operator<<(std::ostream& os, Number const& n); + +class MarkDownColumn { +public: + MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val); + std::string title() const; + std::string separator() const; + std::string invalid() const; + std::string value() const; + +private: + int mWidth; + int mPrecision; + std::string mTitle; + std::string mSuffix; + double mValue; +}; + +// Formats any text as markdown code, escaping backticks. 
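+//
+// Number prints its value with thousands separators by imbuing a NumSep into the stream, roughly like
+// this (the separator character here is only an example; see Number::write in the implementation below
+// for the real call):
+//
+//     os.imbue(std::locale(os.getloc(), new NumSep(',')));
+//     os << value;
+//
+// StreamStateRestorer puts the original locale and formatting flags back afterwards. The MarkDownCode
+// class that follows handles the backtick wrapping for the markdown table.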
+class MarkDownCode { +public: + explicit MarkDownCode(std::string const& what); + +private: + friend std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode); + std::ostream& write(std::ostream& os) const; + + std::string mWhat{}; +}; + +std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode); + +} // namespace fmt +} // namespace detail +} // namespace nanobench +} // namespace ankerl + +// implementation ///////////////////////////////////////////////////////////////////////////////// + +namespace ankerl { +namespace nanobench { + +void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out) { + detail::fmt::StreamStateRestorer restorer(out); + + out.precision(std::numeric_limits<double>::digits10); + auto nodes = templates::parseMustacheTemplate(&mustacheTemplate); + + for (auto const& n : nodes) { + ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); + switch (n.type) { + case templates::Node::Type::content: + out.write(n.begin, std::distance(n.begin, n.end)); + break; + + case templates::Node::Type::inverted_section: + throw std::runtime_error("unknown list '" + std::string(n.begin, n.end) + "'"); + + case templates::Node::Type::section: + if (n == "result") { + const size_t nbResults = results.size(); + for (size_t i = 0; i < nbResults; ++i) { + generateResult(n.children, i, results, out); + } + } else { + throw std::runtime_error("unknown section '" + std::string(n.begin, n.end) + "'"); + } + break; + + case templates::Node::Type::tag: + // This just uses the last result's config. + if (!generateConfigTag(n, results.back().config(), out)) { + throw std::runtime_error("unknown tag '" + std::string(n.begin, n.end) + "'"); + } + break; + } + } +} + +void render(char const* mustacheTemplate, const Bench& bench, std::ostream& out) { + render(mustacheTemplate, bench.results(), out); +} + +namespace detail { + +PerformanceCounters& performanceCounters() { +# if defined(__clang__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wexit-time-destructors" +# endif + static PerformanceCounters pc; +# if defined(__clang__) +# pragma clang diagnostic pop +# endif + return pc; +} + +// Windows version of doNotOptimizeAway +// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307 +// see https://github.com/facebook/folly/blob/master/folly/Benchmark.h#L280 +// see https://docs.microsoft.com/en-us/cpp/preprocessor/optimize +# if defined(_MSC_VER) +# pragma optimize("", off) +void doNotOptimizeAwaySink(void const*) {} +# pragma optimize("", on) +# endif + +template <typename T> +T parseFile(std::string const& filename) { + std::ifstream fin(filename); + T num{}; + fin >> num; + return num; +} + +char const* getEnv(char const* name) { +# if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4996) // getenv': This function or variable may be unsafe. 
+# endif + return std::getenv(name); +# if defined(_MSC_VER) +# pragma warning(pop) +# endif +} + +bool isEndlessRunning(std::string const& name) { + auto endless = getEnv("NANOBENCH_ENDLESS"); + return nullptr != endless && endless == name; +} + +void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) { + warnings.clear(); + recommendations.clear(); + + bool recommendCheckFlags = false; + +# if defined(DEBUG) + warnings.emplace_back("DEBUG defined"); + recommendCheckFlags = true; +# endif + + bool recommendPyPerf = false; +# if defined(__linux__) + auto nprocs = sysconf(_SC_NPROCESSORS_CONF); + if (nprocs <= 0) { + warnings.emplace_back("couldn't figure out number of processors - no governor, turbo check possible"); + } else { + + // check frequency scaling + for (long id = 0; id < nprocs; ++id) { + auto idStr = detail::fmt::to_s(static_cast<uint64_t>(id)); + auto sysCpu = "/sys/devices/system/cpu/cpu" + idStr; + auto minFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_min_freq"); + auto maxFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_max_freq"); + if (minFreq != maxFreq) { + auto minMHz = static_cast<double>(minFreq) / 1000.0; + auto maxMHz = static_cast<double>(maxFreq) / 1000.0; + warnings.emplace_back("CPU frequency scaling enabled: CPU " + idStr + " between " + + detail::fmt::Number(1, 1, minMHz).to_s() + " and " + detail::fmt::Number(1, 1, maxMHz).to_s() + + " MHz"); + recommendPyPerf = true; + break; + } + } + + auto currentGovernor = parseFile<std::string>("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); + if ("performance" != currentGovernor) { + warnings.emplace_back("CPU governor is '" + currentGovernor + "' but should be 'performance'"); + recommendPyPerf = true; + } + + if (0 == parseFile<int>("/sys/devices/system/cpu/intel_pstate/no_turbo")) { + warnings.emplace_back("Turbo is enabled, CPU frequency will fluctuate"); + recommendPyPerf = true; + } + } +# endif + + if (recommendCheckFlags) { + recommendations.emplace_back("Make sure you compile for Release"); + } + if (recommendPyPerf) { + recommendations.emplace_back("Use 'pyperf system tune' before benchmarking. See https://github.com/vstinner/pyperf"); + } +} + +void printStabilityInformationOnce(std::ostream* outStream) { + static bool shouldPrint = true; + if (shouldPrint && outStream) { + auto& os = *outStream; + shouldPrint = false; + std::vector<std::string> warnings; + std::vector<std::string> recommendations; + gatherStabilityInformation(warnings, recommendations); + if (warnings.empty()) { + return; + } + + os << "Warning, results might be unstable:" << std::endl; + for (auto const& w : warnings) { + os << "* " << w << std::endl; + } + + os << std::endl << "Recommendations" << std::endl; + for (auto const& r : recommendations) { + os << "* " << r << std::endl; + } + } +} + +// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry. 
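+// The "settings" are a hash_combine() of fnv1a(unit), fnv1a(title), relative() and performanceCounters()
+// (see IterationLogic::Impl::showResult() below), so changing any of these starts a new markdown table.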
+uint64_t& singletonHeaderHash() noexcept { + static uint64_t sHeaderHash{}; + return sHeaderHash; +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer") +inline uint64_t fnv1a(std::string const& str) noexcept { + auto val = UINT64_C(14695981039346656037); + for (auto c : str) { + val = (val ^ static_cast<uint8_t>(c)) * UINT64_C(1099511628211); + } + return val; +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer") +inline uint64_t hash_combine(uint64_t seed, uint64_t val) { + return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U)); +} + +// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference. +Clock::duration calcClockResolution(size_t numEvaluations) noexcept { + auto bestDuration = Clock::duration::max(); + Clock::time_point tBegin; + Clock::time_point tEnd; + for (size_t i = 0; i < numEvaluations; ++i) { + tBegin = Clock::now(); + do { + tEnd = Clock::now(); + } while (tBegin == tEnd); + bestDuration = (std::min)(bestDuration, tEnd - tBegin); + } + return bestDuration; +} + +// Calculates clock resolution once, and remembers the result +Clock::duration clockResolution() noexcept { + static Clock::duration sResolution = calcClockResolution(20); + return sResolution; +} + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +struct IterationLogic::Impl { + enum class State { warmup, upscaling_runtime, measuring, endless }; + + explicit Impl(Bench const& bench) + : mBench(bench) + , mResult(bench.config()) { + printStabilityInformationOnce(mBench.output()); + + // determine target runtime per epoch + mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple(); + if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) { + mTargetRuntimePerEpoch = mBench.maxEpochTime(); + } + if (mTargetRuntimePerEpoch < mBench.minEpochTime()) { + mTargetRuntimePerEpoch = mBench.minEpochTime(); + } + + if (isEndlessRunning(mBench.name())) { + std::cerr << "NANOBENCH_ENDLESS set: running '" << mBench.name() << "' endlessly" << std::endl; + mNumIters = (std::numeric_limits<uint64_t>::max)(); + mState = State::endless; + } else if (0 != mBench.warmup()) { + mNumIters = mBench.warmup(); + mState = State::warmup; + } else if (0 != mBench.epochIterations()) { + // exact number of iterations + mNumIters = mBench.epochIterations(); + mState = State::measuring; + } else { + mNumIters = mBench.minEpochIterations(); + mState = State::upscaling_runtime; + } + } + + // directly calculates new iters based on elapsed&iters, and adds a 10% noise. Makes sure we don't underflow. + ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters) noexcept { + auto doubleElapsed = d(elapsed); + auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch); + auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters); + + auto doubleMinEpochIters = d(mBench.minEpochIterations()); + if (doubleNewIters < doubleMinEpochIters) { + doubleNewIters = doubleMinEpochIters; + } + doubleNewIters *= 1.0 + 0.2 * mRng.uniform01(); + + // +0.5 for correct rounding when casting + // NOLINTNEXTLINE(bugprone-incorrect-roundings) + return static_cast<uint64_t>(doubleNewIters + 0.5); + } + + ANKERL_NANOBENCH_NO_SANITIZE("integer") void upscale(std::chrono::nanoseconds elapsed) { + if (elapsed * 10 < mTargetRuntimePerEpoch) { + // we are far below the target runtime. Multiply iterations by 10 (with overflow check) + if (mNumIters * 10 < mNumIters) { + // overflow :-( + showResult("iterations overflow. 
Maybe your code got optimized away?"); + mNumIters = 0; + return; + } + mNumIters *= 10; + } else { + mNumIters = calcBestNumIters(elapsed, mNumIters); + } + } + + void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept { +# if defined(ANKERL_NANOBENCH_LOG_ENABLED) + auto oldIters = mNumIters; +# endif + + switch (mState) { + case State::warmup: + if (isCloseEnoughForMeasurements(elapsed)) { + // if elapsed is close enough, we can skip upscaling and go right to measurements + // still, we don't add the result to the measurements. + mState = State::measuring; + mNumIters = calcBestNumIters(elapsed, mNumIters); + } else { + // not close enough: switch to upscaling + mState = State::upscaling_runtime; + upscale(elapsed); + } + break; + + case State::upscaling_runtime: + if (isCloseEnoughForMeasurements(elapsed)) { + // if we are close enough, add measurement and switch to always measuring + mState = State::measuring; + mTotalElapsed += elapsed; + mTotalNumIters += mNumIters; + mResult.add(elapsed, mNumIters, pc); + mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters); + } else { + upscale(elapsed); + } + break; + + case State::measuring: + // just add measurements - no questions asked. Even when runtime is low. But we can't ignore + // that fluctuation, or else we would bias the result + mTotalElapsed += elapsed; + mTotalNumIters += mNumIters; + mResult.add(elapsed, mNumIters, pc); + if (0 != mBench.epochIterations()) { + mNumIters = mBench.epochIterations(); + } else { + mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters); + } + break; + + case State::endless: + mNumIters = (std::numeric_limits<uint64_t>::max)(); + break; + } + + if (static_cast<uint64_t>(mResult.size()) == mBench.epochs()) { + // we got all the results that we need, finish it + showResult(""); + mNumIters = 0; + } + + ANKERL_NANOBENCH_LOG(mBench.name() << ": " << detail::fmt::Number(20, 3, static_cast<double>(elapsed.count())) << " elapsed, " + << detail::fmt::Number(20, 3, static_cast<double>(mTargetRuntimePerEpoch.count())) + << " target. oldIters=" << oldIters << ", mNumIters=" << mNumIters + << ", mState=" << static_cast<int>(mState)); + } + + void showResult(std::string const& errorMessage) const { + ANKERL_NANOBENCH_LOG(errorMessage); + + if (mBench.output() != nullptr) { + // prepare column data /////// + std::vector<fmt::MarkDownColumn> columns; + + auto rMedian = mResult.median(Result::Measure::elapsed); + + if (mBench.relative()) { + double d = 100.0; + if (!mBench.results().empty()) { + d = rMedian <= 0.0 ? 0.0 : mBench.results().front().median(Result::Measure::elapsed) / rMedian * 100.0; + } + columns.emplace_back(11, 1, "relative", "%", d); + } + + if (mBench.complexityN() > 0) { + columns.emplace_back(14, 0, "complexityN", "", mBench.complexityN()); + } + + columns.emplace_back(22, 2, "ns/" + mBench.unit(), "", 1e9 * rMedian / mBench.batch()); + columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 
0.0 : mBench.batch() / rMedian); + + double rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed); + columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0); + + double rInsMedian = -1.0; + if (mResult.has(Result::Measure::instructions)) { + rInsMedian = mResult.median(Result::Measure::instructions); + columns.emplace_back(18, 2, "ins/" + mBench.unit(), "", rInsMedian / mBench.batch()); + } + + double rCycMedian = -1.0; + if (mResult.has(Result::Measure::cpucycles)) { + rCycMedian = mResult.median(Result::Measure::cpucycles); + columns.emplace_back(18, 2, "cyc/" + mBench.unit(), "", rCycMedian / mBench.batch()); + } + if (rInsMedian > 0.0 && rCycMedian > 0.0) { + columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian); + } + if (mResult.has(Result::Measure::branchinstructions)) { + double rBraMedian = mResult.median(Result::Measure::branchinstructions); + columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch()); + if (mResult.has(Result::Measure::branchmisses)) { + double p = 0.0; + if (rBraMedian >= 1e-9) { + p = 100.0 * mResult.median(Result::Measure::branchmisses) / rBraMedian; + } + columns.emplace_back(10, 1, "miss%", "%", p); + } + } + + columns.emplace_back(12, 2, "total", "", mResult.sum(Result::Measure::elapsed)); + + // write everything + auto& os = *mBench.output(); + + uint64_t hash = 0; + hash = hash_combine(fnv1a(mBench.unit()), hash); + hash = hash_combine(fnv1a(mBench.title()), hash); + hash = hash_combine(mBench.relative(), hash); + hash = hash_combine(mBench.performanceCounters(), hash); + + if (hash != singletonHeaderHash()) { + singletonHeaderHash() = hash; + + // no result yet, print header + os << std::endl; + for (auto const& col : columns) { + os << col.title(); + } + os << "| " << mBench.title() << std::endl; + + for (auto const& col : columns) { + os << col.separator(); + } + os << "|:" << std::string(mBench.title().size() + 1U, '-') << std::endl; + } + + if (!errorMessage.empty()) { + for (auto const& col : columns) { + os << col.invalid(); + } + os << "| :boom: " << fmt::MarkDownCode(mBench.name()) << " (" << errorMessage << ')' << std::endl; + } else { + for (auto const& col : columns) { + os << col.value(); + } + os << "| "; + auto showUnstable = rErrorMedian >= 0.05; + if (showUnstable) { + os << ":wavy_dash: "; + } + os << fmt::MarkDownCode(mBench.name()); + if (showUnstable) { + auto avgIters = static_cast<double>(mTotalNumIters) / static_cast<double>(mBench.epochs()); + // NOLINTNEXTLINE(bugprone-incorrect-roundings) + auto suggestedIters = static_cast<uint64_t>(avgIters * 10 + 0.5); + + os << " (Unstable with ~" << detail::fmt::Number(1, 1, avgIters) + << " iters. Increase `minEpochIterations` to e.g. 
" << suggestedIters << ")"; + } + os << std::endl; + } + } + } + + ANKERL_NANOBENCH(NODISCARD) bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed) const noexcept { + return elapsed * 3 >= mTargetRuntimePerEpoch * 2; + } + + uint64_t mNumIters = 1; + Bench const& mBench; + std::chrono::nanoseconds mTargetRuntimePerEpoch{}; + Result mResult; + Rng mRng{123}; + std::chrono::nanoseconds mTotalElapsed{}; + uint64_t mTotalNumIters = 0; + + State mState = State::upscaling_runtime; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +IterationLogic::IterationLogic(Bench const& bench) noexcept + : mPimpl(new Impl(bench)) {} + +IterationLogic::~IterationLogic() { + if (mPimpl) { + delete mPimpl; + } +} + +uint64_t IterationLogic::numIters() const noexcept { + ANKERL_NANOBENCH_LOG(mPimpl->mBench.name() << ": mNumIters=" << mPimpl->mNumIters); + return mPimpl->mNumIters; +} + +void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept { + mPimpl->add(elapsed, pc); +} + +void IterationLogic::moveResultTo(std::vector<Result>& results) noexcept { + results.emplace_back(std::move(mPimpl->mResult)); +} + +# if ANKERL_NANOBENCH(PERF_COUNTERS) + +ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) +class LinuxPerformanceCounters { +public: + struct Target { + Target(uint64_t* targetValue_, bool correctMeasuringOverhead_, bool correctLoopOverhead_) + : targetValue(targetValue_) + , correctMeasuringOverhead(correctMeasuringOverhead_) + , correctLoopOverhead(correctLoopOverhead_) {} + + uint64_t* targetValue{}; + bool correctMeasuringOverhead{}; + bool correctLoopOverhead{}; + }; + + ~LinuxPerformanceCounters(); + + // quick operation + inline void start() {} + + inline void stop() {} + + bool monitor(perf_sw_ids swId, Target target); + bool monitor(perf_hw_id hwId, Target target); + + bool hasError() const noexcept { + return mHasError; + } + + // Just reading data is faster than enable & disabling. + // we subtract data ourselves. 
+ inline void beginMeasure() { + if (mHasError) { + return; + } + + // NOLINTNEXTLINE(hicpp-signed-bitwise) + mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); + if (mHasError) { + return; + } + + // NOLINTNEXTLINE(hicpp-signed-bitwise) + mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP); + } + + inline void endMeasure() { + if (mHasError) { + return; + } + + // NOLINTNEXTLINE(hicpp-signed-bitwise) + mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP)); + if (mHasError) { + return; + } + + auto const numBytes = sizeof(uint64_t) * mCounters.size(); + auto ret = read(mFd, mCounters.data(), numBytes); + mHasError = ret != static_cast<ssize_t>(numBytes); + } + + void updateResults(uint64_t numIters); + + // rounded integer division + template <typename T> + static inline T divRounded(T a, T divisor) { + return (a + divisor / 2) / divisor; + } + + template <typename Op> + ANKERL_NANOBENCH_NO_SANITIZE("integer") + void calibrate(Op&& op) { + // clear current calibration data, + for (auto& v : mCalibratedOverhead) { + v = UINT64_C(0); + } + + // create new calibration data + auto newCalibration = mCalibratedOverhead; + for (auto& v : newCalibration) { + v = (std::numeric_limits<uint64_t>::max)(); + } + for (size_t iter = 0; iter < 100; ++iter) { + beginMeasure(); + op(); + endMeasure(); + if (mHasError) { + return; + } + + for (size_t i = 0; i < newCalibration.size(); ++i) { + auto diff = mCounters[i]; + if (newCalibration[i] > diff) { + newCalibration[i] = diff; + } + } + } + + mCalibratedOverhead = std::move(newCalibration); + + { + // calibrate loop overhead. For branches & instructions this makes sense, not so much for everything else like cycles. + // marsaglia's xorshift: mov, sal/shr, xor. Times 3. + // This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further. + // see https://godbolt.org/z/49RVQ5 + uint64_t const numIters = 100000U + (std::random_device{}() & 3); + uint64_t n = numIters; + uint32_t x = 1234567; + auto fn = [&]() { + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + }; + + beginMeasure(); + while (n-- > 0) { + fn(); + } + endMeasure(); + detail::doNotOptimizeAway(x); + auto measure1 = mCounters; + + n = numIters; + beginMeasure(); + while (n-- > 0) { + // we now run *twice* so we can easily calculate the overhead + fn(); + fn(); + } + endMeasure(); + detail::doNotOptimizeAway(x); + auto measure2 = mCounters; + + for (size_t i = 0; i < mCounters.size(); ++i) { + // factor 2 because we have two instructions per loop + auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0; + auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0; + auto overhead = m1 * 2 > m2 ? 
m1 * 2 - m2 : 0; + + mLoopOverhead[i] = divRounded(overhead, numIters); + } + } + } + +private: + bool monitor(uint32_t type, uint64_t eventid, Target target); + + std::map<uint64_t, Target> mIdToTarget{}; + + // start with minimum size of 3 for read_format + std::vector<uint64_t> mCounters{3}; + std::vector<uint64_t> mCalibratedOverhead{3}; + std::vector<uint64_t> mLoopOverhead{3}; + + uint64_t mTimeEnabledNanos = 0; + uint64_t mTimeRunningNanos = 0; + int mFd = -1; + bool mHasError = false; +}; +ANKERL_NANOBENCH(IGNORE_PADDED_POP) + +LinuxPerformanceCounters::~LinuxPerformanceCounters() { + if (-1 != mFd) { + close(mFd); + } +} + +bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) { + return monitor(PERF_TYPE_SOFTWARE, swId, target); +} + +bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) { + return monitor(PERF_TYPE_HARDWARE, hwId, target); +} + +// overflow is ok, it's checked +ANKERL_NANOBENCH_NO_SANITIZE("integer") +void LinuxPerformanceCounters::updateResults(uint64_t numIters) { + // clear old data + for (auto& id_value : mIdToTarget) { + *id_value.second.targetValue = UINT64_C(0); + } + + if (mHasError) { + return; + } + + mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1]; + mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2]; + + for (uint64_t i = 0; i < mCounters[0]; ++i) { + auto idx = static_cast<size_t>(3 + i * 2 + 0); + auto id = mCounters[idx + 1U]; + + auto it = mIdToTarget.find(id); + if (it != mIdToTarget.end()) { + + auto& tgt = it->second; + *tgt.targetValue = mCounters[idx]; + if (tgt.correctMeasuringOverhead) { + if (*tgt.targetValue >= mCalibratedOverhead[idx]) { + *tgt.targetValue -= mCalibratedOverhead[idx]; + } else { + *tgt.targetValue = 0U; + } + } + if (tgt.correctLoopOverhead) { + auto correctionVal = mLoopOverhead[idx] * numIters; + if (*tgt.targetValue >= correctionVal) { + *tgt.targetValue -= correctionVal; + } else { + *tgt.targetValue = 0U; + } + } + } + } +} + +bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) { + *target.targetValue = (std::numeric_limits<uint64_t>::max)(); + if (mHasError) { + return false; + } + + auto pea = perf_event_attr(); + std::memset(&pea, 0, sizeof(perf_event_attr)); + pea.type = type; + pea.size = sizeof(perf_event_attr); + pea.config = eventid; + pea.disabled = 1; // start counter as disabled + pea.exclude_kernel = 1; + pea.exclude_hv = 1; + + // NOLINTNEXTLINE(hicpp-signed-bitwise) + pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; + + const int pid = 0; // the current process + const int cpu = -1; // all CPUs +# if defined(PERF_FLAG_FD_CLOEXEC) // since Linux 3.14 + const unsigned long flags = PERF_FLAG_FD_CLOEXEC; +# else + const unsigned long flags = 0; +# endif + + auto fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags)); + if (-1 == fd) { + return false; + } + if (-1 == mFd) { + // first call: set to fd, and use this from now on + mFd = fd; + } + uint64_t id = 0; + // NOLINTNEXTLINE(hicpp-signed-bitwise) + if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) { + // couldn't get id + return false; + } + + // insert into map, rely on the fact that map's references are constant. 
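The monitor()/beginMeasure()/endMeasure() machinery above is the standard Linux perf_event_open pattern: describe the event in a perf_event_attr, open a counter file descriptor, reset and enable it around the measured region, then read the value back. A minimal single-counter sketch of that pattern, without the group read format used here and with error handling reduced to a bool (illustrative only, not part of nanobench):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <cstdint>
#include <cstring>

// Count retired user-space instructions for a small region of code (Linux only).
static bool count_instructions(uint64_t& out) {
    perf_event_attr pea;
    std::memset(&pea, 0, sizeof(pea));
    pea.type = PERF_TYPE_HARDWARE;
    pea.size = sizeof(pea);
    pea.config = PERF_COUNT_HW_INSTRUCTIONS;
    pea.disabled = 1;       // start disabled, enable explicitly below
    pea.exclude_kernel = 1; // only count user-space work
    pea.exclude_hv = 1;

    // pid=0: this process, cpu=-1: any CPU, group_fd=-1: standalone counter.
    int fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, 0, -1, -1, 0));
    if (fd == -1) return false;

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

    volatile uint64_t sink = 0;
    for (uint64_t i = 0; i < 1000; ++i) sink += i; // the measured region

    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
    bool ok = read(fd, &out, sizeof(out)) == static_cast<ssize_t>(sizeof(out));
    close(fd);
    return ok;
}

The real class instead opens all counters into one group (group_fd = mFd) with PERF_FORMAT_GROUP, so a single read() returns every value at once; that is what the numBytes/mCounters bookkeeping above handles.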
+ mIdToTarget.emplace(id, target); + + // prepare readformat with the correct size (after the insert) + auto size = 3 + 2 * mIdToTarget.size(); + mCounters.resize(size); + mCalibratedOverhead.resize(size); + mLoopOverhead.resize(size); + + return true; +} + +PerformanceCounters::PerformanceCounters() + : mPc(new LinuxPerformanceCounters()) + , mVal() + , mHas() { + + mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults, true, false)); + mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false)); + mHas.contextSwitches = + mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches, true, false)); + mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions, true, true)); + mHas.branchInstructions = + mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions, true, false)); + mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses, true, false)); + // mHas.branchMisses = false; + + mPc->start(); + mPc->calibrate([] { + auto before = ankerl::nanobench::Clock::now(); + auto after = ankerl::nanobench::Clock::now(); + (void)before; + (void)after; + }); + + if (mPc->hasError()) { + // something failed, don't monitor anything. + mHas = PerfCountSet<bool>{}; + } +} + +PerformanceCounters::~PerformanceCounters() { + if (nullptr != mPc) { + delete mPc; + } +} + +void PerformanceCounters::beginMeasure() { + mPc->beginMeasure(); +} + +void PerformanceCounters::endMeasure() { + mPc->endMeasure(); +} + +void PerformanceCounters::updateResults(uint64_t numIters) { + mPc->updateResults(numIters); +} + +# else + +PerformanceCounters::PerformanceCounters() = default; +PerformanceCounters::~PerformanceCounters() = default; +void PerformanceCounters::beginMeasure() {} +void PerformanceCounters::endMeasure() {} +void PerformanceCounters::updateResults(uint64_t) {} + +# endif + +ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& PerformanceCounters::val() const noexcept { + return mVal; +} +ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& PerformanceCounters::has() const noexcept { + return mHas; +} + +// formatting utilities +namespace fmt { + +// adds thousands separator to numbers +NumSep::NumSep(char sep) + : mSep(sep) {} + +char NumSep::do_thousands_sep() const { + return mSep; +} + +std::string NumSep::do_grouping() const { + return "\003"; +} + +// RAII to save & restore a stream's state +StreamStateRestorer::StreamStateRestorer(std::ostream& s) + : mStream(s) + , mLocale(s.getloc()) + , mPrecision(s.precision()) + , mWidth(s.width()) + , mFill(s.fill()) + , mFmtFlags(s.flags()) {} + +StreamStateRestorer::~StreamStateRestorer() { + restore(); +} + +// sets back all stream info that we remembered at construction +void StreamStateRestorer::restore() { + mStream.imbue(mLocale); + mStream.precision(mPrecision); + mStream.width(mWidth); + mStream.fill(mFill); + mStream.flags(mFmtFlags); +} + +Number::Number(int width, int precision, int64_t value) + : mWidth(width) + , mPrecision(precision) + , mValue(static_cast<double>(value)) {} + +Number::Number(int width, int precision, double value) + : mWidth(width) + , mPrecision(precision) + , mValue(value) {} + +std::ostream& Number::write(std::ostream& os) const { + StreamStateRestorer restorer(os); + os.imbue(std::locale(os.getloc(), new 
NumSep(','))); + os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue; + return os; +} + +std::string Number::to_s() const { + std::stringstream ss; + write(ss); + return ss.str(); +} + +std::string to_s(uint64_t n) { + std::string str; + do { + str += static_cast<char>('0' + static_cast<char>(n % 10)); + n /= 10; + } while (n != 0); + std::reverse(str.begin(), str.end()); + return str; +} + +std::ostream& operator<<(std::ostream& os, Number const& n) { + return n.write(os); +} + +MarkDownColumn::MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val) + : mWidth(w) + , mPrecision(prec) + , mTitle(tit) + , mSuffix(suff) + , mValue(val) {} + +std::string MarkDownColumn::title() const { + std::stringstream ss; + ss << '|' << std::setw(mWidth - 2) << std::right << mTitle << ' '; + return ss.str(); +} + +std::string MarkDownColumn::separator() const { + std::string sep(static_cast<size_t>(mWidth), '-'); + sep.front() = '|'; + sep.back() = ':'; + return sep; +} + +std::string MarkDownColumn::invalid() const { + std::string sep(static_cast<size_t>(mWidth), ' '); + sep.front() = '|'; + sep[sep.size() - 2] = '-'; + return sep; +} + +std::string MarkDownColumn::value() const { + std::stringstream ss; + auto width = mWidth - 2 - static_cast<int>(mSuffix.size()); + ss << '|' << Number(width, mPrecision, mValue) << mSuffix << ' '; + return ss.str(); +} + +// Formats any text as markdown code, escaping backticks. +MarkDownCode::MarkDownCode(std::string const& what) { + mWhat.reserve(what.size() + 2); + mWhat.push_back('`'); + for (char c : what) { + mWhat.push_back(c); + if ('`' == c) { + mWhat.push_back('`'); + } + } + mWhat.push_back('`'); +} + +std::ostream& MarkDownCode::write(std::ostream& os) const { + return os << mWhat; +} + +std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) { + return mdCode.write(os); +} +} // namespace fmt +} // namespace detail + +// provide implementation here so it's only generated once +Config::Config() = default; +Config::~Config() = default; +Config& Config::operator=(Config const&) = default; +Config& Config::operator=(Config&&) = default; +Config::Config(Config const&) = default; +Config::Config(Config&&) noexcept = default; + +// provide implementation here so it's only generated once +Result::~Result() = default; +Result& Result::operator=(Result const&) = default; +Result& Result::operator=(Result&&) = default; +Result::Result(Result const&) = default; +Result::Result(Result&&) noexcept = default; + +namespace detail { +template <typename T> +inline constexpr typename std::underlying_type<T>::type u(T val) noexcept { + return static_cast<typename std::underlying_type<T>::type>(val); +} +} // namespace detail + +// Result returned after a benchmark has finished. Can be used as a baseline for relative(). 
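NumSep and StreamStateRestorer above are the standard facet dance: imbue the stream with a locale carrying a custom std::numpunct so digits get grouped, print, then restore the stream's previous state. A standalone sketch of the same idea, independent of nanobench:

#include <iomanip>
#include <iostream>
#include <locale>

// numpunct facet that groups digits in threes and separates them with ','.
class CommaSep : public std::numpunct<char> {
    char do_thousands_sep() const override { return ','; }
    std::string do_grouping() const override { return "\003"; }
};

int main() {
    std::ostream& os = std::cout;
    std::locale old = os.getloc();
    // The locale takes ownership of the facet pointer (facets are reference counted).
    os.imbue(std::locale(old, new CommaSep));
    os << std::fixed << std::setprecision(2) << 1234567.891 << '\n'; // prints 1,234,567.89
    os.imbue(old); // restore, as StreamStateRestorer does in its destructor
}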
+Result::Result(Config const& benchmarkConfig) + : mConfig(benchmarkConfig) + , mNameToMeasurements{detail::u(Result::Measure::_size)} {} + +void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) { + using detail::d; + using detail::u; + + double dIters = d(iters); + mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters); + + mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters); + if (pc.has().pageFaults) { + mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters); + } + if (pc.has().cpuCycles) { + mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters); + } + if (pc.has().contextSwitches) { + mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters); + } + if (pc.has().instructions) { + mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters); + } + if (pc.has().branchInstructions) { + double branchInstructions = 0.0; + // correcting branches: remove branch introduced by the while (...) loop for each iteration. + if (pc.val().branchInstructions > iters + 1U) { + branchInstructions = d(pc.val().branchInstructions - (iters + 1U)); + } + mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters); + + if (pc.has().branchMisses) { + // correcting branch misses + double branchMisses = d(pc.val().branchMisses); + if (branchMisses > branchInstructions) { + // can't have branch misses when there were branches... + branchMisses = branchInstructions; + } + + // assuming at least one missed branch for the loop + branchMisses -= 1.0; + if (branchMisses < 1.0) { + branchMisses = 1.0; + } + mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters); + } + } +} + +Config const& Result::config() const noexcept { + return mConfig; +} + +inline double calcMedian(std::vector<double>& data) { + if (data.empty()) { + return 0.0; + } + std::sort(data.begin(), data.end()); + + auto midIdx = data.size() / 2U; + if (1U == (data.size() & 1U)) { + return data[midIdx]; + } + return (data[midIdx - 1U] + data[midIdx]) / 2U; +} + +double Result::median(Measure m) const { + // create a copy so we can sort + auto data = mNameToMeasurements[detail::u(m)]; + return calcMedian(data); +} + +double Result::average(Measure m) const { + using detail::d; + auto const& data = mNameToMeasurements[detail::u(m)]; + if (data.empty()) { + return 0.0; + } + + // create a copy so we can sort + return sum(m) / d(data.size()); +} + +double Result::medianAbsolutePercentError(Measure m) const { + // create copy + auto data = mNameToMeasurements[detail::u(m)]; + + // calculates MdAPE which is the median of percentage error + // see https://www.spiderfinancial.com/support/documentation/numxl/reference-manual/forecasting-performance/mdape + auto med = calcMedian(data); + + // transform the data to absolute error + for (auto& x : data) { + x = (x - med) / x; + if (x < 0) { + x = -x; + } + } + return calcMedian(data); +} + +double Result::sum(Measure m) const noexcept { + auto const& data = mNameToMeasurements[detail::u(m)]; + return std::accumulate(data.begin(), data.end(), 0.0); +} + +double Result::sumProduct(Measure m1, Measure m2) const noexcept { + auto const& data1 = mNameToMeasurements[detail::u(m1)]; + auto const& data2 = mNameToMeasurements[detail::u(m2)]; + + if (data1.size() != data2.size()) { + return 0.0; + } + 
+ double result = 0.0; + for (size_t i = 0, s = data1.size(); i != s; ++i) { + result += data1[i] * data2[i]; + } + return result; +} + +bool Result::has(Measure m) const noexcept { + return !mNameToMeasurements[detail::u(m)].empty(); +} + +double Result::get(size_t idx, Measure m) const { + auto const& data = mNameToMeasurements[detail::u(m)]; + return data.at(idx); +} + +bool Result::empty() const noexcept { + return 0U == size(); +} + +size_t Result::size() const noexcept { + auto const& data = mNameToMeasurements[detail::u(Measure::elapsed)]; + return data.size(); +} + +double Result::minimum(Measure m) const noexcept { + auto const& data = mNameToMeasurements[detail::u(m)]; + if (data.empty()) { + return 0.0; + } + + // here its save to assume that at least one element is there + return *std::min_element(data.begin(), data.end()); +} + +double Result::maximum(Measure m) const noexcept { + auto const& data = mNameToMeasurements[detail::u(m)]; + if (data.empty()) { + return 0.0; + } + + // here its save to assume that at least one element is there + return *std::max_element(data.begin(), data.end()); +} + +Result::Measure Result::fromString(std::string const& str) { + if (str == "elapsed") { + return Measure::elapsed; + } else if (str == "iterations") { + return Measure::iterations; + } else if (str == "pagefaults") { + return Measure::pagefaults; + } else if (str == "cpucycles") { + return Measure::cpucycles; + } else if (str == "contextswitches") { + return Measure::contextswitches; + } else if (str == "instructions") { + return Measure::instructions; + } else if (str == "branchinstructions") { + return Measure::branchinstructions; + } else if (str == "branchmisses") { + return Measure::branchmisses; + } else { + // not found, return _size + return Measure::_size; + } +} + +// Configuration of a microbenchmark. +Bench::Bench() { + mConfig.mOut = &std::cout; +} + +Bench::Bench(Bench&&) = default; +Bench& Bench::operator=(Bench&&) = default; +Bench::Bench(Bench const&) = default; +Bench& Bench::operator=(Bench const&) = default; +Bench::~Bench() noexcept = default; + +double Bench::batch() const noexcept { + return mConfig.mBatch; +} + +double Bench::complexityN() const noexcept { + return mConfig.mComplexityN; +} + +// Set a baseline to compare it to. 100% it is exactly as fast as the baseline, >100% means it is faster than the baseline, <100% +// means it is slower than the baseline. +Bench& Bench::relative(bool isRelativeEnabled) noexcept { + mConfig.mIsRelative = isRelativeEnabled; + return *this; +} +bool Bench::relative() const noexcept { + return mConfig.mIsRelative; +} + +Bench& Bench::performanceCounters(bool showPerformanceCounters) noexcept { + mConfig.mShowPerformanceCounters = showPerformanceCounters; + return *this; +} +bool Bench::performanceCounters() const noexcept { + return mConfig.mShowPerformanceCounters; +} + +// Operation unit. Defaults to "op", could be e.g. "byte" for string processing. +// If u differs from currently set unit, the stored results will be cleared. +// Use singular (byte, not bytes). +Bench& Bench::unit(char const* u) { + if (u != mConfig.mUnit) { + mResults.clear(); + } + mConfig.mUnit = u; + return *this; +} + +Bench& Bench::unit(std::string const& u) { + return unit(u.c_str()); +} + +std::string const& Bench::unit() const noexcept { + return mConfig.mUnit; +} + +// If benchmarkTitle differs from currently set title, the stored results will be cleared. 
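calcMedian() and medianAbsolutePercentError() above follow the textbook definitions: sort and take the middle element (averaging the two middle elements for even-sized input), and for MdAPE take the median of |x - median| / x over all observations. A short standalone sketch under those definitions:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Median of a copy of the data; even-sized input averages the two middle values.
static double median_of(std::vector<double> v) {
    std::sort(v.begin(), v.end());
    size_t mid = v.size() / 2;
    return (v.size() % 2) ? v[mid] : (v[mid - 1] + v[mid]) / 2.0;
}

int main() {
    std::vector<double> samples{10.0, 11.0, 10.5, 12.0, 10.2};
    double med = median_of(samples); // 10.5
    std::vector<double> ape;
    for (double x : samples) {
        ape.push_back(std::fabs((x - med) / x)); // absolute percentage error per sample
    }
    double mdape = median_of(ape); // median of the per-sample errors
    std::printf("median=%.3f MdAPE=%.4f\n", med, mdape);
}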
+Bench& Bench::title(const char* benchmarkTitle) { + if (benchmarkTitle != mConfig.mBenchmarkTitle) { + mResults.clear(); + } + mConfig.mBenchmarkTitle = benchmarkTitle; + return *this; +} +Bench& Bench::title(std::string const& benchmarkTitle) { + if (benchmarkTitle != mConfig.mBenchmarkTitle) { + mResults.clear(); + } + mConfig.mBenchmarkTitle = benchmarkTitle; + return *this; +} + +std::string const& Bench::title() const noexcept { + return mConfig.mBenchmarkTitle; +} + +Bench& Bench::name(const char* benchmarkName) { + mConfig.mBenchmarkName = benchmarkName; + return *this; +} + +Bench& Bench::name(std::string const& benchmarkName) { + mConfig.mBenchmarkName = benchmarkName; + return *this; +} + +std::string const& Bench::name() const noexcept { + return mConfig.mBenchmarkName; +} + +// Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch. +Bench& Bench::epochs(size_t numEpochs) noexcept { + mConfig.mNumEpochs = numEpochs; + return *this; +} +size_t Bench::epochs() const noexcept { + return mConfig.mNumEpochs; +} + +// Desired evaluation time is a multiple of clock resolution. Default is to be 1000 times above this measurement precision. +Bench& Bench::clockResolutionMultiple(size_t multiple) noexcept { + mConfig.mClockResolutionMultiple = multiple; + return *this; +} +size_t Bench::clockResolutionMultiple() const noexcept { + return mConfig.mClockResolutionMultiple; +} + +// Sets the maximum time each epoch should take. Default is 100ms. +Bench& Bench::maxEpochTime(std::chrono::nanoseconds t) noexcept { + mConfig.mMaxEpochTime = t; + return *this; +} +std::chrono::nanoseconds Bench::maxEpochTime() const noexcept { + return mConfig.mMaxEpochTime; +} + +// Sets the maximum time each epoch should take. Default is 100ms. +Bench& Bench::minEpochTime(std::chrono::nanoseconds t) noexcept { + mConfig.mMinEpochTime = t; + return *this; +} +std::chrono::nanoseconds Bench::minEpochTime() const noexcept { + return mConfig.mMinEpochTime; +} + +Bench& Bench::minEpochIterations(uint64_t numIters) noexcept { + mConfig.mMinEpochIterations = (numIters == 0) ? 
1 : numIters; + return *this; +} +uint64_t Bench::minEpochIterations() const noexcept { + return mConfig.mMinEpochIterations; +} + +Bench& Bench::epochIterations(uint64_t numIters) noexcept { + mConfig.mEpochIterations = numIters; + return *this; +} +uint64_t Bench::epochIterations() const noexcept { + return mConfig.mEpochIterations; +} + +Bench& Bench::warmup(uint64_t numWarmupIters) noexcept { + mConfig.mWarmup = numWarmupIters; + return *this; +} +uint64_t Bench::warmup() const noexcept { + return mConfig.mWarmup; +} + +Bench& Bench::config(Config const& benchmarkConfig) { + mConfig = benchmarkConfig; + return *this; +} +Config const& Bench::config() const noexcept { + return mConfig; +} + +Bench& Bench::output(std::ostream* outstream) noexcept { + mConfig.mOut = outstream; + return *this; +} + +ANKERL_NANOBENCH(NODISCARD) std::ostream* Bench::output() const noexcept { + return mConfig.mOut; +} + +std::vector<Result> const& Bench::results() const noexcept { + return mResults; +} + +Bench& Bench::render(char const* templateContent, std::ostream& os) { + ::ankerl::nanobench::render(templateContent, *this, os); + return *this; +} + +std::vector<BigO> Bench::complexityBigO() const { + std::vector<BigO> bigOs; + auto rangeMeasure = BigO::collectRangeMeasure(mResults); + bigOs.emplace_back("O(1)", rangeMeasure, [](double) { + return 1.0; + }); + bigOs.emplace_back("O(n)", rangeMeasure, [](double n) { + return n; + }); + bigOs.emplace_back("O(log n)", rangeMeasure, [](double n) { + return std::log2(n); + }); + bigOs.emplace_back("O(n log n)", rangeMeasure, [](double n) { + return n * std::log2(n); + }); + bigOs.emplace_back("O(n^2)", rangeMeasure, [](double n) { + return n * n; + }); + bigOs.emplace_back("O(n^3)", rangeMeasure, [](double n) { + return n * n * n; + }); + std::sort(bigOs.begin(), bigOs.end()); + return bigOs; +} + +Rng::Rng() + : mX(0) + , mY(0) { + std::random_device rd; + std::uniform_int_distribution<uint64_t> dist; + do { + mX = dist(rd); + mY = dist(rd); + } while (mX == 0 && mY == 0); +} + +ANKERL_NANOBENCH_NO_SANITIZE("integer") +uint64_t splitMix64(uint64_t& state) noexcept { + uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15)); + z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9); + z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb); + return z ^ (z >> 31U); +} + +// Seeded as described in romu paper (update april 2020) +Rng::Rng(uint64_t seed) noexcept + : mX(splitMix64(seed)) + , mY(splitMix64(seed)) { + for (size_t i = 0; i < 10; ++i) { + operator()(); + } +} + +// only internally used to copy the RNG. 
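complexityBigO() above fits each candidate curve against the (complexityN, median elapsed time) pairs and sorts the fits by normalized root-mean-square error. The pairs are normally collected by running the same benchmark at several problem sizes with complexityN set on each run; a hedged sketch of that driver pattern (assuming the usual nanobench front-end with a complexityN(n) setter and Bench::run, neither of which appears in this excerpt):

#include <iostream>
#include <set>
// Assumes the nanobench header providing ankerl::nanobench::Bench is already included.

// Illustrative only: measure std::set insertion at several sizes n, tagging each
// run with complexityN(n), then let nanobench pick the best-fitting complexity.
void measure_set_insert(ankerl::nanobench::Bench& bench) {
    for (size_t n = 100; n <= 100000; n *= 10) {
        std::set<int> s;
        bench.complexityN(n).run("std::set insert", [&] {
            for (size_t i = 0; i < n; ++i) s.insert(static_cast<int>(i));
            s.clear();
        });
    }
    // Streams a table of "coefficient | err% | complexity", best fit first,
    // via the operator<< for std::vector<BigO> defined below.
    std::cout << bench.complexityBigO() << std::endl;
}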
+Rng::Rng(uint64_t x, uint64_t y) noexcept + : mX(x) + , mY(y) {} + +Rng Rng::copy() const noexcept { + return Rng{mX, mY}; +} + +BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result> const& results) { + BigO::RangeMeasure rangeMeasure; + for (auto const& result : results) { + if (result.config().mComplexityN > 0.0) { + rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed)); + } + } + return rangeMeasure; +} + +BigO::BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure) + : mName(bigOName) { + + // estimate the constant factor + double sumRangeMeasure = 0.0; + double sumRangeRange = 0.0; + + for (size_t i = 0; i < rangeMeasure.size(); ++i) { + sumRangeMeasure += rangeMeasure[i].first * rangeMeasure[i].second; + sumRangeRange += rangeMeasure[i].first * rangeMeasure[i].first; + } + mConstant = sumRangeMeasure / sumRangeRange; + + // calculate root mean square + double err = 0.0; + double sumMeasure = 0.0; + for (size_t i = 0; i < rangeMeasure.size(); ++i) { + auto diff = mConstant * rangeMeasure[i].first - rangeMeasure[i].second; + err += diff * diff; + + sumMeasure += rangeMeasure[i].second; + } + + auto n = static_cast<double>(rangeMeasure.size()); + auto mean = sumMeasure / n; + mNormalizedRootMeanSquare = std::sqrt(err / n) / mean; +} + +BigO::BigO(const char* bigOName, RangeMeasure const& rangeMeasure) + : BigO(std::string(bigOName), rangeMeasure) {} + +std::string const& BigO::name() const noexcept { + return mName; +} + +double BigO::constant() const noexcept { + return mConstant; +} + +double BigO::normalizedRootMeanSquare() const noexcept { + return mNormalizedRootMeanSquare; +} + +bool BigO::operator<(BigO const& other) const noexcept { + return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName); +} + +std::ostream& operator<<(std::ostream& os, BigO const& bigO) { + return os << bigO.constant() << " * " << bigO.name() << ", rms=" << bigO.normalizedRootMeanSquare(); +} + +std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs) { + detail::fmt::StreamStateRestorer restorer(os); + os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl; + for (auto const& bigO : bigOs) { + os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " "; + os << "|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) << "% "; + os << "| " << bigO.name(); + os << std::endl; + } + return os; +} + +} // namespace nanobench +} // namespace ankerl + +#endif // ANKERL_NANOBENCH_IMPLEMENT +#endif // ANKERL_NANOBENCH_H_INCLUDED diff --git a/src/bench/poly1305.cpp b/src/bench/poly1305.cpp index 02e5fecc0d..d8db99e7d4 100644 --- a/src/bench/poly1305.cpp +++ b/src/bench/poly1305.cpp @@ -11,30 +11,31 @@ static constexpr uint64_t BUFFER_SIZE_TINY = 64; static constexpr uint64_t BUFFER_SIZE_SMALL = 256; static constexpr uint64_t BUFFER_SIZE_LARGE = 1024*1024; -static void POLY1305(benchmark::State& state, size_t buffersize) +static void POLY1305(benchmark::Bench& bench, size_t buffersize) { std::vector<unsigned char> tag(POLY1305_TAGLEN, 0); std::vector<unsigned char> key(POLY1305_KEYLEN, 0); std::vector<unsigned char> in(buffersize, 0); - while (state.KeepRunning()) + bench.batch(in.size()).unit("byte").run([&] { poly1305_auth(tag.data(), in.data(), in.size(), key.data()); + }); } -static void POLY1305_64BYTES(benchmark::State& state) +static void 
POLY1305_64BYTES(benchmark::Bench& bench) { - POLY1305(state, BUFFER_SIZE_TINY); + POLY1305(bench, BUFFER_SIZE_TINY); } -static void POLY1305_256BYTES(benchmark::State& state) +static void POLY1305_256BYTES(benchmark::Bench& bench) { - POLY1305(state, BUFFER_SIZE_SMALL); + POLY1305(bench, BUFFER_SIZE_SMALL); } -static void POLY1305_1MB(benchmark::State& state) +static void POLY1305_1MB(benchmark::Bench& bench) { - POLY1305(state, BUFFER_SIZE_LARGE); + POLY1305(bench, BUFFER_SIZE_LARGE); } -BENCHMARK(POLY1305_64BYTES, 500000); -BENCHMARK(POLY1305_256BYTES, 250000); -BENCHMARK(POLY1305_1MB, 340); +BENCHMARK(POLY1305_64BYTES); +BENCHMARK(POLY1305_256BYTES); +BENCHMARK(POLY1305_1MB); diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp index 42b351a72d..a2dbefa54a 100644 --- a/src/bench/prevector.cpp +++ b/src/bench/prevector.cpp @@ -30,51 +30,44 @@ static_assert(IS_TRIVIALLY_CONSTRUCTIBLE<trivial_t>::value, "expected trivial_t to be trivially constructible"); template <typename T> -static void PrevectorDestructor(benchmark::State& state) +static void PrevectorDestructor(benchmark::Bench& bench) { - while (state.KeepRunning()) { - for (auto x = 0; x < 1000; ++x) { - prevector<28, T> t0; - prevector<28, T> t1; - t0.resize(28); - t1.resize(29); - } - } + bench.batch(2).run([&] { + prevector<28, T> t0; + prevector<28, T> t1; + t0.resize(28); + t1.resize(29); + }); } template <typename T> -static void PrevectorClear(benchmark::State& state) +static void PrevectorClear(benchmark::Bench& bench) { - - while (state.KeepRunning()) { - for (auto x = 0; x < 1000; ++x) { - prevector<28, T> t0; - prevector<28, T> t1; - t0.resize(28); - t0.clear(); - t1.resize(29); - t1.clear(); - } - } + prevector<28, T> t0; + prevector<28, T> t1; + bench.batch(2).run([&] { + t0.resize(28); + t0.clear(); + t1.resize(29); + t1.clear(); + }); } template <typename T> -static void PrevectorResize(benchmark::State& state) +static void PrevectorResize(benchmark::Bench& bench) { - while (state.KeepRunning()) { - prevector<28, T> t0; - prevector<28, T> t1; - for (auto x = 0; x < 1000; ++x) { - t0.resize(28); - t0.resize(0); - t1.resize(29); - t1.resize(0); - } - } + prevector<28, T> t0; + prevector<28, T> t1; + bench.batch(4).run([&] { + t0.resize(28); + t0.resize(0); + t1.resize(29); + t1.resize(0); + }); } template <typename T> -static void PrevectorDeserialize(benchmark::State& state) +static void PrevectorDeserialize(benchmark::Bench& bench) { CDataStream s0(SER_NETWORK, 0); prevector<28, T> t0; @@ -86,26 +79,28 @@ static void PrevectorDeserialize(benchmark::State& state) for (auto x = 0; x < 101; ++x) { s0 << t0; } - while (state.KeepRunning()) { + bench.batch(1000).run([&] { prevector<28, T> t1; for (auto x = 0; x < 1000; ++x) { s0 >> t1; } s0.Init(SER_NETWORK, 0); - } + }); } -#define PREVECTOR_TEST(name, nontrivops, trivops) \ - static void Prevector ## name ## Nontrivial(benchmark::State& state) { \ - Prevector ## name<nontrivial_t>(state); \ - } \ - BENCHMARK(Prevector ## name ## Nontrivial, nontrivops); \ - static void Prevector ## name ## Trivial(benchmark::State& state) { \ - Prevector ## name<trivial_t>(state); \ - } \ - BENCHMARK(Prevector ## name ## Trivial, trivops); +#define PREVECTOR_TEST(name) \ + static void Prevector##name##Nontrivial(benchmark::Bench& bench) \ + { \ + Prevector##name<nontrivial_t>(bench); \ + } \ + BENCHMARK(Prevector##name##Nontrivial); \ + static void Prevector##name##Trivial(benchmark::Bench& bench) \ + { \ + Prevector##name<trivial_t>(bench); \ + } \ + 
BENCHMARK(Prevector##name##Trivial); -PREVECTOR_TEST(Clear, 28300, 88600) -PREVECTOR_TEST(Destructor, 28800, 88900) -PREVECTOR_TEST(Resize, 28900, 90300) -PREVECTOR_TEST(Deserialize, 6800, 52000) +PREVECTOR_TEST(Clear) +PREVECTOR_TEST(Destructor) +PREVECTOR_TEST(Resize) +PREVECTOR_TEST(Deserialize) diff --git a/src/bench/rollingbloom.cpp b/src/bench/rollingbloom.cpp index 6cdb4ff0a7..9b43951e6e 100644 --- a/src/bench/rollingbloom.cpp +++ b/src/bench/rollingbloom.cpp @@ -6,12 +6,12 @@ #include <bench/bench.h> #include <bloom.h> -static void RollingBloom(benchmark::State& state) +static void RollingBloom(benchmark::Bench& bench) { CRollingBloomFilter filter(120000, 0.000001); std::vector<unsigned char> data(32); uint32_t count = 0; - while (state.KeepRunning()) { + bench.run([&] { count++; data[0] = count; data[1] = count >> 8; @@ -24,16 +24,16 @@ static void RollingBloom(benchmark::State& state) data[2] = count >> 8; data[3] = count; filter.contains(data); - } + }); } -static void RollingBloomReset(benchmark::State& state) +static void RollingBloomReset(benchmark::Bench& bench) { CRollingBloomFilter filter(120000, 0.000001); - while (state.KeepRunning()) { + bench.run([&] { filter.reset(); - } + }); } -BENCHMARK(RollingBloom, 1500 * 1000); -BENCHMARK(RollingBloomReset, 20000); +BENCHMARK(RollingBloom); +BENCHMARK(RollingBloomReset); diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index 511573abac..4b45264a3c 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -11,7 +11,8 @@ #include <univalue.h> -static void BlockToJsonVerbose(benchmark::State& state) { +static void BlockToJsonVerbose(benchmark::Bench& bench) +{ CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION); char a = '\0'; stream.write(&a, 1); // Prevent compaction @@ -24,9 +25,9 @@ static void BlockToJsonVerbose(benchmark::State& state) { blockindex.phashBlock = &blockHash; blockindex.nBits = 403014710; - while (state.KeepRunning()) { + bench.run([&] { (void)blockToJSON(block, &blockindex, &blockindex, /*verbose*/ true); - } + }); } -BENCHMARK(BlockToJsonVerbose, 10); +BENCHMARK(BlockToJsonVerbose); diff --git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp index bf63cccf09..1ff41765cf 100644 --- a/src/bench/rpc_mempool.cpp +++ b/src/bench/rpc_mempool.cpp @@ -15,7 +15,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& poo pool.addUnchecked(CTxMemPoolEntry(tx, fee, /* time */ 0, /* height */ 1, /* spendsCoinbase */ false, /* sigOpCost */ 4, lp)); } -static void RpcMempool(benchmark::State& state) +static void RpcMempool(benchmark::Bench& bench) { CTxMemPool pool; LOCK2(cs_main, pool.cs); @@ -32,9 +32,9 @@ static void RpcMempool(benchmark::State& state) AddTx(tx_r, /* fee */ i, pool); } - while (state.KeepRunning()) { + bench.run([&] { (void)MempoolToJSON(pool, /*verbose*/ true); - } + }); } -BENCHMARK(RpcMempool, 40); +BENCHMARK(RpcMempool); diff --git a/src/bench/util_time.cpp b/src/bench/util_time.cpp index 72d97354aa..fad179eb87 100644 --- a/src/bench/util_time.cpp +++ b/src/bench/util_time.cpp @@ -6,37 +6,37 @@ #include <util/time.h> -static void BenchTimeDeprecated(benchmark::State& state) +static void BenchTimeDeprecated(benchmark::Bench& bench) { - while (state.KeepRunning()) { + bench.run([&] { (void)GetTime(); - } + }); } -static void BenchTimeMock(benchmark::State& state) +static void BenchTimeMock(benchmark::Bench& bench) { SetMockTime(111); - while (state.KeepRunning()) { + bench.run([&] { 
(void)GetTime<std::chrono::seconds>(); - } + }); SetMockTime(0); } -static void BenchTimeMillis(benchmark::State& state) +static void BenchTimeMillis(benchmark::Bench& bench) { - while (state.KeepRunning()) { + bench.run([&] { (void)GetTime<std::chrono::milliseconds>(); - } + }); } -static void BenchTimeMillisSys(benchmark::State& state) +static void BenchTimeMillisSys(benchmark::Bench& bench) { - while (state.KeepRunning()) { + bench.run([&] { (void)GetTimeMillis(); - } + }); } -BENCHMARK(BenchTimeDeprecated, 100000000); -BENCHMARK(BenchTimeMillis, 6000000); -BENCHMARK(BenchTimeMillisSys, 6000000); -BENCHMARK(BenchTimeMock, 300000000); +BENCHMARK(BenchTimeDeprecated); +BENCHMARK(BenchTimeMillis); +BENCHMARK(BenchTimeMillisSys); +BENCHMARK(BenchTimeMock); diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp index 14bca5f7d1..9af0b502eb 100644 --- a/src/bench/verify_script.cpp +++ b/src/bench/verify_script.cpp @@ -16,7 +16,7 @@ // Microbenchmark for verification of a basic P2WPKH script. Can be easily // modified to measure performance of other types of scripts. -static void VerifyScriptBench(benchmark::State& state) +static void VerifyScriptBench(benchmark::Bench& bench) { const ECCVerifyHandle verify_handle; ECC_Start(); @@ -34,7 +34,7 @@ static void VerifyScriptBench(benchmark::State& state) key.Set(vchKey.begin(), vchKey.end(), false); CPubKey pubkey = key.GetPubKey(); uint160 pubkeyHash; - CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(pubkeyHash.begin()); + CHash160().Write(pubkey).Finalize(pubkeyHash); // Script. CScript scriptPubKey = CScript() << witnessversion << ToByteVector(pubkeyHash); @@ -49,7 +49,7 @@ static void VerifyScriptBench(benchmark::State& state) witness.stack.push_back(ToByteVector(pubkey)); // Benchmark. 
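The conversion in these bench files is mechanical: the fixed iteration count passed to BENCHMARK() is dropped, while (state.KeepRunning()) loops become bench.run([&] { ... }) calls, and batch()/unit() describe how much work a single lambda invocation represents so results are reported per element rather than per call. A hedged template for a new-style benchmark following the same pattern (the function name and workload are illustrative, not taken from the tree):

#include <bench/bench.h>

#include <vector>

static void ExampleVectorPushBack(benchmark::Bench& bench)
{
    std::vector<int> v;
    v.reserve(1000);
    // One lambda invocation performs 1000 push_backs, so report per-element cost.
    bench.batch(1000).unit("element").run([&] {
        v.clear();
        for (int i = 0; i < 1000; ++i) v.push_back(i);
    });
}
BENCHMARK(ExampleVectorPushBack);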
- while (state.KeepRunning()) { + bench.run([&] { ScriptError err; bool success = VerifyScript( txSpend.vin[0].scriptSig, @@ -71,11 +71,12 @@ static void VerifyScriptBench(benchmark::State& state) (const unsigned char*)stream.data(), stream.size(), 0, flags, nullptr); assert(csuccess == 1); #endif - } + }); ECC_Stop(); } -static void VerifyNestedIfScript(benchmark::State& state) { +static void VerifyNestedIfScript(benchmark::Bench& bench) +{ std::vector<std::vector<unsigned char>> stack; CScript script; for (int i = 0; i < 100; ++i) { @@ -87,15 +88,13 @@ static void VerifyNestedIfScript(benchmark::State& state) { for (int i = 0; i < 100; ++i) { script << OP_ENDIF; } - while (state.KeepRunning()) { + bench.run([&] { auto stack_copy = stack; ScriptError error; bool ret = EvalScript(stack_copy, script, 0, BaseSignatureChecker(), SigVersion::BASE, &error); assert(ret); - } + }); } - -BENCHMARK(VerifyScriptBench, 6300); - -BENCHMARK(VerifyNestedIfScript, 100); +BENCHMARK(VerifyScriptBench); +BENCHMARK(VerifyNestedIfScript); diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp index 05cfb3438e..e16182b48e 100644 --- a/src/bench/wallet_balance.cpp +++ b/src/bench/wallet_balance.cpp @@ -12,7 +12,7 @@ #include <validationinterface.h> #include <wallet/wallet.h> -static void WalletBalance(benchmark::State& state, const bool set_dirty, const bool add_watchonly, const bool add_mine) +static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const bool add_watchonly, const bool add_mine) { TestingSetup test_setup{ CBaseChainParams::REGTEST, @@ -45,20 +45,20 @@ static void WalletBalance(benchmark::State& state, const bool set_dirty, const b auto bal = wallet.GetBalance(); // Cache - while (state.KeepRunning()) { + bench.run([&] { if (set_dirty) wallet.MarkDirty(); bal = wallet.GetBalance(); if (add_mine) assert(bal.m_mine_trusted > 0); if (add_watchonly) assert(bal.m_watchonly_trusted > 0); - } + }); } -static void WalletBalanceDirty(benchmark::State& state) { WalletBalance(state, /* set_dirty */ true, /* add_watchonly */ true, /* add_mine */ true); } -static void WalletBalanceClean(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ true); } -static void WalletBalanceMine(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ false, /* add_mine */ true); } -static void WalletBalanceWatch(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ false); } +static void WalletBalanceDirty(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ true, /* add_watchonly */ true, /* add_mine */ true); } +static void WalletBalanceClean(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ true); } +static void WalletBalanceMine(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ false, /* add_mine */ true); } +static void WalletBalanceWatch(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ false); } -BENCHMARK(WalletBalanceDirty, 2500); -BENCHMARK(WalletBalanceClean, 8000); -BENCHMARK(WalletBalanceMine, 16000); -BENCHMARK(WalletBalanceWatch, 8000); +BENCHMARK(WalletBalanceDirty); +BENCHMARK(WalletBalanceClean); +BENCHMARK(WalletBalanceMine); +BENCHMARK(WalletBalanceWatch); diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index f5125f22db..cf52b710cb 100644 --- 
a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -43,32 +43,32 @@ static const int CONTINUE_EXECUTION=-1; /** Default number of blocks to generate for RPC generatetoaddress. */ static const std::string DEFAULT_NBLOCKS = "1"; -static void SetupCliArgs() +static void SetupCliArgs(ArgsManager& argsman) { - SetupHelpOptions(gArgs); + SetupHelpOptions(argsman); const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN); const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET); const auto regtestBaseParams = CreateBaseChainParams(CBaseChainParams::REGTEST); - gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-generate", strprintf("Generate blocks immediately, equivalent to RPC generatenewaddress followed by RPC generatetoaddress. Optional positional integer arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000", DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the results of -getinfo is the result of multiple non-atomic requests. Some entries in the result may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - SetupChainParamsBaseOptions(); - gArgs.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. 
http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-stdinwalletpassphrase", "Read wallet passphrase from standard input as a single line. When combined with -stdin, the first line from standard input is used for the wallet passphrase.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-generate", strprintf("Generate blocks immediately, equivalent to RPC generatenewaddress followed by RPC generatetoaddress. Optional positional integer arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000", DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the results of -getinfo is the result of multiple non-atomic requests. Some entries in the result may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + SetupChainParamsBaseOptions(argsman); + argsman.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. 
(default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-stdinwalletpassphrase", "Read wallet passphrase from standard input as a single line. 
When combined with -stdin, the first line from standard input is used for the wallet passphrase.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); } /** libevent event log callback */ @@ -111,7 +111,7 @@ static int AppInitRPC(int argc, char* argv[]) // // Parameters // - SetupCliArgs(); + SetupCliArgs(gArgs); std::string error; if (!gArgs.ParseParameters(argc, argv, error)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error); @@ -516,8 +516,8 @@ static void ParseError(const UniValue& error, std::string& strPrint, int& nRet) */ static void GetWalletBalances(UniValue& result) { - std::unique_ptr<BaseRequestHandler> rh{MakeUnique<DefaultRequestHandler>()}; - const UniValue listwallets = ConnectAndCallRPC(rh.get(), "listwallets", /* args=*/{}); + DefaultRequestHandler rh; + const UniValue listwallets = ConnectAndCallRPC(&rh, "listwallets", /* args=*/{}); if (!find_value(listwallets, "error").isNull()) return; const UniValue& wallets = find_value(listwallets, "result"); if (wallets.size() <= 1) return; @@ -525,7 +525,7 @@ static void GetWalletBalances(UniValue& result) UniValue balances(UniValue::VOBJ); for (const UniValue& wallet : wallets.getValues()) { const std::string wallet_name = wallet.get_str(); - const UniValue getbalances = ConnectAndCallRPC(rh.get(), "getbalances", /* args=*/{}, wallet_name); + const UniValue getbalances = ConnectAndCallRPC(&rh, "getbalances", /* args=*/{}, wallet_name); const UniValue& balance = find_value(getbalances, "result")["mine"]["trusted"]; balances.pushKV(wallet_name, balance); } @@ -540,8 +540,8 @@ static UniValue GetNewAddress() { Optional<std::string> wallet_name{}; if (gArgs.IsArgSet("-rpcwallet")) wallet_name = gArgs.GetArg("-rpcwallet", ""); - std::unique_ptr<BaseRequestHandler> rh{MakeUnique<DefaultRequestHandler>()}; - return ConnectAndCallRPC(rh.get(), "getnewaddress", /* args=*/{}, wallet_name); + DefaultRequestHandler rh; + return ConnectAndCallRPC(&rh, "getnewaddress", /* args=*/{}, wallet_name); } /** diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index f54a299a36..56afcb6ded 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -36,40 +36,40 @@ static const int CONTINUE_EXECUTION=-1; const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr; -static void SetupBitcoinTxArgs() +static void SetupBitcoinTxArgs(ArgsManager &argsman) { - SetupHelpOptions(gArgs); - - gArgs.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - SetupChainParamsBaseOptions(); - - gArgs.AddArg("delin=N", "Delete input N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("delout=N", "Delete output N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("in=TXID:VOUT(:SEQUENCE_NUMBER)", "Add input to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("locktime=N", "Set TX lock time to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("nversion=N", "Set TX version to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("outaddr=VALUE:ADDRESS", "Add address-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("outdata=[VALUE:]DATA", "Add data-based output to TX", ArgsManager::ALLOW_ANY, 
OptionsCategory::COMMANDS); - gArgs.AddArg("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", "Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS. " + SetupHelpOptions(argsman); + + argsman.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + SetupChainParamsBaseOptions(argsman); + + argsman.AddArg("delin=N", "Delete input N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("delout=N", "Delete output N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("in=TXID:VOUT(:SEQUENCE_NUMBER)", "Add input to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("locktime=N", "Set TX lock time to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("nversion=N", "Set TX version to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("outaddr=VALUE:ADDRESS", "Add address-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("outdata=[VALUE:]DATA", "Add data-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", "Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS. " "Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output. " "Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("outpubkey=VALUE:PUBKEY[:FLAGS]", "Add pay-to-pubkey output to TX. " + argsman.AddArg("outpubkey=VALUE:PUBKEY[:FLAGS]", "Add pay-to-pubkey output to TX. " "Optionally add the \"W\" flag to produce a pay-to-witness-pubkey-hash output. " "Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("outscript=VALUE:SCRIPT[:FLAGS]", "Add raw script output to TX. " + argsman.AddArg("outscript=VALUE:SCRIPT[:FLAGS]", "Add raw script output to TX. " "Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output. " "Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("replaceable(=N)", "Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("sign=SIGHASH-FLAGS", "Add zero or more signatures to transaction. " + argsman.AddArg("replaceable(=N)", "Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("sign=SIGHASH-FLAGS", "Add zero or more signatures to transaction. " "This command requires JSON registers:" "prevtxs=JSON object, " "privatekeys=JSON object. 
" "See signrawtransactionwithkey docs for format of sighash flags, JSON objects.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("load=NAME:FILENAME", "Load JSON file FILENAME into register NAME", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS); - gArgs.AddArg("set=NAME:JSON-STRING", "Set register NAME to given JSON-STRING", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS); + argsman.AddArg("load=NAME:FILENAME", "Load JSON file FILENAME into register NAME", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS); + argsman.AddArg("set=NAME:JSON-STRING", "Set register NAME to given JSON-STRING", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS); } // @@ -81,7 +81,7 @@ static int AppInitRawTx(int argc, char* argv[]) // // Parameters // - SetupBitcoinTxArgs(); + SetupBitcoinTxArgs(gArgs); std::string error; if (!gArgs.ParseParameters(argc, argv, error)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error); diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp index b420463c00..06b0c86476 100644 --- a/src/bitcoin-wallet.cpp +++ b/src/bitcoin-wallet.cpp @@ -19,24 +19,24 @@ const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr; UrlDecodeFn* const URL_DECODE = nullptr; -static void SetupWalletToolArgs() +static void SetupWalletToolArgs(ArgsManager& argsman) { - SetupHelpOptions(gArgs); - SetupChainParamsBaseOptions(); + SetupHelpOptions(argsman); + SetupChainParamsBaseOptions(argsman); - gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS); - gArgs.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS); + argsman.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - gArgs.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); } static bool WalletAppInit(int argc, char* argv[]) { - SetupWalletToolArgs(); + SetupWalletToolArgs(gArgs); std::string error_message; if (!gArgs.ParseParameters(argc, argv, error_message)) { tfm::format(std::cerr, "Error parsing command 
line arguments: %s\n", error_message); diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index b8e8717896..b04cc12059 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -13,9 +13,9 @@ #include <init.h> #include <interfaces/chain.h> #include <node/context.h> +#include <node/ui_interface.h> #include <noui.h> #include <shutdown.h> -#include <ui_interface.h> #include <util/ref.h> #include <util/strencodings.h> #include <util/system.h> @@ -101,6 +101,11 @@ static bool AppInit(int argc, char* argv[]) } } + if (!gArgs.InitSettings(error)) { + InitError(Untranslated(error)); + return false; + } + // -server defaults to true for bitcoind but not for the GUI so do this here gArgs.SoftSetBoolArg("-server", true); // Set this early so that parameter interactions go to console diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp index 5f5bed5bda..9a6fb4abd0 100644 --- a/src/blockfilter.cpp +++ b/src/blockfilter.cpp @@ -291,7 +291,7 @@ uint256 BlockFilter::GetHash() const const std::vector<unsigned char>& data = GetEncodedFilter(); uint256 result; - CHash256().Write(data.data(), data.size()).Finalize(result.begin()); + CHash256().Write(data).Finalize(result); return result; } @@ -301,8 +301,8 @@ uint256 BlockFilter::ComputeHeader(const uint256& prev_header) const uint256 result; CHash256() - .Write(filter_hash.begin(), filter_hash.size()) - .Write(prev_header.begin(), prev_header.size()) - .Finalize(result.begin()); + .Write(filter_hash) + .Write(prev_header) + .Finalize(result); return result; } diff --git a/src/bloom.cpp b/src/bloom.cpp index 54fcf487e4..d182f0728e 100644 --- a/src/bloom.cpp +++ b/src/bloom.cpp @@ -135,8 +135,8 @@ bool CBloomFilter::IsRelevantAndUpdate(const CTransaction& tx) else if ((nFlags & BLOOM_UPDATE_MASK) == BLOOM_UPDATE_P2PUBKEY_ONLY) { std::vector<std::vector<unsigned char> > vSolutions; - txnouttype type = Solver(txout.scriptPubKey, vSolutions); - if (type == TX_PUBKEY || type == TX_MULTISIG) { + TxoutType type = Solver(txout.scriptPubKey, vSolutions); + if (type == TxoutType::PUBKEY || type == TxoutType::MULTISIG) { insert(COutPoint(hash, i)); } } diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 092c45e4ce..ffd2076c9a 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -110,7 +110,7 @@ public: // Note that of those which support the service bits prefix, most only support a subset of // possible options. - // This is fine at runtime as we'll fall back to using them as a oneshot if they don't support the + // This is fine at runtime as we'll fall back to using them as an addrfetch if they don't support the // service bits we want, but we should get them updated to support all service bits wanted by any // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be"); // Pieter Wuille, only supports x1, x5, x9, and xd @@ -341,8 +341,8 @@ public: void CRegTestParams::UpdateActivationParametersFromArgs(const ArgsManager& args) { - if (gArgs.IsArgSet("-segwitheight")) { - int64_t height = gArgs.GetArg("-segwitheight", consensus.SegwitHeight); + if (args.IsArgSet("-segwitheight")) { + int64_t height = args.GetArg("-segwitheight", consensus.SegwitHeight); if (height < -1 || height >= std::numeric_limits<int>::max()) { throw std::runtime_error(strprintf("Activation height %ld for segwit is out of valid range. 
Use -1 to disable segwit.", height)); } else if (height == -1) { diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp index 894b8553c4..1825ced640 100644 --- a/src/chainparamsbase.cpp +++ b/src/chainparamsbase.cpp @@ -15,14 +15,14 @@ const std::string CBaseChainParams::MAIN = "main"; const std::string CBaseChainParams::TESTNET = "test"; const std::string CBaseChainParams::REGTEST = "regtest"; -void SetupChainParamsBaseOptions() +void SetupChainParamsBaseOptions(ArgsManager& argsman) { - gArgs.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); - gArgs.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. " + argsman.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); + argsman.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. " "This is intended for regression testing tools and app development. Equivalent to -chain=regtest.", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS); - gArgs.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); - gArgs.AddArg("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS); + argsman.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS); + argsman.AddArg("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS); } static std::unique_ptr<CBaseChainParams> globalChainBaseParams; diff --git a/src/chainparamsbase.h b/src/chainparamsbase.h index 3c139931ea..1c52d0ea97 100644 --- a/src/chainparamsbase.h +++ b/src/chainparamsbase.h @@ -8,6 +8,8 @@ #include <memory> #include <string> +class ArgsManager; + /** * CBaseChainParams defines the base parameters (shared between bitcoin-cli and bitcoind) * of a given instance of the Bitcoin system. @@ -43,7 +45,7 @@ std::unique_ptr<CBaseChainParams> CreateBaseChainParams(const std::string& chain /** *Set the arguments for chainparams */ -void SetupChainParamsBaseOptions(); +void SetupChainParamsBaseOptions(ArgsManager& argsman); /** * Return the currently selected parameters. This won't change after app diff --git a/src/coins.cpp b/src/coins.cpp index 7b76c13f98..5de2ed7810 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -245,6 +245,14 @@ bool CCoinsViewCache::HaveInputs(const CTransaction& tx) const return true; } +void CCoinsViewCache::ReallocateCache() +{ + // Cache should be empty when we're calling this. 
+ assert(cacheCoins.size() == 0); + cacheCoins.~CCoinsMap(); + ::new (&cacheCoins) CCoinsMap(); +} + static const size_t MIN_TRANSACTION_OUTPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxOut(), PROTOCOL_VERSION); static const size_t MAX_OUTPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_OUTPUT_WEIGHT; diff --git a/src/coins.h b/src/coins.h index a3f34bb0ee..a3e241ac90 100644 --- a/src/coins.h +++ b/src/coins.h @@ -318,6 +318,13 @@ public: //! Check whether all prevouts of the transaction are present in the UTXO set represented by this view bool HaveInputs(const CTransaction& tx) const; + //! Force a reallocation of the cache map. This is required when downsizing + //! the cache because the map's allocator may be hanging onto a lot of + //! memory despite having called .clear(). + //! + //! See: https://stackoverflow.com/questions/42114044/how-to-release-unordered-map-memory + void ReallocateCache(); + private: /** * @note this is marked const, but may actually append to `cacheCoins`, increasing diff --git a/src/consensus/validation.h b/src/consensus/validation.h index a79e7b9d12..2a93a090d6 100644 --- a/src/consensus/validation.h +++ b/src/consensus/validation.h @@ -26,16 +26,21 @@ enum class TxValidationResult { * is uninteresting. */ TX_RECENT_CONSENSUS_CHANGE, - TX_NOT_STANDARD, //!< didn't meet our local policy rules + TX_INPUTS_NOT_STANDARD, //!< inputs (covered by txid) failed policy rules + TX_NOT_STANDARD, //!< otherwise didn't meet our local policy rules TX_MISSING_INPUTS, //!< transaction was missing some of its inputs TX_PREMATURE_SPEND, //!< transaction spends a coinbase too early, or violates locktime/sequence locks /** - * Transaction might be missing a witness, have a witness prior to SegWit + * Transaction might have a witness prior to SegWit * activation, or witness may have been malleated (which includes * non-standard witnesses). */ TX_WITNESS_MUTATED, /** + * Transaction is missing a witness. + */ + TX_WITNESS_STRIPPED, + /** * Tx already in mempool or conflicts with a tx in the chain * (if it conflicts with another tx in mempool, we use MEMPOOL_POLICY as it failed to reach the RBF threshold) * Currently this is only used if the transaction already exists in the mempool or on chain. @@ -75,37 +80,39 @@ enum class BlockValidationResult { * by TxValidationState and BlockValidationState for validation information on transactions * and blocks respectively. 
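The split of the old TX_NOT_STANDARD result into TX_INPUTS_NOT_STANDARD plus the new TX_WITNESS_STRIPPED value gives callers enough detail to decide whether a rejection is attributable to the txid at all. A minimal sketch of how calling code might branch on the new results; the helper name and the exact policy below are illustrative assumptions, not taken from this diff:

    // Sketch only: assumes a TxValidationState `state` produced by some validation call.
    static bool ShouldRememberAsInvalid(const TxValidationState& state)
    {
        if (state.IsValid()) return false;
        switch (state.GetResult()) {
        case TxValidationResult::TX_WITNESS_STRIPPED:
            // A stripped transaction has the same txid as the real one, so the txid
            // alone proves nothing; don't cache it as permanently invalid.
            return false;
        case TxValidationResult::TX_INPUTS_NOT_STANDARD:
            // The failure is attributable to the inputs, which are covered by the txid.
            return true;
        default:
            return true;
        }
    }
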
*/ template <typename Result> -class ValidationState { +class ValidationState +{ private: - enum mode_state { - MODE_VALID, //!< everything ok - MODE_INVALID, //!< network rule violation (DoS value may be set) - MODE_ERROR, //!< run-time error - } m_mode{MODE_VALID}; + enum class ModeState { + M_VALID, //!< everything ok + M_INVALID, //!< network rule violation (DoS value may be set) + M_ERROR, //!< run-time error + } m_mode{ModeState::M_VALID}; Result m_result{}; std::string m_reject_reason; std::string m_debug_message; + public: bool Invalid(Result result, - const std::string &reject_reason="", - const std::string &debug_message="") + const std::string& reject_reason = "", + const std::string& debug_message = "") { m_result = result; m_reject_reason = reject_reason; m_debug_message = debug_message; - if (m_mode != MODE_ERROR) m_mode = MODE_INVALID; + if (m_mode != ModeState::M_ERROR) m_mode = ModeState::M_INVALID; return false; } bool Error(const std::string& reject_reason) { - if (m_mode == MODE_VALID) + if (m_mode == ModeState::M_VALID) m_reject_reason = reject_reason; - m_mode = MODE_ERROR; + m_mode = ModeState::M_ERROR; return false; } - bool IsValid() const { return m_mode == MODE_VALID; } - bool IsInvalid() const { return m_mode == MODE_INVALID; } - bool IsError() const { return m_mode == MODE_ERROR; } + bool IsValid() const { return m_mode == ModeState::M_VALID; } + bool IsInvalid() const { return m_mode == ModeState::M_INVALID; } + bool IsError() const { return m_mode == ModeState::M_ERROR; } Result GetResult() const { return m_result; } std::string GetRejectReason() const { return m_reject_reason; } std::string GetDebugMessage() const { return m_debug_message; } diff --git a/src/core_write.cpp b/src/core_write.cpp index 429c9c5a1a..f9d918cb6d 100644 --- a/src/core_write.cpp +++ b/src/core_write.cpp @@ -48,13 +48,14 @@ std::string FormatScript(const CScript& script) } } if (vch.size() > 0) { - ret += strprintf("0x%x 0x%x ", HexStr(it2, it - vch.size()), HexStr(it - vch.size(), it)); + ret += strprintf("0x%x 0x%x ", HexStr(std::vector<uint8_t>(it2, it - vch.size())), + HexStr(std::vector<uint8_t>(it - vch.size(), it))); } else { - ret += strprintf("0x%x ", HexStr(it2, it)); + ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, it))); } continue; } - ret += strprintf("0x%x ", HexStr(it2, script.end())); + ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, script.end()))); break; } return ret.substr(0, ret.size() - 1); @@ -140,11 +141,11 @@ void ScriptToUniv(const CScript& script, UniValue& out, bool include_address) out.pushKV("hex", HexStr(script)); std::vector<std::vector<unsigned char>> solns; - txnouttype type = Solver(script, solns); + TxoutType type = Solver(script, solns); out.pushKV("type", GetTxnOutputType(type)); CTxDestination address; - if (include_address && ExtractDestination(script, address) && type != TX_PUBKEY) { + if (include_address && ExtractDestination(script, address) && type != TxoutType::PUBKEY) { out.pushKV("address", EncodeDestination(address)); } } @@ -152,7 +153,7 @@ void ScriptToUniv(const CScript& script, UniValue& out, bool include_address) void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex) { - txnouttype type; + TxoutType type; std::vector<CTxDestination> addresses; int nRequired; @@ -160,7 +161,7 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey, if (fIncludeHex) out.pushKV("hex", HexStr(scriptPubKey)); - if (!ExtractDestinations(scriptPubKey, type, addresses, nRequired) || type == TX_PUBKEY) { 
+ if (!ExtractDestinations(scriptPubKey, type, addresses, nRequired) || type == TxoutType::PUBKEY) { out.pushKV("type", GetTxnOutputType(type)); return; } @@ -179,7 +180,9 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry, { entry.pushKV("txid", tx.GetHash().GetHex()); entry.pushKV("hash", tx.GetWitnessHash().GetHex()); - entry.pushKV("version", tx.nVersion); + // Transaction version is actually unsigned in consensus checks, just signed in memory, + // so cast to unsigned before giving it to the user. + entry.pushKV("version", static_cast<int64_t>(static_cast<uint32_t>(tx.nVersion))); entry.pushKV("size", (int)::GetSerializeSize(tx, PROTOCOL_VERSION)); entry.pushKV("vsize", (GetTransactionWeight(tx) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR); entry.pushKV("weight", GetTransactionWeight(tx)); diff --git a/src/crypto/common.h b/src/crypto/common.h index e7bb020a19..5b4932c992 100644 --- a/src/crypto/common.h +++ b/src/crypto/common.h @@ -82,12 +82,12 @@ void static inline WriteBE64(unsigned char* ptr, uint64_t x) /** Return the smallest number n such that (x >> n) == 0 (or 64 if the highest bit in x is set. */ uint64_t static inline CountBits(uint64_t x) { -#if HAVE_DECL___BUILTIN_CLZL +#if HAVE_BUILTIN_CLZL if (sizeof(unsigned long) >= sizeof(uint64_t)) { return x ? 8 * sizeof(unsigned long) - __builtin_clzl(x) : 0; } #endif -#if HAVE_DECL___BUILTIN_CLZLL +#if HAVE_BUILTIN_CLZLL if (sizeof(unsigned long long) >= sizeof(uint64_t)) { return x ? 8 * sizeof(unsigned long long) - __builtin_clzll(x) : 0; } diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 116d7d8679..215b033708 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -292,18 +292,6 @@ public: // Get an estimate of LevelDB memory usage (in bytes). size_t DynamicMemoryUsage() const; - // not available for LevelDB; provide for compatibility with BDB - bool Flush() - { - return true; - } - - bool Sync() - { - CDBBatch batch(*this); - return WriteBatch(batch, true); - } - CDBIterator *NewIterator() { return new CDBIterator(*this, pdb->NewIterator(iteroptions)); diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp index 0f7848bae1..18dc7a69e2 100644 --- a/src/dummywallet.cpp +++ b/src/dummywallet.cpp @@ -20,14 +20,14 @@ class DummyWalletInit : public WalletInitInterface { public: bool HasWalletSupport() const override {return false;} - void AddWalletOptions() const override; + void AddWalletOptions(ArgsManager& argsman) const override; bool ParameterInteraction() const override {return true;} void Construct(NodeContext& node) const override {LogPrintf("No wallet support compiled in!\n");} }; -void DummyWalletInit::AddWalletOptions() const +void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const { - gArgs.AddHiddenArgs({ + argsman.AddHiddenArgs({ "-addresstype", "-avoidpartialspends", "-changetype", diff --git a/src/hash.cpp b/src/hash.cpp index 26150e5ca8..4c09f5f646 100644 --- a/src/hash.cpp +++ b/src/hash.cpp @@ -12,7 +12,7 @@ inline uint32_t ROTL32(uint32_t x, int8_t r) return (x << r) | (x >> (32 - r)); } -unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash) +unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash) { // The following is MurmurHash3 (x86_32), see http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp uint32_t h1 = nHashSeed; diff --git a/src/hash.h b/src/hash.h index c295568a3e..71806483ff 100644 --- a/src/hash.h +++ b/src/hash.h @@ -25,14 +25,15 @@ private: public: static 
const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE; - void Finalize(unsigned char hash[OUTPUT_SIZE]) { + void Finalize(Span<unsigned char> output) { + assert(output.size() == OUTPUT_SIZE); unsigned char buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); - sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash); + sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data()); } - CHash256& Write(const unsigned char *data, size_t len) { - sha.Write(data, len); + CHash256& Write(Span<const unsigned char> input) { + sha.Write(input.data(), input.size()); return *this; } @@ -49,14 +50,15 @@ private: public: static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE; - void Finalize(unsigned char hash[OUTPUT_SIZE]) { + void Finalize(Span<unsigned char> output) { + assert(output.size() == OUTPUT_SIZE); unsigned char buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); - CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash); + CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data()); } - CHash160& Write(const unsigned char *data, size_t len) { - sha.Write(data, len); + CHash160& Write(Span<const unsigned char> input) { + sha.Write(input.data(), input.size()); return *this; } @@ -67,52 +69,31 @@ public: }; /** Compute the 256-bit hash of an object. */ -template<typename T1> -inline uint256 Hash(const T1 pbegin, const T1 pend) +template<typename T> +inline uint256 Hash(const T& in1) { - static const unsigned char pblank[1] = {}; uint256 result; - CHash256().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0])) - .Finalize((unsigned char*)&result); + CHash256().Write(MakeUCharSpan(in1)).Finalize(result); return result; } /** Compute the 256-bit hash of the concatenation of two objects. */ template<typename T1, typename T2> -inline uint256 Hash(const T1 p1begin, const T1 p1end, - const T2 p2begin, const T2 p2end) { - static const unsigned char pblank[1] = {}; +inline uint256 Hash(const T1& in1, const T2& in2) { uint256 result; - CHash256().Write(p1begin == p1end ? pblank : (const unsigned char*)&p1begin[0], (p1end - p1begin) * sizeof(p1begin[0])) - .Write(p2begin == p2end ? pblank : (const unsigned char*)&p2begin[0], (p2end - p2begin) * sizeof(p2begin[0])) - .Finalize((unsigned char*)&result); + CHash256().Write(MakeUCharSpan(in1)).Write(MakeUCharSpan(in2)).Finalize(result); return result; } /** Compute the 160-bit hash an object. */ template<typename T1> -inline uint160 Hash160(const T1 pbegin, const T1 pend) +inline uint160 Hash160(const T1& in1) { - static unsigned char pblank[1] = {}; uint160 result; - CHash160().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0])) - .Finalize((unsigned char*)&result); + CHash160().Write(MakeUCharSpan(in1)).Finalize(result); return result; } -/** Compute the 160-bit hash of a vector. */ -inline uint160 Hash160(const std::vector<unsigned char>& vch) -{ - return Hash160(vch.begin(), vch.end()); -} - -/** Compute the 160-bit hash of a vector. */ -template<unsigned int N> -inline uint160 Hash160(const prevector<N, unsigned char>& vch) -{ - return Hash160(vch.begin(), vch.end()); -} - /** A writer stream (for serialization) that computes a 256-bit hash. 
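With the object/Span-based templates above, callers hand whole byte containers to Hash() and Hash160() instead of begin/end iterator pairs. A rough migration sketch, assuming hash.h from this tree is included; the function names and the payload parameters are illustrative:

    #include <hash.h>
    #include <vector>

    // old: Hash(vch.begin(), vch.end());      new: Hash(vch)
    // old: Hash160(vch.begin(), vch.end());   new: Hash160(vch)
    uint256 ExampleDoubleSha256(const std::vector<unsigned char>& payload)
    {
        return Hash(payload);       // double-SHA256 over the bytes of payload
    }

    uint160 ExampleHash160(const std::vector<unsigned char>& payload)
    {
        return Hash160(payload);    // RIPEMD160(SHA256(payload))
    }

    uint256 ExampleConcatenatedHash(const std::vector<unsigned char>& a,
                                    const std::vector<unsigned char>& b)
    {
        return Hash(a, b);          // double-SHA256 of the concatenation of a and b
    }
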
*/ class CHashWriter { @@ -129,13 +110,13 @@ public: int GetVersion() const { return nVersion; } void write(const char *pch, size_t size) { - ctx.Write((const unsigned char*)pch, size); + ctx.Write({(const unsigned char*)pch, size}); } // invalidates the object uint256 GetHash() { uint256 result; - ctx.Finalize((unsigned char*)&result); + ctx.Finalize(result); return result; } @@ -200,7 +181,7 @@ uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL return ss.GetHash(); } -unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash); +unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash); void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char header, const unsigned char data[32], unsigned char output[64]); diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 5e78fd1d71..1e5ea2de83 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -7,10 +7,10 @@ #include <chainparamsbase.h> #include <compat.h> #include <netbase.h> +#include <node/ui_interface.h> #include <rpc/protocol.h> // For HTTP status codes #include <shutdown.h> #include <sync.h> -#include <ui_interface.h> #include <util/strencodings.h> #include <util/system.h> #include <util/threadnames.h> diff --git a/src/index/base.cpp b/src/index/base.cpp index a93b67395d..f587205a28 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -4,9 +4,9 @@ #include <chainparams.h> #include <index/base.h> +#include <node/ui_interface.h> #include <shutdown.h> #include <tinyformat.h> -#include <ui_interface.h> #include <util/system.h> #include <util/translation.h> #include <validation.h> diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp index 59d1888fff..64472714cc 100644 --- a/src/index/txindex.cpp +++ b/src/index/txindex.cpp @@ -3,8 +3,8 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <index/txindex.h> +#include <node/ui_interface.h> #include <shutdown.h> -#include <ui_interface.h> #include <util/system.h> #include <util/translation.h> #include <validation.h> diff --git a/src/init.cpp b/src/init.cpp index 8d9566edc3..08944b79a5 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -31,6 +31,7 @@ #include <net_processing.h> #include <netbase.h> #include <node/context.h> +#include <node/ui_interface.h> #include <policy/feerate.h> #include <policy/fees.h> #include <policy/policy.h> @@ -48,8 +49,8 @@ #include <torcontrol.h> #include <txdb.h> #include <txmempool.h> -#include <ui_interface.h> #include <util/asmap.h> +#include <util/check.h> #include <util/moneystr.h> #include <util/string.h> #include <util/system.h> @@ -368,9 +369,10 @@ void SetupServerArgs(NodeContext& node) { assert(!node.args); node.args = &gArgs; + ArgsManager& argsman = *node.args; SetupHelpOptions(gArgs); - gArgs.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now + argsman.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN); const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET); @@ -385,108 +387,109 @@ void SetupServerArgs(NodeContext& node) // GUI args. 
These will be overwritten by SetupUIArgs for the GUI "-choosedatadir", "-lang=<lang>", "-min", "-resetguisettings", "-splash", "-uiplatform"}; - gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #if HAVE_SYSTEM - gArgs.AddArg("-alertnotify=<cmd>", "Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-alertnotify=<cmd>", "Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #endif - gArgs.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #if HAVE_SYSTEM - gArgs.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #endif - gArgs.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless '-whitelistforcerelay' is '1', in which case whitelisted peers' transactions will be relayed. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. 
(default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); - gArgs.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); - gArgs.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); - gArgs.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)", + argsman.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. 
(default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); + argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); + argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); + argsman.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)", -GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. 
" + argsman.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. " "Warning: Reverting this setting requires re-downloading the entire blockchain. " "(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)", MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. 
(default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #ifndef WIN32 - gArgs.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #else hidden_args.emplace_back("-sysperms"); #endif - gArgs.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-blockfilterindex=<type>", + argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-blockfilterindex=<type>", strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) + " If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); - gArgs.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-banscore=<n>", strprintf("Threshold for disconnecting misbehaving peers (default: %u)", DEFAULT_BANSCORE_THRESHOLD), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-bantime=<n>", strprintf("Number of seconds to keep misbehaving peers from reconnecting (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); - gArgs.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). 
This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); - gArgs.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'noban' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. 
This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); - gArgs.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); - gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION); + argsman.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-bind=<addr>", "Bind to given address and always listen on it. 
Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. 
This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); + argsman.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION); #ifdef USE_UPNP #if USE_UPNP - gArgs.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); #else - gArgs.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); #endif #else hidden_args.emplace_back("-upnp"); #endif - gArgs.AddArg("-whitebind=<[permissions@]addr>", "Bind to given address and whitelist peers connecting to it. 
" + argsman.AddArg("-whitebind=<[permissions@]addr>", "Bind to the given address and add permission flags to the peers connecting to it. " "Use [host]:port notation for IPv6. Allowed permissions: " + Join(NET_PERMISSIONS_DOC, ", ") + ". " - "Specify multiple permissions separated by commas (default: noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + "Specify multiple permissions separated by commas (default: download,noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-whitelist=<[permissions@]IP address or network>", "Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or " - "CIDR notated network(e.g. 1.2.3.0/24). Uses same permissions as " + argsman.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers connecting from the given IP address (e.g. 1.2.3.4) or " + "CIDR-notated network (e.g. 1.2.3.0/24). Uses the same permissions as " "-whitebind. Can be specified multiple times." , ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - g_wallet_init_interface.AddWalletOptions(); + g_wallet_init_interface.AddWalletOptions(argsman); #if ENABLE_ZMQ - gArgs.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubhashblockhwm=<n>", strprintf("Set publish hash block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubhashtxhwm=<n>", strprintf("Set publish hash transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubrawblockhwm=<n>", strprintf("Set publish raw block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); - gArgs.AddArg("-zmqpubrawtxhwm=<n>", strprintf("Set publish raw transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubhashblockhwm=<n>", strprintf("Set publish hash block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubhashtxhwm=<n>", strprintf("Set publish hash transaction outbound message high water mark (default: %d)", 
CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubrawblockhwm=<n>", strprintf("Set publish raw block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); + argsman.AddArg("-zmqpubrawtxhwm=<n>", strprintf("Set publish raw transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ); #else hidden_args.emplace_back("-zmqpubhashblock=<address>"); hidden_args.emplace_back("-zmqpubhashtx=<address>"); @@ -498,82 +501,82 @@ void SetupServerArgs(NodeContext& node) hidden_args.emplace_back("-zmqpubrawtxhwm=<n>"); #endif - gArgs.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block 295000 (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool 
descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). " + argsman.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block 295000 (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", 
DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). " "If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ".", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); #ifdef HAVE_THREAD_LOCAL - gArgs.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); #else hidden_args.emplace_back("-logthreadnames"); #endif - gArgs.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, 
OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - - SetupChainParamsBaseOptions(); - - gArgs.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)", + argsman.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + 
argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + + SetupChainParamsBaseOptions(argsman); + + argsman.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - - - gArgs.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); - gArgs.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. 
(default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); - gArgs.AddArg("-blockversion=<n>", "Override block version to test forking scenarios", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::BLOCK_CREATION); - - gArgs.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); - gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC); - gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); - gArgs.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); - gArgs.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); - gArgs.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); - gArgs.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. 
See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC); - gArgs.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); - gArgs.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + argsman.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + + + argsman.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); + argsman.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); + argsman.AddArg("-blockversion=<n>", "Override block version to test forking scenarios", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::BLOCK_CREATION); + + argsman.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); + argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. 
This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC); + argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); + argsman.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); + argsman.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); + argsman.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); + argsman.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + argsman.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. 
If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC); + argsman.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); + argsman.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); #if HAVE_DECL_DAEMON - gArgs.AddArg("-daemon", "Run in the background as a daemon and accept commands", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-daemon", "Run in the background as a daemon and accept commands", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #else hidden_args.emplace_back("-daemon"); #endif // Add the hidden options - gArgs.AddHiddenArgs(hidden_args); + argsman.AddHiddenArgs(hidden_args); } std::string LicenseInfo() @@ -774,13 +777,14 @@ static bool InitSanityCheck() return true; } -static bool AppInitServers(const util::Ref& context) +static bool AppInitServers(const util::Ref& context, NodeContext& node) { RPCServer::OnStarted(&OnRPCStarted); RPCServer::OnStopped(&OnRPCStopped); if (!InitHTTPServer()) return false; StartRPC(); + node.rpc_interruption_point = RpcInterruptionPoint; if (!StartHTTPRPC(context)) return false; if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) StartREST(context); @@ -1317,8 +1321,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) node.scheduler = MakeUnique<CScheduler>(); // Start the lightweight task scheduler thread - CScheduler::Function serviceLoop = [&node]{ node.scheduler->serviceQueue(); }; - threadGroup.create_thread(std::bind(&TraceThread<CScheduler::Function>, "scheduler", serviceLoop)); + threadGroup.create_thread([&] { TraceThread("scheduler", [&] { node.scheduler->serviceQueue(); }); }); // Gather some entropy once per minute. node.scheduler->scheduleEvery([]{ @@ -1352,7 +1355,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) if (gArgs.GetBoolArg("-server", false)) { uiInterface.InitMessage_connect(SetRPCWarmupStatus); - if (!AppInitServers(context)) + if (!AppInitServers(context, node)) return InitError(_("Unable to start HTTP server. See debug log for details.")); } @@ -1372,16 +1375,16 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) assert(!node.banman); node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", &uiInterface, gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); assert(!node.connman); - node.connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()))); + node.connman = MakeUnique<CConnman>(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()), gArgs.GetBoolArg("-networkactive", true)); // Make mempool generally available in the node context. For example the connection manager, wallet, or RPC threads, // which are all started after this, may use it from the node context. 
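// Illustrative sketch (not part of the patch above; assumes init.cpp's existing
// includes): once node.mempool is set just below, components that start later
// can read the mempool through the NodeContext they are handed instead of the
// ::mempool global, roughly:
static size_t MempoolTxCount(const NodeContext& node)
{
    return node.mempool ? node.mempool->size() : 0; // pointer may still be null in stripped-down test setups
}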
assert(!node.mempool); node.mempool = &::mempool; assert(!node.chainman); node.chainman = &g_chainman; - ChainstateManager& chainman = EnsureChainman(node); + ChainstateManager& chainman = *Assert(node.chainman); - node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler, *node.chainman, *node.mempool)); + node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler, chainman, *node.mempool)); RegisterValidationInterface(node.peer_logic.get()); // sanitize comments per BIP-0014, format user agent and check total size @@ -1532,7 +1535,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // use 25%-50% of the remainder for disk cache nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); // cap total coins db cache nTotalCache -= nCoinDBCache; - nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache + int64_t nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; LogPrintf("Cache configuration:\n"); LogPrintf("* Using %.1f MiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024)); @@ -1561,7 +1564,10 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) try { LOCK(cs_main); chainman.InitializeChainstate(); - UnloadBlockIndex(); + chainman.m_total_coinstip_cache = nCoinCacheUsage; + chainman.m_total_coinsdb_cache = nCoinDBCache; + + UnloadBlockIndex(node.mempool); // new CBlockTreeDB tries to delete the existing file, which // fails if it's still open from the previous loop. Close it first: @@ -1589,7 +1595,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way around). - if (!::BlockIndex().empty() && + if (!chainman.BlockIndex().empty() && !LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) { return InitError(_("Incorrect or no genesis block found. 
Wrong datadir for network?")); } @@ -1644,7 +1650,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) } // The on-disk coinsdb is now in a good state, create the cache - chainstate->InitCoinsCache(); + chainstate->InitCoinsCache(nCoinCacheUsage); assert(chainstate->CanFlushToDisk()); if (!is_coinsview_empty(chainstate)) { @@ -1869,8 +1875,8 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) //// debug print { LOCK(cs_main); - LogPrintf("block tree size = %u\n", ::BlockIndex().size()); - chain_active_height = ::ChainActive().Height(); + LogPrintf("block tree size = %u\n", chainman.BlockIndex().size()); + chain_active_height = chainman.ActiveChain().Height(); } LogPrintf("nBestHeight = %d\n", chain_active_height); @@ -1888,7 +1894,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) connOptions.nLocalServices = nLocalServices; connOptions.nMaxConnections = nMaxConnections; connOptions.m_max_outbound_full_relay = std::min(MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, connOptions.nMaxConnections); - connOptions.m_max_outbound_block_relay = std::min(MAX_BLOCKS_ONLY_CONNECTIONS, connOptions.nMaxConnections-connOptions.m_max_outbound_full_relay); + connOptions.m_max_outbound_block_relay = std::min(MAX_BLOCK_RELAY_ONLY_CONNECTIONS, connOptions.nMaxConnections-connOptions.m_max_outbound_full_relay); connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS; connOptions.nMaxFeeler = MAX_FEELER_CONNECTIONS; connOptions.nBestHeight = chain_active_height; diff --git a/src/interfaces/chain.cpp b/src/interfaces/chain.cpp index d1e04b114d..d49e4454af 100644 --- a/src/interfaces/chain.cpp +++ b/src/interfaces/chain.cpp @@ -13,6 +13,7 @@ #include <node/coin.h> #include <node/context.h> #include <node/transaction.h> +#include <node/ui_interface.h> #include <policy/fees.h> #include <policy/policy.h> #include <policy/rbf.h> @@ -25,7 +26,6 @@ #include <sync.h> #include <timedata.h> #include <txmempool.h> -#include <ui_interface.h> #include <uint256.h> #include <univalue.h> #include <util/system.h> diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h index 65695707f7..bbeb0fa801 100644 --- a/src/interfaces/chain.h +++ b/src/interfaces/chain.h @@ -15,6 +15,7 @@ #include <string> #include <vector> +class ArgsManager; class CBlock; class CFeeRate; class CRPCCommand; @@ -322,7 +323,7 @@ std::unique_ptr<Chain> MakeChain(NodeContext& node); //! analysis, or fee estimation. These clients need to expose their own //! MakeXXXClient functions returning their implementations of the ChainClient //! interface. 
-std::unique_ptr<ChainClient> MakeWalletClient(Chain& chain, std::vector<std::string> wallet_filenames); +std::unique_ptr<ChainClient> MakeWalletClient(Chain& chain, ArgsManager& args, std::vector<std::string> wallet_filenames); } // namespace interfaces diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp index d420788dbe..969767b90f 100644 --- a/src/interfaces/node.cpp +++ b/src/interfaces/node.cpp @@ -17,6 +17,7 @@ #include <netaddress.h> #include <netbase.h> #include <node/context.h> +#include <node/ui_interface.h> #include <policy/feerate.h> #include <policy/fees.h> #include <policy/settings.h> @@ -26,7 +27,6 @@ #include <support/allocators/secure.h> #include <sync.h> #include <txmempool.h> -#include <ui_interface.h> #include <util/ref.h> #include <util/system.h> #include <util/translation.h> @@ -56,6 +56,7 @@ namespace { class NodeImpl : public Node { public: + NodeImpl(NodeContext* context) { setContext(context); } void initError(const bilingual_str& message) override { InitError(message); } bool parseParameters(int argc, const char* const argv[], std::string& error) override { @@ -66,6 +67,7 @@ public: bool softSetArg(const std::string& arg, const std::string& value) override { return gArgs.SoftSetArg(arg, value); } bool softSetBoolArg(const std::string& arg, bool value) override { return gArgs.SoftSetBoolArg(arg, value); } void selectParams(const std::string& network) override { SelectParams(network); } + bool initSettings(std::string& error) override { return gArgs.InitSettings(error); } uint64_t getAssumedBlockchainSize() override { return Params().AssumedBlockchainSize(); } uint64_t getAssumedChainStateSize() override { return Params().AssumedChainStateSize(); } std::string getNetwork() override { return Params().NetworkIDString(); } @@ -80,13 +82,13 @@ public: } bool appInitMain() override { - m_context.chain = MakeChain(m_context); - return AppInitMain(m_context_ref, m_context); + m_context->chain = MakeChain(*m_context); + return AppInitMain(m_context_ref, *m_context); } void appShutdown() override { - Interrupt(m_context); - Shutdown(m_context); + Interrupt(*m_context); + Shutdown(*m_context); } void startShutdown() override { @@ -107,19 +109,19 @@ public: StopMapPort(); } } - void setupServerArgs() override { return SetupServerArgs(m_context); } + void setupServerArgs() override { return SetupServerArgs(*m_context); } bool getProxy(Network net, proxyType& proxy_info) override { return GetProxy(net, proxy_info); } size_t getNodeCount(CConnman::NumConnections flags) override { - return m_context.connman ? m_context.connman->GetNodeCount(flags) : 0; + return m_context->connman ? 
m_context->connman->GetNodeCount(flags) : 0; } bool getNodesStats(NodesStats& stats) override { stats.clear(); - if (m_context.connman) { + if (m_context->connman) { std::vector<CNodeStats> stats_temp; - m_context.connman->GetNodeStats(stats_temp); + m_context->connman->GetNodeStats(stats_temp); stats.reserve(stats_temp.size()); for (auto& node_stats_temp : stats_temp) { @@ -140,46 +142,46 @@ public: } bool getBanned(banmap_t& banmap) override { - if (m_context.banman) { - m_context.banman->GetBanned(banmap); + if (m_context->banman) { + m_context->banman->GetBanned(banmap); return true; } return false; } - bool ban(const CNetAddr& net_addr, BanReason reason, int64_t ban_time_offset) override + bool ban(const CNetAddr& net_addr, int64_t ban_time_offset) override { - if (m_context.banman) { - m_context.banman->Ban(net_addr, reason, ban_time_offset); + if (m_context->banman) { + m_context->banman->Ban(net_addr, ban_time_offset); return true; } return false; } bool unban(const CSubNet& ip) override { - if (m_context.banman) { - m_context.banman->Unban(ip); + if (m_context->banman) { + m_context->banman->Unban(ip); return true; } return false; } bool disconnectByAddress(const CNetAddr& net_addr) override { - if (m_context.connman) { - return m_context.connman->DisconnectNode(net_addr); + if (m_context->connman) { + return m_context->connman->DisconnectNode(net_addr); } return false; } bool disconnectById(NodeId id) override { - if (m_context.connman) { - return m_context.connman->DisconnectNode(id); + if (m_context->connman) { + return m_context->connman->DisconnectNode(id); } return false; } - int64_t getTotalBytesRecv() override { return m_context.connman ? m_context.connman->GetTotalBytesRecv() : 0; } - int64_t getTotalBytesSent() override { return m_context.connman ? m_context.connman->GetTotalBytesSent() : 0; } - size_t getMempoolSize() override { return m_context.mempool ? m_context.mempool->size() : 0; } - size_t getMempoolDynamicUsage() override { return m_context.mempool ? m_context.mempool->DynamicMemoryUsage() : 0; } + int64_t getTotalBytesRecv() override { return m_context->connman ? m_context->connman->GetTotalBytesRecv() : 0; } + int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; } + size_t getMempoolSize() override { return m_context->mempool ? m_context->mempool->size() : 0; } + size_t getMempoolDynamicUsage() override { return m_context->mempool ? 
m_context->mempool->DynamicMemoryUsage() : 0; } bool getHeaderTip(int& height, int64_t& block_time) override { LOCK(::cs_main); @@ -222,11 +224,11 @@ public: bool getImporting() override { return ::fImporting; } void setNetworkActive(bool active) override { - if (m_context.connman) { - m_context.connman->SetNetworkActive(active); + if (m_context->connman) { + m_context->connman->SetNetworkActive(active); } } - bool getNetworkActive() override { return m_context.connman && m_context.connman->GetNetworkActive(); } + bool getNetworkActive() override { return m_context->connman && m_context->connman->GetNetworkActive(); } CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) override { FeeCalculation fee_calc; @@ -268,7 +270,7 @@ public: std::vector<std::unique_ptr<Wallet>> getWallets() override { std::vector<std::unique_ptr<Wallet>> wallets; - for (auto& client : m_context.chain_clients) { + for (auto& client : m_context->chain_clients) { auto client_wallets = client->getWallets(); std::move(client_wallets.begin(), client_wallets.end(), std::back_inserter(wallets)); } @@ -276,12 +278,12 @@ public: } std::unique_ptr<Wallet> loadWallet(const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) override { - return MakeWallet(LoadWallet(*m_context.chain, name, error, warnings)); + return MakeWallet(LoadWallet(*m_context->chain, name, error, warnings)); } std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings, WalletCreationStatus& status) override { std::shared_ptr<CWallet> wallet; - status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet); + status = CreateWallet(*m_context->chain, passphrase, wallet_creation_flags, name, error, warnings, wallet); return MakeWallet(wallet); } std::unique_ptr<Handler> handleInitMessage(InitMessageFn fn) override @@ -335,13 +337,22 @@ public: /* verification progress is unused when a header was received */ 0); })); } - NodeContext* context() override { return &m_context; } - NodeContext m_context; - util::Ref m_context_ref{m_context}; + NodeContext* context() override { return m_context; } + void setContext(NodeContext* context) override + { + m_context = context; + if (context) { + m_context_ref.Set(*context); + } else { + m_context_ref.Clear(); + } + } + NodeContext* m_context{nullptr}; + util::Ref m_context_ref; }; } // namespace -std::unique_ptr<Node> MakeNode() { return MakeUnique<NodeImpl>(); } +std::unique_ptr<Node> MakeNode(NodeContext* context) { return MakeUnique<NodeImpl>(context); } } // namespace interfaces diff --git a/src/interfaces/node.h b/src/interfaces/node.h index 877a40568f..cd3cfe487d 100644 --- a/src/interfaces/node.h +++ b/src/interfaces/node.h @@ -66,6 +66,11 @@ public: //! Choose network parameters. virtual void selectParams(const std::string& network) = 0; + //! Read and update <datadir>/settings.json file with saved settings. This + //! needs to be called after selectParams() because the settings file + //! location is network-specific. + virtual bool initSettings(std::string& error) = 0; + //! Get the (assumed) blockchain size. virtual uint64_t getAssumedBlockchainSize() = 0; @@ -122,7 +127,7 @@ public: virtual bool getBanned(banmap_t& banmap) = 0; //! Ban node. 
- virtual bool ban(const CNetAddr& net_addr, BanReason reason, int64_t ban_time_offset) = 0; + virtual bool ban(const CNetAddr& net_addr, int64_t ban_time_offset) = 0; //! Unban node. virtual bool unban(const CSubNet& ip) = 0; @@ -263,12 +268,14 @@ public: std::function<void(SynchronizationState, interfaces::BlockTip tip, double verification_progress)>; virtual std::unique_ptr<Handler> handleNotifyHeaderTip(NotifyHeaderTipFn fn) = 0; - //! Return pointer to internal chain interface, useful for testing. + //! Get and set internal node context. Useful for testing, but not + //! accessible across processes. virtual NodeContext* context() { return nullptr; } + virtual void setContext(NodeContext* context) { } }; //! Return implementation of Node interface. -std::unique_ptr<Node> MakeNode(); +std::unique_ptr<Node> MakeNode(NodeContext* context = nullptr); //! Block tip (could be a header or not, depends on the subscribed signal). struct BlockTip { diff --git a/src/interfaces/wallet.cpp b/src/interfaces/wallet.cpp index b65eb72b1c..7fd24425cf 100644 --- a/src/interfaces/wallet.cpp +++ b/src/interfaces/wallet.cpp @@ -13,11 +13,11 @@ #include <script/standard.h> #include <support/allocators/secure.h> #include <sync.h> -#include <ui_interface.h> #include <uint256.h> #include <util/check.h> #include <util/ref.h> #include <util/system.h> +#include <util/ui_change_type.h> #include <wallet/context.h> #include <wallet/feebumper.h> #include <wallet/fees.h> @@ -438,7 +438,6 @@ public: bool canGetAddresses() override { return m_wallet->CanGetAddresses(); } bool privateKeysDisabled() override { return m_wallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS); } OutputType getDefaultAddressType() override { return m_wallet->m_default_address_type; } - OutputType getDefaultChangeType() override { return m_wallet->m_default_change_type; } CAmount getDefaultMaxTxFee() override { return m_wallet->m_default_max_tx_fee; } void remove() override { @@ -484,10 +483,11 @@ public: class WalletClientImpl : public ChainClient { public: - WalletClientImpl(Chain& chain, std::vector<std::string> wallet_filenames) + WalletClientImpl(Chain& chain, ArgsManager& args, std::vector<std::string> wallet_filenames) : m_wallet_filenames(std::move(wallet_filenames)) { m_context.chain = &chain; + m_context.args = &args; } void registerRpcs() override { @@ -500,7 +500,7 @@ public: } bool verify() override { return VerifyWallets(*m_context.chain, m_wallet_filenames); } bool load() override { return LoadWallets(*m_context.chain, m_wallet_filenames); } - void start(CScheduler& scheduler) override { return StartWallets(scheduler); } + void start(CScheduler& scheduler) override { return StartWallets(scheduler, *Assert(m_context.args)); } void flush() override { return FlushWallets(); } void stop() override { return StopWallets(); } void setMockTime(int64_t time) override { return SetMockTime(time); } @@ -515,7 +515,7 @@ public: ~WalletClientImpl() override { UnloadWallets(); } WalletContext m_context; - std::vector<std::string> m_wallet_filenames; + const std::vector<std::string> m_wallet_filenames; std::vector<std::unique_ptr<Handler>> m_rpc_handlers; std::list<CRPCCommand> m_rpc_commands; }; @@ -524,9 +524,9 @@ public: std::unique_ptr<Wallet> MakeWallet(const std::shared_ptr<CWallet>& wallet) { return wallet ? 
MakeUnique<WalletImpl>(wallet) : nullptr; } -std::unique_ptr<ChainClient> MakeWalletClient(Chain& chain, std::vector<std::string> wallet_filenames) +std::unique_ptr<ChainClient> MakeWalletClient(Chain& chain, ArgsManager& args, std::vector<std::string> wallet_filenames) { - return MakeUnique<WalletClientImpl>(chain, std::move(wallet_filenames)); + return MakeUnique<WalletClientImpl>(chain, args, std::move(wallet_filenames)); } } // namespace interfaces diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h index e2161521f6..3cdadbc72e 100644 --- a/src/interfaces/wallet.h +++ b/src/interfaces/wallet.h @@ -9,8 +9,8 @@ #include <pubkey.h> // For CKeyID and CScriptID (definitions needed in CTxDestination instantiation) #include <script/standard.h> // For CTxDestination #include <support/allocators/secure.h> // For SecureString -#include <ui_interface.h> // For ChangeType #include <util/message.h> +#include <util/ui_change_type.h> #include <functional> #include <map> @@ -256,9 +256,6 @@ public: // Get default address type. virtual OutputType getDefaultAddressType() = 0; - // Get default change type. - virtual OutputType getDefaultChangeType() = 0; - //! Get max tx fee. virtual CAmount getDefaultMaxTxFee() = 0; diff --git a/src/key.cpp b/src/key.cpp index 7eecc6e083..4ed74a39b1 100644 --- a/src/key.cpp +++ b/src/key.cpp @@ -237,7 +237,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const { std::string str = "Bitcoin key verification\n"; GetRandBytes(rnd, sizeof(rnd)); uint256 hash; - CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin()); + CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash); std::vector<unsigned char> vchSig; Sign(hash, vchSig); return pubkey.Verify(hash, vchSig); diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp index 8072b12119..b571d463c9 100644 --- a/src/merkleblock.cpp +++ b/src/merkleblock.cpp @@ -70,7 +70,7 @@ uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::ve else right = left; // combine subhashes - return Hash(left.begin(), left.end(), right.begin(), right.end()); + return Hash(left, right); } } @@ -126,7 +126,7 @@ uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, uns right = left; } // and combine them before returning - return Hash(left.begin(), left.end(), right.begin(), right.end()); + return Hash(left, right); } } diff --git a/src/miner.cpp b/src/miner.cpp index d9dcbe8a70..41a835f70a 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -109,7 +109,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc if(!pblocktemplate.get()) return nullptr; - pblock = &pblocktemplate->block; // pointer for convenience + CBlock* const pblock = &pblocktemplate->block; // pointer for convenience // Add dummy coinbase tx as first transaction pblock->vtx.emplace_back(); @@ -226,7 +226,7 @@ bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& packa void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) { - pblock->vtx.emplace_back(iter->GetSharedTx()); + pblocktemplate->block.vtx.emplace_back(iter->GetSharedTx()); pblocktemplate->vTxFees.push_back(iter->GetFee()); pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost()); nBlockWeight += iter->GetTxWeight(); diff --git a/src/miner.h b/src/miner.h index 69296f9078..096585dfe4 100644 --- a/src/miner.h +++ b/src/miner.h @@ -128,8 +128,6 @@ class BlockAssembler private: // The constructed block template std::unique_ptr<CBlockTemplate> pblocktemplate; 
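// Illustrative sketch (not part of the patch above): with the cached pblock
// member removed, anything that needs the block reaches it through the
// CBlockTemplate returned by CreateNewBlock(), e.g.:
static void InspectTemplate(const CBlockTemplate& tmpl)
{
    const CBlock& block = tmpl.block;         // the template owns the only copy of the block state
    const size_t tx_count = block.vtx.size(); // slot 0 is the dummy coinbase added during assembly
    (void)tx_count;
}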
- // A convenience pointer that always refers to the CBlock in pblocktemplate - CBlock* pblock; // Configuration parameters for the block size bool fIncludeWitness; diff --git a/src/net.cpp b/src/net.cpp index 371fbeed59..6c1980735c 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -14,12 +14,12 @@ #include <clientversion.h> #include <consensus/consensus.h> #include <crypto/sha256.h> -#include <netbase.h> #include <net_permissions.h> +#include <netbase.h> +#include <node/ui_interface.h> #include <protocol.h> #include <random.h> #include <scheduler.h> -#include <ui_interface.h> #include <util/strencodings.h> #include <util/translation.h> @@ -42,6 +42,7 @@ static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed"); #endif +#include <cstdint> #include <unordered_map> #include <math.h> @@ -104,15 +105,15 @@ std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost); static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {}; std::string strSubVersion; -void CConnman::AddOneShot(const std::string& strDest) +void CConnman::AddAddrFetch(const std::string& strDest) { - LOCK(cs_vOneShots); - vOneShots.push_back(strDest); + LOCK(m_addr_fetches_mutex); + m_addr_fetches.push_back(strDest); } -unsigned short GetListenPort() +uint16_t GetListenPort() { - return (unsigned short)(gArgs.GetArg("-port", Params().GetDefaultPort())); + return (uint16_t)(gArgs.GetArg("-port", Params().GetDefaultPort())); } // find 'best' local address for a particular peer @@ -345,7 +346,7 @@ bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce) + if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce) return false; } return true; @@ -367,8 +368,10 @@ static CAddress GetBindAddress(SOCKET sock) return addr_bind; } -CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only) +CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) { + assert(conn_type != ConnectionType::INBOUND); + if (pszDest == nullptr) { if (IsLocal(addrConnect)) return nullptr; @@ -431,7 +434,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo if (hSocket == INVALID_SOCKET) { return nullptr; } - connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, manual_connection); + connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL); } if (!proxyConnectionFailed) { // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to @@ -458,7 +461,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); CAddress addr_bind = GetBindAddress(hSocket); - CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", false, block_relay_only); + CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? 
pszDest : "", conn_type); pnode->AddRef(); // We're making a new connection, harvest entropy from the time (and our peer count) @@ -535,8 +538,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) LOCK(cs_SubVer); X(cleanSubVer); } - X(fInbound); - X(m_manual_connection); + stats.fInbound = IsInboundConn(); + stats.m_manual_connection = IsManualConn(); X(nStartingHeight); { LOCK(cs_vSend); @@ -563,15 +566,15 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) // since pingtime does not update until the ping is complete, which might take a while. // So, if a ping is taking an unusually long time in flight, // the caller can immediately detect that this is happening. - int64_t nPingUsecWait = 0; - if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) { - nPingUsecWait = GetTimeMicros() - nPingUsecStart; + std::chrono::microseconds ping_wait{0}; + if ((0 != nPingNonceSent) && (0 != m_ping_start.load().count())) { + ping_wait = GetTime<std::chrono::microseconds>() - m_ping_start.load(); } // Raw ping time is in microseconds, but show it to user as whole seconds (Bitcoin users should be well used to small numbers with many decimal places by now :) stats.m_ping_usec = nPingUsecTime; stats.m_min_ping_usec = nMinPingUsecTime; - stats.m_ping_wait_usec = nPingUsecWait; + stats.m_ping_wait_usec = count_microseconds(ping_wait); // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); @@ -582,9 +585,9 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete) { complete = false; - int64_t nTimeMicros = GetTimeMicros(); + const auto time = GetTime<std::chrono::microseconds>(); LOCK(cs_vRecv); - nLastRecv = nTimeMicros / 1000000; + nLastRecv = std::chrono::duration_cast<std::chrono::seconds>(time).count(); nRecvBytes += nBytes; while (nBytes > 0) { // absorb network data @@ -596,7 +599,7 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete if (m_deserializer->Complete()) { // decompose a transport agnostic CNetMessage from the deserializer - CNetMessage msg = m_deserializer->GetMessage(Params().MessageStart(), nTimeMicros); + CNetMessage msg = m_deserializer->GetMessage(Params().MessageStart(), time); //store received bytes per message command //to prevent a memory DOS, only allow valid commands @@ -684,7 +687,7 @@ int V1TransportDeserializer::readData(const char *pch, unsigned int nBytes) vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } - hasher.Write((const unsigned char*)pch, nCopy); + hasher.Write({(const unsigned char*)pch, nCopy}); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; @@ -695,11 +698,12 @@ const uint256& V1TransportDeserializer::GetMessageHash() const { assert(Complete()); if (data_hash.IsNull()) - hasher.Finalize(data_hash.begin()); + hasher.Finalize(data_hash); return data_hash; } -CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageStartChars& message_start, int64_t time) { +CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageStartChars& message_start, const std::chrono::microseconds time) +{ // decompose a single CNetMessage from the TransportDeserializer CNetMessage msg(std::move(vRecv)); @@ -720,8 +724,8 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta if (!msg.m_valid_checksum) { LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), 
expected %s was %s\n", SanitizeString(msg.m_command), msg.m_message_size, - HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE), - HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE)); + HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)), + HexStr(hdr.pchChecksum)); } // store receive time @@ -734,7 +738,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) { // create dbl-sha256 checksum - uint256 hash = Hash(msg.data.begin(), msg.data.end()); + uint256 hash = Hash(msg.data); // create header CMessageHeader hdr(Params().MessageStart(), msg.m_type.c_str(), msg.data.size()); @@ -870,7 +874,7 @@ bool CConnman::AttemptToEvictConnection() for (const CNode* node : vNodes) { if (node->HasPermission(PF_NOBAN)) continue; - if (!node->fInbound) + if (!node->IsInboundConn()) continue; if (node->fDisconnect) continue; @@ -981,7 +985,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (pnode->fInbound) nInbound++; + if (pnode->IsInboundConn()) nInbound++; } } @@ -1010,17 +1014,24 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { // on all platforms. Set it again here just to be sure. SetSocketNoDelay(hSocket); - int bannedlevel = m_banman ? m_banman->IsBannedLevel(addr) : 0; - - // Don't accept connections from banned peers, but if our inbound slots aren't almost full, accept - // if the only banning reason was an automatic misbehavior ban. - if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && bannedlevel > ((nInbound + 1 < nMaxInbound) ? 1 : 0)) + // Don't accept connections from banned peers. + bool banned = m_banman && m_banman->IsBanned(addr); + if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && banned) { LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToString()); CloseSocket(hSocket); return; } + // Only accept connections from discouraged peers if our inbound slots aren't (almost) full. 
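// Illustrative sketch (not part of the patch above): taken together, the banned
// check above and the discouraged check that follows reduce to this predicate,
// where `noban` stands for the PF_NOBAN permission:
static bool ShouldRefuseInbound(bool noban, bool banned, bool discouraged, int inbound, int max_inbound)
{
    if (noban) return false;                          // NOBAN peers skip both checks
    if (banned) return true;                          // banned peers are always dropped
    return discouraged && inbound + 1 >= max_inbound; // discouraged peers only get spare slots
}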
+ bool discouraged = m_banman && m_banman->IsDiscouraged(addr); + if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && nInbound + 1 >= nMaxInbound && discouraged) + { + LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToString()); + CloseSocket(hSocket); + return; + } + if (nInbound >= nMaxInbound) { if (!AttemptToEvictConnection()) { @@ -1039,12 +1050,12 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) { nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM); } - CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", true); + CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND); pnode->AddRef(); pnode->m_permissionFlags = permissionFlags; // If this flag is present, the user probably expect that RPC and QT report it as whitelisted (backward compatibility) pnode->m_legacyWhitelisted = legacyWhitelisted; - pnode->m_prefer_evict = bannedlevel > 0; + pnode->m_prefer_evict = discouraged; m_msgproc->InitializeNode(pnode); LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString()); @@ -1103,12 +1114,9 @@ void CConnman::DisconnectNodes() if (pnode->GetRefCount() <= 0) { bool fDelete = false; { - TRY_LOCK(pnode->cs_inventory, lockInv); - if (lockInv) { - TRY_LOCK(pnode->cs_vSend, lockSend); - if (lockSend) { - fDelete = true; - } + TRY_LOCK(pnode->cs_vSend, lockSend); + if (lockSend) { + fDelete = true; } } if (fDelete) { @@ -1154,9 +1162,9 @@ void CConnman::InactivityCheck(CNode *pnode) LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv); pnode->fDisconnect = true; } - else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) + else if (pnode->nPingNonceSent && pnode->m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL} < GetTime<std::chrono::microseconds>()) { - LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart)); + LogPrintf("ping timeout: %fs\n", 0.000001 * count_microseconds(GetTime<std::chrono::microseconds>() - pnode->m_ping_start.load())); pnode->fDisconnect = true; } else if (!pnode->fSuccessfullyConnected) @@ -1640,7 +1648,7 @@ void CConnman::ThreadDNSAddressSeed() { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - nRelevant += pnode->fSuccessfullyConnected && !pnode->fFeeler && !pnode->fOneShot && !pnode->m_manual_connection && !pnode->fInbound; + if (pnode->fSuccessfullyConnected && pnode->IsOutboundOrBlockRelayConn()) ++nRelevant; } } if (nRelevant >= 2) { @@ -1668,7 +1676,7 @@ void CConnman::ThreadDNSAddressSeed() LogPrintf("Loading addresses from DNS seed %s\n", seed); if (HaveNameProxy()) { - AddOneShot(seed); + AddAddrFetch(seed); } else { std::vector<CNetAddr> vIPs; std::vector<CAddress> vAdd; @@ -1690,8 +1698,8 @@ void CConnman::ThreadDNSAddressSeed() addrman.Add(vAdd, resolveSource); } else { // We now avoid directly using results from DNS Seeds which do not support service bit filtering, - // instead using them as a oneshot to get nodes with our desired service bits. - AddOneShot(seed); + // instead using them as a addrfetch to get nodes with our desired service bits. 
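// Illustrative sketch (not part of the patch above): AddAddrFetch() only queues
// the seed host; ProcessAddrFetch() later pops one entry per pass and dials it
// as an ADDR_FETCH connection. A minimal standalone model of that queue
// (assumes <deque>, <mutex> and <string> are available):
class AddrFetchQueue
{
public:
    void Add(const std::string& dest)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_queue.push_back(dest);
    }
    bool PopOne(std::string& dest)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        if (m_queue.empty()) return false;
        dest = m_queue.front();
        m_queue.pop_front();
        return true;
    }
private:
    std::mutex m_mutex;
    std::deque<std::string> m_queue;
};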
+ AddAddrFetch(seed); } } --seeds_right_now; @@ -1699,17 +1707,6 @@ void CConnman::ThreadDNSAddressSeed() LogPrintf("%d addresses found from DNS seeds\n", found); } - - - - - - - - - - - void CConnman::DumpAddresses() { int64_t nStart = GetTimeMillis(); @@ -1721,20 +1718,20 @@ void CConnman::DumpAddresses() addrman.size(), GetTimeMillis() - nStart); } -void CConnman::ProcessOneShot() +void CConnman::ProcessAddrFetch() { std::string strDest; { - LOCK(cs_vOneShots); - if (vOneShots.empty()) + LOCK(m_addr_fetches_mutex); + if (m_addr_fetches.empty()) return; - strDest = vOneShots.front(); - vOneShots.pop_front(); + strDest = m_addr_fetches.front(); + m_addr_fetches.pop_front(); } CAddress addr; CSemaphoreGrant grant(*semOutbound, true); if (grant) { - OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true); + OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH); } } @@ -1761,7 +1758,7 @@ int CConnman::GetExtraOutboundCount() { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) { + if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) { ++nOutbound; } } @@ -1776,11 +1773,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) { for (int64_t nLoop = 0;; nLoop++) { - ProcessOneShot(); + ProcessAddrFetch(); for (const std::string& strAddr : connect) { CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), false, false, true); + OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) @@ -1799,7 +1796,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL); while (!interruptNet) { - ProcessOneShot(); + ProcessAddrFetch(); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; @@ -1832,21 +1829,27 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) int nOutboundFullRelay = 0; int nOutboundBlockRelay = 0; std::set<std::vector<unsigned char> > setConnected; + { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (!pnode->fInbound && !pnode->m_manual_connection) { - // Netgroups for inbound and addnode peers are not excluded because our goal here - // is to not use multiple of our limited outbound slots on a single netgroup - // but inbound and addnode peers do not use our outbound slots. Inbound peers - // also have the added issue that they're attacker controlled and could be used - // to prevent us from connecting to particular hosts if we used them here. - setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap)); - if (pnode->m_tx_relay == nullptr) { - nOutboundBlockRelay++; - } else if (!pnode->fFeeler) { - nOutboundFullRelay++; - } + if (pnode->IsFullOutboundConn()) nOutboundFullRelay++; + if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++; + + // Netgroups for inbound and manual peers are not excluded because our goal here + // is to not use multiple of our limited outbound slots on a single netgroup + // but inbound and manual peers do not use our outbound slots. 
Inbound peers + // also have the added issue that they could be attacker controlled and used + // to prevent us from connecting to particular hosts if we used them here. + switch(pnode->m_conn_type){ + case ConnectionType::INBOUND: + case ConnectionType::MANUAL: + break; + case ConnectionType::OUTBOUND: + case ConnectionType::BLOCK_RELAY: + case ConnectionType::ADDR_FETCH: + case ConnectionType::FEELER: + setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap)); } } } @@ -1939,14 +1942,24 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString()); } - // Open this connection as block-relay-only if we're already at our - // full-relay capacity, but not yet at our block-relay peer limit. - // (It should not be possible for fFeeler to be set if we're not - // also at our block-relay peer limit, but check against that as - // well for sanity.) - bool block_relay_only = nOutboundBlockRelay < m_max_outbound_block_relay && !fFeeler && nOutboundFullRelay >= m_max_outbound_full_relay; + ConnectionType conn_type; + // Determine what type of connection to open. If fFeeler is not + // set, open OUTBOUND connections until we meet our full-relay + // capacity. Then open BLOCK_RELAY connections until we hit our + // block-relay peer limit. Otherwise, default to opening an + // OUTBOUND connection. + if (fFeeler) { + conn_type = ConnectionType::FEELER; + } else if (nOutboundFullRelay < m_max_outbound_full_relay) { + conn_type = ConnectionType::OUTBOUND; + } else if (nOutboundBlockRelay < m_max_outbound_block_relay) { + conn_type = ConnectionType::BLOCK_RELAY; + } else { + // GetTryNewOutboundPeer() is true + conn_type = ConnectionType::OUTBOUND; + } - OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler, false, block_relay_only); + OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, conn_type); } } } @@ -1970,11 +1983,11 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { if (pnode->addr.IsValid()) { - mapConnected[pnode->addr] = pnode->fInbound; + mapConnected[pnode->addr] = pnode->IsInboundConn(); } std::string addrName = pnode->GetAddrName(); if (!addrName.empty()) { - mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast<const CService&>(pnode->addr)); + mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr)); } } } @@ -2021,7 +2034,7 @@ void CConnman::ThreadOpenAddedConnections() } tried = true; CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), false, false, true); + OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; } @@ -2033,8 +2046,10 @@ void CConnman::ThreadOpenAddedConnections() } // if successful, this moves the passed grant to the constructed node -void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool manual_connection, bool block_relay_only) +void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type) { + 
assert(conn_type != ConnectionType::INBOUND); + // // Initiate outbound network connection // @@ -2045,25 +2060,19 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai return; } if (!pszDest) { - if (IsLocal(addrConnect) || - FindNode(static_cast<CNetAddr>(addrConnect)) || (m_banman && m_banman->IsBanned(addrConnect)) || - FindNode(addrConnect.ToStringIPPort())) + bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addrConnect) || m_banman->IsBanned(addrConnect)); + if (IsLocal(addrConnect) || FindNode(static_cast<CNetAddr>(addrConnect)) || banned_or_discouraged || FindNode(addrConnect.ToStringIPPort())) { return; + } } else if (FindNode(std::string(pszDest))) return; - CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, manual_connection, block_relay_only); + CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type); if (!pnode) return; if (grantOutbound) grantOutbound->MoveTo(pnode->grantOutbound); - if (fOneShot) - pnode->fOneShot = true; - if (fFeeler) - pnode->fFeeler = true; - if (manual_connection) - pnode->m_manual_connection = true; m_msgproc->InitializeNode(pnode); { @@ -2121,11 +2130,6 @@ void CConnman::ThreadMessageHandler() } } - - - - - bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, NetPermissionFlags permissions) { int nOne = 1; @@ -2247,7 +2251,7 @@ void Discover() void CConnman::SetNetworkActive(bool active) { - LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active); + LogPrintf("%s: %s\n", __func__, active); if (fNetworkActive == active) { return; @@ -2258,12 +2262,14 @@ void CConnman::SetNetworkActive(bool active) uiInterface.NotifyNetworkActiveChanged(fNetworkActive); } -CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In) : nSeed0(nSeed0In), nSeed1(nSeed1In) +CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, bool network_active) + : nSeed0(nSeed0In), nSeed1(nSeed1In) { SetTryNewOutboundPeer(false); Options connOptions; Init(connOptions); + SetNetworkActive(network_active); } NodeId CConnman::GetNewNodeId() @@ -2329,7 +2335,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) } for (const auto& strDest : connOptions.vSeedNodes) { - AddOneShot(strDest); + AddAddrFetch(strDest); } if (clientInterface) { @@ -2382,7 +2388,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) else threadDNSAddressSeed = std::thread(&TraceThread<std::function<void()> >, "dnsseed", std::function<void()>(std::bind(&CConnman::ThreadDNSAddressSeed, this))); - // Initiate outbound connections from -addnode + // Initiate manual connections threadOpenAddedConnections = std::thread(&TraceThread<std::function<void()> >, "addcon", std::function<void()>(std::bind(&CConnman::ThreadOpenAddedConnections, this))); if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) { @@ -2505,11 +2511,6 @@ CConnman::~CConnman() Stop(); } -size_t CConnman::GetAddressCount() const -{ - return addrman.size(); -} - void CConnman::SetServices(const CService &addr, ServiceFlags nServices) { addrman.SetServices(addr, nServices); @@ -2520,14 +2521,31 @@ void CConnman::MarkAddressGood(const CAddress& addr) addrman.Good(addr); } -void CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty) +bool CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty) { - addrman.Add(vAddr, addrFrom, nTimePenalty); + return addrman.Add(vAddr, 
addrFrom, nTimePenalty); } -std::vector<CAddress> CConnman::GetAddresses() +std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses, size_t max_pct) { - return addrman.GetAddr(); + std::vector<CAddress> addresses = addrman.GetAddr(max_addresses, max_pct); + if (m_banman) { + addresses.erase(std::remove_if(addresses.begin(), addresses.end(), + [this](const CAddress& addr){return m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr);}), + addresses.end()); + } + return addresses; +} + +std::vector<CAddress> CConnman::GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct) +{ + const auto current_time = GetTime<std::chrono::microseconds>(); + if (m_addr_response_caches.find(requestor_network) == m_addr_response_caches.end() || + m_addr_response_caches[requestor_network].m_update_addr_response < current_time) { + m_addr_response_caches[requestor_network].m_addrs_response_cache = GetAddresses(max_addresses, max_pct); + m_addr_response_caches[requestor_network].m_update_addr_response = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6)); + } + return m_addr_response_caches[requestor_network].m_addrs_response_cache; } bool CConnman::AddNode(const std::string& strNode) @@ -2561,7 +2579,7 @@ size_t CConnman::GetNodeCount(NumConnections flags) int nNum = 0; for (const auto& pnode : vNodes) { - if (flags & (pnode->fInbound ? CONNECTIONS_IN : CONNECTIONS_OUT)) { + if (flags & (pnode->IsInboundConn() ? CONNECTIONS_IN : CONNECTIONS_OUT)) { nNum++; } } @@ -2639,7 +2657,7 @@ void CConnman::RecordBytesSent(uint64_t bytes) nMaxOutboundTotalBytesSentInCycle = 0; } - // TODO, exclude peers with noban permission + // TODO, exclude peers with download permission nMaxOutboundTotalBytesSentInCycle += bytes; } @@ -2745,26 +2763,26 @@ int CConnman::GetBestHeight() const unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } -CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, bool fInboundIn, bool block_relay_only) +CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in) : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), addrBind(addrBindIn), - fInbound(fInboundIn), nKeyedNetGroup(nKeyedNetGroupIn), // Don't relay addr messages to peers that we connect to as block-relay-only // peers (to prevent adversaries from inferring these links from addr // traffic). - m_addr_known{block_relay_only ? nullptr : MakeUnique<CRollingBloomFilter>(5000, 0.001)}, id(idIn), nLocalHostNonce(nLocalHostNonceIn), + m_conn_type(conn_type_in), nLocalServices(nLocalServicesIn), nMyStartingHeight(nMyStartingHeightIn) { hSocket = hSocketIn; addrName = addrNameIn == "" ? 
addr.ToStringIPPort() : addrNameIn; hashContinue = uint256(); - if (!block_relay_only) { + if (conn_type_in != ConnectionType::BLOCK_RELAY) { m_tx_relay = MakeUnique<TxRelay>(); + m_addr_known = MakeUnique<CRollingBloomFilter>(5000, 0.001); } for (const std::string &msg : getAllNetMessageTypes()) @@ -25,8 +25,9 @@ #include <uint256.h> #include <atomic> +#include <cstdint> #include <deque> -#include <stdint.h> +#include <map> #include <thread> #include <memory> #include <condition_variable> @@ -50,8 +51,8 @@ static const bool DEFAULT_WHITELISTFORCERELAY = false; static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; -/** The maximum number of new addresses to accumulate before announcing. */ -static const unsigned int MAX_ADDR_TO_SEND = 1000; +/** The maximum number of addresses from our addrman to return in response to a getaddr message. */ +static constexpr size_t MAX_ADDR_TO_SEND = 1000; /** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */ static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 4 * 1000 * 1000; /** Maximum length of the user agent string in `version` message */ @@ -61,7 +62,7 @@ static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8; /** Maximum number of addnode outgoing nodes */ static const int MAX_ADDNODE_CONNECTIONS = 8; /** Maximum number of block-relay-only outgoing connections */ -static const int MAX_BLOCKS_ONLY_CONNECTIONS = 2; +static const int MAX_BLOCK_RELAY_ONLY_CONNECTIONS = 2; /** Maximum number of feeler connections */ static const int MAX_FEELER_CONNECTIONS = 1; /** -listen default */ @@ -113,6 +114,17 @@ struct CSerializedNetMsg std::string m_type; }; +/** Different types of connections to a peer. This enum encapsulates the + * information we have available at the time of opening or accepting the + * connection. Aside from INBOUND, all types are initiated by us. */ +enum class ConnectionType { + INBOUND, /**< peer initiated connections */ + OUTBOUND, /**< full relay connections (blocks, addrs, txns) made automatically. Addresses selected from AddrMan. */ + MANUAL, /**< connections to addresses added via addnode or the connect command line argument */ + FEELER, /**< short lived connections used to test address validity */ + BLOCK_RELAY, /**< only relay blocks to these automatic outbound connections. Addresses selected from AddrMan. 
*/ + ADDR_FETCH, /**< short lived connections used to solicit addrs when starting the node without a populated AddrMan */ +}; class NetEventsInterface; class CConnman @@ -181,7 +193,7 @@ public: } } - CConnman(uint64_t seed0, uint64_t seed1); + CConnman(uint64_t seed0, uint64_t seed1, bool network_active = true); ~CConnman(); bool Start(CScheduler& scheduler, const Options& options); @@ -197,7 +209,7 @@ public: bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); - void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false, bool block_relay_only = false); + void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, ConnectionType conn_type = ConnectionType::OUTBOUND); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func); @@ -247,11 +259,17 @@ public: }; // Addrman functions - size_t GetAddressCount() const; void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress& addr); - void AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0); - std::vector<CAddress> GetAddresses(); + bool AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0); + std::vector<CAddress> GetAddresses(size_t max_addresses, size_t max_pct); + /** + * Cache is used to minimize topology leaks, so it should + * be used for all non-trusted calls, for example, p2p. + * A non-malicious call (from RPC or a peer with addr permission) should + * call the function without a parameter to avoid using the cache. + */ + std::vector<CAddress> GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct); // This allows temporarily exceeding m_max_outbound_full_relay, with the goal of finding // a peer that is better than all our current peers. 
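
The cached GetAddresses() overload declared above is the one untrusted peers are meant to hit, while trusted callers (RPC, or a peer granted the new addr permission) use the uncached overload. A minimal caller-side sketch of that split, using hypothetical stand-in types (SketchConnman, PeerInfo) rather than the real CConnman/CNode wiring, which this diff does not show:

// Sketch only: choosing between the cached and uncached
// CConnman::GetAddresses() overloads introduced in this patch.
// SketchConnman and PeerInfo are hypothetical stand-ins, not part of the patch.
#include <cstddef>
#include <vector>

enum class Network { NET_IPV4, NET_IPV6, NET_ONION };
struct CAddress {};

struct SketchConnman {
    // Uncached: freshest records, intended for trusted callers only.
    std::vector<CAddress> GetAddresses(size_t /*max_addresses*/, size_t /*max_pct*/) { return {}; }
    // Cached per requestor network: what a GETADDR from an arbitrary peer sees,
    // refreshed on a randomized ~21-27h schedule (see CConnman::GetAddresses above).
    std::vector<CAddress> GetAddresses(Network /*requestor_network*/, size_t /*max_addresses*/, size_t /*max_pct*/) { return {}; }
};

struct PeerInfo {
    Network network;
    bool has_addr_permission; // corresponds to the new PF_ADDR flag
};

// Serve a GETADDR-style request: untrusted peers go through the per-network
// cache so that repeated queries cannot watch addrman evolve in real time.
std::vector<CAddress> AddrsForGetAddr(SketchConnman& connman, const PeerInfo& peer,
                                      size_t max_addresses, size_t max_pct)
{
    if (peer.has_addr_permission) {
        return connman.GetAddresses(max_addresses, max_pct);
    }
    return connman.GetAddresses(peer.network, max_addresses, max_pct);
}

A real caller would presumably bound the request with MAX_ADDR_TO_SEND and MAX_PCT_ADDR_TO_SEND, both defined elsewhere in this diff.
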
@@ -341,8 +359,8 @@ private: bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector<CService>& binds, const std::vector<NetWhitebindPermissions>& whiteBinds); void ThreadOpenAddedConnections(); - void AddOneShot(const std::string& strDest); - void ProcessOneShot(); + void AddAddrFetch(const std::string& strDest); + void ProcessAddrFetch(); void ThreadOpenConnections(std::vector<std::string> connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket& hListenSocket); @@ -363,7 +381,7 @@ private: CNode* FindNode(const CService& addr); bool AttemptToEvictConnection(); - CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only); + CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type); void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const; void DeleteNode(CNode* pnode); @@ -406,8 +424,8 @@ private: std::atomic<bool> fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; - std::deque<std::string> vOneShots GUARDED_BY(cs_vOneShots); - RecursiveMutex cs_vOneShots; + std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); + RecursiveMutex m_addr_fetches_mutex; std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector<CNode*> vNodes GUARDED_BY(cs_vNodes); @@ -417,6 +435,29 @@ private: unsigned int nPrevNodeCount{0}; /** + * Cache responses to addr requests to minimize privacy leak. + * Attack example: scraping addrs in real-time may allow an attacker + * to infer new connections of the victim by detecting new records + * with fresh timestamps (per self-announcement). + */ + struct CachedAddrResponse { + std::vector<CAddress> m_addrs_response_cache; + std::chrono::microseconds m_update_addr_response{0}; + }; + + /** + * Addr responses stored in different caches + * per network prevent cross-network node identification. + * If a node for example is multi-homed under Tor and IPv6, + * a single cache (or no cache at all) would let an attacker + * to easily detect that it is the same node by comparing responses. + * The used memory equals to 1000 CAddress records (or around 32 bytes) per + * distinct Network (up to 5) we have/had an inbound peer from, + * resulting in at most ~160 KB. + */ + std::map<Network, CachedAddrResponse> m_addr_response_caches; + + /** * Services this instance offers. * * This data is replicated in each CNode instance we create during peer @@ -448,6 +489,7 @@ private: std::atomic<int> nBestHeight; CClientUIInterface* clientInterface; NetEventsInterface* m_msgproc; + /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ BanMan* m_banman; /** SipHasher seeds for deterministic randomness */ @@ -482,7 +524,7 @@ void Discover(); void StartMapPort(); void InterruptMapPort(); void StopMapPort(); -unsigned short GetListenPort(); +uint16_t GetListenPort(); struct CombinerAll { @@ -612,13 +654,13 @@ public: */ class CNetMessage { public: - CDataStream m_recv; // received message data - int64_t m_time = 0; // time (in microseconds) of message receipt. 
+ CDataStream m_recv; //!< received message data + std::chrono::microseconds m_time{0}; //!< time of message receipt bool m_valid_netmagic = false; bool m_valid_header = false; bool m_valid_checksum = false; - uint32_t m_message_size = 0; // size of the payload - uint32_t m_raw_message_size = 0; // used wire size of the message (including header/checksum) + uint32_t m_message_size{0}; //!< size of the payload + uint32_t m_raw_message_size{0}; //!< used wire size of the message (including header/checksum) std::string m_command; CNetMessage(CDataStream&& recv_in) : m_recv(std::move(recv_in)) {} @@ -642,7 +684,7 @@ public: // read and deserialize data virtual int Read(const char *data, unsigned int bytes) = 0; // decomposes a message from the context - virtual CNetMessage GetMessage(const CMessageHeader::MessageStartChars& message_start, int64_t time) = 0; + virtual CNetMessage GetMessage(const CMessageHeader::MessageStartChars& message_start, std::chrono::microseconds time) = 0; virtual ~TransportDeserializer() {} }; @@ -695,7 +737,7 @@ public: if (ret < 0) Reset(); return ret; } - CNetMessage GetMessage(const CMessageHeader::MessageStartChars& message_start, int64_t time) override; + CNetMessage GetMessage(const CMessageHeader::MessageStartChars& message_start, std::chrono::microseconds time) override; }; /** The TransportSerializer prepares messages for the network transport @@ -764,12 +806,8 @@ public: } // This boolean is unusued in actual processing, only present for backward compatibility at RPC/QT level bool m_legacyWhitelisted{false}; - bool fFeeler{false}; // If true this node is being used as a short lived feeler. - bool fOneShot{false}; - bool m_manual_connection{false}; bool fClient{false}; // set by version message bool m_limited_node{false}; //after BIP159, set by version message - const bool fInbound; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs @@ -782,6 +820,60 @@ public: std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; + bool IsOutboundOrBlockRelayConn() const { + switch(m_conn_type) { + case ConnectionType::OUTBOUND: + case ConnectionType::BLOCK_RELAY: + return true; + case ConnectionType::INBOUND: + case ConnectionType::MANUAL: + case ConnectionType::ADDR_FETCH: + case ConnectionType::FEELER: + return false; + } + + assert(false); + } + + bool IsFullOutboundConn() const { + return m_conn_type == ConnectionType::OUTBOUND; + } + + bool IsManualConn() const { + return m_conn_type == ConnectionType::MANUAL; + } + + bool IsBlockOnlyConn() const { + return m_conn_type == ConnectionType::BLOCK_RELAY; + } + + bool IsFeelerConn() const { + return m_conn_type == ConnectionType::FEELER; + } + + bool IsAddrFetchConn() const { + return m_conn_type == ConnectionType::ADDR_FETCH; + } + + bool IsInboundConn() const { + return m_conn_type == ConnectionType::INBOUND; + } + + bool ExpectServicesFromConn() const { + switch(m_conn_type) { + case ConnectionType::INBOUND: + case ConnectionType::MANUAL: + case ConnectionType::FEELER: + return false; + case ConnectionType::OUTBOUND: + case ConnectionType::BLOCK_RELAY: + case ConnectionType::ADDR_FETCH: + return true; + } + + assert(false); + } + protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); @@ -792,7 +884,7 @@ public: // flood relay std::vector<CAddress> vAddrToSend; - const std::unique_ptr<CRollingBloomFilter> m_addr_known; + 
std::unique_ptr<CRollingBloomFilter> m_addr_known = nullptr; bool fGetAddr{false}; std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0}; std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0}; @@ -803,7 +895,7 @@ public: // There is no final sorting before sending, as they are always sent immediately // and in the order requested. std::vector<uint256> vInventoryBlockToSend GUARDED_BY(cs_inventory); - RecursiveMutex cs_inventory; + Mutex cs_inventory; struct TxRelay { mutable RecursiveMutex cs_filter; @@ -845,8 +937,8 @@ public: // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. std::atomic<uint64_t> nPingNonceSent{0}; - // Time (in usec) the last ping was sent, or 0 if no ping was ever sent. - std::atomic<int64_t> nPingUsecStart{0}; + /** When the last ping was sent, or 0 if no ping was ever sent */ + std::atomic<std::chrono::microseconds> m_ping_start{std::chrono::microseconds{0}}; // Last measured round-trip time. std::atomic<int64_t> nPingUsecTime{0}; // Best measured round-trip time. @@ -856,7 +948,7 @@ public: std::set<uint256> orphan_work_set; - CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn = "", bool fInboundIn = false, bool block_relay_only = false); + CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in); ~CNode(); CNode(const CNode&) = delete; CNode& operator=(const CNode&) = delete; @@ -864,6 +956,7 @@ public: private: const NodeId id; const uint64_t nLocalHostNonce; + const ConnectionType m_conn_type; //! Services offered to this peer. //! 
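
The ping bookkeeping above now stores std::chrono types directly (m_ping_start is an atomic duration rather than a raw microsecond count). A small, self-contained sketch of how such a field supports the timeout check performed in InactivityCheck() earlier in this diff; Now() is a plain stand-in for the codebase's GetTime<std::chrono::microseconds>() and is not part of the patch:

// Sketch only: ping-timeout detection with an atomic std::chrono start time,
// mirroring the InactivityCheck() change in this diff under stated stand-ins.
#include <atomic>
#include <chrono>
#include <cstdio>

static constexpr int TIMEOUT_INTERVAL = 20 * 60; // seconds, as in net.h

std::atomic<std::chrono::microseconds> m_ping_start{std::chrono::microseconds{0}};

std::chrono::microseconds Now()
{
    // Stand-in for GetTime<std::chrono::microseconds>().
    return std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::system_clock::now().time_since_epoch());
}

bool PingTimedOut()
{
    const auto start = m_ping_start.load();
    if (start.count() == 0) return false; // no ping outstanding
    return start + std::chrono::seconds{TIMEOUT_INTERVAL} < Now();
}

int main()
{
    m_ping_start = Now() - std::chrono::seconds{TIMEOUT_INTERVAL + 1};
    std::printf("timed out: %d\n", PingTimedOut() ? 1 : 0); // prints 1
}
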
@@ -965,11 +1058,11 @@ public: } - void AddInventoryKnown(const CInv& inv) + void AddKnownTx(const uint256& hash) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); - m_tx_relay->filterInventoryKnown.insert(inv.hash); + m_tx_relay->filterInventoryKnown.insert(hash); } } @@ -982,18 +1075,6 @@ public: } } - void PushBlockInventory(const uint256& hash) - { - LOCK(cs_inventory); - vInventoryBlockToSend.push_back(hash); - } - - void PushBlockHash(const uint256 &hash) - { - LOCK(cs_inventory); - vBlockHashesToAnnounce.push_back(hash); - } - void CloseSocketDisconnect(); void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap); diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp index da09149856..53648deb40 100644 --- a/src/net_permissions.cpp +++ b/src/net_permissions.cpp @@ -10,10 +10,12 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{ "bloomfilter (allow requesting BIP37 filtered blocks and transactions)", - "noban (do not ban for misbehavior)", + "noban (do not ban for misbehavior; implies download)", "forcerelay (relay transactions that are already in the mempool; implies relay)", "relay (relay even in -blocksonly mode)", "mempool (allow requesting BIP35 mempool contents)", + "download (allow getheaders during IBD, no disconnect after maxuploadtarget limit)", + "addr (responses to GETADDR avoid hitting the cache and contain random records with the most up-to-date info)" }; namespace { @@ -46,8 +48,10 @@ bool TryParsePermissionFlags(const std::string str, NetPermissionFlags& output, else if (permission == "noban") NetPermissions::AddFlag(flags, PF_NOBAN); else if (permission == "forcerelay") NetPermissions::AddFlag(flags, PF_FORCERELAY); else if (permission == "mempool") NetPermissions::AddFlag(flags, PF_MEMPOOL); + else if (permission == "download") NetPermissions::AddFlag(flags, PF_DOWNLOAD); else if (permission == "all") NetPermissions::AddFlag(flags, PF_ALL); else if (permission == "relay") NetPermissions::AddFlag(flags, PF_RELAY); + else if (permission == "addr") NetPermissions::AddFlag(flags, PF_ADDR); else if (permission.length() == 0); // Allow empty entries else { error = strprintf(_("Invalid P2P permission: '%s'"), permission); @@ -72,6 +76,8 @@ std::vector<std::string> NetPermissions::ToStrings(NetPermissionFlags flags) if (NetPermissions::HasFlag(flags, PF_FORCERELAY)) strings.push_back("forcerelay"); if (NetPermissions::HasFlag(flags, PF_RELAY)) strings.push_back("relay"); if (NetPermissions::HasFlag(flags, PF_MEMPOOL)) strings.push_back("mempool"); + if (NetPermissions::HasFlag(flags, PF_DOWNLOAD)) strings.push_back("download"); + if (NetPermissions::HasFlag(flags, PF_ADDR)) strings.push_back("addr"); return strings; } diff --git a/src/net_permissions.h b/src/net_permissions.h index e004067e75..5b68f635a7 100644 --- a/src/net_permissions.h +++ b/src/net_permissions.h @@ -14,8 +14,7 @@ struct bilingual_str; extern const std::vector<std::string> NET_PERMISSIONS_DOC; -enum NetPermissionFlags -{ +enum NetPermissionFlags { PF_NONE = 0, // Can query bloomfilter even if -peerbloomfilters is false PF_BLOOMFILTER = (1U << 1), @@ -24,14 +23,18 @@ enum NetPermissionFlags // Always relay transactions from this peer, even if already in mempool // Keep parameter interaction: forcerelay implies relay PF_FORCERELAY = (1U << 2) | PF_RELAY, - // Can't be banned for misbehavior - PF_NOBAN = (1U << 4), + // Allow getheaders during IBD and block-download after maxuploadtarget limit + PF_DOWNLOAD = (1U << 6), + // Can't be banned/disconnected/discouraged for 
misbehavior + PF_NOBAN = (1U << 4) | PF_DOWNLOAD, // Can query the mempool PF_MEMPOOL = (1U << 5), + // Can request addrs without hitting a privacy-preserving cache + PF_ADDR = (1U << 7), // True if the user did not specifically set fine grained permissions PF_ISIMPLICIT = (1U << 31), - PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL, + PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD | PF_ADDR, }; class NetPermissions diff --git a/src/net_processing.cpp b/src/net_processing.cpp index b5912e18a7..3e06f1fff0 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -13,10 +13,9 @@ #include <consensus/validation.h> #include <hash.h> #include <index/blockfilterindex.h> -#include <validation.h> #include <merkleblock.h> -#include <netmessagemaker.h> #include <netbase.h> +#include <netmessagemaker.h> #include <policy/fees.h> #include <policy/policy.h> #include <primitives/block.h> @@ -26,22 +25,22 @@ #include <scheduler.h> #include <tinyformat.h> #include <txmempool.h> -#include <util/system.h> +#include <util/check.h> // For NDEBUG compile time check #include <util/strencodings.h> +#include <util/system.h> +#include <validation.h> #include <memory> #include <typeinfo> -#if defined(NDEBUG) -# error "Bitcoin cannot be compiled without assertions." -#endif - /** Expiration time for orphan transactions in seconds */ static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60; /** Minimum time between orphan transactions expire time checks in seconds */ static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60; /** How long to cache transactions in mapRelay for normal relay */ -static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60}; +static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME = std::chrono::minutes{15}; +/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */ +static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2}; /** Headers download timeout expressed in microseconds * Timeout = base + per_header * (expected number of headers) */ static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes @@ -66,8 +65,8 @@ static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; /// Age after which a block is considered historical for purposes of rate /// limiting block relay. Set to one week, denominated in seconds. static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; -/** Time between pings automatically sent out for latency probing and keepalive (in seconds). 
*/ -static const int PING_INTERVAL = 2 * 60; +/** Time between pings automatically sent out for latency probing and keepalive */ +static constexpr std::chrono::minutes PING_INTERVAL{2}; /** The maximum number of entries in a locator */ static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of entries in an 'inv' protocol message */ @@ -76,6 +75,8 @@ static const unsigned int MAX_INV_SZ = 50000; static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100; /** Maximum number of announced transactions from a peer */ static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ; +/** How many microseconds to delay requesting transactions via txids, if we have wtxid-relaying peers */ +static constexpr std::chrono::microseconds TXID_RELAY_DELAY{std::chrono::seconds{2}}; /** How many microseconds to delay requesting transactions from inbound peers */ static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}}; /** How long to wait (in microseconds) before downloading a transaction from an additional peer */ @@ -120,11 +121,20 @@ static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24}; /** Average delay between peer address broadcasts */ static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30}; /** Average delay between trickled inventory transmissions in seconds. - * Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */ + * Blocks and peers with noban permission bypass this, outbound peers get half this delay. */ static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5; -/** Maximum number of inventory items to send per transmission. +/** Maximum rate of inventory items to send per second. * Limits the impact of low-fee transaction floods. */ -static constexpr unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL; +static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7; +/** Maximum number of inventory items to send per transmission. */ +static constexpr unsigned int INVENTORY_BROADCAST_MAX = INVENTORY_BROADCAST_PER_SECOND * INVENTORY_BROADCAST_INTERVAL; +/** The number of most recently announced transactions a peer can request. */ +static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500; +/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically + * relayed before unconditional relay from the mempool kicks in. This is only a + * lower bound, and it should be larger to account for higher inv rate to outbound + * peers, and random variations in the broadcast mechanism. */ +static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low"); /** Average delay between feefilter broadcasts in seconds. */ static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60; /** Maximum feefilter broadcast delay after significant change. */ @@ -133,6 +143,8 @@ static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60; static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */ static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; +/** the maximum percentage of addresses from our addrman to return in response to a getaddr message. */ +static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23; struct COrphanTx { // When modifying, adapt the copy of this definition in tests/DoS_tests. 
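
The static_assert above reduces to simple arithmetic: at INVENTORY_BROADCAST_PER_SECOND = 7 invs per second, the two-minute UNCONDITIONAL_RELAY_DELAY window covers at most 7 * 120 = 840 announcements, well under the 3500-entry m_recently_announced_invs filter introduced later in this diff. A standalone restatement of that bound, a sketch built only from the constants in this patch:

// Sketch only: worked form of the bound checked by the static_assert above.
#include <chrono>

static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2};

// 7 inv/s over a 120 s delay window is at most 840 announcements...
static_assert(INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1} == 840,
              "worked value of the relay window");
// ...which fits comfortably in the 3500-entry rolling bloom filter.
static_assert(INVENTORY_MAX_RECENT_RELAY >= 840,
              "recently-announced filter covers the unconditional relay delay");

As the original comment notes, 840 is only a lower bound; the extra headroom absorbs the higher inv rate to outbound peers and randomness in the broadcast schedule.
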
@@ -143,12 +155,10 @@ struct COrphanTx { }; RecursiveMutex g_cs_orphans; std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans); +std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUARDED_BY(g_cs_orphans); void EraseOrphansFor(NodeId peer); -/** Increase a node's misbehavior score. */ -void Misbehaving(NodeId nodeid, int howmuch, const std::string& message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main); - // Internal stuff namespace { /** Number of nodes with fSyncStarted. */ @@ -179,6 +189,21 @@ namespace { * million to make it highly unlikely for users to have issues with this * filter. * + * We typically only add wtxids to this filter. For non-segwit + * transactions, the txid == wtxid, so this only prevents us from + * re-downloading non-segwit transactions when communicating with + * non-wtxidrelay peers -- which is important for avoiding malleation + * attacks that could otherwise interfere with transaction relay from + * non-wtxidrelay peers. For communicating with wtxidrelay peers, having + * the reject filter store wtxids is exactly what we want to avoid + * redownload of a rejected transaction. + * + * In cases where we can tell that a segwit transaction will fail + * validation no matter the witness, we may add the txid of such + * transaction to the filter as well. This can be helpful when + * communicating with txid-relay peers or if we were to otherwise fetch a + * transaction via txid (eg in our orphan handling). + * * Memory used: 1.3 MB */ std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main); @@ -210,13 +235,16 @@ namespace { /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + /** Number of peers with wtxid relay. */ + int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + /** Number of outbound peers with m_chain_sync.m_protect. */ int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; /** When our tip was last updated. */ std::atomic<int64_t> g_last_tip_update(0); - /** Relay map */ + /** Relay map (txid or wtxid -> CTransactionRef) */ typedef std::map<uint256, CTransactionRef> MapRelay; MapRelay mapRelay GUARDED_BY(cs_main); /** Expiration-time ordered list of (expire time, relay map entry) pairs. */ @@ -252,8 +280,8 @@ struct CNodeState { bool fCurrentlyConnected; //! Accumulated misbehaviour score for this peer. int nMisbehavior; - //! Whether this peer should be disconnected and banned (unless whitelisted). - bool fShouldBan; + //! Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). + bool m_should_discourage; //! String name of this peer (debugging/logging purposes). const std::string name; //! The best known block we know this peer has announced. @@ -378,7 +406,7 @@ struct CNodeState { /* Track when to attempt download of announced transactions (process * time in micros -> txid) */ - std::multimap<std::chrono::microseconds, uint256> m_tx_process_time; + std::multimap<std::chrono::microseconds, GenTxid> m_tx_process_time; //! Store all the transactions a peer has recently announced std::set<uint256> m_tx_announced; @@ -398,13 +426,19 @@ struct CNodeState { //! Whether this peer is a manual connection bool m_is_manual_connection; + //! A rolling bloom filter of all announced tx CInvs to this peer. + CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001}; + + //! 
Whether this peer relays txs via wtxid + bool m_wtxid_relay{false}; + CNodeState(CAddress addrIn, std::string addrNameIn, bool is_inbound, bool is_manual) : address(addrIn), name(std::move(addrNameIn)), m_is_inbound(is_inbound), m_is_manual_connection (is_manual) { fCurrentlyConnected = false; nMisbehavior = 0; - fShouldBan = false; + m_should_discourage = false; pindexBestKnownBlock = nullptr; hashLastUnknownBlock.SetNull(); pindexLastCommonBlock = nullptr; @@ -425,6 +459,7 @@ struct CNodeState { fSupportsDesiredCmpctVersion = false; m_chain_sync = { 0, nullptr, false, false }; m_last_block_announcement = 0; + m_recently_announced_invs.reset(); } }; @@ -446,12 +481,12 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS nPreferredDownload -= state->fPreferredDownload; // Whether this node should be marked as a preferred download node. - state->fPreferredDownload = (!node.fInbound || node.HasPermission(PF_NOBAN)) && !node.fOneShot && !node.fClient; + state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient; nPreferredDownload += state->fPreferredDownload; } -static void PushNodeVersion(CNode& pnode, CConnman* connman, int64_t nTime) +static void PushNodeVersion(CNode& pnode, CConnman& connman, int64_t nTime) { // Note that pnode->GetLocalServices() is a reflection of the local // services we were offering when the CNode object was created for this @@ -465,7 +500,7 @@ static void PushNodeVersion(CNode& pnode, CConnman* connman, int64_t nTime) CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices)); CAddress addrMe = CAddress(CService(), nLocalNodeServices); - connman->PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe, + connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe, nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr)); if (fLogIPs) { @@ -576,7 +611,7 @@ static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIV * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by * removing the first element if necessary. */ -static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); CNodeState* nodestate = State(nodeid); @@ -592,20 +627,20 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connma return; } } - connman->ForNode(nodeid, [connman](CNode* pfrom){ + connman.ForNode(nodeid, [&connman](CNode* pfrom){ AssertLockHeld(cs_main); uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. 
- connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, nCMPCTBLOCKVersion](CNode* pnodeStop){ + connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){ AssertLockHeld(cs_main); - connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); + connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } - connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); + connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); return true; }); @@ -724,34 +759,34 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec } } -void EraseTxRequest(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void EraseTxRequest(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - g_already_asked_for.erase(txid); + g_already_asked_for.erase(gtxid.GetHash()); } -std::chrono::microseconds GetTxRequestTime(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +std::chrono::microseconds GetTxRequestTime(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - auto it = g_already_asked_for.find(txid); + auto it = g_already_asked_for.find(gtxid.GetHash()); if (it != g_already_asked_for.end()) { return it->second; } return {}; } -void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void UpdateTxRequestTime(const GenTxid& gtxid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - auto it = g_already_asked_for.find(txid); + auto it = g_already_asked_for.find(gtxid.GetHash()); if (it == g_already_asked_for.end()) { - g_already_asked_for.insert(std::make_pair(txid, request_time)); + g_already_asked_for.insert(std::make_pair(gtxid.GetHash(), request_time)); } else { g_already_asked_for.update(it, request_time); } } -std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +std::chrono::microseconds CalculateTxGetDataTime(const GenTxid& gtxid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::chrono::microseconds process_time; - const auto last_request_time = GetTxRequestTime(txid); + const auto last_request_time = GetTxRequestTime(gtxid); // First time requesting this tx if (last_request_time.count() == 0) { process_time = current_time; @@ -764,26 +799,29 @@ std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chron // We delay processing announcements from inbound peers if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY; + // We delay processing announcements from peers that use txid-relay (instead of wtxid) + if (use_txid_delay) process_time += TXID_RELAY_DELAY; + return process_time; } -void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void RequestTx(CNodeState* state, const GenTxid& gtxid, std::chrono::microseconds current_time) 
EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState::TxDownloadState& peer_download_state = state->m_tx_download; if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS || peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS || - peer_download_state.m_tx_announced.count(txid)) { + peer_download_state.m_tx_announced.count(gtxid.GetHash())) { // Too many queued announcements from this peer, or we already have // this announcement return; } - peer_download_state.m_tx_announced.insert(txid); + peer_download_state.m_tx_announced.insert(gtxid.GetHash()); // Calculate the time to try requesting this transaction. Use // fPreferredDownload as a proxy for outbound peers. - const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload); + const auto process_time = CalculateTxGetDataTime(gtxid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0); - peer_download_state.m_tx_process_time.emplace(process_time, txid); + peer_download_state.m_tx_process_time.emplace(process_time, gtxid); } } // namespace @@ -797,35 +835,29 @@ void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) if (state) state->m_last_block_announcement = time_in_seconds; } -// Returns true for outbound peers, excluding manual connections, feelers, and -// one-shots. -static bool IsOutboundDisconnectionCandidate(const CNode& node) -{ - return !(node.fInbound || node.m_manual_connection || node.fFeeler || node.fOneShot); -} - void PeerLogicValidation::InitializeNode(CNode *pnode) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); NodeId nodeid = pnode->GetId(); { LOCK(cs_main); - mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound, pnode->m_manual_connection)); + mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->IsInboundConn(), pnode->IsManualConn())); } - if(!pnode->fInbound) - PushNodeVersion(*pnode, connman, GetTime()); + if(!pnode->IsInboundConn()) + PushNodeVersion(*pnode, *connman, GetTime()); } void PeerLogicValidation::ReattemptInitialBroadcast(CScheduler& scheduler) const { - std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); + std::map<uint256, uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); - for (const uint256& txid : unbroadcast_txids) { + for (const auto& elem : unbroadcast_txids) { // Sanity check: all unbroadcast txns should exist in the mempool - if (m_mempool.exists(txid)) { - RelayTransaction(txid, *connman); + if (m_mempool.exists(elem.first)) { + LOCK(cs_main); + RelayTransaction(elem.first, elem.second, *connman); } else { - m_mempool.RemoveUnbroadcastTx(txid, true); + m_mempool.RemoveUnbroadcastTx(elem.first, true); } } @@ -857,6 +889,8 @@ void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTim assert(nPeersWithValidatedDownloads >= 0); g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(g_outbound_peers_with_protect_from_disconnect >= 0); + g_wtxid_relay_peers -= state->m_wtxid_relay; + assert(g_wtxid_relay_peers >= 0); mapNodeState.erase(nodeid); @@ -866,6 +900,7 @@ void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTim assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); 
assert(g_outbound_peers_with_protect_from_disconnect == 0); + assert(g_wtxid_relay_peers == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } @@ -924,6 +959,8 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()}); assert(ret.second); g_orphan_list.push_back(ret.first); + // Allow for lookups in the orphan pool by wtxid, as well as txid + g_orphans_by_wtxid.emplace(tx->GetWitnessHash(), ret.first); for (const CTxIn& txin : tx->vin) { mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); } @@ -960,6 +997,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) it_last->second.list_pos = old_pos; } g_orphan_list.pop_back(); + g_orphans_by_wtxid.erase(it->second.tx->GetWitnessHash()); mapOrphanTransactions.erase(it); return 1; @@ -1019,30 +1057,29 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) } /** - * Mark a misbehaving peer to be banned depending upon the value of `-banscore`. + * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node + * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter. */ -void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - if (howmuch == 0) - return; + assert(howmuch > 0); - CNodeState *state = State(pnode); - if (state == nullptr) - return; + CNodeState* const state = State(pnode); + if (state == nullptr) return; state->nMisbehavior += howmuch; - int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); - std::string message_prefixed = message.empty() ? "" : (": " + message); - if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) + const std::string message_prefixed = message.empty() ? "" : (": " + message); + if (state->nMisbehavior >= DISCOURAGEMENT_THRESHOLD && state->nMisbehavior - howmuch < DISCOURAGEMENT_THRESHOLD) { - LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed); - state->fShouldBan = true; - } else - LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed); + LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, state->nMisbehavior - howmuch, state->nMisbehavior, message_prefixed); + state->m_should_discourage = true; + } else { + LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, state->nMisbehavior - howmuch, state->nMisbehavior, message_prefixed); + } } /** - * Potentially ban a node based on the contents of a BlockValidationState object + * Potentially mark a node discouraged based on the contents of a BlockValidationState object * * @param[in] via_compact_block this bool is passed in because net_processing should * punish peers differently depending on whether the data was provided in a compact @@ -1072,7 +1109,7 @@ static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& s break; } - // Ban outbound (but not inbound) peers if on an invalid chain. + // Discourage outbound (but not inbound) peers if on an invalid chain. 
// Exempt HB compact block peers and manual connections. if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) { Misbehaving(nodeid, 100, message); @@ -1107,7 +1144,7 @@ static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& s } /** - * Potentially ban a node based on the contents of a TxValidationState object + * Potentially disconnect and discourage a node based on the contents of a TxValidationState object * * @return Returns true if the peer was punished (probably disconnected) */ @@ -1125,10 +1162,12 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, } // Conflicting (but not necessarily invalid) data or different policy: case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE: + case TxValidationResult::TX_INPUTS_NOT_STANDARD: case TxValidationResult::TX_NOT_STANDARD: case TxValidationResult::TX_MISSING_INPUTS: case TxValidationResult::TX_PREMATURE_SPEND: case TxValidationResult::TX_WITNESS_MUTATED: + case TxValidationResult::TX_WITNESS_STRIPPED: case TxValidationResult::TX_CONFLICT: case TxValidationResult::TX_MEMPOOL_POLICY: break; @@ -1169,14 +1208,15 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); // Blocks don't typically have more than 4000 transactions, so this should - // be at least six blocks (~1 hr) worth of transactions that we can store. + // be at least six blocks (~1 hr) worth of transactions that we can store, + // inserting both a txid and wtxid for every observed transaction. // If the number of transactions appearing in a block goes up, or if we are // seeing getdata requests more than an hour after initial announcement, we // can increase this number. // The false positive rate of 1/1M should come out to less than 1 // transaction per day that would be inadvertently ignored (which is the // same probability that we have in the reject filter). - g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001)); + g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); const Consensus::Params& consensusParams = Params().GetConsensus(); // Stale tip checking and peer eviction are on two different timers, but we @@ -1232,6 +1272,9 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb LOCK(g_cs_recent_confirmed_transactions); for (const auto& ptx : pblock->vtx) { g_recent_confirmed_transactions->insert(ptx->GetHash()); + if (ptx->GetHash() != ptx->GetWitnessHash()) { + g_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); + } } } } @@ -1328,9 +1371,10 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB } // Relay inventory, but don't relay old inventory during initial block download. connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) { + LOCK(pnode->cs_inventory); if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) { for (const uint256& hash : reverse_iterate(vHashes)) { - pnode->PushBlockHash(hash); + pnode->vBlockHashesToAnnounce.push_back(hash); } } }); @@ -1339,7 +1383,7 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB } /** - * Handle invalid block rejection and consequent peer banning, maintain which + * Handle invalid block rejection and consequent peer discouragement, maintain which * peers announce compact blocks. 
*/ void PeerLogicValidation::BlockChecked(const CBlock& block, const BlockValidationState& state) { @@ -1365,7 +1409,7 @@ void PeerLogicValidation::BlockChecked(const CBlock& block, const BlockValidatio !::ChainstateActive().IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { - MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman); + MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman); } } if (it != mapBlockSource.end()) @@ -1384,6 +1428,7 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO { case MSG_TX: case MSG_WITNESS_TX: + case MSG_WTX: { assert(recentRejects); if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) @@ -1398,7 +1443,11 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO { LOCK(g_cs_orphans); - if (mapOrphanTransactions.count(inv.hash)) return true; + if (!inv.IsMsgWtx() && mapOrphanTransactions.count(inv.hash)) { + return true; + } else if (inv.IsMsgWtx() && g_orphans_by_wtxid.count(inv.hash)) { + return true; + } } { @@ -1406,8 +1455,7 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO if (g_recent_confirmed_transactions->contains(inv.hash)) return true; } - return recentRejects->contains(inv.hash) || - mempool.exists(inv.hash); + return recentRejects->contains(inv.hash) || mempool.exists(ToGenTxid(inv)); } case MSG_BLOCK: case MSG_WITNESS_BLOCK: @@ -1417,11 +1465,17 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO return true; } -void RelayTransaction(const uint256& txid, const CConnman& connman) +void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) { - connman.ForEachNode([&txid](CNode* pnode) + connman.ForEachNode([&txid, &wtxid](CNode* pnode) { - pnode->PushTxInventory(txid); + AssertLockHeld(cs_main); + CNodeState &state = *State(pnode->GetId()); + if (state.m_wtxid_relay) { + pnode->PushTxInventory(wtxid); + } else { + pnode->PushTxInventory(txid); + } }); } @@ -1440,7 +1494,7 @@ static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& assert(nRelayNodes <= best.size()); auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) { - if (pnode->nVersion >= CADDR_TIME_VERSION && pnode->IsAddrRelayPeer()) { + if (pnode->IsAddrRelayPeer()) { uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize(); for (unsigned int i = 0; i < nRelayNodes; i++) { if (hashKey > best[i].first) { @@ -1461,7 +1515,7 @@ static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); } -void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, const CInv& inv, CConnman* connman) +void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, const CInv& inv, CConnman& connman) { bool send = false; std::shared_ptr<const CBlock> a_recent_block; @@ -1509,9 +1563,9 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); // disconnect node in case we have reached the outbound limit for serving historical blocks if (send && - connman->OutboundTargetReached(true) && + connman.OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && - 
!pfrom.HasPermission(PF_NOBAN) // never disconnect nodes with the noban permission + !pfrom.HasPermission(PF_DOWNLOAD) // nodes with the download permission may exceed target ) { LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId()); @@ -1543,7 +1597,7 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) { assert(!"cannot load block from disk"); } - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data))); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data))); // Don't set pblock as we've sent the block } else { // Send block from disk @@ -1554,9 +1608,9 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c } if (pblock) { if (inv.type == MSG_BLOCK) - connman->PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock)); + connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock)); else if (inv.type == MSG_WITNESS_BLOCK) - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); else if (inv.type == MSG_FILTERED_BLOCK) { bool sendMerkleBlock = false; @@ -1569,7 +1623,7 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c } } if (sendMerkleBlock) { - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see // This avoids hurting performance by pointlessly requiring a round-trip // Note that there is currently no way for a node to request any single transactions we didn't send here - @@ -1578,7 +1632,7 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c // however we MUST always provide at least what the remote peer needs typedef std::pair<unsigned int, uint256> PairType; for (PairType& pair : merkleBlock.vMatchedTxn) - connman->PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first])); + connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first])); } // else // no response @@ -1593,13 +1647,13 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c int nSendFlags = fPeerWantsWitness ? 
0 : SERIALIZE_TRANSACTION_NO_WITNESS; if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) { if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { - connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block)); + connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block)); } else { CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness); - connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); + connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } } else { - connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); + connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); } } } @@ -1607,49 +1661,46 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c // Trigger the peer node to send a getblocks request for the next batch of inventory if (inv.hash == pfrom.hashContinue) { - // Bypass PushBlockInventory, this must send even if redundant, + // Send immediately. This must send even if redundant, // and we want it right after the last block so they don't // wait for other stuff first. std::vector<CInv> vInv; vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash())); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv)); pfrom.hashContinue.SetNull(); } } } //! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). -CTransactionRef static FindTxForGetData(CNode& peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds longlived_mempool_time) LOCKS_EXCLUDED(cs_main) +CTransactionRef static FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) { - // Check if the requested transaction is so recent that we're just - // about to announce it to the peer; if so, they certainly shouldn't - // know we already have it. - { - LOCK(peer.m_tx_relay->cs_tx_inventory); - if (peer.m_tx_relay->setInventoryTxToSend.count(txid)) return {}; + auto txinfo = mempool.info(gtxid); + if (txinfo.tx) { + // If a TX could have been INVed in reply to a MEMPOOL request, + // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request + // unconditionally. + if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) { + return std::move(txinfo.tx); + } } { LOCK(cs_main); - // Look up transaction in relay pool - auto mi = mapRelay.find(txid); - if (mi != mapRelay.end()) return mi->second; - } - - auto txinfo = mempool.info(txid); - if (txinfo.tx) { - // To protect privacy, do not answer getdata using the mempool when - // that TX couldn't have been INVed in reply to a MEMPOOL request, - // or when it's too recent to have expired from mapRelay. - if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= longlived_mempool_time) { - return txinfo.tx; + // Otherwise, the transaction must have been announced recently. 
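The rewritten FindTxForGetData above serves a transaction only if it could have been learned from a MEMPOOL reply, is older than UNCONDITIONAL_RELAY_DELAY, or was recently announced to that specific peer (in which case the relay pool is also consulted). Below is a simplified sketch of just that decision; times are plain seconds, the per-peer recently-announced filter is reduced to a std::set, and the constant's value is an assumption rather than taken from this diff.

    #include <cstdint>
    #include <set>
    #include <string>

    // Assumed to match net_processing's UNCONDITIONAL_RELAY_DELAY (2 minutes).
    constexpr int64_t UNCONDITIONAL_RELAY_DELAY = 2 * 60;

    // Returns true if a GETDATA for `hash` may be answered.
    // mempool_req: time of the peer's last MEMPOOL request (0 if none).
    // tx_time:     when the tx entered our mempool; now: current time.
    bool MayServeTx(const std::string& hash, bool in_mempool, int64_t tx_time,
                    int64_t mempool_req, int64_t now,
                    const std::set<std::string>& recently_announced)
    {
        if (in_mempool) {
            // Old enough, or already exposed via a MEMPOOL reply: serve unconditionally.
            if ((mempool_req != 0 && tx_time <= mempool_req) ||
                tx_time <= now - UNCONDITIONAL_RELAY_DELAY) {
                return true;
            }
        }
        // Otherwise only serve what we actually announced to this peer recently
        // (the real code then looks the tx up in the mempool or in mapRelay).
        return recently_announced.count(hash) > 0;
    }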
+ if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) { + // If it was, it can be relayed from either the mempool... + if (txinfo.tx) return std::move(txinfo.tx); + // ... or the relay pool. + auto mi = mapRelay.find(gtxid.GetHash()); + if (mi != mapRelay.end()) return mi->second; } } return {}; } -void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnman* connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main) +void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main) { AssertLockNotHeld(cs_main); @@ -1657,8 +1708,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm std::vector<CInv> vNotFound; const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); - // mempool entries added before this time have likely expired from mapRelay - const std::chrono::seconds longlived_mempool_time = GetTime<std::chrono::seconds>() - RELAY_TX_CACHE_TIME; + const std::chrono::seconds now = GetTime<std::chrono::seconds>(); // Get last mempool request time const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min(); @@ -1666,7 +1716,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm // Process as many TX items from the front of the getdata queue as // possible, since they're common and it's efficient to batch process // them. - while (it != pfrom.vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) { + while (it != pfrom.vRecvGetData.end() && it->IsGenTxMsg()) { if (interruptMsgProc) return; // The send buffer provides backpressure. If there's no space in // the buffer, pause processing until the next call. @@ -1679,11 +1729,34 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm continue; } - CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, mempool_req, longlived_mempool_time); + CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now); if (tx) { - int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); - connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); - mempool.RemoveUnbroadcastTx(inv.hash); + // WTX and WITNESS_TX imply we serialize with witness + int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); + connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); + mempool.RemoveUnbroadcastTx(tx->GetHash()); + // As we're going to send tx, make sure its unconfirmed parents are made requestable. + std::vector<uint256> parent_ids_to_add; + { + LOCK(mempool.cs); + auto txiter = mempool.GetIter(tx->GetHash()); + if (txiter) { + const CTxMemPool::setEntries& parents = mempool.GetMemPoolParents(*txiter); + parent_ids_to_add.reserve(parents.size()); + for (CTxMemPool::txiter parent_iter : parents) { + if (parent_iter->GetTime() > now - UNCONDITIONAL_RELAY_DELAY) { + parent_ids_to_add.push_back(parent_iter->GetTx().GetHash()); + } + } + } + } + for (const uint256& parent_txid : parent_ids_to_add) { + // Relaying a transaction with a recent but unconfirmed parent. 
+ if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) { + LOCK(cs_main); + State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid); + } + } } else { vNotFound.push_back(inv); } @@ -1717,7 +1790,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm // In normal operation, we often send NOTFOUND messages for parents of // transactions that we relay; if a peer is missing a parent, they may // assume we have them and request the parents from us. - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); } } @@ -1729,12 +1802,12 @@ static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_ma return nFetchFlags; } -inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode& pfrom, CConnman* connman) { +inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode& pfrom, CConnman& connman) { BlockTransactions resp(req); for (size_t i = 0; i < req.indexes.size(); i++) { if (req.indexes[i] >= block.vtx.size()) { LOCK(cs_main); - Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom.GetId())); + Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices"); return; } resp.txn[i] = block.vtx[req.indexes[i]]; @@ -1742,10 +1815,10 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac LOCK(cs_main); const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS; - connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); + connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); } -static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateManager& chainman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block) +static void ProcessHeadersMessage(CNode& pfrom, CConnman& connman, ChainstateManager& chainman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block) { const CNetMsgMaker msgMaker(pfrom.GetSendVersion()); size_t nCount = headers.size(); @@ -1771,7 +1844,7 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateMan // nUnconnectingHeaders gets reset back to 0. 
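When a HEADERS message does not connect to our block index (handled just below), the node replies with a fresh GETHEADERS from pindexBestHeader and counts the event in nUnconnectingHeaders, applying Misbehaving(20) on every MAX_UNCONNECTING_HEADERS-th occurrence. The toy program below illustrates only that counting policy; the constant's value and the PeerState type are assumptions for illustration.

    #include <iostream>

    constexpr int MAX_UNCONNECTING_HEADERS = 10; // assumed to match net_processing's constant

    struct PeerState {
        int nUnconnectingHeaders = 0;
    };

    // Returns the misbehavior score to assign for this unconnecting HEADERS
    // message; the caller would also re-request headers from its best known
    // header, as the real code does.
    int OnUnconnectingHeaders(PeerState& state)
    {
        ++state.nUnconnectingHeaders;
        if (state.nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
            return 20; // mirrors Misbehaving(pfrom.GetId(), 20, ...)
        }
        return 0;
    }

    int main()
    {
        PeerState peer;
        for (int i = 1; i <= 12; ++i) {
            const int penalty = OnUnconnectingHeaders(peer);
            if (penalty) std::cout << "headers msg " << i << ": penalize " << penalty << "\n";
        }
    }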
if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", headers[0].GetHash().ToString(), headers[0].hashPrevBlock.ToString(), @@ -1783,7 +1856,7 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateMan UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()); if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { - Misbehaving(pfrom.GetId(), 20); + Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders)); } return; } @@ -1836,7 +1909,7 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateMan // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue // from there instead. LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256())); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256())); } bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); @@ -1886,7 +1959,7 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateMan // In any case, we want to download using a compact block, not a regular one vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); } - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } } } @@ -1897,21 +1970,21 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateMan // headers to fetch from this peer. if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { // This peer has too little work on their headers chain to help - // us sync -- disconnect if using an outbound slot (unless - // whitelisted or addnode). + // us sync -- disconnect if it is an outbound disconnection + // candidate. // Note: We compare their tip to nMinimumChainWork (rather than // ::ChainActive().Tip()) because we won't start block download // until we have a headers chain that has at least // nMinimumChainWork, even if a peer has a chain past our tip, // as an anti-DoS measure. - if (IsOutboundDisconnectionCandidate(pfrom)) { + if (pfrom.IsOutboundOrBlockRelayConn()) { LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId()); pfrom.fDisconnect = true; } } } - if (!pfrom.fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) { + if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) { // If this is an outbound full-relay peer, check to see if we should protect // it from the bad/lagging chain logic. 
// Note that block-relay-only peers are already implicitly protected, so we @@ -1927,7 +2000,7 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateMan return; } -void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) +void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) { AssertLockHeld(cs_main); AssertLockHeld(g_cs_orphans); @@ -1951,7 +2024,7 @@ void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uin if (setMisbehaving.count(fromPeer)) continue; if (AcceptToMemoryPool(mempool, orphan_state, porphanTx, &removed_txn, false /* bypass_limits */, 0 /* nAbsurdFee */)) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); - RelayTransaction(orphanHash, *connman); + RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), connman); for (unsigned int i = 0; i < orphanTx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { @@ -1968,17 +2041,43 @@ void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uin if (MaybePunishNodeForTx(fromPeer, orphan_state)) { setMisbehaving.insert(fromPeer); } - LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString()); + LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n", + orphanHash.ToString(), + fromPeer, + orphan_state.ToString()); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString()); - if (!orphanTx.HasWitness() && orphan_state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) { - // Do not use rejection cache for witness transactions or - // witness-stripped transactions, as they can have been malleated. - // See https://github.com/bitcoin/bitcoin/issues/8279 for details. + if (orphan_state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { + // We can add the wtxid of this transaction to our reject filter. + // Do not add txids of witness transactions or witness-stripped + // transactions to the filter, as they can have been malleated; + // adding such txids to the reject filter would potentially + // interfere with relay of valid transactions from peers that + // do not support wtxid-based relay. See + // https://github.com/bitcoin/bitcoin/issues/8279 for details. + // We can remove this restriction (and always add wtxids to + // the filter even for witness stripped transactions) once + // wtxid-based relay is broadly deployed. + // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 + // for concerns around weakening security of unupgraded nodes + // if we start doing this too early. assert(recentRejects); - recentRejects->insert(orphanHash); + recentRejects->insert(orphanTx.GetWitnessHash()); + // If the transaction failed for TX_INPUTS_NOT_STANDARD, + // then we know that the witness was irrelevant to the policy + // failure, since this check depends only on the txid + // (the scriptPubKey being spent is covered by the txid). 
+ // Add the txid to the reject filter to prevent repeated + // processing of this transaction in the event that child + // transactions are later received (resulting in + // parent-fetching by txid via the orphan-handling logic). + if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) { + // We only add the txid if it differs from the wtxid, to + // avoid wasting entries in the rolling bloom filter. + recentRejects->insert(orphanTx.GetHash()); + } } EraseOrphanTx(orphanHash); done = true; @@ -2208,11 +2307,11 @@ void ProcessMessage( CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, - int64_t nTimeReceived, + const std::chrono::microseconds time_received, const CChainParams& chainparams, ChainstateManager& chainman, CTxMemPool& mempool, - CConnman* connman, + CConnman& connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc) { @@ -2229,7 +2328,7 @@ void ProcessMessage( if (pfrom.nVersion != 0) { LOCK(cs_main); - Misbehaving(pfrom.GetId(), 1); + Misbehaving(pfrom.GetId(), 1, "redundant version message"); return; } @@ -2248,11 +2347,11 @@ void ProcessMessage( vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; nSendVersion = std::min(nVersion, PROTOCOL_VERSION); nServices = ServiceFlags(nServiceInt); - if (!pfrom.fInbound) + if (!pfrom.IsInboundConn()) { - connman->SetServices(pfrom.addr, nServices); + connman.SetServices(pfrom.addr, nServices); } - if (!pfrom.fInbound && !pfrom.fFeeler && !pfrom.m_manual_connection && !HasAllDesirableServiceFlags(nServices)) + if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) { LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices)); pfrom.fDisconnect = true; @@ -2279,23 +2378,27 @@ void ProcessMessage( if (!vRecv.empty()) vRecv >> fRelay; // Disconnect if we connected to ourself - if (pfrom.fInbound && !connman->CheckIncomingNonce(nNonce)) + if (pfrom.IsInboundConn() && !connman.CheckIncomingNonce(nNonce)) { LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString()); pfrom.fDisconnect = true; return; } - if (pfrom.fInbound && addrMe.IsRoutable()) + if (pfrom.IsInboundConn() && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear - if (pfrom.fInbound) + if (pfrom.IsInboundConn()) PushNodeVersion(pfrom, connman, GetAdjustedTime()); - connman->PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); + if (nVersion >= WTXID_RELAY_VERSION) { + connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::WTXIDRELAY)); + } + + connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); pfrom.nServices = nServices; pfrom.SetAddrLocal(addrMe); @@ -2332,7 +2435,7 @@ void ProcessMessage( UpdatePreferredDownload(pfrom, State(pfrom.GetId())); } - if (!pfrom.fInbound && pfrom.IsAddrRelayPeer()) + if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer()) { // Advertise our address if (fListen && !::ChainstateActive().IsInitialBlockDownload()) @@ -2351,12 +2454,9 @@ void ProcessMessage( } // Get recent addresses - if (pfrom.fOneShot || pfrom.nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000) - { - connman->PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR)); - pfrom.fGetAddr = true; - } - connman->MarkAddressGood(pfrom.addr); + connman.PushMessage(&pfrom, 
CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR)); + pfrom.fGetAddr = true; + connman.MarkAddressGood(pfrom.addr); } std::string remoteAddr; @@ -2375,12 +2475,11 @@ void ProcessMessage( // If the peer is old enough to have the old alert system, send it the final alert. if (pfrom.nVersion <= 70012) { CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION); - connman->PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert)); + connman.PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert)); } // Feeler connections exist only to verify if address is online. - if (pfrom.fFeeler) { - assert(pfrom.fInbound == false); + if (pfrom.IsFeelerConn()) { pfrom.fDisconnect = true; } return; @@ -2389,7 +2488,7 @@ void ProcessMessage( if (pfrom.nVersion == 0) { // Must have a version message before anything else LOCK(cs_main); - Misbehaving(pfrom.GetId(), 1); + Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake"); return; } @@ -2400,7 +2499,7 @@ void ProcessMessage( { pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION)); - if (!pfrom.fInbound) { + if (!pfrom.IsInboundConn()) { // Mark this node as currently connected, so we update its timestamp later. LOCK(cs_main); State(pfrom.GetId())->fCurrentlyConnected = true; @@ -2415,7 +2514,7 @@ void ProcessMessage( // We send this to non-NODE NETWORK peers as well, because even // non-NODE NETWORK peers can announce blocks (such as pruning // nodes) - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); } if (pfrom.nVersion >= SHORT_IDS_BLOCKS_VERSION) { // Tell our peer we are willing to provide version 1 or 2 cmpctblocks @@ -2426,18 +2525,37 @@ void ProcessMessage( bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 2; if (pfrom.GetLocalServices() & NODE_WITNESS) - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); nCMPCTBLOCKVersion = 1; - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); } pfrom.fSuccessfullyConnected = true; return; } + // Feature negotiation of wtxidrelay should happen between VERSION and + // VERACK, to avoid relay problems from switching after a connection is up + if (msg_type == NetMsgType::WTXIDRELAY) { + if (pfrom.fSuccessfullyConnected) { + // Disconnect peers that send wtxidrelay message after VERACK; this + // must be negotiated between VERSION and VERACK. 
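The new WTXIDRELAY handler above enforces that wtxid relay is negotiated strictly between VERSION and VERACK: a WTXIDRELAY received after the handshake completes disconnects the peer, and otherwise the flag is only set for peers whose version is at least WTXID_RELAY_VERSION. A minimal sketch of that ordering rule follows; the version constant and the Peer struct are simplified assumptions, not the real CNode/CNodeState.

    constexpr int WTXID_RELAY_VERSION = 70016; // assumed value of the protocol version gate

    struct Peer {
        int nVersion = 0;
        bool fSuccessfullyConnected = false; // set once VERACK is processed
        bool m_wtxid_relay = false;
        bool fDisconnect = false;
    };

    // Handle an incoming WTXIDRELAY message, mirroring the ordering rules above.
    void OnWtxidRelay(Peer& peer)
    {
        if (peer.fSuccessfullyConnected) {
            // Too late: negotiation must happen between VERSION and VERACK.
            peer.fDisconnect = true;
            return;
        }
        if (peer.nVersion >= WTXID_RELAY_VERSION) {
            peer.m_wtxid_relay = true;
        }
    }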
+ pfrom.fDisconnect = true; + return; + } + if (pfrom.nVersion >= WTXID_RELAY_VERSION) { + LOCK(cs_main); + if (!State(pfrom.GetId())->m_wtxid_relay) { + State(pfrom.GetId())->m_wtxid_relay = true; + g_wtxid_relay_peers++; + } + } + return; + } + if (!pfrom.fSuccessfullyConnected) { // Must have a verack message before anything else LOCK(cs_main); - Misbehaving(pfrom.GetId(), 1); + Misbehaving(pfrom.GetId(), 1, "non-verack message before version handshake"); return; } @@ -2445,13 +2563,10 @@ void ProcessMessage( std::vector<CAddress> vAddr; vRecv >> vAddr; - // Don't want addr from older versions unless seeding - if (pfrom.nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000) - return; if (!pfrom.IsAddrRelayPeer()) { return; } - if (vAddr.size() > 1000) + if (vAddr.size() > MAX_ADDR_TO_SEND) { LOCK(cs_main); Misbehaving(pfrom.GetId(), 20, strprintf("addr message size = %u", vAddr.size())); @@ -2476,21 +2591,24 @@ void ProcessMessage( if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) addr.nTime = nNow - 5 * 24 * 60 * 60; pfrom.AddAddressKnown(addr); - if (banman->IsBanned(addr)) continue; // Do not process banned addresses beyond remembering we received them + if (banman && (banman->IsDiscouraged(addr) || banman->IsBanned(addr))) { + // Do not process banned/discouraged addresses beyond remembering we received them + continue; + } bool fReachable = IsReachable(addr); if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes - RelayAddress(addr, fReachable, *connman); + RelayAddress(addr, fReachable, connman); } // Do not store addresses outside our network if (fReachable) vAddrOk.push_back(addr); } - connman->AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60); + connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60); if (vAddr.size() < 1000) pfrom.fGetAddr = false; - if (pfrom.fOneShot) + if (pfrom.IsAddrFetchConn()) pfrom.fDisconnect = true; return; } @@ -2538,9 +2656,10 @@ void ProcessMessage( // block-relay-only peer bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr); - // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true - if (pfrom.HasPermission(PF_RELAY)) + // Allow peers with relay permission to send data other than blocks in blocks only mode + if (pfrom.HasPermission(PF_RELAY)) { fBlocksOnly = false; + } LOCK(cs_main); @@ -2553,10 +2672,19 @@ void ProcessMessage( if (interruptMsgProc) return; + // Ignore INVs that don't match wtxidrelay setting. + // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. + // This is fine as no INV messages are involved in that process. + if (State(pfrom.GetId())->m_wtxid_relay) { + if (inv.IsMsgTx()) continue; + } else { + if (inv.IsMsgWtx()) continue; + } + bool fAlreadyHave = AlreadyHave(inv, mempool); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? 
"have" : "new", pfrom.GetId()); - if (inv.type == MSG_TX) { + if (inv.IsMsgTx()) { inv.type |= nFetchFlags; } @@ -2571,19 +2699,19 @@ void ProcessMessage( best_block = &inv.hash; } } else { - pfrom.AddInventoryKnown(inv); + pfrom.AddKnownTx(inv.hash); if (fBlocksOnly) { LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId()); pfrom.fDisconnect = true; return; - } else if (!fAlreadyHave && !fImporting && !fReindex && !::ChainstateActive().IsInitialBlockDownload()) { - RequestTx(State(pfrom.GetId()), inv.hash, current_time); + } else if (!fAlreadyHave && !chainman.ActiveChainstate().IsInitialBlockDownload()) { + RequestTx(State(pfrom.GetId()), ToGenTxid(inv), current_time); } } } if (best_block != nullptr) { - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block)); LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId()); } @@ -2666,7 +2794,7 @@ void ProcessMessage( LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } - pfrom.PushBlockInventory(pindex->GetBlockHash()); + WITH_LOCK(pfrom.cs_inventory, pfrom.vInventoryBlockToSend.push_back(pindex->GetBlockHash())); if (--nLimit <= 0) { // When this block is requested, we'll send an inv that'll @@ -2740,7 +2868,7 @@ void ProcessMessage( } LOCK(cs_main); - if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_NOBAN)) { + if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_DOWNLOAD)) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId()); return; } @@ -2791,7 +2919,7 @@ void ProcessMessage( // will re-announce the new block via headers (or compact blocks again) // in the SendMessages logic. nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip(); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); return; } @@ -2810,26 +2938,52 @@ void ProcessMessage( vRecv >> ptx; const CTransaction& tx = *ptx; - CInv inv(MSG_TX, tx.GetHash()); - pfrom.AddInventoryKnown(inv); + const uint256& txid = ptx->GetHash(); + const uint256& wtxid = ptx->GetWitnessHash(); LOCK2(cs_main, g_cs_orphans); + CNodeState* nodestate = State(pfrom.GetId()); + + const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid; + pfrom.AddKnownTx(hash); + if (nodestate->m_wtxid_relay && txid != wtxid) { + // Insert txid into filterInventoryKnown, even for + // wtxidrelay peers. This prevents re-adding of + // unconfirmed parents to the recently_announced + // filter, when a child tx is requested. See + // ProcessGetData(). 
+ pfrom.AddKnownTx(txid); + } + TxValidationState state; - CNodeState* nodestate = State(pfrom.GetId()); - nodestate->m_tx_download.m_tx_announced.erase(inv.hash); - nodestate->m_tx_download.m_tx_in_flight.erase(inv.hash); - EraseTxRequest(inv.hash); + for (const GenTxid& gtxid : {GenTxid(false, txid), GenTxid(true, wtxid)}) { + nodestate->m_tx_download.m_tx_announced.erase(gtxid.GetHash()); + nodestate->m_tx_download.m_tx_in_flight.erase(gtxid.GetHash()); + EraseTxRequest(gtxid); + } std::list<CTransactionRef> lRemovedTxn; - if (!AlreadyHave(inv, mempool) && + // We do the AlreadyHave() check using wtxid, rather than txid - in the + // absence of witness malleation, this is strictly better, because the + // recent rejects filter may contain the wtxid but rarely contains + // the txid of a segwit transaction that has been rejected. + // In the presence of witness malleation, it's possible that by only + // doing the check with wtxid, we could overlook a transaction which + // was confirmed with a different witness, or exists in our mempool + // with a different witness, but this has limited downside: + // mempool validation does its own lookup of whether we have the txid + // already; and an adversary can already relay us old transactions + // (older than our recency filter) if trying to DoS us, without any need + // for witness malleation. + if (!AlreadyHave(CInv(MSG_WTX, wtxid), mempool) && AcceptToMemoryPool(mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) { mempool.check(&::ChainstateActive().CoinsTip()); - RelayTransaction(tx.GetHash(), *connman); + RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), connman); for (unsigned int i = 0; i < tx.vout.size(); i++) { - auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(inv.hash, i)); + auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto& elem : it_by_prev->second) { pfrom.orphan_work_set.insert(elem->first); @@ -2850,8 +3004,19 @@ void ProcessMessage( else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected + + // Deduplicate parent txids, so that we don't have to loop over + // the same parent txid more than once down below. + std::vector<uint256> unique_parents; + unique_parents.reserve(tx.vin.size()); for (const CTxIn& txin : tx.vin) { - if (recentRejects->contains(txin.prevout.hash)) { + // We start with all parents, and then remove duplicates below. + unique_parents.push_back(txin.prevout.hash); + } + std::sort(unique_parents.begin(), unique_parents.end()); + unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); + for (const uint256& parent_txid : unique_parents) { + if (recentRejects->contains(parent_txid)) { fRejectedParents = true; break; } @@ -2860,10 +3025,15 @@ void ProcessMessage( uint32_t nFetchFlags = GetFetchFlags(pfrom); const auto current_time = GetTime<std::chrono::microseconds>(); - for (const CTxIn& txin : tx.vin) { - CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); - pfrom.AddInventoryKnown(_inv); - if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time); + for (const uint256& parent_txid : unique_parents) { + // Here, we only have the txid (and not wtxid) of the + // inputs, so we only request in txid mode, even for + // wtxidrelay peers. 
+ // Eventually we should replace this with an improved + // protocol for getting all unconfirmed parents. + CInv _inv(MSG_TX | nFetchFlags, parent_txid); + pfrom.AddKnownTx(parent_txid); + if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), ToGenTxid(_inv), current_time); } AddOrphanTx(ptx, pfrom.GetId()); @@ -2877,15 +3047,41 @@ void ProcessMessage( LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); // We will continue to reject this tx since it has rejected // parents so avoid re-requesting it from other peers. + // Here we add both the txid and the wtxid, as we know that + // regardless of what witness is provided, we will not accept + // this, so we don't need to allow for redownload of this txid + // from any of our non-wtxidrelay peers. recentRejects->insert(tx.GetHash()); + recentRejects->insert(tx.GetWitnessHash()); } } else { - if (!tx.HasWitness() && state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) { - // Do not use rejection cache for witness transactions or - // witness-stripped transactions, as they can have been malleated. - // See https://github.com/bitcoin/bitcoin/issues/8279 for details. + if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { + // We can add the wtxid of this transaction to our reject filter. + // Do not add txids of witness transactions or witness-stripped + // transactions to the filter, as they can have been malleated; + // adding such txids to the reject filter would potentially + // interfere with relay of valid transactions from peers that + // do not support wtxid-based relay. See + // https://github.com/bitcoin/bitcoin/issues/8279 for details. + // We can remove this restriction (and always add wtxids to + // the filter even for witness stripped transactions) once + // wtxid-based relay is broadly deployed. + // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 + // for concerns around weakening security of unupgraded nodes + // if we start doing this too early. assert(recentRejects); - recentRejects->insert(tx.GetHash()); + recentRejects->insert(tx.GetWitnessHash()); + // If the transaction failed for TX_INPUTS_NOT_STANDARD, + // then we know that the witness was irrelevant to the policy + // failure, since this check depends only on the txid + // (the scriptPubKey being spent is covered by the txid). + // Add the txid to the reject filter to prevent repeated + // processing of this transaction in the event that child + // transactions are later received (resulting in + // parent-fetching by txid via the orphan-handling logic). + if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) { + recentRejects->insert(tx.GetHash()); + } if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } @@ -2894,15 +3090,15 @@ void ProcessMessage( } if (pfrom.HasPermission(PF_FORCERELAY)) { - // Always relay transactions received from whitelisted peers, even + // Always relay transactions received from peers with forcerelay permission, even // if they were already in the mempool, // allowing the node to function as a gateway for // nodes hidden behind it. 
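Both the orphan path and the main TX path add the wtxid of a rejected transaction to recentRejects, and add the txid as well only when the failure was TX_INPUTS_NOT_STANDARD (a failure independent of the witness) and the two hashes differ; witness-stripped transactions are excluded entirely because they may have been malleated. A small sketch of just that policy decision, with the result enum trimmed to the cases that matter here:

    #include <string>
    #include <vector>

    enum class TxValidationResult {
        TX_WITNESS_STRIPPED,
        TX_INPUTS_NOT_STANDARD,
        TX_OTHER_FAILURE, // stand-in for the remaining result codes
    };

    // Returns the hashes that should be inserted into the recent-rejects filter
    // for a transaction that failed validation, following the rules above.
    std::vector<std::string> HashesToReject(TxValidationResult result,
                                            const std::string& txid, const std::string& wtxid)
    {
        std::vector<std::string> out;
        if (result == TxValidationResult::TX_WITNESS_STRIPPED) {
            // Possibly malleated: cache nothing, allow re-download with a witness.
            return out;
        }
        out.push_back(wtxid); // always safe to reject this exact (witness-included) form again
        if (result == TxValidationResult::TX_INPUTS_NOT_STANDARD && wtxid != txid) {
            // The failure is determined by the txid alone, so cache it too and
            // avoid re-fetching by txid via the orphan-handling logic.
            out.push_back(txid);
        }
        return out;
    }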
if (!mempool.exists(tx.GetHash())) { - LogPrintf("Not relaying non-mempool transaction %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); + LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); } else { - LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); - RelayTransaction(tx.GetHash(), *connman); + LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); + RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), connman); } } } @@ -2927,8 +3123,7 @@ void ProcessMessage( // peer simply for relaying a tx that our recentRejects has caught, // regardless of false positives. - if (state.IsInvalid()) - { + if (state.IsInvalid()) { LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), pfrom.GetId(), state.ToString()); @@ -2956,7 +3151,7 @@ void ProcessMessage( if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers if (!::ChainstateActive().IsInitialBlockDownload()) - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); return; } @@ -3017,7 +3212,7 @@ void ProcessMessage( // so we just grab the block via normal getdata std::vector<CInv> vInv(1); vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash()); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return; } @@ -3051,14 +3246,14 @@ void ProcessMessage( PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist - Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us invalid compact block\n", pfrom.GetId())); + MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect + Misbehaving(pfrom.GetId(), 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { // Duplicate txindexes, the block is now in-flight, so just request it std::vector<CInv> vInv(1); vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash()); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } @@ -3075,7 +3270,7 @@ void ProcessMessage( fProcessBLOCKTXN = true; } else { req.blockhash = pindex->GetBlockHash(); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); } } else { // This block is either already in flight from a different @@ -3101,7 +3296,7 @@ void ProcessMessage( // mempool will probably be useless - request the block normally std::vector<CInv> vInv(1); vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash()); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } else { // If this was an 
announce-cmpctblock, we want the same treatment as a header message @@ -3111,14 +3306,14 @@ void ProcessMessage( } // cs_main if (fProcessBLOCKTXN) - return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, chainman, mempool, connman, banman, interruptMsgProc); + return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, chainparams, chainman, mempool, connman, banman, interruptMsgProc); if (fRevertToHeaderProcessing) { // Headers received from HB compact block peers are permitted to be // relayed before full validation (see BIP 152), so we don't want to disconnect // the peer if the header turns out to be for an invalid block. // Note that if a peer tries to build on an invalid chain, that - // will be detected and the peer will be banned. + // will be detected and the peer will be disconnected/discouraged. return ProcessHeadersMessage(pfrom, connman, chainman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true); } @@ -3184,14 +3379,14 @@ void ProcessMessage( PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist - Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom.GetId())); + MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect + Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( std::vector<CInv> invs; invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash)); - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); } else { // Block is either okay, or possibly we received // READ_STATUS_CHECKBLOCK_FAILED. @@ -3204,7 +3399,7 @@ void ProcessMessage( // 3. the block is otherwise invalid (eg invalid coinbase, // block is too big, too many legacy sigops, etc). // So if CheckBlock failed, #3 is the only possibility. - // Under BIP 152, we don't DoS-ban unless proof of work is + // Under BIP 152, we don't discourage the peer unless proof of work is // invalid (we don't require all the stateless checks to have // been run). This is handled below, so just treat this as // though the block was successfully read, and rely on the @@ -3308,7 +3503,7 @@ void ProcessMessage( // to users' AddrMan and later request them by sending getaddr messages. // Making nodes which are behind NAT and can only make outgoing connections ignore // the getaddr message mitigates the attack. - if (!pfrom.fInbound) { + if (!pfrom.IsInboundConn()) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. 
peer=%d\n", pfrom.GetId()); return; } @@ -3326,12 +3521,15 @@ void ProcessMessage( pfrom.fSentAddr = true; pfrom.vAddrToSend.clear(); - std::vector<CAddress> vAddr = connman->GetAddresses(); + std::vector<CAddress> vAddr; + if (pfrom.HasPermission(PF_ADDR)) { + vAddr = connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); + } else { + vAddr = connman.GetAddresses(pfrom.addr.GetNetwork(), MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); + } FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { - if (!banman->IsBanned(addr)) { - pfrom.PushAddress(addr, insecure_rand); - } + pfrom.PushAddress(addr, insecure_rand); } return; } @@ -3347,7 +3545,7 @@ void ProcessMessage( return; } - if (connman->OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL)) + if (connman.OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL)) { if (!pfrom.HasPermission(PF_NOBAN)) { @@ -3380,13 +3578,13 @@ void ProcessMessage( // it, if the remote node sends a ping once per second and this node takes 5 // seconds to respond to each, the 5th ping the remote sends would appear to // return very quickly. - connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); + connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); } return; } if (msg_type == NetMsgType::PONG) { - int64_t pingUsecEnd = nTimeReceived; + const auto ping_end = time_received; uint64_t nonce = 0; size_t nAvail = vRecv.in_avail(); bool bPingFinished = false; @@ -3400,11 +3598,11 @@ void ProcessMessage( if (nonce == pfrom.nPingNonceSent) { // Matching pong received, this ping is no longer outstanding bPingFinished = true; - int64_t pingUsecTime = pingUsecEnd - pfrom.nPingUsecStart; - if (pingUsecTime > 0) { + const auto ping_time = ping_end - pfrom.m_ping_start.load(); + if (ping_time.count() > 0) { // Successful ping time measurement, replace previous - pfrom.nPingUsecTime = pingUsecTime; - pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), pingUsecTime); + pfrom.nPingUsecTime = count_microseconds(ping_time); + pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time)); } else { // This should never happen sProblem = "Timing mishap"; @@ -3453,7 +3651,7 @@ void ProcessMessage( { // There is no excuse for sending a too-large filter LOCK(cs_main); - Misbehaving(pfrom.GetId(), 100); + Misbehaving(pfrom.GetId(), 100, "too-large bloom filter"); } else if (pfrom.m_tx_relay != nullptr) { @@ -3487,7 +3685,7 @@ void ProcessMessage( } if (bad) { LOCK(cs_main); - Misbehaving(pfrom.GetId(), 100); + Misbehaving(pfrom.GetId(), 100, "bad filteradd message"); } return; } @@ -3520,17 +3718,17 @@ void ProcessMessage( } if (msg_type == NetMsgType::GETCFILTERS) { - ProcessGetCFilters(pfrom, vRecv, chainparams, *connman); + ProcessGetCFilters(pfrom, vRecv, chainparams, connman); return; } if (msg_type == NetMsgType::GETCFHEADERS) { - ProcessGetCFHeaders(pfrom, vRecv, chainparams, *connman); + ProcessGetCFHeaders(pfrom, vRecv, chainparams, connman); return; } if (msg_type == NetMsgType::GETCFCHECKPT) { - ProcessGetCFCheckPt(pfrom, vRecv, chainparams, *connman); + ProcessGetCFCheckPt(pfrom, vRecv, chainparams, connman); return; } @@ -3542,7 +3740,7 @@ void ProcessMessage( vRecv >> vInv; if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { for (CInv &inv : vInv) { - if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) { + if (inv.IsGenTxMsg()) { // If we receive a NOTFOUND message for a txid we requested, erase // it from our data structures for 
this peer. auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash); @@ -3564,31 +3762,49 @@ void ProcessMessage( return; } -bool PeerLogicValidation::CheckIfBanned(CNode& pnode) +/** Maybe disconnect a peer and discourage future connections from its address. + * + * @param[in] pnode The node to check. + * @return True if the peer was marked for disconnection in this function + */ +bool PeerLogicValidation::MaybeDiscourageAndDisconnect(CNode& pnode) { - AssertLockHeld(cs_main); - CNodeState &state = *State(pnode.GetId()); - - if (state.fShouldBan) { - state.fShouldBan = false; - if (pnode.HasPermission(PF_NOBAN)) - LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode.addr.ToString()); - else if (pnode.m_manual_connection) - LogPrintf("Warning: not punishing manually-connected peer %s!\n", pnode.addr.ToString()); - else if (pnode.addr.IsLocal()) { - // Disconnect but don't ban _this_ local node - LogPrintf("Warning: disconnecting but not banning local peer %s!\n", pnode.addr.ToString()); - pnode.fDisconnect = true; - } else { - // Disconnect and ban all nodes sharing the address - if (m_banman) { - m_banman->Ban(pnode.addr, BanReasonNodeMisbehaving); - } - connman->DisconnectNode(pnode.addr); - } + const NodeId peer_id{pnode.GetId()}; + { + LOCK(cs_main); + CNodeState& state = *State(peer_id); + + // There's nothing to do if the m_should_discourage flag isn't set + if (!state.m_should_discourage) return false; + + state.m_should_discourage = false; + } // cs_main + + if (pnode.HasPermission(PF_NOBAN)) { + // We never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag + LogPrintf("Warning: not punishing noban peer %d!\n", peer_id); + return false; + } + + if (pnode.IsManualConn()) { + // We never disconnect or discourage manual peers for bad behavior + LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id); + return false; + } + + if (pnode.addr.IsLocal()) { + // We disconnect local peers for bad behavior but don't discourage (since that would discourage + // all peers on the same local address) + LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id); + pnode.fDisconnect = true; return true; } - return false; + + // Normal case: Disconnect the peer and discourage all nodes sharing the address + LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id); + if (m_banman) m_banman->Discourage(pnode.addr); + connman->DisconnectNode(pnode.addr); + return true; } bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc) @@ -3605,12 +3821,12 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter bool fMoreWork = false; if (!pfrom->vRecvGetData.empty()) - ProcessGetData(*pfrom, chainparams, connman, m_mempool, interruptMsgProc); + ProcessGetData(*pfrom, chainparams, *connman, m_mempool, interruptMsgProc); if (!pfrom->orphan_work_set.empty()) { std::list<CTransactionRef> removed_txn; LOCK2(cs_main, g_cs_orphans); - ProcessOrphanTx(connman, m_mempool, pfrom->orphan_work_set, removed_txn); + ProcessOrphanTx(*connman, m_mempool, pfrom->orphan_work_set, removed_txn); for (const CTransactionRef& removedTx : removed_txn) { AddToCompactExtraTransactions(removedTx); } @@ -3670,7 +3886,7 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter } try { - ProcessMessage(*pfrom, msg_type, vRecv, msg.m_time, chainparams, m_chainman, m_mempool, connman, m_banman, interruptMsgProc); + ProcessMessage(*pfrom, 
msg_type, vRecv, msg.m_time, chainparams, m_chainman, m_mempool, *connman, m_banman, interruptMsgProc); if (interruptMsgProc) return false; if (!pfrom->vRecvGetData.empty()) @@ -3681,9 +3897,6 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize); } - LOCK(cs_main); - CheckIfBanned(*pfrom); - return fMoreWork; } @@ -3694,7 +3907,7 @@ void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds) CNodeState &state = *State(pto.GetId()); const CNetMsgMaker msgMaker(pto.GetSendVersion()); - if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) { + if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { // This is an outbound peer subject to disconnection if they don't // announce a block with as much work as the current tip within // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if @@ -3756,7 +3969,7 @@ void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) AssertLockHeld(cs_main); // Ignore non-outbound peers, or nodes marked for disconnect already - if (!IsOutboundDisconnectionCandidate(*pnode) || pnode->fDisconnect) return; + if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return; CNodeState *state = State(pnode->GetId()); if (state == nullptr) return; // shouldn't be possible, but just in case // Don't evict our protected peers @@ -3826,17 +4039,19 @@ namespace { class CompareInvMempoolOrder { CTxMemPool *mp; + bool m_wtxid_relay; public: - explicit CompareInvMempoolOrder(CTxMemPool *_mempool) + explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid) { mp = _mempool; + m_wtxid_relay = use_wtxid; } bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b) { /* As std::make_heap produces a max-heap, we want the entries with the * fewest ancestors/highest fee to sort later. */ - return mp->CompareDepthAndScore(*b, *a); + return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay); } }; } @@ -3844,48 +4059,49 @@ public: bool PeerLogicValidation::SendMessages(CNode* pto) { const Consensus::Params& consensusParams = Params().GetConsensus(); - { - // Don't send anything until the version handshake is complete - if (!pto->fSuccessfullyConnected || pto->fDisconnect) - return true; - // If we get here, the outgoing message serialization version is set and can't change. - const CNetMsgMaker msgMaker(pto->GetSendVersion()); + // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll + // disconnect misbehaving peers even before the version handshake is complete. + if (MaybeDiscourageAndDisconnect(*pto)) return true; - // - // Message: ping - // - bool pingSend = false; - if (pto->fPingQueued) { - // RPC ping request by user - pingSend = true; - } - if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { - // Ping automatically sent as a latency probe & keepalive. - pingSend = true; - } - if (pingSend) { - uint64_t nonce = 0; - while (nonce == 0) { - GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); - } - pto->fPingQueued = false; - pto->nPingUsecStart = GetTimeMicros(); - if (pto->nVersion > BIP0031_VERSION) { - pto->nPingNonceSent = nonce; - connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); - } else { - // Peer is too old to support ping command with nonce, pong will never arrive. 
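MaybeDiscourageAndDisconnect, introduced above as the replacement for CheckIfBanned, acts only when m_should_discourage is set: noban and manual peers are left alone, local peers are disconnected but not discouraged, and everyone else is both discouraged and disconnected. A stripped-down version of that decision table, with the BanMan/CConnman calls reduced to booleans and the structs invented for illustration:

    struct PeerInfo {
        bool should_discourage = false;
        bool has_noban = false;
        bool is_manual = false;
        bool is_local = false;
    };

    struct Punishment {
        bool disconnect = false;
        bool discourage = false; // i.e. BanMan::Discourage(addr) in the real code
    };

    // Mirrors the ordering of checks in MaybeDiscourageAndDisconnect above.
    Punishment DecidePunishment(PeerInfo& peer)
    {
        Punishment p;
        if (!peer.should_discourage) return p;
        peer.should_discourage = false;

        if (peer.has_noban) return p;   // never punish noban peers
        if (peer.is_manual) return p;   // never punish manual connections
        if (peer.is_local) {            // disconnect, but don't discourage the whole local address
            p.disconnect = true;
            return p;
        }
        p.disconnect = true;            // normal case: disconnect and discourage the address
        p.discourage = true;
        return p;
    }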
- pto->nPingNonceSent = 0; - connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING)); - } - } + // Don't send anything until the version handshake is complete + if (!pto->fSuccessfullyConnected || pto->fDisconnect) + return true; - TRY_LOCK(cs_main, lockMain); - if (!lockMain) - return true; + // If we get here, the outgoing message serialization version is set and can't change. + const CNetMsgMaker msgMaker(pto->GetSendVersion()); - if (CheckIfBanned(*pto)) return true; + // + // Message: ping + // + bool pingSend = false; + if (pto->fPingQueued) { + // RPC ping request by user + pingSend = true; + } + if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) { + // Ping automatically sent as a latency probe & keepalive. + pingSend = true; + } + if (pingSend) { + uint64_t nonce = 0; + while (nonce == 0) { + GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); + } + pto->fPingQueued = false; + pto->m_ping_start = GetTime<std::chrono::microseconds>(); + if (pto->nVersion > BIP0031_VERSION) { + pto->nPingNonceSent = nonce; + connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); + } else { + // Peer is too old to support ping command with nonce, pong will never arrive. + pto->nPingNonceSent = 0; + connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING)); + } + } + + { + LOCK(cs_main); CNodeState &state = *State(pto->GetId()); @@ -3912,8 +4128,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto) { pto->m_addr_known->insert(addr.GetKey()); vAddr.push_back(addr); - // receiver rejects addr messages larger than 1000 - if (vAddr.size() >= 1000) + // receiver rejects addr messages larger than MAX_ADDR_TO_SEND + if (vAddr.size() >= MAX_ADDR_TO_SEND) { connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); vAddr.clear(); @@ -3931,7 +4147,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // Start block sync if (pindexBestHeader == nullptr) pindexBestHeader = ::ChainActive().Tip(); - bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. + bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do. if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close to today. if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { @@ -4083,7 +4299,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // If the peer's chain has this block, don't inv it back. if (!PeerHasHeader(&state, pindex)) { - pto->PushBlockInventory(hashToAnnounce); + pto->vInventoryBlockToSend.push_back(hashToAnnounce); LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, pto->GetId(), hashToAnnounce.ToString()); } @@ -4116,7 +4332,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) bool fSendTrickle = pto->HasPermission(PF_NOBAN); if (pto->m_tx_relay->nNextInvSend < current_time) { fSendTrickle = true; - if (pto->fInbound) { + if (pto->IsInboundConn()) { pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)}; } else { // Use half the delay for outbound peers, as there is less privacy concern for them. 
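The PING/PONG hunks move latency tracking onto std::chrono: m_ping_start is stamped when the PING goes out, and on a matching PONG the difference to time_received becomes the new nPingUsecTime (and possibly the new minimum). The self-contained sketch below reproduces that measurement; the use of steady_clock and the helper names are this sketch's choices, not the real GetTime<> plumbing.

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <thread>

    struct PingState {
        std::chrono::microseconds m_ping_start{0};
        int64_t nPingUsecTime = 0;
        int64_t nMinPingUsecTime = std::numeric_limits<int64_t>::max();
    };

    // Record when we send a PING.
    void OnPingSent(PingState& s)
    {
        s.m_ping_start = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now().time_since_epoch());
    }

    // On a matching PONG, update the last and minimum round-trip times,
    // as the rewritten PONG handler does.
    void OnPongReceived(PingState& s, std::chrono::microseconds time_received)
    {
        const auto ping_time = time_received - s.m_ping_start;
        const int64_t ping_usec = ping_time.count();
        if (ping_usec > 0) {
            s.nPingUsecTime = ping_usec;
            s.nMinPingUsecTime = std::min(s.nMinPingUsecTime, ping_usec);
        }
    }

    int main()
    {
        PingState s;
        OnPingSent(s);
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
        OnPongReceived(s, std::chrono::duration_cast<std::chrono::microseconds>(
                              std::chrono::steady_clock::now().time_since_epoch()));
        std::cout << "ping: " << s.nPingUsecTime << " usec\n";
    }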
@@ -4143,8 +4359,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto) LOCK(pto->m_tx_relay->cs_filter); for (const auto& txinfo : vtxinfo) { - const uint256& hash = txinfo.tx->GetHash(); - CInv inv(MSG_TX, hash); + const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash(); + CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash); pto->m_tx_relay->setInventoryTxToSend.erase(hash); // Don't send transactions that peers will not put into their mempool if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { @@ -4154,6 +4370,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; } pto->m_tx_relay->filterInventoryKnown.insert(hash); + // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter. vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); @@ -4178,7 +4395,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) } // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. // A heap is used so that not all items need sorting if only a few are being sent. - CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool); + CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much shorter delays. @@ -4190,6 +4407,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) std::set<uint256>::iterator it = vInvTx.back(); vInvTx.pop_back(); uint256 hash = *it; + CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash); // Remove it from the to-be-sent set pto->m_tx_relay->setInventoryTxToSend.erase(it); // Check if not in the filter already @@ -4197,17 +4415,20 @@ bool PeerLogicValidation::SendMessages(CNode* pto) continue; } // Not in the mempool anymore? don't bother sending it. - auto txinfo = m_mempool.info(hash); + auto txinfo = m_mempool.info(ToGenTxid(inv)); if (!txinfo.tx) { continue; } + auto txid = txinfo.tx->GetHash(); + auto wtxid = txinfo.tx->GetWitnessHash(); // Peer told you to not send transactions at that feerate? Don't bother sending it. 
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; // Send - vInv.push_back(CInv(MSG_TX, hash)); + State(pto->GetId())->m_recently_announced_invs.insert(hash); + vInv.push_back(inv); nRelayedTransactions++; { // Expire old relay messages @@ -4217,9 +4438,14 @@ bool PeerLogicValidation::SendMessages(CNode* pto) vRelayExpiration.pop_front(); } - auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx))); + auto ret = mapRelay.emplace(txid, std::move(txinfo.tx)); if (ret.second) { - vRelayExpiration.push_back(std::make_pair(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first)); + vRelayExpiration.emplace_back(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first); + } + // Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid + auto ret2 = mapRelay.emplace(wtxid, ret.first->second); + if (ret2.second) { + vRelayExpiration.emplace_back(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret2.first); } } if (vInv.size() == MAX_INV_SZ) { @@ -4227,6 +4453,14 @@ bool PeerLogicValidation::SendMessages(CNode* pto) vInv.clear(); } pto->m_tx_relay->filterInventoryKnown.insert(hash); + if (hash != txid) { + // Insert txid into filterInventoryKnown, even for + // wtxidrelay peers. This prevents re-adding of + // unconfirmed parents to the recently_announced + // filter, when a child tx is requested. See + // ProcessGetData(). + pto->m_tx_relay->filterInventoryKnown.insert(txid); + } } } } @@ -4264,9 +4498,9 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // Check for headers sync timeouts if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) { // Detect whether this is a stalling initial-headers-sync peer - if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24*60*60) { + if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) { if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) { - // Disconnect a (non-whitelisted) peer if it is our only sync peer, + // Disconnect a peer (without the noban permission) if it is our only sync peer, // and we have others we could be using instead. // Note: If all our peers are inbound, then we won't // disconnect our sync peer for stalling; we have bigger @@ -4276,7 +4510,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) pto->fDisconnect = true; return true; } else { - LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId()); + LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId()); // Reset the headers sync state so that we have a // chance to try downloading from a different peer. 
// Note: this will also result in at least one more @@ -4347,15 +4581,15 @@ bool PeerLogicValidation::SendMessages(CNode* pto) auto& tx_process_time = state.m_tx_download.m_tx_process_time; while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) { - const uint256 txid = tx_process_time.begin()->second; + const GenTxid gtxid = tx_process_time.begin()->second; // Erase this entry from tx_process_time (it may be added back for // processing at a later time, see below) tx_process_time.erase(tx_process_time.begin()); - CInv inv(MSG_TX | GetFetchFlags(*pto), txid); + CInv inv(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); if (!AlreadyHave(inv, m_mempool)) { // If this transaction was last requested more than 1 minute ago, // then request. - const auto last_request_time = GetTxRequestTime(inv.hash); + const auto last_request_time = GetTxRequestTime(gtxid); if (last_request_time <= current_time - GETDATA_TX_INTERVAL) { LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId()); vGetData.push_back(inv); @@ -4363,20 +4597,28 @@ bool PeerLogicValidation::SendMessages(CNode* pto) connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); } - UpdateTxRequestTime(inv.hash, current_time); - state.m_tx_download.m_tx_in_flight.emplace(inv.hash, current_time); + UpdateTxRequestTime(gtxid, current_time); + state.m_tx_download.m_tx_in_flight.emplace(gtxid.GetHash(), current_time); } else { // This transaction is in flight from someone else; queue // up processing to happen after the download times out // (with a slight delay for inbound peers, to prefer // requests to outbound peers). - const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload); - tx_process_time.emplace(next_process_time, txid); + // Don't apply the txid-delay to re-requests of a + // transaction; the heuristic of delaying requests to + // txid-relay peers is to save bandwidth on initial + // announcement of a transaction, and doesn't make sense + // for a followup request if our first peer times out (and + // would open us up to an attacker using inbound + // wtxid-relay to prevent us from requesting transactions + // from outbound txid-relay peers). + const auto next_process_time = CalculateTxGetDataTime(gtxid, current_time, !state.fPreferredDownload, false); + tx_process_time.emplace(next_process_time, gtxid); } } else { // We have already seen this transaction, no need to download. - state.m_tx_download.m_tx_announced.erase(inv.hash); - state.m_tx_download.m_tx_in_flight.erase(inv.hash); + state.m_tx_download.m_tx_announced.erase(gtxid.GetHash()); + state.m_tx_download.m_tx_in_flight.erase(gtxid.GetHash()); } } @@ -4392,10 +4634,21 @@ bool PeerLogicValidation::SendMessages(CNode* pto) ) { CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK(); int64_t timeNow = GetTimeMicros(); + static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}}; + if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) { + // Received tx-inv messages are discarded when the active + // chainstate is in IBD, so tell the peer to not send them. 
+ currentFilter = MAX_MONEY; + } else { + static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)}; + if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) { + // Send the current filter if we sent MAX_FILTER previously + // and made it out of IBD. + pto->m_tx_relay->nextSendTimeFeeFilter = timeNow - 1; + } + } if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) { - static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE); - static FeeFilterRounder filterRounder(default_feerate); - CAmount filterToSend = filterRounder.round(currentFilter); + CAmount filterToSend = g_filter_rounder.round(currentFilter); // We always have a fee filter of at least minRelayTxFee filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK()); if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) { @@ -4411,7 +4664,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; } } - } + } // release cs_main return true; } @@ -4423,6 +4676,7 @@ public: // orphan transactions mapOrphanTransactions.clear(); mapOrphanTransactionsByPrev.clear(); + g_orphans_by_wtxid.clear(); } }; static CNetProcessingCleanup instance_of_cnetprocessingcleanup; diff --git a/src/net_processing.h b/src/net_processing.h index 19beca0cc4..2d98714122 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -23,15 +23,18 @@ static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100; static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100; static const bool DEFAULT_PEERBLOOMFILTERS = false; static const bool DEFAULT_PEERBLOCKFILTERS = false; +/** Threshold for marking a node to be discouraged, e.g. disconnected and added to the discouragement filter. */ +static const int DISCOURAGEMENT_THRESHOLD{100}; class PeerLogicValidation final : public CValidationInterface, public NetEventsInterface { private: CConnman* const connman; + /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ BanMan* const m_banman; ChainstateManager& m_chainman; CTxMemPool& m_mempool; - bool CheckIfBanned(CNode& pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + bool MaybeDiscourageAndDisconnect(CNode& pnode); public: PeerLogicValidation(CConnman* connman, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool); @@ -97,6 +100,6 @@ struct CNodeStateStats { bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats); /** Relay transaction to every node */ -void RelayTransaction(const uint256&, const CConnman& connman); +void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main); #endif // BITCOIN_NET_PROCESSING_H diff --git a/src/netaddress.cpp b/src/netaddress.cpp index f79425a52e..d29aed6c8b 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -3,6 +3,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
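A hedged sketch of the feefilter-during-IBD behaviour added in the net_processing.cpp hunk further up (the one setting currentFilter = MAX_MONEY): during initial block download the node advertises an effectively infinite feerate so peers stop announcing transactions, and once IBD ends, the fact that the "infinite" filter was the last one sent triggers an immediate re-send of the real mempool-derived minimum. ChooseFeeFilter and PeerFeeFilterState are illustrative names, and the FeeFilterRounder quantization step is omitted.

#include <cstdint>

using CAmount = int64_t;

// Stand-in for MAX_MONEY (21 million coins in satoshis).
static constexpr CAmount MAX_MONEY_SKETCH{21000000LL * 100000000LL};

struct PeerFeeFilterState {
    CAmount last_sent_filter{0};
    int64_t next_send_time{0}; // microsecond timestamp, as in the hunk above
};

CAmount ChooseFeeFilter(bool in_ibd, CAmount mempool_min_fee, int64_t time_now,
                        PeerFeeFilterState& peer)
{
    if (in_ibd) return MAX_MONEY_SKETCH; // tell the peer to send no tx invs at all
    if (peer.last_sent_filter == MAX_MONEY_SKETCH) {
        peer.next_send_time = time_now - 1; // force an immediate update once out of IBD
    }
    return mempool_min_fee;
}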
+#include <cstdint> #include <netaddress.h> #include <hash.h> #include <util/strencodings.h> @@ -27,19 +28,35 @@ CNetAddr::CNetAddr() void CNetAddr::SetIP(const CNetAddr& ipIn) { + m_net = ipIn.m_net; memcpy(ip, ipIn.ip, sizeof(ip)); } +void CNetAddr::SetLegacyIPv6(const uint8_t ipv6[16]) +{ + if (memcmp(ipv6, pchIPv4, sizeof(pchIPv4)) == 0) { + m_net = NET_IPV4; + } else if (memcmp(ipv6, pchOnionCat, sizeof(pchOnionCat)) == 0) { + m_net = NET_ONION; + } else if (memcmp(ipv6, g_internal_prefix, sizeof(g_internal_prefix)) == 0) { + m_net = NET_INTERNAL; + } else { + m_net = NET_IPV6; + } + memcpy(ip, ipv6, 16); +} + void CNetAddr::SetRaw(Network network, const uint8_t *ip_in) { switch(network) { case NET_IPV4: + m_net = NET_IPV4; memcpy(ip, pchIPv4, 12); memcpy(ip+12, ip_in, 4); break; case NET_IPV6: - memcpy(ip, ip_in, 16); + SetLegacyIPv6(ip_in); break; default: assert(!"invalid network"); @@ -65,6 +82,7 @@ bool CNetAddr::SetInternal(const std::string &name) if (name.empty()) { return false; } + m_net = NET_INTERNAL; unsigned char hash[32] = {}; CSHA256().Write((const unsigned char*)name.data(), name.size()).Finalize(hash); memcpy(ip, g_internal_prefix, sizeof(g_internal_prefix)); @@ -88,6 +106,7 @@ bool CNetAddr::SetSpecial(const std::string &strName) std::vector<unsigned char> vchAddr = DecodeBase32(strName.substr(0, strName.size() - 6).c_str()); if (vchAddr.size() != 16-sizeof(pchOnionCat)) return false; + m_net = NET_ONION; memcpy(ip, pchOnionCat, sizeof(pchOnionCat)); for (unsigned int i=0; i<16-sizeof(pchOnionCat); i++) ip[i + sizeof(pchOnionCat)] = vchAddr[i]; @@ -122,15 +141,9 @@ bool CNetAddr::IsBindAny() const return true; } -bool CNetAddr::IsIPv4() const -{ - return (memcmp(ip, pchIPv4, sizeof(pchIPv4)) == 0); -} +bool CNetAddr::IsIPv4() const { return m_net == NET_IPV4; } -bool CNetAddr::IsIPv6() const -{ - return (!IsIPv4() && !IsTor() && !IsInternal()); -} +bool CNetAddr::IsIPv6() const { return m_net == NET_IPV6; } bool CNetAddr::IsRFC1918() const { @@ -164,50 +177,54 @@ bool CNetAddr::IsRFC5737() const bool CNetAddr::IsRFC3849() const { - return GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x0D && GetByte(12) == 0xB8; + return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 && + GetByte(13) == 0x0D && GetByte(12) == 0xB8; } bool CNetAddr::IsRFC3964() const { - return (GetByte(15) == 0x20 && GetByte(14) == 0x02); + return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x02; } bool CNetAddr::IsRFC6052() const { static const unsigned char pchRFC6052[] = {0,0x64,0xFF,0x9B,0,0,0,0,0,0,0,0}; - return (memcmp(ip, pchRFC6052, sizeof(pchRFC6052)) == 0); + return IsIPv6() && memcmp(ip, pchRFC6052, sizeof(pchRFC6052)) == 0; } bool CNetAddr::IsRFC4380() const { - return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0 && GetByte(12) == 0); + return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0 && + GetByte(12) == 0; } bool CNetAddr::IsRFC4862() const { static const unsigned char pchRFC4862[] = {0xFE,0x80,0,0,0,0,0,0}; - return (memcmp(ip, pchRFC4862, sizeof(pchRFC4862)) == 0); + return IsIPv6() && memcmp(ip, pchRFC4862, sizeof(pchRFC4862)) == 0; } bool CNetAddr::IsRFC4193() const { - return ((GetByte(15) & 0xFE) == 0xFC); + return IsIPv6() && (GetByte(15) & 0xFE) == 0xFC; } bool CNetAddr::IsRFC6145() const { static const unsigned char pchRFC6145[] = {0,0,0,0,0,0,0,0,0xFF,0xFF,0,0}; - return (memcmp(ip, pchRFC6145, sizeof(pchRFC6145)) == 0); + return IsIPv6() && memcmp(ip, pchRFC6145, sizeof(pchRFC6145)) == 0; } bool 
CNetAddr::IsRFC4843() const { - return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x10); + return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 && + GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x10; } bool CNetAddr::IsRFC7343() const { - return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20); + return IsIPv6() && GetByte(15) == 0x20 && GetByte(14) == 0x01 && + GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20; } bool CNetAddr::IsHeNet() const @@ -221,10 +238,7 @@ bool CNetAddr::IsHeNet() const * * @see CNetAddr::SetSpecial(const std::string &) */ -bool CNetAddr::IsTor() const -{ - return (memcmp(ip, pchOnionCat, sizeof(pchOnionCat)) == 0); -} +bool CNetAddr::IsTor() const { return m_net == NET_ONION; } bool CNetAddr::IsLocal() const { @@ -234,7 +248,7 @@ bool CNetAddr::IsLocal() const // IPv6 loopback (::1/128) static const unsigned char pchLocal[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1}; - if (memcmp(ip, pchLocal, 16) == 0) + if (IsIPv6() && memcmp(ip, pchLocal, 16) == 0) return true; return false; @@ -258,12 +272,12 @@ bool CNetAddr::IsValid() const // header20 vectorlen3 addr26 addr26 addr26 header20 vectorlen3 addr26 addr26 addr26... // so if the first length field is garbled, it reads the second batch // of addr misaligned by 3 bytes. - if (memcmp(ip, pchIPv4+3, sizeof(pchIPv4)-3) == 0) + if (IsIPv6() && memcmp(ip, pchIPv4+3, sizeof(pchIPv4)-3) == 0) return false; // unspecified IPv6 address (::/128) unsigned char ipNone6[16] = {}; - if (memcmp(ip, ipNone6, 16) == 0) + if (IsIPv6() && memcmp(ip, ipNone6, 16) == 0) return false; // documentation IPv6 address @@ -310,7 +324,7 @@ bool CNetAddr::IsRoutable() const */ bool CNetAddr::IsInternal() const { - return memcmp(ip, g_internal_prefix, sizeof(g_internal_prefix)) == 0; + return m_net == NET_INTERNAL; } enum Network CNetAddr::GetNetwork() const @@ -321,13 +335,7 @@ enum Network CNetAddr::GetNetwork() const if (!IsRoutable()) return NET_UNROUTABLE; - if (IsIPv4()) - return NET_IPV4; - - if (IsTor()) - return NET_ONION; - - return NET_IPV6; + return m_net; } std::string CNetAddr::ToStringIP() const @@ -361,12 +369,12 @@ std::string CNetAddr::ToString() const bool operator==(const CNetAddr& a, const CNetAddr& b) { - return (memcmp(a.ip, b.ip, 16) == 0); + return a.m_net == b.m_net && memcmp(a.ip, b.ip, 16) == 0; } bool operator<(const CNetAddr& a, const CNetAddr& b) { - return (memcmp(a.ip, b.ip, 16) < 0); + return a.m_net < b.m_net || (a.m_net == b.m_net && memcmp(a.ip, b.ip, 16) < 0); } /** @@ -545,7 +553,7 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co uint64_t CNetAddr::GetHash() const { - uint256 hash = Hash(&ip[0], &ip[16]); + uint256 hash = Hash(ip); uint64_t nRet; memcpy(&nRet, &hash, sizeof(nRet)); return nRet; @@ -627,15 +635,15 @@ CService::CService() : port(0) { } -CService::CService(const CNetAddr& cip, unsigned short portIn) : CNetAddr(cip), port(portIn) +CService::CService(const CNetAddr& cip, uint16_t portIn) : CNetAddr(cip), port(portIn) { } -CService::CService(const struct in_addr& ipv4Addr, unsigned short portIn) : CNetAddr(ipv4Addr), port(portIn) +CService::CService(const struct in_addr& ipv4Addr, uint16_t portIn) : CNetAddr(ipv4Addr), port(portIn) { } -CService::CService(const struct in6_addr& ipv6Addr, unsigned short portIn) : CNetAddr(ipv6Addr), port(portIn) +CService::CService(const struct in6_addr& ipv6Addr, uint16_t portIn) : CNetAddr(ipv6Addr), port(portIn) { } 
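For reference, a self-contained sketch of the prefix dispatch that the new SetLegacyIPv6() performs in the netaddress.cpp hunks above, which is what lets IsIPv4(), IsTor() and IsInternal() collapse to simple m_net comparisons. The enum and function names here are illustrative; the IPv4-mapped prefix is the well-known ::ffff:0:0/96, while the OnionCat and internal prefix bytes are quoted from memory and should be checked against pchOnionCat and g_internal_prefix in netaddress.cpp.

#include <cstdint>
#include <cstring>

enum class SketchNet { IPV4, IPV6, ONION, INTERNAL };

// ::ffff:0:0/96, the IPv4-mapped IPv6 prefix (pchIPv4).
static const uint8_t IPV4_PREFIX[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};
// fd87:d87e:eb43::/48, the OnionCat prefix (pchOnionCat) -- verify against source.
static const uint8_t ONION_PREFIX[6] = {0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43};
// Prefix of the dummy "internal" name-mapped addresses (g_internal_prefix) -- verify against source.
static const uint8_t INTERNAL_PREFIX[6] = {0xfd, 0x6b, 0x88, 0xc0, 0x87, 0x24};

// Classify a 16-byte legacy-serialized address by prefix, as SetLegacyIPv6()
// does before storing m_net alongside the raw bytes.
SketchNet ClassifyLegacyIPv6(const uint8_t ipv6[16])
{
    if (std::memcmp(ipv6, IPV4_PREFIX, sizeof(IPV4_PREFIX)) == 0) return SketchNet::IPV4;
    if (std::memcmp(ipv6, ONION_PREFIX, sizeof(ONION_PREFIX)) == 0) return SketchNet::ONION;
    if (std::memcmp(ipv6, INTERNAL_PREFIX, sizeof(INTERNAL_PREFIX)) == 0) return SketchNet::INTERNAL;
    return SketchNet::IPV6;
}

Tagging the network up front is also what makes the stricter operator== and operator< in the hunk above possible: addresses on different networks now compare unequal even when their raw bytes happen to match.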
@@ -663,7 +671,7 @@ bool CService::SetSockAddr(const struct sockaddr *paddr) } } -unsigned short CService::GetPort() const +uint16_t CService::GetPort() const { return port; } @@ -725,12 +733,10 @@ bool CService::GetSockAddr(struct sockaddr* paddr, socklen_t *addrlen) const */ std::vector<unsigned char> CService::GetKey() const { - std::vector<unsigned char> vKey; - vKey.resize(18); - memcpy(vKey.data(), ip, 16); - vKey[16] = port / 0x100; // most significant byte of our port - vKey[17] = port & 0x0FF; // least significant byte of our port - return vKey; + auto key = GetAddrBytes(); + key.push_back(port / 0x100); // most significant byte of our port + key.push_back(port & 0x0FF); // least significant byte of our port + return key; } std::string CService::ToStringPort() const @@ -814,7 +820,7 @@ CSubNet::CSubNet(const CNetAddr &addr): */ bool CSubNet::Match(const CNetAddr &addr) const { - if (!valid || !addr.IsValid()) + if (!valid || !addr.IsValid() || network.m_net != addr.m_net) return false; for(int x=0; x<16; ++x) if ((addr.ip[x] & netmask[x]) != network.ip[x]) diff --git a/src/netaddress.h b/src/netaddress.h index e640c07d32..0365907d44 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -12,25 +12,54 @@ #include <compat.h> #include <serialize.h> -#include <stdint.h> +#include <cstdint> #include <string> #include <vector> +/** + * A network type. + * @note An address may belong to more than one network, for example `10.0.0.1` + * belongs to both `NET_UNROUTABLE` and `NET_IPV4`. + * Keep these sequential starting from 0 and `NET_MAX` as the last entry. + * We have loops like `for (int i = 0; i < NET_MAX; i++)` that expect to iterate + * over all enum values and also `GetExtNetwork()` "extends" this enum by + * introducing standalone constants starting from `NET_MAX`. + */ enum Network { + /// Addresses from these networks are not publicly routable on the global Internet. NET_UNROUTABLE = 0, + + /// IPv4 NET_IPV4, + + /// IPv6 NET_IPV6, + + /// TORv2 NET_ONION, + + /// A set of dummy addresses that map a name to an IPv6 address. These + /// addresses belong to RFC4193's fc00::/7 subnet (unique-local addresses). + /// We use them to map a string or FQDN to an IPv6 address in CAddrMan to + /// keep track of which DNS seeds were used. NET_INTERNAL, + /// Dummy value to indicate the number of NET_* constants. NET_MAX, }; -/** IP address (IPv6, or IPv4 using mapped IPv6 range (::FFFF:0:0/96)) */ +/** + * Network address. + */ class CNetAddr { protected: + /** + * Network to which this address belongs. + */ + Network m_net{NET_IPV6}; + unsigned char ip[16]; // in network byte order uint32_t scopeId{0}; // for scoped/link-local ipv6 addresses @@ -40,6 +69,14 @@ class CNetAddr void SetIP(const CNetAddr& ip); /** + * Set from a legacy IPv6 address. + * Legacy IPv6 address may be a normal IPv6 address, or another address + * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy + * `addr` encoding. + */ + void SetLegacyIPv6(const uint8_t ipv6[16]); + + /** * Set raw IPv4 or IPv6 address (in network byte order) * @note Only NET_IPV4 and NET_IPV6 are allowed for network. 
*/ @@ -90,6 +127,7 @@ class CNetAddr uint32_t GetMappedAS(const std::vector<bool> &asmap) const; std::vector<unsigned char> GetGroup(const std::vector<bool> &asmap) const; + std::vector<unsigned char> GetAddrBytes() const { return {std::begin(ip), std::end(ip)}; } int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const; explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0); @@ -99,7 +137,27 @@ class CNetAddr friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); } friend bool operator<(const CNetAddr& a, const CNetAddr& b); - SERIALIZE_METHODS(CNetAddr, obj) { READWRITE(obj.ip); } + /** + * Serialize to a stream. + */ + template <typename Stream> + void Serialize(Stream& s) const + { + s << ip; + } + + /** + * Unserialize from a stream. + */ + template <typename Stream> + void Unserialize(Stream& s) + { + unsigned char ip_temp[sizeof(ip)]; + s >> ip_temp; + // Use SetLegacyIPv6() so that m_net is set correctly. For example + // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4). + SetLegacyIPv6(ip_temp); + } friend class CSubNet; }; @@ -142,10 +200,10 @@ class CService : public CNetAddr public: CService(); - CService(const CNetAddr& ip, unsigned short port); - CService(const struct in_addr& ipv4Addr, unsigned short port); + CService(const CNetAddr& ip, uint16_t port); + CService(const struct in_addr& ipv4Addr, uint16_t port); explicit CService(const struct sockaddr_in& addr); - unsigned short GetPort() const; + uint16_t GetPort() const; bool GetSockAddr(struct sockaddr* paddr, socklen_t *addrlen) const; bool SetSockAddr(const struct sockaddr* paddr); friend bool operator==(const CService& a, const CService& b); @@ -156,10 +214,14 @@ class CService : public CNetAddr std::string ToStringPort() const; std::string ToStringIPPort() const; - CService(const struct in6_addr& ipv6Addr, unsigned short port); + CService(const struct in6_addr& ipv6Addr, uint16_t port); explicit CService(const struct sockaddr_in6& addr); - SERIALIZE_METHODS(CService, obj) { READWRITE(obj.ip, Using<BigEndianFormatter<2>>(obj.port)); } + SERIALIZE_METHODS(CService, obj) + { + READWRITEAS(CNetAddr, obj); + READWRITE(Using<BigEndianFormatter<2>>(obj.port)); + } }; bool SanityCheckASMap(const std::vector<bool>& asmap); diff --git a/src/netbase.cpp b/src/netbase.cpp index 9fe03c6a24..3a3b5f3e66 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -12,6 +12,7 @@ #include <util/system.h> #include <atomic> +#include <cstdint> #ifndef WIN32 #include <fcntl.h> @@ -798,11 +799,11 @@ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int ProxyCredentials random_auth; static std::atomic_int counter(0); random_auth.username = random_auth.password = strprintf("%i", counter++); - if (!Socks5(strDest, (unsigned short)port, &random_auth, hSocket)) { + if (!Socks5(strDest, (uint16_t)port, &random_auth, hSocket)) { return false; } } else { - if (!Socks5(strDest, (unsigned short)port, 0, hSocket)) { + if (!Socks5(strDest, (uint16_t)port, 0, hSocket)) { return false; } } diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp index e3c4c828b6..fb46ea1731 100644 --- a/src/node/coinstats.cpp +++ b/src/node/coinstats.cpp @@ -8,13 +8,23 @@ #include <coins.h> #include <hash.h> #include <serialize.h> -#include <validation.h> #include <uint256.h> #include <util/system.h> +#include <validation.h> #include <map> -static void ApplyStats(CCoinsStats &stats, CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs) +static 
uint64_t GetBogoSize(const CScript& scriptPubKey) +{ + return 32 /* txid */ + + 4 /* vout index */ + + 4 /* height + coinbase */ + + 8 /* amount */ + + 2 /* scriptPubKey len */ + + scriptPubKey.size() /* scriptPubKey */; +} + +static void ApplyStats(CCoinsStats& stats, CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs) { assert(!outputs.empty()); ss << hash; @@ -26,26 +36,38 @@ static void ApplyStats(CCoinsStats &stats, CHashWriter& ss, const uint256& hash, ss << VARINT_MODE(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); stats.nTransactionOutputs++; stats.nTotalAmount += output.second.out.nValue; - stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ + - 2 /* scriptPubKey len */ + output.second.out.scriptPubKey.size() /* scriptPubKey */; + stats.nBogoSize += GetBogoSize(output.second.out.scriptPubKey); } ss << VARINT(0u); } +static void ApplyStats(CCoinsStats& stats, std::nullptr_t, const uint256& hash, const std::map<uint32_t, Coin>& outputs) +{ + assert(!outputs.empty()); + stats.nTransactions++; + for (const auto& output : outputs) { + stats.nTransactionOutputs++; + stats.nTotalAmount += output.second.out.nValue; + stats.nBogoSize += GetBogoSize(output.second.out.scriptPubKey); + } +} + //! Calculate statistics about the unspent transaction output set -bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const std::function<void()>& interruption_point) +template <typename T> +static bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, T hash_obj, const std::function<void()>& interruption_point) { stats = CCoinsStats(); std::unique_ptr<CCoinsViewCursor> pcursor(view->Cursor()); assert(pcursor); - CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); stats.hashBlock = pcursor->GetBestBlock(); { LOCK(cs_main); stats.nHeight = LookupBlockIndex(stats.hashBlock)->nHeight; } - ss << stats.hashBlock; + + PrepareHash(hash_obj, stats); + uint256 prevkey; std::map<uint32_t, Coin> outputs; while (pcursor->Valid()) { @@ -54,7 +76,7 @@ bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const std::function<void Coin coin; if (pcursor->GetKey(key) && pcursor->GetValue(coin)) { if (!outputs.empty() && key.hash != prevkey) { - ApplyStats(stats, ss, prevkey, outputs); + ApplyStats(stats, hash_obj, prevkey, outputs); outputs.clear(); } prevkey = key.hash; @@ -66,9 +88,38 @@ bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const std::function<void pcursor->Next(); } if (!outputs.empty()) { - ApplyStats(stats, ss, prevkey, outputs); + ApplyStats(stats, hash_obj, prevkey, outputs); } - stats.hashSerialized = ss.GetHash(); + + FinalizeHash(hash_obj, stats); + stats.nDiskSize = view->EstimateSize(); return true; } + +bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, CoinStatsHashType hash_type, const std::function<void()>& interruption_point) +{ + switch (hash_type) { + case(CoinStatsHashType::HASH_SERIALIZED): { + CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); + return GetUTXOStats(view, stats, ss, interruption_point); + } + case(CoinStatsHashType::NONE): { + return GetUTXOStats(view, stats, nullptr, interruption_point); + } + } // no default case, so the compiler can warn about missing cases + assert(false); +} + +// The legacy hash serializes the hashBlock +static void PrepareHash(CHashWriter& ss, CCoinsStats& stats) +{ + ss << stats.hashBlock; +} +static void PrepareHash(std::nullptr_t, CCoinsStats& stats) {} + +static void FinalizeHash(CHashWriter& ss, CCoinsStats& stats) +{ + stats.hashSerialized = 
ss.GetHash(); +} +static void FinalizeHash(std::nullptr_t, CCoinsStats& stats) {} diff --git a/src/node/coinstats.h b/src/node/coinstats.h index d9cdaa3036..2a7441c10e 100644 --- a/src/node/coinstats.h +++ b/src/node/coinstats.h @@ -14,6 +14,11 @@ class CCoinsView; +enum class CoinStatsHashType { + HASH_SERIALIZED, + NONE, +}; + struct CCoinsStats { int nHeight{0}; @@ -30,6 +35,6 @@ struct CCoinsStats }; //! Calculate statistics about the unspent transaction output set -bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const std::function<void()>& interruption_point = {}); +bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const CoinStatsHashType hash_type, const std::function<void()>& interruption_point = {}); #endif // BITCOIN_NODE_COINSTATS_H diff --git a/src/node/context.h b/src/node/context.h index c45d9e6689..be568cba36 100644 --- a/src/node/context.h +++ b/src/node/context.h @@ -6,6 +6,7 @@ #define BITCOIN_NODE_CONTEXT_H #include <cassert> +#include <functional> #include <memory> #include <vector> @@ -41,6 +42,7 @@ struct NodeContext { std::unique_ptr<interfaces::Chain> chain; std::vector<std::unique_ptr<interfaces::ChainClient>> chain_clients; std::unique_ptr<CScheduler> scheduler; + std::function<void()> rpc_interruption_point = [] {}; //! Declare default constructor and destructor that are not inline, so code //! instantiating the NodeContext struct doesn't need to #include class @@ -49,10 +51,4 @@ struct NodeContext { ~NodeContext(); }; -inline ChainstateManager& EnsureChainman(const NodeContext& node) -{ - assert(node.chainman); - return *node.chainman; -} - #endif // BITCOIN_NODE_CONTEXT_H diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index 3841d8687d..5633abe817 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -80,9 +80,10 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t if (relay) { // the mempool tracks locally submitted transactions to make a // best-effort of initial broadcast - node.mempool->AddUnbroadcastTx(hashTx); + node.mempool->AddUnbroadcastTx(hashTx, tx->GetWitnessHash()); - RelayTransaction(hashTx, *node.connman); + LOCK(cs_main); + RelayTransaction(hashTx, tx->GetWitnessHash(), *node.connman); } return TransactionError::OK; diff --git a/src/ui_interface.cpp b/src/node/ui_interface.cpp index 15795bd67f..8d3665975d 100644 --- a/src/ui_interface.cpp +++ b/src/node/ui_interface.cpp @@ -2,18 +2,18 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
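The templated GetUTXOStats() in the coinstats.cpp hunk above relies on plain overload resolution to switch hashing on or off: the helpers (ApplyStats, PrepareHash, FinalizeHash) are overloaded on CHashWriter& versus std::nullptr_t, so the CoinStatsHashType::NONE path instantiates no-op variants. Below is a minimal sketch of that pattern, with illustrative names (SketchHasher, Accumulate, RunStats) rather than the ones in coinstats.cpp.

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Toy stand-in for CHashWriter.
struct SketchHasher { uint64_t state{0}; };

static void Accumulate(SketchHasher& h, const std::string& item) { h.state = h.state * 31 + item.size(); }
static void Accumulate(std::nullptr_t, const std::string&) {} // "no hash" variant: does nothing

template <typename T>
std::size_t RunStats(T hash_obj, const std::vector<std::string>& items)
{
    std::size_t count = 0;
    for (const auto& item : items) {
        Accumulate(hash_obj, item); // overload resolution picks the variant matching T
        ++count;
    }
    return count;
}

// RunStats(SketchHasher{}, items) hashes while it counts;
// RunStats(nullptr, items) only counts, with no hashing work done.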
-#include <ui_interface.h> +#include <node/ui_interface.h> #include <util/translation.h> -#include <boost/signals2/last_value.hpp> +#include <boost/signals2/optional_last_value.hpp> #include <boost/signals2/signal.hpp> CClientUIInterface uiInterface; struct UISignals { - boost::signals2::signal<CClientUIInterface::ThreadSafeMessageBoxSig, boost::signals2::last_value<bool>> ThreadSafeMessageBox; - boost::signals2::signal<CClientUIInterface::ThreadSafeQuestionSig, boost::signals2::last_value<bool>> ThreadSafeQuestion; + boost::signals2::signal<CClientUIInterface::ThreadSafeMessageBoxSig, boost::signals2::optional_last_value<bool>> ThreadSafeMessageBox; + boost::signals2::signal<CClientUIInterface::ThreadSafeQuestionSig, boost::signals2::optional_last_value<bool>> ThreadSafeQuestion; boost::signals2::signal<CClientUIInterface::InitMessageSig> InitMessage; boost::signals2::signal<CClientUIInterface::NotifyNumConnectionsChangedSig> NotifyNumConnectionsChanged; boost::signals2::signal<CClientUIInterface::NotifyNetworkActiveChangedSig> NotifyNetworkActiveChanged; @@ -42,8 +42,8 @@ ADD_SIGNALS_IMPL_WRAPPER(NotifyBlockTip); ADD_SIGNALS_IMPL_WRAPPER(NotifyHeaderTip); ADD_SIGNALS_IMPL_WRAPPER(BannedListChanged); -bool CClientUIInterface::ThreadSafeMessageBox(const bilingual_str& message, const std::string& caption, unsigned int style) { return g_ui_signals.ThreadSafeMessageBox(message, caption, style); } -bool CClientUIInterface::ThreadSafeQuestion(const bilingual_str& message, const std::string& non_interactive_message, const std::string& caption, unsigned int style) { return g_ui_signals.ThreadSafeQuestion(message, non_interactive_message, caption, style); } +bool CClientUIInterface::ThreadSafeMessageBox(const bilingual_str& message, const std::string& caption, unsigned int style) { return g_ui_signals.ThreadSafeMessageBox(message, caption, style).value_or(false);} +bool CClientUIInterface::ThreadSafeQuestion(const bilingual_str& message, const std::string& non_interactive_message, const std::string& caption, unsigned int style) { return g_ui_signals.ThreadSafeQuestion(message, non_interactive_message, caption, style).value_or(false);} void CClientUIInterface::InitMessage(const std::string& message) { return g_ui_signals.InitMessage(message); } void CClientUIInterface::NotifyNumConnectionsChanged(int newNumConnections) { return g_ui_signals.NotifyNumConnectionsChanged(newNumConnections); } void CClientUIInterface::NotifyNetworkActiveChanged(bool networkActive) { return g_ui_signals.NotifyNetworkActiveChanged(networkActive); } diff --git a/src/ui_interface.h b/src/node/ui_interface.h index b7895e373f..d574ab879f 100644 --- a/src/ui_interface.h +++ b/src/node/ui_interface.h @@ -3,8 +3,8 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#ifndef BITCOIN_UI_INTERFACE_H -#define BITCOIN_UI_INTERFACE_H +#ifndef BITCOIN_NODE_UI_INTERFACE_H +#define BITCOIN_NODE_UI_INTERFACE_H #include <functional> #include <memory> @@ -20,14 +20,6 @@ class connection; } } // namespace boost -/** General change type (added, updated, removed). */ -enum ChangeType -{ - CT_NEW, - CT_UPDATED, - CT_DELETED -}; - /** Signals for UI communication. 
*/ class CClientUIInterface { @@ -122,8 +114,8 @@ void InitWarning(const bilingual_str& str); /** Show error message **/ bool InitError(const bilingual_str& str); -inline bool AbortError(const bilingual_str& str) { return InitError(str); } +constexpr auto AbortError = InitError; extern CClientUIInterface uiInterface; -#endif // BITCOIN_UI_INTERFACE_H +#endif // BITCOIN_NODE_UI_INTERFACE_H diff --git a/src/noui.cpp b/src/noui.cpp index 821d10e3bc..3c82512fac 100644 --- a/src/noui.cpp +++ b/src/noui.cpp @@ -6,7 +6,7 @@ #include <noui.h> #include <logging.h> -#include <ui_interface.h> +#include <node/ui_interface.h> #include <util/translation.h> #include <string> diff --git a/src/outputtype.cpp b/src/outputtype.cpp index 871474d56e..e978852826 100644 --- a/src/outputtype.cpp +++ b/src/outputtype.cpp @@ -42,8 +42,8 @@ const std::string& FormatOutputType(OutputType type) case OutputType::LEGACY: return OUTPUT_TYPE_STRING_LEGACY; case OutputType::P2SH_SEGWIT: return OUTPUT_TYPE_STRING_P2SH_SEGWIT; case OutputType::BECH32: return OUTPUT_TYPE_STRING_BECH32; - default: assert(false); - } + } // no default case, so the compiler can warn about missing cases + assert(false); } CTxDestination GetDestinationForKey(const CPubKey& key, OutputType type) @@ -61,8 +61,8 @@ CTxDestination GetDestinationForKey(const CPubKey& key, OutputType type) return witdest; } } - default: assert(false); - } + } // no default case, so the compiler can warn about missing cases + assert(false); } std::vector<CTxDestination> GetAllDestinationsForKey(const CPubKey& key) @@ -100,6 +100,6 @@ CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore, return ScriptHash(witprog); } } - default: assert(false); - } + } // no default case, so the compiler can warn about missing cases + assert(false); } diff --git a/src/outputtype.h b/src/outputtype.h index 1438f65844..77a16b1d05 100644 --- a/src/outputtype.h +++ b/src/outputtype.h @@ -18,14 +18,6 @@ enum class OutputType { LEGACY, P2SH_SEGWIT, BECH32, - - /** - * Special output type for change outputs only. Automatically choose type - * based on address type setting and the types other of non-change outputs - * (see -changetype option documentation and implementation in - * CWallet::TransactionChangeType for details). - */ - CHANGE_AUTO, }; extern const std::array<OutputType, 3> OUTPUT_TYPES; diff --git a/src/policy/fees.h b/src/policy/fees.h index e445c1590d..e79dbc9868 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -273,7 +273,7 @@ public: /** Create new FeeFilterRounder */ explicit FeeFilterRounder(const CFeeRate& minIncrementalFee); - /** Quantize a minimum fee for privacy purpose before broadcast **/ + /** Quantize a minimum fee for privacy purpose before broadcast. 
Not thread-safe due to use of FastRandomContext */ CAmount round(CAmount currentMinFee); private: diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 07d51c0088..0e9820da1e 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -50,14 +50,14 @@ bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFeeIn) return (txout.nValue < GetDustThreshold(txout, dustRelayFeeIn)); } -bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType) +bool IsStandard(const CScript& scriptPubKey, TxoutType& whichType) { std::vector<std::vector<unsigned char> > vSolutions; whichType = Solver(scriptPubKey, vSolutions); - if (whichType == TX_NONSTANDARD) { + if (whichType == TxoutType::NONSTANDARD) { return false; - } else if (whichType == TX_MULTISIG) { + } else if (whichType == TxoutType::MULTISIG) { unsigned char m = vSolutions.front()[0]; unsigned char n = vSolutions.back()[0]; // Support up to x-of-3 multisig txns as standard @@ -65,7 +65,7 @@ bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType) return false; if (m < 1 || m > n) return false; - } else if (whichType == TX_NULL_DATA && + } else if (whichType == TxoutType::NULL_DATA && (!fAcceptDatacarrier || scriptPubKey.size() > nMaxDatacarrierBytes)) { return false; } @@ -110,16 +110,16 @@ bool IsStandardTx(const CTransaction& tx, bool permit_bare_multisig, const CFeeR } unsigned int nDataOut = 0; - txnouttype whichType; + TxoutType whichType; for (const CTxOut& txout : tx.vout) { if (!::IsStandard(txout.scriptPubKey, whichType)) { reason = "scriptpubkey"; return false; } - if (whichType == TX_NULL_DATA) + if (whichType == TxoutType::NULL_DATA) nDataOut++; - else if ((whichType == TX_MULTISIG) && (!permit_bare_multisig)) { + else if ((whichType == TxoutType::MULTISIG) && (!permit_bare_multisig)) { reason = "bare-multisig"; return false; } else if (IsDust(txout, dust_relay_fee)) { @@ -152,6 +152,8 @@ bool IsStandardTx(const CTransaction& tx, bool permit_bare_multisig, const CFeeR * script can be anything; an attacker could use a very * expensive-to-check-upon-redemption script like: * DUP CHECKSIG DROP ... repeated 100 times... OP_1 + * + * Note that only the non-witness portion of the transaction is checked here. */ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { @@ -163,10 +165,14 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) const CTxOut& prev = mapInputs.AccessCoin(tx.vin[i].prevout).out; std::vector<std::vector<unsigned char> > vSolutions; - txnouttype whichType = Solver(prev.scriptPubKey, vSolutions); - if (whichType == TX_NONSTANDARD) { + TxoutType whichType = Solver(prev.scriptPubKey, vSolutions); + if (whichType == TxoutType::NONSTANDARD || whichType == TxoutType::WITNESS_UNKNOWN) { + // WITNESS_UNKNOWN failures are typically also caught with a policy + // flag in the script interpreter, but it can be helpful to catch + // this type of NONSTANDARD transaction earlier in transaction + // validation. 
return false; - } else if (whichType == TX_SCRIPTHASH) { + } else if (whichType == TxoutType::SCRIPTHASH) { std::vector<std::vector<unsigned char> > stack; // convert the scriptSig into a stack, so we can inspect the redeemScript if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), SigVersion::BASE)) diff --git a/src/policy/policy.h b/src/policy/policy.h index 1561a41c5e..7f168ee20f 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -81,7 +81,7 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFee); bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFee); -bool IsStandard(const CScript& scriptPubKey, txnouttype& whichType); +bool IsStandard(const CScript& scriptPubKey, TxoutType& whichType); /** * Check for standard transaction types * @return True if all outputs (scriptPubKeys) use only standard transaction forms diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index 4514db578a..544bab6d9b 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -12,6 +12,8 @@ #include <serialize.h> #include <uint256.h> +#include <tuple> + static const int SERIALIZE_TRANSACTION_NO_WITNESS = 0x40000000; /** An outpoint - a combination of a transaction hash and an index n into its vout */ @@ -388,4 +390,17 @@ typedef std::shared_ptr<const CTransaction> CTransactionRef; static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); } template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); } +/** A generic txid reference (txid or wtxid). */ +class GenTxid +{ + const bool m_is_wtxid; + const uint256 m_hash; +public: + GenTxid(bool is_wtxid, const uint256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {} + bool IsWtxid() const { return m_is_wtxid; } + const uint256& GetHash() const { return m_hash; } + friend bool operator==(const GenTxid& a, const GenTxid& b) { return a.m_is_wtxid == b.m_is_wtxid && a.m_hash == b.m_hash; } + friend bool operator<(const GenTxid& a, const GenTxid& b) { return std::tie(a.m_is_wtxid, a.m_hash) < std::tie(b.m_is_wtxid, b.m_hash); } +}; + #endif // BITCOIN_PRIMITIVES_TRANSACTION_H diff --git a/src/protocol.cpp b/src/protocol.cpp index 2dfe4bee74..5a91acee0f 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -46,6 +46,7 @@ const char *GETCFHEADERS="getcfheaders"; const char *CFHEADERS="cfheaders"; const char *GETCFCHECKPT="getcfcheckpt"; const char *CFCHECKPT="cfcheckpt"; +const char *WTXIDRELAY="wtxidrelay"; } // namespace NetMsgType /** All known message types. 
Keep this in the same order as the list of @@ -83,6 +84,7 @@ const static std::string allNetMessageTypes[] = { NetMsgType::CFHEADERS, NetMsgType::GETCFCHECKPT, NetMsgType::CFCHECKPT, + NetMsgType::WTXIDRELAY, }; const static std::vector<std::string> allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes+ARRAYLEN(allNetMessageTypes)); @@ -177,6 +179,8 @@ std::string CInv::GetCommand() const switch (masked) { case MSG_TX: return cmd.append(NetMsgType::TX); + // WTX is not a message type, just an inv type + case MSG_WTX: return cmd.append("wtx"); case MSG_BLOCK: return cmd.append(NetMsgType::BLOCK); case MSG_FILTERED_BLOCK: return cmd.append(NetMsgType::MERKLEBLOCK); case MSG_CMPCT_BLOCK: return cmd.append(NetMsgType::CMPCTBLOCK); @@ -237,3 +241,9 @@ std::vector<std::string> serviceFlagsToStr(uint64_t flags) return str_flags; } + +GenTxid ToGenTxid(const CInv& inv) +{ + assert(inv.IsGenTxMsg()); + return {inv.IsMsgWtx(), inv.hash}; +} diff --git a/src/protocol.h b/src/protocol.h index 985f44640b..1d0adaae6e 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -11,6 +11,7 @@ #define BITCOIN_PROTOCOL_H #include <netaddress.h> +#include <primitives/transaction.h> #include <serialize.h> #include <uint256.h> #include <version.h> @@ -63,100 +64,84 @@ namespace NetMsgType { /** * The version message provides information about the transmitting node to the * receiving node at the beginning of a connection. - * @see https://bitcoin.org/en/developer-reference#version */ extern const char* VERSION; /** * The verack message acknowledges a previously-received version message, * informing the connecting node that it can begin to send other messages. - * @see https://bitcoin.org/en/developer-reference#verack */ extern const char* VERACK; /** * The addr (IP address) message relays connection information for peers on the * network. - * @see https://bitcoin.org/en/developer-reference#addr */ extern const char* ADDR; /** * The inv message (inventory message) transmits one or more inventories of * objects known to the transmitting peer. - * @see https://bitcoin.org/en/developer-reference#inv */ extern const char* INV; /** * The getdata message requests one or more data objects from another node. - * @see https://bitcoin.org/en/developer-reference#getdata */ extern const char* GETDATA; /** * The merkleblock message is a reply to a getdata message which requested a * block using the inventory type MSG_MERKLEBLOCK. * @since protocol version 70001 as described by BIP37. - * @see https://bitcoin.org/en/developer-reference#merkleblock */ extern const char* MERKLEBLOCK; /** * The getblocks message requests an inv message that provides block header * hashes starting from a particular point in the block chain. - * @see https://bitcoin.org/en/developer-reference#getblocks */ extern const char* GETBLOCKS; /** * The getheaders message requests a headers message that provides block * headers starting from a particular point in the block chain. * @since protocol version 31800. - * @see https://bitcoin.org/en/developer-reference#getheaders */ extern const char* GETHEADERS; /** * The tx message transmits a single transaction. - * @see https://bitcoin.org/en/developer-reference#tx */ extern const char* TX; /** * The headers message sends one or more block headers to a node which * previously requested certain headers with a getheaders message. * @since protocol version 31800. - * @see https://bitcoin.org/en/developer-reference#headers */ extern const char* HEADERS; /** * The block message transmits a single serialized block. 
- * @see https://bitcoin.org/en/developer-reference#block */ extern const char* BLOCK; /** * The getaddr message requests an addr message from the receiving node, * preferably one with lots of IP addresses of other receiving nodes. - * @see https://bitcoin.org/en/developer-reference#getaddr */ extern const char* GETADDR; /** * The mempool message requests the TXIDs of transactions that the receiving * node has verified as valid but which have not yet appeared in a block. * @since protocol version 60002. - * @see https://bitcoin.org/en/developer-reference#mempool */ extern const char* MEMPOOL; /** * The ping message is sent periodically to help confirm that the receiving * peer is still connected. - * @see https://bitcoin.org/en/developer-reference#ping */ extern const char* PING; /** * The pong message replies to a ping message, proving to the pinging node that * the ponging node is still alive. * @since protocol version 60001 as described by BIP31. - * @see https://bitcoin.org/en/developer-reference#pong */ extern const char* PONG; /** * The notfound message is a reply to a getdata message which requested an * object the receiving node does not have available for relay. * @since protocol version 70001. - * @see https://bitcoin.org/en/developer-reference#notfound */ extern const char* NOTFOUND; /** @@ -165,7 +150,6 @@ extern const char* NOTFOUND; * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. - * @see https://bitcoin.org/en/developer-reference#filterload */ extern const char* FILTERLOAD; /** @@ -174,7 +158,6 @@ extern const char* FILTERLOAD; * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. - * @see https://bitcoin.org/en/developer-reference#filteradd */ extern const char* FILTERADD; /** @@ -183,14 +166,12 @@ extern const char* FILTERADD; * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. - * @see https://bitcoin.org/en/developer-reference#filterclear */ extern const char* FILTERCLEAR; /** * Indicates that a node prefers to receive new block announcements via a * "headers" message rather than an "inv". * @since protocol version 70012 as described by BIP130. - * @see https://bitcoin.org/en/developer-reference#sendheaders */ extern const char* SENDHEADERS; /** @@ -261,6 +242,12 @@ extern const char* GETCFCHECKPT; * evenly spaced filter headers for blocks on the requested chain. */ extern const char* CFCHECKPT; +/** + * Indicates that a node prefers to relay transactions via wtxid, rather than + * txid. + * @since protocol version 70016 as described by BIP 339. + */ +extern const char *WTXIDRELAY; }; // namespace NetMsgType /* Get a vector of all valid message types (see above) */ @@ -371,7 +358,13 @@ public: READWRITE(nVersion); } if ((s.GetType() & SER_DISK) || - (nVersion >= CADDR_TIME_VERSION && !(s.GetType() & SER_GETHASH))) { + (nVersion != INIT_PROTO_VERSION && !(s.GetType() & SER_GETHASH))) { + // The only time we serialize a CAddress object without nTime is in + // the initial VERSION messages which contain two CAddress records. + // At that point, the serialization version is INIT_PROTO_VERSION. + // After the version handshake, serialization version is >= + // MIN_PEER_PROTO_VERSION and all ADDR messages are serialized with + // nTime. 
READWRITE(obj.nTime); } READWRITE(Using<CustomUintFormatter<8>>(obj.nServices)); @@ -391,11 +384,12 @@ const uint32_t MSG_TYPE_MASK = 0xffffffff >> 2; * These numbers are defined by the protocol. When adding a new value, be sure * to mention it in the respective BIP. */ -enum GetDataMsg { +enum GetDataMsg : uint32_t { UNDEFINED = 0, MSG_TX = 1, MSG_BLOCK = 2, - // The following can only occur in getdata. Invs always use TX or BLOCK. + MSG_WTX = 5, //!< Defined in BIP 339 + // The following can only occur in getdata. Invs always use TX/WTX or BLOCK. MSG_FILTERED_BLOCK = 3, //!< Defined in BIP37 MSG_CMPCT_BLOCK = 4, //!< Defined in BIP152 MSG_WITNESS_BLOCK = MSG_BLOCK | MSG_WITNESS_FLAG, //!< Defined in BIP144 @@ -417,8 +411,19 @@ public: std::string GetCommand() const; std::string ToString() const; + // Single-message helper methods + bool IsMsgTx() const { return type == MSG_TX; } + bool IsMsgWtx() const { return type == MSG_WTX; } + bool IsMsgWitnessTx() const { return type == MSG_WITNESS_TX; } + + // Combined-message helper methods + bool IsGenTxMsg() const { return type == MSG_TX || type == MSG_WTX || type == MSG_WITNESS_TX; } + int type; uint256 hash; }; +/** Convert a TX/WITNESS_TX/WTX CInv to a GenTxid. */ +GenTxid ToGenTxid(const CInv& inv); + #endif // BITCOIN_PROTOCOL_H diff --git a/src/psbt.cpp b/src/psbt.cpp index 10260740f0..3fb743e5db 100644 --- a/src/psbt.cpp +++ b/src/psbt.cpp @@ -35,14 +35,6 @@ bool PartiallySignedTransaction::Merge(const PartiallySignedTransaction& psbt) return true; } -bool PartiallySignedTransaction::IsSane() const -{ - for (PSBTInput input : inputs) { - if (!input.IsSane()) return false; - } - return true; -} - bool PartiallySignedTransaction::AddInput(const CTxIn& txin, PSBTInput& psbtin) { if (std::find(tx->vin.begin(), tx->vin.end(), txin) != tx->vin.end()) { @@ -144,8 +136,8 @@ void PSBTInput::Merge(const PSBTInput& input) { if (!non_witness_utxo && input.non_witness_utxo) non_witness_utxo = input.non_witness_utxo; if (witness_utxo.IsNull() && !input.witness_utxo.IsNull()) { + // TODO: For segwit v1, we will want to clear out the non-witness utxo when setting a witness one. For v0 and non-segwit, this is not safe witness_utxo = input.witness_utxo; - non_witness_utxo = nullptr; // Clear out any non-witness utxo when we set a witness one. } partial_sigs.insert(input.partial_sigs.begin(), input.partial_sigs.end()); @@ -158,18 +150,6 @@ void PSBTInput::Merge(const PSBTInput& input) if (final_script_witness.IsNull() && !input.final_script_witness.IsNull()) final_script_witness = input.final_script_witness; } -bool PSBTInput::IsSane() const -{ - // Cannot have both witness and non-witness utxos - if (!witness_utxo.IsNull() && non_witness_utxo) return false; - - // If we have a witness_script or a scriptWitness, we must also have a witness utxo - if (!witness_script.empty() && witness_utxo.IsNull()) return false; - if (!final_script_witness.IsNull() && witness_utxo.IsNull()) return false; - - return true; -} - void PSBTOutput::FillSignatureData(SignatureData& sigdata) const { if (!redeem_script.empty()) { @@ -261,11 +241,6 @@ bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& bool require_witness_sig = false; CTxOut utxo; - // Verify input sanity, which checks that at most one of witness or non-witness utxos is provided. - if (!input.IsSane()) { - return false; - } - if (input.non_witness_utxo) { // If we're taking our information from a non-witness UTXO, verify that it matches the prevout. 
COutPoint prevout = tx.vin[index].prevout; @@ -299,10 +274,11 @@ bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& if (require_witness_sig && !sigdata.witness) return false; input.FromSignatureData(sigdata); - // If we have a witness signature, use the smaller witness UTXO. + // If we have a witness signature, put a witness UTXO. + // TODO: For segwit v1, we should remove the non_witness_utxo if (sigdata.witness) { input.witness_utxo = utxo; - input.non_witness_utxo = nullptr; + // input.non_witness_utxo = nullptr; } // Fill in the missing info @@ -356,10 +332,6 @@ TransactionError CombinePSBTs(PartiallySignedTransaction& out, const std::vector return TransactionError::PSBT_MISMATCH; } } - if (!out.IsSane()) { - return TransactionError::INVALID_PSBT; - } - return TransactionError::OK; } diff --git a/src/psbt.h b/src/psbt.h index 0a8ea2ea0b..0951b76f83 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -62,18 +62,17 @@ struct PSBTInput void FillSignatureData(SignatureData& sigdata) const; void FromSignatureData(const SignatureData& sigdata); void Merge(const PSBTInput& input); - bool IsSane() const; PSBTInput() {} template <typename Stream> inline void Serialize(Stream& s) const { // Write the utxo - // If there is a non-witness utxo, then don't add the witness one. if (non_witness_utxo) { SerializeToVector(s, PSBT_IN_NON_WITNESS_UTXO); OverrideStream<Stream> os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); SerializeToVector(os, non_witness_utxo); - } else if (!witness_utxo.IsNull()) { + } + if (!witness_utxo.IsNull()) { SerializeToVector(s, PSBT_IN_WITNESS_UTXO); SerializeToVector(s, witness_utxo); } @@ -284,7 +283,6 @@ struct PSBTOutput void FillSignatureData(SignatureData& sigdata) const; void FromSignatureData(const SignatureData& sigdata); void Merge(const PSBTOutput& output); - bool IsSane() const; PSBTOutput() {} template <typename Stream> @@ -401,7 +399,6 @@ struct PartiallySignedTransaction /** Merge psbt into this. The two psbts must have the same underlying CTransaction (i.e. the * same actual Bitcoin transaction.) Returns true if the merge succeeded, false otherwise. */ NODISCARD bool Merge(const PartiallySignedTransaction& psbt); - bool IsSane() const; bool AddInput(const CTxIn& txin, PSBTInput& psbtin); bool AddOutput(const CTxOut& txout, const PSBTOutput& psbtout); PartiallySignedTransaction() {} @@ -551,10 +548,6 @@ struct PartiallySignedTransaction if (outputs.size() != tx->vout.size()) { throw std::ios_base::failure("Outputs provided does not match the number of outputs in transaction."); } - // Sanity check - if (!IsSane()) { - throw std::ios_base::failure("PSBT is not sane."); - } } template <typename Stream> diff --git a/src/pubkey.h b/src/pubkey.h index 4c28af4a4d..fcbc7e8416 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -157,13 +157,13 @@ public: //! Get the KeyID of this public key (hash of its serialization) CKeyID GetID() const { - return CKeyID(Hash160(vch, vch + size())); + return CKeyID(Hash160(MakeSpan(vch).first(size()))); } //! Get the 256-bit hash of this public key. 
uint256 GetHash() const { - return Hash(vch, vch + size()); + return Hash(MakeSpan(vch).first(size())); } /* diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index e0b9345a32..4f1e0056be 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -29,12 +29,12 @@ #include <interfaces/handler.h> #include <interfaces/node.h> +#include <node/context.h> #include <noui.h> -#include <ui_interface.h> #include <uint256.h> #include <util/system.h> -#include <util/translation.h> #include <util/threadnames.h> +#include <util/translation.h> #include <validation.h> #include <memory> @@ -412,14 +412,14 @@ WId BitcoinApplication::getMainWinId() const return window->winId(); } -static void SetupUIArgs() +static void SetupUIArgs(ArgsManager& argsman) { - gArgs.AddArg("-choosedatadir", strprintf("Choose data directory on startup (default: %u)", DEFAULT_CHOOSE_DATADIR), ArgsManager::ALLOW_ANY, OptionsCategory::GUI); - gArgs.AddArg("-lang=<lang>", "Set language, for example \"de_DE\" (default: system locale)", ArgsManager::ALLOW_ANY, OptionsCategory::GUI); - gArgs.AddArg("-min", "Start minimized", ArgsManager::ALLOW_ANY, OptionsCategory::GUI); - gArgs.AddArg("-resetguisettings", "Reset all settings changed in the GUI", ArgsManager::ALLOW_ANY, OptionsCategory::GUI); - gArgs.AddArg("-splash", strprintf("Show splash screen on startup (default: %u)", DEFAULT_SPLASHSCREEN), ArgsManager::ALLOW_ANY, OptionsCategory::GUI); - gArgs.AddArg("-uiplatform", strprintf("Select platform to customize UI for (one of windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::GUI); + argsman.AddArg("-choosedatadir", strprintf("Choose data directory on startup (default: %u)", DEFAULT_CHOOSE_DATADIR), ArgsManager::ALLOW_ANY, OptionsCategory::GUI); + argsman.AddArg("-lang=<lang>", "Set language, for example \"de_DE\" (default: system locale)", ArgsManager::ALLOW_ANY, OptionsCategory::GUI); + argsman.AddArg("-min", "Start minimized", ArgsManager::ALLOW_ANY, OptionsCategory::GUI); + argsman.AddArg("-resetguisettings", "Reset all settings changed in the GUI", ArgsManager::ALLOW_ANY, OptionsCategory::GUI); + argsman.AddArg("-splash", strprintf("Show splash screen on startup (default: %u)", DEFAULT_SPLASHSCREEN), ArgsManager::ALLOW_ANY, OptionsCategory::GUI); + argsman.AddArg("-uiplatform", strprintf("Select platform to customize UI for (one of windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::GUI); } int GuiMain(int argc, char* argv[]) @@ -431,7 +431,8 @@ int GuiMain(int argc, char* argv[]) SetupEnvironment(); util::ThreadSetInternalName("main"); - std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(); + NodeContext node_context; + std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context); // Subscribe to global signals from core std::unique_ptr<interfaces::Handler> handler_message_box = node->handleMessageBox(noui_ThreadSafeMessageBox); @@ -455,7 +456,7 @@ int GuiMain(int argc, char* argv[]) /// 2. Parse command-line options. 
We do this after qt in order to show an error if there are problems parsing these // Command-line options take precedence: node->setupServerArgs(); - SetupUIArgs(); + SetupUIArgs(gArgs); std::string error; if (!node->parseParameters(argc, argv, error)) { node->initError(strprintf(Untranslated("Error parsing command line arguments: %s\n"), error)); @@ -529,6 +530,11 @@ int GuiMain(int argc, char* argv[]) // Parse URIs on command line -- this can affect Params() PaymentServer::ipcParseCommandLine(*node, argc, argv); #endif + if (!node->initSettings(error)) { + node->initError(Untranslated(error)); + QMessageBox::critical(nullptr, PACKAGE_NAME, QObject::tr("Error initializing settings: %1").arg(QString::fromStdString(error))); + return EXIT_FAILURE; + } QScopedPointer<const NetworkStyle> networkStyle(NetworkStyle::instantiate(Params().NetworkIDString())); assert(!networkStyle.isNull()); @@ -557,6 +563,8 @@ int GuiMain(int argc, char* argv[]) /// 9. Main GUI initialization // Install global event filter that makes sure that long tooltips can be word-wrapped app.installEventFilter(new GUIUtil::ToolTipToRichTextFilter(TOOLTIP_WRAP_THRESHOLD, &app)); + // Install global event filter that makes sure that out-of-focus labels do not contain text cursor. + app.installEventFilter(new GUIUtil::LabelOutOfFocusEventFilter(&app)); #if defined(Q_OS_WIN) // Install global event filter for processing Windows session related Windows messages (WM_QUERYENDSESSION and WM_ENDSESSION) qApp->installNativeEventFilter(new WinShutdownMonitor()); diff --git a/src/qt/bitcoin.qrc b/src/qt/bitcoin.qrc index 037b23e4b2..7115459808 100644 --- a/src/qt/bitcoin.qrc +++ b/src/qt/bitcoin.qrc @@ -45,42 +45,42 @@ <file alias="network_disabled">res/icons/network_disabled.png</file> <file alias="proxy">res/icons/proxy.png</file> </qresource> - <qresource prefix="/movies"> - <file alias="spinner-000">res/movies/spinner-000.png</file> - <file alias="spinner-001">res/movies/spinner-001.png</file> - <file alias="spinner-002">res/movies/spinner-002.png</file> - <file alias="spinner-003">res/movies/spinner-003.png</file> - <file alias="spinner-004">res/movies/spinner-004.png</file> - <file alias="spinner-005">res/movies/spinner-005.png</file> - <file alias="spinner-006">res/movies/spinner-006.png</file> - <file alias="spinner-007">res/movies/spinner-007.png</file> - <file alias="spinner-008">res/movies/spinner-008.png</file> - <file alias="spinner-009">res/movies/spinner-009.png</file> - <file alias="spinner-010">res/movies/spinner-010.png</file> - <file alias="spinner-011">res/movies/spinner-011.png</file> - <file alias="spinner-012">res/movies/spinner-012.png</file> - <file alias="spinner-013">res/movies/spinner-013.png</file> - <file alias="spinner-014">res/movies/spinner-014.png</file> - <file alias="spinner-015">res/movies/spinner-015.png</file> - <file alias="spinner-016">res/movies/spinner-016.png</file> - <file alias="spinner-017">res/movies/spinner-017.png</file> - <file alias="spinner-018">res/movies/spinner-018.png</file> - <file alias="spinner-019">res/movies/spinner-019.png</file> - <file alias="spinner-020">res/movies/spinner-020.png</file> - <file alias="spinner-021">res/movies/spinner-021.png</file> - <file alias="spinner-022">res/movies/spinner-022.png</file> - <file alias="spinner-023">res/movies/spinner-023.png</file> - <file alias="spinner-024">res/movies/spinner-024.png</file> - <file alias="spinner-025">res/movies/spinner-025.png</file> - <file alias="spinner-026">res/movies/spinner-026.png</file> - <file 
alias="spinner-027">res/movies/spinner-027.png</file> - <file alias="spinner-028">res/movies/spinner-028.png</file> - <file alias="spinner-029">res/movies/spinner-029.png</file> - <file alias="spinner-030">res/movies/spinner-030.png</file> - <file alias="spinner-031">res/movies/spinner-031.png</file> - <file alias="spinner-032">res/movies/spinner-032.png</file> - <file alias="spinner-033">res/movies/spinner-033.png</file> - <file alias="spinner-034">res/movies/spinner-034.png</file> - <file alias="spinner-035">res/movies/spinner-035.png</file> + <qresource prefix="/animation"> + <file alias="spinner-000">res/animation/spinner-000.png</file> + <file alias="spinner-001">res/animation/spinner-001.png</file> + <file alias="spinner-002">res/animation/spinner-002.png</file> + <file alias="spinner-003">res/animation/spinner-003.png</file> + <file alias="spinner-004">res/animation/spinner-004.png</file> + <file alias="spinner-005">res/animation/spinner-005.png</file> + <file alias="spinner-006">res/animation/spinner-006.png</file> + <file alias="spinner-007">res/animation/spinner-007.png</file> + <file alias="spinner-008">res/animation/spinner-008.png</file> + <file alias="spinner-009">res/animation/spinner-009.png</file> + <file alias="spinner-010">res/animation/spinner-010.png</file> + <file alias="spinner-011">res/animation/spinner-011.png</file> + <file alias="spinner-012">res/animation/spinner-012.png</file> + <file alias="spinner-013">res/animation/spinner-013.png</file> + <file alias="spinner-014">res/animation/spinner-014.png</file> + <file alias="spinner-015">res/animation/spinner-015.png</file> + <file alias="spinner-016">res/animation/spinner-016.png</file> + <file alias="spinner-017">res/animation/spinner-017.png</file> + <file alias="spinner-018">res/animation/spinner-018.png</file> + <file alias="spinner-019">res/animation/spinner-019.png</file> + <file alias="spinner-020">res/animation/spinner-020.png</file> + <file alias="spinner-021">res/animation/spinner-021.png</file> + <file alias="spinner-022">res/animation/spinner-022.png</file> + <file alias="spinner-023">res/animation/spinner-023.png</file> + <file alias="spinner-024">res/animation/spinner-024.png</file> + <file alias="spinner-025">res/animation/spinner-025.png</file> + <file alias="spinner-026">res/animation/spinner-026.png</file> + <file alias="spinner-027">res/animation/spinner-027.png</file> + <file alias="spinner-028">res/animation/spinner-028.png</file> + <file alias="spinner-029">res/animation/spinner-029.png</file> + <file alias="spinner-030">res/animation/spinner-030.png</file> + <file alias="spinner-031">res/animation/spinner-031.png</file> + <file alias="spinner-032">res/animation/spinner-032.png</file> + <file alias="spinner-033">res/animation/spinner-033.png</file> + <file alias="spinner-034">res/animation/spinner-034.png</file> + <file alias="spinner-035">res/animation/spinner-035.png</file> </qresource> </RCC> diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index b4182e8524..ebcc04a5eb 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -34,7 +34,7 @@ #include <chainparams.h> #include <interfaces/handler.h> #include <interfaces/node.h> -#include <ui_interface.h> +#include <node/ui_interface.h> #include <util/system.h> #include <util/translation.h> #include <validation.h> @@ -112,6 +112,8 @@ BitcoinGUI::BitcoinGUI(interfaces::Node& node, const PlatformStyle *_platformSty Q_EMIT consoleShown(rpcConsole); } + modalOverlay = new ModalOverlay(enableWallet, this->centralWidget()); + // 
Accept D&D of URIs setAcceptDrops(true); @@ -201,7 +203,6 @@ BitcoinGUI::BitcoinGUI(interfaces::Node& node, const PlatformStyle *_platformSty openOptionsDialogWithTab(OptionsDialog::TAB_NETWORK); }); - modalOverlay = new ModalOverlay(enableWallet, this->centralWidget()); connect(labelBlocksIcon, &GUIUtil::ClickableLabel::clicked, this, &BitcoinGUI::showModalOverlay); connect(progressBar, &GUIUtil::ClickableProgressBar::clicked, this, &BitcoinGUI::showModalOverlay); #ifdef ENABLE_WALLET @@ -238,6 +239,7 @@ BitcoinGUI::~BitcoinGUI() void BitcoinGUI::createActions() { QActionGroup *tabGroup = new QActionGroup(this); + connect(modalOverlay, &ModalOverlay::triggered, tabGroup, &QActionGroup::setEnabled); overviewAction = new QAction(platformStyle->SingleColorIcon(":/icons/overview"), tr("&Overview"), this); overviewAction->setStatusTip(tr("Show general overview of wallet")); @@ -683,6 +685,7 @@ void BitcoinGUI::removeWallet(WalletModel* walletModel) m_wallet_selector->removeItem(index); if (m_wallet_selector->count() == 0) { setWalletActionsEnabled(false); + overviewAction->setChecked(true); } else if (m_wallet_selector->count() == 1) { m_wallet_selector_label_action->setVisible(false); m_wallet_selector_action->setVisible(false); @@ -1037,7 +1040,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer if(count != prevBlocks) { labelBlocksIcon->setPixmap(platformStyle->SingleColorIcon(QString( - ":/movies/spinner-%1").arg(spinnerFrame, 3, 10, QChar('0'))) + ":/animation/spinner-%1").arg(spinnerFrame, 3, 10, QChar('0'))) .pixmap(STATUSBAR_ICONSIZE, STATUSBAR_ICONSIZE)); spinnerFrame = (spinnerFrame + 1) % SPINNER_FRAMES; } diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index 8b70800838..93840b4169 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -1078,22 +1078,16 @@ <height>426</height> </rect> </property> - <property name="minimumSize"> - <size> - <width>300</width> - <height>0</height> - </size> - </property> <layout class="QGridLayout" name="gridLayout_2" columnstretch="0,1"> <item row="0" column="0"> <widget class="QLabel" name="label_30"> <property name="text"> - <string>Whitelisted</string> + <string>Permissions</string> </property> </widget> </item> <item row="0" column="1"> - <widget class="QLabel" name="peerWhitelisted"> + <widget class="QLabel" name="peerPermissions"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -1270,36 +1264,13 @@ </widget> </item> <item row="8" column="0"> - <widget class="QLabel" name="label_24"> - <property name="text"> - <string>Ban Score</string> - </property> - </widget> - </item> - <item row="8" column="1"> - <widget class="QLabel" name="peerBanScore"> - <property name="cursor"> - <cursorShape>IBeamCursor</cursorShape> - </property> - <property name="text"> - <string>N/A</string> - </property> - <property name="textFormat"> - <enum>Qt::PlainText</enum> - </property> - <property name="textInteractionFlags"> - <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set> - </property> - </widget> - </item> - <item row="9" column="0"> <widget class="QLabel" name="label_22"> <property name="text"> <string>Connection Time</string> </property> </widget> </item> - <item row="9" column="1"> + <item row="8" column="1"> <widget class="QLabel" name="peerConnTime"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1315,14 +1286,14 @@ </property> </widget> </item> - <item row="10" column="0"> + <item row="9" 
column="0"> <widget class="QLabel" name="label_15"> <property name="text"> <string>Last Send</string> </property> </widget> </item> - <item row="10" column="1"> + <item row="9" column="1"> <widget class="QLabel" name="peerLastSend"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1338,14 +1309,14 @@ </property> </widget> </item> - <item row="11" column="0"> + <item row="10" column="0"> <widget class="QLabel" name="label_19"> <property name="text"> <string>Last Receive</string> </property> </widget> </item> - <item row="11" column="1"> + <item row="10" column="1"> <widget class="QLabel" name="peerLastRecv"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1361,14 +1332,14 @@ </property> </widget> </item> - <item row="12" column="0"> + <item row="11" column="0"> <widget class="QLabel" name="label_18"> <property name="text"> <string>Sent</string> </property> </widget> </item> - <item row="12" column="1"> + <item row="11" column="1"> <widget class="QLabel" name="peerBytesSent"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1384,14 +1355,14 @@ </property> </widget> </item> - <item row="13" column="0"> + <item row="12" column="0"> <widget class="QLabel" name="label_20"> <property name="text"> <string>Received</string> </property> </widget> </item> - <item row="13" column="1"> + <item row="12" column="1"> <widget class="QLabel" name="peerBytesRecv"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1407,14 +1378,14 @@ </property> </widget> </item> - <item row="14" column="0"> + <item row="13" column="0"> <widget class="QLabel" name="label_26"> <property name="text"> <string>Ping Time</string> </property> </widget> </item> - <item row="14" column="1"> + <item row="13" column="1"> <widget class="QLabel" name="peerPingTime"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1430,7 +1401,7 @@ </property> </widget> </item> - <item row="15" column="0"> + <item row="14" column="0"> <widget class="QLabel" name="peerPingWaitLabel"> <property name="toolTip"> <string>The duration of a currently outstanding ping.</string> @@ -1440,7 +1411,7 @@ </property> </widget> </item> - <item row="15" column="1"> + <item row="14" column="1"> <widget class="QLabel" name="peerPingWait"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1456,14 +1427,14 @@ </property> </widget> </item> - <item row="16" column="0"> + <item row="15" column="0"> <widget class="QLabel" name="peerMinPingLabel"> <property name="text"> <string>Min Ping</string> </property> </widget> </item> - <item row="16" column="1"> + <item row="15" column="1"> <widget class="QLabel" name="peerMinPing"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1479,14 +1450,14 @@ </property> </widget> </item> - <item row="17" column="0"> + <item row="16" column="0"> <widget class="QLabel" name="label_timeoffset"> <property name="text"> <string>Time Offset</string> </property> </widget> </item> - <item row="17" column="1"> + <item row="16" column="1"> <widget class="QLabel" name="timeoffset"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1502,7 +1473,7 @@ </property> </widget> </item> - <item row="18" column="0"> + <item row="17" column="0"> <widget class="QLabel" name="peerMappedASLabel"> <property name="toolTip"> <string>The mapped Autonomous System used for diversifying peer selection.</string> @@ -1512,7 +1483,7 @@ </property> </widget> </item> - <item row="18" column="1"> + <item row="17" column="1"> <widget 
class="QLabel" name="peerMappedAS"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1528,7 +1499,7 @@ </property> </widget> </item> - <item row="19" column="0"> + <item row="18" column="0"> <spacer name="verticalSpacer_3"> <property name="orientation"> <enum>Qt::Vertical</enum> diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index fea759dee0..0016fb9739 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -459,10 +459,10 @@ <item> <widget class="QCheckBox" name="connectSocksTor"> <property name="toolTip"> - <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor hidden services.</string> + <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor onion services.</string> </property> <property name="text"> - <string>Use separate SOCKS&5 proxy to reach peers via Tor hidden services:</string> + <string>Use separate SOCKS&5 proxy to reach peers via Tor onion services:</string> </property> </widget> </item> diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 3cadac2f2f..7f439fa45e 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -450,6 +450,28 @@ bool ToolTipToRichTextFilter::eventFilter(QObject *obj, QEvent *evt) return QObject::eventFilter(obj, evt); } +LabelOutOfFocusEventFilter::LabelOutOfFocusEventFilter(QObject* parent) + : QObject(parent) +{ +} + +bool LabelOutOfFocusEventFilter::eventFilter(QObject* watched, QEvent* event) +{ + if (event->type() == QEvent::FocusOut) { + auto focus_out = static_cast<QFocusEvent*>(event); + if (focus_out->reason() != Qt::PopupFocusReason) { + auto label = qobject_cast<QLabel*>(watched); + if (label) { + auto flags = label->textInteractionFlags(); + label->setTextInteractionFlags(Qt::NoTextInteraction); + label->setTextInteractionFlags(flags); + } + } + } + + return QObject::eventFilter(watched, event); +} + void TableViewLastColumnResizingFixer::connectViewHeadersSignals() { connect(tableView->horizontalHeader(), &QHeaderView::sectionResized, this, &TableViewLastColumnResizingFixer::on_sectionResized); diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index 8741d90102..2bd94b5eb3 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -162,6 +162,21 @@ namespace GUIUtil }; /** + * Qt event filter that intercepts QEvent::FocusOut events for QLabel objects, and + * resets their `textInteractionFlags' property to get rid of the visible cursor. + * + * This is a temporary fix of QTBUG-59514. + */ + class LabelOutOfFocusEventFilter : public QObject + { + Q_OBJECT + + public: + explicit LabelOutOfFocusEventFilter(QObject* parent); + bool eventFilter(QObject* watched, QEvent* event) override; + }; + + /** * Makes a QTableView last column feel as if it was being resized from its left border. * Also makes sure the column widths are never larger than the table's viewport. * In Qt, all columns are resizable from the right, but it's not intuitive resizing the last column from the right. 
diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp index 0ba1beaf3e..8070aa627c 100644 --- a/src/qt/modaloverlay.cpp +++ b/src/qt/modaloverlay.cpp @@ -171,6 +171,8 @@ void ModalOverlay::showHide(bool hide, bool userRequested) if ( (layerIsVisible && !hide) || (!layerIsVisible && hide) || (!hide && userClosed && !userRequested)) return; + Q_EMIT triggered(hide); + if (!isVisible() && !hide) setVisible(true); diff --git a/src/qt/modaloverlay.h b/src/qt/modaloverlay.h index 1d84046d3d..7b07777641 100644 --- a/src/qt/modaloverlay.h +++ b/src/qt/modaloverlay.h @@ -25,16 +25,20 @@ public: explicit ModalOverlay(bool enable_wallet, QWidget *parent); ~ModalOverlay(); -public Q_SLOTS: void tipUpdate(int count, const QDateTime& blockDate, double nVerificationProgress); void setKnownBestHeight(int count, const QDateTime& blockDate); - void toggleVisibility(); // will show or hide the modal layer void showHide(bool hide = false, bool userRequested = false); - void closeClicked(); bool isLayerVisible() const { return layerIsVisible; } +public Q_SLOTS: + void toggleVisibility(); + void closeClicked(); + +Q_SIGNALS: + void triggered(bool hidden); + protected: bool eventFilter(QObject * obj, QEvent * ev) override; bool event(QEvent* ev) override; diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h index 6ca5ac9d75..14fdf9046e 100644 --- a/src/qt/optionsmodel.h +++ b/src/qt/optionsmodel.h @@ -6,6 +6,7 @@ #define BITCOIN_QT_OPTIONSMODEL_H #include <amount.h> +#include <cstdint> #include <qt/guiconstants.h> #include <QAbstractListModel> @@ -15,7 +16,7 @@ class Node; } extern const char *DEFAULT_GUI_PROXY_HOST; -static constexpr unsigned short DEFAULT_GUI_PROXY_PORT = 9050; +static constexpr uint16_t DEFAULT_GUI_PROXY_PORT = 9050; /** * Convert configured prune target MiB to displayed GB. Round up to avoid underestimating max disk usage. 
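For reference, the new ModalOverlay::triggered(bool hidden) signal declared above is what BitcoinGUI::createActions() connects to the navigation QActionGroup earlier in this diff. A hedged sketch of that wiring follows; the free function and variable names are hypothetical, while the signal/slot pair is taken from the change:

#include <qt/modaloverlay.h>

#include <QActionGroup>
#include <QObject>

void WireSyncOverlay(ModalOverlay* overlay, QActionGroup* tab_group)
{
    // ModalOverlay::showHide() emits triggered(hide), so the boolean can be
    // forwarded straight to QActionGroup::setEnabled(): the navigation tabs
    // are disabled while the sync overlay is shown (hide == false) and
    // re-enabled once it is dismissed (hide == true).
    QObject::connect(overlay, &ModalOverlay::triggered,
                     tab_group, &QActionGroup::setEnabled);
}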
diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index beca78a021..a1da85bda7 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -14,9 +14,9 @@ #include <chainparams.h> #include <interfaces/node.h> -#include <policy/policy.h> #include <key_io.h> -#include <ui_interface.h> +#include <node/ui_interface.h> +#include <policy/policy.h> #include <util/system.h> #include <wallet/wallet.h> diff --git a/src/qt/res/movies/makespinner.sh b/src/qt/res/animation/makespinner.sh index 4fa8dadf86..4fa8dadf86 100755 --- a/src/qt/res/movies/makespinner.sh +++ b/src/qt/res/animation/makespinner.sh diff --git a/src/qt/res/movies/spinner-000.png b/src/qt/res/animation/spinner-000.png Binary files differindex 0dc48d0d8c..0dc48d0d8c 100644 --- a/src/qt/res/movies/spinner-000.png +++ b/src/qt/res/animation/spinner-000.png diff --git a/src/qt/res/movies/spinner-001.png b/src/qt/res/animation/spinner-001.png Binary files differindex d167f20541..d167f20541 100644 --- a/src/qt/res/movies/spinner-001.png +++ b/src/qt/res/animation/spinner-001.png diff --git a/src/qt/res/movies/spinner-002.png b/src/qt/res/animation/spinner-002.png Binary files differindex 4a1f1f8e56..4a1f1f8e56 100644 --- a/src/qt/res/movies/spinner-002.png +++ b/src/qt/res/animation/spinner-002.png diff --git a/src/qt/res/movies/spinner-003.png b/src/qt/res/animation/spinner-003.png Binary files differindex fb1c2cd4ad..fb1c2cd4ad 100644 --- a/src/qt/res/movies/spinner-003.png +++ b/src/qt/res/animation/spinner-003.png diff --git a/src/qt/res/movies/spinner-004.png b/src/qt/res/animation/spinner-004.png Binary files differindex 4df2132344..4df2132344 100644 --- a/src/qt/res/movies/spinner-004.png +++ b/src/qt/res/animation/spinner-004.png diff --git a/src/qt/res/movies/spinner-005.png b/src/qt/res/animation/spinner-005.png Binary files differindex 5d6f41e0dc..5d6f41e0dc 100644 --- a/src/qt/res/movies/spinner-005.png +++ b/src/qt/res/animation/spinner-005.png diff --git a/src/qt/res/movies/spinner-006.png b/src/qt/res/animation/spinner-006.png Binary files differindex c1f7d18899..c1f7d18899 100644 --- a/src/qt/res/movies/spinner-006.png +++ b/src/qt/res/animation/spinner-006.png diff --git a/src/qt/res/movies/spinner-007.png b/src/qt/res/animation/spinner-007.png Binary files differindex 1e794b2626..1e794b2626 100644 --- a/src/qt/res/movies/spinner-007.png +++ b/src/qt/res/animation/spinner-007.png diff --git a/src/qt/res/movies/spinner-008.png b/src/qt/res/animation/spinner-008.png Binary files differindex df12ea8719..df12ea8719 100644 --- a/src/qt/res/movies/spinner-008.png +++ b/src/qt/res/animation/spinner-008.png diff --git a/src/qt/res/movies/spinner-009.png b/src/qt/res/animation/spinner-009.png Binary files differindex 18fc3a7d16..18fc3a7d16 100644 --- a/src/qt/res/movies/spinner-009.png +++ b/src/qt/res/animation/spinner-009.png diff --git a/src/qt/res/movies/spinner-010.png b/src/qt/res/animation/spinner-010.png Binary files differindex a79c845fe8..a79c845fe8 100644 --- a/src/qt/res/movies/spinner-010.png +++ b/src/qt/res/animation/spinner-010.png diff --git a/src/qt/res/movies/spinner-011.png b/src/qt/res/animation/spinner-011.png Binary files differindex 57baf66895..57baf66895 100644 --- a/src/qt/res/movies/spinner-011.png +++ b/src/qt/res/animation/spinner-011.png diff --git a/src/qt/res/movies/spinner-012.png b/src/qt/res/animation/spinner-012.png Binary files differindex 9deae7853a..9deae7853a 100644 --- a/src/qt/res/movies/spinner-012.png +++ b/src/qt/res/animation/spinner-012.png diff --git 
a/src/qt/res/movies/spinner-013.png b/src/qt/res/animation/spinner-013.png Binary files differindex 0659d48dec..0659d48dec 100644 --- a/src/qt/res/movies/spinner-013.png +++ b/src/qt/res/animation/spinner-013.png diff --git a/src/qt/res/movies/spinner-014.png b/src/qt/res/animation/spinner-014.png Binary files differindex bc1ef51bde..bc1ef51bde 100644 --- a/src/qt/res/movies/spinner-014.png +++ b/src/qt/res/animation/spinner-014.png diff --git a/src/qt/res/movies/spinner-015.png b/src/qt/res/animation/spinner-015.png Binary files differindex 24b57b62c2..24b57b62c2 100644 --- a/src/qt/res/movies/spinner-015.png +++ b/src/qt/res/animation/spinner-015.png diff --git a/src/qt/res/movies/spinner-016.png b/src/qt/res/animation/spinner-016.png Binary files differindex d622872651..d622872651 100644 --- a/src/qt/res/movies/spinner-016.png +++ b/src/qt/res/animation/spinner-016.png diff --git a/src/qt/res/movies/spinner-017.png b/src/qt/res/animation/spinner-017.png Binary files differindex f48f688db2..f48f688db2 100644 --- a/src/qt/res/movies/spinner-017.png +++ b/src/qt/res/animation/spinner-017.png diff --git a/src/qt/res/movies/spinner-018.png b/src/qt/res/animation/spinner-018.png Binary files differindex a2c8f38b1d..a2c8f38b1d 100644 --- a/src/qt/res/movies/spinner-018.png +++ b/src/qt/res/animation/spinner-018.png diff --git a/src/qt/res/movies/spinner-019.png b/src/qt/res/animation/spinner-019.png Binary files differindex 9d7cc35d82..9d7cc35d82 100644 --- a/src/qt/res/movies/spinner-019.png +++ b/src/qt/res/animation/spinner-019.png diff --git a/src/qt/res/movies/spinner-020.png b/src/qt/res/animation/spinner-020.png Binary files differindex 1a07acc454..1a07acc454 100644 --- a/src/qt/res/movies/spinner-020.png +++ b/src/qt/res/animation/spinner-020.png diff --git a/src/qt/res/movies/spinner-021.png b/src/qt/res/animation/spinner-021.png Binary files differindex 9cea8f2543..9cea8f2543 100644 --- a/src/qt/res/movies/spinner-021.png +++ b/src/qt/res/animation/spinner-021.png diff --git a/src/qt/res/movies/spinner-022.png b/src/qt/res/animation/spinner-022.png Binary files differindex 60250f6dea..60250f6dea 100644 --- a/src/qt/res/movies/spinner-022.png +++ b/src/qt/res/animation/spinner-022.png diff --git a/src/qt/res/movies/spinner-023.png b/src/qt/res/animation/spinner-023.png Binary files differindex fc290a0cf2..fc290a0cf2 100644 --- a/src/qt/res/movies/spinner-023.png +++ b/src/qt/res/animation/spinner-023.png diff --git a/src/qt/res/movies/spinner-024.png b/src/qt/res/animation/spinner-024.png Binary files differindex c5dcf1eae9..c5dcf1eae9 100644 --- a/src/qt/res/movies/spinner-024.png +++ b/src/qt/res/animation/spinner-024.png diff --git a/src/qt/res/movies/spinner-025.png b/src/qt/res/animation/spinner-025.png Binary files differindex 7f3577a4de..7f3577a4de 100644 --- a/src/qt/res/movies/spinner-025.png +++ b/src/qt/res/animation/spinner-025.png diff --git a/src/qt/res/movies/spinner-026.png b/src/qt/res/animation/spinner-026.png Binary files differindex 1663ddf44c..1663ddf44c 100644 --- a/src/qt/res/movies/spinner-026.png +++ b/src/qt/res/animation/spinner-026.png diff --git a/src/qt/res/movies/spinner-027.png b/src/qt/res/animation/spinner-027.png Binary files differindex d0e6da4503..d0e6da4503 100644 --- a/src/qt/res/movies/spinner-027.png +++ b/src/qt/res/animation/spinner-027.png diff --git a/src/qt/res/movies/spinner-028.png b/src/qt/res/animation/spinner-028.png Binary files differindex 2a7aba50e2..2a7aba50e2 100644 --- a/src/qt/res/movies/spinner-028.png +++ 
b/src/qt/res/animation/spinner-028.png diff --git a/src/qt/res/movies/spinner-029.png b/src/qt/res/animation/spinner-029.png Binary files differindex c8ca15c1e1..c8ca15c1e1 100644 --- a/src/qt/res/movies/spinner-029.png +++ b/src/qt/res/animation/spinner-029.png diff --git a/src/qt/res/movies/spinner-030.png b/src/qt/res/animation/spinner-030.png Binary files differindex c847c99a93..c847c99a93 100644 --- a/src/qt/res/movies/spinner-030.png +++ b/src/qt/res/animation/spinner-030.png diff --git a/src/qt/res/movies/spinner-031.png b/src/qt/res/animation/spinner-031.png Binary files differindex 403443144e..403443144e 100644 --- a/src/qt/res/movies/spinner-031.png +++ b/src/qt/res/animation/spinner-031.png diff --git a/src/qt/res/movies/spinner-032.png b/src/qt/res/animation/spinner-032.png Binary files differindex f9db080567..f9db080567 100644 --- a/src/qt/res/movies/spinner-032.png +++ b/src/qt/res/animation/spinner-032.png diff --git a/src/qt/res/movies/spinner-033.png b/src/qt/res/animation/spinner-033.png Binary files differindex 43f57719e7..43f57719e7 100644 --- a/src/qt/res/movies/spinner-033.png +++ b/src/qt/res/animation/spinner-033.png diff --git a/src/qt/res/movies/spinner-034.png b/src/qt/res/animation/spinner-034.png Binary files differindex c26656ff17..c26656ff17 100644 --- a/src/qt/res/movies/spinner-034.png +++ b/src/qt/res/animation/spinner-034.png diff --git a/src/qt/res/movies/spinner-035.png b/src/qt/res/animation/spinner-035.png Binary files differindex e471f950a3..e471f950a3 100644 --- a/src/qt/res/movies/spinner-035.png +++ b/src/qt/res/animation/spinner-035.png diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index dafd517ca8..821a337a62 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -24,7 +24,6 @@ #include <univalue.h> #ifdef ENABLE_WALLET -#include <wallet/bdb.h> #include <wallet/db.h> #include <wallet/wallet.h> #endif @@ -467,6 +466,7 @@ RPCConsole::RPCConsole(interfaces::Node& node, const PlatformStyle *_platformSty // Install event filter for up and down arrow ui->lineEdit->installEventFilter(this); + ui->lineEdit->setMaxLength(16 * 1024 * 1024); ui->messagesWidget->installEventFilter(this); connect(ui->clearButton, &QPushButton::clicked, this, &RPCConsole::clear); @@ -1119,15 +1119,20 @@ void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats) ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer)); ui->peerDirection->setText(stats->nodeStats.fInbound ? tr("Inbound") : tr("Outbound")); ui->peerHeight->setText(QString::number(stats->nodeStats.nStartingHeight)); - ui->peerWhitelisted->setText(stats->nodeStats.m_legacyWhitelisted ? tr("Yes") : tr("No")); + if (stats->nodeStats.m_permissionFlags == PF_NONE) { + ui->peerPermissions->setText(tr("N/A")); + } else { + QStringList permissions; + for (const auto& permission : NetPermissions::ToStrings(stats->nodeStats.m_permissionFlags)) { + permissions.append(QString::fromStdString(permission)); + } + ui->peerPermissions->setText(permissions.join(" & ")); + } ui->peerMappedAS->setText(stats->nodeStats.m_mapped_as != 0 ? QString::number(stats->nodeStats.m_mapped_as) : tr("N/A")); // This check fails for example if the lock was busy and // nodeStateStats couldn't be fetched. 
if (stats->fNodeStateStatsAvailable) { - // Ban score is init to 0 - ui->peerBanScore->setText(QString("%1").arg(stats->nodeStateStats.nMisbehavior)); - // Sync height is init to -1 if (stats->nodeStateStats.nSyncHeight > -1) ui->peerSyncHeight->setText(QString("%1").arg(stats->nodeStateStats.nSyncHeight)); @@ -1218,7 +1223,7 @@ void RPCConsole::banSelectedNode(int bantime) // Find possible nodes, ban it and clear the selected node const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow); if (stats) { - m_node.ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime); + m_node.ban(stats->nodeStats.addr, bantime); m_node.disconnectByAddress(stats->nodeStats.addr); } } diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 0ac61f3adc..97fb88d71c 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -21,9 +21,9 @@ #include <chainparams.h> #include <interfaces/node.h> #include <key_io.h> +#include <node/ui_interface.h> #include <policy/fees.h> #include <txmempool.h> -#include <ui_interface.h> #include <wallet/coincontrol.h> #include <wallet/fees.h> #include <wallet/wallet.h> diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp index ced6a299d5..6e6b2b8466 100644 --- a/src/qt/splashscreen.cpp +++ b/src/qt/splashscreen.cpp @@ -14,7 +14,6 @@ #include <interfaces/wallet.h> #include <qt/guiutil.h> #include <qt/networkstyle.h> -#include <ui_interface.h> #include <util/system.h> #include <util/translation.h> diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp index 9347ff9e42..035c8196bc 100644 --- a/src/qt/test/addressbooktests.cpp +++ b/src/qt/test/addressbooktests.cpp @@ -18,6 +18,7 @@ #include <key.h> #include <key_io.h> #include <wallet/wallet.h> +#include <walletinitinterface.h> #include <QApplication> #include <QTimer> @@ -59,6 +60,7 @@ void EditAddressAndSubmit( void TestAddAddressesToSendBook(interfaces::Node& node) { TestChain100Setup test; + node.setContext(&test.m_node); std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase()); wallet->SetupLegacyScriptPubKeyMan(); bool firstRun; diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp index f88d57c716..b880a99baf 100644 --- a/src/qt/test/apptests.cpp +++ b/src/qt/test/apptests.cpp @@ -62,9 +62,10 @@ void AppTests::appTests() } #endif - BasicTestingSetup test{CBaseChainParams::REGTEST}; // Create a temp data directory to backup the gui settings to - ECC_Stop(); // Already started by the common test setup, so stop it to avoid interference - LogInstance().DisconnectTestLogger(); + fs::create_directories([] { + BasicTestingSetup test{CBaseChainParams::REGTEST}; // Create a temp data directory to backup the gui settings to + return GetDataDir() / "blocks"; + }()); m_app.parameterSetup(); m_app.createOptionsModel(true /* reset settings */); @@ -80,8 +81,9 @@ void AppTests::appTests() m_app.exec(); // Reset global state to avoid interfering with later tests. 
+ LogInstance().DisconnectTestLogger(); AbortShutdown(); - UnloadBlockIndex(); + UnloadBlockIndex(/* mempool */ nullptr); WITH_LOCK(::cs_main, g_chainman.Reset()); } diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp index aefdcd2716..031913bd02 100644 --- a/src/qt/test/test_main.cpp +++ b/src/qt/test/test_main.cpp @@ -40,7 +40,7 @@ Q_IMPORT_PLUGIN(QCocoaIntegrationPlugin); const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; // This is all you need to run all the tests -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { // Initialize persistent globals with the testing setup state for sanity. // E.g. -datadir in gArgs is set to a temp directory dummy value (instead @@ -52,7 +52,8 @@ int main(int argc, char *argv[]) BasicTestingSetup dummy{CBaseChainParams::REGTEST}; } - std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(); + NodeContext node_context; + std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context); bool fInvalid = false; @@ -70,6 +71,8 @@ int main(int argc, char *argv[]) BitcoinApplication app(*node); app.setApplicationName("Bitcoin-Qt-test"); + node->setupServerArgs(); // Make gArgs available in the NodeContext + node->context()->args->ClearArgs(); // Clear added args again AppTests app_tests(app); if (QTest::qExec(&app_tests) != 0) { fInvalid = true; diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp index 6648029bae..475fd589af 100644 --- a/src/qt/test/wallettests.cpp +++ b/src/qt/test/wallettests.cpp @@ -138,8 +138,7 @@ void TestGUI(interfaces::Node& node) for (int i = 0; i < 5; ++i) { test.CreateAndProcessBlock({}, GetScriptForRawPubKey(test.coinbaseKey.GetPubKey())); } - node.context()->connman = std::move(test.m_node.connman); - node.context()->mempool = std::move(test.m_node.mempool); + node.setContext(&test.m_node); std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase()); bool firstRun; wallet->LoadWallet(firstRun); diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index 52007ef350..632a18de5c 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -234,6 +234,7 @@ void TransactionRecord::updateStatus(const interfaces::WalletTxStatus& wtx, cons bool TransactionRecord::statusUpdateNeeded(const uint256& block_hash) const { + assert(!block_hash.IsNull()); return status.m_cur_block_hash != block_hash || status.needsUpdate; } diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp index 327a489488..c560dc58e7 100644 --- a/src/qt/transactiontablemodel.cpp +++ b/src/qt/transactiontablemodel.cpp @@ -178,21 +178,16 @@ public: TransactionRecord* index(interfaces::Wallet& wallet, const uint256& cur_block_hash, const int idx) { - if(idx >= 0 && idx < cachedWallet.size()) - { + if (idx >= 0 && idx < cachedWallet.size()) { TransactionRecord *rec = &cachedWallet[idx]; - // Get required locks upfront. This avoids the GUI from getting - // stuck if the core is holding the locks for a longer time - for - // example, during a wallet rescan. - // // If a status update is needed (blocks came in since last check), - // update the status of this transaction from the wallet. Otherwise, - // simply re-use the cached status. + // try to update the status of this transaction from the wallet. + // Otherwise, simply re-use the cached status. 
interfaces::WalletTxStatus wtx; int numBlocks; int64_t block_time; - if (rec->statusUpdateNeeded(cur_block_hash) && wallet.tryGetTxStatus(rec->hash, wtx, numBlocks, block_time)) { + if (!cur_block_hash.IsNull() && rec->statusUpdateNeeded(cur_block_hash) && wallet.tryGetTxStatus(rec->hash, wtx, numBlocks, block_time)) { rec->updateStatus(wtx, cur_block_hash, numBlocks, block_time); } return rec; @@ -664,7 +659,7 @@ QVariant TransactionTableModel::headerData(int section, Qt::Orientation orientat QModelIndex TransactionTableModel::index(int row, int column, const QModelIndex &parent) const { Q_UNUSED(parent); - TransactionRecord* data = priv->index(walletModel->wallet(), walletModel->clientModel().getBestBlockHash(), row); + TransactionRecord* data = priv->index(walletModel->wallet(), walletModel->getLastBlockProcessed(), row); if(data) { return createIndex(row, column, data); diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 3df81807f0..54ecfc38ec 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -17,7 +17,7 @@ #include <qt/transactiontablemodel.h> #include <qt/walletmodel.h> -#include <ui_interface.h> +#include <node/ui_interface.h> #include <QApplication> #include <QComboBox> diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 72c75f7be0..e374dd191c 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -21,8 +21,8 @@ #include <interfaces/handler.h> #include <interfaces/node.h> #include <key_io.h> +#include <node/ui_interface.h> #include <psbt.h> -#include <ui_interface.h> #include <util/system.h> // for GetBoolArg #include <util/translation.h> #include <wallet/coincontrol.h> @@ -87,7 +87,7 @@ void WalletModel::pollBalanceChanged() { // Avoid recomputing wallet balances unless a TransactionChanged or // BlockTip notification was received. - if (!fForceCheckBalanceChanged && m_cached_last_update_tip == m_client_model->getBestBlockHash()) return; + if (!fForceCheckBalanceChanged && m_cached_last_update_tip == getLastBlockProcessed()) return; // Try to get balances and return early if locks can't be acquired. This // avoids the GUI from getting stuck on periodical polls if the core is @@ -588,3 +588,8 @@ void WalletModel::refresh(bool pk_hash_only) { addressTableModel = new AddressTableModel(this, pk_hash_only); } + +uint256 WalletModel::getLastBlockProcessed() const +{ + return m_client_model ? 
m_client_model->getBestBlockHash() : uint256{}; +} diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h index 38e8a14556..fd52db2da3 100644 --- a/src/qt/walletmodel.h +++ b/src/qt/walletmodel.h @@ -155,6 +155,9 @@ public: AddressTableModel* getAddressTableModel() const { return addressTableModel; } void refresh(bool pk_hash_only = false); + + uint256 getLastBlockProcessed() const; + private: std::unique_ptr<interfaces::Wallet> m_wallet; std::unique_ptr<interfaces::Handler> m_handler_unload; diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp index cec9b0eeb8..2fc883a5f5 100644 --- a/src/qt/walletview.cpp +++ b/src/qt/walletview.cpp @@ -20,8 +20,8 @@ #include <qt/walletmodel.h> #include <interfaces/node.h> +#include <node/ui_interface.h> #include <psbt.h> -#include <ui_interface.h> #include <util/strencodings.h> #include <QAction> diff --git a/src/random.cpp b/src/random.cpp index 9c9a35709a..af9504e0ce 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -315,12 +315,16 @@ void GetOSRand(unsigned char *ent32) if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { RandFailure(); } + // Silence a compiler warning about unused function. + (void)GetDevURandom; #elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX) /* getentropy() is available on macOS 10.12 and later. */ if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { RandFailure(); } + // Silence a compiler warning about unused function. + (void)GetDevURandom; #elif defined(HAVE_SYSCTL_ARND) /* FreeBSD, NetBSD and similar. It is possible for the call to return less * bytes than requested, so need to read in a loop. @@ -334,6 +338,8 @@ void GetOSRand(unsigned char *ent32) } have += len; } while (have < NUM_OS_RANDOM_BYTES); + // Silence a compiler warning about unused function. + (void)GetDevURandom; #else /* Fall back to /dev/urandom if there is no specific method implemented to * get system entropy for this OS. diff --git a/src/rest.cpp b/src/rest.cpp index 8cb594a03b..7130625d5c 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -68,13 +68,32 @@ static bool RESTERR(HTTPRequest* req, enum HTTPStatusCode status, std::string me } /** - * Get the node context mempool. + * Get the node context. * - * Set the HTTP error and return nullptr if node context - * mempool is not found. + * @param[in] req The HTTP request, whose status code will be set if node + * context is not found. + * @returns Pointer to the node context or nullptr if not found. + */ +static NodeContext* GetNodeContext(const util::Ref& context, HTTPRequest* req) +{ + NodeContext* node = context.Has<NodeContext>() ? &context.Get<NodeContext>() : nullptr; + if (!node) { + RESTERR(req, HTTP_INTERNAL_SERVER_ERROR, + strprintf("%s:%d (%s)\n" + "Internal bug detected: Node context not found!\n" + "You may report this issue here: %s\n", + __FILE__, __LINE__, __func__, PACKAGE_BUGREPORT)); + return nullptr; + } + return node; +} + +/** + * Get the node context mempool. * - * @param[in] req the HTTP request - * return pointer to the mempool or nullptr if no mempool found + * @param[in] req The HTTP request, whose status code will be set if node + * context mempool is not found. + * @returns Pointer to the mempool or nullptr if no mempool found. 
*/ static CTxMemPool* GetMemPool(const util::Ref& context, HTTPRequest* req) { @@ -371,10 +390,13 @@ static bool rest_tx(const util::Ref& context, HTTPRequest* req, const std::strin g_txindex->BlockUntilSyncedToCurrentChain(); } - CTransactionRef tx; + const NodeContext* const node = GetNodeContext(context, req); + if (!node) return false; uint256 hashBlock = uint256(); - if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock)) + const CTransactionRef tx = GetTransaction(/* block_index */ nullptr, node->mempool, hash, Params().GetConsensus(), hashBlock); + if (!tx) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); + } switch (rf) { case RetFormat::BINARY: { diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 64f8a5bb3b..f27373b57c 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -75,7 +75,10 @@ CTxMemPool& EnsureMemPool(const util::Ref& context) ChainstateManager& EnsureChainman(const util::Ref& context) { NodeContext& node = EnsureNodeContext(context); - return EnsureChainman(node); + if (!node.chainman) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Node chainman not found"); + } + return *node.chainman; } /* Calculate the difficulty for a given block index. @@ -522,9 +525,9 @@ static UniValue getrawmempool(const JSONRPCRequest& request) {RPCResult::Type::STR_HEX, "", "The transaction id"}, }}, RPCResult{"for verbose = true", - RPCResult::Type::OBJ, "", "", + RPCResult::Type::OBJ_DYN, "", "", { - {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()}, + {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()}, }}, }, RPCExamples{ @@ -553,7 +556,7 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request) RPCResult::Type::ARR, "", "", {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool ancestor transaction"}}}, RPCResult{"for verbose = true", - RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()}, + RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()}, }, RPCExamples{ HelpExampleCli("getmempoolancestors", "\"mytxid\"") @@ -613,9 +616,9 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request) RPCResult::Type::ARR, "", "", {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool descendant transaction"}}}, RPCResult{"for verbose = true", - RPCResult::Type::OBJ, "", "", + RPCResult::Type::OBJ_DYN, "", "", { - {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()}, + {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()}, }}, }, RPCExamples{ @@ -671,7 +674,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request) {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id (must be in mempool)"}, }, RPCResult{ - RPCResult::Type::OBJ_DYN, "", "", MempoolEntryDescription()}, + RPCResult::Type::OBJ, "", "", MempoolEntryDescription()}, RPCExamples{ HelpExampleCli("getmempoolentry", "\"mytxid\"") + HelpExampleRpc("getmempoolentry", "\"mytxid\"") @@ -795,10 +798,8 @@ static CBlock GetBlockChecked(const CBlockIndex* pblockindex) if (!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus())) { // Block not found on disk. This could be because we have the block - // header in our index but don't have the block (for example if a - // non-whitelisted node sends us an unrequested long chain of valid - // blocks, we add the headers to our index, but don't accept the - // block). + // header in our index but not yet have the block or did not accept the + // block. 
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); } @@ -972,7 +973,9 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request) RPCHelpMan{"gettxoutsetinfo", "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n", - {}, + { + {"hash_type", RPCArg::Type::STR, /* default */ "hash_serialized_2", "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'none'."}, + }, RPCResult{ RPCResult::Type::OBJ, "", "", { @@ -981,7 +984,7 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request) {RPCResult::Type::NUM, "transactions", "The number of transactions with unspent outputs"}, {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs"}, {RPCResult::Type::NUM, "bogosize", "A meaningless metric for UTXO set size"}, - {RPCResult::Type::STR_HEX, "hash_serialized_2", "The serialized hash"}, + {RPCResult::Type::STR_HEX, "hash_serialized_2", "The serialized hash (only present if 'hash_serialized_2' hash_type is chosen)"}, {RPCResult::Type::NUM, "disk_size", "The estimated size of the chainstate on disk"}, {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount"}, }}, @@ -996,14 +999,19 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request) CCoinsStats stats; ::ChainstateActive().ForceFlushStateToDisk(); + const CoinStatsHashType hash_type = ParseHashType(request.params[0], CoinStatsHashType::HASH_SERIALIZED); + CCoinsView* coins_view = WITH_LOCK(cs_main, return &ChainstateActive().CoinsDB()); - if (GetUTXOStats(coins_view, stats, RpcInterruptionPoint)) { + NodeContext& node = EnsureNodeContext(request.context); + if (GetUTXOStats(coins_view, stats, hash_type, node.rpc_interruption_point)) { ret.pushKV("height", (int64_t)stats.nHeight); ret.pushKV("bestblock", stats.hashBlock.GetHex()); ret.pushKV("transactions", (int64_t)stats.nTransactions); ret.pushKV("txouts", (int64_t)stats.nTransactionOutputs); ret.pushKV("bogosize", (int64_t)stats.nBogoSize); - ret.pushKV("hash_serialized_2", stats.hashSerialized.GetHex()); + if (hash_type == CoinStatsHashType::HASH_SERIALIZED) { + ret.pushKV("hash_serialized_2", stats.hashSerialized.GetHex()); + } ret.pushKV("disk_size", stats.nDiskSize); ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount)); } else { @@ -1325,50 +1333,48 @@ static UniValue getchaintips(const JSONRPCRequest& request) }, }.Check(request); + ChainstateManager& chainman = EnsureChainman(request.context); LOCK(cs_main); /* - * Idea: the set of chain tips is ::ChainActive().tip, plus orphan blocks which do not have another orphan building off of them. + * Idea: The set of chain tips is the active chain tip, plus orphan blocks which do not have another orphan building off of them. * Algorithm: * - Make one pass through BlockIndex(), picking out the orphan blocks, and also storing a set of the orphan block's pprev pointers. * - Iterate through the orphan blocks. If the block isn't pointed to by another orphan, it is a chain tip. 
- * - add ::ChainActive().Tip() + * - Add the active chain tip */ std::set<const CBlockIndex*, CompareBlocksByHeight> setTips; std::set<const CBlockIndex*> setOrphans; std::set<const CBlockIndex*> setPrevs; - for (const std::pair<const uint256, CBlockIndex*>& item : ::BlockIndex()) - { - if (!::ChainActive().Contains(item.second)) { + for (const std::pair<const uint256, CBlockIndex*>& item : chainman.BlockIndex()) { + if (!chainman.ActiveChain().Contains(item.second)) { setOrphans.insert(item.second); setPrevs.insert(item.second->pprev); } } - for (std::set<const CBlockIndex*>::iterator it = setOrphans.begin(); it != setOrphans.end(); ++it) - { + for (std::set<const CBlockIndex*>::iterator it = setOrphans.begin(); it != setOrphans.end(); ++it) { if (setPrevs.erase(*it) == 0) { setTips.insert(*it); } } // Always report the currently active tip. - setTips.insert(::ChainActive().Tip()); + setTips.insert(chainman.ActiveChain().Tip()); /* Construct the output array. */ UniValue res(UniValue::VARR); - for (const CBlockIndex* block : setTips) - { + for (const CBlockIndex* block : setTips) { UniValue obj(UniValue::VOBJ); obj.pushKV("height", block->nHeight); obj.pushKV("hash", block->phashBlock->GetHex()); - const int branchLen = block->nHeight - ::ChainActive().FindFork(block)->nHeight; + const int branchLen = block->nHeight - chainman.ActiveChain().FindFork(block)->nHeight; obj.pushKV("branchlen", branchLen); std::string status; - if (::ChainActive().Contains(block)) { + if (chainman.ActiveChain().Contains(block)) { // This block is part of the currently active chain. status = "active"; } else if (block->nStatus & BLOCK_FAILED_MASK) { @@ -1967,8 +1973,10 @@ static UniValue savemempool(const JSONRPCRequest& request) return NullUniValue; } +namespace { //! 
Search for a given set of pubkey scripts -bool FindScriptPubKey(std::atomic<int>& scan_progress, const std::atomic<bool>& should_abort, int64_t& count, CCoinsViewCursor* cursor, const std::set<CScript>& needles, std::map<COutPoint, Coin>& out_results) { +bool FindScriptPubKey(std::atomic<int>& scan_progress, const std::atomic<bool>& should_abort, int64_t& count, CCoinsViewCursor* cursor, const std::set<CScript>& needles, std::map<COutPoint, Coin>& out_results, std::function<void()>& interruption_point) +{ scan_progress = 0; count = 0; while (cursor->Valid()) { @@ -1976,7 +1984,7 @@ bool FindScriptPubKey(std::atomic<int>& scan_progress, const std::atomic<bool>& Coin coin; if (!cursor->GetKey(key) || !cursor->GetValue(coin)) return false; if (++count % 8192 == 0) { - RpcInterruptionPoint(); + interruption_point(); if (should_abort) { // allow to abort the scan via the abort reference return false; @@ -1995,6 +2003,7 @@ bool FindScriptPubKey(std::atomic<int>& scan_progress, const std::atomic<bool>& scan_progress = 100; return true; } +} // namespace /** RAII object to prevent concurrency issue when scanning the txout set */ static std::atomic<int> g_scan_progress; @@ -2143,7 +2152,8 @@ UniValue scantxoutset(const JSONRPCRequest& request) tip = ::ChainActive().Tip(); CHECK_NONFATAL(tip); } - bool res = FindScriptPubKey(g_scan_progress, g_should_abort_scan, count, pcursor.get(), needles, coins); + NodeContext& node = EnsureNodeContext(request.context); + bool res = FindScriptPubKey(g_scan_progress, g_should_abort_scan, count, pcursor.get(), needles, coins, node.rpc_interruption_point); result.pushKV("success", res); result.pushKV("txouts", count); result.pushKV("height", tip->nHeight); @@ -2298,6 +2308,7 @@ UniValue dumptxoutset(const JSONRPCRequest& request) std::unique_ptr<CCoinsViewCursor> pcursor; CCoinsStats stats; CBlockIndex* tip; + NodeContext& node = EnsureNodeContext(request.context); { // We need to lock cs_main to ensure that the coinsdb isn't written to @@ -2316,7 +2327,7 @@ UniValue dumptxoutset(const JSONRPCRequest& request) ::ChainstateActive().ForceFlushStateToDisk(); - if (!GetUTXOStats(&::ChainstateActive().CoinsDB(), stats, RpcInterruptionPoint)) { + if (!GetUTXOStats(&::ChainstateActive().CoinsDB(), stats, CoinStatsHashType::NONE, node.rpc_interruption_point)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } @@ -2334,7 +2345,7 @@ UniValue dumptxoutset(const JSONRPCRequest& request) unsigned int iter{0}; while (pcursor->Valid()) { - if (iter % 5000 == 0) RpcInterruptionPoint(); + if (iter % 5000 == 0) node.rpc_interruption_point(); ++iter; if (pcursor->GetKey(key) && pcursor->GetValue(coin)) { afile << key; @@ -2377,7 +2388,7 @@ static const CRPCCommand commands[] = { "blockchain", "getmempoolinfo", &getmempoolinfo, {} }, { "blockchain", "getrawmempool", &getrawmempool, {"verbose"} }, { "blockchain", "gettxout", &gettxout, {"txid","n","include_mempool"} }, - { "blockchain", "gettxoutsetinfo", &gettxoutsetinfo, {} }, + { "blockchain", "gettxoutsetinfo", &gettxoutsetinfo, {"hash_type"} }, { "blockchain", "pruneblockchain", &pruneblockchain, {"height"} }, { "blockchain", "savemempool", &savemempool, {} }, { "blockchain", "verifychain", &verifychain, {"checklevel","nblocks"} }, diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 41050edaa6..d61e02aee2 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -174,6 +174,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "createwallet", 4, "avoid_reuse"}, { "createwallet", 5, "descriptors"}, 
{ "getnodeaddresses", 0, "count"}, + { "addpeeraddress", 1, "port"}, { "stop", 0, "wait" }, }; // clang-format on diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index df1e0fe623..77d24a6e79 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -112,7 +112,7 @@ static UniValue getpeerinfo(const JSONRPCRequest& request) {RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"}, {RPCResult::Type::BOOL, "addnode", "Whether connection was due to addnode/-connect or if it was an automatic/inbound connection"}, {RPCResult::Type::NUM, "startingheight", "The starting height (block) of the peer"}, - {RPCResult::Type::NUM, "banscore", "The ban score"}, + {RPCResult::Type::NUM, "banscore", "The ban score (DEPRECATED, returned only if config option -deprecatedrpc=banscore is passed)"}, {RPCResult::Type::NUM, "synced_headers", "The last header we have in common with this peer"}, {RPCResult::Type::NUM, "synced_blocks", "The last block we have in common with this peer"}, {RPCResult::Type::ARR, "inflight", "", @@ -191,7 +191,10 @@ static UniValue getpeerinfo(const JSONRPCRequest& request) obj.pushKV("addnode", stats.m_manual_connection); obj.pushKV("startingheight", stats.nStartingHeight); if (fStateStats) { - obj.pushKV("banscore", statestats.nMisbehavior); + if (IsDeprecatedRPCEnabled("banscore")) { + // banscore is deprecated in v0.21 for removal in v0.22 + obj.pushKV("banscore", statestats.nMisbehavior); + } obj.pushKV("synced_headers", statestats.nSyncHeight); obj.pushKV("synced_blocks", statestats.nCommonHeight); UniValue heights(UniValue::VARR); @@ -261,7 +264,7 @@ static UniValue addnode(const JSONRPCRequest& request) if (strCommand == "onetry") { CAddress addr; - node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true); + node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), ConnectionType::MANUAL); return NullUniValue; } @@ -273,7 +276,7 @@ static UniValue addnode(const JSONRPCRequest& request) else if(strCommand == "remove") { if(!node.connman->RemoveAddedNode(strNode)) - throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added."); + throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node could not be removed. It has not been added previously."); } return NullUniValue; @@ -614,12 +617,12 @@ static UniValue setban(const JSONRPCRequest& request) absolute = true; if (isSubnet) { - node.banman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute); + node.banman->Ban(subNet, banTime, absolute); if (node.connman) { node.connman->DisconnectNode(subNet); } } else { - node.banman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute); + node.banman->Ban(netAddr, banTime, absolute); if (node.connman) { node.connman->DisconnectNode(netAddr); } @@ -628,7 +631,7 @@ static UniValue setban(const JSONRPCRequest& request) else if(strCommand == "remove") { if (!( isSubnet ? node.banman->Unban(subNet) : node.banman->Unban(netAddr) )) { - throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously banned."); + throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. 
Requested address/subnet was not previously manually banned."); } } return NullUniValue; @@ -637,7 +640,7 @@ static UniValue setban(const JSONRPCRequest& request) static UniValue listbanned(const JSONRPCRequest& request) { RPCHelpMan{"listbanned", - "\nList all banned IPs/Subnets.\n", + "\nList all manually banned IPs/Subnets.\n", {}, RPCResult{RPCResult::Type::ARR, "", "", { @@ -646,7 +649,6 @@ static UniValue listbanned(const JSONRPCRequest& request) {RPCResult::Type::STR, "address", ""}, {RPCResult::Type::NUM_TIME, "banned_until", ""}, {RPCResult::Type::NUM_TIME, "ban_created", ""}, - {RPCResult::Type::STR, "ban_reason", ""}, }}, }}, RPCExamples{ @@ -671,7 +673,6 @@ static UniValue listbanned(const JSONRPCRequest& request) rec.pushKV("address", entry.first.ToString()); rec.pushKV("banned_until", banEntry.nBanUntil); rec.pushKV("ban_created", banEntry.nCreateTime); - rec.pushKV("ban_reason", banEntry.banReasonToString()); bannedAddresses.push_back(rec); } @@ -726,7 +727,7 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) RPCHelpMan{"getnodeaddresses", "\nReturn known addresses which can potentially be used to find new nodes in the network\n", { - {"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. Limited to the smaller of " + ToString(ADDRMAN_GETADDR_MAX) + " or " + ToString(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."}, + {"count", RPCArg::Type::NUM, /* default */ "1", "The maximum number of addresses to return. Specify 0 to return all known addresses."}, }, RPCResult{ RPCResult::Type::ARR, "", "", @@ -753,18 +754,16 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) int count = 1; if (!request.params[0].isNull()) { count = request.params[0].get_int(); - if (count <= 0) { + if (count < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Address count out of range"); } } // returns a shuffled list of CAddress - std::vector<CAddress> vAddr = node.connman->GetAddresses(); + std::vector<CAddress> vAddr = node.connman->GetAddresses(count, /* max_pct */ 0); UniValue ret(UniValue::VARR); - int address_return_count = std::min<int>(count, vAddr.size()); - for (int i = 0; i < address_return_count; ++i) { + for (const CAddress& addr : vAddr) { UniValue obj(UniValue::VOBJ); - const CAddress& addr = vAddr[i]; obj.pushKV("time", (int)addr.nTime); obj.pushKV("services", (uint64_t)addr.nServices); obj.pushKV("address", addr.ToStringIP()); @@ -774,6 +773,54 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) return ret; } +static UniValue addpeeraddress(const JSONRPCRequest& request) +{ + RPCHelpMan{"addpeeraddress", + "\nAdd the address of a potential peer to the address manager. 
This RPC is for testing only.\n", + { + {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address of the peer"}, + {"port", RPCArg::Type::NUM, RPCArg::Optional::NO, "The port of the peer"}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager"}, + }, + }, + RPCExamples{ + HelpExampleCli("addpeeraddress", "\"1.2.3.4\" 8333") + + HelpExampleRpc("addpeeraddress", "\"1.2.3.4\", 8333") + }, + }.Check(request); + + NodeContext& node = EnsureNodeContext(request.context); + if (!node.connman) { + throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); + } + + UniValue obj(UniValue::VOBJ); + + std::string addr_string = request.params[0].get_str(); + uint16_t port = request.params[1].get_int(); + + CNetAddr net_addr; + if (!LookupHost(addr_string, net_addr, false)) { + obj.pushKV("success", false); + return obj; + } + CAddress address = CAddress({net_addr, port}, ServiceFlags(NODE_NETWORK|NODE_WITNESS)); + address.nTime = GetAdjustedTime(); + // The source address is set equal to the address. This is equivalent to the peer + // announcing itself. + if (!node.connman->AddNewAddresses({address}, address)) { + obj.pushKV("success", false); + return obj; + } + + obj.pushKV("success", true); + return obj; +} + void RegisterNetRPCCommands(CRPCTable &t) { // clang-format off @@ -793,6 +840,7 @@ static const CRPCCommand commands[] = { "network", "clearbanned", &clearbanned, {} }, { "network", "setnetworkactive", &setnetworkactive, {"state"} }, { "network", "getnodeaddresses", &getnodeaddresses, {"count"} }, + { "hidden", "addpeeraddress", &addpeeraddress, {"address", "port"} }, }; // clang-format on diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index a53b91c4bc..9b6ef15785 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -28,6 +28,7 @@ #include <script/signingprovider.h> #include <script/standard.h> #include <uint256.h> +#include <util/bip32.h> #include <util/moneystr.h> #include <util/strencodings.h> #include <util/string.h> @@ -156,6 +157,8 @@ static UniValue getrawtransaction(const JSONRPCRequest& request) }, }.Check(request); + const NodeContext& node = EnsureNodeContext(request.context); + bool in_active_chain = true; uint256 hash = ParseHashV(request.params[0], "parameter 1"); CBlockIndex* blockindex = nullptr; @@ -187,9 +190,9 @@ static UniValue getrawtransaction(const JSONRPCRequest& request) f_txindex_ready = g_txindex->BlockUntilSyncedToCurrentChain(); } - CTransactionRef tx; uint256 hash_block; - if (!GetTransaction(hash, tx, Params().GetConsensus(), hash_block, blockindex)) { + const CTransactionRef tx = GetTransaction(blockindex, node.mempool, hash, Params().GetConsensus(), hash_block); + if (!tx) { std::string errmsg; if (blockindex) { if (!(blockindex->nStatus & BLOCK_HAVE_DATA)) { @@ -244,10 +247,11 @@ static UniValue gettxoutproof(const JSONRPCRequest& request) for (unsigned int idx = 0; idx < txids.size(); idx++) { const UniValue& txid = txids[idx]; uint256 hash(ParseHashV(txid, "txid")); - if (setTxids.count(hash)) - throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid parameter, duplicated txid: ")+txid.get_str()); - setTxids.insert(hash); - oneTxid = hash; + if (setTxids.count(hash)) { + throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid parameter, duplicated txid: ") + txid.get_str()); + } + setTxids.insert(hash); + oneTxid = hash; } CBlockIndex* 
pblockindex = nullptr; @@ -280,11 +284,11 @@ static UniValue gettxoutproof(const JSONRPCRequest& request) LOCK(cs_main); - if (pblockindex == nullptr) - { - CTransactionRef tx; - if (!GetTransaction(oneTxid, tx, Params().GetConsensus(), hashBlock) || hashBlock.IsNull()) + if (pblockindex == nullptr) { + const CTransactionRef tx = GetTransaction(/* block_index */ nullptr, /* mempool */ nullptr, oneTxid, Params().GetConsensus(), hashBlock); + if (!tx || hashBlock.IsNull()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not yet in block"); + } pblockindex = LookupBlockIndex(hashBlock); if (!pblockindex) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Transaction index corrupt"); @@ -292,15 +296,19 @@ static UniValue gettxoutproof(const JSONRPCRequest& request) } CBlock block; - if(!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus())) + if (!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus())) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk"); + } unsigned int ntxFound = 0; - for (const auto& tx : block.vtx) - if (setTxids.count(tx->GetHash())) + for (const auto& tx : block.vtx) { + if (setTxids.count(tx->GetHash())) { ntxFound++; - if (ntxFound != setTxids.size()) + } + } + if (ntxFound != setTxids.size()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Not all transactions found in specified or retrieved block"); + } CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS); CMerkleBlock mb(block, setTxids); @@ -511,12 +519,12 @@ static UniValue decoderawtransaction(const JSONRPCRequest& request) static std::string GetAllOutputTypes() { - std::string ret; - for (int i = TX_NONSTANDARD; i <= TX_WITNESS_UNKNOWN; ++i) { - if (i != TX_NONSTANDARD) ret += ", "; - ret += GetTxnOutputType(static_cast<txnouttype>(i)); + std::vector<std::string> ret; + using U = std::underlying_type<TxoutType>::type; + for (U i = (U)TxoutType::NONSTANDARD; i <= (U)TxoutType::WITNESS_UNKNOWN; ++i) { + ret.emplace_back(GetTxnOutputType(static_cast<TxoutType>(i))); } - return ret; + return Join(ret, ", "); } static UniValue decodescript(const JSONRPCRequest& request) @@ -580,10 +588,10 @@ static UniValue decodescript(const JSONRPCRequest& request) // is a witness program, don't return addresses for a segwit programs. if (type.get_str() == "pubkey" || type.get_str() == "pubkeyhash" || type.get_str() == "multisig" || type.get_str() == "nonstandard") { std::vector<std::vector<unsigned char>> solutions_data; - txnouttype which_type = Solver(script, solutions_data); + TxoutType which_type = Solver(script, solutions_data); // Uncompressed pubkeys cannot be used with segwit checksigs. // If the script contains an uncompressed pubkey, skip encoding of a segwit program. 
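// ---- Illustrative sketch (not part of the patch): iterating a scoped enum ----
// The GetAllOutputTypes hunk above walks the new scoped TxoutType enum through its
// underlying integer type and joins the resulting names. The same pattern in a
// self-contained form (SimpleType/TypeName are made-up stand-ins, not Bitcoin Core
// identifiers; the real code joins the names with Join() from the util headers):
#include <cstddef>
#include <cstdio>
#include <string>
#include <type_traits>
#include <vector>

enum class SimpleType { FIRST, SECOND, THIRD }; // contiguous values, FIRST..THIRD

std::string TypeName(SimpleType t)
{
    switch (t) {
    case SimpleType::FIRST: return "first";
    case SimpleType::SECOND: return "second";
    case SimpleType::THIRD: return "third";
    } // no default case, so the compiler can warn about missing cases
    return "?";
}

std::string AllTypeNames()
{
    std::vector<std::string> names;
    using U = std::underlying_type<SimpleType>::type;
    for (U i = (U)SimpleType::FIRST; i <= (U)SimpleType::THIRD; ++i) {
        names.emplace_back(TypeName(static_cast<SimpleType>(i)));
    }
    std::string out;
    for (std::size_t i = 0; i < names.size(); ++i) {
        out += (i ? ", " : "") + names[i];
    }
    return out;
}

int main()
{
    std::printf("%s\n", AllTypeNames().c_str()); // prints: first, second, third
    return 0;
}
// -------------------------------------------------------------------------------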
- if ((which_type == TX_PUBKEY) || (which_type == TX_MULTISIG)) { + if ((which_type == TxoutType::PUBKEY) || (which_type == TxoutType::MULTISIG)) { for (const auto& solution : solutions_data) { if ((solution.size() != 1) && !CPubKey(solution).IsCompressed()) { return r; @@ -592,9 +600,9 @@ static UniValue decodescript(const JSONRPCRequest& request) } UniValue sr(UniValue::VOBJ); CScript segwitScr; - if (which_type == TX_PUBKEY) { - segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0].begin(), solutions_data[0].end()))); - } else if (which_type == TX_PUBKEYHASH) { + if (which_type == TxoutType::PUBKEY) { + segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0]))); + } else if (which_type == TxoutType::PUBKEYHASH) { segwitScr = GetScriptForDestination(WitnessV0KeyHash(uint160{solutions_data[0]})); } else { // Scripts that are not fit for P2WPKH are encoded as P2WSH. @@ -938,25 +946,6 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request) return result; } -static std::string WriteHDKeypath(std::vector<uint32_t>& keypath) -{ - std::string keypath_str = "m"; - for (uint32_t num : keypath) { - keypath_str += "/"; - bool hardened = false; - if (num & 0x80000000) { - hardened = true; - num &= ~0x80000000; - } - - keypath_str += ToString(num); - if (hardened) { - keypath_str += "'"; - } - } - return keypath_str; -} - UniValue decodepsbt(const JSONRPCRequest& request) { RPCHelpMan{"decodepsbt", @@ -1104,30 +1093,34 @@ UniValue decodepsbt(const JSONRPCRequest& request) const PSBTInput& input = psbtx.inputs[i]; UniValue in(UniValue::VOBJ); // UTXOs + bool have_a_utxo = false; + CTxOut txout; if (!input.witness_utxo.IsNull()) { - const CTxOut& txout = input.witness_utxo; - - UniValue out(UniValue::VOBJ); - - out.pushKV("amount", ValueFromAmount(txout.nValue)); - if (MoneyRange(txout.nValue) && MoneyRange(total_in + txout.nValue)) { - total_in += txout.nValue; - } else { - // Hack to just not show fee later - have_all_utxos = false; - } + txout = input.witness_utxo; UniValue o(UniValue::VOBJ); ScriptToUniv(txout.scriptPubKey, o, true); + + UniValue out(UniValue::VOBJ); + out.pushKV("amount", ValueFromAmount(txout.nValue)); out.pushKV("scriptPubKey", o); + in.pushKV("witness_utxo", out); - } else if (input.non_witness_utxo) { + + have_a_utxo = true; + } + if (input.non_witness_utxo) { + txout = input.non_witness_utxo->vout[psbtx.tx->vin[i].prevout.n]; + UniValue non_wit(UniValue::VOBJ); TxToUniv(*input.non_witness_utxo, uint256(), non_wit, false); in.pushKV("non_witness_utxo", non_wit); - CAmount utxo_val = input.non_witness_utxo->vout[psbtx.tx->vin[i].prevout.n].nValue; - if (MoneyRange(utxo_val) && MoneyRange(total_in + utxo_val)) { - total_in += utxo_val; + + have_a_utxo = true; + } + if (have_a_utxo) { + if (MoneyRange(txout.nValue) && MoneyRange(total_in + txout.nValue)) { + total_in += txout.nValue; } else { // Hack to just not show fee later have_all_utxos = false; @@ -1355,7 +1348,7 @@ UniValue finalizepsbt(const JSONRPCRequest& request) if (complete && extract) { ssTx << mtx; - result_str = HexStr(ssTx.str()); + result_str = HexStr(ssTx); result.pushKV("hex", result_str); } else { ssTx << psbtx; @@ -1628,7 +1621,7 @@ UniValue joinpsbts(const JSONRPCRequest& request) throw JSONRPCError(RPC_INVALID_PARAMETER, "At least two PSBTs are required to join PSBTs."); } - int32_t best_version = 1; + uint32_t best_version = 1; uint32_t best_locktime = 0xffffffff; for (unsigned int i = 0; i < txs.size(); ++i) { PartiallySignedTransaction psbtx; 
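// ---- Illustrative sketch (not part of the patch): signed/unsigned version compare ----
// The surrounding joinpsbts hunk changes best_version from int32_t to uint32_t and adds
// explicit static_casts when comparing it against the transaction's int32_t nVersion.
// A self-contained illustration of the pitfall those casts make explicit: comparing a
// signed with an unsigned value implicitly converts the signed side to unsigned, which
// compilers flag (-Wsign-compare) and which gives surprising results for negative values.
#include <cstdint>
#include <cstdio>

int main()
{
    const int32_t tx_version = -1;   // hypothetical negative version value
    const uint32_t best_version = 1;

    // (uint32_t)-1 wraps to 4294967295, so the "greater than" test passes even
    // though -1 < 1 when both sides are treated as signed integers.
    if (static_cast<uint32_t>(tx_version) > best_version) {
        std::printf("unsigned comparison: -1 looks larger than 1\n");
    }
    if (tx_version < static_cast<int32_t>(best_version)) {
        std::printf("signed comparison: -1 is smaller than 1\n");
    }
    // Writing the casts out, as the hunk does, documents which semantics are intended
    // instead of relying on the implicit conversion.
    return 0;
}
// ---------------------------------------------------------------------------------------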
@@ -1638,8 +1631,8 @@ UniValue joinpsbts(const JSONRPCRequest& request) } psbtxs.push_back(psbtx); // Choose the highest version number - if (psbtx.tx->nVersion > best_version) { - best_version = psbtx.tx->nVersion; + if (static_cast<uint32_t>(psbtx.tx->nVersion) > best_version) { + best_version = static_cast<uint32_t>(psbtx.tx->nVersion); } // Choose the lowest lock time if (psbtx.tx->nLockTime < best_locktime) { @@ -1650,7 +1643,7 @@ UniValue joinpsbts(const JSONRPCRequest& request) // Create a blank psbt where everything will be added PartiallySignedTransaction merged_psbt; merged_psbt.tx = CMutableTransaction(); - merged_psbt.tx->nVersion = best_version; + merged_psbt.tx->nVersion = static_cast<int32_t>(best_version); merged_psbt.tx->nLockTime = best_locktime; // Merge diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index 7fef45f50e..d9ad70fa37 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -78,7 +78,7 @@ bool GenerateAuthCookie(std::string *cookie_out) const size_t COOKIE_SIZE = 32; unsigned char rand_pwd[COOKIE_SIZE]; GetRandBytes(rand_pwd, COOKIE_SIZE); - std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd, rand_pwd+COOKIE_SIZE); + std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd); /** the umask determines what permissions are used to create this file - * these are set to 077 in init.cpp unless overridden with -sysperms. diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 844f62cbc6..e5f6b1b9f1 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -20,10 +20,10 @@ #include <mutex> #include <unordered_map> -static RecursiveMutex cs_rpcWarmup; +static Mutex g_rpc_warmup_mutex; static std::atomic<bool> g_rpc_running{false}; -static bool fRPCInWarmup GUARDED_BY(cs_rpcWarmup) = true; -static std::string rpcWarmupStatus GUARDED_BY(cs_rpcWarmup) = "RPC server started"; +static bool fRPCInWarmup GUARDED_BY(g_rpc_warmup_mutex) = true; +static std::string rpcWarmupStatus GUARDED_BY(g_rpc_warmup_mutex) = "RPC server started"; /* Timer-creating functions */ static RPCTimerInterface* timerInterface = nullptr; /* Map of name to timer. 
*/ @@ -130,11 +130,9 @@ std::string CRPCTable::help(const std::string& strCommand, const JSONRPCRequest& return strRet; } -UniValue help(const JSONRPCRequest& jsonRequest) +static RPCHelpMan help() { - if (jsonRequest.fHelp || jsonRequest.params.size() > 1) - throw std::runtime_error( - RPCHelpMan{"help", + return RPCHelpMan{"help", "\nList all commands, or get help for a specified command.\n", { {"command", RPCArg::Type::STR, /* default */ "all commands", "The command to get help on"}, @@ -143,32 +141,32 @@ UniValue help(const JSONRPCRequest& jsonRequest) RPCResult::Type::STR, "", "The help text" }, RPCExamples{""}, - }.ToString() - ); - + [&](const RPCHelpMan& self, const JSONRPCRequest& jsonRequest) -> UniValue +{ std::string strCommand; if (jsonRequest.params.size() > 0) strCommand = jsonRequest.params[0].get_str(); return tableRPC.help(strCommand, jsonRequest); +}, + }; } - -UniValue stop(const JSONRPCRequest& jsonRequest) +static RPCHelpMan stop() { static const std::string RESULT{PACKAGE_NAME " stopping"}; - // Accept the deprecated and ignored 'detach' boolean argument + return RPCHelpMan{"stop", // Also accept the hidden 'wait' integer argument (milliseconds) // For instance, 'stop 1000' makes the call wait 1 second before returning // to the client (intended for testing) - if (jsonRequest.fHelp || jsonRequest.params.size() > 1) - throw std::runtime_error( - RPCHelpMan{"stop", "\nRequest a graceful shutdown of " PACKAGE_NAME ".", - {}, + { + {"wait", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "how long to wait in ms", "", {}, /* hidden */ true}, + }, RPCResult{RPCResult::Type::STR, "", "A string with the content '" + RESULT + "'"}, RPCExamples{""}, - }.ToString()); + [&](const RPCHelpMan& self, const JSONRPCRequest& jsonRequest) -> UniValue +{ // Event loop will exit after current HTTP requests have been handled, so // this reply will get back to the client. 
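// ---- Illustrative sketch (not part of the patch): RPCHelpMan factory pattern ----
// The server.cpp hunks convert help/stop/uptime/getrpcinfo from plain
// UniValue(const JSONRPCRequest&) handlers into factory functions returning an
// RPCHelpMan that carries its own lambda implementation. A heavily simplified,
// self-contained stand-in for that shape (MiniHelpMan/MiniRequest are made up;
// the real class also validates arguments via Check() before dispatching):
#include <cstdio>
#include <functional>
#include <string>

struct MiniRequest { std::string param; };

struct MiniHelpMan {
    using Impl = std::function<std::string(const MiniHelpMan&, const MiniRequest&)>;
    std::string m_name;
    std::string m_description;
    Impl m_fun;

    std::string HandleRequest(const MiniRequest& req) const
    {
        // The real HandleRequest runs Check(request) first, then calls m_fun.
        return m_fun(*this, req);
    }
};

static MiniHelpMan uptime_sketch()
{
    return MiniHelpMan{
        "uptime",
        "Returns a fixed string in this sketch.",
        [](const MiniHelpMan& self, const MiniRequest&) -> std::string {
            return self.m_name + ": ok";
        },
    };
}

int main()
{
    // A dispatch table can now derive the command name and help text from the same
    // object that holds the implementation, which is what the new CRPCCommand
    // constructor in the server.h hunk relies on.
    const MiniHelpMan cmd = uptime_sketch();
    std::printf("%s\n", cmd.HandleRequest(MiniRequest{}).c_str());
    return 0;
}
// ----------------------------------------------------------------------------------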
StartShutdown(); @@ -176,11 +174,13 @@ UniValue stop(const JSONRPCRequest& jsonRequest) UninterruptibleSleep(std::chrono::milliseconds{jsonRequest.params[0].get_int()}); } return RESULT; +}, + }; } -static UniValue uptime(const JSONRPCRequest& jsonRequest) +static RPCHelpMan uptime() { - RPCHelpMan{"uptime", + return RPCHelpMan{"uptime", "\nReturns the total uptime of the server.\n", {}, RPCResult{ @@ -190,14 +190,16 @@ static UniValue uptime(const JSONRPCRequest& jsonRequest) HelpExampleCli("uptime", "") + HelpExampleRpc("uptime", "") }, - }.Check(jsonRequest); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ return GetTime() - GetStartupTime(); } + }; +} -static UniValue getrpcinfo(const JSONRPCRequest& request) +static RPCHelpMan getrpcinfo() { - RPCHelpMan{"getrpcinfo", + return RPCHelpMan{"getrpcinfo", "\nReturns details of the RPC server.\n", {}, RPCResult{ @@ -217,8 +219,8 @@ static UniValue getrpcinfo(const JSONRPCRequest& request) RPCExamples{ HelpExampleCli("getrpcinfo", "") + HelpExampleRpc("getrpcinfo", "")}, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ LOCK(g_rpc_server_info.mutex); UniValue active_commands(UniValue::VARR); for (const RPCCommandExecutionInfo& info : g_rpc_server_info.active_commands) { @@ -237,6 +239,8 @@ static UniValue getrpcinfo(const JSONRPCRequest& request) return result; } + }; +} // clang-format off static const CRPCCommand vRPCCommands[] = @@ -327,20 +331,20 @@ void RpcInterruptionPoint() void SetRPCWarmupStatus(const std::string& newStatus) { - LOCK(cs_rpcWarmup); + LOCK(g_rpc_warmup_mutex); rpcWarmupStatus = newStatus; } void SetRPCWarmupFinished() { - LOCK(cs_rpcWarmup); + LOCK(g_rpc_warmup_mutex); assert(fRPCInWarmup); fRPCInWarmup = false; } bool RPCIsInWarmup(std::string *outStatus) { - LOCK(cs_rpcWarmup); + LOCK(g_rpc_warmup_mutex); if (outStatus) *outStatus = rpcWarmupStatus; return fRPCInWarmup; @@ -439,7 +443,7 @@ UniValue CRPCTable::execute(const JSONRPCRequest &request) const { // Return immediately if in warmup { - LOCK(cs_rpcWarmup); + LOCK(g_rpc_warmup_mutex); if (fRPCInWarmup) throw JSONRPCError(RPC_IN_WARMUP, rpcWarmupStatus); } diff --git a/src/rpc/server.h b/src/rpc/server.h index d7a04ff6e8..6da3e94ea2 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h @@ -8,6 +8,7 @@ #include <amount.h> #include <rpc/request.h> +#include <rpc/util.h> #include <functional> #include <map> @@ -85,6 +86,7 @@ void RPCUnsetTimerInterface(RPCTimerInterface *iface); void RPCRunLater(const std::string& name, std::function<void()> func, int64_t nSeconds); typedef UniValue(*rpcfn_type)(const JSONRPCRequest& jsonRequest); +typedef RPCHelpMan (*RpcMethodFnType)(); class CRPCCommand { @@ -101,6 +103,19 @@ public: { } + //! Simplified constructor taking plain RpcMethodFnType function pointer. + CRPCCommand(std::string category, std::string name_in, RpcMethodFnType fn, std::vector<std::string> args_in) + : CRPCCommand( + category, + fn().m_name, + [fn](const JSONRPCRequest& request, UniValue& result, bool) { result = fn().HandleRequest(request); return true; }, + fn().GetArgNames(), + intptr_t(fn)) + { + CHECK_NONFATAL(fn().m_name == name_in); + CHECK_NONFATAL(fn().GetArgNames() == args_in); + } + //! Simplified constructor taking plain rpcfn_type function pointer. CRPCCommand(const char* category, const char* name, rpcfn_type fn, std::initializer_list<const char*> args) : CRPCCommand(category, name, @@ -117,7 +132,7 @@ public: }; /** - * Bitcoin RPC command dispatcher. 
+ * RPC command dispatcher. */ class CRPCTable { diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 54ea352a72..073a7688a9 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -113,6 +113,23 @@ std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKey) return ParseHexV(find_value(o, strKey), strKey); } +CoinStatsHashType ParseHashType(const UniValue& param, const CoinStatsHashType default_type) +{ + if (param.isNull()) { + return default_type; + } else { + std::string hash_type_input = param.get_str(); + + if (hash_type_input == "hash_serialized_2") { + return CoinStatsHashType::HASH_SERIALIZED; + } else if (hash_type_input == "none") { + return CoinStatsHashType::NONE; + } else { + throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%d is not a valid hash_type", hash_type_input)); + } + } +} + std::string HelpExampleCli(const std::string& methodname, const std::string& args) { return "> bitcoin-cli " + methodname + " " + args + "\n"; @@ -243,7 +260,7 @@ public: UniValue obj(UniValue::VOBJ); obj.pushKV("iswitness", true); obj.pushKV("witness_version", (int)id.version); - obj.pushKV("witness_program", HexStr(id.program, id.program + id.length)); + obj.pushKV("witness_program", HexStr(Span<const unsigned char>(id.program, id.length))); return obj; } }; @@ -368,9 +385,7 @@ struct Sections { PushSection({indent + "]" + (outer_type != OuterType::NONE ? "," : ""), ""}); break; } - - // no default case, so the compiler can warn about missing cases - } + } // no default case, so the compiler can warn about missing cases } /** @@ -381,6 +396,9 @@ struct Sections { std::string ret; const size_t pad = m_max_pad + 4; for (const auto& s : m_sections) { + // The left part of a section is assumed to be a single line, usually it is the name of the JSON struct or a + // brace like {, }, [, or ] + CHECK_NONFATAL(s.m_left.find('\n') == std::string::npos); if (s.m_right.empty()) { ret += s.m_left; ret += "\n"; @@ -415,7 +433,11 @@ struct Sections { }; RPCHelpMan::RPCHelpMan(std::string name, std::string description, std::vector<RPCArg> args, RPCResults results, RPCExamples examples) + : RPCHelpMan{std::move(name), std::move(description), std::move(args), std::move(results), std::move(examples), nullptr} {} + +RPCHelpMan::RPCHelpMan(std::string name, std::string description, std::vector<RPCArg> args, RPCResults results, RPCExamples examples, RPCMethodImpl fun) : m_name{std::move(name)}, + m_fun{std::move(fun)}, m_description{std::move(description)}, m_args{std::move(args)}, m_results{std::move(results)}, @@ -464,6 +486,16 @@ bool RPCHelpMan::IsValidNumArgs(size_t num_args) const } return num_required_args <= num_args && num_args <= m_args.size(); } + +std::vector<std::string> RPCHelpMan::GetArgNames() const +{ + std::vector<std::string> ret; + for (const auto& arg : m_args) { + ret.emplace_back(arg.m_names); + } + return ret; +} + std::string RPCHelpMan::ToString() const { std::string ret; @@ -472,6 +504,7 @@ std::string RPCHelpMan::ToString() const ret += m_name; bool was_optional{false}; for (const auto& arg : m_args) { + if (arg.m_hidden) continue; const bool optional = arg.IsOptional(); ret += " "; if (optional) { @@ -493,6 +526,7 @@ std::string RPCHelpMan::ToString() const Sections sections; for (size_t i{0}; i < m_args.size(); ++i) { const auto& arg = m_args.at(i); + if (arg.m_hidden) continue; if (i == 0) ret += "\nArguments:\n"; @@ -572,9 +606,7 @@ std::string RPCArg::ToDescriptionString() const ret += "json array"; break; } - - // no default case, so the compiler can warn 
about missing cases - } + } // no default case, so the compiler can warn about missing cases } if (m_fallback.which() == 1) { ret += ", optional, default=" + boost::get<std::string>(m_fallback); @@ -592,9 +624,7 @@ std::string RPCArg::ToDescriptionString() const ret += ", required"; break; } - - // no default case, so the compiler can warn about missing cases - } + } // no default case, so the compiler can warn about missing cases } ret += ")"; ret += m_description.empty() ? "" : " " + m_description; @@ -689,10 +719,7 @@ void RPCResult::ToSections(Sections& sections, const OuterType outer_type, const sections.PushSection({indent + "}" + maybe_separator, ""}); return; } - - // no default case, so the compiler can warn about missing cases - } - + } // no default case, so the compiler can warn about missing cases CHECK_NONFATAL(false); } @@ -729,9 +756,7 @@ std::string RPCArg::ToStringObj(const bool oneline) const case Type::OBJ_USER_KEYS: // Currently unused, so avoid writing dead code CHECK_NONFATAL(false); - - // no default case, so the compiler can warn about missing cases - } + } // no default case, so the compiler can warn about missing cases CHECK_NONFATAL(false); } @@ -766,9 +791,7 @@ std::string RPCArg::ToString(const bool oneline) const } return "[" + res + "...]"; } - - // no default case, so the compiler can warn about missing cases - } + } // no default case, so the compiler can warn about missing cases CHECK_NONFATAL(false); } diff --git a/src/rpc/util.h b/src/rpc/util.h index 53dce2c397..45b0bb0c7e 100644 --- a/src/rpc/util.h +++ b/src/rpc/util.h @@ -5,6 +5,7 @@ #ifndef BITCOIN_RPC_UTIL_H #define BITCOIN_RPC_UTIL_H +#include <node/coinstats.h> #include <node/transaction.h> #include <outputtype.h> #include <protocol.h> @@ -77,6 +78,8 @@ extern uint256 ParseHashO(const UniValue& o, std::string strKey); extern std::vector<unsigned char> ParseHexV(const UniValue& v, std::string strName); extern std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKey); +CoinStatsHashType ParseHashType(const UniValue& param, const CoinStatsHashType default_type); + extern CAmount AmountFromValue(const UniValue& value); extern std::string HelpExampleCli(const std::string& methodname, const std::string& args); extern std::string HelpExampleRpc(const std::string& methodname, const std::string& args); @@ -144,6 +147,7 @@ struct RPCArg { using Fallback = boost::variant<Optional, /* default value for optional args */ std::string>; const std::string m_names; //!< The name of the arg (can be empty for inner args, can contain multiple aliases separated by | for named request arguments) const Type m_type; + const bool m_hidden; const std::vector<RPCArg> m_inner; //!< Only used for arrays or dicts const Fallback m_fallback; const std::string m_description; @@ -156,9 +160,11 @@ struct RPCArg { const Fallback fallback, const std::string description, const std::string oneline_description = "", - const std::vector<std::string> type_str = {}) + const std::vector<std::string> type_str = {}, + const bool hidden = false) : m_names{std::move(name)}, m_type{std::move(type)}, + m_hidden{hidden}, m_fallback{std::move(fallback)}, m_description{std::move(description)}, m_oneline_description{std::move(oneline_description)}, @@ -177,6 +183,7 @@ struct RPCArg { const std::vector<std::string> type_str = {}) : m_names{std::move(name)}, m_type{std::move(type)}, + m_hidden{false}, m_inner{std::move(inner)}, m_fallback{std::move(fallback)}, m_description{std::move(description)}, @@ -326,8 +333,15 @@ class 
RPCHelpMan { public: RPCHelpMan(std::string name, std::string description, std::vector<RPCArg> args, RPCResults results, RPCExamples examples); + using RPCMethodImpl = std::function<UniValue(const RPCHelpMan&, const JSONRPCRequest&)>; + RPCHelpMan(std::string name, std::string description, std::vector<RPCArg> args, RPCResults results, RPCExamples examples, RPCMethodImpl fun); std::string ToString() const; + UniValue HandleRequest(const JSONRPCRequest& request) + { + Check(request); + return m_fun(*this, request); + } /** If the supplied number of args is neither too small nor too high */ bool IsValidNumArgs(size_t num_args) const; /** @@ -340,8 +354,12 @@ public: } } -private: + std::vector<std::string> GetArgNames() const; + const std::string m_name; + +private: + const RPCMethodImpl m_fun; const std::string m_description; const std::vector<RPCArg> m_args; const RPCResults m_results; diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 2634d3ad4f..6c0a98cca2 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -190,7 +190,7 @@ class OriginPubkeyProvider final : public PubkeyProvider std::string OriginString() const { - return HexStr(std::begin(m_origin.fingerprint), std::end(m_origin.fingerprint)) + FormatHDKeypath(m_origin.path); + return HexStr(m_origin.fingerprint) + FormatHDKeypath(m_origin.path); } public: @@ -825,8 +825,9 @@ std::unique_ptr<PubkeyProvider> ParsePubkey(uint32_t key_exp_index, const Span<c return nullptr; } if (origin_split.size() == 1) return ParsePubkeyInner(key_exp_index, origin_split[0], permit_uncompressed, out, error); - if (origin_split[0].size() < 1 || origin_split[0][0] != '[') { - error = strprintf("Key origin start '[ character expected but not found, got '%c' instead", origin_split[0][0]); + if (origin_split[0].empty() || origin_split[0][0] != '[') { + error = strprintf("Key origin start '[ character expected but not found, got '%c' instead", + origin_split[0].empty() ? 
/** empty, implies split char */ ']' : origin_split[0][0]); return nullptr; } auto slash_split = Split(origin_split[0].subspan(1), '/'); @@ -896,7 +897,7 @@ std::unique_ptr<DescriptorImpl> ParseScript(uint32_t key_exp_index, Span<const c providers.emplace_back(std::move(pk)); key_exp_index++; } - if (providers.size() < 1 || providers.size() > 16) { + if (providers.empty() || providers.size() > 16) { error = strprintf("Cannot have %u keys in multisig; must have between 1 and 16 keys, inclusive", providers.size()); return nullptr; } else if (thres < 1) { @@ -985,15 +986,15 @@ std::unique_ptr<PubkeyProvider> InferPubkey(const CPubKey& pubkey, ParseScriptCo std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptContext ctx, const SigningProvider& provider) { std::vector<std::vector<unsigned char>> data; - txnouttype txntype = Solver(script, data); + TxoutType txntype = Solver(script, data); - if (txntype == TX_PUBKEY) { + if (txntype == TxoutType::PUBKEY) { CPubKey pubkey(data[0].begin(), data[0].end()); if (pubkey.IsValid()) { return MakeUnique<PKDescriptor>(InferPubkey(pubkey, ctx, provider)); } } - if (txntype == TX_PUBKEYHASH) { + if (txntype == TxoutType::PUBKEYHASH) { uint160 hash(data[0]); CKeyID keyid(hash); CPubKey pubkey; @@ -1001,7 +1002,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo return MakeUnique<PKHDescriptor>(InferPubkey(pubkey, ctx, provider)); } } - if (txntype == TX_WITNESS_V0_KEYHASH && ctx != ParseScriptContext::P2WSH) { + if (txntype == TxoutType::WITNESS_V0_KEYHASH && ctx != ParseScriptContext::P2WSH) { uint160 hash(data[0]); CKeyID keyid(hash); CPubKey pubkey; @@ -1009,7 +1010,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo return MakeUnique<WPKHDescriptor>(InferPubkey(pubkey, ctx, provider)); } } - if (txntype == TX_MULTISIG) { + if (txntype == TxoutType::MULTISIG) { std::vector<std::unique_ptr<PubkeyProvider>> providers; for (size_t i = 1; i + 1 < data.size(); ++i) { CPubKey pubkey(data[i].begin(), data[i].end()); @@ -1017,7 +1018,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo } return MakeUnique<MultisigDescriptor>((int)data[0][0], std::move(providers)); } - if (txntype == TX_SCRIPTHASH && ctx == ParseScriptContext::TOP) { + if (txntype == TxoutType::SCRIPTHASH && ctx == ParseScriptContext::TOP) { uint160 hash(data[0]); CScriptID scriptid(hash); CScript subscript; @@ -1026,7 +1027,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo if (sub) return MakeUnique<SHDescriptor>(std::move(sub)); } } - if (txntype == TX_WITNESS_V0_SCRIPTHASH && ctx != ParseScriptContext::P2WSH) { + if (txntype == TxoutType::WITNESS_V0_SCRIPTHASH && ctx != ParseScriptContext::P2WSH) { CScriptID scriptid; CRIPEMD160().Write(data[0].data(), data[0].size()).Finalize(scriptid.begin()); CScript subscript; diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 9415bba585..39feb4ccc9 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -986,9 +986,9 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& else if (opcode == OP_SHA256) CSHA256().Write(vch.data(), vch.size()).Finalize(vchHash.data()); else if (opcode == OP_HASH160) - CHash160().Write(vch.data(), vch.size()).Finalize(vchHash.data()); + CHash160().Write(vch).Finalize(vchHash); else if (opcode == OP_HASH256) - CHash256().Write(vch.data(), vch.size()).Finalize(vchHash.data()); + 
CHash256().Write(vch).Finalize(vchHash); popstack(stack); stack.push_back(vchHash); } diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 43988c4fd7..f425215549 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -92,11 +92,11 @@ static bool CreateSig(const BaseSignatureCreator& creator, SignatureData& sigdat /** * Sign scriptPubKey using signature made with creator. * Signatures are returned in scriptSigRet (or returns false if scriptPubKey can't be signed), - * unless whichTypeRet is TX_SCRIPTHASH, in which case scriptSigRet is the redemption script. + * unless whichTypeRet is TxoutType::SCRIPTHASH, in which case scriptSigRet is the redemption script. * Returns false if scriptPubKey could not be completely satisfied. */ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& scriptPubKey, - std::vector<valtype>& ret, txnouttype& whichTypeRet, SigVersion sigversion, SignatureData& sigdata) + std::vector<valtype>& ret, TxoutType& whichTypeRet, SigVersion sigversion, SignatureData& sigdata) { CScript scriptRet; uint160 h160; @@ -108,15 +108,15 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator switch (whichTypeRet) { - case TX_NONSTANDARD: - case TX_NULL_DATA: - case TX_WITNESS_UNKNOWN: + case TxoutType::NONSTANDARD: + case TxoutType::NULL_DATA: + case TxoutType::WITNESS_UNKNOWN: return false; - case TX_PUBKEY: + case TxoutType::PUBKEY: if (!CreateSig(creator, sigdata, provider, sig, CPubKey(vSolutions[0]), scriptPubKey, sigversion)) return false; ret.push_back(std::move(sig)); return true; - case TX_PUBKEYHASH: { + case TxoutType::PUBKEYHASH: { CKeyID keyID = CKeyID(uint160(vSolutions[0])); CPubKey pubkey; if (!GetPubKey(provider, sigdata, keyID, pubkey)) { @@ -129,7 +129,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator ret.push_back(ToByteVector(pubkey)); return true; } - case TX_SCRIPTHASH: + case TxoutType::SCRIPTHASH: h160 = uint160(vSolutions[0]); if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); @@ -139,7 +139,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator sigdata.missing_redeem_script = h160; return false; - case TX_MULTISIG: { + case TxoutType::MULTISIG: { size_t required = vSolutions.front()[0]; ret.push_back(valtype()); // workaround CHECKMULTISIG bug for (size_t i = 1; i < vSolutions.size() - 1; ++i) { @@ -159,11 +159,11 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator } return ok; } - case TX_WITNESS_V0_KEYHASH: + case TxoutType::WITNESS_V0_KEYHASH: ret.push_back(vSolutions[0]); return true; - case TX_WITNESS_V0_SCRIPTHASH: + case TxoutType::WITNESS_V0_SCRIPTHASH: CRIPEMD160().Write(&vSolutions[0][0], vSolutions[0].size()).Finalize(h160.begin()); if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) { ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end())); @@ -198,44 +198,44 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato if (sigdata.complete) return true; std::vector<valtype> result; - txnouttype whichType; + TxoutType whichType; bool solved = SignStep(provider, creator, fromPubKey, result, whichType, SigVersion::BASE, sigdata); bool P2SH = false; CScript subscript; sigdata.scriptWitness.stack.clear(); - if (solved && whichType == TX_SCRIPTHASH) + if (solved && whichType == TxoutType::SCRIPTHASH) { // Solver 
returns the subscript that needs to be evaluated; // the final scriptSig is the signatures from that // and then the serialized subscript: subscript = CScript(result[0].begin(), result[0].end()); sigdata.redeem_script = subscript; - solved = solved && SignStep(provider, creator, subscript, result, whichType, SigVersion::BASE, sigdata) && whichType != TX_SCRIPTHASH; + solved = solved && SignStep(provider, creator, subscript, result, whichType, SigVersion::BASE, sigdata) && whichType != TxoutType::SCRIPTHASH; P2SH = true; } - if (solved && whichType == TX_WITNESS_V0_KEYHASH) + if (solved && whichType == TxoutType::WITNESS_V0_KEYHASH) { CScript witnessscript; witnessscript << OP_DUP << OP_HASH160 << ToByteVector(result[0]) << OP_EQUALVERIFY << OP_CHECKSIG; - txnouttype subType; + TxoutType subType; solved = solved && SignStep(provider, creator, witnessscript, result, subType, SigVersion::WITNESS_V0, sigdata); sigdata.scriptWitness.stack = result; sigdata.witness = true; result.clear(); } - else if (solved && whichType == TX_WITNESS_V0_SCRIPTHASH) + else if (solved && whichType == TxoutType::WITNESS_V0_SCRIPTHASH) { CScript witnessscript(result[0].begin(), result[0].end()); sigdata.witness_script = witnessscript; - txnouttype subType; - solved = solved && SignStep(provider, creator, witnessscript, result, subType, SigVersion::WITNESS_V0, sigdata) && subType != TX_SCRIPTHASH && subType != TX_WITNESS_V0_SCRIPTHASH && subType != TX_WITNESS_V0_KEYHASH; + TxoutType subType; + solved = solved && SignStep(provider, creator, witnessscript, result, subType, SigVersion::WITNESS_V0, sigdata) && subType != TxoutType::SCRIPTHASH && subType != TxoutType::WITNESS_V0_SCRIPTHASH && subType != TxoutType::WITNESS_V0_KEYHASH; result.push_back(std::vector<unsigned char>(witnessscript.begin(), witnessscript.end())); sigdata.scriptWitness.stack = result; sigdata.witness = true; result.clear(); - } else if (solved && whichType == TX_WITNESS_UNKNOWN) { + } else if (solved && whichType == TxoutType::WITNESS_UNKNOWN) { sigdata.witness = true; } @@ -301,11 +301,11 @@ SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nI // Get scripts std::vector<std::vector<unsigned char>> solutions; - txnouttype script_type = Solver(txout.scriptPubKey, solutions); + TxoutType script_type = Solver(txout.scriptPubKey, solutions); SigVersion sigversion = SigVersion::BASE; CScript next_script = txout.scriptPubKey; - if (script_type == TX_SCRIPTHASH && !stack.script.empty() && !stack.script.back().empty()) { + if (script_type == TxoutType::SCRIPTHASH && !stack.script.empty() && !stack.script.back().empty()) { // Get the redeemScript CScript redeem_script(stack.script.back().begin(), stack.script.back().end()); data.redeem_script = redeem_script; @@ -315,7 +315,7 @@ SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nI script_type = Solver(next_script, solutions); stack.script.pop_back(); } - if (script_type == TX_WITNESS_V0_SCRIPTHASH && !stack.witness.empty() && !stack.witness.back().empty()) { + if (script_type == TxoutType::WITNESS_V0_SCRIPTHASH && !stack.witness.empty() && !stack.witness.back().empty()) { // Get the witnessScript CScript witness_script(stack.witness.back().begin(), stack.witness.back().end()); data.witness_script = witness_script; @@ -328,7 +328,7 @@ SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nI stack.witness.clear(); sigversion = SigVersion::WITNESS_V0; } - if (script_type == TX_MULTISIG && !stack.script.empty()) { + if 
(script_type == TxoutType::MULTISIG && !stack.script.empty()) { // Build a map of pubkey -> signature by matching sigs to pubkeys: assert(solutions.size() > 1); unsigned int num_pubkeys = solutions.size()-2; @@ -454,13 +454,13 @@ bool IsSegWitOutput(const SigningProvider& provider, const CScript& script) { std::vector<valtype> solutions; auto whichtype = Solver(script, solutions); - if (whichtype == TX_WITNESS_V0_SCRIPTHASH || whichtype == TX_WITNESS_V0_KEYHASH || whichtype == TX_WITNESS_UNKNOWN) return true; - if (whichtype == TX_SCRIPTHASH) { + if (whichtype == TxoutType::WITNESS_V0_SCRIPTHASH || whichtype == TxoutType::WITNESS_V0_KEYHASH || whichtype == TxoutType::WITNESS_UNKNOWN) return true; + if (whichtype == TxoutType::SCRIPTHASH) { auto h160 = uint160(solutions[0]); CScript subscript; if (provider.GetCScript(CScriptID{h160}, subscript)) { whichtype = Solver(subscript, solutions); - if (whichtype == TX_WITNESS_V0_SCRIPTHASH || whichtype == TX_WITNESS_V0_KEYHASH || whichtype == TX_WITNESS_UNKNOWN) return true; + if (whichtype == TxoutType::WITNESS_V0_SCRIPTHASH || whichtype == TxoutType::WITNESS_V0_KEYHASH || whichtype == TxoutType::WITNESS_UNKNOWN) return true; } } return false; diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 2adf6ce56d..3a4882f280 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -16,10 +16,10 @@ typedef std::vector<unsigned char> valtype; bool fAcceptDatacarrier = DEFAULT_ACCEPT_DATACARRIER; unsigned nMaxDatacarrierBytes = MAX_OP_RETURN_RELAY; -CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {} +CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in)) {} CScriptID::CScriptID(const ScriptHash& in) : BaseHash(static_cast<uint160>(in)) {} -ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {} +ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in)) {} ScriptHash::ScriptHash(const CScriptID& in) : BaseHash(static_cast<uint160>(in)) {} PKHash::PKHash(const CPubKey& pubkey) : BaseHash(pubkey.GetID()) {} @@ -43,20 +43,20 @@ WitnessV0ScriptHash::WitnessV0ScriptHash(const CScript& in) CSHA256().Write(in.data(), in.size()).Finalize(begin()); } -std::string GetTxnOutputType(txnouttype t) +std::string GetTxnOutputType(TxoutType t) { switch (t) { - case TX_NONSTANDARD: return "nonstandard"; - case TX_PUBKEY: return "pubkey"; - case TX_PUBKEYHASH: return "pubkeyhash"; - case TX_SCRIPTHASH: return "scripthash"; - case TX_MULTISIG: return "multisig"; - case TX_NULL_DATA: return "nulldata"; - case TX_WITNESS_V0_KEYHASH: return "witness_v0_keyhash"; - case TX_WITNESS_V0_SCRIPTHASH: return "witness_v0_scripthash"; - case TX_WITNESS_UNKNOWN: return "witness_unknown"; - } + case TxoutType::NONSTANDARD: return "nonstandard"; + case TxoutType::PUBKEY: return "pubkey"; + case TxoutType::PUBKEYHASH: return "pubkeyhash"; + case TxoutType::SCRIPTHASH: return "scripthash"; + case TxoutType::MULTISIG: return "multisig"; + case TxoutType::NULL_DATA: return "nulldata"; + case TxoutType::WITNESS_V0_KEYHASH: return "witness_v0_keyhash"; + case TxoutType::WITNESS_V0_SCRIPTHASH: return "witness_v0_scripthash"; + case TxoutType::WITNESS_UNKNOWN: return "witness_unknown"; + } // no default case, so the compiler can warn about missing cases assert(false); } @@ -106,7 +106,7 @@ static bool MatchMultisig(const CScript& script, unsigned int& required, std::ve return (it + 1 == script.end()); } -txnouttype Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned char>>& 
vSolutionsRet) +TxoutType Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned char>>& vSolutionsRet) { vSolutionsRet.clear(); @@ -116,7 +116,7 @@ txnouttype Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned { std::vector<unsigned char> hashBytes(scriptPubKey.begin()+2, scriptPubKey.begin()+22); vSolutionsRet.push_back(hashBytes); - return TX_SCRIPTHASH; + return TxoutType::SCRIPTHASH; } int witnessversion; @@ -124,18 +124,18 @@ txnouttype Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned if (scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) { if (witnessversion == 0 && witnessprogram.size() == WITNESS_V0_KEYHASH_SIZE) { vSolutionsRet.push_back(witnessprogram); - return TX_WITNESS_V0_KEYHASH; + return TxoutType::WITNESS_V0_KEYHASH; } if (witnessversion == 0 && witnessprogram.size() == WITNESS_V0_SCRIPTHASH_SIZE) { vSolutionsRet.push_back(witnessprogram); - return TX_WITNESS_V0_SCRIPTHASH; + return TxoutType::WITNESS_V0_SCRIPTHASH; } if (witnessversion != 0) { vSolutionsRet.push_back(std::vector<unsigned char>{(unsigned char)witnessversion}); vSolutionsRet.push_back(std::move(witnessprogram)); - return TX_WITNESS_UNKNOWN; + return TxoutType::WITNESS_UNKNOWN; } - return TX_NONSTANDARD; + return TxoutType::NONSTANDARD; } // Provably prunable, data-carrying output @@ -144,18 +144,18 @@ txnouttype Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned // byte passes the IsPushOnly() test we don't care what exactly is in the // script. if (scriptPubKey.size() >= 1 && scriptPubKey[0] == OP_RETURN && scriptPubKey.IsPushOnly(scriptPubKey.begin()+1)) { - return TX_NULL_DATA; + return TxoutType::NULL_DATA; } std::vector<unsigned char> data; if (MatchPayToPubkey(scriptPubKey, data)) { vSolutionsRet.push_back(std::move(data)); - return TX_PUBKEY; + return TxoutType::PUBKEY; } if (MatchPayToPubkeyHash(scriptPubKey, data)) { vSolutionsRet.push_back(std::move(data)); - return TX_PUBKEYHASH; + return TxoutType::PUBKEYHASH; } unsigned int required; @@ -164,19 +164,19 @@ txnouttype Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned vSolutionsRet.push_back({static_cast<unsigned char>(required)}); // safe as required is in range 1..16 vSolutionsRet.insert(vSolutionsRet.end(), keys.begin(), keys.end()); vSolutionsRet.push_back({static_cast<unsigned char>(keys.size())}); // safe as size is in range 1..16 - return TX_MULTISIG; + return TxoutType::MULTISIG; } vSolutionsRet.clear(); - return TX_NONSTANDARD; + return TxoutType::NONSTANDARD; } bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) { std::vector<valtype> vSolutions; - txnouttype whichType = Solver(scriptPubKey, vSolutions); + TxoutType whichType = Solver(scriptPubKey, vSolutions); - if (whichType == TX_PUBKEY) { + if (whichType == TxoutType::PUBKEY) { CPubKey pubKey(vSolutions[0]); if (!pubKey.IsValid()) return false; @@ -184,26 +184,26 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) addressRet = PKHash(pubKey); return true; } - else if (whichType == TX_PUBKEYHASH) + else if (whichType == TxoutType::PUBKEYHASH) { addressRet = PKHash(uint160(vSolutions[0])); return true; } - else if (whichType == TX_SCRIPTHASH) + else if (whichType == TxoutType::SCRIPTHASH) { addressRet = ScriptHash(uint160(vSolutions[0])); return true; - } else if (whichType == TX_WITNESS_V0_KEYHASH) { + } else if (whichType == TxoutType::WITNESS_V0_KEYHASH) { WitnessV0KeyHash hash; std::copy(vSolutions[0].begin(), 
vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TX_WITNESS_V0_SCRIPTHASH) { + } else if (whichType == TxoutType::WITNESS_V0_SCRIPTHASH) { WitnessV0ScriptHash hash; std::copy(vSolutions[0].begin(), vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TX_WITNESS_UNKNOWN) { + } else if (whichType == TxoutType::WITNESS_UNKNOWN) { WitnessUnknown unk; unk.version = vSolutions[0][0]; std::copy(vSolutions[1].begin(), vSolutions[1].end(), unk.program); @@ -215,19 +215,19 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) return false; } -bool ExtractDestinations(const CScript& scriptPubKey, txnouttype& typeRet, std::vector<CTxDestination>& addressRet, int& nRequiredRet) +bool ExtractDestinations(const CScript& scriptPubKey, TxoutType& typeRet, std::vector<CTxDestination>& addressRet, int& nRequiredRet) { addressRet.clear(); std::vector<valtype> vSolutions; typeRet = Solver(scriptPubKey, vSolutions); - if (typeRet == TX_NONSTANDARD) { + if (typeRet == TxoutType::NONSTANDARD) { return false; - } else if (typeRet == TX_NULL_DATA) { + } else if (typeRet == TxoutType::NULL_DATA) { // This is data, not addresses return false; } - if (typeRet == TX_MULTISIG) + if (typeRet == TxoutType::MULTISIG) { nRequiredRet = vSolutions.front()[0]; for (unsigned int i = 1; i < vSolutions.size()-1; i++) @@ -290,14 +290,11 @@ public: return CScript() << CScript::EncodeOP_N(id.version) << std::vector<unsigned char>(id.program, id.program + id.length); } }; - -const CScriptVisitor g_script_visitor; - } // namespace CScript GetScriptForDestination(const CTxDestination& dest) { - return boost::apply_visitor(::g_script_visitor, dest); + return boost::apply_visitor(CScriptVisitor(), dest); } CScript GetScriptForRawPubKey(const CPubKey& pubKey) @@ -319,10 +316,10 @@ CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys) CScript GetScriptForWitness(const CScript& redeemscript) { std::vector<std::vector<unsigned char> > vSolutions; - txnouttype typ = Solver(redeemscript, vSolutions); - if (typ == TX_PUBKEY) { - return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0].begin(), vSolutions[0].end()))); - } else if (typ == TX_PUBKEYHASH) { + TxoutType typ = Solver(redeemscript, vSolutions); + if (typ == TxoutType::PUBKEY) { + return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0]))); + } else if (typ == TxoutType::PUBKEYHASH) { return GetScriptForDestination(WitnessV0KeyHash(uint160{vSolutions[0]})); } return GetScriptForDestination(WitnessV0ScriptHash(redeemscript)); diff --git a/src/script/standard.h b/src/script/standard.h index 4baed6da6e..992e37675f 100644 --- a/src/script/standard.h +++ b/src/script/standard.h @@ -79,6 +79,9 @@ public: { return m_hash.size(); } + + unsigned char* data() { return m_hash.data(); } + const unsigned char* data() const { return m_hash.data(); } }; /** A reference to a CScript: the Hash160 of its serialization (see script.h) */ @@ -99,11 +102,11 @@ static const unsigned int MAX_OP_RETURN_RELAY = 83; /** * A data carrying output is an unspendable output containing data. The script - * type is designated as TX_NULL_DATA. + * type is designated as TxoutType::NULL_DATA. */ extern bool fAcceptDatacarrier; -/** Maximum size of TX_NULL_DATA scripts that this node considers standard. */ +/** Maximum size of TxoutType::NULL_DATA scripts that this node considers standard. 
*/ extern unsigned nMaxDatacarrierBytes; /** @@ -116,18 +119,17 @@ extern unsigned nMaxDatacarrierBytes; */ static const unsigned int MANDATORY_SCRIPT_VERIFY_FLAGS = SCRIPT_VERIFY_P2SH; -enum txnouttype -{ - TX_NONSTANDARD, +enum class TxoutType { + NONSTANDARD, // 'standard' transaction types: - TX_PUBKEY, - TX_PUBKEYHASH, - TX_SCRIPTHASH, - TX_MULTISIG, - TX_NULL_DATA, //!< unspendable OP_RETURN script that carries data - TX_WITNESS_V0_SCRIPTHASH, - TX_WITNESS_V0_KEYHASH, - TX_WITNESS_UNKNOWN, //!< Only for Witness versions not already defined above + PUBKEY, + PUBKEYHASH, + SCRIPTHASH, + MULTISIG, + NULL_DATA, //!< unspendable OP_RETURN script that carries data + WITNESS_V0_SCRIPTHASH, + WITNESS_V0_KEYHASH, + WITNESS_UNKNOWN, //!< Only for Witness versions not already defined above }; class CNoDestination { @@ -200,11 +202,11 @@ struct WitnessUnknown /** * A txout script template with a specific destination. It is either: * * CNoDestination: no destination set - * * PKHash: TX_PUBKEYHASH destination (P2PKH) - * * ScriptHash: TX_SCRIPTHASH destination (P2SH) - * * WitnessV0ScriptHash: TX_WITNESS_V0_SCRIPTHASH destination (P2WSH) - * * WitnessV0KeyHash: TX_WITNESS_V0_KEYHASH destination (P2WPKH) - * * WitnessUnknown: TX_WITNESS_UNKNOWN destination (P2W???) + * * PKHash: TxoutType::PUBKEYHASH destination (P2PKH) + * * ScriptHash: TxoutType::SCRIPTHASH destination (P2SH) + * * WitnessV0ScriptHash: TxoutType::WITNESS_V0_SCRIPTHASH destination (P2WSH) + * * WitnessV0KeyHash: TxoutType::WITNESS_V0_KEYHASH destination (P2WPKH) + * * WitnessUnknown: TxoutType::WITNESS_UNKNOWN destination (P2W???) * A CTxDestination is the internal data type encoded in a bitcoin address */ typedef boost::variant<CNoDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, WitnessUnknown> CTxDestination; @@ -212,8 +214,8 @@ typedef boost::variant<CNoDestination, PKHash, ScriptHash, WitnessV0ScriptHash, /** Check whether a CTxDestination is a CNoDestination. */ bool IsValidDestination(const CTxDestination& dest); -/** Get the name of a txnouttype as a C string, or nullptr if unknown. */ -std::string GetTxnOutputType(txnouttype t); +/** Get the name of a TxoutType as a string */ +std::string GetTxnOutputType(TxoutType t); /** * Parse a scriptPubKey and identify script type for standard scripts. If @@ -223,9 +225,9 @@ std::string GetTxnOutputType(txnouttype t); * * @param[in] scriptPubKey Script to parse * @param[out] vSolutionsRet Vector of parsed pubkeys and hashes - * @return The script type. TX_NONSTANDARD represents a failed solve. + * @return The script type. TxoutType::NONSTANDARD represents a failed solve. */ -txnouttype Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned char>>& vSolutionsRet); +TxoutType Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned char>>& vSolutionsRet); /** * Parse a standard scriptPubKey for the destination address. Assigns result to @@ -246,7 +248,7 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) * encodable as an address) with key identifiers (of keys involved in a * CScript), and its use should be phased out. */ -bool ExtractDestinations(const CScript& scriptPubKey, txnouttype& typeRet, std::vector<CTxDestination>& addressRet, int& nRequiredRet); +bool ExtractDestinations(const CScript& scriptPubKey, TxoutType& typeRet, std::vector<CTxDestination>& addressRet, int& nRequiredRet); /** * Generate a Bitcoin scriptPubKey for the given CTxDestination. 
Returns a P2PKH diff --git a/src/serialize.h b/src/serialize.h index 71c2cfa164..7a94e704b2 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -9,13 +9,13 @@ #include <compat/endian.h> #include <algorithm> +#include <cstdint> #include <cstring> #include <ios> #include <limits> #include <map> #include <memory> #include <set> -#include <stdint.h> #include <string> #include <string.h> #include <utility> @@ -272,7 +272,7 @@ template<typename Stream> inline void Unserialize(Stream& s, bool& a) { char f=s inline unsigned int GetSizeOfCompactSize(uint64_t nSize) { if (nSize < 253) return sizeof(unsigned char); - else if (nSize <= std::numeric_limits<unsigned short>::max()) return sizeof(unsigned char) + sizeof(unsigned short); + else if (nSize <= std::numeric_limits<uint16_t>::max()) return sizeof(unsigned char) + sizeof(uint16_t); else if (nSize <= std::numeric_limits<unsigned int>::max()) return sizeof(unsigned char) + sizeof(unsigned int); else return sizeof(unsigned char) + sizeof(uint64_t); } @@ -286,7 +286,7 @@ void WriteCompactSize(Stream& os, uint64_t nSize) { ser_writedata8(os, nSize); } - else if (nSize <= std::numeric_limits<unsigned short>::max()) + else if (nSize <= std::numeric_limits<uint16_t>::max()) { ser_writedata8(os, 253); ser_writedata16(os, nSize); diff --git a/src/span.h b/src/span.h index 4931507719..4afb383a59 100644 --- a/src/span.h +++ b/src/span.h @@ -21,6 +21,62 @@ /** A Span is an object that can refer to a contiguous sequence of objects. * * It implements a subset of C++20's std::span. + * + * Things to be aware of when writing code that deals with Spans: + * + * - Similar to references themselves, Spans are subject to reference lifetime + * issues. The user is responsible for making sure the objects pointed to by + * a Span live as long as the Span is used. For example: + * + * std::vector<int> vec{1,2,3,4}; + * Span<int> sp(vec); + * vec.push_back(5); + * printf("%i\n", sp.front()); // UB! + * + * may exhibit undefined behavior, as increasing the size of a vector may + * invalidate references. + * + * - One particular pitfall is that Spans can be constructed from temporaries, + * but this is unsafe when the Span is stored in a variable, outliving the + * temporary. For example, this will compile, but exhibits undefined behavior: + * + * Span<const int> sp(std::vector<int>{1, 2, 3}); + * printf("%i\n", sp.front()); // UB! + * + * The lifetime of the vector ends when the statement it is created in ends. + * Thus the Span is left with a dangling reference, and using it is undefined. + * + * - Due to Span's automatic creation from range-like objects (arrays, and data + * types that expose a data() and size() member function), functions that + * accept a Span as input parameter can be called with any compatible + * range-like object. For example, this works: +* + * void Foo(Span<const int> arg); + * + * Foo(std::vector<int>{1, 2, 3}); // Works + * + * This is very useful in cases where a function truly does not care about the + * container, and only about having exactly a range of elements. However it + * may also be surprising to see automatic conversions in this case. + * + * When a function accepts a Span with a mutable element type, it will not + * accept temporaries; only variables or other references. 
For example: + * + * void FooMut(Span<int> arg); + * + * FooMut(std::vector<int>{1, 2, 3}); // Does not compile + * std::vector<int> baz{1, 2, 3}; + * FooMut(baz); // Works + * + * This is similar to how functions that take (non-const) lvalue references + * as input cannot accept temporaries. This does not work either: + * + * void FooVec(std::vector<int>& arg); + * FooVec(std::vector<int>{1, 2, 3}); // Does not compile + * + * The idea is that if a function accepts a mutable reference, a meaningful + * result will be present in that variable after the call. Passing a temporary + * is useless in that context. */ template<typename C> class Span @@ -95,6 +151,7 @@ public: return m_data[m_size - 1]; } constexpr std::size_t size() const noexcept { return m_size; } + constexpr bool empty() const noexcept { return size() == 0; } CONSTEXPR_IF_NOT_DEBUG C& operator[](std::size_t pos) const noexcept { ASSERT_IF_DEBUG(size() > pos); @@ -150,4 +207,16 @@ T& SpanPopBack(Span<T>& span) return back; } +// Helper functions to safely cast to unsigned char pointers. +inline unsigned char* UCharCast(char* c) { return (unsigned char*)c; } +inline unsigned char* UCharCast(unsigned char* c) { return c; } +inline const unsigned char* UCharCast(const char* c) { return (unsigned char*)c; } +inline const unsigned char* UCharCast(const unsigned char* c) { return c; } + +// Helper function to safely convert a Span to a Span<[const] unsigned char>. +template <typename T> constexpr auto UCharSpanCast(Span<T> s) -> Span<typename std::remove_pointer<decltype(UCharCast(s.data()))>::type> { return {UCharCast(s.data()), s.size()}; } + +/** Like MakeSpan, but for (const) unsigned char member types only. Only works for (un)signed char containers. */ +template <typename V> constexpr auto MakeUCharSpan(V&& v) -> decltype(UCharSpanCast(MakeSpan(std::forward<V>(v)))) { return UCharSpanCast(MakeSpan(std::forward<V>(v))); } + #endif diff --git a/src/streams.h b/src/streams.h index e1d1b0eab2..6ce8065da8 100644 --- a/src/streams.h +++ b/src/streams.h @@ -814,18 +814,6 @@ public: return true; } - bool Seek(uint64_t nPos) { - long nLongPos = nPos; - if (nPos != (uint64_t)nLongPos) - return false; - if (fseek(src, nLongPos, SEEK_SET)) - return false; - nLongPos = ftell(src); - nSrcPos = nLongPos; - nReadPos = nLongPos; - return true; - } - //! prevent reading beyond a certain position //! no argument removes the limit bool SetLimit(uint64_t nPos = std::numeric_limits<uint64_t>::max()) { diff --git a/src/sync.cpp b/src/sync.cpp index 9abdedbed4..4be13a3c48 100644 --- a/src/sync.cpp +++ b/src/sync.cpp @@ -60,7 +60,7 @@ struct CLockLocation { std::string ToString() const { return strprintf( - "%s %s:%s%s (in thread %s)", + "'%s' in %s:%s%s (in thread '%s')", mutexName, sourceFile, sourceLine, (fTry ? 
" (TRY)" : ""), m_thread_name); } @@ -105,7 +105,7 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac { LogPrintf("POTENTIAL DEADLOCK DETECTED\n"); LogPrintf("Previous lock order was:\n"); - for (const LockStackItem& i : s2) { + for (const LockStackItem& i : s1) { if (i.first == mismatch.first) { LogPrintf(" (1)"); /* Continued */ } @@ -114,21 +114,25 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac } LogPrintf(" %s\n", i.second.ToString()); } + + std::string mutex_a, mutex_b; LogPrintf("Current lock order is:\n"); - for (const LockStackItem& i : s1) { + for (const LockStackItem& i : s2) { if (i.first == mismatch.first) { LogPrintf(" (1)"); /* Continued */ + mutex_a = i.second.Name(); } if (i.first == mismatch.second) { LogPrintf(" (2)"); /* Continued */ + mutex_b = i.second.Name(); } LogPrintf(" %s\n", i.second.ToString()); } if (g_debug_lockorder_abort) { - tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order at %s:%i, details in debug log.\n", __FILE__, __LINE__); + tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString()); abort(); } - throw std::logic_error("potential deadlock detected"); + throw std::logic_error(strprintf("potential deadlock detected: %s -> %s -> %s", mutex_b, mutex_a, mutex_b)); } static void push_lock(void* c, const CLockLocation& locklocation) @@ -145,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation) const LockPair p1 = std::make_pair(i.first, c); if (lockdata.lockorders.count(p1)) continue; - lockdata.lockorders.emplace(p1, lock_stack); const LockPair p2 = std::make_pair(c, i.first); + if (lockdata.lockorders.count(p2)) { + auto lock_stack_copy = lock_stack; + lock_stack.pop_back(); + potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy); + // potential_deadlock_detected() does not return. 
+ } + + lockdata.lockorders.emplace(p1, lock_stack); lockdata.invlockorders.insert(p2); - if (lockdata.lockorders.count(p2)) - potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]); } } @@ -255,6 +264,17 @@ void DeleteLock(void* cs) } } +bool LockStackEmpty() +{ + LockData& lockdata = GetLockData(); + std::lock_guard<std::mutex> lock(lockdata.dd_mutex); + const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id()); + if (it == lockdata.m_lock_stacks.end()) { + return true; + } + return it->second.empty(); +} + bool g_debug_lockorder_abort = true; #endif /* DEBUG_LOCKORDER */ diff --git a/src/sync.h b/src/sync.h index 77327d8bfe..05ff2ee8a9 100644 --- a/src/sync.h +++ b/src/sync.h @@ -56,6 +56,7 @@ template <typename MutexType> void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs); void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs); void DeleteLock(void* cs); +bool LockStackEmpty(); /** * Call abort() if a potential lock order deadlock bug is detected, instead of @@ -64,13 +65,14 @@ void DeleteLock(void* cs); */ extern bool g_debug_lockorder_abort; #else -void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {} -void static inline LeaveCritical() {} -void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {} +inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {} +inline void LeaveCritical() {} +inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {} template <typename MutexType> -void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {} -void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {} -void static inline DeleteLock(void* cs) {} +inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {} +inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {} +inline void DeleteLock(void* cs) {} +inline bool LockStackEmpty() { return true; } #endif #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) #define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs) diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index bc6b38c682..25fdd64568 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) // Test: Sanity check, GetAddr should never return anything if addrman // is empty. BOOST_CHECK_EQUAL(addrman.size(), 0U); - std::vector<CAddress> vAddr1 = addrman.GetAddr(); + std::vector<CAddress> vAddr1 = addrman.GetAddr(/* max_addresses */ 0, /* max_pct */0); BOOST_CHECK_EQUAL(vAddr1.size(), 0U); CAddress addr1 = CAddress(ResolveService("250.250.2.1", 8333), NODE_NONE); @@ -415,13 +415,15 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) BOOST_CHECK(addrman.Add(addr4, source2)); BOOST_CHECK(addrman.Add(addr5, source1)); - // GetAddr returns 23% of addresses, 23% of 5 is 1 rounded down. 
- BOOST_CHECK_EQUAL(addrman.GetAddr().size(), 1U); + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0).size(), 5U); + // Net processing asks for 23% of addresses. 23% of 5 is 1 rounded down. + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23).size(), 1U); // Test: Ensure GetAddr works with new and tried addresses. addrman.Good(CAddress(addr1, NODE_NONE)); addrman.Good(CAddress(addr2, NODE_NONE)); - BOOST_CHECK_EQUAL(addrman.GetAddr().size(), 1U); + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0).size(), 5U); + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23).size(), 1U); // Test: Ensure GetAddr still returns 23% when addrman has many addrs. for (unsigned int i = 1; i < (8 * 256); i++) { @@ -436,7 +438,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) if (i % 8 == 0) addrman.Good(addr); } - std::vector<CAddress> vAddr = addrman.GetAddr(); + std::vector<CAddress> vAddr = addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23); size_t percent23 = (addrman.size() * 23) / 100; BOOST_CHECK_EQUAL(vAddr.size(), percent23); diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp index 7dff2e6e86..00c4bdc14e 100644 --- a/src/test/blockfilter_index_tests.cpp +++ b/src/test/blockfilter_index_tests.cpp @@ -94,7 +94,7 @@ bool BuildChainTestingSetup::BuildChain(const CBlockIndex* pindex, CBlockHeader header = block->GetBlockHeader(); BlockValidationState state; - if (!EnsureChainman(m_node).ProcessNewBlockHeaders({header}, state, Params(), &pindex)) { + if (!Assert(m_node.chainman)->ProcessNewBlockHeaders({header}, state, Params(), &pindex)) { return false; } } @@ -171,7 +171,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) uint256 chainA_last_header = last_header; for (size_t i = 0; i < 2; i++) { const auto& block = chainA[i]; - BOOST_REQUIRE(EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, nullptr)); + BOOST_REQUIRE(Assert(m_node.chainman)->ProcessNewBlock(Params(), block, true, nullptr)); } for (size_t i = 0; i < 2; i++) { const auto& block = chainA[i]; @@ -189,7 +189,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) uint256 chainB_last_header = last_header; for (size_t i = 0; i < 3; i++) { const auto& block = chainB[i]; - BOOST_REQUIRE(EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, nullptr)); + BOOST_REQUIRE(Assert(m_node.chainman)->ProcessNewBlock(Params(), block, true, nullptr)); } for (size_t i = 0; i < 3; i++) { const auto& block = chainB[i]; @@ -220,7 +220,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) // Reorg back to chain A. for (size_t i = 2; i < 4; i++) { const auto& block = chainA[i]; - BOOST_REQUIRE(EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, nullptr)); + BOOST_REQUIRE(Assert(m_node.chainman)->ProcessNewBlock(Params(), block, true, nullptr)); } // Check that chain A and B blocks can be retrieved. 
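The script/standard.h hunk above converts the unscoped txnouttype enum into the scoped TxoutType enum and updates Solver(), GetTxnOutputType() and ExtractDestinations() to match. A minimal sketch of what a caller looks like after the rename, assuming only the declarations shown in that hunk (the ClassifyScript helper and the exact header paths are illustrative, not part of the patch):

#include <script/script.h>
#include <script/standard.h>

#include <string>
#include <vector>

// Classify a scriptPubKey using the renamed scoped enum.
std::string ClassifyScript(const CScript& script_pub_key)
{
    std::vector<std::vector<unsigned char>> solutions;
    // Solver() now returns TxoutType; TxoutType::NONSTANDARD represents a failed solve.
    const TxoutType type = Solver(script_pub_key, solutions);
    if (type == TxoutType::NULL_DATA) {
        // Previously spelled TX_NULL_DATA under the old unscoped enum.
        return "data carrier";
    }
    return GetTxnOutputType(type);
}

The enumerators keep their meaning; only the TxoutType:: qualifier is new, which is why the later test and fuzz hunks in this diff mechanically replace, for example, TX_PUBKEY with TxoutType::PUBKEY.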
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp index f64251fe32..b3cc8cefd9 100644 --- a/src/test/crypto_tests.cpp +++ b/src/test/crypto_tests.cpp @@ -183,7 +183,7 @@ static void TestHKDF_SHA256_32(const std::string &ikm_hex, const std::string &sa CHKDF_HMAC_SHA256_L32 hkdf32(initial_key_material.data(), initial_key_material.size(), salt_stringified); unsigned char out[32]; hkdf32.Expand32(info_stringified, out); - BOOST_CHECK(HexStr(out, out + 32) == okm_check_hex); + BOOST_CHECK(HexStr(out) == okm_check_hex); } static std::string LongTestString() @@ -743,7 +743,7 @@ BOOST_AUTO_TEST_CASE(sha256d64) in[j] = InsecureRandBits(8); } for (int j = 0; j < i; ++j) { - CHash256().Write(in + 64 * j, 64).Finalize(out1 + 32 * j); + CHash256().Write({in + 64 * j, 64}).Finalize({out1 + 32 * j, 32}); } SHA256D64(out2, in, i); BOOST_CHECK(memcmp(out1, out2, 32 * i) == 0); diff --git a/src/test/data/script_tests.json b/src/test/data/script_tests.json index c01ef307b7..724789bbf9 100644 --- a/src/test/data/script_tests.json +++ b/src/test/data/script_tests.json @@ -678,7 +678,7 @@ ["0 0x02 0x0000 0", "CHECKMULTISIGVERIFY 1", "", "OK"], ["While not really correctly DER encoded, the empty signature is allowed by"], -["STRICTENC to provide a compact way to provide a delibrately invalid signature."], +["STRICTENC to provide a compact way to provide a deliberately invalid signature."], ["0", "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", "STRICTENC", "OK"], ["0 0", "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", "STRICTENC", "OK"], diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 348b170536..0115803e58 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -4,10 +4,12 @@ // Unit tests for denial-of-service detection/prevention code +#include <arith_uint256.h> #include <banman.h> #include <chainparams.h> #include <net.h> #include <net_processing.h> +#include <pubkey.h> #include <script/sign.h> #include <script/signingprovider.h> #include <script/standard.h> @@ -82,7 +84,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // Mock an outbound peer CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", /*fInboundIn=*/ false); + CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::OUTBOUND); dummyNode1.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); @@ -98,11 +100,11 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // Test starts here { - LOCK2(cs_main, dummyNode1.cs_sendProcessing); + LOCK(dummyNode1.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); // should result in getheaders } { - LOCK2(cs_main, dummyNode1.cs_vSend); + LOCK(dummyNode1.cs_vSend); BOOST_CHECK(dummyNode1.vSendMsg.size() > 0); dummyNode1.vSendMsg.clear(); } @@ -111,17 +113,17 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // Wait 21 minutes SetMockTime(nStartTime+21*60); { - LOCK2(cs_main, dummyNode1.cs_sendProcessing); + LOCK(dummyNode1.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); // should result in getheaders } { - LOCK2(cs_main, dummyNode1.cs_vSend); + LOCK(dummyNode1.cs_vSend); BOOST_CHECK(dummyNode1.vSendMsg.size() > 0); } // Wait 3 more minutes SetMockTime(nStartTime+24*60); { - 
LOCK2(cs_main, dummyNode1.cs_sendProcessing); + LOCK(dummyNode1.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); // should result in disconnect } BOOST_CHECK(dummyNode1.fDisconnect == true); @@ -134,7 +136,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidation &peerLogic, CConnmanTest* connman) { CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE); - vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", /*fInboundIn=*/ false)); + vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND)); CNode &node = *vNodes.back(); node.SetSendVersion(PROTOCOL_VERSION); @@ -217,7 +219,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) connman->ClearNodes(); } -BOOST_AUTO_TEST_CASE(DoS_banning) +BOOST_AUTO_TEST_CASE(peer_discouragement) { auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = MakeUnique<CConnman>(0x1337, 0x1337); @@ -225,100 +227,54 @@ BOOST_AUTO_TEST_CASE(DoS_banning) banman->ClearBanned(); CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", true); + CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND); dummyNode1.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; { LOCK(cs_main); - Misbehaving(dummyNode1.GetId(), 100); // Should get banned + Misbehaving(dummyNode1.GetId(), DISCOURAGEMENT_THRESHOLD); // Should be discouraged } { - LOCK2(cs_main, dummyNode1.cs_sendProcessing); + LOCK(dummyNode1.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); } - BOOST_CHECK(banman->IsBanned(addr1)); - BOOST_CHECK(!banman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned + BOOST_CHECK(banman->IsDiscouraged(addr1)); + BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged CAddress addr2(ip(0xa0b0c002), NODE_NONE); - CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", true); + CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND); dummyNode2.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode2); dummyNode2.nVersion = 1; dummyNode2.fSuccessfullyConnected = true; { LOCK(cs_main); - Misbehaving(dummyNode2.GetId(), 50); + Misbehaving(dummyNode2.GetId(), DISCOURAGEMENT_THRESHOLD - 1); } { - LOCK2(cs_main, dummyNode2.cs_sendProcessing); + LOCK(dummyNode2.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode2)); } - BOOST_CHECK(!banman->IsBanned(addr2)); // 2 not banned yet... - BOOST_CHECK(banman->IsBanned(addr1)); // ... but 1 still should be + BOOST_CHECK(!banman->IsDiscouraged(addr2)); // 2 not discouraged yet... + BOOST_CHECK(banman->IsDiscouraged(addr1)); // ... 
but 1 still should be { LOCK(cs_main); - Misbehaving(dummyNode2.GetId(), 50); + Misbehaving(dummyNode2.GetId(), 1); // 2 reaches discouragement threshold } { - LOCK2(cs_main, dummyNode2.cs_sendProcessing); + LOCK(dummyNode2.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode2)); } - BOOST_CHECK(banman->IsBanned(addr2)); + BOOST_CHECK(banman->IsDiscouraged(addr1)); // Expect both 1 and 2 + BOOST_CHECK(banman->IsDiscouraged(addr2)); // to be discouraged now bool dummy; peerLogic->FinalizeNode(dummyNode1.GetId(), dummy); peerLogic->FinalizeNode(dummyNode2.GetId(), dummy); } -BOOST_AUTO_TEST_CASE(DoS_banscore) -{ - auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME); - auto connman = MakeUnique<CConnman>(0x1337, 0x1337); - auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool); - - banman->ClearBanned(); - gArgs.ForceSetArg("-banscore", "111"); // because 11 is my favorite number - CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, 1, CAddress(), "", true); - dummyNode1.SetSendVersion(PROTOCOL_VERSION); - peerLogic->InitializeNode(&dummyNode1); - dummyNode1.nVersion = 1; - dummyNode1.fSuccessfullyConnected = true; - { - LOCK(cs_main); - Misbehaving(dummyNode1.GetId(), 100); - } - { - LOCK2(cs_main, dummyNode1.cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); - } - BOOST_CHECK(!banman->IsBanned(addr1)); - { - LOCK(cs_main); - Misbehaving(dummyNode1.GetId(), 10); - } - { - LOCK2(cs_main, dummyNode1.cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); - } - BOOST_CHECK(!banman->IsBanned(addr1)); - { - LOCK(cs_main); - Misbehaving(dummyNode1.GetId(), 1); - } - { - LOCK2(cs_main, dummyNode1.cs_sendProcessing); - BOOST_CHECK(peerLogic->SendMessages(&dummyNode1)); - } - BOOST_CHECK(banman->IsBanned(addr1)); - gArgs.ForceSetArg("-banscore", ToString(DEFAULT_BANSCORE_THRESHOLD)); - - bool dummy; - peerLogic->FinalizeNode(dummyNode1.GetId(), dummy); -} - BOOST_AUTO_TEST_CASE(DoS_bantime) { auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME); @@ -330,7 +286,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) SetMockTime(nStartTime); // Overrides future calls to GetTime() CAddress addr(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", true); + CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND); dummyNode.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode); dummyNode.nVersion = 1; @@ -338,19 +294,13 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) { LOCK(cs_main); - Misbehaving(dummyNode.GetId(), 100); + Misbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD); } { - LOCK2(cs_main, dummyNode.cs_sendProcessing); + LOCK(dummyNode.cs_sendProcessing); BOOST_CHECK(peerLogic->SendMessages(&dummyNode)); } - BOOST_CHECK(banman->IsBanned(addr)); - - SetMockTime(nStartTime+60*60); - BOOST_CHECK(banman->IsBanned(addr)); - - SetMockTime(nStartTime+60*60*24+1); - BOOST_CHECK(!banman->IsBanned(addr)); + BOOST_CHECK(banman->IsDiscouraged(addr)); bool dummy; peerLogic->FinalizeNode(dummyNode.GetId(), dummy); @@ -366,10 +316,26 @@ static CTransactionRef RandomOrphan() return it->second.tx; } +static void MakeNewKeyWithFastRandomContext(CKey& key) +{ + std::vector<unsigned char> keydata; + keydata = 
g_insecure_rand_ctx.randbytes(32); + key.Set(keydata.data(), keydata.data() + keydata.size(), /*fCompressedIn*/ true); + assert(key.IsValid()); +} + BOOST_AUTO_TEST_CASE(DoS_mapOrphans) { + // This test had non-deterministic coverage due to + // randomly selected seeds. + // This seed is chosen so that all branches of the function + // ecdsa_signature_parse_der_lax are executed during this test. + // Specifically branches that run only when an ECDSA + // signature's R and S values have leading zeros. + g_insecure_rand_ctx = FastRandomContext(ArithToUint256(arith_uint256(33))); + CKey key; - key.MakeNewKey(true); + MakeNewKeyWithFastRandomContext(key); FillableSigningProvider keystore; BOOST_CHECK(keystore.AddKey(key)); diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 5d7065dafb..20132d5782 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -135,7 +135,7 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st // When the descriptor is hardened, evaluate with access to the private keys inside. const FlatSigningProvider& key_provider = (flags & HARDENED) ? keys_priv : keys_pub; - // Evaluate the descriptor selected by `t` in poisition `i`. + // Evaluate the descriptor selected by `t` in position `i`. FlatSigningProvider script_provider, script_provider_cached; std::vector<CScript> spks, spks_cached; DescriptorCache desc_cache; diff --git a/src/test/fuzz/addrdb.cpp b/src/test/fuzz/addrdb.cpp index 524cea83fe..16b1cb755a 100644 --- a/src/test/fuzz/addrdb.cpp +++ b/src/test/fuzz/addrdb.cpp @@ -17,19 +17,13 @@ void test_one_input(const std::vector<uint8_t>& buffer) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + // The point of this code is to exercise all CBanEntry constructors. const CBanEntry ban_entry = [&] { - switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 3)) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 2)) { case 0: return CBanEntry{fuzzed_data_provider.ConsumeIntegral<int64_t>()}; break; - case 1: - return CBanEntry{fuzzed_data_provider.ConsumeIntegral<int64_t>(), fuzzed_data_provider.PickValueInArray<BanReason>({ - BanReason::BanReasonUnknown, - BanReason::BanReasonNodeMisbehaving, - BanReason::BanReasonManuallyAdded, - })}; - break; - case 2: { + case 1: { const std::optional<CBanEntry> ban_entry = ConsumeDeserializable<CBanEntry>(fuzzed_data_provider); if (ban_entry) { return *ban_entry; @@ -39,5 +33,5 @@ void test_one_input(const std::vector<uint8_t>& buffer) } return CBanEntry{}; }(); - assert(!ban_entry.banReasonToString().empty()); + (void)ban_entry; // currently unused } diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp new file mode 100644 index 0000000000..7ea0bdd2a7 --- /dev/null +++ b/src/test/fuzz/autofile.cpp @@ -0,0 +1,72 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <optional.h> +#include <streams.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <array> +#include <cstdint> +#include <iostream> +#include <optional> +#include <string> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + FuzzedAutoFileProvider fuzzed_auto_file_provider = ConsumeAutoFile(fuzzed_data_provider); + CAutoFile auto_file = fuzzed_auto_file_provider.open(); + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 5)) { + case 0: { + std::array<uint8_t, 4096> arr{}; + try { + auto_file.read((char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + } catch (const std::ios_base::failure&) { + } + break; + } + case 1: { + const std::array<uint8_t, 4096> arr{}; + try { + auto_file.write((const char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + } catch (const std::ios_base::failure&) { + } + break; + } + case 2: { + try { + auto_file.ignore(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + } catch (const std::ios_base::failure&) { + } + break; + } + case 3: { + auto_file.fclose(); + break; + } + case 4: { + ReadFromStream(fuzzed_data_provider, auto_file); + break; + } + case 5: { + WriteToStream(fuzzed_data_provider, auto_file); + break; + } + } + } + (void)auto_file.Get(); + (void)auto_file.GetType(); + (void)auto_file.GetVersion(); + (void)auto_file.IsNull(); + if (fuzzed_data_provider.ConsumeBool()) { + FILE* f = auto_file.release(); + if (f != nullptr) { + fclose(f); + } + } +} diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp new file mode 100644 index 0000000000..fc4a1d9261 --- /dev/null +++ b/src/test/fuzz/banman.cpp @@ -0,0 +1,88 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <banman.h> +#include <fs.h> +#include <netaddress.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> +#include <util/system.h> + +#include <cstdint> +#include <limits> +#include <string> +#include <vector> + +namespace { +int64_t ConsumeBanTimeOffset(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + // Avoid signed integer overflow by capping to int32_t max: + // banman.cpp:137:73: runtime error: signed integer overflow: 1591700817 + 9223372036854775807 cannot be represented in type 'long' + return fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(std::numeric_limits<int64_t>::min(), std::numeric_limits<int32_t>::max()); +} +} // namespace + +void initialize() +{ + InitializeFuzzingContext(); +} + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + const fs::path banlist_file = GetDataDir() / "fuzzed_banlist.dat"; + fs::remove(banlist_file); + { + BanMan ban_man{banlist_file, nullptr, ConsumeBanTimeOffset(fuzzed_data_provider)}; + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 11)) { + case 0: { + ban_man.Ban(ConsumeNetAddr(fuzzed_data_provider), + ConsumeBanTimeOffset(fuzzed_data_provider), fuzzed_data_provider.ConsumeBool()); + break; + } + case 1: { + ban_man.Ban(ConsumeSubNet(fuzzed_data_provider), + ConsumeBanTimeOffset(fuzzed_data_provider), fuzzed_data_provider.ConsumeBool()); + break; + } + case 2: { + ban_man.ClearBanned(); + break; + } + case 4: { + ban_man.IsBanned(ConsumeNetAddr(fuzzed_data_provider)); + break; + } + case 5: { + ban_man.IsBanned(ConsumeSubNet(fuzzed_data_provider)); + break; + } + case 6: { + ban_man.Unban(ConsumeNetAddr(fuzzed_data_provider)); + break; + } + case 7: { + ban_man.Unban(ConsumeSubNet(fuzzed_data_provider)); + break; + } + case 8: { + banmap_t banmap; + ban_man.GetBanned(banmap); + break; + } + case 9: { + ban_man.DumpBanlist(); + break; + } + case 11: { + ban_man.Discourage(ConsumeNetAddr(fuzzed_data_provider)); + break; + } + } + } + } + fs::remove(banlist_file); +} diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp new file mode 100644 index 0000000000..e575640be5 --- /dev/null +++ b/src/test/fuzz/buffered_file.cpp @@ -0,0 +1,74 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <optional.h> +#include <streams.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <array> +#include <cstdint> +#include <iostream> +#include <optional> +#include <string> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + FuzzedFileProvider fuzzed_file_provider = ConsumeFile(fuzzed_data_provider); + std::optional<CBufferedFile> opt_buffered_file; + FILE* fuzzed_file = fuzzed_file_provider.open(); + try { + opt_buffered_file.emplace(fuzzed_file, fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096), fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096), fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeIntegral<int>()); + } catch (const std::ios_base::failure&) { + if (fuzzed_file != nullptr) { + fclose(fuzzed_file); + } + } + if (opt_buffered_file && fuzzed_file != nullptr) { + bool setpos_fail = false; + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 4)) { + case 0: { + std::array<uint8_t, 4096> arr{}; + try { + opt_buffered_file->read((char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + } catch (const std::ios_base::failure&) { + } + break; + } + case 1: { + opt_buffered_file->SetLimit(fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096)); + break; + } + case 2: { + if (!opt_buffered_file->SetPos(fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096))) { + setpos_fail = true; + } + break; + } + case 3: { + if (setpos_fail) { + // Calling FindByte(...) after a failed SetPos(...) call may result in an infinite loop. + break; + } + try { + opt_buffered_file->FindByte(fuzzed_data_provider.ConsumeIntegral<char>()); + } catch (const std::ios_base::failure&) { + } + break; + } + case 4: { + ReadFromStream(fuzzed_data_provider, *opt_buffered_file); + break; + } + } + } + opt_buffered_file->GetPos(); + opt_buffered_file->GetType(); + opt_buffered_file->GetVersion(); + } +} diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index 52dd62a145..c186bef7ae 100644 --- a/src/test/fuzz/coins_view.cpp +++ b/src/test/fuzz/coins_view.cpp @@ -278,7 +278,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) CCoinsStats stats; bool expected_code_path = false; try { - (void)GetUTXOStats(&coins_view_cache, stats); + (void)GetUTXOStats(&coins_view_cache, stats, CoinStatsHashType::HASH_SERIALIZED); } catch (const std::logic_error&) { expected_code_path = true; } diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp index 595cdf9abb..3edcf96495 100644 --- a/src/test/fuzz/crypto.cpp +++ b/src/test/fuzz/crypto.cpp @@ -44,8 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) } } - (void)hash160.Write(data.data(), data.size()); - (void)hash256.Write(data.data(), data.size()); + (void)hash160.Write(data); + (void)hash256.Write(data); (void)hmac_sha256.Write(data.data(), data.size()); (void)hmac_sha512.Write(data.data(), data.size()); (void)ripemd160.Write(data.data(), data.size()); @@ -54,9 +54,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) (void)sha512.Write(data.data(), data.size()); (void)sip_hasher.Write(data.data(), data.size()); - (void)Hash(data.begin(), data.end()); + (void)Hash(data); (void)Hash160(data); - (void)Hash160(data.begin(), data.end()); (void)sha512.Size(); break; } @@ -73,12 +72,12 @@ void 
test_one_input(const std::vector<uint8_t>& buffer) switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 8)) { case 0: { data.resize(CHash160::OUTPUT_SIZE); - hash160.Finalize(data.data()); + hash160.Finalize(data); break; } case 1: { data.resize(CHash256::OUTPUT_SIZE); - hash256.Finalize(data.data()); + hash256.Finalize(data); break; } case 2: { diff --git a/src/test/fuzz/crypto_aes256.cpp b/src/test/fuzz/crypto_aes256.cpp new file mode 100644 index 0000000000..ae14073c96 --- /dev/null +++ b/src/test/fuzz/crypto_aes256.cpp @@ -0,0 +1,30 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <crypto/aes.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cassert> +#include <cstdint> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + const std::vector<uint8_t> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, AES256_KEYSIZE); + + AES256Encrypt encrypt{key.data()}; + AES256Decrypt decrypt{key.data()}; + + while (fuzzed_data_provider.ConsumeBool()) { + const std::vector<uint8_t> plaintext = ConsumeFixedLengthByteVector(fuzzed_data_provider, AES_BLOCKSIZE); + std::vector<uint8_t> ciphertext(AES_BLOCKSIZE); + encrypt.Encrypt(ciphertext.data(), plaintext.data()); + std::vector<uint8_t> decrypted_plaintext(AES_BLOCKSIZE); + decrypt.Decrypt(decrypted_plaintext.data(), ciphertext.data()); + assert(decrypted_plaintext == plaintext); + } +} diff --git a/src/test/fuzz/crypto_aes256cbc.cpp b/src/test/fuzz/crypto_aes256cbc.cpp new file mode 100644 index 0000000000..52983c7e79 --- /dev/null +++ b/src/test/fuzz/crypto_aes256cbc.cpp @@ -0,0 +1,34 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <crypto/aes.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cassert> +#include <cstdint> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + const std::vector<uint8_t> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, AES256_KEYSIZE); + const std::vector<uint8_t> iv = ConsumeFixedLengthByteVector(fuzzed_data_provider, AES_BLOCKSIZE); + const bool pad = fuzzed_data_provider.ConsumeBool(); + + AES256CBCEncrypt encrypt{key.data(), iv.data(), pad}; + AES256CBCDecrypt decrypt{key.data(), iv.data(), pad}; + + while (fuzzed_data_provider.ConsumeBool()) { + const std::vector<uint8_t> plaintext = ConsumeRandomLengthByteVector(fuzzed_data_provider); + std::vector<uint8_t> ciphertext(plaintext.size() + AES_BLOCKSIZE); + const int encrypt_ret = encrypt.Encrypt(plaintext.data(), plaintext.size(), ciphertext.data()); + ciphertext.resize(encrypt_ret); + std::vector<uint8_t> decrypted_plaintext(ciphertext.size()); + const int decrypt_ret = decrypt.Decrypt(ciphertext.data(), ciphertext.size(), decrypted_plaintext.data()); + decrypted_plaintext.resize(decrypt_ret); + assert(decrypted_plaintext == plaintext || (!pad && plaintext.size() % AES_BLOCKSIZE != 0 && encrypt_ret == 0 && decrypt_ret == 0)); + } +} diff --git a/src/test/fuzz/crypto_chacha20.cpp b/src/test/fuzz/crypto_chacha20.cpp new file mode 100644 index 0000000000..b7438d312d --- /dev/null +++ b/src/test/fuzz/crypto_chacha20.cpp @@ -0,0 +1,50 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <crypto/chacha20.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cstdint> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + + ChaCha20 chacha20; + if (fuzzed_data_provider.ConsumeBool()) { + const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32)); + chacha20 = ChaCha20{key.data(), key.size()}; + } + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 4)) { + case 0: { + const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32)); + chacha20.SetKey(key.data(), key.size()); + break; + } + case 1: { + chacha20.SetIV(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + } + case 2: { + chacha20.Seek(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + } + case 3: { + std::vector<uint8_t> output(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + chacha20.Keystream(output.data(), output.size()); + break; + } + case 4: { + std::vector<uint8_t> output(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + const std::vector<uint8_t> input = ConsumeFixedLengthByteVector(fuzzed_data_provider, output.size()); + chacha20.Crypt(input.data(), output.data(), input.size()); + break; + } + } + } +} diff --git a/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp b/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp new file mode 100644 index 0000000000..48e4263f27 --- /dev/null +++ b/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp @@ -0,0 +1,72 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <crypto/chacha_poly_aead.h> +#include <crypto/poly1305.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cassert> +#include <cstdint> +#include <limits> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + + const std::vector<uint8_t> k1 = ConsumeFixedLengthByteVector(fuzzed_data_provider, CHACHA20_POLY1305_AEAD_KEY_LEN); + const std::vector<uint8_t> k2 = ConsumeFixedLengthByteVector(fuzzed_data_provider, CHACHA20_POLY1305_AEAD_KEY_LEN); + + ChaCha20Poly1305AEAD aead(k1.data(), k1.size(), k2.data(), k2.size()); + uint64_t seqnr_payload = 0; + uint64_t seqnr_aad = 0; + int aad_pos = 0; + size_t buffer_size = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096); + std::vector<uint8_t> in(buffer_size + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0); + std::vector<uint8_t> out(buffer_size + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0); + bool is_encrypt = fuzzed_data_provider.ConsumeBool(); + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 6)) { + case 0: { + buffer_size = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(64, 4096); + in = std::vector<uint8_t>(buffer_size + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0); + out = std::vector<uint8_t>(buffer_size + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0); + break; + } + case 1: { + (void)aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, out.data(), out.size(), in.data(), buffer_size, is_encrypt); + break; + } + case 2: { + uint32_t len = 0; + const bool ok = aead.GetLength(&len, seqnr_aad, aad_pos, in.data()); + assert(ok); + break; + } + case 3: { + seqnr_payload += 1; + aad_pos += CHACHA20_POLY1305_AEAD_AAD_LEN; + if (aad_pos + CHACHA20_POLY1305_AEAD_AAD_LEN > CHACHA20_ROUND_OUTPUT) { + aad_pos = 0; + seqnr_aad += 1; + } + break; + } + case 4: { + seqnr_payload = fuzzed_data_provider.ConsumeIntegral<int>(); + break; + } + case 5: { + seqnr_aad = fuzzed_data_provider.ConsumeIntegral<int>(); + break; + } + case 6: { + is_encrypt = fuzzed_data_provider.ConsumeBool(); + break; + } + } + } +} diff --git a/src/test/fuzz/crypto_hkdf_hmac_sha256_l32.cpp b/src/test/fuzz/crypto_hkdf_hmac_sha256_l32.cpp new file mode 100644 index 0000000000..e0a4e90c10 --- /dev/null +++ b/src/test/fuzz/crypto_hkdf_hmac_sha256_l32.cpp @@ -0,0 +1,25 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <crypto/hkdf_sha256_32.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cstdint> +#include <string> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + + const std::vector<uint8_t> initial_key_material = ConsumeRandomLengthByteVector(fuzzed_data_provider); + + CHKDF_HMAC_SHA256_L32 hkdf_hmac_sha256_l32(initial_key_material.data(), initial_key_material.size(), fuzzed_data_provider.ConsumeRandomLengthString(1024)); + while (fuzzed_data_provider.ConsumeBool()) { + std::vector<uint8_t> out(32); + hkdf_hmac_sha256_l32.Expand32(fuzzed_data_provider.ConsumeRandomLengthString(128), out.data()); + } +} diff --git a/src/test/fuzz/crypto_poly1305.cpp b/src/test/fuzz/crypto_poly1305.cpp new file mode 100644 index 0000000000..5681e6a693 --- /dev/null +++ b/src/test/fuzz/crypto_poly1305.cpp @@ -0,0 +1,22 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <crypto/poly1305.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cstdint> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + + const std::vector<uint8_t> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, POLY1305_KEYLEN); + const std::vector<uint8_t> in = ConsumeRandomLengthByteVector(fuzzed_data_provider); + + std::vector<uint8_t> tag_out(POLY1305_TAGLEN); + poly1305_auth(tag_out.data(), in.data(), in.size(), key.data()); +} diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index 82e1d55c0b..1e1807d734 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -12,7 +12,16 @@ const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; -#if defined(__AFL_COMPILER) +// Decide if main(...) should be provided: +// * AFL needs main(...) regardless of platform. +// * macOS handles __attribute__((weak)) main(...) poorly when linking +// against libFuzzer. See https://github.com/bitcoin/bitcoin/pull/18008 +// for details. 
+#if defined(__AFL_COMPILER) || !defined(MAC_OSX) +#define PROVIDE_MAIN_FUNCTION +#endif + +#if defined(PROVIDE_MAIN_FUNCTION) static bool read_stdin(std::vector<uint8_t>& data) { uint8_t buffer[1024]; @@ -44,9 +53,8 @@ extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) return 0; } -// Generally, the fuzzer will provide main(), except for AFL -#if defined(__AFL_COMPILER) -int main(int argc, char** argv) +#if defined(PROVIDE_MAIN_FUNCTION) +__attribute__((weak)) int main(int argc, char** argv) { initialize(); #ifdef __AFL_INIT diff --git a/src/test/fuzz/http_request.cpp b/src/test/fuzz/http_request.cpp index ebf89749e9..36d44e361f 100644 --- a/src/test/fuzz/http_request.cpp +++ b/src/test/fuzz/http_request.cpp @@ -7,6 +7,7 @@ #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> +#include <util/strencodings.h> #include <event2/buffer.h> #include <event2/event.h> @@ -48,7 +49,14 @@ void test_one_input(const std::vector<uint8_t>& buffer) assert(evbuf != nullptr); const std::vector<uint8_t> http_buffer = ConsumeRandomLengthByteVector(fuzzed_data_provider, 4096); evbuffer_add(evbuf, http_buffer.data(), http_buffer.size()); - if (evhttp_parse_firstline_(evreq, evbuf) != 1 || evhttp_parse_headers_(evreq, evbuf) != 1) { + // Avoid constructing requests that will be interpreted by libevent as PROXY requests to avoid triggering + // a nullptr dereference. The dereference (req->evcon->http_server) takes place in evhttp_parse_request_line + // and is a consequence of our hacky but necessary use of the internal function evhttp_parse_firstline_ in + // this fuzzing harness. The workaround is not aesthetically pleasing, but it successfully avoids the troublesome + // code path. " http:// HTTP/1.1\n" was a crashing input prior to this workaround. 
+ const std::string http_buffer_str = ToLower({http_buffer.begin(), http_buffer.end()}); + if (http_buffer_str.find(" http://") != std::string::npos || http_buffer_str.find(" https://") != std::string::npos || + evhttp_parse_firstline_(evreq, evbuf) != 1 || evhttp_parse_headers_(evreq, evbuf) != 1) { evbuffer_free(evbuf); evhttp_request_free(evreq); return; diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp index 58735545c9..955b954700 100644 --- a/src/test/fuzz/key.cpp +++ b/src/test/fuzz/key.cpp @@ -85,7 +85,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) assert(negated_key == key); } - const uint256 random_uint256 = Hash(buffer.begin(), buffer.end()); + const uint256 random_uint256 = Hash(buffer); { CKey child_key; @@ -157,25 +157,25 @@ void test_one_input(const std::vector<uint8_t>& buffer) assert(ok_add_key_pubkey); assert(fillable_signing_provider_pub.HaveKey(pubkey.GetID())); - txnouttype which_type_tx_pubkey; + TxoutType which_type_tx_pubkey; const bool is_standard_tx_pubkey = IsStandard(tx_pubkey_script, which_type_tx_pubkey); assert(is_standard_tx_pubkey); - assert(which_type_tx_pubkey == txnouttype::TX_PUBKEY); + assert(which_type_tx_pubkey == TxoutType::PUBKEY); - txnouttype which_type_tx_multisig; + TxoutType which_type_tx_multisig; const bool is_standard_tx_multisig = IsStandard(tx_multisig_script, which_type_tx_multisig); assert(is_standard_tx_multisig); - assert(which_type_tx_multisig == txnouttype::TX_MULTISIG); + assert(which_type_tx_multisig == TxoutType::MULTISIG); std::vector<std::vector<unsigned char>> v_solutions_ret_tx_pubkey; - const txnouttype outtype_tx_pubkey = Solver(tx_pubkey_script, v_solutions_ret_tx_pubkey); - assert(outtype_tx_pubkey == txnouttype::TX_PUBKEY); + const TxoutType outtype_tx_pubkey = Solver(tx_pubkey_script, v_solutions_ret_tx_pubkey); + assert(outtype_tx_pubkey == TxoutType::PUBKEY); assert(v_solutions_ret_tx_pubkey.size() == 1); assert(v_solutions_ret_tx_pubkey[0].size() == 33); std::vector<std::vector<unsigned char>> v_solutions_ret_tx_multisig; - const txnouttype outtype_tx_multisig = Solver(tx_multisig_script, v_solutions_ret_tx_multisig); - assert(outtype_tx_multisig == txnouttype::TX_MULTISIG); + const TxoutType outtype_tx_multisig = Solver(tx_multisig_script, v_solutions_ret_tx_multisig); + assert(outtype_tx_multisig == TxoutType::MULTISIG); assert(v_solutions_ret_tx_multisig.size() == 3); assert(v_solutions_ret_tx_multisig[0].size() == 1); assert(v_solutions_ret_tx_multisig[1].size() == 33); diff --git a/src/test/fuzz/load_external_block_file.cpp b/src/test/fuzz/load_external_block_file.cpp new file mode 100644 index 0000000000..d9de9d9866 --- /dev/null +++ b/src/test/fuzz/load_external_block_file.cpp @@ -0,0 +1,31 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <chainparams.h> +#include <flatfile.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> +#include <test/util/setup_common.h> +#include <validation.h> + +#include <cstdint> +#include <vector> + +void initialize() +{ + InitializeFuzzingContext(); +} + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + FuzzedFileProvider fuzzed_file_provider = ConsumeFile(fuzzed_data_provider); + FILE* fuzzed_block_file = fuzzed_file_provider.open(); + if (fuzzed_block_file == nullptr) { + return; + } + FlatFilePos flat_file_pos; + LoadExternalBlockFile(Params(), fuzzed_block_file, fuzzed_data_provider.ConsumeBool() ? &flat_file_pos : nullptr); +} diff --git a/src/test/fuzz/net_permissions.cpp b/src/test/fuzz/net_permissions.cpp index ae531f4462..8a674ac1e9 100644 --- a/src/test/fuzz/net_permissions.cpp +++ b/src/test/fuzz/net_permissions.cpp @@ -24,6 +24,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) NetPermissionFlags::PF_FORCERELAY, NetPermissionFlags::PF_NOBAN, NetPermissionFlags::PF_MEMPOOL, + NetPermissionFlags::PF_ADDR, NetPermissionFlags::PF_ISIMPLICIT, NetPermissionFlags::PF_ALL, }) : diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp index d8d53566c7..2901c704f6 100644 --- a/src/test/fuzz/netaddress.cpp +++ b/src/test/fuzz/netaddress.cpp @@ -5,41 +5,13 @@ #include <netaddress.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> #include <cassert> #include <cstdint> #include <netinet/in.h> #include <vector> -namespace { -CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept -{ - const Network network = fuzzed_data_provider.PickValueInArray({Network::NET_IPV4, Network::NET_IPV6, Network::NET_INTERNAL, Network::NET_ONION}); - if (network == Network::NET_IPV4) { - const in_addr v4_addr = { - .s_addr = fuzzed_data_provider.ConsumeIntegral<uint32_t>()}; - return CNetAddr{v4_addr}; - } else if (network == Network::NET_IPV6) { - if (fuzzed_data_provider.remaining_bytes() < 16) { - return CNetAddr{}; - } - in6_addr v6_addr = {}; - memcpy(v6_addr.s6_addr, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data(), 16); - return CNetAddr{v6_addr, fuzzed_data_provider.ConsumeIntegral<uint32_t>()}; - } else if (network == Network::NET_INTERNAL) { - CNetAddr net_addr; - net_addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32)); - return net_addr; - } else if (network == Network::NET_ONION) { - CNetAddr net_addr; - net_addr.SetSpecial(fuzzed_data_provider.ConsumeBytesAsString(32)); - return net_addr; - } else { - assert(false); - } -} -}; // namespace - void test_one_input(const std::vector<uint8_t>& buffer) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); diff --git a/src/test/fuzz/p2p_transport_deserializer.cpp b/src/test/fuzz/p2p_transport_deserializer.cpp index 57393fed45..6fba2bfaba 100644 --- a/src/test/fuzz/p2p_transport_deserializer.cpp +++ b/src/test/fuzz/p2p_transport_deserializer.cpp @@ -30,7 +30,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) pch += handled; n_bytes -= handled; if (deserializer.Complete()) { - const int64_t m_time = std::numeric_limits<int64_t>::max(); + const std::chrono::microseconds m_time{std::numeric_limits<int64_t>::max()}; const CNetMessage msg = deserializer.GetMessage(Params().MessageStart(), m_time); assert(msg.m_command.size() <= CMessageHeader::COMMAND_SIZE); 
assert(msg.m_raw_message_size <= buffer.size()); diff --git a/src/test/fuzz/policy_estimator.cpp b/src/test/fuzz/policy_estimator.cpp index 1cbf9b347f..6c94a47f3c 100644 --- a/src/test/fuzz/policy_estimator.cpp +++ b/src/test/fuzz/policy_estimator.cpp @@ -14,6 +14,11 @@ #include <string> #include <vector> +void initialize() +{ + InitializeFuzzingContext(); +} + void test_one_input(const std::vector<uint8_t>& buffer) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); @@ -66,4 +71,10 @@ void test_one_input(const std::vector<uint8_t>& buffer) (void)block_policy_estimator.estimateSmartFee(fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeBool() ? &fee_calculation : nullptr, fuzzed_data_provider.ConsumeBool()); (void)block_policy_estimator.HighestTargetTracked(fuzzed_data_provider.PickValueInArray({FeeEstimateHorizon::SHORT_HALFLIFE, FeeEstimateHorizon::MED_HALFLIFE, FeeEstimateHorizon::LONG_HALFLIFE})); } + { + FuzzedAutoFileProvider fuzzed_auto_file_provider = ConsumeAutoFile(fuzzed_data_provider); + CAutoFile fuzzed_auto_file = fuzzed_auto_file_provider.open(); + block_policy_estimator.Write(fuzzed_auto_file); + block_policy_estimator.Read(fuzzed_auto_file); + } } diff --git a/src/test/fuzz/policy_estimator_io.cpp b/src/test/fuzz/policy_estimator_io.cpp new file mode 100644 index 0000000000..0edcf201c7 --- /dev/null +++ b/src/test/fuzz/policy_estimator_io.cpp @@ -0,0 +1,28 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <policy/fees.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> + +#include <cstdint> +#include <vector> + +void initialize() +{ + InitializeFuzzingContext(); +} + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + FuzzedAutoFileProvider fuzzed_auto_file_provider = ConsumeAutoFile(fuzzed_data_provider); + CAutoFile fuzzed_auto_file = fuzzed_auto_file_provider.open(); + // Re-using block_policy_estimator across runs to avoid costly creation of CBlockPolicyEstimator object. 
+ static CBlockPolicyEstimator block_policy_estimator; + if (block_policy_estimator.Read(fuzzed_auto_file)) { + block_policy_estimator.Write(fuzzed_auto_file); + } +} diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index 2fa751b987..677b87a47a 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -34,11 +34,11 @@ void ProcessMessage( CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, - int64_t nTimeReceived, + const std::chrono::microseconds time_received, const CChainParams& chainparams, ChainstateManager& chainman, CTxMemPool& mempool, - CConnman* connman, + CConnman& connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc); @@ -80,16 +80,16 @@ void test_one_input(const std::vector<uint8_t>& buffer) return; } CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION}; - CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, false).release(); + CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND).release(); p2p_node.fSuccessfullyConnected = true; p2p_node.nVersion = PROTOCOL_VERSION; p2p_node.SetSendVersion(PROTOCOL_VERSION); connman.AddTestNode(p2p_node); g_setup->m_node.peer_logic->InitializeNode(&p2p_node); try { - ProcessMessage(p2p_node, random_message_type, random_bytes_data_stream, GetTimeMillis(), + ProcessMessage(p2p_node, random_message_type, random_bytes_data_stream, GetTime<std::chrono::microseconds>(), Params(), *g_setup->m_node.chainman, *g_setup->m_node.mempool, - g_setup->m_node.connman.get(), g_setup->m_node.banman.get(), + *g_setup->m_node.connman, g_setup->m_node.banman.get(), std::atomic<bool>{false}); } catch (const std::ios_base::failure&) { } diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 91ebf9fb1b..ef427442e9 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -44,9 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) const auto num_peers_to_add = fuzzed_data_provider.ConsumeIntegralInRange(1, 3); for (int i = 0; i < num_peers_to_add; ++i) { const ServiceFlags service_flags = ServiceFlags(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); - const bool inbound{fuzzed_data_provider.ConsumeBool()}; - const bool block_relay_only{fuzzed_data_provider.ConsumeBool()}; - peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, inbound, block_relay_only).release()); + const ConnectionType conn_type = fuzzed_data_provider.PickValueInArray({ConnectionType::INBOUND, ConnectionType::OUTBOUND, ConnectionType::MANUAL, ConnectionType::FEELER, ConnectionType::BLOCK_RELAY, ConnectionType::ADDR_FETCH}); + peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, conn_type).release()); CNode& p2p_node = *peers.back(); p2p_node.fSuccessfullyConnected = true; diff --git a/src/test/fuzz/psbt.cpp b/src/test/fuzz/psbt.cpp index 64328fb66e..908e2b16f2 100644 --- a/src/test/fuzz/psbt.cpp +++ 
b/src/test/fuzz/psbt.cpp @@ -39,7 +39,6 @@ void test_one_input(const std::vector<uint8_t>& buffer) } (void)psbt.IsNull(); - (void)psbt.IsSane(); Optional<CMutableTransaction> tx = psbt.tx; if (tx) { @@ -50,7 +49,6 @@ void test_one_input(const std::vector<uint8_t>& buffer) for (const PSBTInput& input : psbt.inputs) { (void)PSBTInputSigned(input); (void)input.IsNull(); - (void)input.IsSane(); } for (const PSBTOutput& output : psbt.outputs) { diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp index 933cf9049d..85aac6ac7a 100644 --- a/src/test/fuzz/script.cpp +++ b/src/test/fuzz/script.cpp @@ -48,7 +48,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) if (CompressScript(script, compressed)) { const unsigned int size = compressed[0]; compressed.erase(compressed.begin()); - assert(size >= 0 && size <= 5); + assert(size <= 5); CScript decompressed_script; const bool ok = DecompressScript(decompressed_script, size, compressed); assert(ok); @@ -58,7 +58,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) CTxDestination address; (void)ExtractDestination(script, address); - txnouttype type_ret; + TxoutType type_ret; std::vector<CTxDestination> addresses; int required_ret; (void)ExtractDestinations(script, type_ret, addresses, required_ret); @@ -72,7 +72,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) (void)IsSolvable(signing_provider, script); - txnouttype which_type; + TxoutType which_type; (void)IsStandard(script, which_type); (void)RecursiveDynamicUsage(script); diff --git a/src/test/fuzz/scriptnum_ops.cpp b/src/test/fuzz/scriptnum_ops.cpp index f4e079fb89..68c1ae58ca 100644 --- a/src/test/fuzz/scriptnum_ops.cpp +++ b/src/test/fuzz/scriptnum_ops.cpp @@ -33,7 +33,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) case 0: { const int64_t i = fuzzed_data_provider.ConsumeIntegral<int64_t>(); assert((script_num == i) != (script_num != i)); - assert((script_num <= i) != script_num > i); + assert((script_num <= i) != (script_num > i)); assert((script_num >= i) != (script_num < i)); // Avoid signed integer overflow: // script/script.h:264:93: runtime error: signed integer overflow: -2261405121394637306 + -9223372036854775802 cannot be represented in type 'long' diff --git a/src/test/fuzz/signature_checker.cpp b/src/test/fuzz/signature_checker.cpp index 4a8c7a63af..3aaeb66649 100644 --- a/src/test/fuzz/signature_checker.cpp +++ b/src/test/fuzz/signature_checker.cpp @@ -28,17 +28,17 @@ public: { } - virtual bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const + bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return m_fuzzed_data_provider.ConsumeBool(); } - virtual bool CheckLockTime(const CScriptNum& nLockTime) const + bool CheckLockTime(const CScriptNum& nLockTime) const override { return m_fuzzed_data_provider.ConsumeBool(); } - virtual bool CheckSequence(const CScriptNum& nSequence) const + bool CheckSequence(const CScriptNum& nSequence) const override { return m_fuzzed_data_provider.ConsumeBool(); } diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h index 1c1b2cd254..9f9552edb9 100644 --- a/src/test/fuzz/util.h +++ b/src/test/fuzz/util.h @@ -8,8 +8,11 @@ #include <amount.h> #include <arith_uint256.h> #include <attributes.h> +#include <chainparamsbase.h> #include <coins.h> #include <consensus/consensus.h> +#include 
<netaddress.h> +#include <netbase.h> #include <primitives/transaction.h> #include <script/script.h> #include <script/standard.h> @@ -17,12 +20,14 @@ #include <streams.h> #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> +#include <test/util/setup_common.h> #include <txmempool.h> #include <uint256.h> #include <version.h> #include <algorithm> #include <cstdint> +#include <cstdio> #include <optional> #include <string> #include <vector> @@ -228,4 +233,241 @@ NODISCARD inline std::vector<uint8_t> ConsumeFixedLengthByteVector(FuzzedDataPro return result; } +CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + const Network network = fuzzed_data_provider.PickValueInArray({Network::NET_IPV4, Network::NET_IPV6, Network::NET_INTERNAL, Network::NET_ONION}); + CNetAddr net_addr; + if (network == Network::NET_IPV4) { + const in_addr v4_addr = { + .s_addr = fuzzed_data_provider.ConsumeIntegral<uint32_t>()}; + net_addr = CNetAddr{v4_addr}; + } else if (network == Network::NET_IPV6) { + if (fuzzed_data_provider.remaining_bytes() >= 16) { + in6_addr v6_addr = {}; + memcpy(v6_addr.s6_addr, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data(), 16); + net_addr = CNetAddr{v6_addr, fuzzed_data_provider.ConsumeIntegral<uint32_t>()}; + } + } else if (network == Network::NET_INTERNAL) { + net_addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32)); + } else if (network == Network::NET_ONION) { + net_addr.SetSpecial(fuzzed_data_provider.ConsumeBytesAsString(32)); + } + return net_addr; +} + +CSubNet ConsumeSubNet(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + return {ConsumeNetAddr(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int32_t>()}; +} + +void InitializeFuzzingContext(const std::string& chain_name = CBaseChainParams::REGTEST) +{ + static const BasicTestingSetup basic_testing_setup{chain_name, {"-nodebuglogfile"}}; +} + +class FuzzedFileProvider +{ + FuzzedDataProvider& m_fuzzed_data_provider; + int64_t m_offset = 0; + +public: + FuzzedFileProvider(FuzzedDataProvider& fuzzed_data_provider) : m_fuzzed_data_provider{fuzzed_data_provider} + { + } + + FILE* open() + { + if (m_fuzzed_data_provider.ConsumeBool()) { + return nullptr; + } + std::string mode; + switch (m_fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 5)) { + case 0: { + mode = "r"; + break; + } + case 1: { + mode = "r+"; + break; + } + case 2: { + mode = "w"; + break; + } + case 3: { + mode = "w+"; + break; + } + case 4: { + mode = "a"; + break; + } + case 5: { + mode = "a+"; + break; + } + } +#ifdef _GNU_SOURCE + const cookie_io_functions_t io_hooks = { + FuzzedFileProvider::read, + FuzzedFileProvider::write, + FuzzedFileProvider::seek, + FuzzedFileProvider::close, + }; + return fopencookie(this, mode.c_str(), io_hooks); +#else + (void)mode; + return nullptr; +#endif + } + + static ssize_t read(void* cookie, char* buf, size_t size) + { + FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie; + if (buf == nullptr || size == 0 || fuzzed_file->m_fuzzed_data_provider.ConsumeBool()) { + return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1; + } + const std::vector<uint8_t> random_bytes = fuzzed_file->m_fuzzed_data_provider.ConsumeBytes<uint8_t>(size); + if (random_bytes.empty()) { + return 0; + } + std::memcpy(buf, random_bytes.data(), random_bytes.size()); + if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)random_bytes.size())) { + return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 
0 : -1; + } + fuzzed_file->m_offset += random_bytes.size(); + return random_bytes.size(); + } + + static ssize_t write(void* cookie, const char* buf, size_t size) + { + FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie; + const ssize_t n = fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(0, size); + if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)n)) { + return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1; + } + fuzzed_file->m_offset += n; + return n; + } + + static int seek(void* cookie, int64_t* offset, int whence) + { + assert(whence == SEEK_SET || whence == SEEK_CUR); // SEEK_END not implemented yet. + FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie; + int64_t new_offset = 0; + if (whence == SEEK_SET) { + new_offset = *offset; + } else if (whence == SEEK_CUR) { + if (AdditionOverflow(fuzzed_file->m_offset, *offset)) { + return -1; + } + new_offset = fuzzed_file->m_offset + *offset; + } + if (new_offset < 0) { + return -1; + } + fuzzed_file->m_offset = new_offset; + *offset = new_offset; + return fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int>(-1, 0); + } + + static int close(void* cookie) + { + FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie; + return fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int>(-1, 0); + } +}; + +NODISCARD inline FuzzedFileProvider ConsumeFile(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + return {fuzzed_data_provider}; +} + +class FuzzedAutoFileProvider +{ + FuzzedDataProvider& m_fuzzed_data_provider; + FuzzedFileProvider m_fuzzed_file_provider; + +public: + FuzzedAutoFileProvider(FuzzedDataProvider& fuzzed_data_provider) : m_fuzzed_data_provider{fuzzed_data_provider}, m_fuzzed_file_provider{fuzzed_data_provider} + { + } + + CAutoFile open() + { + return {m_fuzzed_file_provider.open(), m_fuzzed_data_provider.ConsumeIntegral<int>(), m_fuzzed_data_provider.ConsumeIntegral<int>()}; + } +}; + +NODISCARD inline FuzzedAutoFileProvider ConsumeAutoFile(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + return {fuzzed_data_provider}; +} + +#define WRITE_TO_STREAM_CASE(id, type, consume) \ + case id: { \ + type o = consume; \ + stream << o; \ + break; \ + } +template <typename Stream> +void WriteToStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) noexcept +{ + while (fuzzed_data_provider.ConsumeBool()) { + try { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 13)) { + WRITE_TO_STREAM_CASE(0, bool, fuzzed_data_provider.ConsumeBool()) + WRITE_TO_STREAM_CASE(1, char, fuzzed_data_provider.ConsumeIntegral<char>()) + WRITE_TO_STREAM_CASE(2, int8_t, fuzzed_data_provider.ConsumeIntegral<int8_t>()) + WRITE_TO_STREAM_CASE(3, uint8_t, fuzzed_data_provider.ConsumeIntegral<uint8_t>()) + WRITE_TO_STREAM_CASE(4, int16_t, fuzzed_data_provider.ConsumeIntegral<int16_t>()) + WRITE_TO_STREAM_CASE(5, uint16_t, fuzzed_data_provider.ConsumeIntegral<uint16_t>()) + WRITE_TO_STREAM_CASE(6, int32_t, fuzzed_data_provider.ConsumeIntegral<int32_t>()) + WRITE_TO_STREAM_CASE(7, uint32_t, fuzzed_data_provider.ConsumeIntegral<uint32_t>()) + WRITE_TO_STREAM_CASE(8, int64_t, fuzzed_data_provider.ConsumeIntegral<int64_t>()) + WRITE_TO_STREAM_CASE(9, uint64_t, fuzzed_data_provider.ConsumeIntegral<uint64_t>()) + WRITE_TO_STREAM_CASE(10, float, fuzzed_data_provider.ConsumeFloatingPoint<float>()) + WRITE_TO_STREAM_CASE(11, double, fuzzed_data_provider.ConsumeFloatingPoint<double>()) + WRITE_TO_STREAM_CASE(12, std::string, 
fuzzed_data_provider.ConsumeRandomLengthString(32)) + WRITE_TO_STREAM_CASE(13, std::vector<char>, ConsumeRandomLengthIntegralVector<char>(fuzzed_data_provider)) + } + } catch (const std::ios_base::failure&) { + break; + } + } +} + +#define READ_FROM_STREAM_CASE(id, type) \ + case id: { \ + type o; \ + stream >> o; \ + break; \ + } +template <typename Stream> +void ReadFromStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) noexcept +{ + while (fuzzed_data_provider.ConsumeBool()) { + try { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 13)) { + READ_FROM_STREAM_CASE(0, bool) + READ_FROM_STREAM_CASE(1, char) + READ_FROM_STREAM_CASE(2, int8_t) + READ_FROM_STREAM_CASE(3, uint8_t) + READ_FROM_STREAM_CASE(4, int16_t) + READ_FROM_STREAM_CASE(5, uint16_t) + READ_FROM_STREAM_CASE(6, int32_t) + READ_FROM_STREAM_CASE(7, uint32_t) + READ_FROM_STREAM_CASE(8, int64_t) + READ_FROM_STREAM_CASE(9, uint64_t) + READ_FROM_STREAM_CASE(10, float) + READ_FROM_STREAM_CASE(11, double) + READ_FROM_STREAM_CASE(12, std::string) + READ_FROM_STREAM_CASE(13, std::vector<char>) + } + } catch (const std::ios_base::failure&) { + break; + } + } +} + #endif // BITCOIN_TEST_FUZZ_UTIL_H diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp index fd35537c77..4e4c44266a 100644 --- a/src/test/key_tests.cpp +++ b/src/test/key_tests.cpp @@ -77,7 +77,7 @@ BOOST_AUTO_TEST_CASE(key_test1) for (int n=0; n<16; n++) { std::string strMsg = strprintf("Very secret message %i: 11", n); - uint256 hashMsg = Hash(strMsg.begin(), strMsg.end()); + uint256 hashMsg = Hash(strMsg); // normal signatures @@ -134,7 +134,7 @@ BOOST_AUTO_TEST_CASE(key_test1) std::vector<unsigned char> detsig, detsigc; std::string strMsg = "Very deterministic message"; - uint256 hashMsg = Hash(strMsg.begin(), strMsg.end()); + uint256 hashMsg = Hash(strMsg); BOOST_CHECK(key1.Sign(hashMsg, detsig)); BOOST_CHECK(key1C.Sign(hashMsg, detsigc)); BOOST_CHECK(detsig == detsigc); @@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests) // When entropy is specified, we should see at least one high R signature within 20 signatures CKey key = DecodeSecret(strSecret1); std::string msg = "A message to be signed"; - uint256 msg_hash = Hash(msg.begin(), msg.end()); + uint256 msg_hash = Hash(msg); std::vector<unsigned char> sig; bool found = false; @@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests) for (int i = 0; i < 256; ++i) { sig.clear(); std::string msg = "A message to be signed" + ToString(i); - msg_hash = Hash(msg.begin(), msg.end()); + msg_hash = Hash(msg); BOOST_CHECK(key.Sign(msg_hash, sig)); found = sig[3] == 0x20; BOOST_CHECK(sig.size() <= 70); @@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation) std::string str = "Bitcoin key verification\n"; GetRandBytes(rnd, sizeof(rnd)); uint256 hash; - CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin()); + CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash); // import the static test key CKey key = DecodeSecret(strSecret1C); diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp index 03dce552fc..9bc7cc5dab 100644 --- a/src/test/merkle_tests.cpp +++ b/src/test/merkle_tests.cpp @@ -13,9 +13,9 @@ static uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vecto uint256 hash = leaf; for (std::vector<uint256>::const_iterator it = vMerkleBranch.begin(); it != vMerkleBranch.end(); ++it) { if (nIndex & 1) { - hash = Hash(it->begin(), it->end(), hash.begin(), hash.end()); + hash = Hash(*it, hash); } 
else { - hash = Hash(hash.begin(), hash.end(), it->begin(), it->end()); + hash = Hash(hash, *it); } nIndex >>= 1; } @@ -60,7 +60,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot } } mutated |= (inner[level] == h); - CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + CHash256().Write(inner[level]).Write(h).Finalize(h); } // Store the resulting hash at inner position level. inner[level] = h; @@ -86,7 +86,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot if (pbranch && matchh) { pbranch->push_back(h); } - CHash256().Write(h.begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + CHash256().Write(h).Write(h).Finalize(h); // Increment count to the value it would have if two entries at this // level had existed. count += (((uint32_t)1) << level); @@ -101,7 +101,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot matchh = true; } } - CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + CHash256().Write(inner[level]).Write(h).Finalize(h); level++; } } @@ -144,8 +144,7 @@ static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::ve // Two identical hashes at the end of the list at a particular level. mutated = true; } - vMerkleTree.push_back(Hash(vMerkleTree[j+i].begin(), vMerkleTree[j+i].end(), - vMerkleTree[j+i2].begin(), vMerkleTree[j+i2].end())); + vMerkleTree.push_back(Hash(vMerkleTree[j+i], vMerkleTree[j+i2])); } j += nSize; } diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index 11ff7b833b..62a0dc4241 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -253,7 +253,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) pblock->nNonce = blockinfo[i].nonce; } std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock); - BOOST_CHECK(EnsureChainman(m_node).ProcessNewBlock(chainparams, shared_pblock, true, nullptr)); + BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(chainparams, shared_pblock, true, nullptr)); pblock->hashPrevBlock = pblock->GetHash(); } diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp index dd2890c134..e14d2dd72d 100644 --- a/src/test/multisig_tests.cpp +++ b/src/test/multisig_tests.cpp @@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(multisig_IsStandard) for (int i = 0; i < 4; i++) key[i].MakeNewKey(true); - txnouttype whichType; + TxoutType whichType; CScript a_and_b; a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG; diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 84bf593497..317000c771 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -6,6 +6,7 @@ #include <addrman.h> #include <chainparams.h> #include <clientversion.h> +#include <cstdint> #include <net.h> #include <netbase.h> #include <serialize.h> @@ -83,10 +84,10 @@ BOOST_FIXTURE_TEST_SUITE(net_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(cnode_listen_port) { // test default - unsigned short port = GetListenPort(); + uint16_t port = GetListenPort(); BOOST_CHECK(port == Params().GetDefaultPort()); // test set port - unsigned short altPort = 12345; + uint16_t altPort = 12345; BOOST_CHECK(gArgs.SoftSetArg("-port", ToString(altPort))); port = GetListenPort(); BOOST_CHECK(port == altPort); @@ -179,17 +180,12 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK); std::string pszDest; - bool fInboundIn 
= false; - // Test that fFeeler is false by default. - std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, fInboundIn); - BOOST_CHECK(pnode1->fInbound == false); - BOOST_CHECK(pnode1->fFeeler == false); + std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, ConnectionType::OUTBOUND); + BOOST_CHECK(pnode1->IsInboundConn() == false); - fInboundIn = true; - std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, fInboundIn); - BOOST_CHECK(pnode2->fInbound == true); - BOOST_CHECK(pnode2->fFeeler == false); + std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, ConnectionType::INBOUND); + BOOST_CHECK(pnode2->IsInboundConn() == true); } // prior to PR #14728, this test triggers an undefined behavior @@ -213,7 +209,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) in_addr ipv4AddrPeer; ipv4AddrPeer.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK); - std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, false); + std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp index 0fbf257f0e..49073ea657 100644 --- a/src/test/netbase_tests.cpp +++ b/src/test/netbase_tests.cpp @@ -138,6 +138,14 @@ BOOST_AUTO_TEST_CASE(onioncat_test) } +BOOST_AUTO_TEST_CASE(embedded_test) +{ + CNetAddr addr1(ResolveIP("1.2.3.4")); + CNetAddr addr2(ResolveIP("::FFFF:0102:0304")); + BOOST_CHECK(addr2.IsIPv4()); + BOOST_CHECK_EQUAL(addr1.ToString(), addr2.ToString()); +} + BOOST_AUTO_TEST_CASE(subnet_test) { @@ -158,12 +166,13 @@ BOOST_AUTO_TEST_CASE(subnet_test) BOOST_CHECK(ResolveSubNet("1.2.2.1/24").Match(ResolveIP("1.2.2.4"))); BOOST_CHECK(ResolveSubNet("1.2.2.110/31").Match(ResolveIP("1.2.2.111"))); BOOST_CHECK(ResolveSubNet("1.2.2.20/26").Match(ResolveIP("1.2.2.63"))); - // All-Matching IPv6 Matches arbitrary IPv4 and IPv6 + // All-Matching IPv6 Matches arbitrary IPv6 BOOST_CHECK(ResolveSubNet("::/0").Match(ResolveIP("1:2:3:4:5:6:7:1234"))); // But not `::` or `0.0.0.0` because they are considered invalid addresses BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("::"))); BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("0.0.0.0"))); - BOOST_CHECK(ResolveSubNet("::/0").Match(ResolveIP("1.2.3.4"))); + // Addresses from one network (IPv4) don't belong to subnets of another network (IPv6) + BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("1.2.3.4"))); // All-Matching IPv4 does not Match IPv6 BOOST_CHECK(!ResolveSubNet("0.0.0.0/0").Match(ResolveIP("1:2:3:4:5:6:7:1234"))); // Invalid subnets Match nothing (not even invalid addresses) @@ -383,7 +392,7 @@ BOOST_AUTO_TEST_CASE(netpermissions_test) BOOST_CHECK(!NetWhitebindPermissions::TryParse("bloom,forcerelay,oopsie@1.2.3.4:32", whitebindPermissions, error)); BOOST_CHECK(error.original.find("Invalid P2P permission") != std::string::npos); - // Check whitelist error + // Check netmask error BOOST_CHECK(!NetWhitelistPermissions::TryParse("bloom,forcerelay,noban@1.2.3.4:32", whitelistPermissions, error)); BOOST_CHECK(error.original.find("Invalid 
netmask specified in -whitelist") != std::string::npos); @@ -397,12 +406,14 @@ BOOST_AUTO_TEST_CASE(netpermissions_test) BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay,mempool@1.2.3.4/32", whitelistPermissions, error)); const auto strings = NetPermissions::ToStrings(PF_ALL); - BOOST_CHECK_EQUAL(strings.size(), 5U); + BOOST_CHECK_EQUAL(strings.size(), 7U); BOOST_CHECK(std::find(strings.begin(), strings.end(), "bloomfilter") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "forcerelay") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "relay") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "noban") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "mempool") != strings.end()); + BOOST_CHECK(std::find(strings.begin(), strings.end(), "download") != strings.end()); + BOOST_CHECK(std::find(strings.begin(), strings.end(), "addr") != strings.end()); } BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters) diff --git a/src/test/policy_fee_tests.cpp b/src/test/policy_fee_tests.cpp new file mode 100644 index 0000000000..6d8872b11e --- /dev/null +++ b/src/test/policy_fee_tests.cpp @@ -0,0 +1,34 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <amount.h> +#include <policy/fees.h> + +#include <test/util/setup_common.h> + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(policy_fee_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(FeeRounder) +{ + FeeFilterRounder fee_rounder{CFeeRate{1000}}; + + // check that 1000 rounds to 974 or 1071 + std::set<CAmount> results; + while (results.size() < 2) { + results.emplace(fee_rounder.round(1000)); + } + BOOST_CHECK_EQUAL(*results.begin(), 974); + BOOST_CHECK_EQUAL(*++results.begin(), 1071); + + // check that negative amounts rounds to 0 + BOOST_CHECK_EQUAL(fee_rounder.round(-0), 0); + BOOST_CHECK_EQUAL(fee_rounder.round(-1), 0); + + // check that MAX_MONEY rounds to 9170997 + BOOST_CHECK_EQUAL(fee_rounder.round(MAX_MONEY), 9170997); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp index b185d3b4ac..87678af4d1 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -31,35 +31,35 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_success) CScript s; std::vector<std::vector<unsigned char> > solutions; - // TX_PUBKEY + // TxoutType::PUBKEY s.clear(); s << ToByteVector(pubkeys[0]) << OP_CHECKSIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_PUBKEY); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::PUBKEY); BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(pubkeys[0])); - // TX_PUBKEYHASH + // TxoutType::PUBKEYHASH s.clear(); s << OP_DUP << OP_HASH160 << ToByteVector(pubkeys[0].GetID()) << OP_EQUALVERIFY << OP_CHECKSIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_PUBKEYHASH); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::PUBKEYHASH); BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(pubkeys[0].GetID())); - // TX_SCRIPTHASH + // TxoutType::SCRIPTHASH CScript redeemScript(s); // initialize with leftover P2PKH script s.clear(); s << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_SCRIPTHASH); + 
BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::SCRIPTHASH); BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(CScriptID(redeemScript))); - // TX_MULTISIG + // TxoutType::MULTISIG s.clear(); s << OP_1 << ToByteVector(pubkeys[0]) << ToByteVector(pubkeys[1]) << OP_2 << OP_CHECKMULTISIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_MULTISIG); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::MULTISIG); BOOST_CHECK_EQUAL(solutions.size(), 4U); BOOST_CHECK(solutions[0] == std::vector<unsigned char>({1})); BOOST_CHECK(solutions[1] == ToByteVector(pubkeys[0])); @@ -72,7 +72,7 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_success) ToByteVector(pubkeys[1]) << ToByteVector(pubkeys[2]) << OP_3 << OP_CHECKMULTISIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_MULTISIG); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::MULTISIG); BOOST_CHECK_EQUAL(solutions.size(), 5U); BOOST_CHECK(solutions[0] == std::vector<unsigned char>({2})); BOOST_CHECK(solutions[1] == ToByteVector(pubkeys[0])); @@ -80,37 +80,37 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_success) BOOST_CHECK(solutions[3] == ToByteVector(pubkeys[2])); BOOST_CHECK(solutions[4] == std::vector<unsigned char>({3})); - // TX_NULL_DATA + // TxoutType::NULL_DATA s.clear(); s << OP_RETURN << std::vector<unsigned char>({0}) << std::vector<unsigned char>({75}) << std::vector<unsigned char>({255}); - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NULL_DATA); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NULL_DATA); BOOST_CHECK_EQUAL(solutions.size(), 0U); - // TX_WITNESS_V0_KEYHASH + // TxoutType::WITNESS_V0_KEYHASH s.clear(); s << OP_0 << ToByteVector(pubkeys[0].GetID()); - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_WITNESS_V0_KEYHASH); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::WITNESS_V0_KEYHASH); BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(pubkeys[0].GetID())); - // TX_WITNESS_V0_SCRIPTHASH + // TxoutType::WITNESS_V0_SCRIPTHASH uint256 scriptHash; CSHA256().Write(&redeemScript[0], redeemScript.size()) .Finalize(scriptHash.begin()); s.clear(); s << OP_0 << ToByteVector(scriptHash); - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_WITNESS_V0_SCRIPTHASH); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::WITNESS_V0_SCRIPTHASH); BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(scriptHash)); - // TX_NONSTANDARD + // TxoutType::NONSTANDARD s.clear(); s << OP_9 << OP_ADD << OP_11 << OP_EQUAL; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); } BOOST_AUTO_TEST_CASE(script_standard_Solver_failure) @@ -123,50 +123,50 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_failure) CScript s; std::vector<std::vector<unsigned char> > solutions; - // TX_PUBKEY with incorrectly sized pubkey + // TxoutType::PUBKEY with incorrectly sized pubkey s.clear(); s << std::vector<unsigned char>(30, 0x01) << OP_CHECKSIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_PUBKEYHASH with incorrectly sized key hash + // TxoutType::PUBKEYHASH with incorrectly sized key hash s.clear(); s << OP_DUP << OP_HASH160 << ToByteVector(pubkey) << OP_EQUALVERIFY << OP_CHECKSIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_SCRIPTHASH with incorrectly sized script hash + // TxoutType::SCRIPTHASH with incorrectly sized script hash 
s.clear(); s << OP_HASH160 << std::vector<unsigned char>(21, 0x01) << OP_EQUAL; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_MULTISIG 0/2 + // TxoutType::MULTISIG 0/2 s.clear(); s << OP_0 << ToByteVector(pubkey) << OP_1 << OP_CHECKMULTISIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_MULTISIG 2/1 + // TxoutType::MULTISIG 2/1 s.clear(); s << OP_2 << ToByteVector(pubkey) << OP_1 << OP_CHECKMULTISIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_MULTISIG n = 2 with 1 pubkey + // TxoutType::MULTISIG n = 2 with 1 pubkey s.clear(); s << OP_1 << ToByteVector(pubkey) << OP_2 << OP_CHECKMULTISIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_MULTISIG n = 1 with 0 pubkeys + // TxoutType::MULTISIG n = 1 with 0 pubkeys s.clear(); s << OP_1 << OP_1 << OP_CHECKMULTISIG; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_NULL_DATA with other opcodes + // TxoutType::NULL_DATA with other opcodes s.clear(); s << OP_RETURN << std::vector<unsigned char>({75}) << OP_ADD; - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); - // TX_WITNESS with incorrect program size + // TxoutType::WITNESS_UNKNOWN with incorrect program size s.clear(); s << OP_0 << std::vector<unsigned char>(19, 0x01); - BOOST_CHECK_EQUAL(Solver(s, solutions), TX_NONSTANDARD); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::NONSTANDARD); } BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) @@ -179,21 +179,21 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) CScript s; CTxDestination address; - // TX_PUBKEY + // TxoutType::PUBKEY s.clear(); s << ToByteVector(pubkey) << OP_CHECKSIG; BOOST_CHECK(ExtractDestination(s, address)); BOOST_CHECK(boost::get<PKHash>(&address) && *boost::get<PKHash>(&address) == PKHash(pubkey)); - // TX_PUBKEYHASH + // TxoutType::PUBKEYHASH s.clear(); s << OP_DUP << OP_HASH160 << ToByteVector(pubkey.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG; BOOST_CHECK(ExtractDestination(s, address)); BOOST_CHECK(boost::get<PKHash>(&address) && *boost::get<PKHash>(&address) == PKHash(pubkey)); - // TX_SCRIPTHASH + // TxoutType::SCRIPTHASH CScript redeemScript(s); // initialize with leftover P2PKH script s.clear(); s << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL; @@ -201,25 +201,25 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) BOOST_CHECK(boost::get<ScriptHash>(&address) && *boost::get<ScriptHash>(&address) == ScriptHash(redeemScript)); - // TX_MULTISIG + // TxoutType::MULTISIG s.clear(); s << OP_1 << ToByteVector(pubkey) << OP_1 << OP_CHECKMULTISIG; BOOST_CHECK(!ExtractDestination(s, address)); - // TX_NULL_DATA + // TxoutType::NULL_DATA s.clear(); s << OP_RETURN << std::vector<unsigned char>({75}); BOOST_CHECK(!ExtractDestination(s, address)); - // TX_WITNESS_V0_KEYHASH + // TxoutType::WITNESS_V0_KEYHASH s.clear(); s << OP_0 << ToByteVector(pubkey.GetID()); BOOST_CHECK(ExtractDestination(s, address)); WitnessV0KeyHash keyhash; - CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(keyhash.begin()); + CHash160().Write(pubkey).Finalize(keyhash); 
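Several hunks in this section (key_tests, merkle_tests, settings_tests, util_tests, script_tests, and the CHash160().Write(pubkey).Finalize(keyhash) call just above) swap pointer-plus-size or begin()/end() hashing calls for overloads that take whole objects, such as Hash(strMsg) and CHash256().Write(h). The following rough, self-contained sketch shows why a span-style byte view removes that call-site bookkeeping; ByteSpan and ToyHasher are invented names, and the FNV-1a mixing is a toy placeholder, not the SHA-256/RIPEMD-160 used by the real classes.

// Editor's sketch: a span-like byte view lets one Write() overload accept strings,
// vectors and fixed-size arrays directly, with no begin()/size() noise at call sites.
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct ByteSpan {
    const uint8_t* ptr;
    size_t len;
    ByteSpan(const std::string& s) : ptr(reinterpret_cast<const uint8_t*>(s.data())), len(s.size()) {}
    ByteSpan(const std::vector<uint8_t>& v) : ptr(v.data()), len(v.size()) {}
    template <size_t N>
    ByteSpan(const uint8_t (&a)[N]) : ptr(a), len(N) {}
};

class ToyHasher {
    uint64_t m_state = 14695981039346656037ull; // FNV-1a offset basis
public:
    ToyHasher& Write(ByteSpan in)
    {
        for (size_t i = 0; i < in.len; ++i) {
            m_state ^= in.ptr[i];
            m_state *= 1099511628211ull; // FNV-1a prime
        }
        return *this;
    }
    uint64_t Finalize() const { return m_state; }
};

int main()
{
    const std::string msg = "Very deterministic message";
    const uint8_t rnd[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    // Whole objects go straight into the hasher:
    const uint64_t digest = ToyHasher().Write(msg).Write(rnd).Finalize();
    (void)digest;
    return 0;
}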
BOOST_CHECK(boost::get<WitnessV0KeyHash>(&address) && *boost::get<WitnessV0KeyHash>(&address) == keyhash); - // TX_WITNESS_V0_SCRIPTHASH + // TxoutType::WITNESS_V0_SCRIPTHASH s.clear(); WitnessV0ScriptHash scripthash; CSHA256().Write(redeemScript.data(), redeemScript.size()).Finalize(scripthash.begin()); @@ -227,7 +227,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) BOOST_CHECK(ExtractDestination(s, address)); BOOST_CHECK(boost::get<WitnessV0ScriptHash>(&address) && *boost::get<WitnessV0ScriptHash>(&address) == scripthash); - // TX_WITNESS with unknown version + // TxoutType::WITNESS_UNKNOWN with unknown version s.clear(); s << OP_1 << ToByteVector(pubkey); BOOST_CHECK(ExtractDestination(s, address)); @@ -248,49 +248,49 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations) } CScript s; - txnouttype whichType; + TxoutType whichType; std::vector<CTxDestination> addresses; int nRequired; - // TX_PUBKEY + // TxoutType::PUBKEY s.clear(); s << ToByteVector(pubkeys[0]) << OP_CHECKSIG; BOOST_CHECK(ExtractDestinations(s, whichType, addresses, nRequired)); - BOOST_CHECK_EQUAL(whichType, TX_PUBKEY); + BOOST_CHECK_EQUAL(whichType, TxoutType::PUBKEY); BOOST_CHECK_EQUAL(addresses.size(), 1U); BOOST_CHECK_EQUAL(nRequired, 1); BOOST_CHECK(boost::get<PKHash>(&addresses[0]) && *boost::get<PKHash>(&addresses[0]) == PKHash(pubkeys[0])); - // TX_PUBKEYHASH + // TxoutType::PUBKEYHASH s.clear(); s << OP_DUP << OP_HASH160 << ToByteVector(pubkeys[0].GetID()) << OP_EQUALVERIFY << OP_CHECKSIG; BOOST_CHECK(ExtractDestinations(s, whichType, addresses, nRequired)); - BOOST_CHECK_EQUAL(whichType, TX_PUBKEYHASH); + BOOST_CHECK_EQUAL(whichType, TxoutType::PUBKEYHASH); BOOST_CHECK_EQUAL(addresses.size(), 1U); BOOST_CHECK_EQUAL(nRequired, 1); BOOST_CHECK(boost::get<PKHash>(&addresses[0]) && *boost::get<PKHash>(&addresses[0]) == PKHash(pubkeys[0])); - // TX_SCRIPTHASH + // TxoutType::SCRIPTHASH CScript redeemScript(s); // initialize with leftover P2PKH script s.clear(); s << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL; BOOST_CHECK(ExtractDestinations(s, whichType, addresses, nRequired)); - BOOST_CHECK_EQUAL(whichType, TX_SCRIPTHASH); + BOOST_CHECK_EQUAL(whichType, TxoutType::SCRIPTHASH); BOOST_CHECK_EQUAL(addresses.size(), 1U); BOOST_CHECK_EQUAL(nRequired, 1); BOOST_CHECK(boost::get<ScriptHash>(&addresses[0]) && *boost::get<ScriptHash>(&addresses[0]) == ScriptHash(redeemScript)); - // TX_MULTISIG + // TxoutType::MULTISIG s.clear(); s << OP_2 << ToByteVector(pubkeys[0]) << ToByteVector(pubkeys[1]) << OP_2 << OP_CHECKMULTISIG; BOOST_CHECK(ExtractDestinations(s, whichType, addresses, nRequired)); - BOOST_CHECK_EQUAL(whichType, TX_MULTISIG); + BOOST_CHECK_EQUAL(whichType, TxoutType::MULTISIG); BOOST_CHECK_EQUAL(addresses.size(), 2U); BOOST_CHECK_EQUAL(nRequired, 2); BOOST_CHECK(boost::get<PKHash>(&addresses[0]) && @@ -298,7 +298,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations) BOOST_CHECK(boost::get<PKHash>(&addresses[1]) && *boost::get<PKHash>(&addresses[1]) == PKHash(pubkeys[1])); - // TX_NULL_DATA + // TxoutType::NULL_DATA s.clear(); s << OP_RETURN << std::vector<unsigned char>({75}); BOOST_CHECK(!ExtractDestinations(s, whichType, addresses, nRequired)); diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index cb3ae290d1..0830743d61 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -282,7 +282,7 @@ public: CScript scriptPubKey = script; if (wm == WitnessMode::PKH) { uint160 hash; - CHash160().Write(&script[1], script.size() - 
1).Finalize(hash.begin()); + CHash160().Write(MakeSpan(script).subspan(1)).Finalize(hash); script = CScript() << OP_DUP << OP_HASH160 << ToByteVector(hash) << OP_EQUALVERIFY << OP_CHECKSIG; scriptPubKey = CScript() << witnessversion << ToByteVector(hash); } else if (wm == WitnessMode::SH) { diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index c2328f931c..f625b67c2a 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -145,7 +145,7 @@ BOOST_AUTO_TEST_CASE(floats) for (int i = 0; i < 1000; i++) { ss << float(i); } - BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c")); + BOOST_CHECK(Hash(ss) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c")); // decode for (int i = 0; i < 1000; i++) { @@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(doubles) for (int i = 0; i < 1000; i++) { ss << double(i); } - BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96")); + BOOST_CHECK(Hash(ss) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96")); // decode for (int i = 0; i < 1000; i++) { diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp index fcd831ccda..548fd020a6 100644 --- a/src/test/settings_tests.cpp +++ b/src/test/settings_tests.cpp @@ -12,10 +12,90 @@ #include <univalue.h> #include <util/strencodings.h> #include <util/string.h> +#include <util/system.h> #include <vector> +inline bool operator==(const util::SettingsValue& a, const util::SettingsValue& b) +{ + return a.write() == b.write(); +} + +inline std::ostream& operator<<(std::ostream& os, const util::SettingsValue& value) +{ + os << value.write(); + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const std::pair<std::string, util::SettingsValue>& kv) +{ + util::SettingsValue out(util::SettingsValue::VOBJ); + out.__pushKV(kv.first, kv.second); + os << out.write(); + return os; +} + +inline void WriteText(const fs::path& path, const std::string& text) +{ + fsbridge::ofstream file; + file.open(path); + file << text; +} + BOOST_FIXTURE_TEST_SUITE(settings_tests, BasicTestingSetup) +BOOST_AUTO_TEST_CASE(ReadWrite) +{ + fs::path path = GetDataDir() / "settings.json"; + + WriteText(path, R"({ + "string": "string", + "num": 5, + "bool": true, + "null": null + })"); + + std::map<std::string, util::SettingsValue> expected{ + {"string", "string"}, + {"num", 5}, + {"bool", true}, + {"null", {}}, + }; + + // Check file read. + std::map<std::string, util::SettingsValue> values; + std::vector<std::string> errors; + BOOST_CHECK(util::ReadSettings(path, values, errors)); + BOOST_CHECK_EQUAL_COLLECTIONS(values.begin(), values.end(), expected.begin(), expected.end()); + BOOST_CHECK(errors.empty()); + + // Check no errors if file doesn't exist. 
+ fs::remove(path); + BOOST_CHECK(util::ReadSettings(path, values, errors)); + BOOST_CHECK(values.empty()); + BOOST_CHECK(errors.empty()); + + // Check duplicate keys not allowed + WriteText(path, R"({ + "dupe": "string", + "dupe": "dupe" + })"); + BOOST_CHECK(!util::ReadSettings(path, values, errors)); + std::vector<std::string> dup_keys = {strprintf("Found duplicate key dupe in settings file %s", path.string())}; + BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), dup_keys.begin(), dup_keys.end()); + + // Check non-kv json files not allowed + WriteText(path, R"("non-kv")"); + BOOST_CHECK(!util::ReadSettings(path, values, errors)); + std::vector<std::string> non_kv = {strprintf("Found non-object value \"non-kv\" in settings file %s", path.string())}; + BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), non_kv.begin(), non_kv.end()); + + // Check invalid json not allowed + WriteText(path, R"(invalid json)"); + BOOST_CHECK(!util::ReadSettings(path, values, errors)); + std::vector<std::string> fail_parse = {strprintf("Unable to parse settings file %s", path.string())}; + BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), fail_parse.begin(), fail_parse.end()); +} + //! Check settings struct contents against expected json strings. static void CheckValues(const util::Settings& settings, const std::string& single_val, const std::string& list_val) { @@ -148,7 +228,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup) if (OnlyHasDefaultSectionSetting(settings, network, name)) desc += " ignored"; desc += "\n"; - out_sha.Write((const unsigned char*)desc.data(), desc.size()); + out_sha.Write(MakeUCharSpan(desc)); if (out_file) { BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size()); } @@ -161,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup) unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE]; out_sha.Finalize(out_sha_bytes); - std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes)); + std::string out_sha_hex = HexStr(out_sha_bytes); // If check below fails, should manually dump the results with: // diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp index 5c6c2ee38e..19029ebd3c 100644 --- a/src/test/sync_tests.cpp +++ b/src/test/sync_tests.cpp @@ -14,13 +14,15 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2) { LOCK2(mutex1, mutex2); } + BOOST_CHECK(LockStackEmpty()); bool error_thrown = false; try { LOCK2(mutex2, mutex1); } catch (const std::logic_error& e) { - BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected"); + BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1"); error_thrown = true; } + BOOST_CHECK(LockStackEmpty()); #ifdef DEBUG_LOCKORDER BOOST_CHECK(error_thrown); #else @@ -40,9 +42,13 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected) RecursiveMutex rmutex1, rmutex2; TestPotentialDeadLockDetected(rmutex1, rmutex2); + // The second test ensures that lock tracking data have not been broken by exception. + TestPotentialDeadLockDetected(rmutex1, rmutex2); Mutex mutex1, mutex2; TestPotentialDeadLockDetected(mutex1, mutex2); + // The second test ensures that lock tracking data have not been broken by exception. 
+ TestPotentialDeadLockDetected(mutex1, mutex2); #ifdef DEBUG_LOCKORDER g_debug_lockorder_abort = prev; diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp new file mode 100644 index 0000000000..a55145c738 --- /dev/null +++ b/src/test/system_tests.cpp @@ -0,0 +1,95 @@ +// Copyright (c) 2019 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +// +#include <test/util/setup_common.h> +#include <util/system.h> +#include <univalue.h> + +#ifdef HAVE_BOOST_PROCESS +#include <boost/process.hpp> +#endif // HAVE_BOOST_PROCESS + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup) + +// At least one test is required (in case HAVE_BOOST_PROCESS is not defined). +// Workaround for https://github.com/bitcoin/bitcoin/issues/19128 +BOOST_AUTO_TEST_CASE(dummy) +{ + BOOST_CHECK(true); +} + +#ifdef HAVE_BOOST_PROCESS + +bool checkMessage(const std::runtime_error& ex) +{ + // On Linux & Mac: "No such file or directory" + // On Windows: "The system cannot find the file specified." + const std::string what(ex.what()); + BOOST_CHECK(what.find("file") != std::string::npos); + return true; +} + +bool checkMessageFalse(const std::runtime_error& ex) +{ + BOOST_CHECK_EQUAL(ex.what(), std::string("RunCommandParseJSON error: process(false) returned 1: \n")); + return true; +} + +bool checkMessageStdErr(const std::runtime_error& ex) +{ + const std::string what(ex.what()); + BOOST_CHECK(what.find("RunCommandParseJSON error:") != std::string::npos); + return checkMessage(ex); +} + +BOOST_AUTO_TEST_CASE(run_command) +{ + { + const UniValue result = RunCommandParseJSON(""); + BOOST_CHECK(result.isNull()); + } + { +#ifdef WIN32 + // Windows requires single quotes to prevent escaping double quotes from the JSON... + const UniValue result = RunCommandParseJSON("echo '{\"success\": true}'"); +#else + // ... 
but Linux and macOS echo a single quote if it's used + const UniValue result = RunCommandParseJSON("echo \"{\"success\": true}\""); +#endif + BOOST_CHECK(result.isObject()); + const UniValue& success = find_value(result, "success"); + BOOST_CHECK(!success.isNull()); + BOOST_CHECK_EQUAL(success.getBool(), true); + } + { + // An invalid command is handled by Boost + BOOST_CHECK_EXCEPTION(RunCommandParseJSON("invalid_command"), boost::process::process_error, checkMessage); // Command failed + } + { + // Return non-zero exit code, no output to stderr + BOOST_CHECK_EXCEPTION(RunCommandParseJSON("false"), std::runtime_error, checkMessageFalse); + } + { + // Return non-zero exit code, with error message for stderr + BOOST_CHECK_EXCEPTION(RunCommandParseJSON("ls nosuchfile"), std::runtime_error, checkMessageStdErr); + } + { + BOOST_REQUIRE_THROW(RunCommandParseJSON("echo \"{\""), std::runtime_error); // Unable to parse JSON + } + // Test std::in, except for Windows +#ifndef WIN32 + { + const UniValue result = RunCommandParseJSON("cat", "{\"success\": true}"); + BOOST_CHECK(result.isObject()); + const UniValue& success = find_value(result, "success"); + BOOST_CHECK(!success.isNull()); + BOOST_CHECK_EQUAL(success.getBool(), true); + } +#endif +} +#endif // HAVE_BOOST_PROCESS + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index ddbc68f8e2..4bf6e734ce 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -716,12 +716,12 @@ BOOST_AUTO_TEST_CASE(test_IsStandard) BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); BOOST_CHECK_EQUAL(reason, "scriptpubkey"); - // MAX_OP_RETURN_RELAY-byte TX_NULL_DATA (standard) + // MAX_OP_RETURN_RELAY-byte TxoutType::NULL_DATA (standard) t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"); BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY, t.vout[0].scriptPubKey.size()); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); - // MAX_OP_RETURN_RELAY+1-byte TX_NULL_DATA (non-standard) + // MAX_OP_RETURN_RELAY+1-byte TxoutType::NULL_DATA (non-standard) t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3800"); BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY + 1, t.vout[0].scriptPubKey.size()); reason.clear(); @@ -745,12 +745,12 @@ BOOST_AUTO_TEST_CASE(test_IsStandard) BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); BOOST_CHECK_EQUAL(reason, "scriptpubkey"); - // TX_NULL_DATA w/o PUSHDATA + // TxoutType::NULL_DATA w/o PUSHDATA t.vout.resize(1); t.vout[0].scriptPubKey = CScript() << OP_RETURN; BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); - // Only one TX_NULL_DATA permitted in all cases + // Only one TxoutType::NULL_DATA permitted in all cases t.vout.resize(2); t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"); t.vout[1].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"); diff --git a/src/test/util/mining.cpp b/src/test/util/mining.cpp index dac7f1a07b..74536ae74c 100644 --- a/src/test/util/mining.cpp +++ b/src/test/util/mining.cpp @@ -11,6 +11,7 @@ #include <node/context.h> #include <pow.h> 
#include <script/standard.h> +#include <util/check.h> #include <validation.h> CTxIn generatetoaddress(const NodeContext& node, const std::string& address) @@ -31,7 +32,7 @@ CTxIn MineBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) assert(block->nNonce); } - bool processed{EnsureChainman(node).ProcessNewBlock(Params(), block, true, nullptr)}; + bool processed{Assert(node.chainman)->ProcessNewBlock(Params(), block, true, nullptr)}; assert(processed); return CTxIn{block->vtx[0]->GetHash(), 0}; @@ -39,9 +40,8 @@ CTxIn MineBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) std::shared_ptr<CBlock> PrepareBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) { - assert(node.mempool); auto block = std::make_shared<CBlock>( - BlockAssembler{*node.mempool, Params()} + BlockAssembler{*Assert(node.mempool), Params()} .CreateNewBlock(coinbase_scriptPubKey) ->block); diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 709d357b8a..b2ae1cb845 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -11,6 +11,7 @@ #include <consensus/validation.h> #include <crypto/sha256.h> #include <init.h> +#include <interfaces/chain.h> #include <miner.h> #include <net.h> #include <net_processing.h> @@ -32,6 +33,7 @@ #include <util/vector.h> #include <validation.h> #include <validationinterface.h> +#include <walletinitinterface.h> #include <functional> @@ -75,11 +77,13 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve "dummy", "-printtoconsole=0", "-logtimemicros", + "-logthreadnames", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", }, extra_args); + util::ThreadRename("test"); fs::create_directories(m_path_root); gArgs.ForceSetArg("-datadir", m_path_root.string()); ClearDatadirCache(); @@ -102,6 +106,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve SetupNetworking(); InitSignatureCache(); InitScriptExecutionCache(); + m_node.chain = interfaces::MakeChain(m_node); + g_wallet_init_interface.Construct(m_node); fCheckBlockIndex = true; static bool noui_connected = false; if (!noui_connected) { @@ -130,7 +136,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const // We have to run a scheduler thread to prevent ActivateBestChain // from blocking due to queue overrun. 
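The setup_common hunk that follows wraps the test scheduler thread body in TraceThread so that, together with the -logthreadnames and util::ThreadRename("test") additions above, log lines from the test binary carry a thread name. Here is a minimal standard-library-only sketch of such a wrapper; TraceThreadSketch is an invented name, and the real helper uses Bitcoin Core's logging and thread-renaming utilities rather than std::clog.

// Editor's sketch of a TraceThread-style wrapper: label the thread, log entry and
// exit, and rethrow exceptions with the wrapped work otherwise untouched.
#include <exception>
#include <iostream>
#include <string>
#include <thread>

template <typename Callable>
void TraceThreadSketch(const std::string& name, Callable body)
{
    std::clog << "[" << name << "] start\n";
    try {
        body();
        std::clog << "[" << name << "] exit\n";
    } catch (const std::exception& e) {
        std::clog << "[" << name << "] aborted: " << e.what() << "\n";
        throw;
    }
}

int main()
{
    std::thread t([] { TraceThreadSketch("scheduler", [] { /* serviceQueue() would run here */ }); });
    t.join();
    return 0;
}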
- threadGroup.create_thread([&]{ m_node.scheduler->serviceQueue(); }); + threadGroup.create_thread([&] { TraceThread("scheduler", [&] { m_node.scheduler->serviceQueue(); }); }); GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler); pblocktree.reset(new CBlockTreeDB(1 << 20, true)); @@ -140,7 +146,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const ::ChainstateActive().InitCoinsDB( /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); assert(!::ChainstateActive().CanFlushToDisk()); - ::ChainstateActive().InitCoinsCache(); + ::ChainstateActive().InitCoinsCache(1 << 23); assert(::ChainstateActive().CanFlushToDisk()); if (!LoadGenesisBlock(chainparams)) { throw std::runtime_error("LoadGenesisBlock failed."); @@ -180,9 +186,9 @@ TestingSetup::~TestingSetup() m_node.connman.reset(); m_node.banman.reset(); m_node.args = nullptr; + UnloadBlockIndex(m_node.mempool); m_node.mempool = nullptr; m_node.scheduler.reset(); - UnloadBlockIndex(); m_node.chainman->Reset(); m_node.chainman = nullptr; pblocktree.reset(); @@ -229,7 +235,7 @@ CBlock TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransa while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce; std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block); - EnsureChainman(m_node).ProcessNewBlock(chainparams, shared_pblock, true, nullptr); + Assert(m_node.chainman)->ProcessNewBlock(chainparams, shared_pblock, true, nullptr); CBlock result = block; return result; diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index e480782c12..78b279e42a 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -12,6 +12,7 @@ #include <pubkey.h> #include <random.h> #include <txmempool.h> +#include <util/check.h> #include <util/string.h> #include <type_traits> diff --git a/src/test/util/transaction_utils.h b/src/test/util/transaction_utils.h index 1beddd334b..6f2faeec6c 100644 --- a/src/test/util/transaction_utils.h +++ b/src/test/util/transaction_utils.h @@ -22,8 +22,8 @@ CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int n CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CScriptWitness& scriptWitness, const CTransaction& txCredit); // Helper: create two dummy transactions, each with two outputs. -// The first has nValues[0] and nValues[1] outputs paid to a TX_PUBKEY, -// the second nValues[2] and nValues[3] outputs paid to a TX_PUBKEYHASH. +// The first has nValues[0] and nValues[1] outputs paid to a TxoutType::PUBKEY, +// the second nValues[2] and nValues[3] outputs paid to a TxoutType::PUBKEYHASH. 
std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet, const std::array<CAmount,4>& nValues); #endif // BITCOIN_TEST_UTIL_TRANSACTION_UTILS_H diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 257328974b..b49370c967 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -9,6 +9,7 @@ #include <key.h> // For CKey #include <optional.h> #include <sync.h> +#include <test/util/logging.h> #include <test/util/setup_common.h> #include <test/util/str.h> #include <uint256.h> @@ -41,6 +42,16 @@ namespace BCLog { BOOST_FIXTURE_TEST_SUITE(util_tests, BasicTestingSetup) +BOOST_AUTO_TEST_CASE(util_check) +{ + // Check that Assert can forward + const std::unique_ptr<int> p_two = Assert(MakeUnique<int>(2)); + // Check that Assert works on lvalues and rvalues + const int two = *Assert(p_two); + Assert(two == 2); + Assert(true); +} + BOOST_AUTO_TEST_CASE(util_criticalsection) { RecursiveMutex cs; @@ -94,47 +105,24 @@ BOOST_AUTO_TEST_CASE(util_ParseHex) BOOST_AUTO_TEST_CASE(util_HexStr) { BOOST_CHECK_EQUAL( - HexStr(ParseHex_expected, ParseHex_expected + sizeof(ParseHex_expected)), + HexStr(ParseHex_expected), "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"); BOOST_CHECK_EQUAL( - HexStr(ParseHex_expected + sizeof(ParseHex_expected), - ParseHex_expected + sizeof(ParseHex_expected)), + HexStr(Span<const unsigned char>( + ParseHex_expected + sizeof(ParseHex_expected), + ParseHex_expected + sizeof(ParseHex_expected))), ""); BOOST_CHECK_EQUAL( - HexStr(ParseHex_expected, ParseHex_expected), + HexStr(Span<const unsigned char>(ParseHex_expected, ParseHex_expected)), ""); std::vector<unsigned char> ParseHex_vec(ParseHex_expected, ParseHex_expected + 5); BOOST_CHECK_EQUAL( - HexStr(ParseHex_vec.rbegin(), ParseHex_vec.rend()), - "b0fd8a6704" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 1), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "04" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 5), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "b0fd8a6704" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 65), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "5f1df16b2b704c8a578d0bbaf74d385cde12c11ee50455f3c438ef4c3fbcf649b6de611feae06279a60939e028a8d65c10b73071a6f16719274855feb0fd8a6704" + HexStr(ParseHex_vec), + "04678afdb0" ); } @@ -998,7 +986,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup) desc += "\n"; - out_sha.Write((const unsigned char*)desc.data(), desc.size()); + out_sha.Write(MakeUCharSpan(desc)); if (out_file) { BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size()); } @@ -1011,7 +999,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup) unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE]; out_sha.Finalize(out_sha_bytes); - std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes)); + std::string out_sha_hex = HexStr(out_sha_bytes); // If check below fails, should manually dump the results with: // @@ -1101,7 +1089,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup) } desc += "\n"; - 
out_sha.Write((const unsigned char*)desc.data(), desc.size()); + out_sha.Write(MakeUCharSpan(desc)); if (out_file) { BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size()); } @@ -1114,7 +1102,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup) unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE]; out_sha.Finalize(out_sha_bytes); - std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes)); + std::string out_sha_hex = HexStr(out_sha_bytes); // If check below fails, should manually dump the results with: // @@ -1128,6 +1116,28 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup) BOOST_CHECK_EQUAL(out_sha_hex, "f0b3a3c29869edc765d579c928f7f1690a71fbb673b49ccf39cbc4de18156a0d"); } +BOOST_AUTO_TEST_CASE(util_ReadWriteSettings) +{ + // Test writing setting. + TestArgsManager args1; + args1.LockSettings([&](util::Settings& settings) { settings.rw_settings["name"] = "value"; }); + args1.WriteSettingsFile(); + + // Test reading setting. + TestArgsManager args2; + args2.ReadSettingsFile(); + args2.LockSettings([&](util::Settings& settings) { BOOST_CHECK_EQUAL(settings.rw_settings["name"].get_str(), "value"); }); + + // Test error logging, and remove previously written setting. + { + ASSERT_DEBUG_LOG("Failed renaming settings file"); + fs::remove(GetDataDir() / "settings.json"); + fs::create_directory(GetDataDir() / "settings.json"); + args2.WriteSettingsFile(); + fs::remove(GetDataDir() / "settings.json"); + } +} + BOOST_AUTO_TEST_CASE(util_FormatMoney) { BOOST_CHECK_EQUAL(FormatMoney(0), "0.00"); @@ -2153,8 +2163,8 @@ BOOST_AUTO_TEST_CASE(message_hash) std::string(1, (char)unsigned_tx.length()) + unsigned_tx; - const uint256 signature_hash = Hash(unsigned_tx.begin(), unsigned_tx.end()); - const uint256 message_hash1 = Hash(prefixed_message.begin(), prefixed_message.end()); + const uint256 signature_hash = Hash(unsigned_tx); + const uint256 message_hash1 = Hash(prefixed_message); const uint256 message_hash2 = MessageHash(unsigned_tx); BOOST_CHECK_EQUAL(message_hash1, message_hash2); diff --git a/src/test/util_threadnames_tests.cpp b/src/test/util_threadnames_tests.cpp index 4dcc080b2d..f3f9fb2bff 100644 --- a/src/test/util_threadnames_tests.cpp +++ b/src/test/util_threadnames_tests.cpp @@ -53,8 +53,6 @@ std::set<std::string> RenameEnMasse(int num_threads) */ BOOST_AUTO_TEST_CASE(util_threadnames_test_rename_threaded) { - BOOST_CHECK_EQUAL(util::ThreadGetInternalName(), ""); - #if !defined(HAVE_THREAD_LOCAL) // This test doesn't apply to platforms where we don't have thread_local. 
return; diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index 45e0c5484e..8e85b7df3e 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -163,10 +163,10 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) std::transform(blocks.begin(), blocks.end(), std::back_inserter(headers), [](std::shared_ptr<const CBlock> b) { return b->GetBlockHeader(); }); // Process all the headers so we understand the toplogy of the chain - BOOST_CHECK(EnsureChainman(m_node).ProcessNewBlockHeaders(headers, state, Params())); + BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders(headers, state, Params())); // Connect the genesis block and drain any outstanding events - BOOST_CHECK(EnsureChainman(m_node).ProcessNewBlock(Params(), std::make_shared<CBlock>(Params().GenesisBlock()), true, &ignored)); + BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(Params(), std::make_shared<CBlock>(Params().GenesisBlock()), true, &ignored)); SyncWithValidationInterfaceQueue(); // subscribe to events (this subscriber will validate event ordering) @@ -188,13 +188,13 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) FastRandomContext insecure; for (int i = 0; i < 1000; i++) { auto block = blocks[insecure.randrange(blocks.size() - 1)]; - EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, &ignored); + Assert(m_node.chainman)->ProcessNewBlock(Params(), block, true, &ignored); } // to make sure that eventually we process the full chain - do it here for (auto block : blocks) { if (block->vtx.size() == 1) { - bool processed = EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, &ignored); + bool processed = Assert(m_node.chainman)->ProcessNewBlock(Params(), block, true, &ignored); assert(processed); } } @@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) { bool ignored; auto ProcessBlock = [&](std::shared_ptr<const CBlock> block) -> bool { - return EnsureChainman(m_node).ProcessNewBlock(Params(), block, /* fForceProcessing */ true, /* fNewBlock */ &ignored); + return Assert(m_node.chainman)->ProcessNewBlock(Params(), block, /* fForceProcessing */ true, /* fNewBlock */ &ignored); }; // Process all mined blocks diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp new file mode 100644 index 0000000000..2076a1096a --- /dev/null +++ b/src/test/validation_chainstate_tests.cpp @@ -0,0 +1,74 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +// +#include <random.h> +#include <uint256.h> +#include <consensus/validation.h> +#include <sync.h> +#include <test/util/setup_common.h> +#include <validation.h> + +#include <vector> + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(validation_chainstate_tests, TestingSetup) + +//! Test resizing coins-related CChainState caches during runtime. +//! +BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches) +{ + ChainstateManager manager; + + //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view. 
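The Assert(m_node.chainman)->... calls above lean on the Assert() helper added to src/util/check.h further down in this diff: it aborts if its argument compares equal to zero and otherwise forwards the value unchanged, so it can sit inline in an expression. A simplified, standalone sketch of the idea — a function template rather than the real macro, with AssertNonNull and DummyChainman made up for the example:

    #include <cassert>
    #include <memory>
    #include <utility>

    // Simplified stand-in for Assert(): abort when the value is falsy,
    // otherwise forward it unchanged so the caller can keep chaining.
    template <typename T>
    T&& AssertNonNull(T&& val)
    {
        assert(val);
        return std::forward<T>(val);
    }

    struct DummyChainman {
        bool ProcessNewBlock() { return true; }
    };

    int main()
    {
        auto chainman = std::make_unique<DummyChainman>();
        // Comparable in spirit to Assert(m_node.chainman)->ProcessNewBlock(...).
        return AssertNonNull(chainman)->ProcessNewBlock() ? 0 : 1;
    }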
+ auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint { + Coin newcoin; + uint256 txid = InsecureRand256(); + COutPoint outp{txid, 0}; + newcoin.nHeight = 1; + newcoin.out.nValue = InsecureRand32(); + newcoin.out.scriptPubKey.assign((uint32_t)56, 1); + coins_view.AddCoin(outp, std::move(newcoin), false); + + return outp; + }; + + CChainState& c1 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate()); + c1.InitCoinsDB( + /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); + WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23)); + + // Add a coin to the in-memory cache, upsize once, then downsize. + { + LOCK(::cs_main); + auto outpoint = add_coin(c1.CoinsTip()); + + // Set a meaningless bestblock value in the coinsview cache - otherwise we won't + // flush during ResizecoinsCaches() and will subsequently hit an assertion. + c1.CoinsTip().SetBestBlock(InsecureRand256()); + + BOOST_CHECK(c1.CoinsTip().HaveCoinInCache(outpoint)); + + c1.ResizeCoinsCaches( + 1 << 24, // upsizing the coinsview cache + 1 << 22 // downsizing the coinsdb cache + ); + + // View should still have the coin cached, since we haven't destructed the cache on upsize. + BOOST_CHECK(c1.CoinsTip().HaveCoinInCache(outpoint)); + + c1.ResizeCoinsCaches( + 1 << 22, // downsizing the coinsview cache + 1 << 23 // upsizing the coinsdb cache + ); + + // The view cache should be empty since we had to destruct to downsize. + BOOST_CHECK(!c1.CoinsTip().HaveCoinInCache(outpoint)); + } + + // Avoid triggering the address sanitizer. + WITH_LOCK(::cs_main, manager.Unload()); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 0d149285ad..887a48124f 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -28,13 +28,11 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) // Create a legacy (IBD) chainstate. // - ENTER_CRITICAL_SECTION(cs_main); - CChainState& c1 = manager.InitializeChainstate(); - LEAVE_CRITICAL_SECTION(cs_main); + CChainState& c1 = *WITH_LOCK(::cs_main, return &manager.InitializeChainstate()); chainstates.push_back(&c1); c1.InitCoinsDB( /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); - WITH_LOCK(::cs_main, c1.InitCoinsCache()); + WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23)); BOOST_CHECK(!manager.IsSnapshotActive()); BOOST_CHECK(!manager.IsSnapshotValidated()); @@ -56,13 +54,11 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) // Create a snapshot-based chainstate. // - ENTER_CRITICAL_SECTION(cs_main); - CChainState& c2 = manager.InitializeChainstate(GetRandHash()); - LEAVE_CRITICAL_SECTION(cs_main); + CChainState& c2 = *WITH_LOCK(::cs_main, return &manager.InitializeChainstate(GetRandHash())); chainstates.push_back(&c2); c2.InitCoinsDB( /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); - WITH_LOCK(::cs_main, c2.InitCoinsCache()); + WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23)); // Unlike c1, which doesn't have any blocks. Gets us different tip, height. c2.LoadGenesisBlock(chainparams); BlockValidationState _; @@ -104,4 +100,54 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) WITH_LOCK(::cs_main, manager.Unload()); } +//! Test rebalancing the caches associated with each chainstate. 
+BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches) +{ + ChainstateManager manager; + size_t max_cache = 10000; + manager.m_total_coinsdb_cache = max_cache; + manager.m_total_coinstip_cache = max_cache; + + std::vector<CChainState*> chainstates; + + // Create a legacy (IBD) chainstate. + // + CChainState& c1 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate()); + chainstates.push_back(&c1); + c1.InitCoinsDB( + /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); + + { + LOCK(::cs_main); + c1.InitCoinsCache(1 << 23); + c1.CoinsTip().SetBestBlock(InsecureRand256()); + manager.MaybeRebalanceCaches(); + } + + BOOST_CHECK_EQUAL(c1.m_coinstip_cache_size_bytes, max_cache); + BOOST_CHECK_EQUAL(c1.m_coinsdb_cache_size_bytes, max_cache); + + // Create a snapshot-based chainstate. + // + CChainState& c2 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate(GetRandHash())); + chainstates.push_back(&c2); + c2.InitCoinsDB( + /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); + + { + LOCK(::cs_main); + c2.InitCoinsCache(1 << 23); + c2.CoinsTip().SetBestBlock(InsecureRand256()); + manager.MaybeRebalanceCaches(); + } + + // Since both chainstates are considered to be in initial block download, + // the snapshot chainstate should take priority. + BOOST_CHECK_CLOSE(c1.m_coinstip_cache_size_bytes, max_cache * 0.05, 1); + BOOST_CHECK_CLOSE(c1.m_coinsdb_cache_size_bytes, max_cache * 0.05, 1); + BOOST_CHECK_CLOSE(c2.m_coinstip_cache_size_bytes, max_cache * 0.95, 1); + BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1); + +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp index a863e3a4d5..8bac914f05 100644 --- a/src/test/validation_flush_tests.cpp +++ b/src/test/validation_flush_tests.cpp @@ -21,7 +21,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) BlockManager blockman{}; CChainState chainstate{blockman}; chainstate.InitCoinsDB(/*cache_size_bytes*/ 1 << 10, /*in_memory*/ true, /*should_wipe*/ false); - WITH_LOCK(::cs_main, chainstate.InitCoinsCache()); + WITH_LOCK(::cs_main, chainstate.InitCoinsCache(1 << 10)); CTxMemPool tx_pool{}; constexpr bool is_64_bit = sizeof(void*) == 8; @@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) // Without any coins in the cache, we shouldn't need to flush. 
BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), CoinsCacheSizeState::OK); // If the initial memory allocations of cacheCoins don't match these common @@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) } BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), CoinsCacheSizeState::CRITICAL); BOOST_TEST_MESSAGE("Exiting cache flush tests early due to unsupported arch"); @@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) print_view_mem_usage(view); BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE); BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), CoinsCacheSizeState::OK); } @@ -100,26 +100,26 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) for (int i{0}; i < 4; ++i) { add_coin(view); print_view_mem_usage(view); - if (chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) == + if (chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) == CoinsCacheSizeState::CRITICAL) { break; } } BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0), CoinsCacheSizeState::CRITICAL); // Passing non-zero max mempool usage should allow us more headroom. BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10), CoinsCacheSizeState::OK); for (int i{0}; i < 3; ++i) { add_coin(view); print_view_mem_usage(view); BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10), CoinsCacheSizeState::OK); } @@ -135,7 +135,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) BOOST_CHECK(usage_percentage >= 0.9); BOOST_CHECK(usage_percentage < 1); BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10), CoinsCacheSizeState::LARGE); } @@ -143,7 +143,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) for (int i{0}; i < 1000; ++i) { add_coin(view); BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool), + chainstate.GetCoinsCacheSizeState(&tx_pool), CoinsCacheSizeState::OK); } @@ -151,7 +151,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) // preallocated memory that doesn't get reclaimed even after flush. 
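GetCoinsCacheSizeState() now takes the mempool by pointer, so a nullptr mempool simply contributes no extra headroom. The arithmetic these checks exercise, in standalone form: the total-space formula mirrors the one visible in the validation.cpp hunk later in this diff, while the 90% LARGE threshold is inferred from the test expectations, and the names below are illustrative rather than the real implementation:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    enum class CacheState { OK, LARGE, CRITICAL };

    // The coins cache may borrow whatever part of the mempool budget is unused;
    // with no mempool (the new nullptr case) only the coins budget itself counts.
    CacheState SizeState(int64_t cache_bytes, int64_t max_cache_bytes,
                         int64_t max_mempool_bytes, int64_t mempool_usage_bytes)
    {
        const int64_t total_space =
            max_cache_bytes + std::max<int64_t>(max_mempool_bytes - mempool_usage_bytes, 0);
        if (cache_bytes > total_space) return CacheState::CRITICAL;
        if (cache_bytes > 9 * total_space / 10) return CacheState::LARGE; // ~90% full
        return CacheState::OK;
    }

    int main()
    {
        // Same cache usage, but an extra 1 KiB of unused mempool budget
        // moves the verdict from CRITICAL back to OK, as in the test above.
        std::cout << int(SizeState(1100, 1024, 0, 0)) << "\n";       // CRITICAL (2)
        std::cout << int(SizeState(1100, 1024, 1 << 10, 0)) << "\n"; // OK (0)
        return 0;
    }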
BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0), CoinsCacheSizeState::CRITICAL); view.SetBestBlock(InsecureRand256()); @@ -159,7 +159,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate) print_view_mem_usage(view); BOOST_CHECK_EQUAL( - chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0), + chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0), CoinsCacheSizeState::CRITICAL); } diff --git a/src/timedata.cpp b/src/timedata.cpp index 16dac24a48..6b3a79017b 100644 --- a/src/timedata.cpp +++ b/src/timedata.cpp @@ -9,8 +9,8 @@ #include <timedata.h> #include <netaddress.h> +#include <node/ui_interface.h> #include <sync.h> -#include <ui_interface.h> #include <util/system.h> #include <util/translation.h> #include <warnings.h> diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 84118b36ef..5d56d1ff89 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -405,7 +405,7 @@ static bool WriteBinaryFile(const fs::path &filename, const std::string &data) /****** Bitcoin specific TorController implementation ********/ /** Controller that connects to Tor control socket, authenticate, then create - * and maintain an ephemeral hidden service. + * and maintain an ephemeral onion service. */ class TorController { @@ -534,7 +534,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& // Finally - now create the service if (private_key.empty()) // No private key, generate one private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214 - // Request hidden service, redirect port. + // Request onion service, redirect port. // Note that the 'virtual' port is always the default port to avoid decloaking nodes using other ports. _conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, Params().GetDefaultPort(), GetListenPort()), std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2)); diff --git a/src/txdb.cpp b/src/txdb.cpp index 6f652c1375..72460e7c69 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -5,11 +5,12 @@ #include <txdb.h> +#include <node/ui_interface.h> #include <pow.h> #include <random.h> #include <shutdown.h> -#include <ui_interface.h> #include <uint256.h> +#include <util/memory.h> #include <util/system.h> #include <util/translation.h> #include <util/vector.h> @@ -39,35 +40,45 @@ struct CoinEntry { } -CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) : db(ldb_path, nCacheSize, fMemory, fWipe, true) +CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) : + m_db(MakeUnique<CDBWrapper>(ldb_path, nCacheSize, fMemory, fWipe, true)), + m_ldb_path(ldb_path), + m_is_memory(fMemory) { } + +void CCoinsViewDB::ResizeCache(size_t new_cache_size) { + // Have to do a reset first to get the original `m_db` state to release its + // filesystem lock. 
+ m_db.reset(); + m_db = MakeUnique<CDBWrapper>( + m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true); } bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { - return db.Read(CoinEntry(&outpoint), coin); + return m_db->Read(CoinEntry(&outpoint), coin); } bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const { - return db.Exists(CoinEntry(&outpoint)); + return m_db->Exists(CoinEntry(&outpoint)); } uint256 CCoinsViewDB::GetBestBlock() const { uint256 hashBestChain; - if (!db.Read(DB_BEST_BLOCK, hashBestChain)) + if (!m_db->Read(DB_BEST_BLOCK, hashBestChain)) return uint256(); return hashBestChain; } std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const { std::vector<uint256> vhashHeadBlocks; - if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { + if (!m_db->Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { return std::vector<uint256>(); } return vhashHeadBlocks; } bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { - CDBBatch batch(db); + CDBBatch batch(*m_db); size_t count = 0; size_t changed = 0; size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize); @@ -105,7 +116,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { mapCoins.erase(itOld); if (batch.SizeEstimate() > batch_size) { LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); - db.WriteBatch(batch); + m_db->WriteBatch(batch); batch.Clear(); if (crash_simulate) { static FastRandomContext rng; @@ -122,14 +133,14 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { batch.Write(DB_BEST_BLOCK, hashBlock); LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); - bool ret = db.WriteBatch(batch); + bool ret = m_db->WriteBatch(batch); LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count); return ret; } size_t CCoinsViewDB::EstimateSize() const { - return db.EstimateSize(DB_COIN, (char)(DB_COIN+1)); + return m_db->EstimateSize(DB_COIN, (char)(DB_COIN+1)); } CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) { @@ -156,7 +167,7 @@ bool CBlockTreeDB::ReadLastBlockFile(int &nFile) { CCoinsViewCursor *CCoinsViewDB::Cursor() const { - CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(db).NewIterator(), GetBestBlock()); + CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(*m_db).NewIterator(), GetBestBlock()); /* It seems that there are no "const iterators" for LevelDB. Since we only need read operations on it, use a const-cast to get around that restriction. */ @@ -335,7 +346,7 @@ public: * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout. 
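The ResizeCache() body above destroys the old CDBWrapper before constructing a new one, because the wrapper holds a filesystem lock on the leveldb directory for as long as it is alive. The same reset-then-reopen shape with a stand-in resource type (FakeDb is illustrative, not a Bitcoin Core class):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>

    // Stand-in for CDBWrapper: pretend it holds an exclusive lock on `path`
    // for as long as the object is alive.
    struct FakeDb {
        std::string path;
        size_t cache_size;
        FakeDb(std::string p, size_t c) : path(std::move(p)), cache_size(c)
        {
            std::cout << "open " << path << " (cache " << cache_size << " bytes)\n";
        }
        ~FakeDb() { std::cout << "close " << path << "\n"; }
    };

    // Same shape as ResizeCache(): release the old handle (and its lock) first,
    // then reopen the same path with the new cache size.
    void ResizeCache(std::unique_ptr<FakeDb>& db, const std::string& path, size_t new_size)
    {
        db.reset();
        db = std::make_unique<FakeDb>(path, new_size);
    }

    int main()
    {
        auto db = std::make_unique<FakeDb>("chainstate", 1 << 23);
        ResizeCache(db, "chainstate", 1 << 22);
        return 0;
    }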
*/ bool CCoinsViewDB::Upgrade() { - std::unique_ptr<CDBIterator> pcursor(db.NewIterator()); + std::unique_ptr<CDBIterator> pcursor(m_db->NewIterator()); pcursor->Seek(std::make_pair(DB_COINS, uint256())); if (!pcursor->Valid()) { return true; @@ -346,7 +357,7 @@ bool CCoinsViewDB::Upgrade() { LogPrintf("[0%%]..."); /* Continued */ uiInterface.ShowProgress(_("Upgrading UTXO database").translated, 0, true); size_t batch_size = 1 << 24; - CDBBatch batch(db); + CDBBatch batch(*m_db); int reportDone = 0; std::pair<unsigned char, uint256> key; std::pair<unsigned char, uint256> prev_key = {DB_COINS, uint256()}; @@ -380,9 +391,9 @@ bool CCoinsViewDB::Upgrade() { } batch.Erase(key); if (batch.SizeEstimate() > batch_size) { - db.WriteBatch(batch); + m_db->WriteBatch(batch); batch.Clear(); - db.CompactRange(prev_key, key); + m_db->CompactRange(prev_key, key); prev_key = key; } pcursor->Next(); @@ -390,8 +401,8 @@ bool CCoinsViewDB::Upgrade() { break; } } - db.WriteBatch(batch); - db.CompactRange({DB_COINS, uint256()}, key); + m_db->WriteBatch(batch); + m_db->CompactRange({DB_COINS, uint256()}, key); uiInterface.ShowProgress("", 100, false); LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE"); return !ShutdownRequested(); diff --git a/src/txdb.h b/src/txdb.h index 488c24f935..0cf7e2f1b8 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -39,11 +39,16 @@ static const int64_t max_filter_index_cache = 1024; //! Max memory allocated to coin DB specific cache (MiB) static const int64_t nMaxCoinsDBCache = 8; +// Actually declared in validation.cpp; can't include because of circular dependency. +extern RecursiveMutex cs_main; + /** CCoinsView backed by the coin database (chainstate/) */ class CCoinsViewDB final : public CCoinsView { protected: - CDBWrapper db; + std::unique_ptr<CDBWrapper> m_db; + fs::path m_ldb_path; + bool m_is_memory; public: /** * @param[in] ldb_path Location in the filesystem where leveldb data will be stored. @@ -60,6 +65,9 @@ public: //! Attempt to update from an older database format. Returns whether an error occurred. bool Upgrade(); size_t EstimateSize() const override; + + //! Dynamically alter the underlying leveldb cache size. + void ResizeCache(size_t new_cache_size) EXCLUSIVE_LOCKS_REQUIRED(cs_main); }; /** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */ diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 7d8eb8a323..de1a3ec68f 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -726,12 +726,12 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const assert(innerUsage == cachedInnerUsage); } -bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb) +bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb, bool wtxid) { LOCK(cs); - indexed_transaction_set::const_iterator i = mapTx.find(hasha); + indexed_transaction_set::const_iterator i = wtxid ? get_iter_from_wtxid(hasha) : mapTx.find(hasha); if (i == mapTx.end()) return false; - indexed_transaction_set::const_iterator j = mapTx.find(hashb); + indexed_transaction_set::const_iterator j = wtxid ? 
get_iter_from_wtxid(hashb) : mapTx.find(hashb); if (j == mapTx.end()) return true; uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); @@ -811,15 +811,17 @@ CTransactionRef CTxMemPool::get(const uint256& hash) const return i->GetSharedTx(); } -TxMempoolInfo CTxMemPool::info(const uint256& hash) const +TxMempoolInfo CTxMemPool::info(const GenTxid& gtxid) const { LOCK(cs); - indexed_transaction_set::const_iterator i = mapTx.find(hash); + indexed_transaction_set::const_iterator i = (gtxid.IsWtxid() ? get_iter_from_wtxid(gtxid.GetHash()) : mapTx.find(gtxid.GetHash())); if (i == mapTx.end()) return TxMempoolInfo(); return GetInfo(i); } +TxMempoolInfo CTxMemPool::info(const uint256& txid) const { return info(GenTxid{false, txid}); } + void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeDelta) { { @@ -917,8 +919,8 @@ bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); - // Estimate the overhead of mapTx to be 12 pointers + an allocation, as no exact formula for boost::multi_index_contained is implemented. - return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 12 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; + // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no exact formula for boost::multi_index_contained is implemented. + return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveUnbroadcastTx(const uint256& txid, const bool unchecked) { diff --git a/src/txmempool.h b/src/txmempool.h index 583f7614b7..4743e1b63a 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -198,6 +198,22 @@ struct mempoolentry_txid } }; +// extracts a transaction witness-hash from CTxMemPoolEntry or CTransactionRef +struct mempoolentry_wtxid +{ + typedef uint256 result_type; + result_type operator() (const CTxMemPoolEntry &entry) const + { + return entry.GetTx().GetWitnessHash(); + } + + result_type operator() (const CTransactionRef& tx) const + { + return tx->GetWitnessHash(); + } +}; + + /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all descendants). 
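The mempoolentry_wtxid extractor above, together with the index_by_wtxid tag and hashed_unique index added just below, gives mapTx a second lookup key by witness hash. As a compile-and-run illustration of that boost::multi_index pattern outside the mempool (Entry, by_wtxid, and the string keys are made up for the example):

    #include <boost/multi_index_container.hpp>
    #include <boost/multi_index/hashed_index.hpp>
    #include <boost/multi_index/member.hpp>
    #include <boost/multi_index/tag.hpp>

    #include <iostream>
    #include <string>

    struct Entry {
        std::string txid;
        std::string wtxid;
    };
    struct by_wtxid {};

    using EntrySet = boost::multi_index_container<
        Entry,
        boost::multi_index::indexed_by<
            // index 0: lookup by txid (mirrors mempoolentry_txid)
            boost::multi_index::hashed_unique<
                boost::multi_index::member<Entry, std::string, &Entry::txid>>,
            // tagged index: lookup by wtxid (mirrors index_by_wtxid)
            boost::multi_index::hashed_unique<
                boost::multi_index::tag<by_wtxid>,
                boost::multi_index::member<Entry, std::string, &Entry::wtxid>>>>;

    int main()
    {
        EntrySet entries;
        entries.insert({"txid-1", "wtxid-1"});

        // Find by the secondary key...
        auto it = entries.get<by_wtxid>().find("wtxid-1");
        if (it == entries.get<by_wtxid>().end()) return 1;

        // ...and project back to the primary index, which is what the new
        // get_iter_from_wtxid() helper does with mapTx.project<0>(...).
        auto primary = entries.project<0>(it);
        std::cout << primary->txid << " / " << primary->wtxid << "\n";
        return 0;
    }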
@@ -318,6 +334,7 @@ public: struct descendant_score {}; struct entry_time {}; struct ancestor_score {}; +struct index_by_wtxid {}; class CBlockPolicyEstimator; @@ -383,8 +400,9 @@ public: * * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * - * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: - * - transaction hash + * mapTx is a boost::multi_index that sorts the mempool on 5 criteria: + * - transaction hash (txid) + * - witness-transaction hash (wtxid) * - descendant feerate [we use max(feerate of tx, feerate of tx with all descendants)] * - time in mempool * - ancestor feerate [we use min(feerate of tx, feerate of tx with all unconfirmed ancestors)] @@ -469,6 +487,12 @@ public: boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique<mempoolentry_txid, SaltedTxidHasher>, + // sorted by wtxid + boost::multi_index::hashed_unique< + boost::multi_index::tag<index_by_wtxid>, + mempoolentry_wtxid, + SaltedTxidHasher + >, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag<descendant_score>, @@ -549,8 +573,11 @@ private: std::vector<indexed_transaction_set::const_iterator> GetSortedDepthAndScore() const EXCLUSIVE_LOCKS_REQUIRED(cs); - /** track locally submitted transactions to periodically retry initial broadcast */ - std::set<uint256> m_unbroadcast_txids GUARDED_BY(cs); + /** + * track locally submitted transactions to periodically retry initial broadcast + * map of txid -> wtxid + */ + std::map<uint256, uint256> m_unbroadcast_txids GUARDED_BY(cs); public: indirectmap<COutPoint, const CTransaction*> mapNextTx GUARDED_BY(cs); @@ -586,7 +613,7 @@ public: void clear(); void _clear() EXCLUSIVE_LOCKS_REQUIRED(cs); //lock free - bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); + bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb, bool wtxid=false); void queryHashes(std::vector<uint256>& vtxid) const; bool isSpent(const COutPoint& outpoint) const; unsigned int GetTransactionsUpdated() const; @@ -689,24 +716,34 @@ public: return totalTxSize; } - bool exists(const uint256& hash) const + bool exists(const GenTxid& gtxid) const { LOCK(cs); - return (mapTx.count(hash) != 0); + if (gtxid.IsWtxid()) { + return (mapTx.get<index_by_wtxid>().count(gtxid.GetHash()) != 0); + } + return (mapTx.count(gtxid.GetHash()) != 0); } + bool exists(const uint256& txid) const { return exists(GenTxid{false, txid}); } CTransactionRef get(const uint256& hash) const; + txiter get_iter_from_wtxid(const uint256& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(cs) + { + AssertLockHeld(cs); + return mapTx.project<0>(mapTx.get<index_by_wtxid>().find(wtxid)); + } TxMempoolInfo info(const uint256& hash) const; + TxMempoolInfo info(const GenTxid& gtxid) const; std::vector<TxMempoolInfo> infoAll() const; size_t DynamicMemoryUsage() const; /** Adds a transaction to the unbroadcast set */ - void AddUnbroadcastTx(const uint256& txid) { + void AddUnbroadcastTx(const uint256& txid, const uint256& wtxid) { LOCK(cs); // Sanity Check: the transaction should also be in the mempool if (exists(txid)) { - m_unbroadcast_txids.insert(txid); + m_unbroadcast_txids[txid] = wtxid; } } @@ -714,7 +751,7 @@ public: void RemoveUnbroadcastTx(const uint256& txid, const bool unchecked = false); /** Returns transactions in unbroadcast set */ - std::set<uint256> GetUnbroadcastTxs() const { + std::map<uint256, uint256> GetUnbroadcastTxs() const { LOCK(cs); return m_unbroadcast_txids; } diff --git a/src/uint256.cpp b/src/uint256.cpp index 
a943e71062..ee1b34eadd 100644 --- a/src/uint256.cpp +++ b/src/uint256.cpp @@ -12,20 +12,24 @@ template <unsigned int BITS> base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch) { - assert(vch.size() == sizeof(data)); - memcpy(data, vch.data(), sizeof(data)); + assert(vch.size() == sizeof(m_data)); + memcpy(m_data, vch.data(), sizeof(m_data)); } template <unsigned int BITS> std::string base_blob<BITS>::GetHex() const { - return HexStr(std::reverse_iterator<const uint8_t*>(data + sizeof(data)), std::reverse_iterator<const uint8_t*>(data)); + uint8_t m_data_rev[WIDTH]; + for (int i = 0; i < WIDTH; ++i) { + m_data_rev[i] = m_data[WIDTH - 1 - i]; + } + return HexStr(m_data_rev); } template <unsigned int BITS> void base_blob<BITS>::SetHex(const char* psz) { - memset(data, 0, sizeof(data)); + memset(m_data, 0, sizeof(m_data)); // skip leading spaces while (IsSpace(*psz)) @@ -39,7 +43,7 @@ void base_blob<BITS>::SetHex(const char* psz) size_t digits = 0; while (::HexDigit(psz[digits]) != -1) digits++; - unsigned char* p1 = (unsigned char*)data; + unsigned char* p1 = (unsigned char*)m_data; unsigned char* pend = p1 + WIDTH; while (digits > 0 && p1 < pend) { *p1 = ::HexDigit(psz[--digits]); diff --git a/src/uint256.h b/src/uint256.h index b36598f572..8ab747ef49 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -18,11 +18,11 @@ class base_blob { protected: static constexpr int WIDTH = BITS / 8; - uint8_t data[WIDTH]; + uint8_t m_data[WIDTH]; public: base_blob() { - memset(data, 0, sizeof(data)); + memset(m_data, 0, sizeof(m_data)); } explicit base_blob(const std::vector<unsigned char>& vch); @@ -30,17 +30,17 @@ public: bool IsNull() const { for (int i = 0; i < WIDTH; i++) - if (data[i] != 0) + if (m_data[i] != 0) return false; return true; } void SetNull() { - memset(data, 0, sizeof(data)); + memset(m_data, 0, sizeof(m_data)); } - inline int Compare(const base_blob& other) const { return memcmp(data, other.data, sizeof(data)); } + inline int Compare(const base_blob& other) const { return memcmp(m_data, other.m_data, sizeof(m_data)); } friend inline bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; } friend inline bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; } @@ -51,34 +51,37 @@ public: void SetHex(const std::string& str); std::string ToString() const; + const unsigned char* data() const { return m_data; } + unsigned char* data() { return m_data; } + unsigned char* begin() { - return &data[0]; + return &m_data[0]; } unsigned char* end() { - return &data[WIDTH]; + return &m_data[WIDTH]; } const unsigned char* begin() const { - return &data[0]; + return &m_data[0]; } const unsigned char* end() const { - return &data[WIDTH]; + return &m_data[WIDTH]; } unsigned int size() const { - return sizeof(data); + return sizeof(m_data); } uint64_t GetUint64(int pos) const { - const uint8_t* ptr = data + pos * 8; + const uint8_t* ptr = m_data + pos * 8; return ((uint64_t)ptr[0]) | \ ((uint64_t)ptr[1]) << 8 | \ ((uint64_t)ptr[2]) << 16 | \ @@ -92,13 +95,13 @@ public: template<typename Stream> void Serialize(Stream& s) const { - s.write((char*)data, sizeof(data)); + s.write((char*)m_data, sizeof(m_data)); } template<typename Stream> void Unserialize(Stream& s) { - s.read((char*)data, sizeof(data)); + s.read((char*)m_data, sizeof(m_data)); } }; diff --git a/src/util/check.h b/src/util/check.h index 5c0f32cf51..9edf394492 100644 --- a/src/util/check.h +++ b/src/util/check.h @@ -25,7 +25,7 @@ class NonFatalCheckError : public 
std::runtime_error * - where the condition is assumed to be true, not for error handling or validating user input * - where a failure to fulfill the condition is recoverable and does not abort the program * - * For example in RPC code, where it is undersirable to crash the whole program, this can be generally used to replace + * For example in RPC code, where it is undesirable to crash the whole program, this can be generally used to replace * asserts or recoverable logic errors. A NonFatalCheckError in RPC code is caught and passed as a string to the RPC * caller, which can then report the issue to the developers. */ @@ -42,4 +42,18 @@ class NonFatalCheckError : public std::runtime_error } \ } while (false) +#if defined(NDEBUG) +#error "Cannot compile without assertions!" +#endif + +/** Helper for Assert(). TODO remove in C++14 and replace `decltype(get_pure_r_value(val))` with `T` (templated lambda) */ +template <typename T> +T get_pure_r_value(T&& val) +{ + return std::forward<T>(val); +} + +/** Identity function. Abort if the value compares equal to zero */ +#define Assert(val) [&]() -> decltype(get_pure_r_value(val)) { auto&& check = (val); assert(#val && check); return std::forward<decltype(get_pure_r_value(val))>(check); }() + #endif // BITCOIN_UTIL_CHECK_H diff --git a/src/util/settings.cpp b/src/util/settings.cpp index e4979df755..b92b1d30c3 100644 --- a/src/util/settings.cpp +++ b/src/util/settings.cpp @@ -4,6 +4,7 @@ #include <util/settings.h> +#include <tinyformat.h> #include <univalue.h> namespace util { @@ -12,12 +13,13 @@ namespace { enum class Source { FORCED, COMMAND_LINE, + RW_SETTINGS, CONFIG_FILE_NETWORK_SECTION, CONFIG_FILE_DEFAULT_SECTION }; //! Merge settings from multiple sources in precedence order: -//! Forced config > command line > config file network-specific section > config file default section +//! Forced config > command line > read-write settings file > config file network-specific section > config file default section //! //! This function is provided with a callback function fn that contains //! specific logic for how to merge the sources. @@ -32,6 +34,10 @@ static void MergeSettings(const Settings& settings, const std::string& section, if (auto* values = FindKey(settings.command_line_options, name)) { fn(SettingsSpan(*values), Source::COMMAND_LINE); } + // Merge in the read-write settings + if (const SettingsValue* value = FindKey(settings.rw_settings, name)) { + fn(SettingsSpan(*value), Source::RW_SETTINGS); + } // Merge in the network-specific section of the config file if (!section.empty()) { if (auto* map = FindKey(settings.ro_config, section)) { @@ -49,6 +55,62 @@ static void MergeSettings(const Settings& settings, const std::string& section, } } // namespace +bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& values, std::vector<std::string>& errors) +{ + values.clear(); + errors.clear(); + + fsbridge::ifstream file; + file.open(path); + if (!file.is_open()) return true; // Ok for file not to exist. + + SettingsValue in; + if (!in.read(std::string{std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()})) { + errors.emplace_back(strprintf("Unable to parse settings file %s", path.string())); + return false; + } + + if (file.fail()) { + errors.emplace_back(strprintf("Failed reading settings file %s", path.string())); + return false; + } + file.close(); // Done with file descriptor. Release while copying data. 
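ReadSettings() above (and WriteSettings(), which follows) store the read-write settings as a single JSON object of name-to-value pairs built with UniValue. A small self-contained round trip of that format, using the public pushKV() rather than the internal __pushKV() the patch calls, and skipping the file I/O and duplicate-key handling:

    #include <univalue.h>

    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, UniValue> values;
        values.emplace("name", UniValue("value"));

        // Serialize, roughly what WriteSettings() writes to settings.json.
        UniValue out(UniValue::VOBJ);
        for (const auto& kv : values) out.pushKV(kv.first, kv.second);
        const std::string text = out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4);

        // Parse it back, as ReadSettings() does, rejecting non-object documents.
        UniValue in;
        if (!in.read(text) || !in.isObject()) return 1;
        std::cout << in["name"].get_str() << "\n"; // prints "value"
        return 0;
    }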
+ + if (!in.isObject()) { + errors.emplace_back(strprintf("Found non-object value %s in settings file %s", in.write(), path.string())); + return false; + } + + const std::vector<std::string>& in_keys = in.getKeys(); + const std::vector<SettingsValue>& in_values = in.getValues(); + for (size_t i = 0; i < in_keys.size(); ++i) { + auto inserted = values.emplace(in_keys[i], in_values[i]); + if (!inserted.second) { + errors.emplace_back(strprintf("Found duplicate key %s in settings file %s", in_keys[i], path.string())); + } + } + return errors.empty(); +} + +bool WriteSettings(const fs::path& path, + const std::map<std::string, SettingsValue>& values, + std::vector<std::string>& errors) +{ + SettingsValue out(SettingsValue::VOBJ); + for (const auto& value : values) { + out.__pushKV(value.first, value.second); + } + fsbridge::ofstream file; + file.open(path); + if (file.fail()) { + errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", path.string())); + return false; + } + file << out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4) << std::endl; + file.close(); + return true; +} + SettingsValue GetSetting(const Settings& settings, const std::string& section, const std::string& name, diff --git a/src/util/settings.h b/src/util/settings.h index 1d03639fa2..ed36349232 100644 --- a/src/util/settings.h +++ b/src/util/settings.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_UTIL_SETTINGS_H #define BITCOIN_UTIL_SETTINGS_H +#include <fs.h> + #include <map> #include <string> #include <vector> @@ -24,19 +26,31 @@ namespace util { //! https://github.com/bitcoin/bitcoin/pull/15934/files#r337691812) using SettingsValue = UniValue; -//! Stored bitcoin settings. This struct combines settings from the command line -//! and a read-only configuration file. +//! Stored settings. This struct combines settings from the command line, a +//! read-only configuration file, and a read-write runtime settings file. struct Settings { //! Map of setting name to forced setting value. std::map<std::string, SettingsValue> forced_settings; //! Map of setting name to list of command line values. std::map<std::string, std::vector<SettingsValue>> command_line_options; + //! Map of setting name to read-write file setting value. + std::map<std::string, SettingsValue> rw_settings; //! Map of config section name and setting name to list of config file values. std::map<std::string, std::map<std::string, std::vector<SettingsValue>>> ro_config; }; +//! Read settings file. +bool ReadSettings(const fs::path& path, + std::map<std::string, SettingsValue>& values, + std::vector<std::string>& errors); + +//! Write settings file. +bool WriteSettings(const fs::path& path, + const std::map<std::string, SettingsValue>& values, + std::vector<std::string>& errors); + //! Get settings value from combined sources: forced settings, command line -//! arguments and the read-only config file. +//! arguments, runtime read-write settings, and the read-only config file. //! //! @param ignore_default_section_config - ignore values in the default section //! 
of the config file (part before any diff --git a/src/util/strencodings.cpp b/src/util/strencodings.cpp index 3a903b6897..d10f92ffe6 100644 --- a/src/util/strencodings.cpp +++ b/src/util/strencodings.cpp @@ -569,3 +569,16 @@ std::string Capitalize(std::string str) str[0] = ToUpper(str.front()); return str; } + +std::string HexStr(const Span<const uint8_t> s) +{ + std::string rv; + static constexpr char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; + rv.reserve(s.size() * 2); + for (uint8_t v: s) { + rv.push_back(hexmap[v >> 4]); + rv.push_back(hexmap[v & 15]); + } + return rv; +} diff --git a/src/util/strencodings.h b/src/util/strencodings.h index bd988f1410..eaa0fa9992 100644 --- a/src/util/strencodings.h +++ b/src/util/strencodings.h @@ -10,6 +10,7 @@ #define BITCOIN_UTIL_STRENCODINGS_H #include <attributes.h> +#include <span.h> #include <cstdint> #include <iterator> @@ -119,27 +120,11 @@ NODISCARD bool ParseUInt64(const std::string& str, uint64_t *out); */ NODISCARD bool ParseDouble(const std::string& str, double *out); -template<typename T> -std::string HexStr(const T itbegin, const T itend) -{ - std::string rv; - static const char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; - rv.reserve(std::distance(itbegin, itend) * 2); - for(T it = itbegin; it < itend; ++it) - { - unsigned char val = (unsigned char)(*it); - rv.push_back(hexmap[val>>4]); - rv.push_back(hexmap[val&15]); - } - return rv; -} - -template<typename T> -inline std::string HexStr(const T& vch) -{ - return HexStr(vch.begin(), vch.end()); -} +/** + * Convert a span of bytes to a lower-case hexadecimal string. + */ +std::string HexStr(const Span<const uint8_t> s); +inline std::string HexStr(const Span<const char> s) { return HexStr(MakeUCharSpan(s)); } /** * Format a paragraph of text to a fixed width, adding spaces for diff --git a/src/util/system.cpp b/src/util/system.cpp index 7e7ba840cd..7b74789b32 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -6,6 +6,10 @@ #include <sync.h> #include <util/system.h> +#ifdef HAVE_BOOST_PROCESS +#include <boost/process.hpp> +#endif // HAVE_BOOST_PROCESS + #include <chainparamsbase.h> #include <util/strencodings.h> #include <util/string.h> @@ -73,6 +77,7 @@ const int64_t nStartupTime = GetTime(); const char * const BITCOIN_CONF_FILENAME = "bitcoin.conf"; +const char * const BITCOIN_SETTINGS_FILENAME = "settings.json"; ArgsManager gArgs; @@ -372,6 +377,84 @@ bool ArgsManager::IsArgSet(const std::string& strArg) const return !GetSetting(strArg).isNull(); } +bool ArgsManager::InitSettings(std::string& error) +{ + if (!GetSettingsPath()) { + return true; // Do nothing if settings file disabled. + } + + std::vector<std::string> errors; + if (!ReadSettingsFile(&errors)) { + error = strprintf("Failed loading settings file:\n- %s\n", Join(errors, "\n- ")); + return false; + } + if (!WriteSettingsFile(&errors)) { + error = strprintf("Failed saving settings file:\n- %s\n", Join(errors, "\n- ")); + return false; + } + return true; +} + +bool ArgsManager::GetSettingsPath(fs::path* filepath, bool temp) const +{ + if (IsArgNegated("-settings")) { + return false; + } + if (filepath) { + std::string settings = GetArg("-settings", BITCOIN_SETTINGS_FILENAME); + *filepath = fs::absolute(temp ? 
settings + ".tmp" : settings, GetDataDir(/* net_specific= */ true)); + } + return true; +} + +static void SaveErrors(const std::vector<std::string> errors, std::vector<std::string>* error_out) +{ + for (const auto& error : errors) { + if (error_out) { + error_out->emplace_back(error); + } else { + LogPrintf("%s\n", error); + } + } +} + +bool ArgsManager::ReadSettingsFile(std::vector<std::string>* errors) +{ + fs::path path; + if (!GetSettingsPath(&path, /* temp= */ false)) { + return true; // Do nothing if settings file disabled. + } + + LOCK(cs_args); + m_settings.rw_settings.clear(); + std::vector<std::string> read_errors; + if (!util::ReadSettings(path, m_settings.rw_settings, read_errors)) { + SaveErrors(read_errors, errors); + return false; + } + return true; +} + +bool ArgsManager::WriteSettingsFile(std::vector<std::string>* errors) const +{ + fs::path path, path_tmp; + if (!GetSettingsPath(&path, /* temp= */ false) || !GetSettingsPath(&path_tmp, /* temp= */ true)) { + throw std::logic_error("Attempt to write settings file when dynamic settings are disabled."); + } + + LOCK(cs_args); + std::vector<std::string> write_errors; + if (!util::WriteSettings(path_tmp, m_settings.rw_settings, write_errors)) { + SaveErrors(write_errors, errors); + return false; + } + if (!RenameOver(path_tmp, path)) { + SaveErrors({strprintf("Failed renaming settings file %s to %s\n", path_tmp.string(), path.string())}, errors); + return false; + } + return true; +} + bool ArgsManager::IsArgNegated(const std::string& strArg) const { return GetSetting(strArg).isFalse(); @@ -893,6 +976,9 @@ void ArgsManager::LogArgs() const for (const auto& section : m_settings.ro_config) { logArgsPrefix("Config file arg:", section.first, section.second); } + for (const auto& setting : m_settings.rw_settings) { + LogPrintf("Setting file arg: %s = %s\n", setting.first, setting.second.write()); + } logArgsPrefix("Command-line arg:", "", m_settings.command_line_options); } @@ -939,7 +1025,7 @@ bool FileCommit(FILE *file) return false; } #else - #if defined(__linux__) || defined(__NetBSD__) + #if defined(HAVE_FDATASYNC) if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync LogPrintf("%s: fdatasync failed: %d\n", __func__, errno); return false; @@ -1079,6 +1165,43 @@ void runCommand(const std::string& strCommand) } #endif +#ifdef HAVE_BOOST_PROCESS +UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in) +{ + namespace bp = boost::process; + + UniValue result_json; + bp::opstream stdin_stream; + bp::ipstream stdout_stream; + bp::ipstream stderr_stream; + + if (str_command.empty()) return UniValue::VNULL; + + bp::child c( + str_command, + bp::std_out > stdout_stream, + bp::std_err > stderr_stream, + bp::std_in < stdin_stream + ); + if (!str_std_in.empty()) { + stdin_stream << str_std_in << std::endl; + } + stdin_stream.pipe().close(); + + std::string result; + std::string error; + std::getline(stdout_stream, result); + std::getline(stderr_stream, error); + + c.wait(); + const int n_error = c.exit_code(); + if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error)); + if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result); + + return result_json; +} +#endif // HAVE_BOOST_PROCESS + void SetupEnvironment() { #ifdef HAVE_MALLOPT_ARENA_MAX diff --git a/src/util/system.h b/src/util/system.h index a5eea5dfab..1df194ca84 100644 --- 
a/src/util/system.h +++ b/src/util/system.h @@ -37,10 +37,13 @@ #include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted +class UniValue; + // Application startup time (used for uptime calculation) int64_t GetStartupTime(); extern const char * const BITCOIN_CONF_FILENAME; +extern const char * const BITCOIN_SETTINGS_FILENAME; void SetupEnvironment(); bool SetupNetworking(); @@ -95,6 +98,16 @@ std::string ShellEscape(const std::string& arg); #if HAVE_SYSTEM void runCommand(const std::string& strCommand); #endif +#ifdef HAVE_BOOST_PROCESS +/** + * Execute a command which returns JSON, and parse the result. + * + * @param str_command The command to execute, including any arguments + * @param str_std_in string to pass to stdin + * @return parsed JSON + */ +UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in=""); +#endif // HAVE_BOOST_PROCESS /** * Most paths passed as configuration arguments are treated as relative to @@ -334,6 +347,39 @@ public: Optional<unsigned int> GetArgFlags(const std::string& name) const; /** + * Read and update settings file with saved settings. This needs to be + * called after SelectParams() because the settings file location is + * network-specific. + */ + bool InitSettings(std::string& error); + + /** + * Get settings file path, or return false if read-write settings were + * disabled with -nosettings. + */ + bool GetSettingsPath(fs::path* filepath = nullptr, bool temp = false) const; + + /** + * Read settings file. Push errors to vector, or log them if null. + */ + bool ReadSettingsFile(std::vector<std::string>* errors = nullptr); + + /** + * Write settings file. Push errors to vector, or log them if null. + */ + bool WriteSettingsFile(std::vector<std::string>* errors = nullptr) const; + + /** + * Access settings with lock held. + */ + template <typename Fn> + void LockSettings(Fn&& fn) + { + LOCK(cs_args); + fn(m_settings); + } + + /** * Log the config file options and the command line arguments, * useful for troubleshooting. */ diff --git a/src/util/time.h b/src/util/time.h index b00c25f67c..af934e423b 100644 --- a/src/util/time.h +++ b/src/util/time.h @@ -15,10 +15,15 @@ void UninterruptibleSleep(const std::chrono::microseconds& n); /** * Helper to count the seconds of a duration. * - * All durations should be using std::chrono and calling this should generally be avoided in code. Though, it is still - * preferred to an inline t.count() to protect against a reliance on the exact type of t. + * All durations should be using std::chrono and calling this should generally + * be avoided in code. Though, it is still preferred to an inline t.count() to + * protect against a reliance on the exact type of t. + * + * This helper is used to convert durations before passing them over an + * interface that doesn't support std::chrono (e.g. RPC, debug log, or the GUI) */ inline int64_t count_seconds(std::chrono::seconds t) { return t.count(); } +inline int64_t count_microseconds(std::chrono::microseconds t) { return t.count(); } /** * DEPRECATED diff --git a/src/util/ui_change_type.h b/src/util/ui_change_type.h new file mode 100644 index 0000000000..1db761a18d --- /dev/null +++ b/src/util/ui_change_type.h @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
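The count_seconds()/count_microseconds() helpers in the util/time.h hunk above exist to pin the duration type at the call site before handing a raw integer to an interface that cannot take std::chrono. A minimal sketch of why that is safer than calling .count() inline (the helpers are copied in shape from the patch; the timeout value is made up):

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    // Same shape as the helpers in util/time.h: the parameter type fixes the
    // unit, instead of relying on whatever .count() happens to return.
    inline int64_t count_seconds(std::chrono::seconds t) { return t.count(); }
    inline int64_t count_microseconds(std::chrono::microseconds t) { return t.count(); }

    int main()
    {
        const std::chrono::seconds timeout{90};
        // seconds -> microseconds converts implicitly because it is exact;
        // the lossy reverse direction would not compile without a cast.
        std::cout << count_seconds(timeout) << " s = "
                  << count_microseconds(timeout) << " us\n";
        return 0;
    }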
+ +#ifndef BITCOIN_UTIL_UI_CHANGE_TYPE_H +#define BITCOIN_UTIL_UI_CHANGE_TYPE_H + +/** General change type (added, updated, removed). */ +enum ChangeType { + CT_NEW, + CT_UPDATED, + CT_DELETED +}; + +#endif // BITCOIN_UTIL_UI_CHANGE_TYPE_H diff --git a/src/validation.cpp b/src/validation.cpp index 8bb03fdb97..cf2f9dde62 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -20,6 +20,7 @@ #include <index/txindex.h> #include <logging.h> #include <logging/timer.h> +#include <node/ui_interface.h> #include <optional.h> #include <policy/fees.h> #include <policy/policy.h> @@ -36,9 +37,9 @@ #include <tinyformat.h> #include <txdb.h> #include <txmempool.h> -#include <ui_interface.h> #include <uint256.h> #include <undo.h> +#include <util/check.h> // For NDEBUG compile time check #include <util/moneystr.h> #include <util/rbf.h> #include <util/strencodings.h> @@ -51,10 +52,6 @@ #include <boost/algorithm/string/replace.hpp> -#if defined(NDEBUG) -# error "Bitcoin cannot be compiled without assertions." -#endif - #define MICRO 0.000001 #define MILLI 0.001 @@ -142,7 +139,6 @@ bool fPruneMode = false; bool fRequireStandard = true; bool fCheckBlockIndex = false; bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED; -size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; @@ -576,8 +572,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) CAmount& nConflictingFees = ws.m_conflicting_fees; size_t& nConflictingSize = ws.m_conflicting_size; - if (!CheckTransaction(tx, state)) + if (!CheckTransaction(tx, state)) { return false; // state filled in by CheckTransaction + } // Coinbase is only valid in a block, not as a loose transaction if (tx.IsCoinBase()) @@ -687,12 +684,13 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) CAmount nFees = 0; if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) { - return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString()); + return false; // state filled in by CheckTxInputs } // Check for non-standard pay-to-script-hash in inputs - if (fRequireStandard && !AreInputsStandard(tx, m_view)) - return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs"); + if (fRequireStandard && !AreInputsStandard(tx, m_view)) { + return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); + } // Check for non-standard witness in P2WSH if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view)) @@ -941,7 +939,7 @@ bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, Precompute if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) && !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) { // Only the witness is missing, so the transaction itself may be fine. 
- state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, + state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, state.GetRejectReason(), state.GetDebugMessage()); } return false; // state filled in by CheckInputScripts @@ -1091,45 +1089,33 @@ bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTrans return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept); } -/** - * Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock. - * If blockIndex is provided, the transaction is fetched from the corresponding block. - */ -bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, const CBlockIndex* const block_index) +CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) { LOCK(cs_main); - if (!block_index) { - CTransactionRef ptx = mempool.get(hash); - if (ptx) { - txOut = ptx; - return true; - } - - if (g_txindex) { - return g_txindex->FindTx(hash, hashBlock, txOut); - } - } else { + if (block_index) { CBlock block; if (ReadBlockFromDisk(block, block_index, consensusParams)) { for (const auto& tx : block.vtx) { if (tx->GetHash() == hash) { - txOut = tx; hashBlock = block_index->GetBlockHash(); - return true; + return tx; } } } + return nullptr; } - - return false; + if (mempool) { + CTransactionRef ptx = mempool->get(hash); + if (ptx) return ptx; + } + if (g_txindex) { + CTransactionRef tx; + if (g_txindex->FindTx(hash, hashBlock, tx)) return tx; + } + return nullptr; } - - - - - ////////////////////////////////////////////////////////////////////////////// // // CBlock and CBlockIndex @@ -1213,8 +1199,8 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) { return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(), - HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE), - HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE)); + HexStr(blk_start), + HexStr(message_start)); } if (blk_size > MAX_SIZE) { @@ -1286,9 +1272,10 @@ void CChainState::InitCoinsDB( leveldb_name, cache_size_bytes, in_memory, should_wipe); } -void CChainState::InitCoinsCache() +void CChainState::InitCoinsCache(size_t cache_size_bytes) { assert(m_coins_views != nullptr); + m_coinstip_cache_size_bytes = cache_size_bytes; m_coins_views->InitCache(); } @@ -1321,12 +1308,6 @@ bool CChainState::IsInitialBlockDownload() const static CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr; -BlockMap& BlockIndex() -{ - LOCK(::cs_main); - return g_chainman.m_blockman.m_block_index; -} - static void AlertNotify(const std::string& strMessage) { uiInterface.NotifyAlertChanged(); @@ -1430,12 +1411,12 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c pindexBestHeader = ::ChainActive().Tip(); } - LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, + LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime())); CBlockIndex *tip = ::ChainActive().Tip(); assert (tip); - LogPrintf("%s: current 
best=%s height=%d log2_work=%.8g date=%s\n", __func__, + LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__, tip->GetBlockHash().ToString(), ::ChainActive().Height(), log(tip->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(tip->GetBlockTime())); CheckForkWarningConditions(); @@ -2247,20 +2228,20 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, return true; } -CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool& tx_pool) +CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool) { return this->GetCoinsCacheSizeState( tx_pool, - nCoinCacheUsage, + m_coinstip_cache_size_bytes, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000); } CoinsCacheSizeState CChainState::GetCoinsCacheSizeState( - const CTxMemPool& tx_pool, + const CTxMemPool* tx_pool, size_t max_coins_cache_size_bytes, size_t max_mempool_size_bytes) { - int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage(); + const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0; int64_t cacheSize = CoinsTip().DynamicMemoryUsage(); int64_t nTotalSpace = max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0); @@ -2299,7 +2280,7 @@ bool CChainState::FlushStateToDisk( { bool fFlushForPrune = false; bool fDoFullFlush = false; - CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(::mempool); + CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&::mempool); LOCK(cs_LastBlockFile); if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { if (nManualPruneHeight > 0) { @@ -2485,7 +2466,7 @@ void static UpdateTip(const CBlockIndex* pindexNew, const CChainParams& chainPar if (nUpgraded > 0) AppendWarning(warning_messages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded)); } - LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__, + LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion, log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx, FormatISO8601DateTime(pindexNew->GetBlockTime()), @@ -3454,7 +3435,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) { if (commitpos == -1) { uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr); - CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin()); + CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot); CTxOut out; out.nValue = 0; out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT); @@ -3599,7 +3580,7 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) { return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__)); } - CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin()); + CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness); if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) 
{ return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__)); } @@ -3649,8 +3630,10 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS return true; } - if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) - return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString()); + if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) { + LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString()); + return false; + } // Get prev block index CBlockIndex* pindexPrev = nullptr; @@ -4324,7 +4307,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, } } // check level 3: check for inconsistencies during memory-only disconnect of tip blocks - if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= nCoinCacheUsage) { + if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= ::ChainstateActive().m_coinstip_cache_size_bytes) { assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { @@ -4605,13 +4588,13 @@ void CChainState::UnloadBlockIndex() { // May NOT be used after any connections are up as much // of the peer-processing logic assumes a consistent // block index state -void UnloadBlockIndex() +void UnloadBlockIndex(CTxMemPool* mempool) { LOCK(cs_main); g_chainman.Unload(); pindexBestInvalid = nullptr; pindexBestHeader = nullptr; - mempool.clear(); + if (mempool) mempool->clear(); vinfoBlockFile.clear(); nLastBlockFile = 0; setDirtyBlockIndex.clear(); @@ -4716,7 +4699,6 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi if (dbp) dbp->nPos = nBlockPos; blkdat.SetLimit(nBlockPos + nSize); - blkdat.SetPos(nBlockPos); std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); CBlock& block = *pblock; blkdat >> block; @@ -4988,6 +4970,39 @@ std::string CChainState::ToString() tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null"); } +bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) +{ + if (coinstip_size == m_coinstip_cache_size_bytes && + coinsdb_size == m_coinsdb_cache_size_bytes) { + // Cache sizes are unchanged, no need to continue. + return true; + } + size_t old_coinstip_size = m_coinstip_cache_size_bytes; + m_coinstip_cache_size_bytes = coinstip_size; + m_coinsdb_cache_size_bytes = coinsdb_size; + CoinsDB().ResizeCache(coinsdb_size); + + LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n", + this->ToString(), coinsdb_size * (1.0 / 1024 / 1024)); + LogPrintf("[%s] resized coinstip cache to %.1f MiB\n", + this->ToString(), coinstip_size * (1.0 / 1024 / 1024)); + + BlockValidationState state; + const CChainParams& chainparams = Params(); + + bool ret; + + if (coinstip_size > old_coinstip_size) { + // Likely no need to flush if cache sizes have grown. + ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED); + } else { + // Otherwise, flush state to disk and deallocate the in-memory coins map. 
+ ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS); + CoinsTip().ReallocateCache(); + } + return ret; +} + std::string CBlockFileInfo::ToString() const { return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast)); @@ -5091,19 +5106,22 @@ bool LoadMempool(CTxMemPool& pool) } // TODO: remove this try except in v0.22 + std::map<uint256, uint256> unbroadcast_txids; try { - std::set<uint256> unbroadcast_txids; file >> unbroadcast_txids; unbroadcast = unbroadcast_txids.size(); - - for (const auto& txid : unbroadcast_txids) { - pool.AddUnbroadcastTx(txid); - } } catch (const std::exception&) { // mempool.dat files created prior to v0.21 will not have an // unbroadcast set. No need to log a failure if parsing fails here. } - + for (const auto& elem : unbroadcast_txids) { + // Don't add unbroadcast transactions that didn't get back into the + // mempool. + const CTransactionRef& added_tx = pool.get(elem.first); + if (added_tx != nullptr) { + pool.AddUnbroadcastTx(elem.first, added_tx->GetWitnessHash()); + } + } } catch (const std::exception& e) { LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what()); return false; @@ -5119,7 +5137,7 @@ bool DumpMempool(const CTxMemPool& pool) std::map<uint256, CAmount> mapDeltas; std::vector<TxMempoolInfo> vinfo; - std::set<uint256> unbroadcast_txids; + std::map<uint256, uint256> unbroadcast_txids; static Mutex dump_mutex; LOCK(dump_mutex); @@ -5251,10 +5269,10 @@ CChainState& ChainstateManager::InitializeChainstate(const uint256& snapshot_blo return *to_modify; } -CChain& ChainstateManager::ActiveChain() const +CChainState& ChainstateManager::ActiveChainstate() const { assert(m_active_chainstate); - return m_active_chainstate->m_chain; + return *m_active_chainstate; } bool ChainstateManager::IsSnapshotActive() const @@ -5293,3 +5311,33 @@ void ChainstateManager::Reset() m_active_chainstate = nullptr; m_snapshot_validated = false; } + +void ChainstateManager::MaybeRebalanceCaches() +{ + if (m_ibd_chainstate && !m_snapshot_chainstate) { + LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n"); + // Allocate everything to the IBD chainstate. + m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache); + } + else if (m_snapshot_chainstate && !m_ibd_chainstate) { + LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n"); + // Allocate everything to the snapshot chainstate. + m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache); + } + else if (m_ibd_chainstate && m_snapshot_chainstate) { + // If both chainstates exist, determine who needs more cache based on IBD status. + // + // Note: shrink caches first so that we don't inadvertently overwhelm available memory. 
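Aside (not part of the patch; the hunk continues below with the concrete resize calls): when both an IBD and a snapshot chainstate exist, the cache budget is split 5%/95% in favour of whichever chainstate is still doing the heavy validation work. A minimal C++ sketch of that rule, with a hypothetical helper name:

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    // Sketch only: mirrors the 0.05/0.95 split MaybeRebalanceCaches() applies when
    // both chainstates exist. first = IBD chainstate share, second = snapshot share.
    std::pair<size_t, size_t> SplitCoinsTipCache(int64_t total_bytes, bool snapshot_in_ibd)
    {
        const size_t small_share = total_bytes * 0.05;
        const size_t large_share = total_bytes * 0.95;
        // The snapshot chainstate gets the large share while it is still in IBD;
        // afterwards the background IBD chainstate gets it.
        return snapshot_in_ibd ? std::make_pair(small_share, large_share)
                               : std::make_pair(large_share, small_share);
    }

For a 450 MiB coinstip budget, for example, this works out to roughly 427.5 MiB for the busy chainstate and 22.5 MiB for the other.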
+ if (m_snapshot_chainstate->IsInitialBlockDownload()) { + m_ibd_chainstate->ResizeCoinsCaches( + m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05); + m_snapshot_chainstate->ResizeCoinsCaches( + m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95); + } else { + m_snapshot_chainstate->ResizeCoinsCaches( + m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05); + m_ibd_chainstate->ResizeCoinsCaches( + m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95); + } + } +} diff --git a/src/validation.h b/src/validation.h index e403bcb51a..534162d64a 100644 --- a/src/validation.h +++ b/src/validation.h @@ -74,7 +74,6 @@ static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60; static const bool DEFAULT_CHECKPOINTS_ENABLED = true; static const bool DEFAULT_TXINDEX = false; static const char* const DEFAULT_BLOCKFILTERINDEX = "0"; -static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100; /** Default for -persistmempool */ static const bool DEFAULT_PERSIST_MEMPOOL = true; /** Default for using fee filter */ @@ -128,7 +127,6 @@ extern bool g_parallel_script_checks; extern bool fRequireStandard; extern bool fCheckBlockIndex; extern bool fCheckpointsEnabled; -extern size_t nCoinCacheUsage; /** A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) */ extern CFeeRate minRelayTxFee; /** If the tip is older than this (in seconds), the node is considered to be in initial block download. */ @@ -162,11 +160,22 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi /** Ensures we have a genesis block in the block tree, possibly writing one to disk. */ bool LoadGenesisBlock(const CChainParams& chainparams); /** Unload database information */ -void UnloadBlockIndex(); +void UnloadBlockIndex(CTxMemPool* mempool); /** Run an instance of the script checking thread */ void ThreadScriptCheck(int worker_num); -/** Retrieve a transaction (from memory pool, or from disk, if possible) */ -bool GetTransaction(const uint256& hash, CTransactionRef& tx, const Consensus::Params& params, uint256& hashBlock, const CBlockIndex* const blockIndex = nullptr); +/** + * Return transaction from the block at block_index. + * If block_index is not provided, fall back to mempool. + * If mempool is not provided or the tx couldn't be found in mempool, fall back to g_txindex. + * + * @param[in] block_index The block to read from disk, or nullptr + * @param[in] mempool If block_index is not provided, look in the mempool, if provided + * @param[in] hash The txid + * @param[in] consensusParams The params + * @param[out] hashBlock The hash of block_index, if the tx was found via block_index + * @returns The tx if found, otherwise nullptr + */ +CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock); /** * Find the best known block, and make it the tip of the block chain * @@ -522,7 +531,7 @@ public: //! Initialize the in-memory coins cache (to be done after the health of the on-disk database //! is verified). - void InitCoinsCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + void InitCoinsCache(size_t cache_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); //! @returns whether or not the CoinsViews object has been fully initialized and we can //! safely flush this object to disk. @@ -571,6 +580,17 @@ public: //! Destructs all objects related to accessing the UTXO set. 
void ResetCoinsViews() { m_coins_views.reset(); } + //! The cache size of the on-disk coins view. + size_t m_coinsdb_cache_size_bytes{0}; + + //! The cache size of the in-memory coins view. + size_t m_coinstip_cache_size_bytes{0}; + + //! Resize the CoinsViews caches dynamically and flush state to disk. + //! @returns true unless an error occurred during the flush. + bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with @@ -654,11 +674,11 @@ public: //! Dictates whether we need to flush the cache to disk or not. //! //! @return the state of the size of the coins cache. - CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool& tx_pool) + CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool* tx_pool) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); CoinsCacheSizeState GetCoinsCacheSizeState( - const CTxMemPool& tx_pool, + const CTxMemPool* tx_pool, size_t max_coins_cache_size_bytes, size_t max_mempool_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); @@ -787,6 +807,14 @@ public: //! chainstate to avoid duplicating block metadata. BlockManager m_blockman GUARDED_BY(::cs_main); + //! The total number of bytes available for us to use across all in-memory + //! coins caches. This will be split somehow across chainstates. + int64_t m_total_coinstip_cache{0}; + // + //! The total number of bytes available for us to use across all leveldb + //! coins databases. This will be split somehow across chainstates. + int64_t m_total_coinsdb_cache{0}; + //! Instantiate a new chainstate and assign it based upon whether it is //! from a snapshot. //! @@ -799,7 +827,8 @@ public: std::vector<CChainState*> GetAll(); //! The most-work chain. - CChain& ActiveChain() const; + CChainState& ActiveChainstate() const; + CChain& ActiveChain() const { return ActiveChainstate().m_chain; } int ActiveHeight() const { return ActiveChain().Height(); } CBlockIndex* ActiveTip() const { return ActiveChain().Tip(); } @@ -844,7 +873,7 @@ public: * validationinterface callback. * * @param[in] pblock The block we want to process. - * @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers. + * @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources. * @param[out] fNewBlock A boolean which is set to indicate if the block was first received via this call * @returns If the block was processed, independently of block validity */ @@ -874,20 +903,21 @@ public: //! Clear (deconstruct) chainstate data. void Reset(); + + //! Check to see if caches are out of balance and if so, call + //! ResizeCoinsCaches() as needed. + void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); }; /** DEPRECATED! Please use node.chainman instead. May only be used in validation.cpp internally */ extern ChainstateManager g_chainman GUARDED_BY(::cs_main); -/** @returns the most-work valid chainstate. */ +/** Please prefer the identical ChainstateManager::ActiveChainstate */ CChainState& ChainstateActive(); -/** @returns the most-work chain. */ +/** Please prefer the identical ChainstateManager::ActiveChain */ CChain& ChainActive(); -/** @returns the global block index map. 
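Aside (not part of the patch): the reworked GetTransaction() declared above returns the transaction directly (or nullptr) instead of filling an out-parameter and returning a bool. A minimal, hypothetical call site; the function name and the in-scope txid/mempool are assumptions:

    #include <chainparams.h>
    #include <txmempool.h>
    #include <validation.h>

    // Sketch only: with no block hint the lookup falls back to the mempool and then
    // to -txindex (if enabled), per the new doc comment.
    CTransactionRef LookupTx(const CTxMemPool& pool, const uint256& txid)
    {
        uint256 hash_block;
        CTransactionRef tx = GetTransaction(/*block_index=*/nullptr, &pool, txid,
                                            Params().GetConsensus(), hash_block);
        // hash_block is only set when the transaction was located via a block index.
        return tx; // nullptr if it could not be found
    }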
*/ -BlockMap& BlockIndex(); - /** Global variable that points to the active block tree (protected by cs_main) */ extern std::unique_ptr<CBlockTreeDB> pblocktree; diff --git a/src/version.h b/src/version.h index d932b512d4..b5f379e1b8 100644 --- a/src/version.h +++ b/src/version.h @@ -9,20 +9,13 @@ * network protocol versioning */ -static const int PROTOCOL_VERSION = 70015; +static const int PROTOCOL_VERSION = 70016; //! initial proto version, to be increased after version/verack negotiation static const int INIT_PROTO_VERSION = 209; -//! In this version, 'getheaders' was introduced. -static const int GETHEADERS_VERSION = 31800; - //! disconnect from peers older than this proto version -static const int MIN_PEER_PROTO_VERSION = GETHEADERS_VERSION; - -//! nTime field added to CAddress, starting with this version; -//! if possible, avoid requesting addresses nodes older than this -static const int CADDR_TIME_VERSION = 31402; +static const int MIN_PEER_PROTO_VERSION = 31800; //! BIP 0031, pong message, is enabled for all versions AFTER this one static const int BIP0031_VERSION = 60000; @@ -42,4 +35,7 @@ static const int SHORT_IDS_BLOCKS_VERSION = 70014; //! not banning for invalid compact blocks starts with this version static const int INVALID_CB_NO_BAN_VERSION = 70015; +//! "wtxidrelay" command for wtxid-based relay starts with this version +static const int WTXID_RELAY_VERSION = 70016; + #endif // BITCOIN_VERSION_H diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 125bf004e4..24eb2ee34c 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -32,13 +32,13 @@ void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filena int ret = db.get_mpf()->get_fileid(fileid.value); if (ret != 0) { - throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret)); + throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (get_fileid failed with %d)", filename, ret)); } for (const auto& item : env.m_fileids) { if (fileid == item.second && &fileid != &item.second) { - throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s)", filename, - HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first)); + throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s)", filename, + HexStr(item.second.value), item.first)); } } } @@ -97,9 +97,8 @@ void BerkeleyEnvironment::Close() fDbEnvInit = false; for (auto& db : m_databases) { - auto count = mapFileUseCount.find(db.first); - assert(count == mapFileUseCount.end() || count->second == 0); BerkeleyDatabase& database = db.second.get(); + assert(database.m_refcount <= 0); if (database.m_db) { database.m_db->close(0); database.m_db.reset(); @@ -139,7 +138,7 @@ BerkeleyEnvironment::~BerkeleyEnvironment() Close(); } -bool BerkeleyEnvironment::Open(bool retry) +bool BerkeleyEnvironment::Open(bilingual_str& err) { if (fDbEnvInit) { return true; @@ -149,6 +148,7 @@ bool BerkeleyEnvironment::Open(bool retry) TryCreateDirectories(pathIn); if (!LockDirectory(pathIn, ".walletlock")) { LogPrintf("Cannot obtain a lock on wallet directory %s. 
Another instance of bitcoin may be using it.\n", strPath); + err = strprintf(_("Error initializing wallet database environment %s!"), Directory()); return false; } @@ -188,23 +188,11 @@ bool BerkeleyEnvironment::Open(bool retry) LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2)); } Reset(); - if (retry) { - // try moving the database env out of the way - fs::path pathDatabaseBak = pathIn / strprintf("database.%d.bak", GetTime()); - try { - fs::rename(pathLogDir, pathDatabaseBak); - LogPrintf("Moved old %s to %s. Retrying.\n", pathLogDir.string(), pathDatabaseBak.string()); - } catch (const fs::filesystem_error&) { - // failure is ok (well, not really, but it's not worse than what we started with) - } - // try opening it again one more time - if (!Open(false /* retry */)) { - // if it still fails, it probably means we can't even create the database env - return false; - } - } else { - return false; + err = strprintf(_("Error initializing wallet database environment %s!"), Directory()); + if (ret == DB_RUNRECOVERY) { + err += Untranslated(" ") + _("This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet"); } + return false; } fDbEnvInit = true; @@ -243,16 +231,6 @@ BerkeleyEnvironment::BerkeleyEnvironment() fMockDb = true; } -bool BerkeleyEnvironment::Verify(const std::string& strFile) -{ - LOCK(cs_db); - assert(mapFileUseCount.count(strFile) == 0); - - Db db(dbenv.get(), 0); - int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); - return result == 0; -} - BerkeleyBatch::SafeDbt::SafeDbt() { m_dbt.set_flags(DB_DBT_MALLOC); @@ -292,33 +270,26 @@ BerkeleyBatch::SafeDbt::operator Dbt*() return &m_dbt; } -bool BerkeleyBatch::VerifyEnvironment(const fs::path& file_path, bilingual_str& errorStr) +bool BerkeleyDatabase::Verify(bilingual_str& errorStr) { - std::string walletFile; - std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, walletFile); fs::path walletDir = env->Directory(); + fs::path file_path = walletDir / strFile; LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion()); LogPrintf("Using wallet %s\n", file_path.string()); - if (!env->Open(true /* retry */)) { - errorStr = strprintf(_("Error initializing wallet database environment %s!"), walletDir); + if (!env->Open(errorStr)) { return false; } - return true; -} - -bool BerkeleyBatch::VerifyDatabaseFile(const fs::path& file_path, bilingual_str& errorStr) -{ - std::string walletFile; - std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, walletFile); - fs::path walletDir = env->Directory(); - - if (fs::exists(walletDir / walletFile)) + if (fs::exists(file_path)) { - if (!env->Verify(walletFile)) { - errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), walletFile); + assert(m_refcount == 0); + + Db db(env->dbenv.get(), 0); + int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); + if (result != 0) { + errorStr = strprintf(_("%s corrupt. 
Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), file_path); return false; } } @@ -334,17 +305,38 @@ void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile) dbenv->lsn_reset(strFile.c_str(), 0); } +BerkeleyDatabase::~BerkeleyDatabase() +{ + if (env) { + LOCK(cs_db); + env->CloseDb(strFile); + assert(!m_db); + size_t erased = env->m_databases.erase(strFile); + assert(erased == 1); + env->m_fileids.erase(strFile); + } +} -BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr) +BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database) { + database.AddRef(); + database.Open(pszMode); fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w')); fFlushOnClose = fFlushOnCloseIn; env = database.env.get(); - if (database.IsDummy()) { - return; + pdb = database.m_db.get(); + strFile = database.strFile; + bool fCreate = strchr(pszMode, 'c') != nullptr; + if (fCreate && !Exists(std::string("version"))) { + bool fTmp = fReadOnly; + fReadOnly = false; + Write(std::string("version"), CLIENT_VERSION); + fReadOnly = fTmp; } - const std::string &strFilename = database.strFile; +} +void BerkeleyDatabase::Open(const char* pszMode) +{ bool fCreate = strchr(pszMode, 'c') != nullptr; unsigned int nFlags = DB_THREAD; if (fCreate) @@ -352,11 +344,11 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo { LOCK(cs_db); - if (!env->Open(false /* retry */)) - throw std::runtime_error("BerkeleyBatch: Failed to open database environment."); + bilingual_str open_err; + if (!env->Open(open_err)) + throw std::runtime_error("BerkeleyDatabase: Failed to open database environment."); - pdb = database.m_db.get(); - if (pdb == nullptr) { + if (m_db == nullptr) { int ret; std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0); @@ -365,52 +357,30 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo DbMpoolFile* mpf = pdb_temp->get_mpf(); ret = mpf->set_flags(DB_MPOOL_NOFILE, 1); if (ret != 0) { - throw std::runtime_error(strprintf("BerkeleyBatch: Failed to configure for no temp file backing for database %s", strFilename)); + throw std::runtime_error(strprintf("BerkeleyDatabase: Failed to configure for no temp file backing for database %s", strFile)); } } ret = pdb_temp->open(nullptr, // Txn pointer - fMockDb ? nullptr : strFilename.c_str(), // Filename - fMockDb ? strFilename.c_str() : "main", // Logical db name + fMockDb ? nullptr : strFile.c_str(), // Filename + fMockDb ? strFile.c_str() : "main", // Logical db name DB_BTREE, // Database type nFlags, // Flags 0); if (ret != 0) { - throw std::runtime_error(strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename)); + throw std::runtime_error(strprintf("BerkeleyDatabase: Error %d, can't open database %s", ret, strFile)); } + m_file_path = (env->Directory() / strFile).string(); // Call CheckUniqueFileid on the containing BDB environment to // avoid BDB data consistency bugs that happen when different data // files in the same environment have the same fileid. - // - // Also call CheckUniqueFileid on all the other g_dbenvs to prevent - // bitcoin from opening the same data file through another - // environment when the file is referenced through equivalent but - // not obviously identical symlinked or hard linked or bind mounted - // paths. 
In the future a more relaxed check for equal inode and - // device ids could be done instead, which would allow opening - // different backup copies of a wallet at the same time. Maybe even - // more ideally, an exclusive lock for accessing the database could - // be implemented, so no equality checks are needed at all. (Newer - // versions of BDB have an set_lk_exclusive method for this - // purpose, but the older version we use does not.) - for (const auto& env : g_dbenvs) { - CheckUniqueFileid(*env.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]); - } + CheckUniqueFileid(*env, strFile, *pdb_temp, this->env->m_fileids[strFile]); - pdb = pdb_temp.release(); - database.m_db.reset(pdb); + m_db.reset(pdb_temp.release()); - if (fCreate && !Exists(std::string("version"))) { - bool fTmp = fReadOnly; - fReadOnly = false; - Write(std::string("version"), CLIENT_VERSION); - fReadOnly = fTmp; - } } - ++env->mapFileUseCount[strFilename]; - strFile = strFilename; } } @@ -434,6 +404,12 @@ void BerkeleyDatabase::IncrementUpdateCounter() ++nUpdateCounter; } +BerkeleyBatch::~BerkeleyBatch() +{ + Close(); + m_database.RemoveRef(); +} + void BerkeleyBatch::Close() { if (!pdb) @@ -442,15 +418,10 @@ void BerkeleyBatch::Close() activeTxn->abort(); activeTxn = nullptr; pdb = nullptr; + CloseCursor(); if (fFlushOnClose) Flush(); - - { - LOCK(cs_db); - --env->mapFileUseCount[strFile]; - } - env->m_db_in_use.notify_all(); } void BerkeleyEnvironment::CloseDb(const std::string& strFile) @@ -474,8 +445,8 @@ void BerkeleyEnvironment::ReloadDbEnv() AssertLockNotHeld(cs_db); std::unique_lock<RecursiveMutex> lock(cs_db); m_db_in_use.wait(lock, [this](){ - for (auto& count : mapFileUseCount) { - if (count.second > 0) return false; + for (auto& db : m_databases) { + if (db.second.get().m_refcount > 0) return false; } return true; }); @@ -491,30 +462,26 @@ void BerkeleyEnvironment::ReloadDbEnv() // Reset the environment Flush(true); // This will flush and close the environment Reset(); - Open(true); + bilingual_str open_err; + Open(open_err); } -bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip) +bool BerkeleyDatabase::Rewrite(const char* pszSkip) { - if (database.IsDummy()) { - return true; - } - BerkeleyEnvironment *env = database.env.get(); - const std::string& strFile = database.strFile; while (true) { { LOCK(cs_db); - if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { + if (m_refcount <= 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); - env->mapFileUseCount.erase(strFile); + m_refcount = -1; bool fSuccess = true; LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile); std::string strFileRes = strFile + ".rewrite"; { // surround usage of db with extra {} - BerkeleyBatch db(database, "r"); + BerkeleyBatch db(*this, "r"); std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer @@ -528,17 +495,15 @@ bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip) fSuccess = false; } - Dbc* pcursor = db.GetCursor(); - if (pcursor) + if (db.StartCursor()) { while (fSuccess) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); - int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue); - if (ret1 == DB_NOTFOUND) { - pcursor->close(); + bool complete; + bool ret1 = db.ReadAtCursor(ssKey, ssValue, complete); + if (complete) { break; - } else if (ret1 != 0) { - pcursor->close(); + } 
else if (!ret1) { fSuccess = false; break; } @@ -556,6 +521,8 @@ bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip) if (ret2 > 0) fSuccess = false; } + db.CloseCursor(); + } if (fSuccess) { db.Close(); env->CloseDb(strFile); @@ -592,10 +559,11 @@ void BerkeleyEnvironment::Flush(bool fShutdown) return; { LOCK(cs_db); - std::map<std::string, int>::iterator mi = mapFileUseCount.begin(); - while (mi != mapFileUseCount.end()) { - std::string strFile = (*mi).first; - int nRefCount = (*mi).second; + bool no_dbs_accessed = true; + for (auto& db_it : m_databases) { + std::string strFile = db_it.first; + int nRefCount = db_it.second.get().m_refcount; + if (nRefCount < 0) continue; LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount); if (nRefCount == 0) { // Move log data to the dat file @@ -606,14 +574,15 @@ void BerkeleyEnvironment::Flush(bool fShutdown) if (!fMockDb) dbenv->lsn_reset(strFile.c_str(), 0); LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile); - mapFileUseCount.erase(mi++); - } else - mi++; + nRefCount = -1; + } else { + no_dbs_accessed = false; + } } LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart); if (fShutdown) { char** listp; - if (mapFileUseCount.empty()) { + if (no_dbs_accessed) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); Close(); if (!fMockDb) { @@ -624,68 +593,44 @@ void BerkeleyEnvironment::Flush(bool fShutdown) } } -bool BerkeleyBatch::PeriodicFlush(BerkeleyDatabase& database) +bool BerkeleyDatabase::PeriodicFlush() { - if (database.IsDummy()) { - return true; - } - bool ret = false; - BerkeleyEnvironment *env = database.env.get(); - const std::string& strFile = database.strFile; + // Don't flush if we can't acquire the lock. TRY_LOCK(cs_db, lockDb); - if (lockDb) - { - // Don't do this if any databases are in use - int nRefCount = 0; - std::map<std::string, int>::iterator mit = env->mapFileUseCount.begin(); - while (mit != env->mapFileUseCount.end()) - { - nRefCount += (*mit).second; - mit++; - } + if (!lockDb) return false; - if (nRefCount == 0) - { - std::map<std::string, int>::iterator mi = env->mapFileUseCount.find(strFile); - if (mi != env->mapFileUseCount.end()) - { - LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile); - int64_t nStart = GetTimeMillis(); + // Don't flush if any databases are in use + for (auto& it : env->m_databases) { + if (it.second.get().m_refcount > 0) return false; + } - // Flush wallet file so it's self contained - env->CloseDb(strFile); - env->CheckpointLSN(strFile); + // Don't flush if there haven't been any batch writes for this database. 
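Aside (not part of the patch; the function body continues below, where m_refcount < 0 means no batch has written since the last flush): the per-database reference count now plays the role the old mapFileUseCount did. A rough C++ sketch of the lifecycle a caller drives; the helper name and the key/value are assumptions:

    #include <memory>
    #include <string>
    #include <wallet/bdb.h>

    // Sketch only: how batches drive the refcount that PeriodicFlush() inspects.
    void WriteThenMaybeFlush(BerkeleyDatabase& db)
    {
        {
            // The BerkeleyBatch constructor calls AddRef(), which bumps m_refcount
            // to a positive value (from -1 it jumps straight to 1): the database is in use.
            BerkeleyBatch batch(db, "r+");
            batch.Write(std::string("key"), std::string("value"));
        }   // ~BerkeleyBatch() calls RemoveRef(), dropping m_refcount back to 0.

        // No batch is open and m_refcount is 0 rather than -1 (the database has
        // been touched since its last flush), so PeriodicFlush() may checkpoint
        // the log and reset m_refcount to -1.
        db.PeriodicFlush();
    }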
+ if (m_refcount < 0) return false; - env->mapFileUseCount.erase(mi++); - LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart); - ret = true; - } - } - } + LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile); + int64_t nStart = GetTimeMillis(); - return ret; -} + // Flush wallet file so it's self contained + env->CloseDb(strFile); + env->CheckpointLSN(strFile); + m_refcount = -1; -bool BerkeleyDatabase::Rewrite(const char* pszSkip) -{ - return BerkeleyBatch::Rewrite(*this, pszSkip); + LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart); + + return true; } bool BerkeleyDatabase::Backup(const std::string& strDest) const { - if (IsDummy()) { - return false; - } while (true) { { LOCK(cs_db); - if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) + if (m_refcount <= 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); - env->mapFileUseCount.erase(strFile); // Copy wallet file fs::path pathSrc = env->Directory() / strFile; @@ -712,53 +657,45 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const } } -void BerkeleyDatabase::Flush(bool shutdown) +void BerkeleyDatabase::Flush() { - if (!IsDummy()) { - env->Flush(shutdown); - if (shutdown) { - LOCK(cs_db); - g_dbenvs.erase(env->Directory().string()); - env = nullptr; - } else { - // TODO: To avoid g_dbenvs.erase erasing the environment prematurely after the - // first database shutdown when multiple databases are open in the same - // environment, should replace raw database `env` pointers with shared or weak - // pointers, or else separate the database and environment shutdowns so - // environments can be shut down after databases. - env->m_fileids.erase(strFile); - } - } + env->Flush(false); +} + +void BerkeleyDatabase::Close() +{ + env->Flush(true); } void BerkeleyDatabase::ReloadDbEnv() { - if (!IsDummy()) { - env->ReloadDbEnv(); - } + env->ReloadDbEnv(); } -Dbc* BerkeleyBatch::GetCursor() +bool BerkeleyBatch::StartCursor() { + assert(!m_cursor); if (!pdb) - return nullptr; - Dbc* pcursor = nullptr; - int ret = pdb->cursor(nullptr, &pcursor, 0); - if (ret != 0) - return nullptr; - return pcursor; + return false; + int ret = pdb->cursor(nullptr, &m_cursor, 0); + return ret == 0; } -int BerkeleyBatch::ReadAtCursor(Dbc* pcursor, CDataStream& ssKey, CDataStream& ssValue) +bool BerkeleyBatch::ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) { + complete = false; + if (m_cursor == nullptr) return false; // Read at cursor SafeDbt datKey; SafeDbt datValue; - int ret = pcursor->get(datKey, datValue, DB_NEXT); + int ret = m_cursor->get(datKey, datValue, DB_NEXT); + if (ret == DB_NOTFOUND) { + complete = true; + } if (ret != 0) - return ret; + return false; else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr) - return 99999; + return false; // Convert to streams ssKey.SetType(SER_DISK); @@ -767,7 +704,14 @@ int BerkeleyBatch::ReadAtCursor(Dbc* pcursor, CDataStream& ssKey, CDataStream& s ssValue.SetType(SER_DISK); ssValue.clear(); ssValue.write((char*)datValue.get_data(), datValue.get_size()); - return 0; + return true; +} + +void BerkeleyBatch::CloseCursor() +{ + if (!m_cursor) return; + m_cursor->close(); + m_cursor = nullptr; } bool BerkeleyBatch::TxnBegin() @@ -804,15 +748,13 @@ std::string BerkeleyDatabaseVersion() return DbEnv::version(nullptr, nullptr, nullptr); } -bool BerkeleyBatch::ReadKey(CDataStream& key, CDataStream& value) +bool BerkeleyBatch::ReadKey(CDataStream&& 
key, CDataStream& value) { if (!pdb) return false; - // Key SafeDbt datKey(key.data(), key.size()); - // Read SafeDbt datValue; int ret = pdb->get(activeTxn, datKey, datValue, 0); if (ret == 0 && datValue.get_data() != nullptr) { @@ -822,48 +764,63 @@ bool BerkeleyBatch::ReadKey(CDataStream& key, CDataStream& value) return false; } -bool BerkeleyBatch::WriteKey(CDataStream& key, CDataStream& value, bool overwrite) +bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite) { if (!pdb) - return true; + return false; if (fReadOnly) assert(!"Write called on database in read-only mode"); - // Key SafeDbt datKey(key.data(), key.size()); - // Value SafeDbt datValue(value.data(), value.size()); - // Write int ret = pdb->put(activeTxn, datKey, datValue, (overwrite ? 0 : DB_NOOVERWRITE)); return (ret == 0); } -bool BerkeleyBatch::EraseKey(CDataStream& key) +bool BerkeleyBatch::EraseKey(CDataStream&& key) { if (!pdb) return false; if (fReadOnly) assert(!"Erase called on database in read-only mode"); - // Key SafeDbt datKey(key.data(), key.size()); - // Erase int ret = pdb->del(activeTxn, datKey, 0); return (ret == 0 || ret == DB_NOTFOUND); } -bool BerkeleyBatch::HasKey(CDataStream& key) +bool BerkeleyBatch::HasKey(CDataStream&& key) { if (!pdb) return false; - // Key SafeDbt datKey(key.data(), key.size()); - // Exists int ret = pdb->exists(activeTxn, datKey, 0); return ret == 0; } + +void BerkeleyDatabase::AddRef() +{ + LOCK(cs_db); + if (m_refcount < 0) { + m_refcount = 1; + } else { + m_refcount++; + } +} + +void BerkeleyDatabase::RemoveRef() +{ + LOCK(cs_db); + m_refcount--; + if (env) env->m_db_in_use.notify_all(); +} + +std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(const char* mode, bool flush_on_close) +{ + return MakeUnique<BerkeleyBatch>(*this, mode, flush_on_close); +} diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index c121bb4228..75546924e8 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -52,7 +52,6 @@ private: public: std::unique_ptr<DbEnv> dbenv; - std::map<std::string, int> mapFileUseCount; std::map<std::string, std::reference_wrapper<BerkeleyDatabase>> m_databases; std::unordered_map<std::string, WalletDatabaseFileId> m_fileids; std::condition_variable_any m_db_in_use; @@ -67,9 +66,7 @@ public: bool IsDatabaseLoaded(const std::string& db_filename) const { return m_databases.find(db_filename) != m_databases.end(); } fs::path Directory() const { return strPath; } - bool Verify(const std::string& strFile); - - bool Open(bool retry); + bool Open(bilingual_str& error); void Close(); void Flush(bool fShutdown); void CheckpointLSN(const std::string& strFile); @@ -90,56 +87,63 @@ public: /** Get BerkeleyEnvironment and database filename given a wallet path. */ std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename); -/** Return wheter a BDB wallet database is currently loaded. */ +/** Return whether a BDB wallet database is currently loaded. */ bool IsBDBWalletLoaded(const fs::path& wallet_path); +class BerkeleyBatch; + /** An instance of this class represents one database. * For BerkeleyDB this is just a (env, strFile) tuple. 
**/ -class BerkeleyDatabase +class BerkeleyDatabase : public WalletDatabase { - friend class BerkeleyBatch; public: - /** Create dummy DB handle */ - BerkeleyDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(nullptr) - { - } + BerkeleyDatabase() = delete; /** Create DB handle to real database */ BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, std::string filename) : - nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(std::move(env)), strFile(std::move(filename)) + WalletDatabase(), env(std::move(env)), strFile(std::move(filename)) { auto inserted = this->env->m_databases.emplace(strFile, std::ref(*this)); assert(inserted.second); } - ~BerkeleyDatabase() { - if (env) { - size_t erased = env->m_databases.erase(strFile); - assert(erased == 1); - } - } + ~BerkeleyDatabase() override; + + /** Open the database if it is not already opened. + * Dummy function, doesn't do anything right now, but is needed for class abstraction */ + void Open(const char* mode) override; /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero */ - bool Rewrite(const char* pszSkip=nullptr); + bool Rewrite(const char* pszSkip=nullptr) override; + + /** Indicate the a new database user has began using the database. */ + void AddRef() override; + /** Indicate that database user has stopped using the database and that it could be flushed or closed. */ + void RemoveRef() override; /** Back up the entire database to a file. */ - bool Backup(const std::string& strDest) const; + bool Backup(const std::string& strDest) const override; - /** Make sure all changes are flushed to disk. + /** Make sure all changes are flushed to database file. + */ + void Flush() override; + /** Flush to the database file and close the database. + * Also close the environment if no other databases are open in it. */ - void Flush(bool shutdown); + void Close() override; + /* flush the wallet passively (TRY_LOCK) + ideal to be called periodically */ + bool PeriodicFlush() override; - void IncrementUpdateCounter(); + void IncrementUpdateCounter() override; - void ReloadDbEnv(); + void ReloadDbEnv() override; - std::atomic<unsigned int> nUpdateCounter; - unsigned int nLastSeen; - unsigned int nLastFlushed; - int64_t nLastWalletUpdate; + /** Verifies the environment and database file */ + bool Verify(bilingual_str& error) override; /** * Pointer to shared database environment. @@ -155,18 +159,14 @@ public: /** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */ std::unique_ptr<Db> m_db; -private: std::string strFile; - /** Return whether this database handle is a dummy for testing. - * Only to be used at a low level, application should ideally not care - * about this. 
- */ - bool IsDummy() const { return env == nullptr; } + /** Make a BerkeleyBatch connected to this database */ + std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override; }; /** RAII class that provides access to a Berkeley database */ -class BerkeleyBatch +class BerkeleyBatch : public DatabaseBatch { /** RAII class that automatically cleanses its data on destruction */ class SafeDbt final @@ -189,108 +189,37 @@ class BerkeleyBatch }; private: - bool ReadKey(CDataStream& key, CDataStream& value); - bool WriteKey(CDataStream& key, CDataStream& value, bool overwrite=true); - bool EraseKey(CDataStream& key); - bool HasKey(CDataStream& key); + bool ReadKey(CDataStream&& key, CDataStream& value) override; + bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite = true) override; + bool EraseKey(CDataStream&& key) override; + bool HasKey(CDataStream&& key) override; protected: Db* pdb; std::string strFile; DbTxn* activeTxn; + Dbc* m_cursor; bool fReadOnly; bool fFlushOnClose; BerkeleyEnvironment *env; + BerkeleyDatabase& m_database; public: explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true); - ~BerkeleyBatch() { Close(); } + ~BerkeleyBatch() override; BerkeleyBatch(const BerkeleyBatch&) = delete; BerkeleyBatch& operator=(const BerkeleyBatch&) = delete; - void Flush(); - void Close(); - - /* flush the wallet passively (TRY_LOCK) - ideal to be called periodically */ - static bool PeriodicFlush(BerkeleyDatabase& database); - /* verifies the database environment */ - static bool VerifyEnvironment(const fs::path& file_path, bilingual_str& errorStr); - /* verifies the database file */ - static bool VerifyDatabaseFile(const fs::path& file_path, bilingual_str& errorStr); - - template <typename K, typename T> - bool Read(const K& key, T& value) - { - // Key - CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(1000); - ssKey << key; - - CDataStream ssValue(SER_DISK, CLIENT_VERSION); - bool success = false; - bool ret = ReadKey(ssKey, ssValue); - if (ret) { - // Unserialize value - try { - ssValue >> value; - success = true; - } catch (const std::exception&) { - // In this case success remains 'false' - } - } - return ret && success; - } - - template <typename K, typename T> - bool Write(const K& key, const T& value, bool fOverwrite = true) - { - // Key - CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(1000); - ssKey << key; - - // Value - CDataStream ssValue(SER_DISK, CLIENT_VERSION); - ssValue.reserve(10000); - ssValue << value; - - // Write - return WriteKey(ssKey, ssValue, fOverwrite); - } - - template <typename K> - bool Erase(const K& key) - { - // Key - CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(1000); - ssKey << key; - - // Erase - return EraseKey(ssKey); - } - - template <typename K> - bool Exists(const K& key) - { - // Key - CDataStream ssKey(SER_DISK, CLIENT_VERSION); - ssKey.reserve(1000); - ssKey << key; - - // Exists - return HasKey(ssKey); - } - - Dbc* GetCursor(); - int ReadAtCursor(Dbc* pcursor, CDataStream& ssKey, CDataStream& ssValue); - bool TxnBegin(); - bool TxnCommit(); - bool TxnAbort(); + void Flush() override; + void Close() override; - bool static Rewrite(BerkeleyDatabase& database, const char* pszSkip = nullptr); + bool StartCursor() override; + bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override; + void CloseCursor() override; + bool TxnBegin() override; + bool TxnCommit() override; + bool 
TxnAbort() override; }; std::string BerkeleyDatabaseVersion(); diff --git a/src/wallet/context.h b/src/wallet/context.h index 3c8fdd1c59..a83591154f 100644 --- a/src/wallet/context.h +++ b/src/wallet/context.h @@ -5,6 +5,7 @@ #ifndef BITCOIN_WALLET_CONTEXT_H #define BITCOIN_WALLET_CONTEXT_H +class ArgsManager; namespace interfaces { class Chain; } // namespace interfaces @@ -21,6 +22,7 @@ class Chain; //! behavior. struct WalletContext { interfaces::Chain* chain{nullptr}; + ArgsManager* args{nullptr}; //! Declare default constructor and destructor that are not inline, so code //! instantiating the WalletContext struct doesn't need to #include class diff --git a/src/wallet/db.h b/src/wallet/db.h index 1322bf54fa..0afaba5fd1 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -6,12 +6,193 @@ #ifndef BITCOIN_WALLET_DB_H #define BITCOIN_WALLET_DB_H +#include <clientversion.h> #include <fs.h> +#include <streams.h> +#include <util/memory.h> +#include <atomic> +#include <memory> #include <string> +struct bilingual_str; + /** Given a wallet directory path or legacy file path, return path to main data file in the wallet database. */ fs::path WalletDataFilePath(const fs::path& wallet_path); void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename); +/** RAII class that provides access to a WalletDatabase */ +class DatabaseBatch +{ +private: + virtual bool ReadKey(CDataStream&& key, CDataStream& value) = 0; + virtual bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) = 0; + virtual bool EraseKey(CDataStream&& key) = 0; + virtual bool HasKey(CDataStream&& key) = 0; + +public: + explicit DatabaseBatch() {} + virtual ~DatabaseBatch() {} + + DatabaseBatch(const DatabaseBatch&) = delete; + DatabaseBatch& operator=(const DatabaseBatch&) = delete; + + virtual void Flush() = 0; + virtual void Close() = 0; + + template <typename K, typename T> + bool Read(const K& key, T& value) + { + CDataStream ssKey(SER_DISK, CLIENT_VERSION); + ssKey.reserve(1000); + ssKey << key; + + CDataStream ssValue(SER_DISK, CLIENT_VERSION); + if (!ReadKey(std::move(ssKey), ssValue)) return false; + try { + ssValue >> value; + return true; + } catch (const std::exception&) { + return false; + } + } + + template <typename K, typename T> + bool Write(const K& key, const T& value, bool fOverwrite = true) + { + CDataStream ssKey(SER_DISK, CLIENT_VERSION); + ssKey.reserve(1000); + ssKey << key; + + CDataStream ssValue(SER_DISK, CLIENT_VERSION); + ssValue.reserve(10000); + ssValue << value; + + return WriteKey(std::move(ssKey), std::move(ssValue), fOverwrite); + } + + template <typename K> + bool Erase(const K& key) + { + CDataStream ssKey(SER_DISK, CLIENT_VERSION); + ssKey.reserve(1000); + ssKey << key; + + return EraseKey(std::move(ssKey)); + } + + template <typename K> + bool Exists(const K& key) + { + CDataStream ssKey(SER_DISK, CLIENT_VERSION); + ssKey.reserve(1000); + ssKey << key; + + return HasKey(std::move(ssKey)); + } + + virtual bool StartCursor() = 0; + virtual bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) = 0; + virtual void CloseCursor() = 0; + virtual bool TxnBegin() = 0; + virtual bool TxnCommit() = 0; + virtual bool TxnAbort() = 0; +}; + +/** An instance of this class represents one database. 
+ **/ +class WalletDatabase +{ +public: + /** Create dummy DB handle */ + WalletDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0) {} + virtual ~WalletDatabase() {}; + + /** Open the database if it is not already opened. */ + virtual void Open(const char* mode) = 0; + + //! Counts the number of active database users to be sure that the database is not closed while someone is using it + std::atomic<int> m_refcount{0}; + /** Indicate the a new database user has began using the database. Increments m_refcount */ + virtual void AddRef() = 0; + /** Indicate that database user has stopped using the database and that it could be flushed or closed. Decrement m_refcount */ + virtual void RemoveRef() = 0; + + /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero + */ + virtual bool Rewrite(const char* pszSkip=nullptr) = 0; + + /** Back up the entire database to a file. + */ + virtual bool Backup(const std::string& strDest) const = 0; + + /** Make sure all changes are flushed to database file. + */ + virtual void Flush() = 0; + /** Flush to the database file and close the database. + * Also close the environment if no other databases are open in it. + */ + virtual void Close() = 0; + /* flush the wallet passively (TRY_LOCK) + ideal to be called periodically */ + virtual bool PeriodicFlush() = 0; + + virtual void IncrementUpdateCounter() = 0; + + virtual void ReloadDbEnv() = 0; + + std::atomic<unsigned int> nUpdateCounter; + unsigned int nLastSeen; + unsigned int nLastFlushed; + int64_t nLastWalletUpdate; + + /** Verifies the environment and database file */ + virtual bool Verify(bilingual_str& error) = 0; + + std::string m_file_path; + + /** Make a DatabaseBatch connected to this database */ + virtual std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) = 0; +}; + +/** RAII class that provides access to a DummyDatabase. Never fails. */ +class DummyBatch : public DatabaseBatch +{ +private: + bool ReadKey(CDataStream&& key, CDataStream& value) override { return true; } + bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return true; } + bool EraseKey(CDataStream&& key) override { return true; } + bool HasKey(CDataStream&& key) override { return true; } + +public: + void Flush() override {} + void Close() override {} + + bool StartCursor() override { return true; } + bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override { return true; } + void CloseCursor() override {} + bool TxnBegin() override { return true; } + bool TxnCommit() override { return true; } + bool TxnAbort() override { return true; } +}; + +/** A dummy WalletDatabase that does nothing and never fails. Only used by unit tests. 
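Aside (not part of the patch; the DummyDatabase declaration follows below): wallet code now talks to the abstract WalletDatabase/DatabaseBatch pair rather than to BerkeleyBatch directly. A minimal round-trip sketch; the function name and the stored record are assumptions:

    #include <memory>
    #include <string>
    #include <wallet/db.h>

    // Sketch only: Read/Write above serialize key and value into CDataStreams and
    // forward them to the backend's ReadKey/WriteKey overrides.
    bool RoundTrip(WalletDatabase& db)
    {
        std::unique_ptr<DatabaseBatch> batch = db.MakeBatch("r+", /*flush_on_close=*/true);
        if (!batch->Write(std::string("label"), std::string("example"))) return false;

        std::string value;
        return batch->Read(std::string("label"), value) && value == "example";
    }

With DummyDatabase (declared below) the low-level batch operations are no-ops that report success without touching disk, which is why it is only suitable for unit tests.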
+ **/ +class DummyDatabase : public WalletDatabase +{ +public: + void Open(const char* mode) override {}; + void AddRef() override {} + void RemoveRef() override {} + bool Rewrite(const char* pszSkip=nullptr) override { return true; } + bool Backup(const std::string& strDest) const override { return true; } + void Close() override {} + void Flush() override {} + bool PeriodicFlush() override { return true; } + void IncrementUpdateCounter() override { ++nUpdateCounter; } + void ReloadDbEnv() override {} + bool Verify(bilingual_str& errorStr) override { return true; } + std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override { return MakeUnique<DummyBatch>(); } +}; + #endif // BITCOIN_WALLET_DB_H diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 3885eb6185..52162ab521 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -7,8 +7,9 @@ #include <interfaces/chain.h> #include <net.h> #include <node/context.h> +#include <node/ui_interface.h> #include <outputtype.h> -#include <ui_interface.h> +#include <util/check.h> #include <util/moneystr.h> #include <util/system.h> #include <util/translation.h> @@ -16,14 +17,14 @@ #include <wallet/wallet.h> #include <walletinitinterface.h> -class WalletInit : public WalletInitInterface { +class WalletInit : public WalletInitInterface +{ public: - //! Was the wallet component compiled in. bool HasWalletSupport() const override {return true;} //! Return the wallets help message. - void AddWalletOptions() const override; + void AddWalletOptions(ArgsManager& argsman) const override; //! Wallets parameter interaction bool ParameterInteraction() const override; @@ -34,42 +35,42 @@ public: const WalletInitInterface& g_wallet_init_interface = WalletInit(); -void WalletInit::AddWalletOptions() const +void WalletInit::AddWalletOptions(ArgsManager& argsman) const { - gArgs.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting all or none, instead of selecting on a per-output basis. Privacy is improved as an address is only used once (unless someone sends to it after spending from it), but may result in slightly higher fees as suboptimal coin selection may result due to the added limitation (default: %u (always enabled for wallets with \"avoid_reuse\" enabled))", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). 
" + argsman.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting all or none, instead of selecting on a per-output basis. Privacy is improved as an address is only used once (unless someone sends to it after spending from it), but may result in slightly higher fees as suboptimal coin selection may result due to the added limitation (default: %u (always enabled for wallets with \"avoid_reuse\" enabled))", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). " "Note: An output is discarded if it is dust at this rate, but we will always discard up to the dust relay fee and a discard fee above that is limited by the fee estimate for the longest target", CURRENCY_UNIT, FormatMoney(DEFAULT_DISCARD_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)", + argsman.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_FALLBACK_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)", + argsman.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). 
Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - gArgs.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)", + argsman.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MINFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-paytxfee=<amt>", strprintf("Fee (in %s/kB) to add to transactions you send (default: %s)", + argsman.AddArg("-paytxfee=<amt>", strprintf("Fee (in %s/kB) to add to transactions you send (default: %s)", CURRENCY_UNIT, FormatMoney(CFeeRate{DEFAULT_PAY_TX_FEE}.GetFeePerK())), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-rescan", "Rescan the block chain for missing wallet transactions on startup", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-spendzeroconfchange", strprintf("Spend unconfirmed change when sending transactions (default: %u)", DEFAULT_SPEND_ZEROCONF_CHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-txconfirmtarget=<n>", strprintf("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)", DEFAULT_TX_CONFIRM_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-wallet=<path>", "Specify wallet database path. Can be specified multiple times to load multiple wallets. Path is interpreted relative to <walletdir> if it is not absolute, and will be created if it does not exist (as a directory containing a wallet.dat file and log files). For backwards compatibility this will also accept names of existing data files in <walletdir>.)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET); - gArgs.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET); + argsman.AddArg("-rescan", "Rescan the block chain for missing wallet transactions on startup", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-spendzeroconfchange", strprintf("Spend unconfirmed change when sending transactions (default: %u)", DEFAULT_SPEND_ZEROCONF_CHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-txconfirmtarget=<n>", strprintf("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)", DEFAULT_TX_CONFIRM_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-wallet=<path>", "Specify wallet database path. Can be specified multiple times to load multiple wallets. 
Path is interpreted relative to <walletdir> if it is not absolute, and will be created if it does not exist (as a directory containing a wallet.dat file and log files). For backwards compatibility this will also accept names of existing data files in <walletdir>.)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET); + argsman.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET); #if HAVE_SYSTEM - gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); #endif - gArgs.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup" + argsman.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + argsman.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup" " (1 = keep tx meta data e.g. 
payment request information, 2 = drop tx meta data)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); - gArgs.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); - gArgs.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); - gArgs.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); - gArgs.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); + argsman.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); + argsman.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); + argsman.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); + argsman.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); } bool WalletInit::ParameterInteraction() const @@ -112,10 +113,11 @@ bool WalletInit::ParameterInteraction() const void WalletInit::Construct(NodeContext& node) const { - if (gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { + ArgsManager& args = *Assert(node.args); + if (args.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) { LogPrintf("Wallet disabled!\n"); return; } - gArgs.SoftSetArg("-wallet", ""); - node.chain_clients.emplace_back(interfaces::MakeWalletClient(*node.chain, gArgs.GetArgs("-wallet"))); + args.SoftSetArg("-wallet", ""); + node.chain_clients.emplace_back(interfaces::MakeWalletClient(*node.chain, args, args.GetArgs("-wallet"))); } diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp index 8df3e78215..2a81d30133 100644 --- a/src/wallet/load.cpp +++ b/src/wallet/load.cpp @@ -11,6 +11,7 @@ #include <util/system.h> #include <util/translation.h> #include <wallet/wallet.h> +#include <wallet/walletdb.h> bool VerifyWallets(interfaces::Chain& chain, const std::vector<std::string>& wallet_files) { @@ -82,28 +83,30 @@ bool LoadWallets(interfaces::Chain& chain, const std::vector<std::string>& walle } } -void StartWallets(CScheduler& scheduler) +void StartWallets(CScheduler& scheduler, const ArgsManager& args) { for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) { pwallet->postInitProcess(); } // Schedule periodic wallet flushes and tx rebroadcasts - scheduler.scheduleEvery(MaybeCompactWalletDB, std::chrono::milliseconds{500}); + if (args.GetBoolArg("-flushwallet", DEFAULT_FLUSHWALLET)) { + 
scheduler.scheduleEvery(MaybeCompactWalletDB, std::chrono::milliseconds{500}); + } scheduler.scheduleEvery(MaybeResendWalletTxs, std::chrono::milliseconds{1000}); } void FlushWallets() { for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) { - pwallet->Flush(false); + pwallet->Flush(); } } void StopWallets() { for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) { - pwallet->Flush(true); + pwallet->Close(); } } diff --git a/src/wallet/load.h b/src/wallet/load.h index e24b1f2e69..ff4f5b4b23 100644 --- a/src/wallet/load.h +++ b/src/wallet/load.h @@ -9,6 +9,7 @@ #include <string> #include <vector> +class ArgsManager; class CScheduler; namespace interfaces { @@ -22,7 +23,7 @@ bool VerifyWallets(interfaces::Chain& chain, const std::vector<std::string>& wal bool LoadWallets(interfaces::Chain& chain, const std::vector<std::string>& wallet_files); //! Complete startup of wallets. -void StartWallets(CScheduler& scheduler); +void StartWallets(CScheduler& scheduler, const ArgsManager& args); //! Flush all wallets in preparation for shutdown. void FlushWallets(); diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 6a1048539f..e0c3a1287a 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -34,7 +34,7 @@ std::string static EncodeDumpString(const std::string &str) { std::stringstream ret; for (const unsigned char c : str) { if (c <= 32 || c >= 128 || c == '%') { - ret << '%' << HexStr(&c, &c + 1); + ret << '%' << HexStr(Span<const unsigned char>(&c, 1)); } else { ret << c; } @@ -856,20 +856,20 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d { // Use Solver to obtain script type and parsed pubkeys or hashes: std::vector<std::vector<unsigned char>> solverdata; - txnouttype script_type = Solver(script, solverdata); + TxoutType script_type = Solver(script, solverdata); switch (script_type) { - case TX_PUBKEY: { + case TxoutType::PUBKEY: { CPubKey pubkey(solverdata[0].begin(), solverdata[0].end()); import_data.used_keys.emplace(pubkey.GetID(), false); return ""; } - case TX_PUBKEYHASH: { + case TxoutType::PUBKEYHASH: { CKeyID id = CKeyID(uint160(solverdata[0])); import_data.used_keys[id] = true; return ""; } - case TX_SCRIPTHASH: { + case TxoutType::SCRIPTHASH: { if (script_ctx == ScriptContext::P2SH) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2SH inside another P2SH"); if (script_ctx == ScriptContext::WITNESS_V0) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2SH inside a P2WSH"); CHECK_NONFATAL(script_ctx == ScriptContext::TOP); @@ -880,14 +880,14 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d import_data.import_scripts.emplace(*subscript); return RecurseImportData(*subscript, import_data, ScriptContext::P2SH); } - case TX_MULTISIG: { + case TxoutType::MULTISIG: { for (size_t i = 1; i + 1< solverdata.size(); ++i) { CPubKey pubkey(solverdata[i].begin(), solverdata[i].end()); import_data.used_keys.emplace(pubkey.GetID(), false); } return ""; } - case TX_WITNESS_V0_SCRIPTHASH: { + case TxoutType::WITNESS_V0_SCRIPTHASH: { if (script_ctx == ScriptContext::WITNESS_V0) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2WSH inside another P2WSH"); uint256 fullid(solverdata[0]); CScriptID id; @@ -901,7 +901,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d import_data.import_scripts.emplace(*subscript); return RecurseImportData(*subscript, import_data, ScriptContext::WITNESS_V0); } - case TX_WITNESS_V0_KEYHASH: 
{ + case TxoutType::WITNESS_V0_KEYHASH: { if (script_ctx == ScriptContext::WITNESS_V0) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2WPKH inside P2WSH"); CKeyID id = CKeyID(uint160(solverdata[0])); import_data.used_keys[id] = true; @@ -910,10 +910,10 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d } return ""; } - case TX_NULL_DATA: + case TxoutType::NULL_DATA: return "unspendable script"; - case TX_NONSTANDARD: - case TX_WITNESS_UNKNOWN: + case TxoutType::NONSTANDARD: + case TxoutType::WITNESS_UNKNOWN: default: return "unrecognized script"; } @@ -1547,7 +1547,7 @@ static UniValue ProcessDescriptorImport(CWallet * const pwallet, const UniValue& if (!w_desc.descriptor->GetOutputType()) { warnings.push_back("Unknown output type, cannot set descriptor to active."); } else { - pwallet->SetActiveScriptPubKeyMan(spk_manager->GetID(), *w_desc.descriptor->GetOutputType(), internal); + pwallet->AddActiveScriptPubKeyMan(spk_manager->GetID(), *w_desc.descriptor->GetOutputType(), internal); } } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 63f4afa3bc..17460a2932 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -223,7 +223,7 @@ static void SetFeeEstimateMode(const CWallet* pwallet, CCoinControl& cc, const U cc.m_feerate = CFeeRate(fee_rate); // default RBF to true for explicit fee rate modes - if (cc.m_signal_bip125_rbf == boost::none) cc.m_signal_bip125_rbf = true; + if (cc.m_signal_bip125_rbf == nullopt) cc.m_signal_bip125_rbf = true; } else if (!estimate_param.isNull()) { cc.m_confirm_target = ParseConfirmTarget(estimate_param, pwallet->chain().estimateMaxBlocks()); } @@ -306,7 +306,7 @@ static UniValue getrawchangeaddress(const JSONRPCRequest& request) throw JSONRPCError(RPC_WALLET_ERROR, "Error: This wallet has no available keys"); } - OutputType output_type = pwallet->m_default_change_type != OutputType::CHANGE_AUTO ? 
pwallet->m_default_change_type : pwallet->m_default_address_type; + OutputType output_type = pwallet->m_default_change_type.get_value_or(pwallet->m_default_address_type); if (!request.params[0].isNull()) { if (!ParseOutputType(request.params[0].get_str(), output_type)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[0].get_str())); @@ -359,36 +359,54 @@ static UniValue setlabel(const JSONRPCRequest& request) return NullUniValue; } +void ParseRecipients(const UniValue& address_amounts, const UniValue& subtract_fee_outputs, std::vector<CRecipient> &recipients) { + std::set<CTxDestination> destinations; + int i = 0; + for (const std::string& address: address_amounts.getKeys()) { + CTxDestination dest = DecodeDestination(address); + if (!IsValidDestination(dest)) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string("Invalid Bitcoin address: ") + address); + } -static CTransactionRef SendMoney(CWallet* const pwallet, const CTxDestination& address, CAmount nValue, bool fSubtractFeeFromAmount, const CCoinControl& coin_control, mapValue_t mapValue) -{ - CAmount curBalance = pwallet->GetBalance(0, coin_control.m_avoid_address_reuse).m_mine_trusted; + if (destinations.count(dest)) { + throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid parameter, duplicated address: ") + address); + } + destinations.insert(dest); - // Check amount - if (nValue <= 0) - throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid amount"); + CScript script_pub_key = GetScriptForDestination(dest); + CAmount amount = AmountFromValue(address_amounts[i++]); - if (nValue > curBalance) - throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, "Insufficient funds"); + bool subtract_fee = false; + for (unsigned int idx = 0; idx < subtract_fee_outputs.size(); idx++) { + const UniValue& addr = subtract_fee_outputs[idx]; + if (addr.get_str() == address) { + subtract_fee = true; + } + } - // Parse Bitcoin address - CScript scriptPubKey = GetScriptForDestination(address); + CRecipient recipient = {script_pub_key, amount, subtract_fee}; + recipients.push_back(recipient); + } +} + +UniValue SendMoney(CWallet* const pwallet, const CCoinControl &coin_control, std::vector<CRecipient> &recipients, mapValue_t map_value) +{ + EnsureWalletIsUnlocked(pwallet); - // Create and send the transaction + // Shuffle recipient list + std::shuffle(recipients.begin(), recipients.end(), FastRandomContext()); + + // Send CAmount nFeeRequired = 0; - bilingual_str error; - std::vector<CRecipient> vecSend; int nChangePosRet = -1; - CRecipient recipient = {scriptPubKey, nValue, fSubtractFeeFromAmount}; - vecSend.push_back(recipient); + bilingual_str error; CTransactionRef tx; - if (!pwallet->CreateTransaction(vecSend, tx, nFeeRequired, nChangePosRet, error, coin_control)) { - if (!fSubtractFeeFromAmount && nValue + nFeeRequired > curBalance) - error = strprintf(Untranslated("Error: This transaction requires a transaction fee of at least %s"), FormatMoney(nFeeRequired)); - throw JSONRPCError(RPC_WALLET_ERROR, error.original); + bool fCreated = pwallet->CreateTransaction(recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, !pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); + if (!fCreated) { + throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, error.original); } - pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */); - return tx; + pwallet->CommitTransaction(tx, std::move(map_value), {} /* orderForm */); + return tx->GetHash().GetHex(); } static UniValue 
sendtoaddress(const JSONRPCRequest& request) @@ -436,16 +454,6 @@ static UniValue sendtoaddress(const JSONRPCRequest& request) LOCK(pwallet->cs_wallet); - CTxDestination dest = DecodeDestination(request.params[0].get_str()); - if (!IsValidDestination(dest)) { - throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address"); - } - - // Amount - CAmount nAmount = AmountFromValue(request.params[1]); - if (nAmount <= 0) - throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount for send"); - // Wallet comments mapValue_t mapValue; if (!request.params[2].isNull() && !request.params[2].get_str().empty()) @@ -471,8 +479,18 @@ static UniValue sendtoaddress(const JSONRPCRequest& request) EnsureWalletIsUnlocked(pwallet); - CTransactionRef tx = SendMoney(pwallet, dest, nAmount, fSubtractFeeFromAmount, coin_control, std::move(mapValue)); - return tx->GetHash().GetHex(); + UniValue address_amounts(UniValue::VOBJ); + const std::string address = request.params[0].get_str(); + address_amounts.pushKV(address, request.params[1]); + UniValue subtractFeeFromAmount(UniValue::VARR); + if (fSubtractFeeFromAmount) { + subtractFeeFromAmount.push_back(address); + } + + std::vector<CRecipient> recipients; + ParseRecipients(address_amounts, subtractFeeFromAmount, recipients); + + return SendMoney(pwallet, coin_control, recipients, mapValue); } static UniValue listaddressgroupings(const JSONRPCRequest& request) @@ -860,52 +878,10 @@ static UniValue sendmany(const JSONRPCRequest& request) SetFeeEstimateMode(pwallet, coin_control, request.params[7], request.params[6]); - std::set<CTxDestination> destinations; - std::vector<CRecipient> vecSend; + std::vector<CRecipient> recipients; + ParseRecipients(sendTo, subtractFeeFromAmount, recipients); - std::vector<std::string> keys = sendTo.getKeys(); - for (const std::string& name_ : keys) { - CTxDestination dest = DecodeDestination(name_); - if (!IsValidDestination(dest)) { - throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string("Invalid Bitcoin address: ") + name_); - } - - if (destinations.count(dest)) { - throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid parameter, duplicated address: ") + name_); - } - destinations.insert(dest); - - CScript scriptPubKey = GetScriptForDestination(dest); - CAmount nAmount = AmountFromValue(sendTo[name_]); - if (nAmount <= 0) - throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount for send"); - - bool fSubtractFeeFromAmount = false; - for (unsigned int idx = 0; idx < subtractFeeFromAmount.size(); idx++) { - const UniValue& addr = subtractFeeFromAmount[idx]; - if (addr.get_str() == name_) - fSubtractFeeFromAmount = true; - } - - CRecipient recipient = {scriptPubKey, nAmount, fSubtractFeeFromAmount}; - vecSend.push_back(recipient); - } - - EnsureWalletIsUnlocked(pwallet); - - // Shuffle recipient list - std::shuffle(vecSend.begin(), vecSend.end(), FastRandomContext()); - - // Send - CAmount nFeeRequired = 0; - int nChangePosRet = -1; - bilingual_str error; - CTransactionRef tx; - bool fCreated = pwallet->CreateTransaction(vecSend, tx, nFeeRequired, nChangePosRet, error, coin_control); - if (!fCreated) - throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, error.original); - pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */); - return tx->GetHash().GetHex(); + return SendMoney(pwallet, coin_control, recipients, std::move(mapValue)); } static UniValue addmultisigaddress(const JSONRPCRequest& request) @@ -2362,7 +2338,7 @@ static UniValue getwalletinfo(const JSONRPCRequest& request) {RPCResult::Type::NUM_TIME, 
"keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool. Legacy wallets only."}, {RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"}, {RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"}, - {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"}, + {RPCResult::Type::NUM_TIME, "unlocked_until", /* optional */ true, "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked (only present for passphrase-encrypted wallets)"}, {RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"}, {RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"}, {RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"}, @@ -2993,10 +2969,11 @@ void FundTransaction(CWallet* const pwallet, CMutableTransaction& tx, CAmount& f if (options.exists("changeAddress")) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot specify both changeAddress and address_type options"); } - coinControl.m_change_type = pwallet->m_default_change_type; - if (!ParseOutputType(options["change_type"].get_str(), *coinControl.m_change_type)) { + OutputType out_type; + if (!ParseOutputType(options["change_type"].get_str(), out_type)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown change type '%s'", options["change_type"].get_str())); } + coinControl.m_change_type.emplace(out_type); } coinControl.fAllowWatchOnly = ParseIncludeWatchonly(options["includeWatching"], *pwallet); @@ -3140,7 +3117,7 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request) CAmount fee; int change_position; CCoinControl coin_control; - // Automatically select (additional) coins. Can be overriden by options.add_inputs. + // Automatically select (additional) coins. Can be overridden by options.add_inputs. coin_control.m_add_inputs = true; FundTransaction(pwallet, tx, fee, change_position, request.params[1], coin_control); @@ -3522,7 +3499,7 @@ public: { // Always present: script type and redeemscript std::vector<std::vector<unsigned char>> solutions_data; - txnouttype which_type = Solver(subscript, solutions_data); + TxoutType which_type = Solver(subscript, solutions_data); obj.pushKV("script", GetTxnOutputType(which_type)); obj.pushKV("hex", HexStr(subscript)); @@ -3539,7 +3516,7 @@ public: // Always report the pubkey at the top level, so that `getnewaddress()['pubkey']` always works. if (subobj.exists("pubkey")) obj.pushKV("pubkey", subobj["pubkey"]); obj.pushKV("embedded", std::move(subobj)); - } else if (which_type == TX_MULTISIG) { + } else if (which_type == TxoutType::MULTISIG) { // Also report some information on multisig scripts (which do not have a corresponding address). // TODO: abstract out the common functionality between this logic and ExtractDestinations. 
obj.pushKV("sigsrequired", solutions_data[0][0]); @@ -3730,7 +3707,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request) if (meta->has_key_origin) { ret.pushKV("hdkeypath", WriteHDKeypath(meta->key_origin.path)); ret.pushKV("hdseedid", meta->hd_seed_id.GetHex()); - ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint, meta->key_origin.fingerprint + 4)); + ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint)); } } } @@ -4093,7 +4070,7 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request) CMutableTransaction rawTx = ConstructTransaction(request.params[0], request.params[1], request.params[2], rbf); CCoinControl coin_control; // Automatically select coins, unless at least one is manually selected. Can - // be overriden by options.add_inputs. + // be overridden by options.add_inputs. coin_control.m_add_inputs = rawTx.vin.size() == 0; FundTransaction(pwallet, rawTx, fee, change_position, request.params[3], coin_control); diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp index d42950ee42..af57210f01 100644 --- a/src/wallet/salvage.cpp +++ b/src/wallet/salvage.cpp @@ -5,6 +5,7 @@ #include <fs.h> #include <streams.h> +#include <util/translation.h> #include <wallet/salvage.h> #include <wallet/wallet.h> #include <wallet/walletdb.h> @@ -20,6 +21,12 @@ bool RecoverDatabaseFile(const fs::path& file_path) std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename); + bilingual_str open_err; + if (!env->Open(open_err)) { + tfm::format(std::cerr, "%s\n", open_err.original); + return false; + } + // Recovery procedure: // move wallet file to walletfilename.timestamp.bak // Call Salvage with fAggressive=true to diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index d57f99a205..51715462c5 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -88,16 +88,16 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s IsMineResult ret = IsMineResult::NO; std::vector<valtype> vSolutions; - txnouttype whichType = Solver(scriptPubKey, vSolutions); + TxoutType whichType = Solver(scriptPubKey, vSolutions); CKeyID keyID; switch (whichType) { - case TX_NONSTANDARD: - case TX_NULL_DATA: - case TX_WITNESS_UNKNOWN: + case TxoutType::NONSTANDARD: + case TxoutType::NULL_DATA: + case TxoutType::WITNESS_UNKNOWN: break; - case TX_PUBKEY: + case TxoutType::PUBKEY: keyID = CPubKey(vSolutions[0]).GetID(); if (!PermitsUncompressed(sigversion) && vSolutions[0].size() != 33) { return IsMineResult::INVALID; @@ -106,7 +106,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s ret = std::max(ret, IsMineResult::SPENDABLE); } break; - case TX_WITNESS_V0_KEYHASH: + case TxoutType::WITNESS_V0_KEYHASH: { if (sigversion == IsMineSigVersion::WITNESS_V0) { // P2WPKH inside P2WSH is invalid. 
@@ -121,7 +121,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s ret = std::max(ret, IsMineInner(keystore, GetScriptForDestination(PKHash(uint160(vSolutions[0]))), IsMineSigVersion::WITNESS_V0)); break; } - case TX_PUBKEYHASH: + case TxoutType::PUBKEYHASH: keyID = CKeyID(uint160(vSolutions[0])); if (!PermitsUncompressed(sigversion)) { CPubKey pubkey; @@ -133,7 +133,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s ret = std::max(ret, IsMineResult::SPENDABLE); } break; - case TX_SCRIPTHASH: + case TxoutType::SCRIPTHASH: { if (sigversion != IsMineSigVersion::TOP) { // P2SH inside P2WSH or P2SH is invalid. @@ -146,7 +146,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s } break; } - case TX_WITNESS_V0_SCRIPTHASH: + case TxoutType::WITNESS_V0_SCRIPTHASH: { if (sigversion == IsMineSigVersion::WITNESS_V0) { // P2WSH inside P2WSH is invalid. @@ -165,7 +165,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s break; } - case TX_MULTISIG: + case TxoutType::MULTISIG: { // Never treat bare multisig outputs as ours (they can still be made watchonly-though) if (sigversion == IsMineSigVersion::TOP) { @@ -597,11 +597,6 @@ TransactionError LegacyScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psb continue; } - // Verify input looks sane. This will check that we have at most one uxto, witness or non-witness. - if (!input.IsSane()) { - return TransactionError::INVALID_PSBT; - } - // Get the Sighash type if (sign && input.sighash_type > 0 && input.sighash_type != sighash_type) { return TransactionError::SIGHASH_MISMATCH; @@ -836,7 +831,7 @@ bool LegacyScriptPubKeyMan::HaveWatchOnly() const static bool ExtractPubKey(const CScript &dest, CPubKey& pubKeyOut) { std::vector<std::vector<unsigned char>> solutions; - return Solver(dest, solutions) == TX_PUBKEY && + return Solver(dest, solutions) == TxoutType::PUBKEY && (pubKeyOut = CPubKey(solutions[0])).IsFullyValid(); } @@ -910,20 +905,22 @@ bool LegacyScriptPubKeyMan::AddWatchOnly(const CScript& dest, int64_t nCreateTim return AddWatchOnly(dest); } -void LegacyScriptPubKeyMan::SetHDChain(const CHDChain& chain, bool memonly) +void LegacyScriptPubKeyMan::LoadHDChain(const CHDChain& chain) { LOCK(cs_KeyStore); - // memonly == true means we are loading the wallet file - // memonly == false means that the chain is actually being changed - if (!memonly) { - // Store the new chain - if (!WalletBatch(m_storage.GetDatabase()).WriteHDChain(chain)) { - throw std::runtime_error(std::string(__func__) + ": writing chain failed"); - } - // When there's an old chain, add it as an inactive chain as we are now rotating hd chains - if (!m_hd_chain.seed_id.IsNull()) { - AddInactiveHDChain(m_hd_chain); - } + m_hd_chain = chain; +} + +void LegacyScriptPubKeyMan::AddHDChain(const CHDChain& chain) +{ + LOCK(cs_KeyStore); + // Store the new chain + if (!WalletBatch(m_storage.GetDatabase()).WriteHDChain(chain)) { + throw std::runtime_error(std::string(__func__) + ": writing chain failed"); + } + // When there's an old chain, add it as an inactive chain as we are now rotating hd chains + if (!m_hd_chain.seed_id.IsNull()) { + AddInactiveHDChain(m_hd_chain); } m_hd_chain = chain; @@ -1177,7 +1174,7 @@ void LegacyScriptPubKeyMan::SetHDSeed(const CPubKey& seed) CHDChain newHdChain; newHdChain.nVersion = m_storage.CanSupportFeature(FEATURE_HD_SPLIT) ? 
CHDChain::VERSION_HD_CHAIN_SPLIT : CHDChain::VERSION_HD_BASE; newHdChain.seed_id = seed.GetID(); - SetHDChain(newHdChain, false); + AddHDChain(newHdChain); NotifyCanGetAddressesChanged(); WalletBatch batch(m_storage.GetDatabase()); m_storage.UnsetBlankWalletFlag(batch); @@ -1900,8 +1897,8 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_ desc_prefix = "wpkh(" + xpub + "/84'"; break; } - default: assert(false); - } + } // no default case, so the compiler can warn about missing cases + assert(!desc_prefix.empty()); // Mainnet derives at 0', testnet and regtest derive at 1' if (Params().IsTestChain()) { @@ -2086,11 +2083,6 @@ TransactionError DescriptorScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& continue; } - // Verify input looks sane. This will check that we have at most one uxto, witness or non-witness. - if (!input.IsSane()) { - return TransactionError::INVALID_PSBT; - } - // Get the Sighash type if (sign && input.sighash_type > 0 && input.sighash_type != sighash_type) { return TransactionError::SIGHASH_MISMATCH; diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 9fa2a68284..a96d971734 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -422,8 +422,10 @@ public: //! Generate a new key CPubKey GenerateNewKey(WalletBatch& batch, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - /* Set the HD chain model (chain child index counters) */ - void SetHDChain(const CHDChain& chain, bool memonly); + /* Set the HD chain model (chain child index counters) and writes it to the database */ + void AddHDChain(const CHDChain& chain); + //! Load a HD chain model (used by LoadWallet) + void LoadHDChain(const CHDChain& chain); const CHDChain& GetHDChain() const { return m_hd_chain; } void AddInactiveHDChain(const CHDChain& chain); diff --git a/src/wallet/test/init_test_fixture.cpp b/src/wallet/test/init_test_fixture.cpp index 797a0d634f..35bd965673 100644 --- a/src/wallet/test/init_test_fixture.cpp +++ b/src/wallet/test/init_test_fixture.cpp @@ -3,13 +3,14 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
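
Several changes in this patch replace a setter that took a memonly flag (SetHDChain here, SetWalletFlags and SetActiveScriptPubKeyMan further down) with a Load... variant used while reading the wallet file and an Add... variant that also writes to the database. A minimal sketch of that split, with toy types standing in for WalletBatch and CHDChain (assumptions of the sketch, not the real classes):

#include <iostream>
#include <stdexcept>
#include <string>

// Stand-in for WalletBatch: pretends every write succeeds.
struct ToyBatch {
    bool WriteHDChain(const std::string&) { return true; }
};

class ToyKeyMan {
    std::string m_hd_chain;  // stand-in for CHDChain
public:
    // Load path: the record already exists on disk, only update memory.
    void LoadHDChain(const std::string& chain) { m_hd_chain = chain; }

    // Change path: persist first, then reuse the load path for the in-memory update.
    void AddHDChain(ToyBatch& batch, const std::string& chain)
    {
        if (!batch.WriteHDChain(chain)) {
            throw std::runtime_error("writing chain failed");
        }
        LoadHDChain(chain);
    }

    const std::string& GetHDChain() const { return m_hd_chain; }
};

int main()
{
    ToyKeyMan keyman;
    ToyBatch batch;
    keyman.LoadHDChain("chain-from-disk");     // wallet loading
    keyman.AddHDChain(batch, "rotated-chain"); // explicit change, written via the batch
    std::cout << keyman.GetHDChain() << "\n";
}

Call sites now state their intent directly (load vs. change), and the boolean that was easy to pass the wrong way around disappears.
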
#include <fs.h> +#include <util/check.h> #include <util/system.h> #include <wallet/test/init_test_fixture.h> -InitWalletDirTestingSetup::InitWalletDirTestingSetup(const std::string& chainName): BasicTestingSetup(chainName) +InitWalletDirTestingSetup::InitWalletDirTestingSetup(const std::string& chainName) : BasicTestingSetup(chainName) { - m_chain_client = MakeWalletClient(*m_chain, {}); + m_chain_client = MakeWalletClient(*m_chain, *Assert(m_node.args), {}); std::string sep; sep += fs::path::preferred_separator; diff --git a/src/wallet/test/psbt_wallet_tests.cpp b/src/wallet/test/psbt_wallet_tests.cpp index 3f85a48ff3..ce7e661b67 100644 --- a/src/wallet/test/psbt_wallet_tests.cpp +++ b/src/wallet/test/psbt_wallet_tests.cpp @@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test) CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << psbtx; std::string final_hex = HexStr(ssTx); - BOOST_CHECK_EQUAL(final_hex, "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000"); + BOOST_CHECK_EQUAL(final_hex, 
"70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001008a020000000158e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7501000000171600145f275f436b09a8cc9a2eb2a2f528485c68a56323feffffff02d8231f1b0100000017a914aed962d6654f9a2b36608eb9d64d2b260db4f1118700c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8876500000001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000"); // Mutate the transaction so that one of the inputs is invalid psbtx.tx->vin[0].prevout.n = 2; diff --git a/src/wallet/test/wallet_test_fixture.h b/src/wallet/test/wallet_test_fixture.h index 6c32868b1e..99d7cfe921 100644 --- a/src/wallet/test/wallet_test_fixture.h +++ b/src/wallet/test/wallet_test_fixture.h @@ -10,17 +10,18 @@ #include <interfaces/chain.h> #include <interfaces/wallet.h> #include <node/context.h> +#include <util/check.h> #include <wallet/wallet.h> #include <memory> /** Testing setup and teardown for wallet. 
*/ -struct WalletTestingSetup: public TestingSetup { +struct WalletTestingSetup : public TestingSetup { explicit WalletTestingSetup(const std::string& chainName = CBaseChainParams::MAIN); std::unique_ptr<interfaces::Chain> m_chain = interfaces::MakeChain(m_node); - std::unique_ptr<interfaces::ChainClient> m_chain_client = interfaces::MakeWalletClient(*m_chain, {}); + std::unique_ptr<interfaces::ChainClient> m_chain_client = interfaces::MakeWalletClient(*m_chain, *Assert(m_node.args), {}); CWallet m_wallet; std::unique_ptr<interfaces::Handler> m_chain_notifications_handler; }; diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 497ccd14bb..7ef06663b5 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -28,6 +28,11 @@ extern UniValue importmulti(const JSONRPCRequest& request); extern UniValue dumpwallet(const JSONRPCRequest& request); extern UniValue importwallet(const JSONRPCRequest& request); +// Ensure that fee levels defined in the wallet are at least as high +// as the default levels for node policy. +static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet minimum fee is smaller than default relay fee"); +static_assert(WALLET_INCREMENTAL_RELAY_FEE >= DEFAULT_INCREMENTAL_RELAY_FEE, "wallet incremental fee is smaller than default incremental relay fee"); + BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup) static std::shared_ptr<CWallet> TestLoadWallet(interfaces::Chain& chain) @@ -118,7 +123,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) // Prune the older block file. { LOCK(cs_main); - EnsureChainman(m_node).PruneOneBlockFile(oldTip->GetBlockPos().nFile); + Assert(m_node.chainman)->PruneOneBlockFile(oldTip->GetBlockPos().nFile); } UnlinkPrunedFiles({oldTip->GetBlockPos().nFile}); @@ -144,7 +149,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) // Prune the remaining block file. { LOCK(cs_main); - EnsureChainman(m_node).PruneOneBlockFile(newTip->GetBlockPos().nFile); + Assert(m_node.chainman)->PruneOneBlockFile(newTip->GetBlockPos().nFile); } UnlinkPrunedFiles({newTip->GetBlockPos().nFile}); @@ -181,7 +186,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup) // Prune the older block file. 
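
The test hunks above switch from EnsureChainman(m_node) and bare global access to expressions like Assert(m_node.chainman)-> and *Assert(m_node.args). Below is a simplified sketch of that check-then-pass-through idea; the real macro lives in util/check.h and differs in detail, and the names here are local to the sketch.

#include <cstdio>
#include <cstdlib>
#include <memory>
#include <utility>

// Simplified stand-in for a null-checking pass-through helper: abort with a
// readable message on a null value, otherwise hand the value back unchanged
// so the caller can dereference it inline.
template <typename T>
T&& CheckNonNull(T&& val, const char* expr)
{
    if (!val) {
        std::fprintf(stderr, "Assertion failed: %s is null\n", expr);
        std::abort();
    }
    return std::forward<T>(val);
}
#define ASSERT_NONNULL(val) CheckNonNull(val, #val)

struct ToyChainstateManager {
    void PruneOneBlockFile(int file) { std::printf("pruning block file %d\n", file); }
};

int main()
{
    auto chainman = std::make_unique<ToyChainstateManager>();
    // Fails loudly if chainman were null, instead of dereferencing a null pointer.
    ASSERT_NONNULL(chainman)->PruneOneBlockFile(0);
}
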
{ LOCK(cs_main); - EnsureChainman(m_node).PruneOneBlockFile(oldTip->GetBlockPos().nFile); + Assert(m_node.chainman)->PruneOneBlockFile(oldTip->GetBlockPos().nFile); } UnlinkPrunedFiles({oldTip->GetBlockPos().nFile}); @@ -333,7 +338,7 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup) BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 50*COIN); } -static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64_t blockTime) +static int64_t AddTx(ChainstateManager& chainman, CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64_t blockTime) { CMutableTransaction tx; CWalletTx::Confirmation confirm; @@ -341,7 +346,8 @@ static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64 SetMockTime(mockTime); CBlockIndex* block = nullptr; if (blockTime > 0) { - auto inserted = ::BlockIndex().emplace(GetRandHash(), new CBlockIndex); + LOCK(cs_main); + auto inserted = chainman.BlockIndex().emplace(GetRandHash(), new CBlockIndex); assert(inserted.second); const uint256& hash = inserted.first->first; block = inserted.first->second; @@ -363,24 +369,24 @@ static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64 BOOST_AUTO_TEST_CASE(ComputeTimeSmart) { // New transaction should use clock time if lower than block time. - BOOST_CHECK_EQUAL(AddTx(m_wallet, 1, 100, 120), 100); + BOOST_CHECK_EQUAL(AddTx(*m_node.chainman, m_wallet, 1, 100, 120), 100); // Test that updating existing transaction does not change smart time. - BOOST_CHECK_EQUAL(AddTx(m_wallet, 1, 200, 220), 100); + BOOST_CHECK_EQUAL(AddTx(*m_node.chainman, m_wallet, 1, 200, 220), 100); // New transaction should use clock time if there's no block time. - BOOST_CHECK_EQUAL(AddTx(m_wallet, 2, 300, 0), 300); + BOOST_CHECK_EQUAL(AddTx(*m_node.chainman, m_wallet, 2, 300, 0), 300); // New transaction should use block time if lower than clock time. - BOOST_CHECK_EQUAL(AddTx(m_wallet, 3, 420, 400), 400); + BOOST_CHECK_EQUAL(AddTx(*m_node.chainman, m_wallet, 3, 420, 400), 400); // New transaction should use latest entry time if higher than // min(block time, clock time). - BOOST_CHECK_EQUAL(AddTx(m_wallet, 4, 500, 390), 400); + BOOST_CHECK_EQUAL(AddTx(*m_node.chainman, m_wallet, 4, 500, 390), 400); // If there are future entries, new transaction should use time of the // newest entry that is no more than 300 seconds ahead of the clock time. - BOOST_CHECK_EQUAL(AddTx(m_wallet, 5, 50, 600), 300); + BOOST_CHECK_EQUAL(AddTx(*m_node.chainman, m_wallet, 5, 50, 600), 300); // Reset mock time for other tests. SetMockTime(0); @@ -624,13 +630,13 @@ static size_t CalculateNestedKeyhashInputSize(bool use_max_sig) CPubKey pubkey = key.GetPubKey(); // Generate pubkey hash - uint160 key_hash(Hash160(pubkey.begin(), pubkey.end())); + uint160 key_hash(Hash160(pubkey)); // Create inner-script to enter into keystore. Key hash can't be 0... 
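
The Hash160(pubkey.begin(), pubkey.end()) -> Hash160(pubkey) changes just above (and the Hash(vchKey) changes later in walletdb.cpp) move hashing from iterator-pair arguments to a single object or Span argument. A toy sketch of the interface idea; the checksum below is a stand-in, not the real Hash160:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy stand-in for a hash function: any contiguous byte container in, checksum out.
template <typename Bytes>
uint32_t ToyHash(const Bytes& data)
{
    uint32_t acc = 0;
    for (const auto b : data) acc = acc * 31u + static_cast<unsigned char>(b);
    return acc;
}

int main()
{
    const std::vector<unsigned char> pubkey{0x02, 0xab, 0xcd};
    const std::vector<unsigned char> script{0x00, 0x14, 0xde, 0xad};
    // One argument per object: there is no way to accidentally pair the begin()
    // of one buffer with the end() of another, which the old iterator-pair
    // interface made possible.
    assert(ToyHash(pubkey) != ToyHash(script));
    return 0;
}
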
CScript inner_script = CScript() << OP_0 << std::vector<unsigned char>(key_hash.begin(), key_hash.end()); // Create outer P2SH script for the output - uint160 script_id(Hash160(inner_script.begin(), inner_script.end())); + uint160 script_id(Hash160(inner_script)); CScript script_pubkey = CScript() << OP_HASH160 << std::vector<unsigned char>(script_id.begin(), script_id.end()) << OP_EQUAL; // Add inner-script to key store and key to watchonly @@ -790,4 +796,37 @@ BOOST_FIXTURE_TEST_CASE(CreateWalletFromFile, TestChain100Setup) TestUnloadWallet(std::move(wallet)); } +BOOST_FIXTURE_TEST_CASE(ZapSelectTx, TestChain100Setup) +{ + auto chain = interfaces::MakeChain(m_node); + auto wallet = TestLoadWallet(*chain); + CKey key; + key.MakeNewKey(true); + AddKey(*wallet, key); + + std::string error; + m_coinbase_txns.push_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]); + auto block_tx = TestSimpleSpend(*m_coinbase_txns[0], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey())); + CreateAndProcessBlock({block_tx}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); + + SyncWithValidationInterfaceQueue(); + + { + auto block_hash = block_tx.GetHash(); + auto prev_hash = m_coinbase_txns[0]->GetHash(); + + LOCK(wallet->cs_wallet); + BOOST_CHECK(wallet->HasWalletSpend(prev_hash)); + BOOST_CHECK_EQUAL(wallet->mapWallet.count(block_hash), 1u); + + std::vector<uint256> vHashIn{ block_hash }, vHashOut; + BOOST_CHECK_EQUAL(wallet->ZapSelectTx(vHashIn, vHashOut), DBErrors::LOAD_OK); + + BOOST_CHECK(!wallet->HasWalletSpend(prev_hash)); + BOOST_CHECK_EQUAL(wallet->mapWallet.count(block_hash), 0u); + } + + TestUnloadWallet(std::move(wallet)); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 57eec9baf9..cee2f2214c 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -99,9 +99,11 @@ std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet) return interfaces::MakeHandler([it] { LOCK(cs_wallets); g_load_wallet_fns.erase(it); }); } +static Mutex g_loading_wallet_mutex; static Mutex g_wallet_release_mutex; static std::condition_variable g_wallet_release_cv; -static std::set<std::string> g_unloading_wallet_set; +static std::set<std::string> g_loading_wallet_set GUARDED_BY(g_loading_wallet_mutex); +static std::set<std::string> g_unloading_wallet_set GUARDED_BY(g_wallet_release_mutex); // Custom deleter for shared_ptr<CWallet>. 
static void ReleaseWallet(CWallet* wallet) @@ -145,7 +147,8 @@ void UnloadWallet(std::shared_ptr<CWallet>&& wallet) } } -std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const WalletLocation& location, bilingual_str& error, std::vector<bilingual_str>& warnings) +namespace { +std::shared_ptr<CWallet> LoadWalletInternal(interfaces::Chain& chain, const WalletLocation& location, bilingual_str& error, std::vector<bilingual_str>& warnings) { try { if (!CWallet::Verify(chain, location, error, warnings)) { @@ -166,6 +169,19 @@ std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const WalletLocati return nullptr; } } +} // namespace + +std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const WalletLocation& location, bilingual_str& error, std::vector<bilingual_str>& warnings) +{ + auto result = WITH_LOCK(g_loading_wallet_mutex, return g_loading_wallet_set.insert(location.GetName())); + if (!result.second) { + error = Untranslated("Wallet already being loading."); + return nullptr; + } + auto wallet = LoadWalletInternal(chain, location, error, warnings); + WITH_LOCK(g_loading_wallet_mutex, g_loading_wallet_set.erase(result.first)); + return wallet; +} std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) { @@ -423,9 +439,14 @@ bool CWallet::HasWalletSpend(const uint256& txid) const return (iter != mapTxSpends.end() && iter->first.hash == txid); } -void CWallet::Flush(bool shutdown) +void CWallet::Flush() +{ + database->Flush(); +} + +void CWallet::Close() { - database->Flush(shutdown); + database->Close(); } void CWallet::SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator> range) @@ -1406,19 +1427,28 @@ bool CWallet::IsWalletFlagSet(uint64_t flag) const return (m_wallet_flags & flag); } -bool CWallet::SetWalletFlags(uint64_t overwriteFlags, bool memonly) +bool CWallet::LoadWalletFlags(uint64_t flags) { LOCK(cs_wallet); - m_wallet_flags = overwriteFlags; - if (((overwriteFlags & KNOWN_WALLET_FLAGS) >> 32) ^ (overwriteFlags >> 32)) { + if (((flags & KNOWN_WALLET_FLAGS) >> 32) ^ (flags >> 32)) { // contains unknown non-tolerable wallet flags return false; } - if (!memonly && !WalletBatch(*database).WriteWalletFlags(m_wallet_flags)) { + m_wallet_flags = flags; + + return true; +} + +bool CWallet::AddWalletFlags(uint64_t flags) +{ + LOCK(cs_wallet); + // We should never be writing unknown non-tolerable wallet flags + assert(((flags & KNOWN_WALLET_FLAGS) >> 32) == (flags >> 32)); + if (!WalletBatch(*database).WriteWalletFlags(flags)) { throw std::runtime_error(std::string(__func__) + ": writing wallet flags failed"); } - return true; + return LoadWalletFlags(flags); } int64_t CWalletTx::GetTxTime() const @@ -2491,13 +2521,8 @@ TransactionError CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bool& comp continue; } - // Verify input looks sane. This will check that we have at most one uxto, witness or non-witness. - if (!input.IsSane()) { - return TransactionError::INVALID_PSBT; - } - // If we have no utxo, grab it from the wallet. 
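
The new g_loading_wallet_set and the LoadWallet wrapper above reject a second concurrent load of the same wallet name. A minimal sketch of that guard using std::mutex and std::set; the real code uses the project's Mutex, WITH_LOCK and GUARDED_BY annotations instead, and the function name below is invented for the sketch.

#include <iostream>
#include <mutex>
#include <set>
#include <string>

static std::mutex g_loading_mutex;
static std::set<std::string> g_loading_set;  // names of wallets currently being loaded

// Returns false if another load of the same wallet name is already in flight.
bool LoadWalletGuarded(const std::string& name)
{
    {
        std::lock_guard<std::mutex> lock(g_loading_mutex);
        if (!g_loading_set.insert(name).second) {
            std::cerr << "Wallet " << name << " is already being loaded\n";
            return false;
        }
    }
    // ... the actual (slow) loading work runs here without holding the lock ...
    std::lock_guard<std::mutex> lock(g_loading_mutex);
    g_loading_set.erase(name);
    return true;
}

int main()
{
    std::cout << LoadWalletGuarded("wallet.dat") << "\n";  // prints 1
}
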
- if (!input.non_witness_utxo && input.witness_utxo.IsNull()) { + if (!input.non_witness_utxo) { const uint256& txhash = txin.prevout.hash; const auto it = mapWallet.find(txhash); if (it != mapWallet.end()) { @@ -2653,11 +2678,11 @@ static uint32_t GetLocktimeForNewTransaction(interfaces::Chain& chain, const uin return locktime; } -OutputType CWallet::TransactionChangeType(OutputType change_type, const std::vector<CRecipient>& vecSend) +OutputType CWallet::TransactionChangeType(const Optional<OutputType>& change_type, const std::vector<CRecipient>& vecSend) { // If -changetype is specified, always use that change type. - if (change_type != OutputType::CHANGE_AUTO) { - return change_type; + if (change_type) { + return *change_type; } // if m_default_address_type is legacy, use legacy address as change (even @@ -3109,9 +3134,11 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256 { AssertLockHeld(cs_wallet); DBErrors nZapSelectTxRet = WalletBatch(*database, "cr+").ZapSelectTx(vHashIn, vHashOut); - for (uint256 hash : vHashOut) { + for (const uint256& hash : vHashOut) { const auto& it = mapWallet.find(hash); wtxOrdered.erase(it->second.m_it_wtxOrdered); + for (const auto& txin : it->second.tx->vin) + mapTxSpends.erase(txin.prevout); mapWallet.erase(it); NotifyTransactionChanged(this, hash, CT_DELETED); } @@ -3721,15 +3748,11 @@ bool CWallet::Verify(interfaces::Chain& chain, const WalletLocation& location, b std::unique_ptr<WalletDatabase> database = CreateWalletDatabase(wallet_path); try { - if (!WalletBatch::VerifyEnvironment(wallet_path, error_string)) { - return false; - } + return database->Verify(error_string); } catch (const fs::filesystem_error& e) { error_string = Untranslated(strprintf("Error loading wallet %s. %s", location.GetName(), fsbridge::get_filesystem_error_message(e))); return false; } - - return WalletBatch::VerifyDatabaseFile(wallet_path, error_string); } std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, const WalletLocation& location, bilingual_str& error, std::vector<bilingual_str>& warnings, uint64_t wallet_creation_flags) @@ -3789,7 +3812,7 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, // ensure this wallet.dat can only be opened by clients supporting HD with chain split and expects no default key walletInstance->SetMinVersion(FEATURE_LATEST); - walletInstance->SetWalletFlags(wallet_creation_flags, false); + walletInstance->AddWalletFlags(wallet_creation_flags); // Only create LegacyScriptPubKeyMan when not descriptor wallet if (!walletInstance->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) { @@ -3826,14 +3849,20 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, } } - if (!gArgs.GetArg("-addresstype", "").empty() && !ParseOutputType(gArgs.GetArg("-addresstype", ""), walletInstance->m_default_address_type)) { - error = strprintf(_("Unknown address type '%s'"), gArgs.GetArg("-addresstype", "")); - return nullptr; + if (!gArgs.GetArg("-addresstype", "").empty()) { + if (!ParseOutputType(gArgs.GetArg("-addresstype", ""), walletInstance->m_default_address_type)) { + error = strprintf(_("Unknown address type '%s'"), gArgs.GetArg("-addresstype", "")); + return nullptr; + } } - if (!gArgs.GetArg("-changetype", "").empty() && !ParseOutputType(gArgs.GetArg("-changetype", ""), walletInstance->m_default_change_type)) { - error = strprintf(_("Unknown change type '%s'"), gArgs.GetArg("-changetype", "")); - return nullptr; + if 
(!gArgs.GetArg("-changetype", "").empty()) { + OutputType out_type; + if (!ParseOutputType(gArgs.GetArg("-changetype", ""), out_type)) { + error = strprintf(_("Unknown change type '%s'"), gArgs.GetArg("-changetype", "")); + return nullptr; + } + walletInstance->m_default_change_type = out_type; } if (gArgs.IsArgSet("-mintxfee")) { @@ -4404,12 +4433,21 @@ void CWallet::SetupDescriptorScriptPubKeyMans() spk_manager->SetupDescriptorGeneration(master_key, t); uint256 id = spk_manager->GetID(); m_spk_managers[id] = std::move(spk_manager); - SetActiveScriptPubKeyMan(id, t, internal); + AddActiveScriptPubKeyMan(id, t, internal); } } } -void CWallet::SetActiveScriptPubKeyMan(uint256 id, OutputType type, bool internal, bool memonly) +void CWallet::AddActiveScriptPubKeyMan(uint256 id, OutputType type, bool internal) +{ + WalletBatch batch(*database); + if (!batch.WriteActiveScriptPubKeyMan(static_cast<uint8_t>(type), id, internal)) { + throw std::runtime_error(std::string(__func__) + ": writing active ScriptPubKeyMan id failed"); + } + LoadActiveScriptPubKeyMan(id, type, internal); +} + +void CWallet::LoadActiveScriptPubKeyMan(uint256 id, OutputType type, bool internal) { WalletLogPrintf("Setting spkMan to active: id = %s, type = %d, internal = %d\n", id.ToString(), static_cast<int>(type), static_cast<int>(internal)); auto& spk_mans = internal ? m_internal_spk_managers : m_external_spk_managers; @@ -4417,12 +4455,6 @@ void CWallet::SetActiveScriptPubKeyMan(uint256 id, OutputType type, bool interna spk_man->SetInternal(internal); spk_mans[type] = spk_man; - if (!memonly) { - WalletBatch batch(*database); - if (!batch.WriteActiveScriptPubKeyMan(static_cast<uint8_t>(type), id, internal)) { - throw std::runtime_error(std::string(__func__) + ": writing active ScriptPubKeyMan id failed"); - } - } NotifyCanGetAddressesChanged(); } diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 9931671fb4..a761caf38c 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -13,11 +13,11 @@ #include <policy/feerate.h> #include <psbt.h> #include <tinyformat.h> -#include <ui_interface.h> #include <util/message.h> #include <util/strencodings.h> #include <util/string.h> #include <util/system.h> +#include <util/ui_change_type.h> #include <validationinterface.h> #include <wallet/coinselection.h> #include <wallet/crypter.h> @@ -105,9 +105,6 @@ class ReserveDestination; //! Default for -addresstype constexpr OutputType DEFAULT_ADDRESS_TYPE{OutputType::BECH32}; -//! Default for -changetype -constexpr OutputType DEFAULT_CHANGE_TYPE{OutputType::CHANGE_AUTO}; - static constexpr uint64_t KNOWN_WALLET_FLAGS = WALLET_FLAG_AVOID_REUSE | WALLET_FLAG_BLANK_WALLET @@ -934,7 +931,7 @@ public: Balance GetBalance(int min_depth = 0, bool avoid_reuse = true) const; CAmount GetAvailableBalance(const CCoinControl* coinControl = nullptr) const; - OutputType TransactionChangeType(OutputType change_type, const std::vector<CRecipient>& vecSend); + OutputType TransactionChangeType(const Optional<OutputType>& change_type, const std::vector<CRecipient>& vecSend); /** * Insert additional inputs into the transaction by @@ -1012,7 +1009,13 @@ public: CFeeRate m_fallback_fee{DEFAULT_FALLBACK_FEE}; CFeeRate m_discard_rate{DEFAULT_DISCARD_FEE}; OutputType m_default_address_type{DEFAULT_ADDRESS_TYPE}; - OutputType m_default_change_type{DEFAULT_CHANGE_TYPE}; + /** + * Default output type for change outputs. 
When unset, automatically choose type + * based on address type setting and the types other of non-change outputs + * (see -changetype option documentation and implementation in + * CWallet::TransactionChangeType for details). + */ + Optional<OutputType> m_default_change_type{}; /** Absolute maximum transaction fee (in satoshis) used by default for the wallet */ CAmount m_default_max_tx_fee{DEFAULT_TRANSACTION_MAXFEE}; @@ -1084,7 +1087,10 @@ public: bool HasWalletSpend(const uint256& txid) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); //! Flush wallet (bitdb flush) - void Flush(bool shutdown=false); + void Flush(); + + //! Close wallet database + void Close(); /** Wallet is about to be unloaded */ boost::signals2::signal<void ()> NotifyUnload; @@ -1173,7 +1179,9 @@ public: /** overwrite all flags by the given uint64_t returns false if unknown, non-tolerable flags are present */ - bool SetWalletFlags(uint64_t overwriteFlags, bool memOnly); + bool AddWalletFlags(uint64_t flags); + /** Loads the flags into the wallet. (used by LoadWallet) */ + bool LoadWalletFlags(uint64_t flags); /** Determine if we are a legacy wallet */ bool IsLegacy() const; @@ -1251,12 +1259,17 @@ public: //! Instantiate a descriptor ScriptPubKeyMan from the WalletDescriptor and load it void LoadDescriptorScriptPubKeyMan(uint256 id, WalletDescriptor& desc); - //! Sets the active ScriptPubKeyMan for the specified type and internal + //! Adds the active ScriptPubKeyMan for the specified type and internal. Writes it to the wallet file + //! @param[in] id The unique id for the ScriptPubKeyMan + //! @param[in] type The OutputType this ScriptPubKeyMan provides addresses for + //! @param[in] internal Whether this ScriptPubKeyMan provides change addresses + void AddActiveScriptPubKeyMan(uint256 id, OutputType type, bool internal); + + //! Loads an active ScriptPubKeyMan for the specified type and internal. (used by LoadWallet) //! @param[in] id The unique id for the ScriptPubKeyMan //! @param[in] type The OutputType this ScriptPubKeyMan provides addresses for //! @param[in] internal Whether this ScriptPubKeyMan provides change addresses - //! @param[in] memonly Whether to record this update to the database. Set to true for wallet loading, normally false when actually updating the wallet. - void SetActiveScriptPubKeyMan(uint256 id, OutputType type, bool internal, bool memonly = false); + void LoadActiveScriptPubKeyMan(uint256 id, OutputType type, bool internal); //! 
Create new DescriptorScriptPubKeyMans and add them to the wallet void SetupDescriptorScriptPubKeyMans(); diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 603887ee58..fa6814d0d3 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -103,7 +103,7 @@ bool WalletBatch::WriteKey(const CPubKey& vchPubKey, const CPrivKey& vchPrivKey, vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end()); vchKey.insert(vchKey.end(), vchPrivKey.begin(), vchPrivKey.end()); - return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey.begin(), vchKey.end())), false); + return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey)), false); } bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey, @@ -115,13 +115,13 @@ bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey, } // Compute a checksum of the encrypted key - uint256 checksum = Hash(vchCryptedSecret.begin(), vchCryptedSecret.end()); + uint256 checksum = Hash(vchCryptedSecret); const auto key = std::make_pair(DBKeys::CRYPTED_KEY, vchPubKey); if (!WriteIC(key, std::make_pair(vchCryptedSecret, checksum), false)) { // It may already exist, so try writing just the checksum std::vector<unsigned char> val; - if (!m_batch.Read(key, val)) { + if (!m_batch->Read(key, val)) { return false; } if (!WriteIC(key, std::make_pair(val, checksum), true)) { @@ -166,8 +166,8 @@ bool WalletBatch::WriteBestBlock(const CBlockLocator& locator) bool WalletBatch::ReadBestBlock(CBlockLocator& locator) { - if (m_batch.Read(DBKeys::BESTBLOCK, locator) && !locator.vHave.empty()) return true; - return m_batch.Read(DBKeys::BESTBLOCK_NOMERKLE, locator); + if (m_batch->Read(DBKeys::BESTBLOCK, locator) && !locator.vHave.empty()) return true; + return m_batch->Read(DBKeys::BESTBLOCK_NOMERKLE, locator); } bool WalletBatch::WriteOrderPosNext(int64_t nOrderPosNext) @@ -177,7 +177,7 @@ bool WalletBatch::WriteOrderPosNext(int64_t nOrderPosNext) bool WalletBatch::ReadPool(int64_t nPool, CKeyPool& keypool) { - return m_batch.Read(std::make_pair(DBKeys::POOL, nPool), keypool); + return m_batch->Read(std::make_pair(DBKeys::POOL, nPool), keypool); } bool WalletBatch::WritePool(int64_t nPool, const CKeyPool& keypool) @@ -209,7 +209,7 @@ bool WalletBatch::WriteDescriptorKey(const uint256& desc_id, const CPubKey& pubk key.insert(key.end(), pubkey.begin(), pubkey.end()); key.insert(key.end(), privkey.begin(), privkey.end()); - return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key.begin(), key.end())), false); + return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key)), false); } bool WalletBatch::WriteCryptedDescriptorKey(const uint256& desc_id, const CPubKey& pubkey, const std::vector<unsigned char>& secret) @@ -365,7 +365,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end()); vchKey.insert(vchKey.end(), pkey.begin(), pkey.end()); - if (Hash(vchKey.begin(), vchKey.end()) != hash) + if (Hash(vchKey) != hash) { strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt"; return false; @@ -414,7 +414,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, if (!ssValue.eof()) { uint256 checksum; ssValue >> checksum; - if ((checksum_valid = Hash(vchPrivKey.begin(), vchPrivKey.end()) != checksum)) { + if ((checksum_valid = Hash(vchPrivKey) 
!= checksum)) { strErr = "Error reading wallet database: Crypted key corrupt"; return false; } @@ -539,11 +539,11 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, } else if (strType == DBKeys::HDCHAIN) { CHDChain chain; ssValue >> chain; - pwallet->GetOrCreateLegacyScriptPubKeyMan()->SetHDChain(chain, true); + pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadHDChain(chain); } else if (strType == DBKeys::FLAGS) { uint64_t flags; ssValue >> flags; - if (!pwallet->SetWalletFlags(flags, true)) { + if (!pwallet->LoadWalletFlags(flags)) { strErr = "Error reading wallet database: Unknown non-tolerable wallet flags found"; return false; } @@ -592,9 +592,6 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, ssValue >> ser_xpub; CExtPubKey xpub; xpub.Decode(ser_xpub.data()); - if (wss.m_descriptor_caches.count(desc_id)) { - wss.m_descriptor_caches[desc_id] = DescriptorCache(); - } if (parent) { wss.m_descriptor_caches[desc_id].CacheParentExtPubKey(key_exp_index, xpub); } else { @@ -624,7 +621,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, to_hash.insert(to_hash.end(), pubkey.begin(), pubkey.end()); to_hash.insert(to_hash.end(), pkey.begin(), pkey.end()); - if (Hash(to_hash.begin(), to_hash.end()) != hash) + if (Hash(to_hash) != hash) { strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt"; return false; @@ -693,15 +690,14 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) LOCK(pwallet->cs_wallet); try { int nMinVersion = 0; - if (m_batch.Read(DBKeys::MINVERSION, nMinVersion)) { + if (m_batch->Read(DBKeys::MINVERSION, nMinVersion)) { if (nMinVersion > FEATURE_LATEST) return DBErrors::TOO_NEW; pwallet->LoadMinVersion(nMinVersion); } // Get cursor - Dbc* pcursor = m_batch.GetCursor(); - if (!pcursor) + if (!m_batch->StartCursor()) { pwallet->WalletLogPrintf("Error getting wallet database cursor\n"); return DBErrors::CORRUPT; @@ -712,11 +708,14 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) // Read next record CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); - int ret = m_batch.ReadAtCursor(pcursor, ssKey, ssValue); - if (ret == DB_NOTFOUND) + bool complete; + bool ret = m_batch->ReadAtCursor(ssKey, ssValue, complete); + if (complete) { break; - else if (ret != 0) + } + else if (!ret) { + m_batch->CloseCursor(); pwallet->WalletLogPrintf("Error reading next record from wallet database\n"); return DBErrors::CORRUPT; } @@ -743,17 +742,17 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) if (!strErr.empty()) pwallet->WalletLogPrintf("%s\n", strErr); } - pcursor->close(); } catch (...) 
{ result = DBErrors::CORRUPT; } + m_batch->CloseCursor(); // Set the active ScriptPubKeyMans for (auto spk_man_pair : wss.m_active_external_spks) { - pwallet->SetActiveScriptPubKeyMan(spk_man_pair.second, spk_man_pair.first, /* internal */ false, /* memonly */ true); + pwallet->LoadActiveScriptPubKeyMan(spk_man_pair.second, spk_man_pair.first, /* internal */ false); } for (auto spk_man_pair : wss.m_active_internal_spks) { - pwallet->SetActiveScriptPubKeyMan(spk_man_pair.second, spk_man_pair.first, /* internal */ true, /* memonly */ true); + pwallet->LoadActiveScriptPubKeyMan(spk_man_pair.second, spk_man_pair.first, /* internal */ true); } // Set the descriptor caches @@ -783,7 +782,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) // Last client version to open this wallet, was previously the file version number int last_client = CLIENT_VERSION; - m_batch.Read(DBKeys::VERSION, last_client); + m_batch->Read(DBKeys::VERSION, last_client); int wallet_version = pwallet->GetVersion(); pwallet->WalletLogPrintf("Wallet File Version = %d\n", wallet_version > 0 ? wallet_version : last_client); @@ -808,7 +807,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) return DBErrors::NEED_REWRITE; if (last_client < CLIENT_VERSION) // Update - m_batch.Write(DBKeys::VERSION, CLIENT_VERSION); + m_batch->Write(DBKeys::VERSION, CLIENT_VERSION); if (wss.fAnyUnordered) result = pwallet->ReorderTransactions(); @@ -844,14 +843,13 @@ DBErrors WalletBatch::FindWalletTx(std::vector<uint256>& vTxHash, std::list<CWal try { int nMinVersion = 0; - if (m_batch.Read(DBKeys::MINVERSION, nMinVersion)) { + if (m_batch->Read(DBKeys::MINVERSION, nMinVersion)) { if (nMinVersion > FEATURE_LATEST) return DBErrors::TOO_NEW; } // Get cursor - Dbc* pcursor = m_batch.GetCursor(); - if (!pcursor) + if (!m_batch->StartCursor()) { LogPrintf("Error getting wallet database cursor\n"); return DBErrors::CORRUPT; @@ -862,11 +860,12 @@ DBErrors WalletBatch::FindWalletTx(std::vector<uint256>& vTxHash, std::list<CWal // Read next record CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); - int ret = m_batch.ReadAtCursor(pcursor, ssKey, ssValue); - if (ret == DB_NOTFOUND) + bool complete; + bool ret = m_batch->ReadAtCursor(ssKey, ssValue, complete); + if (complete) { break; - else if (ret != 0) - { + } else if (!ret) { + m_batch->CloseCursor(); LogPrintf("Error reading next record from wallet database\n"); return DBErrors::CORRUPT; } @@ -881,10 +880,10 @@ DBErrors WalletBatch::FindWalletTx(std::vector<uint256>& vTxHash, std::list<CWal ssValue >> vWtx.back(); } } - pcursor->close(); } catch (...) 
{ result = DBErrors::CORRUPT; } + m_batch->CloseCursor(); return result; } @@ -950,9 +949,6 @@ void MaybeCompactWalletDB() if (fOneThread.exchange(true)) { return; } - if (!gArgs.GetBoolArg("-flushwallet", DEFAULT_FLUSHWALLET)) { - return; - } for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) { WalletDatabase& dbh = pwallet->GetDBHandle(); @@ -965,7 +961,7 @@ void MaybeCompactWalletDB() } if (dbh.nLastFlushed != nUpdateCounter && GetTime() - dbh.nLastWalletUpdate >= 2) { - if (BerkeleyBatch::PeriodicFlush(dbh)) { + if (dbh.PeriodicFlush()) { dbh.nLastFlushed = nUpdateCounter; } } @@ -974,16 +970,6 @@ void MaybeCompactWalletDB() fOneThread = false; } -bool WalletBatch::VerifyEnvironment(const fs::path& wallet_path, bilingual_str& errorStr) -{ - return BerkeleyBatch::VerifyEnvironment(wallet_path, errorStr); -} - -bool WalletBatch::VerifyDatabaseFile(const fs::path& wallet_path, bilingual_str& errorStr) -{ - return BerkeleyBatch::VerifyDatabaseFile(wallet_path, errorStr); -} - bool WalletBatch::WriteDestData(const std::string &address, const std::string &key, const std::string &value) { return WriteIC(std::make_pair(DBKeys::DESTDATA, std::make_pair(address, key)), value); @@ -1007,17 +993,17 @@ bool WalletBatch::WriteWalletFlags(const uint64_t flags) bool WalletBatch::TxnBegin() { - return m_batch.TxnBegin(); + return m_batch->TxnBegin(); } bool WalletBatch::TxnCommit() { - return m_batch.TxnCommit(); + return m_batch->TxnCommit(); } bool WalletBatch::TxnAbort() { - return m_batch.TxnAbort(); + return m_batch->TxnAbort(); } bool IsWalletLoaded(const fs::path& wallet_path) @@ -1026,20 +1012,20 @@ bool IsWalletLoaded(const fs::path& wallet_path) } /** Return object for accessing database at specified path. */ -std::unique_ptr<BerkeleyDatabase> CreateWalletDatabase(const fs::path& path) +std::unique_ptr<WalletDatabase> CreateWalletDatabase(const fs::path& path) { std::string filename; return MakeUnique<BerkeleyDatabase>(GetWalletEnv(path, filename), std::move(filename)); } /** Return object for accessing dummy database with no read/write capabilities. */ -std::unique_ptr<BerkeleyDatabase> CreateDummyWalletDatabase() +std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase() { - return MakeUnique<BerkeleyDatabase>(); + return MakeUnique<DummyDatabase>(); } /** Return object for accessing temporary in-memory database. */ -std::unique_ptr<BerkeleyDatabase> CreateMockWalletDatabase() +std::unique_ptr<WalletDatabase> CreateMockWalletDatabase() { return MakeUnique<BerkeleyDatabase>(std::make_shared<BerkeleyEnvironment>(), ""); } diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 61e0f19e56..7c5bf7652b 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -40,9 +40,6 @@ class CWalletTx; class uint160; class uint256; -/** Backend-agnostic database type. 
*/ -using WalletDatabase = BerkeleyDatabase; - /** Error statuses for the wallet database */ enum class DBErrors { @@ -183,12 +180,12 @@ private: template <typename K, typename T> bool WriteIC(const K& key, const T& value, bool fOverwrite = true) { - if (!m_batch.Write(key, value, fOverwrite)) { + if (!m_batch->Write(key, value, fOverwrite)) { return false; } m_database.IncrementUpdateCounter(); if (m_database.nUpdateCounter % 1000 == 0) { - m_batch.Flush(); + m_batch->Flush(); } return true; } @@ -196,19 +193,19 @@ private: template <typename K> bool EraseIC(const K& key) { - if (!m_batch.Erase(key)) { + if (!m_batch->Erase(key)) { return false; } m_database.IncrementUpdateCounter(); if (m_database.nUpdateCounter % 1000 == 0) { - m_batch.Flush(); + m_batch->Flush(); } return true; } public: explicit WalletBatch(WalletDatabase& database, const char* pszMode = "r+", bool _fFlushOnClose = true) : - m_batch(database, pszMode, _fFlushOnClose), + m_batch(database.MakeBatch(pszMode, _fFlushOnClose)), m_database(database) { } @@ -280,7 +277,7 @@ public: //! Abort current transaction bool TxnAbort(); private: - BerkeleyBatch m_batch; + std::unique_ptr<DatabaseBatch> m_batch; WalletDatabase& m_database; }; @@ -294,12 +291,12 @@ bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, st bool IsWalletLoaded(const fs::path& wallet_path); /** Return object for accessing database at specified path. */ -std::unique_ptr<BerkeleyDatabase> CreateWalletDatabase(const fs::path& path); +std::unique_ptr<WalletDatabase> CreateWalletDatabase(const fs::path& path); /** Return object for accessing dummy database with no read/write capabilities. */ -std::unique_ptr<BerkeleyDatabase> CreateDummyWalletDatabase(); +std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase(); /** Return object for accessing temporary in-memory database. */ -std::unique_ptr<BerkeleyDatabase> CreateMockWalletDatabase(); +std::unique_ptr<WalletDatabase> CreateMockWalletDatabase(); #endif // BITCOIN_WALLET_WALLETDB_H diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index 77ed6beb5d..9f25b1ae7d 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -17,7 +17,7 @@ namespace WalletTool { static void WalletToolReleaseWallet(CWallet* wallet) { wallet->WalletLogPrintf("Releasing wallet\n"); - wallet->Flush(true); + wallet->Close(); delete wallet; } @@ -112,7 +112,7 @@ static bool SalvageWallet(const fs::path& path) // Initialize the environment before recovery bilingual_str error_string; try { - WalletBatch::VerifyEnvironment(path, error_string); + database->Verify(error_string); } catch (const fs::filesystem_error& e) { error_string = Untranslated(strprintf("Error loading wallet. %s", fsbridge::get_filesystem_error_message(e))); } @@ -133,24 +133,19 @@ bool ExecuteWalletToolFunc(const std::string& command, const std::string& name) std::shared_ptr<CWallet> wallet_instance = CreateWallet(name, path); if (wallet_instance) { WalletShowInfo(wallet_instance.get()); - wallet_instance->Flush(true); + wallet_instance->Close(); } } else if (command == "info" || command == "salvage") { if (!fs::exists(path)) { tfm::format(std::cerr, "Error: no wallet file at %s\n", name); return false; } - bilingual_str error; - if (!WalletBatch::VerifyEnvironment(path, error)) { - tfm::format(std::cerr, "%s\nError loading %s. 
Is wallet being used by other process?\n", error.original, name); - return false; - } if (command == "info") { std::shared_ptr<CWallet> wallet_instance = LoadWallet(name, path); if (!wallet_instance) return false; WalletShowInfo(wallet_instance.get()); - wallet_instance->Flush(true); + wallet_instance->Close(); } else if (command == "salvage") { return SalvageWallet(path); } diff --git a/src/walletinitinterface.h b/src/walletinitinterface.h index f4730273f1..a55e02f2dc 100644 --- a/src/walletinitinterface.h +++ b/src/walletinitinterface.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_WALLETINITINTERFACE_H #define BITCOIN_WALLETINITINTERFACE_H +class ArgsManager; + struct NodeContext; class WalletInitInterface { @@ -12,7 +14,7 @@ public: /** Is the wallet component enabled */ virtual bool HasWalletSupport() const = 0; /** Get wallet help string */ - virtual void AddWalletOptions() const = 0; + virtual void AddWalletOptions(ArgsManager& argsman) const = 0; /** Check wallet parameter interaction */ virtual bool ParameterInteraction() const = 0; /** Add wallets that should be opened to list of chain clients. */ diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 5d782026dc..34e4999329 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -166,7 +166,7 @@ class ExampleTest(BitcoinTestFramework): height = self.nodes[0].getblockcount() - for i in range(10): + for _ in range(10): # Use the mininode and blocktools functionality to manually build a block # Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 79777f5582..f19ee12f95 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -123,7 +123,7 @@ class AssumeValidTest(BitcoinTestFramework): height += 1 # Bury the block 100 deep so the coinbase output is spendable - for i in range(100): + for _ in range(100): block = create_block(self.tip, create_coinbase(height), self.block_time) block.solve() self.blocks.append(block) @@ -149,7 +149,7 @@ class AssumeValidTest(BitcoinTestFramework): height += 1 # Bury the assumed valid block 2100 deep - for i in range(2100): + for _ in range(2100): block = create_block(self.tip, create_coinbase(height), self.block_time) block.nVersion = 4 block.solve() diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py index 5cddd6527e..07dd0f8f82 100755 --- a/test/functional/feature_backwards_compatibility.py +++ b/test/functional/feature_backwards_compatibility.py @@ -6,7 +6,7 @@ Test various backwards compatibility scenarios. Download the previous node binaries: -contrib/devtools/previous_release.sh -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2 +contrib/devtools/previous_release.py -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2 v0.15.2 is not required by this test, but it is used in wallet_upgradewallet.py. Due to a hardfork in regtest, it can't be used to sync nodes. 
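Context for the AddWalletOptions(ArgsManager& argsman) signature change in src/walletinitinterface.h above: wallet components now register their startup options on an ArgsManager handed in by the caller instead of reaching for a process-global one. A minimal illustrative sketch, not taken from the diff; the ExampleWalletInit class name, the include paths, the AddArg flags/category, and the Construct(NodeContext&) override are assumptions based on Bitcoin Core conventions of this era, only the three methods shown in the header hunk above are confirmed by the diff.

#include <util/system.h>           // ArgsManager (assumed header location)
#include <walletinitinterface.h>

// Hypothetical no-op implementation showing a component registering its
// options on the caller-supplied ArgsManager rather than a global instance.
class ExampleWalletInit : public WalletInitInterface
{
public:
    bool HasWalletSupport() const override { return false; }

    void AddWalletOptions(ArgsManager& argsman) const override
    {
        // Register an option on the injected ArgsManager; the exact flag and
        // category values here are assumptions for illustration only.
        argsman.AddArg("-wallet=<path>", "Specify wallet path",
                       ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
    }

    bool ParameterInteraction() const override { return true; }

    // Assumed remaining pure-virtual member of the interface (not shown in
    // the hunk above); left as a no-op in this sketch.
    void Construct(NodeContext& node) const override {}
};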
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 549e8b2029..1253c45418 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -30,7 +30,10 @@ class BIP68Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [ - ["-acceptnonstdtxn=1"], + [ + "-acceptnonstdtxn=1", + "-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise + ], ["-acceptnonstdtxn=0"], ] @@ -138,7 +141,7 @@ class BIP68Test(BitcoinTestFramework): # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. - for i in range(400): + for _ in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) @@ -257,7 +260,7 @@ class BIP68Test(BitcoinTestFramework): # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN)) cur_time = int(time.time()) - for i in range(10): + for _ in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 6619d83dc4..c74761869b 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -125,7 +125,7 @@ class FullBlockTest(BitcoinTestFramework): # collect spendable outputs now to avoid cluttering the code later on out = [] - for i in range(NUM_OUTPUTS_TO_COLLECT): + for _ in range(NUM_OUTPUTS_TO_COLLECT): out.append(self.get_spendable_output()) # Start by building a couple of blocks on top (which output is spent is diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index a4dc455d57..34e856c1ba 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -112,11 +112,38 @@ class ConfArgsTest(BitcoinTestFramework): ]) self.stop_node(0) + def test_networkactive(self): + self.log.info('Test -networkactive option') + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.start_node(0) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.start_node(0, extra_args=['-networkactive']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.start_node(0, extra_args=['-networkactive=1']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.start_node(0, extra_args=['-networkactive=0']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.start_node(0, extra_args=['-nonetworkactive']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.start_node(0, extra_args=['-nonetworkactive=1']) + self.stop_node(0) + def run_test(self): self.stop_node(0) self.test_log_buffer() self.test_args_log() + self.test_networkactive() self.test_config_file_parser() diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index c6852ef017..dfb3683143 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -161,7 +161,7 @@ class BIP68_112_113Test(BitcoinTestFramework): def generate_blocks(self, number): 
test_blocks = [] - for i in range(number): + for _ in range(number): block = self.create_test_block([]) test_blocks.append(block) self.last_block_time += 600 @@ -209,22 +209,22 @@ class BIP68_112_113Test(BitcoinTestFramework): # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] - for i in range(16): + for _ in range(16): bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112basicinputs = [] - for j in range(2): + for _ in range(2): inputs = [] - for i in range(16): + for _ in range(16): inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112diverseinputs = [] - for j in range(2): + for _ in range(2): inputs = [] - for i in range(16): + for _ in range(16): inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112diverseinputs.append(inputs) diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index 7b38e09bf9..7a2e35c095 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -195,7 +195,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework): while len(utxo_list) >= 2 and num_transactions < count: tx = CTransaction() input_amount = 0 - for i in range(2): + for _ in range(2): utxo = utxo_list.pop() tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) input_amount += int(utxo['amount'] * COIN) @@ -205,7 +205,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework): # Sanity check -- if we chose inputs that are too small, skip continue - for i in range(3): + for _ in range(3): tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py index 3cf0fb8f7b..702a1d9995 100755 --- a/test/functional/feature_fee_estimation.py +++ b/test/functional/feature_fee_estimation.py @@ -176,9 +176,9 @@ class EstimateFeeTest(BitcoinTestFramework): # We shuffle our confirmed txout set before each set of transactions # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible # resorting to tx's that depend on the mempool when those run out - for i in range(numblocks): + for _ in range(numblocks): random.shuffle(self.confutxo) - for j in range(random.randrange(100 - 50, 100 + 50)): + for _ in range(random.randrange(100 - 50, 100 + 50)): from_index = random.randint(1, 2) (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo, self.memutxo, Decimal("0.005"), min_fee, min_fee) @@ -243,7 +243,7 @@ class EstimateFeeTest(BitcoinTestFramework): self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting self.log.info("Will output estimates for 1/2/3/6/15/25 blocks") - for i in range(2): + for _ in range(2): self.log.info("Creating transactions and mining them with a block size that can't keep up") # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine self.transact_and_mine(10, self.nodes[2]) diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index 
7eabf86cad..0dc2839191 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -35,7 +35,11 @@ class MaxUploadTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 - self.extra_args = [["-maxuploadtarget=800", "-acceptnonstdtxn=1"]] + self.extra_args = [[ + "-maxuploadtarget=800", + "-acceptnonstdtxn=1", + "-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise + ]] self.supports_cli = False # Cache for utxos, as the listunspent may take a long time later in the test @@ -100,7 +104,7 @@ class MaxUploadTest(BitcoinTestFramework): assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). - for i in range(3): + for _ in range(3): p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) @@ -137,8 +141,8 @@ class MaxUploadTest(BitcoinTestFramework): self.nodes[0].disconnect_p2ps() - self.log.info("Restarting node 0 with noban permission and 1MB maxuploadtarget") - self.restart_node(0, ["-whitelist=noban@127.0.0.1", "-maxuploadtarget=1"]) + self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget") + self.restart_node(0, ["-whitelist=download@127.0.0.1", "-maxuploadtarget=1"]) # Reconnect to self.nodes[0] self.nodes[0].add_p2p_connection(TestP2PConn()) @@ -151,9 +155,12 @@ class MaxUploadTest(BitcoinTestFramework): getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)] self.nodes[0].p2p.send_and_ping(getdata_request) - assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the noban permission - self.log.info("Peer still connected after trying to download old block (noban permission)") + self.log.info("Peer still connected after trying to download old block (download permission)") + peer_info = self.nodes[0].getpeerinfo() + assert_equal(len(peer_info), 1) # node is still connected + assert_equal(peer_info[0]['permissions'], ['download']) + if __name__ == '__main__': MaxUploadTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index e46e5aacc8..02fa88f7c8 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -147,7 +147,7 @@ class PruneTest(BitcoinTestFramework): # Create stale blocks in manageable sized chunks self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") - for j in range(12): + for _ in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects disconnect_nodes(self.nodes[0], 1) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index acf551ef69..1b531ad51d 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -376,7 +376,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) outputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): + for _ in range(MAX_REPLACEMENT_LIMIT+1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index 5195d20dcb..0842972779 100755 --- a/test/functional/feature_segwit.py +++ 
b/test/functional/feature_segwit.py @@ -126,11 +126,11 @@ class SegWitTest(BitcoinTestFramework): assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript)) p2sh_ids.append([]) wit_ids.append([]) - for v in range(2): + for _ in range(2): p2sh_ids[i].append([]) wit_ids[i].append([]) - for i in range(5): + for _ in range(5): for n in range(3): for v in range(2): wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999"))) diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py new file mode 100755 index 0000000000..c565854bb0 --- /dev/null +++ b/test/functional/feature_settings.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test various command line arguments and configuration file parameters.""" + +import json + +from pathlib import Path + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.test_node import ErrorMatch +from test_framework.util import assert_equal + + +class SettingsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + node, = self.nodes + settings = Path(node.datadir, self.chain, "settings.json") + conf = Path(node.datadir, "bitcoin.conf") + + # Assert empty settings file was created + self.stop_node(0) + with settings.open() as fp: + assert_equal(json.load(fp), {}) + + # Assert settings are parsed and logged + with settings.open("w") as fp: + json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}, fp) + with node.assert_debug_log(expected_msgs=[ + 'Setting file arg: string = "string"', + 'Setting file arg: num = 5', + 'Setting file arg: bool = true', + 'Setting file arg: null = null', + 'Setting file arg: list = [6,7]']): + self.start_node(0) + self.stop_node(0) + + # Assert settings are unchanged after shutdown + with settings.open() as fp: + assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}) + + # Test invalid json + with settings.open("w") as fp: + fp.write("invalid json") + node.assert_start_raises_init_error(expected_msg='Unable to parse settings file', match=ErrorMatch.PARTIAL_REGEX) + + # Test invalid json object + with settings.open("w") as fp: + fp.write('"string"') + node.assert_start_raises_init_error(expected_msg='Found non-object value "string" in settings file', match=ErrorMatch.PARTIAL_REGEX) + + # Test invalid settings file containing duplicate keys + with settings.open("w") as fp: + fp.write('{"key": 1, "key": 2}') + node.assert_start_raises_init_error(expected_msg='Found duplicate key key in settings file', match=ErrorMatch.PARTIAL_REGEX) + + # Test invalid settings file is ignored with command line -nosettings + with node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']): + self.start_node(0, extra_args=["-nosettings"]) + self.stop_node(0) + + # Test invalid settings file is ignored with config file -nosettings + with conf.open('a') as conf: + conf.write('nosettings=1\n') + with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']): + self.start_node(0) + self.stop_node(0) + + # Test alternate settings path + altsettings = Path(node.datadir, "altsettings.json") + with altsettings.open("w") as fp: + fp.write('{"key": 
"value"}') + with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']): + self.start_node(0, extra_args=["-settings={}".format(altsettings)]) + self.stop_node(0) + + +if __name__ == '__main__': + SettingsTest().main() diff --git a/test/functional/mempool_compatibility.py b/test/functional/mempool_compatibility.py index 999399dec0..31fb751904 100755 --- a/test/functional/mempool_compatibility.py +++ b/test/functional/mempool_compatibility.py @@ -8,7 +8,7 @@ NOTE: The test is designed to prevent cases when compatibility is broken acciden In case we need to break mempool compatibility we can continue to use the test by just bumping the version number. Download node binaries: -contrib/devtools/previous_release.sh -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2 +contrib/devtools/previous_release.py -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2 Only v0.15.2 is required by this test. The rest is used in other backwards compatibility tests. """ diff --git a/test/functional/mempool_package_onemore.py b/test/functional/mempool_package_onemore.py index 0739d7e29b..e956fe07d2 100755 --- a/test/functional/mempool_package_onemore.py +++ b/test/functional/mempool_package_onemore.py @@ -31,7 +31,7 @@ class MempoolPackagesTest(BitcoinTestFramework): for (txid, vout) in zip(parent_txids, vouts): inputs.append({'txid' : txid, 'vout' : vout}) outputs = {} - for i in range(num_outputs): + for _ in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs, 0, True) signedtx = node.signrawtransactionwithwallet(rawtx) diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 5b7216b253..98dac30ace 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -48,7 +48,7 @@ class MempoolPackagesTest(BitcoinTestFramework): send_value = satoshi_round((value - fee)/num_outputs) inputs = [ {'txid' : parent_txid, 'vout' : vout} ] outputs = {} - for i in range(num_outputs): + for _ in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransactionwithwallet(rawtx) @@ -69,14 +69,19 @@ class MempoolPackagesTest(BitcoinTestFramework): fee = Decimal("0.0001") # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] - for i in range(MAX_ANCESTORS): + witness_chain = [] + for _ in range(MAX_ANCESTORS): (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1) value = sent_value chain.append(txid) + # We need the wtxids to check P2P announcements + fulltx = self.nodes[0].getrawtransaction(txid) + witnesstx = self.nodes[0].decoderawtransaction(fulltx, True) + witness_chain.append(witnesstx['hash']) # Wait until mempool transactions have passed initial broadcast (sent inv and received getdata) # Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between - self.nodes[0].p2p.wait_for_broadcast(chain) + self.nodes[0].p2p.wait_for_broadcast(witness_chain) # Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor # count and fees should look correct @@ -240,7 +245,7 @@ class MempoolPackagesTest(BitcoinTestFramework): # Sign and send up to MAX_DESCENDANT transactions chained off the parent tx chain = [] # save sent txs for the purpose of checking node1's mempool later (see below) - for i in range(MAX_DESCENDANTS - 1): + for _ in range(MAX_DESCENDANTS - 1): utxo = transaction_package.pop(0) (txid, sent_value) = 
self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) chain.append(txid) @@ -307,7 +312,7 @@ class MempoolPackagesTest(BitcoinTestFramework): send_value = satoshi_round((value - fee)/2) inputs = [ {'txid' : txid, 'vout' : vout} ] outputs = {} - for i in range(2): + for _ in range(2): outputs[self.nodes[0].getnewaddress()] = send_value rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) @@ -321,7 +326,7 @@ class MempoolPackagesTest(BitcoinTestFramework): # Create tx2-7 vout = 1 txid = tx0_id - for i in range(6): + for _ in range(6): (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1) vout = 0 value = sent_value diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 5d00648aed..85c4d6d570 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -62,7 +62,7 @@ class MempoolPersistTest(BitcoinTestFramework): def run_test(self): self.log.debug("Send 5 transactions from node2 (to its own address)") tx_creation_time_lower = int(time.time()) - for i in range(5): + for _ in range(5): last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10")) node2_balance = self.nodes[2].getbalance() self.sync_all() diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py index 8a703ef009..8baf974a0a 100755 --- a/test/functional/mempool_updatefromblock.py +++ b/test/functional/mempool_updatefromblock.py @@ -73,7 +73,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework): n_outputs = size - tx_count output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001')) outputs = {} - for n in range(0, n_outputs): + for _ in range(n_outputs): outputs[self.nodes[0].getnewaddress()] = output_value else: output_value = (inputs_value - fee).quantize(Decimal('0.00000001')) diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py index c155dda664..27e6b669f6 100755 --- a/test/functional/p2p_blocksonly.py +++ b/test/functional/p2p_blocksonly.py @@ -52,34 +52,35 @@ class P2PBlocksOnly(BitcoinTestFramework): self.log.info('Check that txs from rpc are not rejected and relayed to other peers') assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True) txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid'] - with self.nodes[0].assert_debug_log(['received getdata for: tx {} peer=1'.format(txid)]): + with self.nodes[0].assert_debug_log(['received getdata for: wtx {} peer=1'.format(txid)]): self.nodes[0].sendrawtransaction(sigtx) self.nodes[0].p2p.wait_for_tx(txid) assert_equal(self.nodes[0].getmempoolinfo()['size'], 1) - self.log.info('Check that txs from whitelisted peers are not rejected and relayed to others') - self.log.info("Restarting node 0 with whitelist permission and blocksonly") + self.log.info('Check that txs from forcerelay peers are not rejected and relayed to others') + self.log.info("Restarting node 0 with forcerelay permission and blocksonly") self.restart_node(0, ["-persistmempool=0", "-whitelist=127.0.0.1", "-whitelistforcerelay", "-blocksonly"]) - assert_equal(self.nodes[0].getrawmempool(),[]) + assert_equal(self.nodes[0].getrawmempool(), []) first_peer = self.nodes[0].add_p2p_connection(P2PInterface()) second_peer = self.nodes[0].add_p2p_connection(P2PInterface()) peer_1_info = self.nodes[0].getpeerinfo()[0] assert_equal(peer_1_info['whitelisted'], True) - 
assert_equal(peer_1_info['permissions'], ['noban', 'forcerelay', 'relay', 'mempool']) + assert_equal(peer_1_info['permissions'], ['noban', 'forcerelay', 'relay', 'mempool', 'download']) peer_2_info = self.nodes[0].getpeerinfo()[1] assert_equal(peer_2_info['whitelisted'], True) - assert_equal(peer_2_info['permissions'], ['noban', 'forcerelay', 'relay', 'mempool']) + assert_equal(peer_2_info['permissions'], ['noban', 'forcerelay', 'relay', 'mempool', 'download']) assert_equal(self.nodes[0].testmempoolaccept([sigtx])[0]['allowed'], True) txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid'] - self.log.info('Check that the tx from whitelisted first_peer is relayed to others (ie.second_peer)') + self.log.info('Check that the tx from forcerelay first_peer is relayed to others (ie.second_peer)') with self.nodes[0].assert_debug_log(["received getdata"]): first_peer.send_message(msg_tx(FromHex(CTransaction(), sigtx))) - self.log.info('Check that the whitelisted peer is still connected after sending the transaction') + self.log.info('Check that the forcerelay peer is still connected after sending the transaction') assert_equal(first_peer.is_connected, True) second_peer.wait_for_tx(txid) assert_equal(self.nodes[0].getmempoolinfo()['size'], 1) - self.log.info("Whitelisted peer's transaction is accepted and relayed") + self.log.info("Forcerelay peer's transaction is accepted and relayed") + if __name__ == '__main__': P2PBlocksOnly().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 0b3738b572..225d393e1b 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -125,7 +125,7 @@ class CompactBlocksTest(BitcoinTestFramework): out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) - for i in range(10): + for _ in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() @@ -266,7 +266,7 @@ class CompactBlocksTest(BitcoinTestFramework): address = node.getnewaddress() segwit_tx_generated = False - for i in range(num_transactions): + for _ in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) @@ -294,12 +294,11 @@ class CompactBlocksTest(BitcoinTestFramework): block.rehash() # Wait until the block was announced (via compact blocks) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: - assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) @@ -309,12 +308,11 @@ class CompactBlocksTest(BitcoinTestFramework): inv = CInv(MSG_CMPCT_BLOCK, block_hash) test_node.send_message(msg_getdata([inv])) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: - assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = 
HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) @@ -418,7 +416,7 @@ class CompactBlocksTest(BitcoinTestFramework): def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) - for i in range(num_transactions): + for _ in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) @@ -627,7 +625,7 @@ class CompactBlocksTest(BitcoinTestFramework): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] - for i in range(MAX_CMPCTBLOCK_DEPTH + 1): + for _ in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) @@ -691,10 +689,9 @@ class CompactBlocksTest(BitcoinTestFramework): node.submitblock(ToHex(block)) for l in listeners: - wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) + wait_until(lambda: "cmpctblock" in l.last_message, timeout=30, lock=mininode_lock) with mininode_lock: for l in listeners: - assert "cmpctblock" in l.last_message l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index f939ea965c..0b51d8f4bb 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -7,7 +7,7 @@ from decimal import Decimal import time -from test_framework.messages import MSG_TX, msg_feefilter +from test_framework.messages import MSG_TX, MSG_WTX, msg_feefilter from test_framework.mininode import mininode_lock, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal @@ -19,7 +19,7 @@ def hashToHex(hash): # Wait up to 60 secs to see if the testnode has received all the expected invs def allInvsMatch(invsExpected, testnode): - for x in range(60): + for _ in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): return True @@ -45,7 +45,7 @@ class TestP2PConn(P2PInterface): def on_inv(self, message): for i in message.inv: - if (i.type == MSG_TX): + if (i.type == MSG_TX) or (i.type == MSG_WTX): self.txinvs.append(hashToHex(i.hash)) def clear_invs(self): @@ -91,7 +91,7 @@ class FeeFilterTest(BitcoinTestFramework): # Test that invs are received by test connection for all txs at # feerate of .2 sat/byte node1.settxfee(Decimal("0.00000200")) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] assert allInvsMatch(txids, conn) conn.clear_invs() @@ -100,14 +100,14 @@ class FeeFilterTest(BitcoinTestFramework): # Test that txs are still being received by test connection (paying .15 sat/byte) node1.settxfee(Decimal("0.00000150")) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] assert allInvsMatch(txids, conn) conn.clear_invs() # Change tx fee rate to .1 sat/byte and test they are no longer received # by the test connection node1.settxfee(Decimal("0.00000100")) - [node1.sendtoaddress(node1.getnewaddress(), 
1) for x in range(3)] + [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] self.sync_mempools() # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we @@ -124,7 +124,7 @@ class FeeFilterTest(BitcoinTestFramework): # Remove fee filter and check that txs are received again conn.send_and_ping(msg_feefilter(0)) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] assert allInvsMatch(txids, conn) conn.clear_invs() diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py index 741da3be31..ce3856fc95 100755 --- a/test/functional/p2p_filter.py +++ b/test/functional/p2p_filter.py @@ -218,7 +218,6 @@ class FilterTest(BitcoinTestFramework): # Add peer but do not send version yet filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False) # Send version with fRelay=False - filter_peer_without_nrelay.wait_until(lambda: filter_peer_without_nrelay.is_connected, timeout=10) version_without_fRelay = msg_version() version_without_fRelay.nRelay = 0 filter_peer_without_nrelay.send_message(version_without_fRelay) diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py new file mode 100755 index 0000000000..c9278eab92 --- /dev/null +++ b/test/functional/p2p_getaddr_caching.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test addr response caching""" + +import time +from test_framework.messages import ( + CAddress, + NODE_NETWORK, + NODE_WITNESS, + msg_addr, + msg_getaddr, +) +from test_framework.mininode import ( + P2PInterface, + mininode_lock +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, +) + +MAX_ADDR_TO_SEND = 1000 + +def gen_addrs(n): + addrs = [] + for i in range(n): + addr = CAddress() + addr.time = int(time.time()) + addr.nServices = NODE_NETWORK | NODE_WITNESS + # Use first octets to occupy different AddrMan buckets + first_octet = i >> 8 + second_octet = i % 256 + addr.ip = "{}.{}.1.1".format(first_octet, second_octet) + addr.port = 8333 + addrs.append(addr) + return addrs + +class AddrReceiver(P2PInterface): + + def __init__(self): + super().__init__() + self.received_addrs = None + + def get_received_addrs(self): + with mininode_lock: + return self.received_addrs + + def on_addr(self, message): + self.received_addrs = [] + for addr in message.addrs: + self.received_addrs.append(addr.ip) + + def addr_received(self): + return self.received_addrs is not None + + +class AddrTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = False + self.num_nodes = 1 + + def run_test(self): + self.log.info('Create connection that sends and requests addr messages') + addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) + + msg_send_addrs = msg_addr() + self.log.info('Fill peer AddrMan with a lot of records') + # Since these addrs are sent from the same source, not all of them will be stored, + # because we allocate a limited number of AddrMan buckets per addr source. 
+ total_addrs = 10000 + addrs = gen_addrs(total_addrs) + for i in range(int(total_addrs/MAX_ADDR_TO_SEND)): + msg_send_addrs.addrs = addrs[i * MAX_ADDR_TO_SEND:(i + 1) * MAX_ADDR_TO_SEND] + addr_source.send_and_ping(msg_send_addrs) + + responses = [] + self.log.info('Send many addr requests within short time to receive same response') + N = 5 + cur_mock_time = int(time.time()) + for i in range(N): + addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) + addr_receiver.send_and_ping(msg_getaddr()) + # Trigger response + cur_mock_time += 5 * 60 + self.nodes[0].setmocktime(cur_mock_time) + addr_receiver.wait_until(addr_receiver.addr_received) + responses.append(addr_receiver.get_received_addrs()) + for response in responses[1:]: + assert_equal(response, responses[0]) + assert(len(response) < MAX_ADDR_TO_SEND) + + cur_mock_time += 3 * 24 * 60 * 60 + self.nodes[0].setmocktime(cur_mock_time) + + self.log.info('After time passed, see a new response to addr request') + last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) + last_addr_receiver.send_and_ping(msg_getaddr()) + # Trigger response + cur_mock_time += 5 * 60 + self.nodes[0].setmocktime(cur_mock_time) + last_addr_receiver.wait_until(last_addr_receiver.addr_received) + # new response is different + assert(set(responses[0]) != set(last_addr_receiver.get_received_addrs())) + + +if __name__ == '__main__': + AddrTest().main() diff --git a/test/functional/p2p_ibd_txrelay.py b/test/functional/p2p_ibd_txrelay.py new file mode 100755 index 0000000000..c3e758b021 --- /dev/null +++ b/test/functional/p2p_ibd_txrelay.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test fee filters during and after IBD.""" + +from decimal import Decimal + +from test_framework.messages import COIN +from test_framework.test_framework import BitcoinTestFramework + +MAX_FEE_FILTER = Decimal(9170997) / COIN +NORMAL_FEE_FILTER = Decimal(100) / COIN + + +class P2PIBDTxRelayTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + self.extra_args = [ + ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)], + ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)], + ] + + def run_test(self): + self.log.info("Check that nodes set minfilter to MAX_MONEY while still in IBD") + for node in self.nodes: + assert node.getblockchaininfo()['initialblockdownload'] + self.wait_until(lambda: all(peer['minfeefilter'] == MAX_FEE_FILTER for peer in node.getpeerinfo())) + + # Come out of IBD by generating a block + self.nodes[0].generate(1) + self.sync_all() + + self.log.info("Check that nodes reset minfilter after coming out of IBD") + for node in self.nodes: + assert not node.getblockchaininfo()['initialblockdownload'] + self.wait_until(lambda: all(peer['minfeefilter'] == NORMAL_FEE_FILTER for peer in node.getpeerinfo())) + + +if __name__ == '__main__': + P2PIBDTxRelayTest().main() diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index 3b3dbd08f2..2fc5245241 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -26,7 +26,7 @@ from test_framework.util import ( wait_until, ) -banscore = 10 +DISCOURAGEMENT_THRESHOLD = 100 class CLazyNode(P2PInterface): @@ -63,15 +63,12 @@ class CLazyNode(P2PInterface): def on_getblocktxn(self, message): self.bad_message(message) def on_blocktxn(self, message): self.bad_message(message) + # Node that never sends a version. We'll use this to send a bunch of messages # anyway, and eventually get disconnected. -class CNodeNoVersionBan(CLazyNode): - # send a bunch of veracks without sending a message. This should get us disconnected. - # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes - def on_open(self): - super().on_open() - for i in range(banscore): - self.send_message(msg_verack()) +class CNodeNoVersionMisbehavior(CLazyNode): + pass + # Node that never sends a version. This one just sits idle and hopes to receive # any message (it shouldn't!) @@ -79,6 +76,7 @@ class CNodeNoVersionIdle(CLazyNode): def __init__(self): super().__init__() + # Node that sends a version but not a verack. class CNodeNoVerackIdle(CLazyNode): def __init__(self): @@ -106,18 +104,23 @@ class P2PVersionStore(P2PInterface): class P2PLeakTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [['-banscore=' + str(banscore)]] def run_test(self): - no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False, wait_for_verack=False) + no_version_disconnect_node = self.nodes[0].add_p2p_connection( + CNodeNoVersionMisbehavior(), send_version=False, wait_for_verack=False) no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False) no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle(), wait_for_verack=False) + # Send enough veracks without a message to reach the peer discouragement + # threshold. This should get us disconnected. + for _ in range(DISCOURAGEMENT_THRESHOLD): + no_version_disconnect_node.send_message(msg_verack()) + # Wait until we got the verack in response to the version. 
Though, don't wait for the other node to receive the # verack, since we never sent one no_verack_idlenode.wait_for_verack() - wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock) + wait_until(lambda: no_version_disconnect_node.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock) @@ -127,13 +130,13 @@ class P2PLeakTest(BitcoinTestFramework): #Give the node enough time to possibly leak out a message time.sleep(5) - #This node should have been banned - assert not no_version_bannode.is_connected + # Expect this node to be disconnected for misbehavior + assert not no_version_disconnect_node.is_connected self.nodes[0].disconnect_p2ps() # Make sure no unexpected messages came in - assert no_version_bannode.unexpected_msg == False + assert no_version_disconnect_node.unexpected_msg == False assert no_version_idlenode.unexpected_msg == False assert no_verack_idlenode.unexpected_msg == False @@ -152,7 +155,6 @@ class P2PLeakTest(BitcoinTestFramework): p2p_old_node = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False) old_version_msg = msg_version() old_version_msg.nVersion = 31799 - wait_until(lambda: p2p_old_node.is_connected) with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']): p2p_old_node.send_message(old_version_msg) p2p_old_node.wait_for_disconnect() diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index bea202855d..254352c816 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -39,7 +39,8 @@ class P2PPermissionsTests(BitcoinTestFramework): self.checkpermission( # default permissions (no specific permissions) ["-whitelist=127.0.0.1"], - ["relay", "noban", "mempool"], + # Make sure the default values in the command line documentation match the ones here + ["relay", "noban", "mempool", "download"], True) self.checkpermission( @@ -51,7 +52,7 @@ class P2PPermissionsTests(BitcoinTestFramework): self.checkpermission( # relay permission removed (no specific permissions) ["-whitelist=127.0.0.1", "-whitelistrelay=0"], - ["noban", "mempool"], + ["noban", "mempool", "download"], True) self.checkpermission( @@ -59,7 +60,7 @@ class P2PPermissionsTests(BitcoinTestFramework): # Legacy parameter interaction which set whitelistrelay to true # if whitelistforcerelay is true ["-whitelist=127.0.0.1", "-whitelistforcerelay"], - ["forcerelay", "relay", "noban", "mempool"], + ["forcerelay", "relay", "noban", "mempool", "download"], True) # Let's make sure permissions are merged correctly @@ -70,32 +71,32 @@ class P2PPermissionsTests(BitcoinTestFramework): self.checkpermission( ["-whitelist=noban@127.0.0.1"], # Check parameter interaction forcerelay should activate relay - ["noban", "bloomfilter", "forcerelay", "relay"], + ["noban", "bloomfilter", "forcerelay", "relay", "download"], False) self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1") self.checkpermission( # legacy whitelistrelay should be ignored ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"], - ["noban", "mempool"], + ["noban", "mempool", "download"], False) self.checkpermission( # legacy whitelistforcerelay should be ignored ["-whitelist=noban,mempool@127.0.0.1", "-whitelistforcerelay"], - ["noban", "mempool"], + ["noban", "mempool", "download"], False) 
self.checkpermission( # missing mempool permission to be considered legacy whitelisted ["-whitelist=noban@127.0.0.1"], - ["noban"], + ["noban", "download"], False) self.checkpermission( # all permission added ["-whitelist=all@127.0.0.1"], - ["forcerelay", "noban", "mempool", "bloomfilter", "relay"], + ["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download", "addr"], False) self.stop_node(1) @@ -107,9 +108,9 @@ class P2PPermissionsTests(BitcoinTestFramework): block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0]) self.sync_all() - self.log.debug("Create a connection from a whitelisted wallet that rebroadcasts raw txs") + self.log.debug("Create a connection from a forcerelay peer that rebroadcasts raw txs") # A python mininode is needed to send the raw transaction directly. If a full node was used, it could only - # rebroadcast via the inv-getdata mechanism. However, even for whitelisted connections, a full node would + # rebroadcast via the inv-getdata mechanism. However, even for forcerelay connections, a full node would # currently not request a txid that is already in the mempool. self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"]) p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore()) @@ -134,7 +135,7 @@ class P2PPermissionsTests(BitcoinTestFramework): self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool") connect_nodes(self.nodes[1], 0) - with self.nodes[1].assert_debug_log(["Force relaying tx {} from whitelisted peer=0".format(txid)]): + with self.nodes[1].assert_debug_log(["Force relaying tx {} from peer=0".format(txid)]): p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1]) wait_until(lambda: txid in self.nodes[0].getrawmempool()) @@ -145,7 +146,7 @@ class P2PPermissionsTests(BitcoinTestFramework): [tx], self.nodes[1], success=False, - reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid), + reject_reason='Not relaying non-mempool transaction {} from forcerelay peer=0'.format(txid), ) def checkpermission(self, args, expectedPermissions, whitelisted): diff --git a/test/functional/p2p_ping.py b/test/functional/p2p_ping.py new file mode 100755 index 0000000000..5f5fd3e104 --- /dev/null +++ b/test/functional/p2p_ping.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test ping message +""" + +import time + +from test_framework.messages import msg_pong +from test_framework.mininode import P2PInterface +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +PING_INTERVAL = 2 * 60 + + +class msg_pong_corrupt(msg_pong): + def serialize(self): + return b"" + + +class NodePongAdd1(P2PInterface): + def on_ping(self, message): + self.send_message(msg_pong(message.nonce + 1)) + + +class NodeNoPong(P2PInterface): + def on_ping(self, message): + pass + + +class PingPongTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [['-peertimeout=3']] + + def check_peer_info(self, *, pingtime, minping, pingwait): + stats = self.nodes[0].getpeerinfo()[0] + assert_equal(stats.pop('pingtime', None), pingtime) + assert_equal(stats.pop('minping', None), minping) + assert_equal(stats.pop('pingwait', None), pingwait) + + def mock_forward(self, delta): + self.mock_time += delta + self.nodes[0].setmocktime(self.mock_time) + + def run_test(self): + self.mock_time = int(time.time()) + self.mock_forward(0) + + self.log.info('Check that ping is sent after connection is established') + no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong()) + self.mock_forward(3) + assert no_pong_node.last_message.pop('ping').nonce != 0 + self.check_peer_info(pingtime=None, minping=None, pingwait=3) + + self.log.info('Reply without nonce cancels ping') + with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']): + no_pong_node.send_and_ping(msg_pong_corrupt()) + self.check_peer_info(pingtime=None, minping=None, pingwait=None) + + self.log.info('Reply without ping') + with self.nodes[0].assert_debug_log([ + 'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes', + ]): + no_pong_node.send_and_ping(msg_pong()) + self.check_peer_info(pingtime=None, minping=None, pingwait=None) + + self.log.info('Reply with wrong nonce does not cancel ping') + assert 'ping' not in no_pong_node.last_message + with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']): + # mock time PING_INTERVAL ahead to trigger node into sending a ping + self.mock_forward(PING_INTERVAL + 1) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + self.mock_forward(9) + # Send the wrong pong + no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1)) + self.check_peer_info(pingtime=None, minping=None, pingwait=9) + + self.log.info('Reply with zero nonce does cancel ping') + with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']): + no_pong_node.send_and_ping(msg_pong(0)) + self.check_peer_info(pingtime=None, minping=None, pingwait=None) + + self.log.info('Check that ping is properly reported on RPC') + assert 'ping' not in no_pong_node.last_message + # mock time PING_INTERVAL ahead to trigger node into sending a ping + self.mock_forward(PING_INTERVAL + 1) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + ping_delay = 29 + self.mock_forward(ping_delay) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce)) + self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) + + self.log.info('Check that minping is decreased after a fast roundtrip') + # mock time PING_INTERVAL ahead to trigger node into sending a ping + self.mock_forward(PING_INTERVAL + 1) + 
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + ping_delay = 9 + self.mock_forward(ping_delay) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce)) + self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) + + self.log.info('Check that peer is disconnected after ping timeout') + assert 'ping' not in no_pong_node.last_message + self.nodes[0].ping() + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) + with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']): + self.mock_forward(20 * 60 + 1) + time.sleep(4) # peertimeout + 1 + + +if __name__ == '__main__': + PingPongTest().main() diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 25dd765442..564e49f3d8 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -25,6 +25,7 @@ from test_framework.messages import ( MSG_BLOCK, MSG_TX, MSG_WITNESS_FLAG, + MSG_WTX, NODE_NETWORK, NODE_WITNESS, msg_no_witness_block, @@ -34,6 +35,7 @@ from test_framework.messages import ( msg_tx, msg_block, msg_no_witness_tx, + msg_verack, ser_uint256, ser_vector, sha256, @@ -81,6 +83,7 @@ from test_framework.util import ( softfork_active, hex_str_to_bytes, assert_raises_rpc_error, + wait_until, ) # The versionbit bit used to signal activation of SegWit @@ -143,25 +146,50 @@ def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=Non class TestP2PConn(P2PInterface): - def __init__(self): + def __init__(self, wtxidrelay=False): super().__init__() self.getdataset = set() + self.last_wtxidrelay = [] + self.lastgetdata = [] + self.wtxidrelay = wtxidrelay # Avoid sending out msg_getdata in the mininode thread as a reply to invs. 
# They are not needed and would only lead to races because we send msg_getdata out in the test thread def on_inv(self, message): pass + def on_version(self, message): + if self.wtxidrelay: + super().on_version(message) + else: + self.send_message(msg_verack()) + self.nServices = message.nServices + def on_getdata(self, message): + self.lastgetdata = message.inv for inv in message.inv: self.getdataset.add(inv.hash) - def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True): + def on_wtxidrelay(self, message): + self.last_wtxidrelay.append(message) + + def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True, use_wtxid=False): + if success: + # sanity check + assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid) with mininode_lock: self.last_message.pop("getdata", None) - self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)])) + if use_wtxid: + wtxid = tx.calc_sha256(True) + self.send_message(msg_inv(inv=[CInv(MSG_WTX, wtxid)])) + else: + self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)])) + if success: - self.wait_for_getdata([tx.sha256], timeout) + if use_wtxid: + self.wait_for_getdata([wtxid], timeout) + else: + self.wait_for_getdata([tx.sha256], timeout) else: time.sleep(timeout) assert not self.last_message.get("getdata") @@ -234,6 +262,8 @@ class SegWitTest(BitcoinTestFramework): self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK) # self.std_node is for testing node1 (fRequireStandard=true) self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS) + # self.std_wtx_node is for testing node1 with wtxid relay + self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS) assert self.test_node.nServices & NODE_WITNESS != 0 @@ -277,6 +307,7 @@ class SegWitTest(BitcoinTestFramework): self.test_upgrade_after_activation() self.test_witness_sigops() self.test_superfluous_witness() + self.test_wtxid_relay() # Individual tests @@ -916,7 +947,7 @@ class SegWitTest(BitcoinTestFramework): parent_tx = CTransaction() parent_tx.vin.append(CTxIn(prevout, b"")) child_value = int(value / NUM_OUTPUTS) - for i in range(NUM_OUTPUTS): + for _ in range(NUM_OUTPUTS): parent_tx.vout.append(CTxOut(child_value, script_pubkey)) parent_tx.vout[0].nValue -= 50000 assert parent_tx.vout[0].nValue > 0 @@ -926,7 +957,7 @@ class SegWitTest(BitcoinTestFramework): for i in range(NUM_OUTPUTS): child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] - for i in range(NUM_OUTPUTS): + for _ in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program] child_tx.rehash() @@ -1173,7 +1204,7 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) value = self.utxo[0].nValue - for i in range(10): + for _ in range(10): tx.vout.append(CTxOut(int(value / 10), script_pubkey)) tx.vout[0].nValue -= 1000 assert tx.vout[0].nValue >= 0 @@ -1270,7 +1301,6 @@ class SegWitTest(BitcoinTestFramework): test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False) # Verify that removing the witness succeeds. 
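The acceptance check that follows resubmits the same transaction with its witness stripped. A small sketch of the two message encodings the with_witness flag chooses between, using message classes already imported in this file (the helper name tx_message is illustrative):

from test_framework.messages import msg_no_witness_tx, msg_tx

def tx_message(tx, with_witness):
    # msg_tx serializes the transaction including its witness data;
    # msg_no_witness_tx sends the same transaction in the legacy,
    # witness-less encoding.
    return msg_tx(tx) if with_witness else msg_no_witness_tx(tx)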
- self.test_node.announce_tx_and_wait_for_getdata(tx) test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) # Now try to add extra witness data to a valid witness tx. @@ -1294,11 +1324,14 @@ class SegWitTest(BitcoinTestFramework): tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2] tx3.rehash() - # Node will not be blinded to the transaction + # Node will not be blinded to the transaction, requesting it any number of times + # if it is being announced via txid relay. + # Node will be blinded to the transaction via wtxid, however. self.std_node.announce_tx_and_wait_for_getdata(tx3) + self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True) test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') self.std_node.announce_tx_and_wait_for_getdata(tx3) - test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') + self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False) # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) @@ -1349,7 +1382,7 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS - for i in range(NUM_SEGWIT_VERSIONS): + for _ in range(NUM_SEGWIT_VERSIONS): tx.vout.append(CTxOut(split_value, CScript([OP_TRUE]))) tx.rehash() block = self.build_next_block() @@ -1395,7 +1428,7 @@ class SegWitTest(BitcoinTestFramework): temp_utxo.pop() # last entry in temp_utxo was the output we just spent temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - # Spend everything in temp_utxo back to an OP_TRUE output. + # Spend everything in temp_utxo into an segwit v1 output. tx3 = CTransaction() total_value = 0 for i in temp_utxo: @@ -1403,8 +1436,16 @@ class SegWitTest(BitcoinTestFramework): tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] - tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE]))) + tx3.vout.append(CTxOut(total_value - 1000, script_pubkey)) tx3.rehash() + + # First we test this transaction against fRequireStandard=true node + # making sure the txid is added to the reject filter + self.std_node.announce_tx_and_wait_for_getdata(tx3) + test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs") + # Now the node will no longer ask for getdata of this transaction when advertised by same txid + self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False) + # Spending a higher version witness output is not allowed by policy, # even with fRequireStandard=false. 
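A higher-version witness program differs from the version-0 programs used throughout this test only in its leading version opcode. A sketch of the two scriptPubKey forms, with OP_1 standing in for the first of the versions BIP141 reserves for soft-fork upgrades (an illustration, not the exact construction used later in this file):

from test_framework.messages import sha256
from test_framework.script import CScript, OP_0, OP_1, OP_TRUE

witness_program = CScript([OP_TRUE])
program_hash = sha256(witness_program)
v0_script_pubkey = CScript([OP_0, program_hash])  # version 0: standard and spendable
v1_script_pubkey = CScript([OP_1, program_hash])  # version 1: reserved for soft-fork upgrades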
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades") @@ -1623,7 +1664,7 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS - for i in range(NUM_SIGHASH_TESTS): + for _ in range(NUM_SIGHASH_TESTS): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.wit.vtxinwit.append(CTxInWitness()) sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) @@ -1653,7 +1694,7 @@ class SegWitTest(BitcoinTestFramework): tx.wit.vtxinwit.append(CTxInWitness()) total_value += temp_utxos[i].nValue split_value = total_value // num_outputs - for i in range(num_outputs): + for _ in range(num_outputs): tx.vout.append(CTxOut(split_value, script_pubkey)) for i in range(num_inputs): # Now try to sign each input, using a random hashtype. @@ -1951,7 +1992,7 @@ class SegWitTest(BitcoinTestFramework): split_value = self.utxo[0].nValue // outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - for i in range(outputs): + for _ in range(outputs): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.vout[-2].scriptPubKey = script_pubkey_toomany tx.vout[-1].scriptPubKey = script_pubkey_justright @@ -2016,6 +2057,11 @@ class SegWitTest(BitcoinTestFramework): # TODO: test p2sh sigop counting + # Cleanup and prep for next test + self.utxo.pop(0) + self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + + @subtest # type: ignore def test_superfluous_witness(self): # Serialization of tx that puts witness flag to 3 always def serialize_with_bogus_witness(tx): @@ -2032,7 +2078,7 @@ class SegWitTest(BitcoinTestFramework): if (len(tx.wit.vtxinwit) != len(tx.vin)): # vtxinwit must have the same length as vin tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)] - for i in range(len(tx.wit.vtxinwit), len(tx.vin)): + for _ in range(len(tx.wit.vtxinwit), len(tx.vin)): tx.wit.vtxinwit.append(CTxInWitness()) r += tx.wit.serialize() r += struct.pack("<I", tx.nLockTime) @@ -2059,6 +2105,67 @@ class SegWitTest(BitcoinTestFramework): with self.nodes[0].assert_debug_log(['Unknown transaction optional data']): self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx)) + @subtest # type: ignore + def test_wtxid_relay(self): + # Use brand new nodes to avoid contamination from earlier tests + self.wtx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS) + self.tx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=False), services=NODE_NETWORK | NODE_WITNESS) + + # Check wtxidrelay feature negotiation message through connecting a new peer + def received_wtxidrelay(): + return (len(self.wtx_node.last_wtxidrelay) > 0) + wait_until(received_wtxidrelay, timeout=60, lock=mininode_lock) + + # Create a Segwit output from the latest UTXO + # and announce it to the network + witness_program = CScript([OP_TRUE]) + witness_hash = sha256(witness_program) + script_pubkey = CScript([OP_0, witness_hash]) + + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) + tx.rehash() + + # Create a Segwit transaction + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) + tx2.wit.vtxinwit.append(CTxInWitness()) + 
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.rehash() + + # Announce Segwit transaction with wtxid + # and wait for getdata + self.wtx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=True) + with mininode_lock: + lgd = self.wtx_node.lastgetdata[:] + assert_equal(lgd, [CInv(MSG_WTX, tx2.calc_sha256(True))]) + + # Announce Segwit transaction from non wtxidrelay peer + # and wait for getdata + self.tx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=False) + with mininode_lock: + lgd = self.tx_node.lastgetdata[:] + assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx2.sha256)]) + + # Send tx2 through; it's an orphan so won't be accepted + with mininode_lock: + self.wtx_node.last_message.pop("getdata", None) + test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False) + + # Expect a request for parent (tx) by txid despite use of WTX peer + self.wtx_node.wait_for_getdata([tx.sha256], 60) + with mininode_lock: + lgd = self.wtx_node.lastgetdata[:] + assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx.sha256)]) + + # Send tx through + test_transaction_acceptance(self.nodes[0], self.wtx_node, tx, with_witness=False, accepted=True) + + # Check tx2 is there now + assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True) + if __name__ == '__main__': SegWitTest().main() diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py index 481b1c1841..126a46bd53 100755 --- a/test/functional/p2p_sendheaders.py +++ b/test/functional/p2p_sendheaders.py @@ -328,7 +328,7 @@ class SendHeadersTest(BitcoinTestFramework): for j in range(2): self.log.debug("Part 2.{}.{}: starting...".format(i, j)) blocks = [] - for b in range(i + 1): + for _ in range(i + 1): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -443,7 +443,7 @@ class SendHeadersTest(BitcoinTestFramework): # Create 2 blocks. Send the blocks, then send the headers. blocks = [] - for b in range(2): + for _ in range(2): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -461,7 +461,7 @@ class SendHeadersTest(BitcoinTestFramework): # This time, direct fetch should work blocks = [] - for b in range(3): + for _ in range(3): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -482,7 +482,7 @@ class SendHeadersTest(BitcoinTestFramework): blocks = [] # Create extra blocks for later - for b in range(20): + for _ in range(20): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -529,7 +529,7 @@ class SendHeadersTest(BitcoinTestFramework): test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. - for j in range(2): + for _ in range(2): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -550,7 +550,7 @@ class SendHeadersTest(BitcoinTestFramework): # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. 
MAX_UNCONNECTING_HEADERS = 10 - for j in range(MAX_UNCONNECTING_HEADERS + 1): + for _ in range(MAX_UNCONNECTING_HEADERS + 1): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 10f5eea0e5..3ea1c6e5e7 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -12,6 +12,7 @@ from test_framework.messages import ( FromHex, MSG_TX, MSG_TYPE_MASK, + MSG_WTX, msg_inv, msg_notfound, ) @@ -36,7 +37,7 @@ class TestP2PConn(P2PInterface): def on_getdata(self, message): for i in message.inv: - if i.type & MSG_TYPE_MASK == MSG_TX: + if i.type & MSG_TYPE_MASK == MSG_TX or i.type & MSG_TYPE_MASK == MSG_WTX: self.tx_getdata_count += 1 @@ -44,12 +45,13 @@ class TestP2PConn(P2PInterface): GETDATA_TX_INTERVAL = 60 # seconds MAX_GETDATA_RANDOM_DELAY = 2 # seconds INBOUND_PEER_TX_DELAY = 2 # seconds +TXID_RELAY_DELAY = 2 # seconds MAX_GETDATA_IN_FLIGHT = 100 TX_EXPIRY_INTERVAL = GETDATA_TX_INTERVAL * 10 # Python test constants NUM_INBOUND = 10 -MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY +MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY class TxDownloadTest(BitcoinTestFramework): @@ -63,7 +65,7 @@ class TxDownloadTest(BitcoinTestFramework): txid = 0xdeadbeef self.log.info("Announce the txid from each incoming peer to node 0") - msg = msg_inv([CInv(t=MSG_TX, h=txid)]) + msg = msg_inv([CInv(t=MSG_WTX, h=txid)]) for p in self.nodes[0].p2ps: p.send_and_ping(msg) @@ -135,13 +137,13 @@ class TxDownloadTest(BitcoinTestFramework): with mininode_lock: p.tx_getdata_count = 0 - p.send_message(msg_inv([CInv(t=MSG_TX, h=i) for i in txids])) + p.send_message(msg_inv([CInv(t=MSG_WTX, h=i) for i in txids])) wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT, lock=mininode_lock) with mininode_lock: assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT) self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request") - p.send_message(msg_notfound(vec=[CInv(t=MSG_TX, h=txids[0])])) + p.send_message(msg_notfound(vec=[CInv(t=MSG_WTX, h=txids[0])])) wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10, lock=mininode_lock) with mininode_lock: assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1) @@ -160,7 +162,7 @@ class TxDownloadTest(BitcoinTestFramework): # Setup the p2p connections self.peers = [] for node in self.nodes: - for i in range(NUM_INBOUND): + for _ in range(NUM_INBOUND): self.peers.append(node.add_p2p_connection(TestP2PConn())) self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND)) diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index c323168848..71b0b0f63a 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -4,7 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. -Setup: two nodes, node0+node1, not connected to each other. Node1 will have +Setup: two nodes, node0 + node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. 
We have one P2PInterface connection to node0 called test_node, and one to node1 @@ -71,18 +71,10 @@ class AcceptBlockTest(BitcoinTestFramework): self.extra_args = [[], ["-minimumchainwork=0x10"]] def setup_network(self): - # Node0 will be used to test behavior of processing unrequested blocks - # from peers which are not whitelisted, while Node1 will be used for - # the whitelisted case. - # Node2 will be used for non-whitelisted peers to test the interaction - # with nMinimumChainWork. self.setup_nodes() def run_test(self): - # Setup the p2p connections - # test_node connects to node0 (not whitelisted) test_node = self.nodes[0].add_p2p_connection(P2PInterface()) - # min_work_node connects to node1 (whitelisted) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) # 1. Have nodes mine a block (leave IBD) @@ -226,7 +218,7 @@ class AcceptBlockTest(BitcoinTestFramework): self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) - self.log.info("Successfully reorged to longer chain from non-whitelisted peer") + self.log.info("Successfully reorged to longer chain") # 8. Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 6273c229ae..7c70f30ca3 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -241,6 +241,17 @@ class BlockchainTest(BitcoinTestFramework): del res['disk_size'], res3['disk_size'] assert_equal(res, res3) + self.log.info("Test hash_type option for gettxoutsetinfo()") + # Adding hash_type 'hash_serialized_2', which is the default, should + # not change the result. + res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2') + del res4['disk_size'] + assert_equal(res, res4) + + # hash_type none should not return a UTXO set hash. 
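The hash_type argument lets a caller ask gettxoutsetinfo for no UTXO set hash at all; a sketch of the three call forms the surrounding test compares (default, explicit default, and 'none'):

res_default = node.gettxoutsetinfo()                                # includes 'hash_serialized_2'
res_explicit = node.gettxoutsetinfo(hash_type='hash_serialized_2')  # same result as the default
res_none = node.gettxoutsetinfo(hash_type='none')                   # no UTXO set hash in the result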
+ res5 = node.gettxoutsetinfo(hash_type='none') + assert 'hash_serialized_2' not in res5 + def _test_getblockheader(self): node = self.nodes[0] diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index 57c8f511ac..2a0971b808 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -554,7 +554,7 @@ class RawTransactionsTest(BitcoinTestFramework): self.nodes[1].generate(1) self.sync_all() - for i in range(0,20): + for _ in range(20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() @@ -582,7 +582,7 @@ class RawTransactionsTest(BitcoinTestFramework): self.nodes[1].generate(1) self.sync_all() - for i in range(0,20): + for _ in range(20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() diff --git a/test/functional/rpc_generateblock.py b/test/functional/rpc_generateblock.py index aa58c0af9d..08ff0fba50 100755 --- a/test/functional/rpc_generateblock.py +++ b/test/functional/rpc_generateblock.py @@ -55,7 +55,7 @@ class GenerateBlockTest(BitcoinTestFramework): node.generatetoaddress(110, address) # Generate some extra mempool transactions to verify they don't get mined - for i in range(10): + for _ in range(10): node.sendtoaddress(address, 0.001) self.log.info('Generate block with txid') diff --git a/test/functional/rpc_getpeerinfo_banscore_deprecation.py b/test/functional/rpc_getpeerinfo_banscore_deprecation.py new file mode 100755 index 0000000000..b830248e1e --- /dev/null +++ b/test/functional/rpc_getpeerinfo_banscore_deprecation.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test deprecation of getpeerinfo RPC banscore field.""" + +from test_framework.test_framework import BitcoinTestFramework + + +class GetpeerinfoBanscoreDeprecationTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [[], ["-deprecatedrpc=banscore"]] + + def run_test(self): + self.log.info("Test getpeerinfo by default no longer returns a banscore field") + assert "banscore" not in self.nodes[0].getpeerinfo()[0].keys() + + self.log.info("Test getpeerinfo returns banscore with -deprecatedrpc=banscore") + assert "banscore" in self.nodes[1].getpeerinfo()[0].keys() + + +if __name__ == "__main__": + GetpeerinfoBanscoreDeprecationTest().main() diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 58d8c4abe1..192b60e5d2 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -22,8 +22,6 @@ from test_framework.util import ( from test_framework.mininode import P2PInterface import test_framework.messages from test_framework.messages import ( - CAddress, - msg_addr, NODE_NETWORK, NODE_WITNESS, ) @@ -46,10 +44,12 @@ class NetTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 - self.extra_args = [["-minrelaytxfee=0.00001000"],["-minrelaytxfee=0.00000500"]] + self.extra_args = [["-minrelaytxfee=0.00001000"], ["-minrelaytxfee=0.00000500"]] self.supports_cli = False def run_test(self): + self.log.info('Get out of IBD for the minfeefilter test') + self.nodes[0].generate(1) self.log.info('Connect nodes both way') connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) @@ -100,12 +100,14 @@ class NetTest(BitcoinTestFramework): assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2) - self.nodes[0].setnetworkactive(state=False) + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.nodes[0].setnetworkactive(state=False) assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False) # Wait a bit for all sockets to close wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3) - self.nodes[0].setnetworkactive(state=True) + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.nodes[0].setnetworkactive(state=True) self.log.info('Connect nodes both way') connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) @@ -127,6 +129,13 @@ class NetTest(BitcoinTestFramework): added_nodes = self.nodes[0].getaddednodeinfo(ip_port) assert_equal(len(added_nodes), 1) assert_equal(added_nodes[0]['addednode'], ip_port) + # check that node cannot be added again + assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add') + # check that node can be removed + self.nodes[0].addnode(node=ip_port, command='remove') + assert_equal(self.nodes[0].getaddednodeinfo(), []) + # check that trying to remove the node again returns an error + assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove') # check that a non-existent node returns an error assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1') @@ -150,30 +159,34 @@ class NetTest(BitcoinTestFramework): def _test_getnodeaddresses(self): self.nodes[0].add_p2p_connection(P2PInterface()) - # send some addresses to the node via the p2p message addr - msg = msg_addr() + # Add some addresses to the Address Manager over RPC. 
Due to the way + # bucket and bucket position are calculated, some of these addresses + # will collide. imported_addrs = [] - for i in range(256): - a = "123.123.123.{}".format(i) + for i in range(10000): + first_octet = i >> 8 + second_octet = i % 256 + a = "{}.{}.1.1".format(first_octet, second_octet) imported_addrs.append(a) - addr = CAddress() - addr.time = 100000000 - addr.nServices = NODE_NETWORK | NODE_WITNESS - addr.ip = a - addr.port = 8333 - msg.addrs.append(addr) - self.nodes[0].p2p.send_and_ping(msg) - - # obtain addresses via rpc call and check they were ones sent in before - REQUEST_COUNT = 10 - node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT) - assert_equal(len(node_addresses), REQUEST_COUNT) + self.nodes[0].addpeeraddress(a, 8333) + + # Obtain addresses via rpc call and check they were ones sent in before. + # + # Maximum possible addresses in addrman is 10000, although actual + # number will usually be less due to bucket and bucket position + # collisions. + node_addresses = self.nodes[0].getnodeaddresses(0) + assert_greater_than(len(node_addresses), 5000) + assert_greater_than(10000, len(node_addresses)) for a in node_addresses: - assert_greater_than(a["time"], 1527811200) # 1st June 2018 + assert_greater_than(a["time"], 1527811200) # 1st June 2018 assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS) assert a["address"] in imported_addrs assert_equal(a["port"], 8333) + node_addresses = self.nodes[0].getnodeaddresses(1) + assert_equal(len(node_addresses), 1) + assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1) # addrman's size cannot be known reliably after insertion, as hash collisions may occur diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 660953be9b..f7f23bc8f4 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -38,6 +38,7 @@ class PSBTTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() + # TODO: Re-enable this test with segwit v1 def test_utxo_conversion(self): mining_node = self.nodes[2] offline_node = self.nodes[0] @@ -154,8 +155,14 @@ class PSBTTest(BitcoinTestFramework): p2pkh_pos = out['n'] # spend single key from node 1 - rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt'] - walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx) + created_psbt = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}) + walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt']) + # Make sure it has both types of UTXOs + decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt']) + assert 'non_witness_utxo' in decoded['inputs'][0] + assert 'witness_utxo' in decoded['inputs'][0] + # Check decodepsbt fee calculation (input values shall only be counted once per UTXO) + assert_equal(decoded['fee'], created_psbt['fee']) assert_equal(walletprocesspsbt_out['complete'], True) self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex']) @@ -352,7 +359,8 @@ class PSBTTest(BitcoinTestFramework): for i, signer in enumerate(signers): self.nodes[2].unloadwallet("wallet{}".format(i)) - self.test_utxo_conversion() + # TODO: Re-enable this for segwit v1 + # self.test_utxo_conversion() # Test that psbts with 
p2pkh outputs are created properly p2pkh = self.nodes[0].getnewaddress(address_type='legacy') @@ -422,7 +430,7 @@ class PSBTTest(BitcoinTestFramework): # Check that joining shuffles the inputs and outputs # 10 attempts should be enough to get a shuffled join shuffled = False - for i in range(0, 10): + for _ in range(10): shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2]) shuffled |= joined != shuffled_joined if shuffled: diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 14cad3d1b8..23b5e647d6 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -424,11 +424,12 @@ class RawTransactionsTest(BitcoinTestFramework): #################################### # Test the minimum transaction version number that fits in a signed 32-bit integer. + # As transaction version is unsigned, this should convert to its unsigned equivalent. tx = CTransaction() tx.nVersion = -0x80000000 rawtx = ToHex(tx) decrawtx = self.nodes[0].decoderawtransaction(rawtx) - assert_equal(decrawtx['version'], -0x80000000) + assert_equal(decrawtx['version'], 0x80000000) # Test the maximum transaction version number that fits in a signed 32-bit integer. tx = CTransaction() diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 05308931e3..81eb881234 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -115,6 +115,8 @@ class AuthServiceProxy(): except OSError as e: retry = ( '[WinError 10053] An established connection was aborted by the software in your host machine' in str(e)) + # Workaround for a bug on macOS. See https://bugs.python.org/issue33450 + retry = retry or ('[Errno 41] Protocol wrong type for socket' in str(e)) if retry: self.__conn.close() self.__conn.request(method, path, postdata, headers) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index eb1244035f..da956a94de 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -31,7 +31,7 @@ from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, assert_equal MIN_VERSION_SUPPORTED = 60001 -MY_VERSION = 70014 # past bip-31 for ping/pong +MY_VERSION = 70016 # past wtxid relay MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) @@ -59,6 +59,7 @@ MSG_TX = 1 MSG_BLOCK = 2 MSG_FILTERED_BLOCK = 3 MSG_CMPCT_BLOCK = 4 +MSG_WTX = 5 MSG_WITNESS_FLAG = 1 << 30 MSG_TYPE_MASK = 0xffffffff >> 2 @@ -110,7 +111,7 @@ def deser_uint256(f): def ser_uint256(u): rs = b"" - for i in range(8): + for _ in range(8): rs += struct.pack("<I", u & 0xFFFFFFFF) u >>= 32 return rs @@ -133,7 +134,7 @@ def uint256_from_compact(c): def deser_vector(f, c): nit = deser_compact_size(f) r = [] - for i in range(nit): + for _ in range(nit): t = c() t.deserialize(f) r.append(t) @@ -156,7 +157,7 @@ def ser_vector(l, ser_function_name=None): def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] - for i in range(nit): + for _ in range(nit): t = deser_uint256(f) r.append(t) return r @@ -172,7 +173,7 @@ def ser_uint256_vector(l): def deser_string_vector(f): nit = deser_compact_size(f) r = [] - for i in range(nit): + for _ in range(nit): t = deser_string(f) r.append(t) return r @@ -207,17 +208,19 @@ class CAddress: self.ip = "0.0.0.0" self.port = 0 - def deserialize(self, f, 
with_time=True): + def deserialize(self, f, *, with_time=True): if with_time: + # VERSION messages serialize CAddress objects without time self.time = struct.unpack("<i", f.read(4))[0] self.nServices = struct.unpack("<Q", f.read(8))[0] self.pchReserved = f.read(12) self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] - def serialize(self, with_time=True): + def serialize(self, *, with_time=True): r = b"" if with_time: + # VERSION messages serialize CAddress objects without time r += struct.pack("<i", self.time) r += struct.pack("<Q", self.nServices) r += self.pchReserved @@ -240,7 +243,8 @@ class CInv: MSG_TX | MSG_WITNESS_FLAG: "WitnessTx", MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock", MSG_FILTERED_BLOCK: "filtered Block", - 4: "CompactBlock" + 4: "CompactBlock", + 5: "WTX", } def __init__(self, t=0, h=0): @@ -261,6 +265,9 @@ class CInv: return "CInv(type=%s hash=%064x)" \ % (self.typemap[self.type], self.hash) + def __eq__(self, other): + return isinstance(other, CInv) and self.hash == other.hash and self.type == other.type + class CBlockLocator: __slots__ = ("nVersion", "vHave") @@ -460,7 +467,7 @@ class CTransaction: else: self.vout = deser_vector(f, CTxOut) if flags != 0: - self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] + self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))] self.wit.deserialize(f) else: self.wit = CTxWitness() @@ -493,7 +500,7 @@ class CTransaction: if (len(self.wit.vtxinwit) != len(self.vin)): # vtxinwit must have the same length as vin self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)] - for i in range(len(self.wit.vtxinwit), len(self.vin)): + for _ in range(len(self.wit.vtxinwit), len(self.vin)): self.wit.vtxinwit.append(CTxInWitness()) r += self.wit.serialize() r += struct.pack("<I", self.nLockTime) @@ -728,7 +735,7 @@ class P2PHeaderAndShortIDs: self.header.deserialize(f) self.nonce = struct.unpack("<Q", f.read(8))[0] self.shortids_length = deser_compact_size(f) - for i in range(self.shortids_length): + for _ in range(self.shortids_length): # shortids are defined to be 6 bytes in the spec, so append # two zero bytes and read it in as an 8-byte number self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0]) @@ -845,7 +852,7 @@ class BlockTransactionsRequest: def deserialize(self, f): self.blockhash = deser_uint256(f) indexes_length = deser_compact_size(f) - for i in range(indexes_length): + for _ in range(indexes_length): self.indexes.append(deser_compact_size(f)) def serialize(self): @@ -973,10 +980,10 @@ class msg_version: self.nServices = struct.unpack("<Q", f.read(8))[0] self.nTime = struct.unpack("<q", f.read(8))[0] self.addrTo = CAddress() - self.addrTo.deserialize(f, False) + self.addrTo.deserialize(f, with_time=False) self.addrFrom = CAddress() - self.addrFrom.deserialize(f, False) + self.addrFrom.deserialize(f, with_time=False) self.nNonce = struct.unpack("<Q", f.read(8))[0] self.strSubVer = deser_string(f) @@ -996,8 +1003,8 @@ class msg_version: r += struct.pack("<i", self.nVersion) r += struct.pack("<Q", self.nServices) r += struct.pack("<q", self.nTime) - r += self.addrTo.serialize(False) - r += self.addrFrom.serialize(False) + r += self.addrTo.serialize(with_time=False) + r += self.addrFrom.serialize(with_time=False) r += struct.pack("<Q", self.nNonce) r += ser_string(self.strSubVer) r += struct.pack("<i", self.nStartingHeight) @@ -1122,6 +1129,22 @@ class msg_tx: def __repr__(self): return "msg_tx(tx=%s)" % (repr(self.tx)) +class msg_wtxidrelay: + __slots__ = () + 
msgtype = b"wtxidrelay" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return b"" + + def __repr__(self): + return "msg_wtxidrelay()" + class msg_no_witness_tx(msg_tx): __slots__ = () diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index e6da33763d..eaf637fbb8 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -59,6 +59,8 @@ from test_framework.messages import ( MSG_TYPE_MASK, msg_verack, msg_version, + MSG_WTX, + msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, sha256, @@ -96,6 +98,7 @@ MESSAGEMAP = { b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, + b"wtxidrelay": msg_wtxidrelay, } MAGIC_BYTES = { @@ -280,9 +283,13 @@ class P2PInterface(P2PConnection): def __init__(self): super().__init__() - # Track number of messages of each type received and the most recent - # message of each type + # Track number of messages of each type received. + # Should be read-only in a test. self.message_count = defaultdict(int) + + # Track the most recent message of each type. + # To wait for a message to be received, pop that message from + # this and use wait_until. self.last_message = {} # A count of the number of ping messages we've sent to the node @@ -356,6 +363,7 @@ class P2PInterface(P2PConnection): def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass + def on_wtxidrelay(self, message): pass def on_inv(self, message): want = msg_getdata() @@ -373,23 +381,29 @@ class P2PInterface(P2PConnection): def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED) + if message.nVersion >= 70016: + self.send_message(msg_wtxidrelay()) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods - def wait_until(self, test_function, timeout=60): + def wait_until(self, test_function_in, *, timeout=60, check_connected=True): + def test_function(): + if check_connected: + assert self.is_connected + return test_function_in() + wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor) def wait_for_disconnect(self, timeout=60): test_function = lambda: not self.is_connected - self.wait_until(test_function, timeout=timeout) + self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): - assert self.is_connected if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid @@ -398,14 +412,12 @@ class P2PInterface(P2PConnection): def wait_for_block(self, blockhash, timeout=60): def test_function(): - assert self.is_connected return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): - assert self.is_connected last_headers = self.last_message.get('headers') if not last_headers: return False @@ -415,7 +427,6 @@ class P2PInterface(P2PConnection): def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): - assert self.is_connected last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False @@ -427,9 +438,7 @@ class 
P2PInterface(P2PConnection): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" - def test_function(): - assert self.is_connected last_data = self.last_message.get("getdata") if not last_data: return False @@ -444,9 +453,7 @@ class P2PInterface(P2PConnection): value must be explicitly cleared before calling this method, or this will return immediately with success. TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" - def test_function(): - assert self.is_connected return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) @@ -457,7 +464,6 @@ class P2PInterface(P2PConnection): raise NotImplementedError("wait_for_inv() will only verify the first inv object") def test_function(): - assert self.is_connected return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash @@ -466,7 +472,7 @@ class P2PInterface(P2PConnection): def wait_for_verack(self, timeout=60): def test_function(): - return self.message_count["verack"] + return "verack" in self.last_message self.wait_until(test_function, timeout=timeout) @@ -481,7 +487,6 @@ class P2PInterface(P2PConnection): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): - assert self.is_connected return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter self.wait_until(test_function, timeout=timeout) @@ -599,7 +604,11 @@ class P2PDataStore(P2PInterface): self.send_message(msg_block(block=b)) else: self.send_message(msg_headers([CBlockHeader(block) for block in blocks])) - self.wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout) + self.wait_until( + lambda: blocks[-1].sha256 in self.getdata_requests, + timeout=timeout, + check_connected=success, + ) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) @@ -654,7 +663,7 @@ class P2PTxInvStore(P2PInterface): super().on_inv(message) # Send getdata in response. # Store how many times invs have been received for each tx. for i in message.inv: - if i.type == MSG_TX: + if (i.type == MSG_TX) or (i.type == MSG_WTX): # save txid self.tx_invs_received[i.hash] += 1 @@ -667,6 +676,6 @@ class P2PTxInvStore(P2PInterface): The mempool should mark unbroadcast=False for these transactions. """ # Wait until invs have been received (and getdatas sent) for each txid. 
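One consequence of the new keyword-only wait_until signature above (def wait_until(self, test_function_in, *, timeout=60, check_connected=True)) is that callers, including the wait on the next line, must now pass timeout by keyword. A usage sketch of the two common forms, with check_connected=False reserved for waits that expect the connection to drop:

# Normal wait: the connection is asserted to still be up on every poll.
peer.wait_until(lambda: 'pong' in peer.last_message, timeout=30)

# Wait across an expected disconnect: skip that connectivity assertion.
peer.wait_until(lambda: not peer.is_connected, timeout=30, check_connected=False)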
- self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout) + self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout) # Flush messages and wait for the getdatas to be processed self.sync_with_ping() diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index cc5f8307d3..5e35ba0fce 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -646,7 +646,7 @@ def LegacySignatureHash(script, txTo, inIdx, hashtype): tmp = txtmp.vout[outIdx] txtmp.vout = [] - for i in range(outIdx): + for _ in range(outIdx): txtmp.vout.append(CTxOut(-1)) txtmp.vout.append(tmp) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 9d9e065158..8d402d4888 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -31,6 +31,7 @@ from .util import ( disconnect_nodes, get_datadir_path, initialize_datadir, + wait_until, ) @@ -602,6 +603,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.sync_blocks(nodes) self.sync_mempools(nodes) + def wait_until(self, test_function, timeout=60, lock=None): + return wait_until(test_function, timeout=timeout, lock=lock, timeout_factor=self.options.timeout_factor) + # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 66bb2c89b5..8f0d45c7f9 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -524,6 +524,7 @@ class TestNode(): p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)() self.p2ps.append(p2p_conn) + p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() @@ -637,7 +638,7 @@ class TestNodeCLI(): raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) - except json.JSONDecodeError: + except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") class RPCOverloadWrapper(): diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 506057f1fa..3362b41209 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -529,7 +529,7 @@ def create_confirmed_utxos(fee, node, count): addr2 = node.getnewaddress() if iterations <= 0: return utxos - for i in range(iterations): + for _ in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) @@ -556,7 +556,7 @@ def gen_return_txouts(): # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes - for i in range(512): + for _ in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change txouts = [] @@ -564,7 +564,7 @@ def gen_return_txouts(): txout = CTxOut() txout.nValue = 0 txout.scriptPubKey = hex_str_to_bytes(script_pubkey) - for k in range(128): + for _ in range(128): txouts.append(txout) return txouts diff --git 
a/test/functional/test_runner.py b/test/functional/test_runner.py index 41f9bde183..b090e93394 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -159,6 +159,7 @@ BASE_SCRIPTS = [ 'rpc_deprecated.py', 'wallet_disable.py', 'p2p_addr_relay.py', + 'p2p_getaddr_caching.py', 'p2p_getdata.py', 'rpc_net.py', 'wallet_keypool.py', @@ -236,16 +237,20 @@ BASE_SCRIPTS = [ 'mempool_compatibility.py', 'rpc_deriveaddresses.py', 'rpc_deriveaddresses.py --usecli', + 'p2p_ping.py', 'rpc_scantxoutset.py', 'feature_logging.py', 'p2p_node_network_limited.py', 'p2p_permissions.py', 'feature_blocksdir.py', 'feature_config_args.py', + 'feature_settings.py', 'rpc_getdescriptorinfo.py', + 'rpc_getpeerinfo_banscore_deprecation.py', 'rpc_help.py', 'feature_help.py', 'feature_shutdown.py', + 'p2p_ibd_txrelay.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] @@ -396,11 +401,12 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage= args = args or [] # Warn if bitcoind is already running - # pidof might fail or return an empty string if bitcoind is not running try: - if subprocess.check_output(["pidof", "bitcoind"]) not in [b'']: + # pgrep exits with code zero when one or more matching processes found + if subprocess.run(["pgrep", "-x", "bitcoind"], stdout=subprocess.DEVNULL).returncode == 0: print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0])) - except (OSError, subprocess.SubprocessError): + except OSError: + # pgrep not supported pass # Warn if there is a cache directory diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 524e1593ba..18f0beb598 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -71,8 +71,7 @@ class ToolWalletTest(BitcoinTestFramework): self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create') self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo') self.assert_raises_tool_error( - 'Error initializing wallet database environment "{}"!\nError loading wallet.dat. Is wallet being used by other process?' - .format(os.path.join(self.nodes[0].datadir, self.chain, 'wallets')), + 'Error loading wallet.dat. Is wallet being used by another process?', '-wallet=wallet.dat', 'info', ) diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 9dd91b2495..4766355335 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -129,7 +129,7 @@ class WalletBackupTest(BitcoinTestFramework): self.log.info("Creating transactions") # Five rounds of sending each other transactions. 
- for i in range(5): + for _ in range(5): self.do_one_round() self.log.info("Backing up") @@ -142,7 +142,7 @@ class WalletBackupTest(BitcoinTestFramework): self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump')) self.log.info("More transactions") - for i in range(5): + for _ in range(5): self.do_one_round() # Generate 101 more blocks, so any fees paid mature diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 8962362276..d9a8b58a84 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -119,7 +119,7 @@ class WalletTest(BitcoinTestFramework): assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0]) self.nodes[2].lockunspent(False, [unspent_0]) assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0]) - assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20) + assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20) assert_equal([unspent_0], self.nodes[2].listlockunspent()) self.nodes[2].lockunspent(True, [unspent_0]) assert_equal(len(self.nodes[2].listlockunspent()), 0) @@ -363,6 +363,9 @@ class WalletTest(BitcoinTestFramework): assert_equal(tx_obj['amount'], Decimal('-0.0001')) # General checks for errors from incorrect inputs + # This will raise an exception because the amount is negative + assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "-1") + # This will raise an exception because the amount type is wrong assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4") @@ -566,7 +569,7 @@ class WalletTest(BitcoinTestFramework): # So we should be able to generate exactly chainlimit txs for each original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] - for i in range(chainlimit * 2): + for _ in range(chainlimit * 2): txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2) assert_equal(len(txid_list), chainlimit * 2) @@ -590,7 +593,7 @@ class WalletTest(BitcoinTestFramework): node0_balance = self.nodes[0].getbalance() # With walletrejectlongchains we will not create the tx and store it in our wallet. 
- assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) + assert_raises_rpc_error(-6, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) # Verify nothing new in wallet assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999))) diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py index 91f1bf561f..53496084ef 100755 --- a/test/functional/wallet_bumpfee.py +++ b/test/functional/wallet_bumpfee.py @@ -62,7 +62,7 @@ class BumpFeeTest(BitcoinTestFramework): self.log.info("Mining blocks...") peer_node.generate(110) self.sync_all() - for i in range(25): + for _ in range(25): peer_node.sendtoaddress(rbf_node_address, 0.001) self.sync_all() peer_node.generate(1) diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py index 330de8b0fc..ed9159726a 100755 --- a/test/functional/wallet_create_tx.py +++ b/test/functional/wallet_create_tx.py @@ -45,7 +45,7 @@ class CreateTxWalletTest(BitcoinTestFramework): def test_tx_size_too_large(self): # More than 10kB of outputs, so that we hit -maxtxfee with a high feerate - outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for i in range(400)} + outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for _ in range(400)} raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs) for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']: diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py index 289ccf43ec..9c63e8f7d3 100755 --- a/test/functional/wallet_descriptor.py +++ b/test/functional/wallet_descriptor.py @@ -107,7 +107,7 @@ class WalletDescriptorTest(BitcoinTestFramework): assert_equal(info2['desc'], info3['desc']) self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet") - for i in range(0, 500): + for _ in range(500): send_wrpc.getnewaddress() self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet") @@ -120,7 +120,7 @@ class WalletDescriptorTest(BitcoinTestFramework): }]) send_wrpc.walletlock() # Exhaust keypool of 100 - for i in range(0, 100): + for _ in range(100): send_wrpc.getnewaddress(address_type='bech32') # This should now error assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32') diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index ba1e494d9a..06f01ef191 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -116,7 +116,7 @@ class WalletDumpTest(BitcoinTestFramework): test_addr_count = 10 addrs = [] for address_type in ['legacy', 'p2sh-segwit', 'bech32']: - for i in range(0, test_addr_count): + for _ in range(test_addr_count): addr = self.nodes[0].getnewaddress(address_type=address_type) vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath addrs.append(vaddr) @@ -202,5 +202,10 @@ class WalletDumpTest(BitcoinTestFramework): result = self.nodes[0].getaddressinfo(multisig_addr) assert result['ismine'] + self.log.info('Check that wallet is flushed') + with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20): + self.nodes[0].getnewaddress() + + if __name__ == '__main__': WalletDumpTest().main() diff --git a/test/functional/wallet_encryption.py 
diff --git a/test/functional/wallet_encryption.py b/test/functional/wallet_encryption.py
index 6cd82ad250..4509c1e0b2 100755
--- a/test/functional/wallet_encryption.py
+++ b/test/functional/wallet_encryption.py
@@ -13,6 +13,7 @@ from test_framework.util import (
     assert_greater_than_or_equal,
 )
 
+
 class WalletEncryptionTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
@@ -72,20 +73,25 @@ class WalletEncryptionTest(BitcoinTestFramework):
 
         # Test timeout bounds
         assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
-        # Check the timeout
-        # Check a time less than the limit
+
+        self.log.info('Check a timeout less than the limit')
         MAX_VALUE = 100000000
         expected_time = int(time.time()) + MAX_VALUE - 600
         self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
+        # give buffer for walletpassphrase, since it iterates over all crypted keys
+        expected_time_with_buffer = time.time() + MAX_VALUE - 600
         actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
         assert_greater_than_or_equal(actual_time, expected_time)
-        assert_greater_than(expected_time + 5, actual_time)  # 5 second buffer
-        # Check a time greater than the limit
+        assert_greater_than(expected_time_with_buffer, actual_time)
+
+        self.log.info('Check a timeout greater than the limit')
         expected_time = int(time.time()) + MAX_VALUE - 1
         self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
+        expected_time_with_buffer = time.time() + MAX_VALUE
         actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
         assert_greater_than_or_equal(actual_time, expected_time)
-        assert_greater_than(expected_time + 5, actual_time)  # 5 second buffer
+        assert_greater_than(expected_time_with_buffer, actual_time)
+
 
 if __name__ == '__main__':
     WalletEncryptionTest().main()
diff --git a/test/functional/wallet_fallbackfee.py b/test/functional/wallet_fallbackfee.py
index 0c67982bbe..dbf853b35c 100755
--- a/test/functional/wallet_fallbackfee.py
+++ b/test/functional/wallet_fallbackfee.py
@@ -22,7 +22,7 @@ class WalletRBFTest(BitcoinTestFramework):
 
         # test sending a tx with disabled fallback fee (must fail)
         self.restart_node(0, extra_args=["-fallbackfee=0"])
-        assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
+        assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
         assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].fundrawtransaction(self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})))
         assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendmany("", {self.nodes[0].getnewaddress(): 1}))
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index 9dd55b4ab1..b6fe295127 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -27,8 +27,8 @@ class WalletGroupTest(BitcoinTestFramework):
         self.nodes[0].generate(110)
 
         # Get some addresses from the two nodes
-        addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
-        addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
+        addr1 = [self.nodes[1].getnewaddress() for _ in range(3)]
+        addr2 = [self.nodes[2].getnewaddress() for _ in range(3)]
         addrs = addr1 + addr2
 
         # Send 1 + 0.5 coin to each address
@@ -71,7 +71,7 @@ class WalletGroupTest(BitcoinTestFramework):
 
         # Fill node2's wallet with 10000 outputs corresponding to the same
         # scriptPubKey
-        for i in range(5):
+        for _ in range(5):
             raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
             tx = FromHex(CTransaction(), raw_tx)
             tx.vin = []
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index fc5d653a91..2d982edef8 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -146,6 +146,14 @@ class ImportDescriptorsTest(BitcoinTestFramework):
                      ismine=True,
                      solvable=True)
 
+        # Check persistence of data and that loading works correctly
+        w1.unloadwallet()
+        self.nodes[1].loadwallet('w1')
+        test_address(w1,
+                     key.p2sh_p2wpkh_addr,
+                     ismine=True,
+                     solvable=True)
+
         # # Test importing of a multisig descriptor
         key1 = get_generate_key()
         key2 = get_generate_key()
@@ -370,6 +378,10 @@ class ImportDescriptorsTest(BitcoinTestFramework):
         self.sync_all()
         assert_equal(wmulti_pub.getbalance(), wmulti_priv.getbalance())
 
+        # Make sure that descriptor wallets containing multiple xpubs in a single descriptor load correctly
+        wmulti_pub.unloadwallet()
+        self.nodes[1].loadwallet('wmulti_pub')
+
         self.log.info("Multisig with distributed keys")
         self.nodes[1].createwallet(wallet_name="wmulti_priv1", descriptors=True)
         wmulti_priv1 = self.nodes[1].get_wallet_rpc("wmulti_priv1")
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index fb4a1f9792..cff59bd1c1 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -118,7 +118,7 @@ class WalletLabelsTest(BitcoinTestFramework):
         if not self.options.descriptors:
             for label in labels:
                 addresses = []
-                for x in range(10):
+                for _ in range(10):
                     addresses.append(node.getnewaddress())
                 multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
                 label.add_address(multisig_address)
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index ff9ff34185..1872545cdb 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -7,19 +7,36 @@ Verify that a bitcoind node can load multiple wallet files
 """
 from decimal import Decimal
+from threading import Thread
 import os
 import shutil
 import time
 
+from test_framework.authproxy import JSONRPCException
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.test_node import ErrorMatch
 from test_framework.util import (
     assert_equal,
     assert_raises_rpc_error,
+    get_rpc_proxy,
 )
 
 FEATURE_LATEST = 169900
 
+got_loading_error = False
+def test_load_unload(node, name):
+    global got_loading_error
+    for _ in range(10):
+        if got_loading_error:
+            return
+        try:
+            node.loadwallet(name)
+            node.unloadwallet(name)
+        except JSONRPCException as e:
+            if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
+                got_loading_error = True
+                return
+
 
 class MultiWalletTest(BitcoinTestFramework):
     def set_test_params(self):
@@ -103,7 +120,7 @@ class MultiWalletTest(BitcoinTestFramework):
 
         # should not initialize if one wallet is a copy of another
         shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
-        exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
+        exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
         self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
 
         # should not initialize if wallet file is a symlink
@@ -212,6 +229,18 @@ class MultiWalletTest(BitcoinTestFramework):
         w2 = node.get_wallet_rpc(wallet_names[1])
         w2.getwalletinfo()
 
+        self.log.info("Concurrent wallet loading")
+        threads = []
+        for _ in range(3):
+            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
+            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
+            t.start()
+            threads.append(t)
+        for t in threads:
+            t.join()
+        global got_loading_error
+        assert_equal(got_loading_error, True)
+
         self.log.info("Load remaining wallets")
         for wallet_name in wallet_names[2:]:
             loadwallet_name = self.nodes[0].loadwallet(wallet_name)
@@ -229,10 +258,10 @@ class MultiWalletTest(BitcoinTestFramework):
         assert_raises_rpc_error(-4, "Wallet file verification failed. Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
 
         # Fail to load if one wallet is a copy of another
-        assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
+        assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
 
         # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
-        assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
+        assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
 
         # Fail to load if wallet file is a symlink
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index cc2139a027..1a76f65215 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -6,7 +6,7 @@ Test upgradewallet RPC.
 
 Download node binaries:
 
-contrib/devtools/previous_release.sh -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2
+contrib/devtools/previous_release.py -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2
 
 Only v0.15.2 and v0.16.3 are required by this test. The others are used in feature_backwards_compatibility.py
 """
diff --git a/test/lint/README.md b/test/lint/README.md
index 6b95cc3540..d15c061288 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -23,6 +23,12 @@ maintained:
 * for `src/crypto/ctaes`: https://github.com/bitcoin-core/ctaes.git (branch master)
 * for `src/crc32c`: https://github.com/google/crc32c.git (branch master)
 
+To do so, add the upstream repository as remote:
+
+```
+git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git
+```
+
 Usage: `git-subtree-check.sh DIR (COMMIT)`
 
 `COMMIT` may be omitted, in which case `HEAD` is used.
diff --git a/test/lint/git-subtree-check.sh b/test/lint/git-subtree-check.sh
index caa7affc63..5a0500df25 100755
--- a/test/lint/git-subtree-check.sh
+++ b/test/lint/git-subtree-check.sh
@@ -81,7 +81,7 @@ fi
 
 # get the tree in the subtree commit referred to
 if [ "d$(git cat-file -t $rev 2>/dev/null)" != dcommit ]; then
-    echo "subtree commit $rev unavailable: cannot compare" >&2
+    echo "subtree commit $rev unavailable: cannot compare. Did you add and fetch the remote?" >&2
     exit
 fi
 tree_subtree=$(git show -s --format="%T" $rev)
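Not part of the diff: an illustrative invocation of the subtree check documented in the README change above, assuming it is run from the repository root and that the remote from the README snippet has been added and fetched (the directory argument is one of the maintained subtrees; COMMIT defaults to HEAD):

    git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git
    test/lint/git-subtree-check.sh src/secp256k1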
diff --git a/test/lint/lint-assertions.sh b/test/lint/lint-assertions.sh
index 1aacc09bcc..d30a8ca231 100755
--- a/test/lint/lint-assertions.sh
+++ b/test/lint/lint-assertions.sh
@@ -23,7 +23,7 @@ fi
 # Macro CHECK_NONFATAL(condition) should be used instead of assert for RPC code, where it
 # is undesirable to crash the whole program. See: src/util/check.h
 # src/rpc/server.cpp is excluded from this check since it's mostly meta-code.
-OUTPUT=$(git grep -nE 'assert *\(.*\);' -- "src/rpc/" "src/wallet/rpc*" ":(exclude)src/rpc/server.cpp")
+OUTPUT=$(git grep -nE '\<(A|a)ssert *\(.*\);' -- "src/rpc/" "src/wallet/rpc*" ":(exclude)src/rpc/server.cpp")
 if [[ ${OUTPUT} != "" ]]; then
     echo "CHECK_NONFATAL(condition) should be used instead of assert for RPC code."
     echo
diff --git a/test/lint/lint-git-commit-check.sh b/test/lint/lint-git-commit-check.sh
new file mode 100755
index 0000000000..8947f67bf6
--- /dev/null
+++ b/test/lint/lint-git-commit-check.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Linter to check that commit messages have a new line before the body
+# or no body at all
+
+export LC_ALL=C
+
+EXIT_CODE=0
+
+while getopts "?" opt; do
+  case $opt in
+    ?)
+      echo "Usage: $0 [N]"
+      echo "       COMMIT_RANGE='<commit range>' $0"
+      echo "       $0 -?"
+      echo "Checks unmerged commits, the previous N commits, or a commit range."
+      echo "COMMIT_RANGE='47ba2c3...ee50c9e' $0"
+      exit ${EXIT_CODE}
+    ;;
+  esac
+done
+
+if [ -z "${COMMIT_RANGE}" ]; then
+  if [ -n "$1" ]; then
+    COMMIT_RANGE="HEAD~$1...HEAD"
+  else
+    MERGE_BASE=$(git merge-base HEAD master)
+    COMMIT_RANGE="$MERGE_BASE..HEAD"
+  fi
+fi
+
+while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do
+  n_line=0
+  while IFS= read -r line || [[ -n "$line" ]]; do
+    n_line=$((n_line+1))
+    length=${#line}
+    if [ $n_line -eq 2 ] && [ $length -ne 0 ]; then
+      echo "The subject line of commit hash ${commit_hash} is followed by a non-empty line. Subject lines should always be followed by a blank line."
+      EXIT_CODE=1
+    fi
+  done < <(git log --format=%B -n 1 "$commit_hash")
+done < <(git log "${COMMIT_RANGE}" --format=%H)
+
+exit ${EXIT_CODE}
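Not part of the diff: a hedged usage sketch for the new commit-message linter above, derived from its own help text (with no arguments it checks the commits since the merge-base with master):

    test/lint/lint-git-commit-check.sh 3                                   # check the previous 3 commits
    COMMIT_RANGE='47ba2c3...ee50c9e' test/lint/lint-git-commit-check.sh    # check an explicit commit range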
diff --git a/test/lint/lint-include-guards.sh b/test/lint/lint-include-guards.sh
index 3a0494c190..5d5a150db8 100755
--- a/test/lint/lint-include-guards.sh
+++ b/test/lint/lint-include-guards.sh
@@ -10,7 +10,7 @@ export LC_ALL=C
 HEADER_ID_PREFIX="BITCOIN_"
 HEADER_ID_SUFFIX="_H"
 
-REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|crc32c/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|univalue/)"
+REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|crc32c/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|bench/nanobench.h|univalue/)"
 
 EXIT_CODE=0
 
 for HEADER_FILE in $(git ls-files -- "*.h" | grep -vE "^${REGEXP_EXCLUDE_FILES_WITH_PREFIX}")
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index 5404565b94..fde77aea2d 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -63,8 +63,9 @@ EXPECTED_BOOST_INCLUDES=(
     boost/optional.hpp
     boost/preprocessor/cat.hpp
     boost/preprocessor/stringize.hpp
+    boost/process.hpp
     boost/signals2/connection.hpp
-    boost/signals2/last_value.hpp
+    boost/signals2/optional_last_value.hpp
     boost/signals2/signal.hpp
     boost/test/unit_test.hpp
     boost/thread/condition_variable.hpp
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index e2bb403c4d..2e5b801849 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -97,6 +97,7 @@ LOCALE_DEPENDENT_FUNCTIONS=(
     snprintf
     sprintf
     sscanf
+    std::locale::global
     std::to_string
     stod
     stof
diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan
index b8fe75c5c5..3d0ac7f995 100644
--- a/test/sanitizer_suppressions/tsan
+++ b/test/sanitizer_suppressions/tsan
@@ -19,8 +19,11 @@ race:CConnman::ThreadMessageHandler
 race:fHaveGenesis
 race:ProcessNewBlock
 race:ThreadImport
+race:LoadWallet
 race:WalletBatch::WriteHDChain
+race:BerkeleyBatch
 race:BerkeleyDatabase
+race:DatabaseBatch
 race:zmq::*
 race:bitcoin-qt
 # deadlock (TODO fix)
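Not part of the diff: a rough sketch of how a ThreadSanitizer build would pick up the suppressions file touched above. The configure flags and runtime options are illustrative assumptions rather than the project's exact CI invocation:

    # point TSan at the suppressions file before running the instrumented binaries
    export TSAN_OPTIONS="suppressions=$(pwd)/test/sanitizer_suppressions/tsan:halt_on_error=1"
    ./configure --with-sanitizers=thread CC=clang CXX=clang++
    make
    src/test/test_bitcoin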