65 files changed, 1225 insertions(+), 316 deletions(-)
diff --git a/.appveyor.yml b/.appveyor.yml index eace0b7121..0d026748b5 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -1,6 +1,6 @@ version: '{branch}.{build}' skip_tags: true -image: Visual Studio 2019 +image: Previous Visual Studio 2019 configuration: Release platform: x64 clone_depth: 5 diff --git a/.cirrus.yml b/.cirrus.yml index 916f172a6a..237560fc2e 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -8,7 +8,7 @@ container: memory: 8G # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers kvm: true # Use kvm to avoid spurious CI failures in the default virtualization cluster, see https://github.com/bitcoin/bitcoin/issues/20093 env: - PACKAGE_MANAGER_INSTALL : "apt-get update && apt-get install -y" + PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y" MAKEJOBS: "-j4" DANGER_RUN_CI_ON_HOST: "1" # Containers will be discarded after the run, so there is no risk that the ci scripts modify the system TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache @@ -29,6 +29,7 @@ global_task_template: &GLOBAL_TASK_TEMPLATE depends_releases_cache: folder: "/tmp/cirrus-ci-build/releases" merge_base_script: + - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi - bash -c "$PACKAGE_MANAGER_INSTALL git" - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH - git config --global user.email "ci@ci.ci" @@ -54,6 +55,22 @@ global_task_template: &GLOBAL_TASK_TEMPLATE # - choco install python --version=3.7.7 -y task: + name: 'ARM [GOAL: install] [buster] [unit tests, no functional tests]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: debian:buster + env: + FILE_ENV: "./ci/test/00_setup_env_arm.sh" + +task: + name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]' + << : *GLOBAL_TASK_TEMPLATE + container: + image: ubuntu:bionic + env: + FILE_ENV: "./ci/test/00_setup_env_win64.sh" + +task: name: 'x86_64 Linux [GOAL: install] [bionic] [C++17, previous releases, uses qt5 dev package and some depends packages] [unsigned char]' << : *GLOBAL_TASK_TEMPLATE container: @@ -97,14 +114,6 @@ task: FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" task: - name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, fuzzers under valgrind]' - << : *GLOBAL_TASK_TEMPLATE - container: - image: ubuntu:focal - env: - FILE_ENV: "./ci/test/00_setup_env_native_fuzz_with_valgrind.sh" - -task: name: 'x86_64 Linux [GOAL: install] [focal] [multiprocess]' << : *GLOBAL_TASK_TEMPLATE container: @@ -119,3 +128,17 @@ task: image: ubuntu:bionic env: FILE_ENV: "./ci/test/00_setup_env_mac.sh" + +task: + name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]' + macos_brew_addon_script: + - brew install boost libevent berkeley-db4 qt miniupnpc ccache zeromq qrencode sqlite libtool automake pkg-config gnu-getopt + << : *GLOBAL_TASK_TEMPLATE + osx_instance: + # Use latest image, but hardcode version to avoid silent upgrades (and breaks) + image: catalina-xcode-12.1 # https://cirrus-ci.org/guide/macOS + env: + DANGER_RUN_CI_ON_HOST: "true" + CI_USE_APT_INSTALL: "no" + PACKAGE_MANAGER_INSTALL: "echo" # Nothing to do + FILE_ENV: "./ci/test/00_setup_env_mac_host.sh" diff --git a/.travis.yml b/.travis.yml index 55b34ae422..656eed9871 100644 --- a/.travis.yml +++ b/.travis.yml @@ -66,18 +66,6 @@ jobs: - set -o errexit; source ./ci/lint/06_script.sh - stage: test - name: 'ARM [GOAL: install] [buster] [unit tests, no functional tests]' - arch: arm64 # Can disable QEMU_USER_CMD 
and run the tests natively without qemu - env: >- - FILE_ENV="./ci/test/00_setup_env_arm.sh" - QEMU_USER_CMD="" - - - stage: test - name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]' - env: >- - FILE_ENV="./ci/test/00_setup_env_win64.sh" - - - stage: test name: '32-bit + dash [GOAL: install] [CentOS 7] [gui]' env: >- FILE_ENV="./ci/test/00_setup_env_i686_centos.sh" @@ -86,24 +74,3 @@ jobs: name: 'x86_64 Linux [GOAL: install] [xenial] [no wallet]' env: >- FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh" - - - stage: test - name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]' - os: osx - # Use the most recent version: - # Xcode 11.3.1, macOS 10.14, SDK 10.15 - # https://docs.travis-ci.com/user/reference/osx/#macos-version - osx_image: xcode11.3 - addons: - homebrew: - packages: - - berkeley-db4 - - miniupnpc - - qrencode - - sqlite - - ccache - - zeromq - env: >- - DANGER_RUN_CI_ON_HOST=true - CI_USE_APT_INSTALL=no - FILE_ENV="./ci/test/00_setup_env_mac_host.sh" diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index 702e881862..72e29141a6 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -38,7 +38,7 @@ export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false} # By how much to scale the test_runner timeouts (option --timeout-factor). # This is needed because some ci machines have slow CPU or disk, so sanitizers # might be slow or a reindex might be waiting on disk IO. -export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-4} +export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-40} export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-} export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false} export EXPECTED_TESTS_DURATION_IN_SECONDS=${EXPECTED_TESTS_DURATION_IN_SECONDS:-1000} diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index b62f1603f4..e4450a65ce 100644 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -7,6 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_macos_cross +export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well) export HOST=x86_64-apple-darwin16 export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools" export XCODE_VERSION=11.3.1 diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh index 5fb127b762..7c25a34cfe 100644 --- a/ci/test/00_setup_env_mac_host.sh +++ b/ci/test/00_setup_env_mac_host.sh @@ -7,16 +7,12 @@ export LC_ALL=C.UTF-8 export HOST=x86_64-apple-darwin16 -export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well) export PIP_PACKAGES="zmq" export GOAL="install" export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process" +export CI_OS_NAME="macos" export NO_DEPENDS=1 export OSX_SDK="" export CCACHE_SIZE=300M export RUN_SECURITY_TESTS="true" -if [ "$TRAVIS_REPO_SLUG" != "bitcoin/bitcoin" ]; then - export RUN_FUNCTIONAL_TESTS="false" - export EXPECTED_TESTS_DURATION_IN_SECONDS=200 -fi diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index 2b351dff6d..72cc3f63c4 100644 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to win64 (bionic is used in the gitian build as well) export 
HOST=x86_64-w64-mingw32 -export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64" +export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 file" export RUN_FUNCTIONAL_TESTS=false export RUN_SECURITY_TESTS="true" export GOAL="deploy" diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh index 632bccf574..db74fe6569 100755 --- a/ci/test/04_install.sh +++ b/ci/test/04_install.sh @@ -13,8 +13,8 @@ if [[ $QEMU_USER_CMD == qemu-s390* ]]; then export LC_ALL=C fi -if [ "$TRAVIS_OS_NAME" == "osx" ]; then - ${CI_RETRY_EXE} pip3 install $PIP_PACKAGES +if [ "$CI_OS_NAME" == "macos" ]; then + IN_GETOPT_BIN="/usr/local/opt/gnu-getopt/bin/getopt" ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi # Create folders that are mounted into the docker @@ -26,9 +26,7 @@ export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/l export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan" export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1" env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|DEBIAN_FRONTEND|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|PREVIOUS_RELEASES_DIR)' | tee /tmp/env -if [[ $HOST = *-mingw32 ]]; then - DOCKER_ADMIN="--cap-add SYS_ADMIN" -elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764) +if [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764) DOCKER_ADMIN="--cap-add SYS_PTRACE" fi @@ -69,16 +67,16 @@ elif [ "$CI_USE_APT_INSTALL" != "no" ]; then ${CI_RETRY_EXE} DOCKER_EXEC apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $DOCKER_PACKAGES fi -if [ "$TRAVIS_OS_NAME" == "osx" ]; then +if [ "$CI_OS_NAME" == "macos" ]; then top -l 1 -s 0 | awk ' /PhysMem/ {print}' echo "Number of CPUs: $(sysctl -n hw.logicalcpu)" else DOCKER_EXEC free -m -h DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\) DOCKER_EXEC echo $(lscpu | grep Endian) - DOCKER_EXEC echo "Free disk space:" - DOCKER_EXEC df -h fi +DOCKER_EXEC echo "Free disk space:" +DOCKER_EXEC df -h if [ ! 
-d ${DIR_QA_ASSETS} ]; then DOCKER_EXEC git clone --depth=1 https://github.com/bitcoin-core/qa-assets ${DIR_QA_ASSETS} diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh index 8ce839fc04..42c244c2f5 100755 --- a/ci/test/05_before_script.sh +++ b/ci/test/05_before_script.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 # Make sure default datadir does not exist and is never read by creating a dummy file -if [ "$TRAVIS_OS_NAME" == "osx" ]; then +if [ "$CI_OS_NAME" == "macos" ]; then echo > $HOME/Library/Application\ Support/Bitcoin else DOCKER_EXEC echo \> \$HOME/.bitcoin diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh index 87e9f31d0f..7aea21f257 100755 --- a/ci/test/06_script_b.sh +++ b/ci/test/06_script_b.sh @@ -6,6 +6,15 @@ export LC_ALL=C.UTF-8 +if [[ $HOST = *-mingw32 ]]; then + BEGIN_FOLD wrap-wine + # Generate all binaries, so that they can be wrapped + DOCKER_EXEC make $MAKEJOBS -C src/secp256k1 VERBOSE=1 + DOCKER_EXEC make $MAKEJOBS -C src/univalue VERBOSE=1 + DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh" + END_FOLD +fi + if [ -n "$QEMU_USER_CMD" ]; then BEGIN_FOLD wrap-qemu # Generate all binaries, so that they can be wrapped diff --git a/ci/test/wrap-wine.sh b/ci/test/wrap-wine.sh new file mode 100755 index 0000000000..58a8983e6e --- /dev/null +++ b/ci/test/wrap-wine.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul,test_json,unitester,object}}.exe; do + # shellcheck disable=SC2044 + for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename $b_name)"); do + if (file "$b" | grep "Windows"); then + echo "Wrap $b ..." 
+ mv "$b" "${b}_orig" + echo '#!/usr/bin/env bash' > "$b" + echo "wine64 \"${b}_orig\" \"\$@\"" >> "$b" + chmod +x "$b" + fi + done +done diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index 65f9a2e5c9..ec716cf2f3 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -112,7 +112,7 @@ script: | # Create the source tarball mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD ORIGPATH="$PATH" # Extract the git archive into a dir for each host and build @@ -129,7 +129,7 @@ script: | cd distsrc-${i} INSTALLPATH="${PWD}/installed/${DISTNAME}" mkdir -p ${INSTALLPATH} - tar -xf $GIT_ARCHIVE + tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml index e0aaafc15a..df50f45188 100644 --- a/contrib/gitian-descriptors/gitian-osx.yml +++ b/contrib/gitian-descriptors/gitian-osx.yml @@ -111,7 +111,7 @@ script: | # Create the source tarball mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD ORIGPATH="$PATH" # Extract the git archive into a dir for each host and build @@ -121,7 +121,7 @@ script: | cd distsrc-${i} INSTALLPATH="${PWD}/installed/${DISTNAME}" mkdir -p ${INSTALLPATH} - tar -xf $GIT_ARCHIVE + tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml index 5f671b95ce..4d61cded74 100644 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -116,7 +116,7 @@ script: | # Create the source tarball mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD ORIGPATH="$PATH" # Extract the git archive into a dir for each host and build @@ -126,7 +126,7 @@ script: | cd distsrc-${i} INSTALLPATH="${PWD}/installed/${DISTNAME}" mkdir -p ${INSTALLPATH} - tar -xf $GIT_ARCHIVE + tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 5be3baeefa..d658c4f6a6 100644 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -158,7 +158,7 @@ GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz" # Create the source tarball if not already there if [ ! 
-e "$GIT_ARCHIVE" ]; then mkdir -p "$(dirname "$GIT_ARCHIVE")" - git archive --output="$GIT_ARCHIVE" HEAD + git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD fi ########################### @@ -193,7 +193,7 @@ export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" cd "$DISTSRC" # Extract the source tarball - tar -xf "${GIT_ARCHIVE}" + tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh diff --git a/doc/release-notes.md b/doc/release-notes.md index d3983b1689..0331328ff5 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -65,8 +65,8 @@ format of this file has been changed in a backwards-incompatible way in order to accommodate the storage of Tor v3 and other BIP155 addresses. This means that if the file is modified by 0.21.0 or newer then older versions will not be able to read it. Those old versions, in the event of a downgrade, will log an error -message that deserialization has failed and will continue normal operation -as if the file was missing, creating a new empty one. (#19954) +message "Incorrect keysize in addrman deserialization" and will continue normal +operation as if the file was missing, creating a new empty one. (#19954) Notable changes =============== diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 7fac78f973..9cc383c240 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -7,6 +7,7 @@ FUZZ_TARGETS = \ test/fuzz/addr_info_deserialize \ test/fuzz/addrdb \ test/fuzz/address_deserialize \ + test/fuzz/addrman \ test/fuzz/addrman_deserialize \ test/fuzz/asmap \ test/fuzz/asmap_direct \ @@ -35,6 +36,7 @@ FUZZ_TARGETS = \ test/fuzz/checkqueue \ test/fuzz/coins_deserialize \ test/fuzz/coins_view \ + test/fuzz/connman \ test/fuzz/crypto \ test/fuzz/crypto_aes256 \ test/fuzz/crypto_aes256cbc \ @@ -352,6 +354,12 @@ test_fuzz_address_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_address_deserialize_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) test_fuzz_address_deserialize_SOURCES = test/fuzz/deserialize.cpp +test_fuzz_addrman_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_addrman_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_addrman_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_addrman_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) +test_fuzz_addrman_SOURCES = test/fuzz/addrman.cpp + test_fuzz_addrman_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDRMAN_DESERIALIZE=1 test_fuzz_addrman_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_addrman_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -520,6 +528,12 @@ test_fuzz_coins_view_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_coins_view_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) test_fuzz_coins_view_SOURCES = test/fuzz/coins_view.cpp +test_fuzz_connman_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_connman_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_connman_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_connman_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) +test_fuzz_connman_SOURCES = test/fuzz/connman.cpp + test_fuzz_crypto_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_crypto_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_crypto_LDADD = $(FUZZ_SUITE_LD_COMMON) diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index d7bc73defb..0621da8ddf 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -15,6 +15,7 @@ TEST_UTIL_H = \ test/util/setup_common.h \ test/util/str.h \ test/util/transaction_utils.h \ + test/util/validation.h \ test/util/wallet.h libtest_util_a_CPPFLAGS 
= $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) @@ -27,6 +28,7 @@ libtest_util_a_SOURCES = \ test/util/setup_common.cpp \ test/util/str.cpp \ test/util/transaction_utils.cpp \ + test/util/validation.cpp \ test/util/wallet.cpp \ $(TEST_UTIL_H) diff --git a/src/addrman.h b/src/addrman.h index b4089dc894..04dd30b375 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -7,6 +7,7 @@ #define BITCOIN_ADDRMAN_H #include <clientversion.h> +#include <config/bitcoin-config.h> #include <netaddress.h> #include <protocol.h> #include <random.h> @@ -176,6 +177,28 @@ protected: mutable RecursiveMutex cs; private: + //! Serialization versions. + enum Format : uint8_t { + V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88 + V1_DETERMINISTIC = 1, //!< for pre-asmap files + V2_ASMAP = 2, //!< for files including asmap version + V3_BIP155 = 3, //!< same as V2_ASMAP plus addresses are in BIP155 format + }; + + //! The maximum format this software knows it can unserialize. Also, we always serialize + //! in this format. + //! The format (first byte in the serialized stream) can be higher than this and + //! still this software may be able to unserialize the file - if the second byte + //! (see `lowest_compatible` in `Unserialize()`) is less or equal to this. + static constexpr Format FILE_FORMAT = Format::V3_BIP155; + + //! The initial value of a field that is incremented every time an incompatible format + //! change is made (such that old software versions would not be able to parse and + //! understand the new file format). This is 32 because we overtook the "key size" + //! field which was 32 historically. + //! @note Don't increment this. Increment `lowest_compatible` in `Serialize()` instead. + static constexpr uint8_t INCOMPATIBILITY_BASE = 32; + //! last used nId int nIdCount GUARDED_BY(cs); @@ -265,14 +288,6 @@ protected: void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs); public: - //! Serialization versions. - enum class Format : uint8_t { - V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88 - V1_DETERMINISTIC = 1, //!< for pre-asmap files - V2_ASMAP = 2, //!< for files including asmap version - V3_BIP155 = 3, //!< same as V2_ASMAP plus addresses are in BIP155 format - }; - // Compressed IP->ASN mapping, loaded from a file when a node starts. // Should be always empty if no file was provided. // This mapping is then used for bucketing nodes in Addrman. @@ -295,8 +310,18 @@ public: /** * Serialized format. - * * version byte (@see `Format`) - * * 0x20 + nKey (serialized as if it were a vector, for backward compatibility) + * * format version byte (@see `Format`) + * * lowest compatible format version byte. This is used to help old software decide + * whether to parse the file. For example: + * * Bitcoin Core version N knows how to parse up to format=3. If a new format=4 is + * introduced in version N+1 that is compatible with format=3 and it is known that + * version N will be able to parse it, then version N+1 will write + * (format=4, lowest_compatible=3) in the first two bytes of the file, and so + * version N will still try to parse it. + * * Bitcoin Core version N+2 introduces a new incompatible format=5. It will write + * (format=5, lowest_compatible=5) and so any versions that do not know how to parse + * format=5 will not try to read the file. 
+ * * nKey * * nNew * * nTried * * number of "new" buckets XOR 2**30 @@ -327,12 +352,17 @@ public: { LOCK(cs); - // Always serialize in the latest version (currently Format::V3_BIP155). + // Always serialize in the latest version (FILE_FORMAT). OverrideStream<Stream> s(&s_, s_.GetType(), s_.GetVersion() | ADDRV2_FORMAT); - s << static_cast<uint8_t>(Format::V3_BIP155); - s << ((unsigned char)32); + s << static_cast<uint8_t>(FILE_FORMAT); + + // Increment `lowest_compatible` iff a newly introduced format is incompatible with + // the previous one. + static constexpr uint8_t lowest_compatible = Format::V3_BIP155; + s << static_cast<uint8_t>(INCOMPATIBILITY_BASE + lowest_compatible); + s << nKey; s << nNew; s << nTried; @@ -392,15 +422,6 @@ public: Format format; s_ >> Using<CustomUintFormatter<1>>(format); - static constexpr Format maximum_supported_format = Format::V3_BIP155; - if (format > maximum_supported_format) { - throw std::ios_base::failure(strprintf( - "Unsupported format of addrman database: %u. Maximum supported is %u. " - "Continuing operation without using the saved list of peers.", - static_cast<uint8_t>(format), - static_cast<uint8_t>(maximum_supported_format))); - } - int stream_version = s_.GetVersion(); if (format >= Format::V3_BIP155) { // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress @@ -410,9 +431,16 @@ public: OverrideStream<Stream> s(&s_, s_.GetType(), stream_version); - unsigned char nKeySize; - s >> nKeySize; - if (nKeySize != 32) throw std::ios_base::failure("Incorrect keysize in addrman deserialization"); + uint8_t compat; + s >> compat; + const uint8_t lowest_compatible = compat - INCOMPATIBILITY_BASE; + if (lowest_compatible > FILE_FORMAT) { + throw std::ios_base::failure(strprintf( + "Unsupported format of addrman database: %u. 
It is compatible with formats >=%u, " + "but the maximum supported by this version of %s is %u.", + format, lowest_compatible, PACKAGE_NAME, static_cast<uint8_t>(FILE_FORMAT))); + } + s >> nKey; s >> nNew; s >> nTried; diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 9c32f0db4c..fedb032db2 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -322,8 +322,8 @@ public: consensus.nPowTargetSpacing = 10 * 60; consensus.fPowAllowMinDifficultyBlocks = false; consensus.fPowNoRetargeting = false; - consensus.nRuleChangeActivationThreshold = 1916; - consensus.nMinerConfirmationWindow = 2016; + consensus.nRuleChangeActivationThreshold = 1916; // 95% of 2016 + consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing consensus.MinBIP9WarningHeight = 0; consensus.powLimit = uint256S("00000377ae000000000000000000000000000000000000000000000000000000"); consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28; diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp index 8d2dcd0279..4543f098a1 100644 --- a/src/dummywallet.cpp +++ b/src/dummywallet.cpp @@ -40,7 +40,6 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const "-salvagewallet", "-spendzeroconfchange", "-txconfirmtarget=<n>", - "-upgradewallet", "-wallet=<path>", "-walletbroadcast", "-walletdir=<dir>", diff --git a/src/init.cpp b/src/init.cpp index 1387d6b982..495d96f938 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1145,7 +1145,7 @@ bool AppInitParameterInteraction(const ArgsManager& args) if (!ParseMoney(args.GetArg("-minrelaytxfee", ""), n)) { return InitError(AmountErrMsg("minrelaytxfee", args.GetArg("-minrelaytxfee", ""))); } - // High fee check is done afterward in CWallet::CreateWalletFromFile() + // High fee check is done afterward in CWallet::Create() ::minRelayTxFee = CFeeRate(n); } else if (incrementalRelayFee > ::minRelayTxFee) { // Allow only setting incrementalRelayFee to control both diff --git a/src/netaddress.cpp b/src/netaddress.cpp index c0193fa2e9..35e9161f58 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -255,10 +255,14 @@ bool CNetAddr::SetSpecial(const std::string& str) Span<const uint8_t> input_checksum{input.data() + ADDR_TORV3_SIZE, torv3::CHECKSUM_LEN}; Span<const uint8_t> input_version{input.data() + ADDR_TORV3_SIZE + torv3::CHECKSUM_LEN, sizeof(torv3::VERSION)}; + if (input_version != torv3::VERSION) { + return false; + } + uint8_t calculated_checksum[torv3::CHECKSUM_LEN]; torv3::Checksum(input_pubkey, calculated_checksum); - if (input_checksum != calculated_checksum || input_version != torv3::VERSION) { + if (input_checksum != calculated_checksum) { return false; } diff --git a/src/netaddress.h b/src/netaddress.h index f35b01d202..29b2eaafeb 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -29,7 +29,7 @@ * Make sure that this does not collide with any of the values in `version.h` * or with `SERIALIZE_TRANSACTION_NO_WITNESS`. */ -static const int ADDRV2_FORMAT = 0x20000000; +static constexpr int ADDRV2_FORMAT = 0x20000000; /** * A network type. 
diff --git a/src/netbase.cpp b/src/netbase.cpp index 0273839017..264029d8a2 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -52,14 +52,20 @@ enum Network ParseNetwork(const std::string& net_in) { return NET_UNROUTABLE; } -std::string GetNetworkName(enum Network net) { - switch(net) - { +std::string GetNetworkName(enum Network net) +{ + switch (net) { + case NET_UNROUTABLE: return "unroutable"; case NET_IPV4: return "ipv4"; case NET_IPV6: return "ipv6"; case NET_ONION: return "onion"; - default: return ""; - } + case NET_I2P: return "i2p"; + case NET_CJDNS: return "cjdns"; + case NET_INTERNAL: return "internal"; + case NET_MAX: assert(false); + } // no default case, so the compiler can warn about missing cases + + assert(false); } bool static LookupIntern(const std::string& name, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup) diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp index c560dc58e7..3148089b52 100644 --- a/src/qt/transactiontablemodel.cpp +++ b/src/qt/transactiontablemodel.cpp @@ -54,6 +54,30 @@ struct TxLessThan } }; +// queue notifications to show a non freezing progress dialog e.g. for rescan +struct TransactionNotification +{ +public: + TransactionNotification() {} + TransactionNotification(uint256 _hash, ChangeType _status, bool _showTransaction): + hash(_hash), status(_status), showTransaction(_showTransaction) {} + + void invoke(QObject *ttm) + { + QString strHash = QString::fromStdString(hash.GetHex()); + qDebug() << "NotifyTransactionChanged: " + strHash + " status= " + QString::number(status); + bool invoked = QMetaObject::invokeMethod(ttm, "updateTransaction", Qt::QueuedConnection, + Q_ARG(QString, strHash), + Q_ARG(int, status), + Q_ARG(bool, showTransaction)); + assert(invoked); + } +private: + uint256 hash; + ChangeType status; + bool showTransaction; +}; + // Private implementation class TransactionTablePriv { @@ -71,6 +95,12 @@ public: */ QList<TransactionRecord> cachedWallet; + bool fQueueNotifications = false; + std::vector< TransactionNotification > vQueueNotifications; + + void NotifyTransactionChanged(const uint256 &hash, ChangeType status); + void ShowProgress(const std::string &title, int nProgress); + /* Query entire wallet anew from core. */ void refreshWallet(interfaces::Wallet& wallet) @@ -674,34 +704,7 @@ void TransactionTableModel::updateDisplayUnit() Q_EMIT dataChanged(index(0, Amount), index(priv->size()-1, Amount)); } -// queue notifications to show a non freezing progress dialog e.g. 
for rescan -struct TransactionNotification -{ -public: - TransactionNotification() {} - TransactionNotification(uint256 _hash, ChangeType _status, bool _showTransaction): - hash(_hash), status(_status), showTransaction(_showTransaction) {} - - void invoke(QObject *ttm) - { - QString strHash = QString::fromStdString(hash.GetHex()); - qDebug() << "NotifyTransactionChanged: " + strHash + " status= " + QString::number(status); - bool invoked = QMetaObject::invokeMethod(ttm, "updateTransaction", Qt::QueuedConnection, - Q_ARG(QString, strHash), - Q_ARG(int, status), - Q_ARG(bool, showTransaction)); - assert(invoked); - } -private: - uint256 hash; - ChangeType status; - bool showTransaction; -}; - -static bool fQueueNotifications = false; -static std::vector< TransactionNotification > vQueueNotifications; - -static void NotifyTransactionChanged(TransactionTableModel *ttm, const uint256 &hash, ChangeType status) +void TransactionTablePriv::NotifyTransactionChanged(const uint256 &hash, ChangeType status) { // Find transaction in wallet // Determine whether to show transaction or not (determine this here so that no relocking is needed in GUI thread) @@ -714,10 +717,10 @@ static void NotifyTransactionChanged(TransactionTableModel *ttm, const uint256 & vQueueNotifications.push_back(notification); return; } - notification.invoke(ttm); + notification.invoke(parent); } -static void ShowProgress(TransactionTableModel *ttm, const std::string &title, int nProgress) +void TransactionTablePriv::ShowProgress(const std::string &title, int nProgress) { if (nProgress == 0) fQueueNotifications = true; @@ -726,27 +729,27 @@ static void ShowProgress(TransactionTableModel *ttm, const std::string &title, i { fQueueNotifications = false; if (vQueueNotifications.size() > 10) { // prevent balloon spam, show maximum 10 balloons - bool invoked = QMetaObject::invokeMethod(ttm, "setProcessingQueuedTransactions", Qt::QueuedConnection, Q_ARG(bool, true)); + bool invoked = QMetaObject::invokeMethod(parent, "setProcessingQueuedTransactions", Qt::QueuedConnection, Q_ARG(bool, true)); assert(invoked); } for (unsigned int i = 0; i < vQueueNotifications.size(); ++i) { if (vQueueNotifications.size() - i <= 10) { - bool invoked = QMetaObject::invokeMethod(ttm, "setProcessingQueuedTransactions", Qt::QueuedConnection, Q_ARG(bool, false)); + bool invoked = QMetaObject::invokeMethod(parent, "setProcessingQueuedTransactions", Qt::QueuedConnection, Q_ARG(bool, false)); assert(invoked); } - vQueueNotifications[i].invoke(ttm); + vQueueNotifications[i].invoke(parent); } - std::vector<TransactionNotification >().swap(vQueueNotifications); // clear + vQueueNotifications.clear(); } } void TransactionTableModel::subscribeToCoreSignals() { // Connect signals to wallet - m_handler_transaction_changed = walletModel->wallet().handleTransactionChanged(std::bind(NotifyTransactionChanged, this, std::placeholders::_1, std::placeholders::_2)); - m_handler_show_progress = walletModel->wallet().handleShowProgress(std::bind(ShowProgress, this, std::placeholders::_1, std::placeholders::_2)); + m_handler_transaction_changed = walletModel->wallet().handleTransactionChanged(std::bind(&TransactionTablePriv::NotifyTransactionChanged, priv, std::placeholders::_1, std::placeholders::_2)); + m_handler_show_progress = walletModel->wallet().handleShowProgress(std::bind(&TransactionTablePriv::ShowProgress, priv, std::placeholders::_1, std::placeholders::_2)); } void TransactionTableModel::unsubscribeFromCoreSignals() diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 
b81e6414a5..f98ea63782 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -497,11 +497,9 @@ static RPCHelpMan getnettotals() static UniValue GetNetworksInfo() { UniValue networks(UniValue::VARR); - for(int n=0; n<NET_MAX; ++n) - { + for (int n = 0; n < NET_MAX; ++n) { enum Network network = static_cast<enum Network>(n); - if(network == NET_UNROUTABLE || network == NET_INTERNAL) - continue; + if (network == NET_UNROUTABLE || network == NET_I2P || network == NET_CJDNS || network == NET_INTERNAL) continue; proxyType proxy; UniValue obj(UniValue::VOBJ); GetProxy(network, proxy); diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp new file mode 100644 index 0000000000..0ceeea2d36 --- /dev/null +++ b/src/test/fuzz/addrman.cpp @@ -0,0 +1,119 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <addrdb.h> +#include <addrman.h> +#include <chainparams.h> +#include <merkleblock.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> +#include <time.h> +#include <util/asmap.h> + +#include <cstdint> +#include <optional> +#include <string> +#include <vector> + +void initialize() +{ + SelectParams(CBaseChainParams::REGTEST); +} + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + + SetMockTime(ConsumeTime(fuzzed_data_provider)); + CAddrMan addr_man; + if (fuzzed_data_provider.ConsumeBool()) { + addr_man.m_asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider); + if (!SanityCheckASMap(addr_man.m_asmap)) { + addr_man.m_asmap.clear(); + } + } + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 11)) { + case 0: { + addr_man.Clear(); + break; + } + case 1: { + addr_man.ResolveCollisions(); + break; + } + case 2: { + (void)addr_man.SelectTriedCollision(); + break; + } + case 3: { + (void)addr_man.Select(fuzzed_data_provider.ConsumeBool()); + break; + } + case 4: { + (void)addr_man.GetAddr(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)); + break; + } + case 5: { + const std::optional<CAddress> opt_address = ConsumeDeserializable<CAddress>(fuzzed_data_provider); + const std::optional<CNetAddr> opt_net_addr = ConsumeDeserializable<CNetAddr>(fuzzed_data_provider); + if (opt_address && opt_net_addr) { + addr_man.Add(*opt_address, *opt_net_addr, fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 100000000)); + } + break; + } + case 6: { + std::vector<CAddress> addresses; + while (fuzzed_data_provider.ConsumeBool()) { + const std::optional<CAddress> opt_address = ConsumeDeserializable<CAddress>(fuzzed_data_provider); + if (!opt_address) { + break; + } + addresses.push_back(*opt_address); + } + const std::optional<CNetAddr> opt_net_addr = ConsumeDeserializable<CNetAddr>(fuzzed_data_provider); + if (opt_net_addr) { + addr_man.Add(addresses, *opt_net_addr, fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 100000000)); + } + break; + } + case 7: { + const std::optional<CService> opt_service = ConsumeDeserializable<CService>(fuzzed_data_provider); + if (opt_service) { + addr_man.Good(*opt_service, fuzzed_data_provider.ConsumeBool(), ConsumeTime(fuzzed_data_provider)); + } + break; + } + case 8: { + const std::optional<CService> opt_service = 
ConsumeDeserializable<CService>(fuzzed_data_provider); + if (opt_service) { + addr_man.Attempt(*opt_service, fuzzed_data_provider.ConsumeBool(), ConsumeTime(fuzzed_data_provider)); + } + break; + } + case 9: { + const std::optional<CService> opt_service = ConsumeDeserializable<CService>(fuzzed_data_provider); + if (opt_service) { + addr_man.Connected(*opt_service, ConsumeTime(fuzzed_data_provider)); + } + break; + } + case 10: { + const std::optional<CService> opt_service = ConsumeDeserializable<CService>(fuzzed_data_provider); + if (opt_service) { + addr_man.SetServices(*opt_service, ServiceFlags{fuzzed_data_provider.ConsumeIntegral<uint64_t>()}); + } + break; + } + case 11: { + (void)addr_man.Check(); + break; + } + } + } + (void)addr_man.size(); + CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION); + data_stream << addr_man; +} diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp new file mode 100644 index 0000000000..6521c3f3b2 --- /dev/null +++ b/src/test/fuzz/connman.cpp @@ -0,0 +1,162 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <chainparams.h> +#include <chainparamsbase.h> +#include <net.h> +#include <netaddress.h> +#include <protocol.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <test/fuzz/util.h> +#include <util/translation.h> + +#include <cstdint> +#include <vector> + +void initialize() +{ + InitializeFuzzingContext(); +} + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + CConnman connman{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeBool()}; + CAddress random_address; + CNetAddr random_netaddr; + CNode random_node = ConsumeNode(fuzzed_data_provider); + CService random_service; + CSubNet random_subnet; + std::string random_string; + while (fuzzed_data_provider.ConsumeBool()) { + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 30)) { + case 0: + random_address = ConsumeAddress(fuzzed_data_provider); + break; + case 1: + random_netaddr = ConsumeNetAddr(fuzzed_data_provider); + break; + case 2: + random_service = ConsumeService(fuzzed_data_provider); + break; + case 3: + random_subnet = ConsumeSubNet(fuzzed_data_provider); + break; + case 4: + random_string = fuzzed_data_provider.ConsumeRandomLengthString(64); + break; + case 5: { + std::vector<CAddress> addresses; + while (fuzzed_data_provider.ConsumeBool()) { + addresses.push_back(ConsumeAddress(fuzzed_data_provider)); + } + // Limit nTimePenalty to int32_t to avoid signed integer overflow + (void)connman.AddNewAddresses(addresses, ConsumeAddress(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int32_t>()); + break; + } + case 6: + connman.AddNode(random_string); + break; + case 7: + connman.CheckIncomingNonce(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + case 8: + connman.DisconnectNode(fuzzed_data_provider.ConsumeIntegral<NodeId>()); + break; + case 9: + connman.DisconnectNode(random_netaddr); + break; + case 10: + connman.DisconnectNode(random_string); + break; + case 11: + connman.DisconnectNode(random_subnet); + break; + case 12: + connman.ForEachNode([](auto) {}); + break; + case 13: + connman.ForEachNodeThen([](auto) {}, []() {}); + break; + case 14: + 
(void)connman.ForNode(fuzzed_data_provider.ConsumeIntegral<NodeId>(), [&](auto) { return fuzzed_data_provider.ConsumeBool(); }); + break; + case 15: + (void)connman.GetAddresses(fuzzed_data_provider.ConsumeIntegral<size_t>(), fuzzed_data_provider.ConsumeIntegral<size_t>()); + break; + case 16: { + (void)connman.GetAddresses(random_node, fuzzed_data_provider.ConsumeIntegral<size_t>(), fuzzed_data_provider.ConsumeIntegral<size_t>()); + break; + } + case 17: + (void)connman.GetDeterministicRandomizer(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + case 18: + (void)connman.GetNodeCount(fuzzed_data_provider.PickValueInArray({CConnman::CONNECTIONS_NONE, CConnman::CONNECTIONS_IN, CConnman::CONNECTIONS_OUT, CConnman::CONNECTIONS_ALL})); + break; + case 19: + connman.MarkAddressGood(random_address); + break; + case 20: + (void)connman.OutboundTargetReached(fuzzed_data_provider.ConsumeBool()); + break; + case 21: + // Limit now to int32_t to avoid signed integer overflow + (void)connman.PoissonNextSendInbound(fuzzed_data_provider.ConsumeIntegral<int32_t>(), fuzzed_data_provider.ConsumeIntegral<int>()); + break; + case 22: { + CSerializedNetMsg serialized_net_msg; + serialized_net_msg.m_type = fuzzed_data_provider.ConsumeRandomLengthString(CMessageHeader::COMMAND_SIZE); + serialized_net_msg.data = ConsumeRandomLengthByteVector(fuzzed_data_provider); + connman.PushMessage(&random_node, std::move(serialized_net_msg)); + break; + } + case 23: + connman.RemoveAddedNode(random_string); + break; + case 24: { + const std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider); + if (SanityCheckASMap(asmap)) { + connman.SetAsmap(asmap); + } + break; + } + case 25: + connman.SetBestHeight(fuzzed_data_provider.ConsumeIntegral<int>()); + break; + case 26: + connman.SetMaxOutboundTarget(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + case 27: + connman.SetMaxOutboundTimeframe(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + case 28: + connman.SetNetworkActive(fuzzed_data_provider.ConsumeBool()); + break; + case 29: + connman.SetServices(random_service, static_cast<ServiceFlags>(fuzzed_data_provider.ConsumeIntegral<uint64_t>())); + break; + case 30: + connman.SetTryNewOutboundPeer(fuzzed_data_provider.ConsumeBool()); + break; + } + } + (void)connman.GetAddedNodeInfo(); + (void)connman.GetBestHeight(); + (void)connman.GetExtraOutboundCount(); + (void)connman.GetLocalServices(); + (void)connman.GetMaxOutboundTarget(); + (void)connman.GetMaxOutboundTimeframe(); + (void)connman.GetMaxOutboundTimeLeftInCycle(); + (void)connman.GetNetworkActive(); + std::vector<CNodeStats> stats; + connman.GetNodeStats(stats); + (void)connman.GetOutboundTargetBytesLeft(); + (void)connman.GetReceiveFloodSize(); + (void)connman.GetTotalBytesRecv(); + (void)connman.GetTotalBytesSent(); + (void)connman.GetTryNewOutboundPeer(); + (void)connman.GetUseAddrmanOutgoing(); +} diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 9803fdc882..8ca5366c8a 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -15,6 +15,7 @@ #include <net.h> #include <netbase.h> #include <node/utxo_snapshot.h> +#include <optional.h> #include <primitives/block.h> #include <protocol.h> #include <psbt.h> @@ -61,15 +62,19 @@ T Deserialize(CDataStream ds) } template <typename T> -void DeserializeFromFuzzingInput(const std::vector<uint8_t>& buffer, T& obj) +void DeserializeFromFuzzingInput(const std::vector<uint8_t>& buffer, T& obj, const Optional<int> 
protocol_version = nullopt) { CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION); - try { - int version; - ds >> version; - ds.SetVersion(version); - } catch (const std::ios_base::failure&) { - throw invalid_fuzzing_input_exception(); + if (protocol_version) { + ds.SetVersion(*protocol_version); + } else { + try { + int version; + ds >> version; + ds.SetVersion(version); + } catch (const std::ios_base::failure&) { + throw invalid_fuzzing_input_exception(); + } } try { ds >> obj; @@ -125,9 +130,15 @@ void test_one_input(const std::vector<uint8_t>& buffer) CScript script; DeserializeFromFuzzingInput(buffer, script); #elif SUB_NET_DESERIALIZE - CSubNet sub_net; - DeserializeFromFuzzingInput(buffer, sub_net); - AssertEqualAfterSerializeDeserialize(sub_net); + CSubNet sub_net_1; + DeserializeFromFuzzingInput(buffer, sub_net_1, INIT_PROTO_VERSION); + AssertEqualAfterSerializeDeserialize(sub_net_1, INIT_PROTO_VERSION); + CSubNet sub_net_2; + DeserializeFromFuzzingInput(buffer, sub_net_2, INIT_PROTO_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(sub_net_2, INIT_PROTO_VERSION | ADDRV2_FORMAT); + CSubNet sub_net_3; + DeserializeFromFuzzingInput(buffer, sub_net_3); + AssertEqualAfterSerializeDeserialize(sub_net_3, INIT_PROTO_VERSION | ADDRV2_FORMAT); #elif TX_IN_DESERIALIZE CTxIn tx_in; DeserializeFromFuzzingInput(buffer, tx_in); @@ -195,6 +206,13 @@ void test_one_input(const std::vector<uint8_t>& buffer) AssertEqualAfterSerializeDeserialize(s); } AssertEqualAfterSerializeDeserialize(s, INIT_PROTO_VERSION | ADDRV2_FORMAT); + CService s1; + DeserializeFromFuzzingInput(buffer, s1, INIT_PROTO_VERSION); + AssertEqualAfterSerializeDeserialize(s1, INIT_PROTO_VERSION); + assert(s1.IsAddrV1Compatible()); + CService s2; + DeserializeFromFuzzingInput(buffer, s2, INIT_PROTO_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(s2, INIT_PROTO_VERSION | ADDRV2_FORMAT); #elif MESSAGEHEADER_DESERIALIZE CMessageHeader mh; DeserializeFromFuzzingInput(buffer, mh); diff --git a/src/test/fuzz/merkleblock.cpp b/src/test/fuzz/merkleblock.cpp index c44e334272..4710e75757 100644 --- a/src/test/fuzz/merkleblock.cpp +++ b/src/test/fuzz/merkleblock.cpp @@ -16,12 +16,36 @@ void test_one_input(const std::vector<uint8_t>& buffer) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); - std::optional<CPartialMerkleTree> partial_merkle_tree = ConsumeDeserializable<CPartialMerkleTree>(fuzzed_data_provider); - if (!partial_merkle_tree) { - return; + CPartialMerkleTree partial_merkle_tree; + switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 1)) { + case 0: { + const std::optional<CPartialMerkleTree> opt_partial_merkle_tree = ConsumeDeserializable<CPartialMerkleTree>(fuzzed_data_provider); + if (opt_partial_merkle_tree) { + partial_merkle_tree = *opt_partial_merkle_tree; + } + break; } - (void)partial_merkle_tree->GetNumTransactions(); + case 1: { + CMerkleBlock merkle_block; + const std::optional<CBlock> opt_block = ConsumeDeserializable<CBlock>(fuzzed_data_provider); + CBloomFilter bloom_filter; + std::set<uint256> txids; + if (opt_block && !opt_block->vtx.empty()) { + if (fuzzed_data_provider.ConsumeBool()) { + merkle_block = CMerkleBlock{*opt_block, bloom_filter}; + } else if (fuzzed_data_provider.ConsumeBool()) { + while (fuzzed_data_provider.ConsumeBool()) { + txids.insert(ConsumeUInt256(fuzzed_data_provider)); + } + merkle_block = CMerkleBlock{*opt_block, txids}; + } + } + partial_merkle_tree = merkle_block.txn; + break; + } + } + 
(void)partial_merkle_tree.GetNumTransactions(); std::vector<uint256> matches; std::vector<unsigned int> indices; - (void)partial_merkle_tree->ExtractMatches(matches, indices); + (void)partial_merkle_tree.ExtractMatches(matches, indices); } diff --git a/src/test/fuzz/net.cpp b/src/test/fuzz/net.cpp index 3818838765..c61d406291 100644 --- a/src/test/fuzz/net.cpp +++ b/src/test/fuzz/net.cpp @@ -63,7 +63,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) break; } case 3: { - const std::vector<bool> asmap = ConsumeRandomLengthIntegralVector<bool>(fuzzed_data_provider, 128); + const std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider); if (!SanityCheckASMap(asmap)) { break; } diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index 3ef03137ec..9390399878 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -16,6 +16,7 @@ #include <test/util/mining.h> #include <test/util/net.h> #include <test/util/setup_common.h> +#include <test/util/validation.h> #include <util/memory.h> #include <validationinterface.h> #include <version.h> @@ -63,10 +64,14 @@ void test_one_input(const std::vector<uint8_t>& buffer) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); ConnmanTestMsg& connman = *(ConnmanTestMsg*)g_setup->m_node.connman.get(); + TestChainState& chainstate = *(TestChainState*)&g_setup->m_node.chainman->ActiveChainstate(); + chainstate.ResetIbd(); const std::string random_message_type{fuzzed_data_provider.ConsumeBytesAsString(CMessageHeader::COMMAND_SIZE).c_str()}; if (!LIMIT_TO_MESSAGE_TYPE.empty() && random_message_type != LIMIT_TO_MESSAGE_TYPE) { return; } + const bool jump_out_of_ibd{fuzzed_data_provider.ConsumeBool()}; + if (jump_out_of_ibd) chainstate.JumpOutOfIbd(); CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION}; CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND_FULL_RELAY).release(); p2p_node.fSuccessfullyConnected = true; @@ -76,7 +81,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) g_setup->m_node.peerman->InitializeNode(&p2p_node); try { g_setup->m_node.peerman->ProcessMessage(p2p_node, random_message_type, random_bytes_data_stream, - GetTime<std::chrono::microseconds>(), std::atomic<bool>{false}); + GetTime<std::chrono::microseconds>(), std::atomic<bool>{false}); } catch (const std::ios_base::failure&) { } SyncWithValidationInterfaceQueue(); diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index f722eeac3a..19ea92b750 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -12,6 +12,7 @@ #include <test/util/mining.h> #include <test/util/net.h> #include <test/util/setup_common.h> +#include <test/util/validation.h> #include <util/memory.h> #include <validation.h> #include <validationinterface.h> @@ -39,7 +40,10 @@ void test_one_input(const std::vector<uint8_t>& buffer) FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); ConnmanTestMsg& connman = *(ConnmanTestMsg*)g_setup->m_node.connman.get(); + TestChainState& chainstate = *(TestChainState*)&g_setup->m_node.chainman->ActiveChainstate(); + chainstate.ResetIbd(); std::vector<CNode*> peers; + bool jump_out_of_ibd{false}; const auto num_peers_to_add = 
fuzzed_data_provider.ConsumeIntegralInRange(1, 3); for (int i = 0; i < num_peers_to_add; ++i) { @@ -58,6 +62,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) } while (fuzzed_data_provider.ConsumeBool()) { + if (!jump_out_of_ibd) jump_out_of_ibd = fuzzed_data_provider.ConsumeBool(); + if (jump_out_of_ibd && chainstate.IsInitialBlockDownload()) chainstate.JumpOutOfIbd(); const std::string random_message_type{fuzzed_data_provider.ConsumeBytesAsString(CMessageHeader::COMMAND_SIZE).c_str()}; CSerializedNetMsg net_msg; diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h index ed6093a8a8..e99ed8d72d 100644 --- a/src/test/fuzz/util.h +++ b/src/test/fuzz/util.h @@ -11,6 +11,8 @@ #include <chainparamsbase.h> #include <coins.h> #include <consensus/consensus.h> +#include <merkleblock.h> +#include <net.h> #include <netaddress.h> #include <netbase.h> #include <primitives/transaction.h> @@ -23,6 +25,7 @@ #include <test/util/setup_common.h> #include <txmempool.h> #include <uint256.h> +#include <util/time.h> #include <version.h> #include <algorithm> @@ -38,6 +41,11 @@ NODISCARD inline std::vector<uint8_t> ConsumeRandomLengthByteVector(FuzzedDataPr return {s.begin(), s.end()}; } +NODISCARD inline std::vector<bool> ConsumeRandomLengthBitVector(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept +{ + return BytesToBits(ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length)); +} + NODISCARD inline CDataStream ConsumeDataStream(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept { return {ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length), SER_NETWORK, INIT_PROTO_VERSION}; @@ -88,6 +96,13 @@ NODISCARD inline CAmount ConsumeMoney(FuzzedDataProvider& fuzzed_data_provider) return fuzzed_data_provider.ConsumeIntegralInRange<CAmount>(0, MAX_MONEY); } +NODISCARD inline int64_t ConsumeTime(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + static const int64_t time_min = ParseISO8601DateTime("1970-01-01T00:00:00Z"); + static const int64_t time_max = ParseISO8601DateTime("9999-12-31T23:59:59Z"); + return fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(time_min, time_max); +} + NODISCARD inline CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider) noexcept { const std::vector<uint8_t> b = ConsumeRandomLengthByteVector(fuzzed_data_provider); @@ -260,6 +275,32 @@ CSubNet ConsumeSubNet(FuzzedDataProvider& fuzzed_data_provider) noexcept return {ConsumeNetAddr(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<uint8_t>()}; } +CService ConsumeService(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + return {ConsumeNetAddr(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<uint16_t>()}; +} + +CAddress ConsumeAddress(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + return {ConsumeService(fuzzed_data_provider), static_cast<ServiceFlags>(fuzzed_data_provider.ConsumeIntegral<uint64_t>()), fuzzed_data_provider.ConsumeIntegral<uint32_t>()}; +} + +CNode ConsumeNode(FuzzedDataProvider& fuzzed_data_provider) noexcept +{ + const NodeId node_id = fuzzed_data_provider.ConsumeIntegral<NodeId>(); + const ServiceFlags local_services = static_cast<ServiceFlags>(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + const int my_starting_height = fuzzed_data_provider.ConsumeIntegral<int>(); + const SOCKET socket = INVALID_SOCKET; + const CAddress address = ConsumeAddress(fuzzed_data_provider); + const uint64_t keyed_net_group = fuzzed_data_provider.ConsumeIntegral<uint64_t>(); + const uint64_t 
local_host_nonce = fuzzed_data_provider.ConsumeIntegral<uint64_t>(); + const CAddress addr_bind = ConsumeAddress(fuzzed_data_provider); + const std::string addr_name = fuzzed_data_provider.ConsumeRandomLengthString(64); + const ConnectionType conn_type = fuzzed_data_provider.PickValueInArray({ConnectionType::INBOUND, ConnectionType::OUTBOUND_FULL_RELAY, ConnectionType::MANUAL, ConnectionType::FEELER, ConnectionType::BLOCK_RELAY, ConnectionType::ADDR_FETCH}); + const bool inbound_onion = fuzzed_data_provider.ConsumeBool(); + return {node_id, local_services, my_starting_height, socket, address, keyed_net_group, local_host_nonce, addr_bind, addr_name, conn_type, inbound_onion}; +} + void InitializeFuzzingContext(const std::string& chain_name = CBaseChainParams::REGTEST) { static const BasicTestingSetup basic_testing_setup{chain_name, {"-nodebuglogfile"}}; diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index a09c8c122d..1812ce1666 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -11,6 +11,7 @@ #include <node/context.h> #include <pubkey.h> #include <random.h> +#include <stdexcept> #include <txmempool.h> #include <util/check.h> #include <util/string.h> @@ -158,13 +159,15 @@ std::ostream& operator<<(std::ostream& os, const uint256& num); * Use as * BOOST_CHECK_EXCEPTION(code that throws, exception type, HasReason("foo")); */ -class HasReason { +class HasReason +{ public: explicit HasReason(const std::string& reason) : m_reason(reason) {} - template <typename E> - bool operator() (const E& e) const { + bool operator()(const std::exception& e) const + { return std::string(e.what()).find(m_reason) != std::string::npos; }; + private: const std::string m_reason; }; diff --git a/src/test/util/validation.cpp b/src/test/util/validation.cpp new file mode 100644 index 0000000000..1aed492c3c --- /dev/null +++ b/src/test/util/validation.cpp @@ -0,0 +1,22 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <test/util/validation.h> + +#include <util/check.h> +#include <util/time.h> +#include <validation.h> + +void TestChainState::ResetIbd() +{ + m_cached_finished_ibd = false; + assert(IsInitialBlockDownload()); +} + +void TestChainState::JumpOutOfIbd() +{ + Assert(IsInitialBlockDownload()); + m_cached_finished_ibd = true; + Assert(!IsInitialBlockDownload()); +} diff --git a/src/test/util/validation.h b/src/test/util/validation.h new file mode 100644 index 0000000000..b13aa0be60 --- /dev/null +++ b/src/test/util/validation.h @@ -0,0 +1,17 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_TEST_UTIL_VALIDATION_H +#define BITCOIN_TEST_UTIL_VALIDATION_H + +#include <validation.h> + +struct TestChainState : public CChainState { + /** Reset the ibd cache to its initial state */ + void ResetIbd(); + /** Toggle IsInitialBlockDownload from true to false */ + void JumpOutOfIbd(); +}; + +#endif // BITCOIN_TEST_UTIL_VALIDATION_H diff --git a/src/validation.cpp b/src/validation.cpp index 8241cb159f..feb7502a0f 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5084,7 +5084,7 @@ bool LoadMempool(CTxMemPool& pool) pool.PrioritiseTransaction(tx->GetHash(), amountdelta); } TxValidationState state; - if (nTime + nExpiryTimeout > nNow) { + if (nTime > nNow - nExpiryTimeout) { LOCK(cs_main); AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime, nullptr /* plTxnReplaced */, false /* bypass_limits */, diff --git a/src/validation.h b/src/validation.h index 3d9fa92c15..ffb038ad75 100644 --- a/src/validation.h +++ b/src/validation.h @@ -503,9 +503,9 @@ enum class CoinsCacheSizeState * whereas block information and metadata independent of the current tip is * kept in `BlockMetadataManager`. */ -class CChainState { -private: - +class CChainState +{ +protected: /** * Every received block is assigned a unique and increasing identifier, so we * know which one to give priority in case of a fork. diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 884ab58497..6b46868d10 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -1523,7 +1523,9 @@ static UniValue ProcessDescriptorImport(CWallet * const pwallet, const UniValue& // Need to ExpandPrivate to check if private keys are available for all pubkeys FlatSigningProvider expand_keys; std::vector<CScript> scripts; - parsed_desc->Expand(0, keys, scripts, expand_keys); + if (!parsed_desc->Expand(0, keys, scripts, expand_keys)) { + throw JSONRPCError(RPC_WALLET_ERROR, "Cannot expand descriptor. 
Probably because of hardened derivations without private keys provided"); + } parsed_desc->ExpandPrivate(0, keys, expand_keys); // Check if all private keys are provided @@ -1559,7 +1561,7 @@ static UniValue ProcessDescriptorImport(CWallet * const pwallet, const UniValue& } // Add descriptor to the wallet - auto spk_manager = pwallet->AddWalletDescriptor(w_desc, keys, label); + auto spk_manager = pwallet->AddWalletDescriptor(w_desc, keys, label, internal); if (spk_manager == nullptr) { throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Could not add descriptor '%s'", descriptor)); } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index ebcab1227d..0fbb212732 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -8,6 +8,7 @@ #include <interfaces/chain.h> #include <key_io.h> #include <node/context.h> +#include <optional.h> #include <outputtype.h> #include <policy/feerate.h> #include <policy/fees.h> @@ -3593,7 +3594,7 @@ static RPCHelpMan rescanblockchain() } int start_height = 0; - Optional<int> stop_height; + Optional<int> stop_height = MakeOptional(false, int()); uint256 start_block; { LOCK(pwallet->cs_wallet); diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 188289b010..d2e1be6402 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -438,12 +438,12 @@ bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal) const return keypool_has_keys; } -bool LegacyScriptPubKeyMan::Upgrade(int prev_version, bilingual_str& error) +bool LegacyScriptPubKeyMan::Upgrade(int prev_version, int new_version, bilingual_str& error) { LOCK(cs_KeyStore); bool hd_upgrade = false; bool split_upgrade = false; - if (m_storage.CanSupportFeature(FEATURE_HD) && !IsHDEnabled()) { + if (IsFeatureSupported(new_version, FEATURE_HD) && !IsHDEnabled()) { WalletLogPrintf("Upgrading wallet to HD\n"); m_storage.SetMinVersion(FEATURE_HD); @@ -453,10 +453,17 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, bilingual_str& error) hd_upgrade = true; } // Upgrade to HD chain split if necessary - if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) { + if (IsFeatureSupported(new_version, FEATURE_HD_SPLIT)) { WalletLogPrintf("Upgrading wallet to use HD chain split\n"); m_storage.SetMinVersion(FEATURE_PRE_SPLIT_KEYPOOL); split_upgrade = FEATURE_HD_SPLIT > prev_version; + // Upgrade the HDChain + if (m_hd_chain.nVersion < CHDChain::VERSION_HD_CHAIN_SPLIT) { + m_hd_chain.nVersion = CHDChain::VERSION_HD_CHAIN_SPLIT; + if (!WalletBatch(m_storage.GetDatabase()).WriteHDChain(m_hd_chain)) { + throw std::runtime_error(std::string(__func__) + ": writing chain failed"); + } + } } // Mark all keys currently in the keypool as pre-split if (split_upgrade) { diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 63c10b7a0d..3bf8f78120 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -37,7 +37,7 @@ public: virtual bool IsWalletFlagSet(uint64_t) const = 0; virtual void UnsetBlankWalletFlag(WalletBatch&) = 0; virtual bool CanSupportFeature(enum WalletFeature) const = 0; - virtual void SetMinVersion(enum WalletFeature, WalletBatch* = nullptr, bool = false) = 0; + virtual void SetMinVersion(enum WalletFeature, WalletBatch* = nullptr) = 0; virtual const CKeyingMaterial& GetEncryptionKey() const = 0; virtual bool HasEncryptionKeys() const = 0; virtual bool IsLocked() const = 0; @@ -206,7 +206,7 @@ public: virtual bool CanGetAddresses(bool internal = false) const { return false; } /** Upgrades the wallet 
to the specified version */ - virtual bool Upgrade(int prev_version, bilingual_str& error) { return false; } + virtual bool Upgrade(int prev_version, int new_version, bilingual_str& error) { return false; } virtual bool HavePrivateKeys() const { return false; } @@ -371,7 +371,7 @@ public: bool SetupGeneration(bool force = false) override; - bool Upgrade(int prev_version, bilingual_str& error) override; + bool Upgrade(int prev_version, int new_version, bilingual_str& error) override; bool HavePrivateKeys() const override; diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index c42114c394..4911af08c6 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -688,7 +688,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_descriptor_test, BasicTestingSetup) BOOST_CHECK_EXCEPTION(vr >> w_desc, std::ios_base::failure, malformed_descriptor); } -//! Test CreateWalletFromFile function and its behavior handling potential race +//! Test CWallet::Create() and its behavior handling potential race //! conditions if it's called the same time an incoming transaction shows up in //! the mempool or a new block. //! @@ -706,7 +706,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_descriptor_test, BasicTestingSetup) //! wallet rescan and notifications are immediately synced, to verify the wallet //! must already have a handler in place for them, and there's no gap after //! rescanning where new transactions in new blocks could be lost. -BOOST_FIXTURE_TEST_CASE(CreateWalletFromFile, TestChain100Setup) +BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup) { // Create new wallet with known key and unload it. auto chain = interfaces::MakeChain(m_node); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index d1cde6aa89..d414555511 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -436,21 +436,13 @@ void CWallet::chainStateFlushed(const CBlockLocator& loc) batch.WriteBestBlock(loc); } -void CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch* batch_in, bool fExplicit) +void CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch* batch_in) { LOCK(cs_wallet); if (nWalletVersion >= nVersion) return; - - // when doing an explicit upgrade, if we pass the max version permitted, upgrade all the way - if (fExplicit && nVersion > nWalletMaxVersion) - nVersion = FEATURE_LATEST; - nWalletVersion = nVersion; - if (nVersion > nWalletMaxVersion) - nWalletMaxVersion = nVersion; - { WalletBatch* batch = batch_in ? 
batch_in : new WalletBatch(*database); if (nWalletVersion > 40000) @@ -460,18 +452,6 @@ void CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch* batch_in, } } -bool CWallet::SetMaxVersion(int nVersion) -{ - LOCK(cs_wallet); - // cannot downgrade below current version - if (nWalletVersion > nVersion) - return false; - - nWalletMaxVersion = nVersion; - - return true; -} - std::set<uint256> CWallet::GetConflicts(const uint256& txid) const { std::set<uint256> result; @@ -656,7 +636,7 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase) } // Encryption was introduced in version 0.4.0 - SetMinVersion(FEATURE_WALLETCRYPT, encrypted_batch, true); + SetMinVersion(FEATURE_WALLETCRYPT, encrypted_batch); if (!encrypted_batch->TxnCommit()) { delete encrypted_batch; @@ -1778,7 +1758,11 @@ CWallet::ScanResult CWallet::ScanForWalletTransactions(const uint256& start_bloc double progress_current = progress_begin; int block_height = start_height; while (!fAbortRescan && !chain().shutdownRequested()) { - m_scanning_progress = (progress_current - progress_begin) / (progress_end - progress_begin); + if (progress_end - progress_begin > 0.0) { + m_scanning_progress = (progress_current - progress_begin) / (progress_end - progress_begin); + } else { // avoid divide-by-zero for single block scan range (i.e. start and stop hashes are equal) + m_scanning_progress = 0; + } if (block_height % 100 == 0 && progress_end - progress_begin > 0.0) { ShowProgress(strprintf("%s " + _("Rescanning...").translated, GetDisplayName()), std::max(1, std::min(99, (int)(m_scanning_progress * 100)))); } @@ -3106,10 +3090,10 @@ bool CWallet::CreateTransactionInternal( WalletLogPrintf("Fee Calculation: Fee:%d Bytes:%u Needed:%d Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n", nFeeRet, nBytes, nFeeNeeded, feeCalc.returnedTarget, feeCalc.desiredTarget, StringForFeeReason(feeCalc.reason), feeCalc.est.decay, feeCalc.est.pass.start, feeCalc.est.pass.end, - 100 * feeCalc.est.pass.withinTarget / (feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool), + (feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool) > 0.0 ? 100 * feeCalc.est.pass.withinTarget / (feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool) : 0.0, feeCalc.est.pass.withinTarget, feeCalc.est.pass.totalConfirmed, feeCalc.est.pass.inMempool, feeCalc.est.pass.leftMempool, feeCalc.est.fail.start, feeCalc.est.fail.end, - 100 * feeCalc.est.fail.withinTarget / (feeCalc.est.fail.totalConfirmed + feeCalc.est.fail.inMempool + feeCalc.est.fail.leftMempool), + (feeCalc.est.fail.totalConfirmed + feeCalc.est.fail.inMempool + feeCalc.est.fail.leftMempool) > 0.0 ? 
100 * feeCalc.est.fail.withinTarget / (feeCalc.est.fail.totalConfirmed + feeCalc.est.fail.inMempool + feeCalc.est.fail.leftMempool) : 0.0, feeCalc.est.fail.withinTarget, feeCalc.est.fail.totalConfirmed, feeCalc.est.fail.inMempool, feeCalc.est.fail.leftMempool); return true; } @@ -4121,33 +4105,31 @@ const CAddressBookData* CWallet::FindAddressBookEntry(const CTxDestination& dest bool CWallet::UpgradeWallet(int version, bilingual_str& error, std::vector<bilingual_str>& warnings) { int prev_version = GetVersion(); - int nMaxVersion = version; - if (nMaxVersion == 0) // the -upgradewallet without argument case - { + if (version == 0) { WalletLogPrintf("Performing wallet upgrade to %i\n", FEATURE_LATEST); - nMaxVersion = FEATURE_LATEST; - SetMinVersion(FEATURE_LATEST); // permanently upgrade the wallet immediately + version = FEATURE_LATEST; } else { - WalletLogPrintf("Allowing wallet upgrade up to %i\n", nMaxVersion); + WalletLogPrintf("Allowing wallet upgrade up to %i\n", version); } - if (nMaxVersion < GetVersion()) + if (version < prev_version) { error = _("Cannot downgrade wallet"); return false; } - SetMaxVersion(nMaxVersion); LOCK(cs_wallet); // Do not upgrade versions to any version between HD_SPLIT and FEATURE_PRE_SPLIT_KEYPOOL unless already supporting HD_SPLIT - int max_version = GetVersion(); - if (!CanSupportFeature(FEATURE_HD_SPLIT) && max_version >= FEATURE_HD_SPLIT && max_version < FEATURE_PRE_SPLIT_KEYPOOL) { + if (!CanSupportFeature(FEATURE_HD_SPLIT) && version >= FEATURE_HD_SPLIT && version < FEATURE_PRE_SPLIT_KEYPOOL) { error = _("Cannot upgrade a non HD split wallet without upgrading to support pre split keypool. Please use version 169900 or no version specified."); return false; } + // Permanently upgrade to the version + SetMinVersion(GetClosestWalletFeature(version)); + for (auto spk_man : GetActiveScriptPubKeyMans()) { - if (!spk_man->Upgrade(prev_version, error)) { + if (!spk_man->Upgrade(prev_version, version, error)) { return false; } } @@ -4511,7 +4493,7 @@ DescriptorScriptPubKeyMan* CWallet::GetDescriptorScriptPubKeyMan(const WalletDes return nullptr; } -ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label) +ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label, bool internal) { if (!IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) { WalletLogPrintf("Cannot add WalletDescriptor to a non-descriptor wallet\n"); @@ -4556,7 +4538,10 @@ ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const Flat } // Top up key pool, the manager will generate new scriptPubKeys internally - new_spk_man->TopUp(); + if (!new_spk_man->TopUp()) { + WalletLogPrintf("Could not top up scriptPubKeys\n"); + return nullptr; + } // Apply the label if necessary // Note: we disable labels for ranged descriptors @@ -4568,7 +4553,7 @@ ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const Flat } CTxDestination dest; - if (ExtractDestination(script_pub_keys.at(0), dest)) { + if (!internal && ExtractDestination(script_pub_keys.at(0), dest)) { SetAddressBook(dest, label, "receive"); } } diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 74de55dcb5..0934213fc7 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -636,9 +636,6 @@ private: //! 
the current wallet version: clients below this version are not able to load the wallet int nWalletVersion GUARDED_BY(cs_wallet){FEATURE_BASE}; - //! the maximum wallet format version: memory-only variable that specifies to what version this wallet may be upgraded - int nWalletMaxVersion GUARDED_BY(cs_wallet) = FEATURE_BASE; - int64_t nNextResend = 0; bool fBroadcastTransactions = false; // Local time that the tip block was received. Used to schedule wallet rebroadcasts. @@ -800,8 +797,8 @@ public: const CWalletTx* GetWalletTx(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); bool IsTrusted(const CWalletTx& wtx, std::set<uint256>& trusted_parents) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - //! check whether we are allowed to upgrade (or already support) to the named feature - bool CanSupportFeature(enum WalletFeature wf) const override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); return nWalletMaxVersion >= wf; } + //! check whether we support the named feature + bool CanSupportFeature(enum WalletFeature wf) const override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); return IsFeatureSupported(nWalletVersion, wf); } /** * populate vCoins with vector of available COutputs. @@ -853,7 +850,7 @@ public: //! Upgrade stored CKeyMetadata objects to store key origin info as KeyOriginInfo void UpgradeKeyMetadata() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool LoadMinVersion(int nVersion) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); nWalletVersion = nVersion; nWalletMaxVersion = std::max(nWalletMaxVersion, nVersion); return true; } + bool LoadMinVersion(int nVersion) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); nWalletVersion = nVersion; return true; } /** * Adds a destination data tuple to the store, and saves it to disk @@ -1076,11 +1073,8 @@ public: unsigned int GetKeyPoolSize() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - //! signify that a particular wallet feature is now used. this may change nWalletVersion and nWalletMaxVersion if those are lower - void SetMinVersion(enum WalletFeature, WalletBatch* batch_in = nullptr, bool fExplicit = false) override; - - //! change which version we're allowed to upgrade to (note that this does not immediately imply upgrading to that format) - bool SetMaxVersion(int nVersion); + //! signify that a particular wallet feature is now used. + void SetMinVersion(enum WalletFeature, WalletBatch* batch_in = nullptr) override; //! get the current wallet format (the oldest client version guaranteed to understand this wallet) int GetVersion() const { LOCK(cs_wallet); return nWalletVersion; } @@ -1280,7 +1274,7 @@ public: DescriptorScriptPubKeyMan* GetDescriptorScriptPubKeyMan(const WalletDescriptor& desc) const; //! Add a descriptor to the wallet, return a ScriptPubKeyMan & associated output type - ScriptPubKeyMan* AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label); + ScriptPubKeyMan* AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label, bool internal); }; /** diff --git a/src/wallet/walletutil.cpp b/src/wallet/walletutil.cpp index 2f3e597b90..702293e6c7 100644 --- a/src/wallet/walletutil.cpp +++ b/src/wallet/walletutil.cpp @@ -49,28 +49,51 @@ std::vector<fs::path> ListWalletDir() continue; } - // Get wallet path relative to walletdir by removing walletdir from the wallet path. 
- // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60. - const fs::path path = it->path().string().substr(offset); + try { + // Get wallet path relative to walletdir by removing walletdir from the wallet path. + // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60. + const fs::path path = it->path().string().substr(offset); - if (it->status().type() == fs::directory_file && - (ExistsBerkeleyDatabase(it->path()) || ExistsSQLiteDatabase(it->path()))) { - // Found a directory which contains wallet.dat btree file, add it as a wallet. - paths.emplace_back(path); - } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && ExistsBerkeleyDatabase(it->path())) { - if (it->path().filename() == "wallet.dat") { - // Found top-level wallet.dat btree file, add top level directory "" - // as a wallet. - paths.emplace_back(); - } else { - // Found top-level btree file not called wallet.dat. Current bitcoin - // software will never create these files but will allow them to be - // opened in a shared database environment for backwards compatibility. - // Add it to the list of available wallets. + if (it->status().type() == fs::directory_file && + (ExistsBerkeleyDatabase(it->path()) || ExistsSQLiteDatabase(it->path()))) { + // Found a directory which contains wallet.dat btree file, add it as a wallet. paths.emplace_back(path); + } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && ExistsBerkeleyDatabase(it->path())) { + if (it->path().filename() == "wallet.dat") { + // Found top-level wallet.dat btree file, add top level directory "" + // as a wallet. + paths.emplace_back(); + } else { + // Found top-level btree file not called wallet.dat. Current bitcoin + // software will never create these files but will allow them to be + // opened in a shared database environment for backwards compatibility. + // Add it to the list of available wallets. 
+ paths.emplace_back(path); + } } + } catch (const std::exception& e) { + LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what()); + it.no_push(); } } return paths; } + +bool IsFeatureSupported(int wallet_version, int feature_version) +{ + return wallet_version >= feature_version; +} + +WalletFeature GetClosestWalletFeature(int version) +{ + if (version >= FEATURE_LATEST) return FEATURE_LATEST; + if (version >= FEATURE_PRE_SPLIT_KEYPOOL) return FEATURE_PRE_SPLIT_KEYPOOL; + if (version >= FEATURE_NO_DEFAULT_KEY) return FEATURE_NO_DEFAULT_KEY; + if (version >= FEATURE_HD_SPLIT) return FEATURE_HD_SPLIT; + if (version >= FEATURE_HD) return FEATURE_HD; + if (version >= FEATURE_COMPRPUBKEY) return FEATURE_COMPRPUBKEY; + if (version >= FEATURE_WALLETCRYPT) return FEATURE_WALLETCRYPT; + if (version >= FEATURE_BASE) return FEATURE_BASE; + return static_cast<WalletFeature>(0); +} diff --git a/src/wallet/walletutil.h b/src/wallet/walletutil.h index afdcb2e18a..27521abd81 100644 --- a/src/wallet/walletutil.h +++ b/src/wallet/walletutil.h @@ -29,7 +29,8 @@ enum WalletFeature FEATURE_LATEST = FEATURE_PRE_SPLIT_KEYPOOL }; - +bool IsFeatureSupported(int wallet_version, int feature_version); +WalletFeature GetClosestWalletFeature(int version); enum WalletFlags : uint64_t { // wallet flags in the upper section (> 1 << 31) will lead to not opening the wallet if flag is unknown diff --git a/test/functional/data/wallets/high_minversion/.walletlock b/test/functional/data/wallets/high_minversion/.walletlock deleted file mode 100644 index e69de29bb2..0000000000 --- a/test/functional/data/wallets/high_minversion/.walletlock +++ /dev/null diff --git a/test/functional/data/wallets/high_minversion/GENERATE.md b/test/functional/data/wallets/high_minversion/GENERATE.md deleted file mode 100644 index e55c4557ca..0000000000 --- a/test/functional/data/wallets/high_minversion/GENERATE.md +++ /dev/null @@ -1,8 +0,0 @@ -The wallet has been created by starting Bitcoin Core with the options -`-regtest -datadir=/tmp -nowallet -walletdir=$(pwd)/test/functional/data/wallets/`. - -In the source code, `WalletFeature::FEATURE_LATEST` has been modified to be large, so that the minversion is too high -for a current build of the wallet. - -The wallet has then been created with the RPC `createwallet high_minversion true true`, so that a blank wallet with -private keys disabled is created. 
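For reference, a minimal Python sketch (not part of the patch) of the mapping performed by the new IsFeatureSupported() and GetClosestWalletFeature() helpers in walletutil.cpp above: a requested upgrade version is rounded down to the nearest defined WalletFeature. The numeric values are assumptions taken from the WalletFeature enum (FEATURE_BASE through FEATURE_PRE_SPLIT_KEYPOOL == FEATURE_LATEST); they match the version numbers exercised by wallet_upgradewallet.py further down.

# Sketch only; feature version numbers are assumed from the WalletFeature enum.
FEATURES = [10500, 40000, 60000, 130000, 139900, 159900, 169900]

def closest_wallet_feature(version):
    # Round an arbitrary version number down to the nearest defined feature level.
    supported = [f for f in FEATURES if f <= version]
    return supported[-1] if supported else 0

def is_feature_supported(wallet_version, feature_version):
    return wallet_version >= feature_version

# upgradewallet(129999) on a version-60000 wallet is a no-op, as the test below expects:
assert closest_wallet_feature(129999) == 60000
# 169900 is FEATURE_PRE_SPLIT_KEYPOOL, i.e. FEATURE_LATEST:
assert closest_wallet_feature(169900) == 169900
assert is_feature_supported(130000, 130000)  # an HD (130000) wallet supports FEATURE_HD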
diff --git a/test/functional/data/wallets/high_minversion/db.log b/test/functional/data/wallets/high_minversion/db.log deleted file mode 100644 index e69de29bb2..0000000000 --- a/test/functional/data/wallets/high_minversion/db.log +++ /dev/null diff --git a/test/functional/data/wallets/high_minversion/wallet.dat b/test/functional/data/wallets/high_minversion/wallet.dat Binary files differ deleted file mode 100644 index 99ab809263..0000000000 --- a/test/functional/data/wallets/high_minversion/wallet.dat +++ /dev/null diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index dfae58e860..05b658ed87 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -26,6 +26,8 @@ addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name + +- Test getnetworkinfo for each node """ import socket @@ -41,12 +43,16 @@ from test_framework.util import ( from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports -# From GetNetworkName() in netbase.cpp: -NET_UNROUTABLE = "" + +# Networks returned by RPC getpeerinfo, defined in src/netbase.cpp::GetNetworkName() NET_UNROUTABLE = "unroutable" NET_IPV4 = "ipv4" NET_IPV6 = "ipv6" NET_ONION = "onion" +# Networks returned by RPC getnetworkinfo, defined in src/rpc/net.cpp::GetNetworksInfo() +NETWORKS = frozenset({NET_IPV4, NET_IPV6, NET_ONION}) + class ProxyTest(BitcoinTestFramework): def set_test_params(self): @@ -84,14 +90,14 @@ class ProxyTest(BitcoinTestFramework): self.serv3 = Socks5Server(self.conf3) self.serv3.start() - # Note: proxies are not used to connect to local nodes - # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost + # Note: proxies are not used to connect to local nodes. This is because the proxy to + # use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost.
args = [ ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], [] - ] + ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) @@ -189,15 +195,17 @@ class ProxyTest(BitcoinTestFramework): r[x['name']] = x return r - # test RPC getnetworkinfo + self.log.info("Test RPC getnetworkinfo") n0 = networks_dict(self.nodes[0].getnetworkinfo()) - for net in ['ipv4','ipv6','onion']: + assert_equal(NETWORKS, n0.keys()) + for net in NETWORKS: assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) - for net in ['ipv4','ipv6']: + assert_equal(NETWORKS, n1.keys()) + for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) @@ -205,14 +213,16 @@ class ProxyTest(BitcoinTestFramework): assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) - for net in ['ipv4','ipv6','onion']: + assert_equal(NETWORKS, n2.keys()) + for net in NETWORKS: assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) - for net in ['ipv4','ipv6']: + assert_equal(NETWORKS, n3.keys()) + for net in NETWORKS: assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index 3e47e24a3b..6e28cfb265 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -9,6 +9,7 @@ from test_framework.blocktools import ( create_block, add_witness_commitment, MAX_BLOCK_SIGOPS_WEIGHT, + NORMAL_GBT_REQUEST_PARAMS, WITNESS_SCALE_FACTOR, ) from test_framework.messages import ( @@ -1199,7 +1200,7 @@ class TaprootTest(BitcoinTestFramework): self.num_nodes = 2 self.setup_clean_chain = True # Node 0 has Taproot inactive, Node 1 active. 
- self.extra_args = [["-whitelist=127.0.0.1", "-par=1", "-vbparams=taproot:1:1"], ["-whitelist=127.0.0.1", "-par=1"]] + self.extra_args = [["-par=1", "-vbparams=taproot:1:1"], ["-par=1"]] def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False): @@ -1218,7 +1219,7 @@ class TaprootTest(BitcoinTestFramework): witness and add_witness_commitment(block) block.rehash() block.solve() - block_response = node.submitblock(block.serialize(True).hex()) + block_response = node.submitblock(block.serialize().hex()) if err_msg is not None: assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg) if (accept): @@ -1436,17 +1437,27 @@ class TaprootTest(BitcoinTestFramework): self.log.info(" - Done") def run_test(self): - self.connect_nodes(0, 1) - # Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot). self.log.info("Post-activation tests...") self.nodes[1].generate(101) self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3]) - # Transfer % of funds to pre-taproot node. + # Transfer funds to pre-taproot node. addr = self.nodes[0].getnewaddress() - self.nodes[1].sendtoaddress(address=addr, amount=int(self.nodes[1].getbalance() * 70000000) / 100000000) - self.nodes[1].generate(1) + rawtx = self.nodes[1].createrawtransaction( + inputs=[{ + 'txid': i['txid'], + 'vout': i['vout'] + } for i in self.nodes[1].listunspent()], + outputs={addr: self.nodes[1].getbalance()}, + ) + rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex'] + # Transaction is too large to fit into the mempool, so put it into a block + block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx]) + add_witness_commitment(block) + block.rehash() + block.solve() + assert_equal(None, self.nodes[1].submitblock(block.serialize().hex())) self.sync_blocks() # Pre-taproot activation tests. diff --git a/test/functional/test_framework/bdb.py b/test/functional/test_framework/bdb.py new file mode 100644 index 0000000000..9de358aa0a --- /dev/null +++ b/test/functional/test_framework/bdb.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Utilities for working directly with the wallet's BDB database file + +This is specific to the configuration of BDB used in this project: + - pagesize: 4096 bytes + - Outer database contains single subdatabase named 'main' + - btree + - btree leaf pages + +Each key-value pair is two entries in a btree leaf. The first is the key, the one that follows +is the value. And so on. Note that the entry data is itself not in the correct order. Instead +entry offsets are stored in the correct order and those offsets are needed to then retrieve +the data itself. + +Page format can be found in BDB source code dbinc/db_page.h +This only implements the deserialization of btree metadata pages and normal btree pages. Overflow +pages are not implemented but may be needed in the future if dealing with wallets with large +transactions. 
+ +`db_dump -da wallet.dat` is useful to see the data in a wallet.dat BDB file +""" + +import binascii +import struct + +# Important constants +PAGESIZE = 4096 +OUTER_META_PAGE = 0 +INNER_META_PAGE = 2 + +# Page type values +BTREE_INTERNAL = 3 +BTREE_LEAF = 5 +BTREE_META = 9 + +# Some magic numbers for sanity checking +BTREE_MAGIC = 0x053162 +DB_VERSION = 9 + +# Deserializes a leaf page into a dict. +# Btree internal pages have the same header, for those, return None. +# For the btree leaf pages, deserialize them and put all the data into a dict +def dump_leaf_page(data): + page_info = {} + page_header = data[0:26] + _, pgno, prev_pgno, next_pgno, entries, hf_offset, level, pg_type = struct.unpack('QIIIHHBB', page_header) + page_info['pgno'] = pgno + page_info['prev_pgno'] = prev_pgno + page_info['next_pgno'] = next_pgno + page_info['entries'] = entries + page_info['hf_offset'] = hf_offset + page_info['level'] = level + page_info['pg_type'] = pg_type + page_info['entry_offsets'] = struct.unpack('{}H'.format(entries), data[26:26 + entries * 2]) + page_info['entries'] = [] + + if pg_type == BTREE_INTERNAL: + # Skip internal pages. These are the internal nodes of the btree and don't contain anything relevant to us + return None + + assert pg_type == BTREE_LEAF, 'A non-btree leaf page has been encountered while dumping leaves' + + for i in range(0, entries): + offset = page_info['entry_offsets'][i] + entry = {'offset': offset} + page_data_header = data[offset:offset + 3] + e_len, pg_type = struct.unpack('HB', page_data_header) + entry['len'] = e_len + entry['pg_type'] = pg_type + entry['data'] = data[offset + 3:offset + 3 + e_len] + page_info['entries'].append(entry) + + return page_info + +# Deserializes a btree metadata page into a dict. +# Does a simple sanity check on the magic value, type, and version +def dump_meta_page(page): + # metadata page + # general metadata + metadata = {} + meta_page = page[0:72] + _, pgno, magic, version, pagesize, encrypt_alg, pg_type, metaflags, _, free, last_pgno, nparts, key_count, record_count, flags, uid = struct.unpack('QIIIIBBBBIIIIII20s', meta_page) + metadata['pgno'] = pgno + metadata['magic'] = magic + metadata['version'] = version + metadata['pagesize'] = pagesize + metadata['encrypt_alg'] = encrypt_alg + metadata['pg_type'] = pg_type + metadata['metaflags'] = metaflags + metadata['free'] = free + metadata['last_pgno'] = last_pgno + metadata['nparts'] = nparts + metadata['key_count'] = key_count + metadata['record_count'] = record_count + metadata['flags'] = flags + metadata['uid'] = binascii.hexlify(uid) + + assert magic == BTREE_MAGIC, 'bdb magic does not match bdb btree magic' + assert pg_type == BTREE_META, 'Metadata page is not a btree metadata page' + assert version == DB_VERSION, 'Database too new' + + # btree metadata + btree_meta_page = page[72:512] + _, minkey, re_len, re_pad, root, _, crypto_magic, _, iv, chksum = struct.unpack('IIIII368sI12s16s20s', btree_meta_page) + metadata['minkey'] = minkey + metadata['re_len'] = re_len + metadata['re_pad'] = re_pad + metadata['root'] = root + metadata['crypto_magic'] = crypto_magic + metadata['iv'] = binascii.hexlify(iv) + metadata['chksum'] = binascii.hexlify(chksum) + return metadata + +# Given the dict from dump_leaf_page, get the key-value pairs and put them into a dict +def extract_kv_pairs(page_data): + out = {} + last_key = None + for i, entry in enumerate(page_data['entries']): + # By virtue of these all being pairs, even number entries are keys, and odd are values + if i % 2 == 0: + 
out[entry['data']] = b'' + last_key = entry['data'] + else: + out[last_key] = entry['data'] + return out + +# Extract the key-value pairs of the BDB file given in filename +def dump_bdb_kv(filename): + # Read in the BDB file and start deserializing it + pages = [] + with open(filename, 'rb') as f: + data = f.read(PAGESIZE) + while len(data) > 0: + pages.append(data) + data = f.read(PAGESIZE) + + # Sanity check the meta pages + dump_meta_page(pages[OUTER_META_PAGE]) + dump_meta_page(pages[INNER_META_PAGE]) + + # Fetch the kv pairs from the leaf pages + kv = {} + for i in range(3, len(pages)): + info = dump_leaf_page(pages[i]) + if info is not None: + info_kv = extract_kv_pairs(info) + kv = {**kv, **info_kv} + return kv diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 64bc7e0485..6b7214f03a 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -5,7 +5,6 @@ """Utilities for manipulating blocks and transactions.""" from binascii import a2b_hex -import io import struct import time import unittest @@ -45,7 +44,6 @@ from .script import ( hash160, ) from .util import assert_equal -from io import BytesIO WITNESS_SCALE_FACTOR = 4 MAX_BLOCK_SIGOPS = 20000 @@ -78,9 +76,7 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl if txlist: for tx in txlist: if not hasattr(tx, 'calc_sha256'): - txo = CTransaction() - txo.deserialize(io.BytesIO(tx)) - tx = txo + tx = FromHex(CTransaction(), tx) block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() @@ -166,8 +162,7 @@ def create_transaction(node, txid, to_address, *, amount): sign for the output that is being spent. """ raw_tx = create_raw_transaction(node, txid, to_address, amount=amount) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx))) + tx = FromHex(CTransaction(), raw_tx) return tx def create_raw_transaction(node, txid, to_address, *, amount): diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index abf2507154..f3d13c049b 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -10,7 +10,6 @@ import csv import hashlib import os import random -import sys import unittest from .util import modinv @@ -22,6 +21,7 @@ def TaggedHash(tag, data): return hashlib.sha256(ss).digest() def xor_bytes(b0, b1): + assert len(b0) == len(b1) return bytes(x ^ y for (x, y) in zip(b0, b1)) def jacobi_symbol(n, k): @@ -523,7 +523,8 @@ class TestFrameworkKey(unittest.TestCase): def test_schnorr_testvectors(self): """Implement the BIP340 test vectors (read from bip340_test_vectors.csv).""" num_tests = 0 - with open(os.path.join(sys.path[0], 'test_framework', 'bip340_test_vectors.csv'), newline='', encoding='utf8') as csvfile: + vectors_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bip340_test_vectors.csv') + with open(vectors_file, newline='', encoding='utf8') as csvfile: reader = csv.reader(csvfile) next(reader) for row in reader: diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 3356f1ab10..62ff5c6e33 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -8,6 +8,7 @@ from base64 import b64encode from binascii import unhexlify from decimal import Decimal, ROUND_DOWN from subprocess import CalledProcessError +import hashlib import inspect import json import logging @@ -260,6 +261,14 
@@ def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) raise RuntimeError('Unreachable') +def sha256sum_file(filename): + h = hashlib.sha256() + with open(filename, 'rb') as f: + d = f.read(4096) + while len(d) > 0: + h.update(d) + d = f.read(4096) + return h.digest() # RPC/P2P connection constants and functions ############################################ diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py index 949adeb703..2903a84998 100755 --- a/test/functional/wallet_importdescriptors.py +++ b/test/functional/wallet_importdescriptors.py @@ -15,6 +15,7 @@ variants. - `test_address()` is called to call getaddressinfo for an address on node1 and test the values returned.""" +from test_framework.address import key_to_p2pkh from test_framework.test_framework import BitcoinTestFramework from test_framework.descriptors import descsum_create from test_framework.util import ( @@ -107,6 +108,17 @@ class ImportDescriptorsTest(BitcoinTestFramework): error_code=-8, error_message="Internal addresses should not have a label") + self.log.info("Internal addresses should be detected as such") + key = get_generate_key() + addr = key_to_p2pkh(key.pubkey) + self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"), + "timestamp": "now", + "internal": True}, + success=True) + info = w1.getaddressinfo(addr) + assert_equal(info["ismine"], True) + assert_equal(info["ischange"], True) + # # Test importing of a P2SH-P2WPKH descriptor key = get_generate_key() self.log.info("Should not import a p2sh-p2wpkh descriptor without checksum") @@ -209,6 +221,15 @@ class ImportDescriptorsTest(BitcoinTestFramework): success=False, error_code=-4, error_message='Cannot import private keys to a wallet with private keys disabled') + + self.log.info("Should not import a descriptor with hardened derivations when private keys are disabled") + self.test_importdesc({"desc": descsum_create("wpkh(" + xpub + "/1h/*)"), + "timestamp": "now", + "range": 1}, + success=False, + error_code=-4, + error_message='Cannot expand descriptor. 
Probably because of hardened derivations without private keys provided') + for address in addresses: test_address(w1, address, diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py index 07c14db6b1..6a1b9097c5 100755 --- a/test/functional/wallet_listsinceblock.py +++ b/test/functional/wallet_listsinceblock.py @@ -191,6 +191,7 @@ class ListSinceBlockTest(BitcoinTestFramework): address = key_to_p2wpkh(eckey.get_pubkey().get_bytes()) self.nodes[2].sendtoaddress(address, 10) self.nodes[2].generate(6) + self.sync_all() self.nodes[2].importprivkey(privkey) utxos = self.nodes[2].listunspent() utxo = [u for u in utxos if u["address"] == address][0] diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index abdc279197..df16ec741f 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -10,6 +10,7 @@ from decimal import Decimal from threading import Thread import os import shutil +import stat import time from test_framework.authproxy import JSONRPCException @@ -78,6 +79,11 @@ class MultiWalletTest(BitcoinTestFramework): os.mkdir(wallet_dir('w7')) os.symlink('w7', wallet_dir('w7_symlink')) + os.symlink('..', wallet_dir('recursive_dir_symlink')) + + os.mkdir(wallet_dir('self_walletdat_symlink')) + os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat')) + # rename wallet.dat to make sure plain wallet file paths (as opposed to # directory paths) can be loaded # create another dummy wallet for use in testing backups later @@ -117,7 +123,16 @@ class MultiWalletTest(BitcoinTestFramework): self.nodes[0].createwallet(wallet_name) for wallet_name in to_load: self.nodes[0].loadwallet(wallet_name) - assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir)) + + os.mkdir(wallet_dir('no_access')) + os.chmod(wallet_dir('no_access'), 0) + try: + with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']): + walletlist = self.nodes[0].listwalletdir()['wallets'] + finally: + # Need to ensure access is restored for cleanup + os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir)) assert_equal(set(node.listwallets()), set(wallet_names)) @@ -156,6 +171,9 @@ class MultiWalletTest(BitcoinTestFramework): open(not_a_dir, 'a', encoding="utf8").close() self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory') + self.log.info("Do not allow -upgradewallet with multiwallet") + self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet") + # if wallets/ doesn't exist, datadir should be the default wallet dir wallet_dir2 = data_dir('walletdir') os.rename(wallet_dir(), wallet_dir2) diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py index 15d9b109c5..8ab4b3f76c 100755 --- a/test/functional/wallet_upgradewallet.py +++ b/test/functional/wallet_upgradewallet.py @@ -13,23 +13,47 @@ Only v0.15.2 and v0.16.3 are required by this test. 
The others are used in featu import os import shutil +import struct +from io import BytesIO + +from test_framework.bdb import dump_bdb_kv +from test_framework.messages import deser_compact_size, deser_string from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_is_hex_string, + assert_raises_rpc_error, + sha256sum_file, ) +UPGRADED_KEYMETA_VERSION = 12 + +def deser_keymeta(f): + ver, create_time = struct.unpack('<Iq', f.read(12)) + kp_str = deser_string(f) + seed_id = f.read(20) + fpr = f.read(4) + path_len = 0 + path = [] + has_key_orig = False + if ver == UPGRADED_KEYMETA_VERSION: + path_len = deser_compact_size(f) + for i in range(0, path_len): + path.append(struct.unpack('<I', f.read(4))[0]) + has_key_orig = bool(f.read(1)) + return ver, create_time, kp_str, seed_id, fpr, path_len, path, has_key_orig + class UpgradeWalletTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [ - ["-addresstype=bech32"], # current wallet version - ["-usehd=1"], # v0.16.3 wallet - ["-usehd=0"] # v0.15.2 wallet + ["-addresstype=bech32", "-keypool=2"], # current wallet version + ["-usehd=1", "-keypool=2"], # v0.16.3 wallet + ["-usehd=0", "-keypool=2"] # v0.15.2 wallet ] self.wallet_names = [self.default_wallet_name, None, None] @@ -87,22 +111,53 @@ class UpgradeWalletTest(BitcoinTestFramework): self.log.info("Test upgradewallet RPC...") # Prepare for copying of the older wallet - node_master_wallet_dir = os.path.join(node_master.datadir, "regtest/wallets") + node_master_wallet_dir = os.path.join(node_master.datadir, "regtest/wallets", self.default_wallet_name) + node_master_wallet = os.path.join(node_master_wallet_dir, self.default_wallet_name, self.wallet_data_filename) v16_3_wallet = os.path.join(v16_3_node.datadir, "regtest/wallets/wallet.dat") v15_2_wallet = os.path.join(v15_2_node.datadir, "regtest/wallet.dat") + split_hd_wallet = os.path.join(v15_2_node.datadir, "regtest/splithd") self.stop_nodes() - # Copy the 0.16.3 wallet to the last Bitcoin Core version and open it: - shutil.rmtree(node_master_wallet_dir) - os.mkdir(node_master_wallet_dir) - shutil.copy( - v16_3_wallet, - node_master_wallet_dir - ) - self.restart_node(0, ['-nowallet']) - node_master.loadwallet('') + # Make split hd wallet + self.start_node(2, ['-usehd=1', '-keypool=2', '-wallet=splithd']) + self.stop_node(2) + + def copy_v16(): + node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet() + # Copy the 0.16.3 wallet to the last Bitcoin Core version and open it: + shutil.rmtree(node_master_wallet_dir) + os.mkdir(node_master_wallet_dir) + shutil.copy( + v16_3_wallet, + node_master_wallet_dir + ) + node_master.loadwallet(self.default_wallet_name) + + def copy_non_hd(): + node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet() + # Copy the 0.15.2 non hd wallet to the last Bitcoin Core version and open it: + shutil.rmtree(node_master_wallet_dir) + os.mkdir(node_master_wallet_dir) + shutil.copy( + v15_2_wallet, + node_master_wallet_dir + ) + node_master.loadwallet(self.default_wallet_name) - wallet = node_master.get_wallet_rpc('') + def copy_split_hd(): + node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet() + # Copy the 0.15.2 split hd wallet to the last Bitcoin Core version and open it: + shutil.rmtree(node_master_wallet_dir) + os.mkdir(node_master_wallet_dir) + shutil.copy( + split_hd_wallet, + os.path.join(node_master_wallet_dir, 'wallet.dat') + ) + 
node_master.loadwallet(self.default_wallet_name) + + self.restart_node(0) + copy_v16() + wallet = node_master.get_wallet_rpc(self.default_wallet_name) old_version = wallet.getwalletinfo()["walletversion"] # calling upgradewallet without version arguments @@ -114,18 +169,8 @@ class UpgradeWalletTest(BitcoinTestFramework): # wallet should still contain the same balance assert_equal(wallet.getbalance(), v16_3_balance) - self.stop_node(0) - # Copy the 0.15.2 wallet to the last Bitcoin Core version and open it: - shutil.rmtree(node_master_wallet_dir) - os.mkdir(node_master_wallet_dir) - shutil.copy( - v15_2_wallet, - node_master_wallet_dir - ) - self.restart_node(0, ['-nowallet']) - node_master.loadwallet('') - - wallet = node_master.get_wallet_rpc('') + copy_non_hd() + wallet = node_master.get_wallet_rpc(self.default_wallet_name) # should have no master key hash before conversion assert_equal('hdseedid' in wallet.getwalletinfo(), False) # calling upgradewallet with explicit version number @@ -137,5 +182,165 @@ class UpgradeWalletTest(BitcoinTestFramework): # after conversion master key hash should be present assert_is_hex_string(wallet.getwalletinfo()['hdseedid']) + self.log.info('Intermediary versions don\'t effect anything') + copy_non_hd() + # Wallet starts with 60000 + assert_equal(60000, wallet.getwalletinfo()['walletversion']) + wallet.unloadwallet() + before_checksum = sha256sum_file(node_master_wallet) + node_master.loadwallet('') + # Can "upgrade" to 129999 which should have no effect on the wallet + wallet.upgradewallet(129999) + assert_equal(60000, wallet.getwalletinfo()['walletversion']) + wallet.unloadwallet() + assert_equal(before_checksum, sha256sum_file(node_master_wallet)) + node_master.loadwallet('') + + self.log.info('Wallets cannot be downgraded') + copy_non_hd() + assert_raises_rpc_error(-4, 'Cannot downgrade wallet', wallet.upgradewallet, 40000) + wallet.unloadwallet() + assert_equal(before_checksum, sha256sum_file(node_master_wallet)) + node_master.loadwallet('') + + self.log.info('Can upgrade to HD') + # Inspect the old wallet and make sure there is no hdchain + orig_kvs = dump_bdb_kv(node_master_wallet) + assert b'\x07hdchain' not in orig_kvs + # Upgrade to HD, no split + wallet.upgradewallet(130000) + assert_equal(130000, wallet.getwalletinfo()['walletversion']) + # Check that there is now a hd chain and it is version 1, no internal chain counter + new_kvs = dump_bdb_kv(node_master_wallet) + assert b'\x07hdchain' in new_kvs + hd_chain = new_kvs[b'\x07hdchain'] + assert_equal(28, len(hd_chain)) + hd_chain_version, external_counter, seed_id = struct.unpack('<iI20s', hd_chain) + assert_equal(1, hd_chain_version) + seed_id = bytearray(seed_id) + seed_id.reverse() + old_kvs = new_kvs + # First 2 keys should still be non-HD + for i in range(0, 2): + info = wallet.getaddressinfo(wallet.getnewaddress()) + assert 'hdkeypath' not in info + assert 'hdseedid' not in info + # Next key should be HD + info = wallet.getaddressinfo(wallet.getnewaddress()) + assert_equal(seed_id.hex(), info['hdseedid']) + assert_equal('m/0\'/0\'/0\'', info['hdkeypath']) + prev_seed_id = info['hdseedid'] + # Change key should be the same keypool + info = wallet.getaddressinfo(wallet.getrawchangeaddress()) + assert_equal(prev_seed_id, info['hdseedid']) + assert_equal('m/0\'/0\'/1\'', info['hdkeypath']) + + self.log.info('Cannot upgrade to HD Split, needs Pre Split Keypool') + assert_raises_rpc_error(-4, 'Cannot upgrade a non HD split wallet without upgrading to support pre split keypool', 
wallet.upgradewallet, 139900) + assert_equal(130000, wallet.getwalletinfo()['walletversion']) + assert_raises_rpc_error(-4, 'Cannot upgrade a non HD split wallet without upgrading to support pre split keypool', wallet.upgradewallet, 159900) + assert_equal(130000, wallet.getwalletinfo()['walletversion']) + assert_raises_rpc_error(-4, 'Cannot upgrade a non HD split wallet without upgrading to support pre split keypool', wallet.upgradewallet, 169899) + assert_equal(130000, wallet.getwalletinfo()['walletversion']) + + self.log.info('Upgrade HD to HD chain split') + wallet.upgradewallet(169900) + assert_equal(169900, wallet.getwalletinfo()['walletversion']) + # Check that the hdchain updated correctly + new_kvs = dump_bdb_kv(node_master_wallet) + hd_chain = new_kvs[b'\x07hdchain'] + assert_equal(32, len(hd_chain)) + hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain) + assert_equal(2, hd_chain_version) + assert_equal(0, internal_counter) + seed_id = bytearray(seed_id) + seed_id.reverse() + assert_equal(seed_id.hex(), prev_seed_id) + # Next change address is the same keypool + info = wallet.getaddressinfo(wallet.getrawchangeaddress()) + assert_equal(prev_seed_id, info['hdseedid']) + assert_equal('m/0\'/0\'/2\'', info['hdkeypath']) + # Next change address is the new keypool + info = wallet.getaddressinfo(wallet.getrawchangeaddress()) + assert_equal(prev_seed_id, info['hdseedid']) + assert_equal('m/0\'/1\'/0\'', info['hdkeypath']) + # External addresses use the same keypool + info = wallet.getaddressinfo(wallet.getnewaddress()) + assert_equal(prev_seed_id, info['hdseedid']) + assert_equal('m/0\'/0\'/3\'', info['hdkeypath']) + + self.log.info('Upgrade non-HD to HD chain split') + copy_non_hd() + wallet.upgradewallet(169900) + assert_equal(169900, wallet.getwalletinfo()['walletversion']) + # Check that the hdchain updated correctly + new_kvs = dump_bdb_kv(node_master_wallet) + hd_chain = new_kvs[b'\x07hdchain'] + assert_equal(32, len(hd_chain)) + hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain) + assert_equal(2, hd_chain_version) + assert_equal(2, internal_counter) + # Drain the keypool by fetching one external key and one change key. Should still be the same keypool + info = wallet.getaddressinfo(wallet.getnewaddress()) + assert 'hdseedid' not in info + assert 'hdkeypath' not in info + info = wallet.getaddressinfo(wallet.getrawchangeaddress()) + assert 'hdseedid' not in info + assert 'hdkeypath' not in info + # The next addresses are HD and should be on different HD chains + info = wallet.getaddressinfo(wallet.getnewaddress()) + ext_id = info['hdseedid'] + assert_equal('m/0\'/0\'/0\'', info['hdkeypath']) + info = wallet.getaddressinfo(wallet.getrawchangeaddress()) + assert_equal(ext_id, info['hdseedid']) + assert_equal('m/0\'/1\'/0\'', info['hdkeypath']) + + self.log.info('KeyMetadata should upgrade when loading into master') + copy_v16() + old_kvs = dump_bdb_kv(v16_3_wallet) + new_kvs = dump_bdb_kv(node_master_wallet) + for k, old_v in old_kvs.items(): + if k.startswith(b'\x07keymeta'): + new_ver, new_create_time, new_kp_str, new_seed_id, new_fpr, new_path_len, new_path, new_has_key_orig = deser_keymeta(BytesIO(new_kvs[k])) + old_ver, old_create_time, old_kp_str, old_seed_id, old_fpr, old_path_len, old_path, old_has_key_orig = deser_keymeta(BytesIO(old_v)) + assert_equal(10, old_ver) + if old_kp_str == b"": # imported things that don't have keymeta (i.e. 
imported coinbase privkeys) won't be upgraded + assert_equal(new_kvs[k], old_v) + continue + assert_equal(12, new_ver) + assert_equal(new_create_time, old_create_time) + assert_equal(new_kp_str, old_kp_str) + assert_equal(new_seed_id, old_seed_id) + assert_equal(0, old_path_len) + assert_equal(new_path_len, len(new_path)) + assert_equal([], old_path) + assert_equal(False, old_has_key_orig) + assert_equal(True, new_has_key_orig) + + # Check that the path is right + built_path = [] + for s in new_kp_str.decode().split('/')[1:]: + h = 0 + if s[-1] == '\'': + s = s[:-1] + h = 0x80000000 + p = int(s) | h + built_path.append(p) + assert_equal(new_path, built_path) + + self.log.info('Upgrading to NO_DEFAULT_KEY should not remove the defaultkey') + copy_split_hd() + # Check the wallet has a default key initially + old_kvs = dump_bdb_kv(node_master_wallet) + defaultkey = old_kvs[b'\x0adefaultkey'] + # Upgrade the wallet. Should still have the same default key + wallet.upgradewallet(159900) + new_kvs = dump_bdb_kv(node_master_wallet) + up_defaultkey = new_kvs[b'\x0adefaultkey'] + assert_equal(defaultkey, up_defaultkey) + # 0.16.3 doesn't have a default key + v16_3_kvs = dump_bdb_kv(v16_3_wallet) + assert b'\x0adefaultkey' not in v16_3_kvs + if __name__ == '__main__': UpgradeWalletTest().main() diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan index 625085c55b..48f81f3dbf 100644 --- a/test/sanitizer_suppressions/tsan +++ b/test/sanitizer_suppressions/tsan @@ -32,7 +32,7 @@ deadlock:CConnman::ForNode deadlock:CConnman::GetNodeStats deadlock:CChainState::ConnectTip deadlock:UpdateTip -deadlock:wallet_tests::CreateWalletFromFile +deadlock:wallet_tests::CreateWallet # WalletBatch (unidentified deadlock) deadlock:WalletBatch @@ -47,3 +47,4 @@ deadlock:src/qt/test/* # External libraries deadlock:libdb race:libzmq +race:epoll_ctl # https://github.com/bitcoin/bitcoin/pull/20218 diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 75257d886b..e2f3fec851 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -1,8 +1,3 @@ -# -fsanitize=undefined suppressions -# ================================= -float-divide-by-zero:validation.cpp -float-divide-by-zero:wallet/wallet.cpp - # -fsanitize=integer suppressions # =============================== # Unsigned integer overflow occurs when the result of an unsigned integer |
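As a usage illustration (not part of the patch, and with a hypothetical wallet path), the dump_bdb_kv() helper added in test/functional/test_framework/bdb.py can be combined with the struct formats used in wallet_upgradewallet.py to inspect the hdchain record of a wallet file directly when run from test/functional:

import struct
from test_framework.bdb import dump_bdb_kv

WALLET_PATH = '/tmp/regtest/wallets/wallet.dat'  # hypothetical path; point this at a real wallet.dat

kvs = dump_bdb_kv(WALLET_PATH)      # dict of raw BDB key bytes -> value bytes
hd_chain = kvs.get(b'\x07hdchain')  # same record key the upgrade test checks
if hd_chain is None:
    print('no hdchain record (non-HD wallet)')
elif len(hd_chain) == 28:
    # CHDChain version 1: version, external chain counter, seed id (no internal counter)
    version, external_counter, seed_id = struct.unpack('<iI20s', hd_chain)
    print('hdchain v%d, external counter %d' % (version, external_counter))
elif len(hd_chain) == 32:
    # CHDChain version 2 (HD chain split): adds the internal chain counter
    version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
    print('hdchain v%d, external %d, internal %d' % (version, external_counter, internal_counter))
else:
    print('unexpected hdchain record size %d' % len(hd_chain))
# Note: seed_id is stored in reverse byte order relative to the hdseedid hex reported by
# getaddressinfo, which is why wallet_upgradewallet.py reverses it before comparing.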