-rw-r--r--  contrib/guix/README.md | 229
-rwxr-xr-x  contrib/guix/guix-build.sh | 39
-rw-r--r--  contrib/guix/libexec/build.sh | 206
-rw-r--r--  contrib/guix/manifest.scm | 158
-rw-r--r--  depends/packages/qt.mk | 3
-rw-r--r--  doc/release-notes.md | 14
-rw-r--r--  src/chainparams.cpp | 8
-rw-r--r--  src/chainparams.h | 8
-rw-r--r--  src/init.cpp | 14
-rw-r--r--  src/interfaces/wallet.cpp | 7
-rw-r--r--  src/netbase.cpp | 222
-rw-r--r--  src/protocol.h | 5
-rw-r--r--  src/qt/guiutil.cpp | 9
-rw-r--r--  src/rpc/blockchain.cpp | 4
-rw-r--r--  src/torcontrol.cpp | 4
-rw-r--r--  src/txdb.cpp | 2
-rw-r--r--  src/validation.cpp | 207
-rw-r--r--  src/validation.h | 150
-rw-r--r--  src/wallet/feebumper.cpp | 9
-rw-r--r--  src/wallet/rpcwallet.cpp | 10
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 7
-rw-r--r--  src/wallet/wallet.cpp | 19
-rw-r--r--  src/wallet/wallet.h | 8
-rw-r--r--  test/README.md | 23
-rwxr-xr-x  test/functional/feature_bip68_sequence.py | 5
-rwxr-xr-x  test/functional/feature_block.py | 2
-rwxr-xr-x  test/functional/feature_cltv.py | 6
-rwxr-xr-x  test/functional/feature_config_args.py | 5
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 2
-rwxr-xr-x  test/functional/feature_rbf.py | 2
-rwxr-xr-x  test/functional/feature_segwit.py | 3
-rwxr-xr-x  test/functional/mempool_accept.py | 1
-rwxr-xr-x  test/functional/mempool_limit.py | 6
-rwxr-xr-x  test/functional/mining_prioritisetransaction.py | 5
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 3
-rwxr-xr-x  test/functional/p2p_invalid_tx.py | 3
-rwxr-xr-x  test/functional/p2p_segwit.py | 6
-rwxr-xr-x  test/functional/rpc_users.py | 182
-rw-r--r--  test/functional/test_framework/util.py | 23
-rwxr-xr-x  test/functional/test_runner.py | 23
-rwxr-xr-x  test/functional/wallet_basic.py | 3
41 files changed, 1251 insertions, 394 deletions
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
new file mode 100644
index 0000000000..4dfa1729a5
--- /dev/null
+++ b/contrib/guix/README.md
@@ -0,0 +1,229 @@
+# Bootstrappable Bitcoin Core Builds
+
+This directory contains the files necessary to perform bootstrappable Bitcoin
+Core builds.
+
+[Bootstrappability][b17e] furthers our binary security guarantees by allowing us
+to _audit and reproduce_ our toolchain instead of blindly _trusting_ binary
+downloads.
+
+We achieve bootstrappability by using Guix as a functional package manager.
+
+## Requirements
+
+Conservatively, an x86_64 machine with:
+
+- 2 or more logical cores
+- 4GB of free disk space on the partition that /gnu/store will reside in
+- 24GB of free disk space on the partition that the Bitcoin Core git repository
+ resides in
+
+> Note: these requirements are slightly less onerous than those of Gitian builds
+
+## Setup
+
+### Installing Guix
+
+If you're just testing this out, you can use the
+[Dockerfile][fanquake/guix-docker] for convenience. It automatically speeds up
+your builds by [using substitutes](#speeding-up-builds-with-substitute-servers).
+If you don't want this behaviour, refer to the [next
+section](#choosing-your-security-model).
+
+Otherwise, follow the [Guix installation guide][guix/bin-install].
+
+> Note: For those who like to keep their filesystems clean, Guix is designed to
+> be very standalone and _will not_ conflict with your system's package
+> manager/existing setup. It _only_ touches `/var/guix`, `/gnu`, and
+> `~/.config/guix`.
+
+### Choosing your security model
+
+Guix allows us to achieve better binary security by using our CPU time to build
+everything from scratch. However, it doesn't sacrifice user choice in pursuit of
+this: users can decide whether or not to bootstrap and to use substitutes.
+
+After installation, you may want to consider [adding substitute
+servers](#speeding-up-builds-with-substitute-servers) to speed up your build if
+that fits your security model (say, if you're just testing that this works).
+This is skippable if you're using the [Dockerfile][fanquake/guix-docker].
+
+If you prefer not to use any substitutes, make sure to set
+`ADDITIONAL_GUIX_ENVIRONMENT_FLAGS` like the following snippet. The first build
+will take a while, but the resulting packages will be cached for future builds.
+
+```sh
+export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--no-substitutes'
+```
+
+Likewise, to perform a bootstrapped build (takes even longer):
+
+```sh
+export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap --no-substitutes'
+```
+
+### Using the right Guix
+
+Once Guix is installed, deploy our patched version into your current Guix
+profile. The changes there are slowly being upstreamed.
+
+```sh
+guix pull --url=https://github.com/dongcarl/guix.git \
+ --commit=82c77e52b8b46e0a3aad2cb12307c2e30547deec \
+ --max-jobs=4 # change accordingly
+```
+
+Make sure that you are using your current profile. (You are prompted to do this
+at the end of the `guix pull`)
+
+```bash
+export PATH="${HOME}/.config/guix/current/bin${PATH:+:}$PATH"
+```
+
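+As an optional sanity check (a sketch; exact output will vary with your setup),
+you can confirm that the patched `guix` is the one now on your `PATH`:
+
+```sh
+command -v guix   # should resolve to ~/.config/guix/current/bin/guix
+guix describe     # should report commit 82c77e52b8b46e0a3aad2cb12307c2e30547deec
+```
+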
+> Note: There is ongoing work to eliminate this entire section using Guix
+> [inferiors][guix/inferiors] and [channels][guix/channels].
+
+## Usage
+
+### As a Development Environment
+
+For a Bitcoin Core depends development environment, simply invoke
+
+```sh
+guix environment --manifest=contrib/guix/manifest.scm
+```
+
+And you'll land back in your shell with all the build dependencies required for
+a `depends` build injected into your environment.
+
+### As a Tool for Deterministic Builds
+
+From the top of a clean Bitcoin Core repository:
+
+```sh
+./contrib/guix/guix-build.sh
+```
+
+After the build finishes successfully (check the status code please), compare
+hashes:
+
+```sh
+find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
+```
+
+#### Recognized environment variables
+
+* _**HOSTS**_
+
+ Override the space-separated list of platform triples for which to perform a
+ bootstrappable build. _(defaults to "i686-linux-gnu x86\_64-linux-gnu
+ arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu")_
+
+  > Windows and OS X platform triplet support is a work in progress.
+
+* _**SOURCES_PATH**_
+
+ Set the depends tree download cache for sources. This is passed through to the
+ depends tree. Setting this to the same directory across multiple builds of the
+ depends tree can eliminate unnecessary redownloading of package sources.
+
+* _**MAX_JOBS**_
+
+  Override the maximum number of jobs to run simultaneously; you might want to
+ do so on a memory-limited machine. This may be passed to `make` as in `make
+ --jobs="$MAX_JOBS"` or `xargs` as in `xargs -P"$MAX_JOBS"`. _(defaults to the
+ value of `nproc` outside the container)_
+
+* _**SOURCE_DATE_EPOCH**_
+
+  Override the reference UNIX timestamp used for bit-for-bit reproducibility;
+  the variable name conforms to the [standard][r12e/source-date-epoch]. _(defaults
+ to the output of `$(git log --format=%at -1)`)_
+
+* _**V**_
+
+ If non-empty, will pass `V=1` to all `make` invocations, making `make` output
+ verbose.
+
+* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
+
+ Additional flags to be passed to `guix environment`. For a fully-bootstrapped
+ build, set this to `--bootstrap --no-substitutes` (refer to the [security
+ model section](#choosing-your-security-model) for more details). Note that a
+ fully-bootstrapped build will take quite a long time on the first run.
+
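+Putting a few of these together, a hypothetical invocation might look like the
+following (all values are illustrative only; adjust them for your machine):
+
+```sh
+env HOSTS='x86_64-linux-gnu arm-linux-gnueabihf' \
+    SOURCES_PATH="$HOME/depends-sources-cache" \
+    MAX_JOBS=4 \
+    ./contrib/guix/guix-build.sh
+```
+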
+## Tips and Tricks
+
+### Speeding up builds with substitute servers
+
+_Everything in this section is done automatically by the convenience
+[Dockerfile][fanquake/guix-docker]._
+
+For those who are used to life in the fast _(and trustful)_ lane, you can use
+[substitute servers][guix/substitutes] to enable binary downloads of packages.
+
+> If you only want to use substitutes from the official Guix build farm and
+> have authorized the build farm's signing key during Guix's installation, you
+> don't need to do anything.
+
+#### Authorize the signing keys
+
+For the official Guix build farm at https://ci.guix.gnu.org, run as root:
+
+```
+guix archive --authorize < ~root/.config/guix/current/share/guix/ci.guix.gnu.org.pub
+```
+
+For dongcarl's substitute server at https://guix.carldong.io, run as root:
+
+```sh
+wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize
+```
+
+#### Use the substitute servers
+
+The official Guix build farm at https://ci.guix.gnu.org is automatically used
+unless the `--no-substitutes` flag is supplied.
+
+This can be overridden for all `guix` invocations by passing the
+`--substitute-urls` option to your invocation of `guix-daemon`. This can also be
+overridden on a call-by-call basis by passing the same `--substitute-urls`
+option to client tools such as `guix environment`.
+
+To use dongcarl's substitute server for Bitcoin Core builds after having
+[authorized his signing key](#authorize-the-signing-keys):
+
+```
+export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--substitute-urls="https://guix.carldong.io https://ci.guix.gnu.org"'
+```
+
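+If you run your own `guix-daemon`, the same URLs can also be set daemon-wide
+(a sketch, assuming the daemon is started manually as root; adapt it to your
+init system):
+
+```sh
+guix-daemon --build-users-group=guixbuild \
+    --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
+```
+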
+## FAQ
+
+### How can I trust the binary installation?
+
+As mentioned at the bottom of [this manual page][guix/bin-install]:
+
+> The binary installation tarballs can be (re)produced and verified simply by
+> running the following command in the Guix source tree:
+>
+> make guix-binary.x86_64-linux.tar.xz
+
+### When will Guix be packaged in Debian?
+
+Vagrant Cascadian has been making good progress on this
+[here][debian/guix-package]. We have all the pieces needed to put up an APT
+repository and will likely put one up soon.
+
+[b17e]: http://bootstrappable.org/
+[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
+
+[guix/install.sh]: https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh
+[guix/bin-install]: https://www.gnu.org/software/guix/manual/en/html_node/Binary-Installation.html
+[guix/env-setup]: https://www.gnu.org/software/guix/manual/en/html_node/Build-Environment-Setup.html
+[guix/substitutes]: https://www.gnu.org/software/guix/manual/en/html_node/Substitutes.html
+[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html
+[guix/inferiors]: https://www.gnu.org/software/guix/manual/en/html_node/Inferiors.html
+[guix/channels]: https://www.gnu.org/software/guix/manual/en/html_node/Channels.html
+
+[debian/guix-package]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=850644
+[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix
diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh
new file mode 100755
index 0000000000..f8ba8c7ed2
--- /dev/null
+++ b/contrib/guix/guix-build.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Determine the maximum number of jobs to run simultaneously (overridable by
+# environment)
+MAX_JOBS="${MAX_JOBS:-$(nproc)}"
+
+# Download the depends sources now as we won't have internet access in the build
+# container
+make -C "${PWD}/depends" -j"$MAX_JOBS" download ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"}
+
+# Determine the reference time used for determinism (overridable by environment)
+SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}"
+
+# Deterministically build Bitcoin Core for HOSTs (overridable by environment)
+for host in ${HOSTS=i686-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu}; do
+
+ # Display proper warning when the user interrupts the build
+ trap 'echo "** INT received while building ${host}, you may want to clean up the relevant output and distsrc-* directories before rebuilding"' INT
+
+ # Run the build script 'contrib/guix/libexec/build.sh' in the build
+ # container specified by 'contrib/guix/manifest.scm'
+ # shellcheck disable=SC2086
+ guix environment --manifest="${PWD}/contrib/guix/manifest.scm" \
+ --container \
+ --pure \
+ --no-cwd \
+ --share="$PWD"=/bitcoin \
+ ${SOURCES_PATH:+--share="$SOURCES_PATH"} \
+ ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
+ -- env HOST="$host" \
+ MAX_JOBS="$MAX_JOBS" \
+ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
+ ${V:+V=1} \
+ ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
+ bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh"
+
+done
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
new file mode 100644
index 0000000000..56b972a5cb
--- /dev/null
+++ b/contrib/guix/libexec/build.sh
@@ -0,0 +1,206 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+set -e -o pipefail
+
+# Check that environment variables assumed to be set by the environment are set
+echo "Building for platform triple ${HOST:?not set} with reference timestamp ${SOURCE_DATE_EPOCH:?not set}..."
+echo "At most ${MAX_JOBS:?not set} jobs will run at once..."
+
+#####################
+# Environment Setup #
+#####################
+
+# The depends folder also serves as a base-prefix for depends packages for
+# $HOSTs after successfully building.
+BASEPREFIX="${PWD}/depends"
+
+# Setup an output directory for our build
+OUTDIR="${OUTDIR:-${PWD}/output}"
+[ -e "$OUTDIR" ] || mkdir -p "$OUTDIR"
+
+# Setup the directory where our Bitcoin Core build for HOST will occur
+DISTSRC="${DISTSRC:-${PWD}/distsrc-${HOST}}"
+if [ -e "$DISTSRC" ]; then
+ echo "DISTSRC directory '${DISTSRC}' exists, probably because of previous builds... Aborting..."
+ exit 1
+else
+ mkdir -p "$DISTSRC"
+fi
+
+# Given a package name and an output name, return the path of that output in our
+# current guix environment
+store_path() {
+ grep --extended-regexp "/[^-]{32}-${1}-cross-${HOST}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \
+ | head --lines=1 \
+ | sed --expression='s|^[[:space:]]*"||' \
+ --expression='s|"[[:space:]]*$||'
+}
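+
+# For example (hypothetical store item, shown for illustration only), with
+# HOST=x86_64-linux-gnu, `store_path glibc static` would print something like:
+#   /gnu/store/<32-character-hash>-glibc-cross-x86_64-linux-gnu-2.27-static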
+
+# Determine output paths to use in CROSS_* environment variables
+CROSS_GLIBC="$(store_path glibc)"
+CROSS_GLIBC_STATIC="$(store_path glibc static)"
+CROSS_KERNEL="$(store_path linux-libre-headers)"
+CROSS_GCC="$(store_path gcc)"
+
+# Set environment variables to point Guix's cross-toolchain to the right
+# includes/libs for $HOST
+export CROSS_C_INCLUDE_PATH="${CROSS_GCC}/include:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include"
+export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include"
+export CROSS_LIBRARY_PATH="${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib:${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_KERNEL}/lib"
+
+# Disable Guix ld auto-rpath behavior
+export GUIX_LD_WRAPPER_DISABLE_RPATH=yes
+
+# Make /usr/bin if it doesn't exist
+[ -e /usr/bin ] || mkdir -p /usr/bin
+
+# Symlink file and env to a conventional path
+[ -e /usr/bin/file ] || ln -s --no-dereference "$(command -v file)" /usr/bin/file
+[ -e /usr/bin/env ] || ln -s --no-dereference "$(command -v env)" /usr/bin/env
+
+# Determine the correct value for -Wl,--dynamic-linker for the current $HOST
+glibc_dynamic_linker=$(
+ case "$HOST" in
+ i686-linux-gnu) echo /lib/ld-linux.so.2 ;;
+ x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;;
+ arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;;
+ aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;;
+ riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;;
+ *) exit 1 ;;
+ esac
+)
+
+# Environment variables for determinism
+export QT_RCC_TEST=1
+export QT_RCC_SOURCE_DATE_OVERRIDE=1
+export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name"
+export TZ="UTC"
+
+####################
+# Depends Building #
+####################
+
+# Build the depends tree, overriding variables that assume multilib gcc
+make -C depends --jobs="$MAX_JOBS" HOST="$HOST" \
+ ${V:+V=1} \
+ ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \
+ i686_linux_CC=i686-linux-gnu-gcc \
+ i686_linux_CXX=i686-linux-gnu-g++ \
+ i686_linux_AR=i686-linux-gnu-ar \
+ i686_linux_RANLIB=i686-linux-gnu-ranlib \
+ i686_linux_NM=i686-linux-gnu-nm \
+ i686_linux_STRIP=i686-linux-gnu-strip \
+ x86_64_linux_CC=x86_64-linux-gnu-gcc \
+ x86_64_linux_CXX=x86_64-linux-gnu-g++ \
+ x86_64_linux_AR=x86_64-linux-gnu-ar \
+ x86_64_linux_RANLIB=x86_64-linux-gnu-ranlib \
+ x86_64_linux_NM=x86_64-linux-gnu-nm \
+ x86_64_linux_STRIP=x86_64-linux-gnu-strip \
+ qt_config_opts_i686_linux='-platform linux-g++ -xplatform bitcoin-linux-g++'
+
+
+###########################
+# Source Tarball Building #
+###########################
+
+# Create the source tarball and move it to "${OUTDIR}/src" if not already there
+if [ -z "$(find "${OUTDIR}/src" -name 'bitcoin-*.tar.gz')" ]; then
+ ./autogen.sh
+ env CONFIG_SITE="${BASEPREFIX}/${HOST}/share/config.site" ./configure --prefix=/
+ make dist GZIP_ENV='-9n' ${V:+V=1}
+ mkdir -p "${OUTDIR}/src"
+ mv "$(find "${PWD}" -name 'bitcoin-*.tar.gz')" "${OUTDIR}/src/"
+fi
+
+# Determine the full path to our source tarball
+SOURCEDIST="$(find "${OUTDIR}/src" -name 'bitcoin-*.tar.gz')"
+# Determine our distribution name (e.g. bitcoin-0.18.0)
+DISTNAME="$(basename "$SOURCEDIST" '.tar.gz')"
+
+###########################
+# Binary Tarball Building #
+###########################
+
+# Create a spec file to normalize ssp linking behaviour
+spec_file="$(mktemp)"
+cat << EOF > "$spec_file"
+*link_ssp:
+%{fstack-protector|fstack-protector-all|fstack-protector-strong|fstack-protector-explicit:}
+EOF
+
+# Similar flags to Gitian
+CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests"
+HOST_CFLAGS="-O2 -g -specs=${spec_file} -ffile-prefix-map=${PWD}=."
+HOST_CXXFLAGS="-O2 -g -specs=${spec_file} -ffile-prefix-map=${PWD}=."
+HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++"
+
+# Make $HOST-specific native binaries from depends available in $PATH
+export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}"
+(
+ cd "$DISTSRC"
+
+ # Extract the source tarball
+ tar --strip-components=1 -xf "${SOURCEDIST}"
+
+ # Configure this DISTSRC for $HOST
+ # shellcheck disable=SC2086
+ env CONFIG_SITE="${BASEPREFIX}/${HOST}/share/config.site" \
+ ./configure --prefix=/ \
+ --disable-ccache \
+ --disable-maintainer-mode \
+ --disable-dependency-tracking \
+ ${CONFIGFLAGS} \
+ CFLAGS="${HOST_CFLAGS}" \
+ CXXFLAGS="${HOST_CXXFLAGS}" \
+ LDFLAGS="${HOST_LDFLAGS}"
+
+ sed -i.old 's/-lstdc++ //g' config.status libtool src/univalue/config.status src/univalue/libtool
+
+ # Build Bitcoin Core
+ make --jobs="$MAX_JOBS" ${V:+V=1}
+
+ # Perform basic ELF security checks on a series of executables.
+ make -C src --jobs=1 check-security ${V:+V=1}
+ # Check that executables only contain allowed gcc, glibc and libstdc++
+ # version symbols for Linux distro back-compatibility.
+ make -C src --jobs=1 check-symbols ${V:+V=1}
+
+ # Setup the directory where our Bitcoin Core build for HOST will be
+ # installed. This directory will also later serve as the input for our
+ # binary tarballs.
+ INSTALLPATH="${PWD}/installed/${DISTNAME}"
+ mkdir -p "${INSTALLPATH}"
+ # Install built Bitcoin Core to $INSTALLPATH
+ make install DESTDIR="${INSTALLPATH}" ${V:+V=1}
+ (
+ cd installed
+
+ # Prune libtool and object archives
+ find . -name "lib*.la" -delete
+ find . -name "lib*.a" -delete
+
+ # Prune pkg-config files
+ rm -r "${DISTNAME}/lib/pkgconfig"
+
+ # Split binaries and libraries from their debug symbols
+ {
+ find "${DISTNAME}/bin" -type f -executable -print0
+ find "${DISTNAME}/lib" -type f -print0
+ } | xargs -0 -n1 -P"$MAX_JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg
+
+ cp "${DISTSRC}/doc/README.md" "${DISTNAME}/"
+
+ # Finally, deterministically produce {non-,}debug binary tarballs ready
+ # for release
+ find "${DISTNAME}" -not -name "*.dbg" -print0 \
+ | sort --zero-terminated \
+ | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
+ | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \
+ || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 )
+ find "${DISTNAME}" -name "*.dbg" -print0 \
+ | sort --zero-terminated \
+ | tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
+ | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" \
+ || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" && exit 1 )
+ )
+)
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
new file mode 100644
index 0000000000..ca11d7a0f0
--- /dev/null
+++ b/contrib/guix/manifest.scm
@@ -0,0 +1,158 @@
+(use-modules (gnu)
+ (gnu packages)
+ (gnu packages autotools)
+ (gnu packages base)
+ (gnu packages bash)
+ (gnu packages check)
+ (gnu packages commencement)
+ (gnu packages compression)
+ (gnu packages cross-base)
+ (gnu packages file)
+ (gnu packages gawk)
+ (gnu packages gcc)
+ (gnu packages linux)
+ (gnu packages perl)
+ (gnu packages pkg-config)
+ (gnu packages python)
+ (gnu packages shells)
+ (guix build-system trivial)
+ (guix gexp)
+ (guix packages)
+ (guix profiles)
+ (guix utils))
+
+(define (make-ssp-fixed-gcc xgcc)
+ "Given a XGCC package, return a modified package that uses the SSP function
+from glibc instead of from libssp.so. Taken from:
+http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html"
+ (package
+ (inherit xgcc)
+ (arguments
+ (substitute-keyword-arguments (package-arguments xgcc)
+ ((#:make-flags flags)
+ `(cons "gcc_cv_libc_provides_ssp=yes" ,flags))))))
+
+(define (make-gcc-rpath-link xgcc)
+ "Given a XGCC package, return a modified package that replace each instance of
+-rpath in the default system spec that's inserted by Guix with -rpath-link"
+ (package
+ (inherit xgcc)
+ (arguments
+ (substitute-keyword-arguments (package-arguments xgcc)
+ ((#:phases phases)
+ `(modify-phases ,phases
+ (add-after 'pre-configure 'replace-rpath-with-rpath-link
+ (lambda _
+ (substitute* (cons "gcc/config/rs6000/sysv4.h"
+ (find-files "gcc/config"
+ "^gnu-user.*\\.h$"))
+ (("-rpath=") "-rpath-link="))
+ #t))))))))
+
+(define (make-cross-toolchain target
+ base-gcc-for-libc
+ base-kernel-headers
+ base-libc
+ base-gcc)
+ "Create a cross-compilation toolchain package for TARGET"
+ (let* ((xbinutils (cross-binutils target))
+ ;; 1. Build a cross-compiling gcc without targeting any libc, derived
+ ;; from BASE-GCC-FOR-LIBC
+ (xgcc-sans-libc (cross-gcc target
+ #:xgcc base-gcc-for-libc
+ #:xbinutils xbinutils))
+ ;; 2. Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived
+ ;; from BASE-KERNEL-HEADERS
+ (xkernel (cross-kernel-headers target
+ base-kernel-headers
+ xgcc-sans-libc
+ xbinutils))
+ ;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL,
+ ;; derived from BASE-LIBC
+ (xlibc (cross-libc target
+ base-libc
+ xgcc-sans-libc
+ xbinutils
+ xkernel))
+ ;; 4. Build a cross-compiling gcc targeting XLIBC, derived from
+ ;; BASE-GCC
+ (xgcc (cross-gcc target
+ #:xgcc base-gcc
+ #:xbinutils xbinutils
+ #:libc xlibc)))
+ ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and
+ ;; XGCC
+ (package
+ (name (string-append target "-toolchain"))
+ (version (package-version xgcc))
+ (source #f)
+ (build-system trivial-build-system)
+ (arguments '(#:builder (begin (mkdir %output) #t)))
+ (propagated-inputs
+ `(("binutils" ,xbinutils)
+ ("libc" ,xlibc)
+ ("libc:static" ,xlibc "static")
+ ("gcc" ,xgcc)))
+ (synopsis (string-append "Complete GCC tool chain for " target))
+ (description (string-append "This package provides a complete GCC tool
+chain for " target " development."))
+ (home-page (package-home-page xgcc))
+ (license (package-license xgcc)))))
+
+(define* (make-bitcoin-cross-toolchain target
+ #:key
+ (base-gcc-for-libc gcc-5)
+ (base-kernel-headers linux-libre-headers-4.19)
+ (base-libc glibc-2.27)
+ (base-gcc (make-gcc-rpath-link
+ (make-ssp-fixed-gcc gcc-9))))
+ "Convienience wrapper around MAKE-CROSS-TOOLCHAIN with default values
+desirable for building Bitcoin Core release binaries."
+ (make-cross-toolchain target
+ base-gcc-for-libc
+ base-kernel-headers
+ base-libc
+ base-gcc))
+
+(packages->manifest
+ (list ;; The Basics
+ bash-minimal
+ which
+ coreutils
+ util-linux
+ ;; File(system) inspection
+ file
+ grep
+ diffutils
+ findutils
+ ;; File transformation
+ patch
+ gawk
+ sed
+ ;; Compression and archiving
+ tar
+ bzip2
+ gzip
+ xz
+ zlib
+ ;; Build tools
+ gnu-make
+ libtool
+ autoconf
+ automake
+ pkg-config
+ ;; Scripting
+ perl
+ python-3.7
+ ;; Native gcc 9 toolchain targeting glibc 2.27
+ (make-gcc-toolchain gcc-9 glibc-2.27)
+ ;; Cross gcc 9 toolchains targeting glibc 2.27
+ (make-bitcoin-cross-toolchain "i686-linux-gnu")
+ (make-bitcoin-cross-toolchain "x86_64-linux-gnu")
+ (make-bitcoin-cross-toolchain "aarch64-linux-gnu")
+ (make-bitcoin-cross-toolchain "arm-linux-gnueabihf")
+ ;; The glibc 2.27 for riscv64 needs gcc 7 to successfully build (see:
+ ;; https://www.gnu.org/software/gcc/gcc-7/changes.html#riscv). The final
+ ;; toolchain is still a gcc 9 toolchain targeting glibc 2.27.
+ (make-bitcoin-cross-toolchain "riscv64-linux-gnu"
+ #:base-gcc-for-libc gcc-7)))
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 23cde9ee6d..7c7081eaba 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -160,7 +160,8 @@ define $(package)_preprocess_cmds
echo "QMAKE_LINK_OBJECT_SCRIPT = object_script" >> qtbase/mkspecs/win32-g++/qmake.conf &&\
sed -i.old "s|QMAKE_CFLAGS = |!host_build: QMAKE_CFLAGS = $($(package)_cflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \
sed -i.old "s|QMAKE_LFLAGS = |!host_build: QMAKE_LFLAGS = $($(package)_ldflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \
- sed -i.old "s|QMAKE_CXXFLAGS = |!host_build: QMAKE_CXXFLAGS = $($(package)_cxxflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf
+ sed -i.old "s|QMAKE_CXXFLAGS = |!host_build: QMAKE_CXXFLAGS = $($(package)_cxxflags) $($(package)_cppflags) |" qtbase/mkspecs/win32-g++/qmake.conf && \
+ sed -i.old "s/LIBRARY_PATH/(CROSS_)?\0/g" qtbase/mkspecs/features/toolchain.prf
endef
define $(package)_config_cmds
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 9efb6cbabb..83c84d34c9 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -103,6 +103,20 @@ Low-level Changes section below.
Low-level changes
=================
+RPC
+---
+
+
+Tests
+-----
+
+- The regression test chain, which can be enabled by the `-regtest` command line
+  flag, now requires transactions not to violate standard policy by default.
+  Making the default the same as for mainnet makes it easier to test mainnet
+  behavior on regtest. Note that testnet still allows non-standard transactions
+  by default and that the policy can be locally adjusted with the
+  `-acceptnonstdtxn` command line flag for both test chains, as shown below.
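+
+  For example, to restore the previous regtest behaviour for local testing, a
+  node could be started with (illustrative only):
+
+  ```sh
+  bitcoind -regtest -acceptnonstdtxn=1
+  ```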
+
Configuration
------------
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index b8e0ea23dd..f937e2754b 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -141,7 +141,7 @@ public:
fDefaultConsistencyChecks = false;
fRequireStandard = true;
- fMineBlocksOnDemand = false;
+ m_is_test_chain = false;
checkpointData = {
{
@@ -247,7 +247,7 @@ public:
fDefaultConsistencyChecks = false;
fRequireStandard = false;
- fMineBlocksOnDemand = false;
+ m_is_test_chain = true;
checkpointData = {
@@ -324,8 +324,8 @@ public:
vSeeds.clear(); //!< Regtest mode doesn't have any DNS seeds.
fDefaultConsistencyChecks = true;
- fRequireStandard = false;
- fMineBlocksOnDemand = true;
+ fRequireStandard = true;
+ m_is_test_chain = true;
checkpointData = {
{
diff --git a/src/chainparams.h b/src/chainparams.h
index 6ff3dbb7e5..b3fcd77cea 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -66,13 +66,15 @@ public:
bool DefaultConsistencyChecks() const { return fDefaultConsistencyChecks; }
/** Policy: Filter transactions that do not match well-defined patterns */
bool RequireStandard() const { return fRequireStandard; }
+ /** If this is a test chain */
+ bool IsTestChain() const { return m_is_test_chain; }
uint64_t PruneAfterHeight() const { return nPruneAfterHeight; }
/** Minimum free space (in GB) needed for data directory */
uint64_t AssumedBlockchainSize() const { return m_assumed_blockchain_size; }
/** Minimum free space (in GB) needed for data directory when pruned; Does not include prune target*/
uint64_t AssumedChainStateSize() const { return m_assumed_chain_state_size; }
- /** Make miner stop after a block is found. In RPC, don't return until nGenProcLimit blocks are generated */
- bool MineBlocksOnDemand() const { return fMineBlocksOnDemand; }
+ /** Whether it is possible to mine blocks on demand (no retargeting) */
+ bool MineBlocksOnDemand() const { return consensus.fPowNoRetargeting; }
/** Return the BIP70 network string (main, test or regtest) */
std::string NetworkIDString() const { return strNetworkID; }
/** Return true if the fallback fee is by default enabled for this network */
@@ -101,7 +103,7 @@ protected:
std::vector<SeedSpec6> vFixedSeeds;
bool fDefaultConsistencyChecks;
bool fRequireStandard;
- bool fMineBlocksOnDemand;
+ bool m_is_test_chain;
CCheckpointData checkpointData;
ChainTxData chainTxData;
bool m_fallback_fee_enabled;
diff --git a/src/init.cpp b/src/init.cpp
index 5d7c3b9af7..26b2f9d6c2 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -492,7 +492,7 @@ void SetupServerArgs()
"and level 4 tries to reconnect the blocks, "
"each level includes the checks of the previous levels "
"(0-4, default: %u)", DEFAULT_CHECKLEVEL), true, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, ::ChainActive() and mapBlocksUnlinked occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), true, OptionsCategory::DEBUG_TEST);
+ gArgs.AddArg("-checkblockindex", strprintf("Do a full consistency check for the block tree, setBlockIndexCandidates, ::ChainActive() and mapBlocksUnlinked occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), true, OptionsCategory::DEBUG_TEST);
gArgs.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), true, OptionsCategory::DEBUG_TEST);
gArgs.AddArg("-checkpoints", strprintf("Disable expensive verification for known chain history (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), true, OptionsCategory::DEBUG_TEST);
gArgs.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", true, OptionsCategory::DEBUG_TEST);
@@ -1150,8 +1150,9 @@ bool AppInitParameterInteraction()
}
fRequireStandard = !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
- if (chainparams.RequireStandard() && !fRequireStandard)
+ if (!chainparams.IsTestChain() && !fRequireStandard) {
return InitError(strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString()));
+ }
nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp);
if (!g_wallet_init_interface.ParameterInteraction()) return false;
@@ -1517,7 +1518,8 @@ bool AppInitMain(InitInterfaces& interfaces)
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way around).
- if (!mapBlockIndex.empty() && !LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) {
+ if (!::BlockIndex().empty() &&
+ !LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) {
return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?"));
}
@@ -1538,7 +1540,7 @@ bool AppInitMain(InitInterfaces& interfaces)
}
// At this point we're either in reindex or we've loaded a useful
- // block tree into mapBlockIndex!
+ // block tree into BlockIndex()!
pcoinsdbview.reset(new CCoinsViewDB(nCoinDBCache, false, fReset || fReindexChainState));
pcoinscatcher.reset(new CCoinsViewErrorCatcher(pcoinsdbview.get()));
@@ -1577,7 +1579,7 @@ bool AppInitMain(InitInterfaces& interfaces)
if (!fReset) {
// Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
// It both disconnects blocks based on ::ChainActive(), and drops block data in
- // mapBlockIndex based on lack of available witness data.
+ // BlockIndex() based on lack of available witness data.
uiInterface.InitMessage(_("Rewinding blocks..."));
if (!RewindBlockIndex(chainparams)) {
strLoadError = _("Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain");
@@ -1749,7 +1751,7 @@ bool AppInitMain(InitInterfaces& interfaces)
//// debug print
{
LOCK(cs_main);
- LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size());
+ LogPrintf("block tree size = %u\n", ::BlockIndex().size());
chain_active_height = ::ChainActive().Height();
}
LogPrintf("nBestHeight = %d\n", chain_active_height);
diff --git a/src/interfaces/wallet.cpp b/src/interfaces/wallet.cpp
index 8982c70fd2..deb1618ceb 100644
--- a/src/interfaces/wallet.cpp
+++ b/src/interfaces/wallet.cpp
@@ -36,7 +36,7 @@ namespace {
class PendingWalletTxImpl : public PendingWalletTx
{
public:
- explicit PendingWalletTxImpl(CWallet& wallet) : m_wallet(wallet), m_dest(&wallet) {}
+ explicit PendingWalletTxImpl(CWallet& wallet) : m_wallet(wallet) {}
const CTransaction& get() override { return *m_tx; }
@@ -47,7 +47,7 @@ public:
auto locked_chain = m_wallet.chain().lock();
LOCK(m_wallet.cs_wallet);
CValidationState state;
- if (!m_wallet.CommitTransaction(m_tx, std::move(value_map), std::move(order_form), m_dest, state)) {
+ if (!m_wallet.CommitTransaction(m_tx, std::move(value_map), std::move(order_form), state)) {
reject_reason = state.GetRejectReason();
return false;
}
@@ -56,7 +56,6 @@ public:
CTransactionRef m_tx;
CWallet& m_wallet;
- ReserveDestination m_dest;
};
//! Construct wallet tx struct.
@@ -238,7 +237,7 @@ public:
auto locked_chain = m_wallet->chain().lock();
LOCK(m_wallet->cs_wallet);
auto pending = MakeUnique<PendingWalletTxImpl>(*m_wallet);
- if (!m_wallet->CreateTransaction(*locked_chain, recipients, pending->m_tx, pending->m_dest, fee, change_pos,
+ if (!m_wallet->CreateTransaction(*locked_chain, recipients, pending->m_tx, fee, change_pos,
fail_reason, coin_control, sign)) {
return {};
}
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 78b3b6ae3a..53e5985a0f 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -65,6 +65,12 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
{
CNetAddr addr;
+ // From our perspective, onion addresses are not hostnames but rather
+ // direct encodings of CNetAddr much like IPv4 dotted-decimal notation
+ // or IPv6 colon-separated hextet notation. Since we can't use
+ // getaddrinfo to decode them and it wouldn't make sense to resolve
+ // them, we return a network address representing it instead. See
+ // CNetAddr::SetSpecial(const std::string&) for more details.
if (addr.SetSpecial(std::string(pszName))) {
vIP.push_back(addr);
return true;
@@ -74,15 +80,25 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
struct addrinfo aiHint;
memset(&aiHint, 0, sizeof(struct addrinfo));
+ // We want a TCP port, which is a streaming socket type
aiHint.ai_socktype = SOCK_STREAM;
aiHint.ai_protocol = IPPROTO_TCP;
+ // We don't care which address family (IPv4 or IPv6) is returned
aiHint.ai_family = AF_UNSPEC;
+ // If we allow lookups of hostnames, use the AI_ADDRCONFIG flag to only
+ // return addresses whose family we have an address configured for.
+ //
+ // If we don't allow lookups, then use the AI_NUMERICHOST flag for
+ // getaddrinfo to only decode numerical network addresses and suppress
+ // hostname lookups.
aiHint.ai_flags = fAllowLookup ? AI_ADDRCONFIG : AI_NUMERICHOST;
struct addrinfo *aiRes = nullptr;
int nErr = getaddrinfo(pszName, nullptr, &aiHint, &aiRes);
if (nErr)
return false;
+    // Traverse the linked list starting with aiTrav; add all non-internal
+    // IPv4/IPv6 addresses to vIP while respecting nMaxSolutions.
struct addrinfo *aiTrav = aiRes;
while (aiTrav != nullptr && (nMaxSolutions == 0 || vIP.size() < nMaxSolutions))
{
@@ -112,6 +128,21 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
return (vIP.size() > 0);
}
+/**
+ * Resolve a host string to its corresponding network addresses.
+ *
+ * @param pszName The string representing a host. Could be a name or a numerical
+ * IP address (IPv6 addresses in their bracketed form are
+ * allowed).
+ * @param[out] vIP The resulting network addresses to which the specified host
+ * string resolved.
+ *
+ * @returns Whether or not the specified host string successfully resolved to
+ * any resulting network addresses.
+ *
+ * @see Lookup(const char *, std::vector<CService>&, int, bool, unsigned int)
+ * for additional parameter descriptions.
+ */
bool LookupHost(const char *pszName, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
{
std::string strHost(pszName);
@@ -124,6 +155,12 @@ bool LookupHost(const char *pszName, std::vector<CNetAddr>& vIP, unsigned int nM
return LookupIntern(strHost.c_str(), vIP, nMaxSolutions, fAllowLookup);
}
+ /**
+ * Resolve a host string to its first corresponding network address.
+ *
+ * @see LookupHost(const char *, std::vector<CNetAddr>&, unsigned int, bool) for
+ * additional parameter descriptions.
+ */
bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup)
{
std::vector<CNetAddr> vIP;
@@ -134,6 +171,26 @@ bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup)
return true;
}
+/**
+ * Resolve a service string to its corresponding service.
+ *
+ * @param pszName The string representing a service. Could be a name or a
+ * numerical IP address (IPv6 addresses should be in their
+ * disambiguated bracketed form), optionally followed by a port
+ * number. (e.g. example.com:8333 or
+ * [2001:db8:85a3:8d3:1319:8a2e:370:7348]:420)
+ * @param[out] vAddr The resulting services to which the specified service string
+ * resolved.
+ * @param portDefault The default port for resulting services if not specified
+ * by the service string.
+ * @param fAllowLookup Whether or not hostname lookups are permitted. If yes,
+ * external queries may be performed.
+ * @param nMaxSolutions The maximum number of results we want, specifying 0
+ * means "as many solutions as we get."
+ *
+ * @returns Whether or not the service string successfully resolved to any
+ * resulting services.
+ */
bool Lookup(const char *pszName, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions)
{
if (pszName[0] == 0)
@@ -152,6 +209,12 @@ bool Lookup(const char *pszName, std::vector<CService>& vAddr, int portDefault,
return true;
}
+/**
+ * Resolve a service string to its first corresponding service.
+ *
+ * @see Lookup(const char *, std::vector<CService>&, int, bool, unsigned int)
+ * for additional parameter descriptions.
+ */
bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLookup)
{
std::vector<CService> vService;
@@ -162,6 +225,16 @@ bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLoo
return true;
}
+/**
+ * Resolve a service string with a numeric IP to its first corresponding
+ * service.
+ *
+ * @returns The resulting CService if the resolution was successful, [::]:0
+ * otherwise.
+ *
+ * @see Lookup(const char *, CService&, int, bool) for additional parameter
+ * descriptions.
+ */
CService LookupNumeric(const char *pszName, int portDefault)
{
CService addr;
@@ -231,22 +304,29 @@ enum class IntrRecvError {
};
/**
- * Read bytes from socket. This will either read the full number of bytes requested
- * or return False on error or timeout.
- * This function can be interrupted by calling InterruptSocks5()
+ * Try to read a specified number of bytes from a socket. Please read the "see
+ * also" section for more detail.
*
- * @param data Buffer to receive into
- * @param len Length of data to receive
- * @param timeout Timeout in milliseconds for receive operation
+ * @param data The buffer where the read bytes should be stored.
+ * @param len The number of bytes to read into the specified buffer.
+ * @param timeout The total timeout in milliseconds for this read.
+ * @param hSocket The socket (has to be in non-blocking mode) from which to read
+ * bytes.
*
- * @note This function requires that hSocket is in non-blocking mode.
+ * @returns An IntrRecvError indicating the resulting status of this read.
+ * IntrRecvError::OK only if all of the specified number of bytes were
+ * read.
+ *
+ * @see This function can be interrupted by calling InterruptSocks5(bool).
+ * Sockets can be made non-blocking with SetSocketNonBlocking(const
+ * SOCKET&, bool).
*/
static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, const SOCKET& hSocket)
{
int64_t curTime = GetTimeMillis();
int64_t endTime = curTime + timeout;
- // Maximum time to wait in one select call. It will take up until this time (in millis)
- // to break off in case of an interruption.
+ // Maximum time to wait for I/O readiness. It will take up until this time
+ // (in millis) to break off in case of an interruption.
const int64_t maxWait = 1000;
while (len > 0 && curTime < endTime) {
ssize_t ret = recv(hSocket, (char*)data, len, 0); // Optimistically try the recv first
@@ -261,6 +341,8 @@ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, c
if (!IsSelectableSocket(hSocket)) {
return IntrRecvError::NetworkError;
}
+ // Only wait at most maxWait milliseconds at a time, unless
+ // we're approaching the end of the specified total timeout
int timeout_ms = std::min(endTime - curTime, maxWait);
#ifdef USE_POLL
struct pollfd pollfd = {};
@@ -320,7 +402,24 @@ static std::string Socks5ErrorString(uint8_t err)
}
}
-/** Connect using SOCKS5 (as described in RFC1928) */
+/**
+ * Connect to a specified destination service through an already connected
+ * SOCKS5 proxy.
+ *
+ * @param strDest The destination fully-qualified domain name.
+ * @param port The destination port.
+ * @param auth The credentials with which to authenticate with the specified
+ * SOCKS5 proxy.
+ * @param hSocket The SOCKS5 proxy socket.
+ *
+ * @returns Whether or not the operation succeeded.
+ *
+ * @note The specified SOCKS5 proxy socket must already be connected to the
+ * SOCKS5 proxy.
+ *
+ * @see <a href="https://www.ietf.org/rfc/rfc1928.txt">RFC1928: SOCKS Protocol
+ * Version 5</a>
+ */
static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, const SOCKET& hSocket)
{
IntrRecvError recvr;
@@ -328,15 +427,15 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
if (strDest.size() > 255) {
return error("Hostname too long");
}
- // Accepted authentication methods
+ // Construct the version identifier/method selection message
std::vector<uint8_t> vSocks5Init;
- vSocks5Init.push_back(SOCKSVersion::SOCKS5);
+    vSocks5Init.push_back(SOCKSVersion::SOCKS5); // We want the SOCKS5 protocol
if (auth) {
- vSocks5Init.push_back(0x02); // Number of methods
+ vSocks5Init.push_back(0x02); // 2 method identifiers follow...
vSocks5Init.push_back(SOCKS5Method::NOAUTH);
vSocks5Init.push_back(SOCKS5Method::USER_PASS);
} else {
- vSocks5Init.push_back(0x01); // Number of methods
+ vSocks5Init.push_back(0x01); // 1 method identifier follows...
vSocks5Init.push_back(SOCKS5Method::NOAUTH);
}
ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL);
@@ -440,8 +539,16 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
return true;
}
+/**
+ * Try to create a socket file descriptor with specific properties in the
+ * communications domain (address family) of the specified service.
+ *
+ * For details on the desired properties, see the inline comments in the source
+ * code.
+ */
SOCKET CreateSocket(const CService &addrConnect)
{
+ // Create a sockaddr from the specified service.
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
@@ -449,10 +556,13 @@ SOCKET CreateSocket(const CService &addrConnect)
return INVALID_SOCKET;
}
+ // Create a TCP socket in the address family of the specified service.
SOCKET hSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
if (hSocket == INVALID_SOCKET)
return INVALID_SOCKET;
+ // Ensure that waiting for I/O on this socket won't result in undefined
+ // behavior.
if (!IsSelectableSocket(hSocket)) {
CloseSocket(hSocket);
LogPrintf("Cannot create connection: non-selectable socket created (fd >= FD_SETSIZE ?)\n");
@@ -461,17 +571,18 @@ SOCKET CreateSocket(const CService &addrConnect)
#ifdef SO_NOSIGPIPE
int set = 1;
- // Different way of disabling SIGPIPE on BSD
+    // Set the no-sigpipe option on the socket for BSD systems; other UNIXes
+ // should use the MSG_NOSIGNAL flag for every send.
setsockopt(hSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&set, sizeof(int));
#endif
- //Disable Nagle's algorithm
+ // Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
SetSocketNoDelay(hSocket);
- // Set to non-blocking
+ // Set the non-blocking option on the socket.
if (!SetSocketNonBlocking(hSocket, true)) {
CloseSocket(hSocket);
- LogPrintf("ConnectSocketDirectly: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError()));
+ LogPrintf("CreateSocket: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError()));
}
return hSocket;
}
@@ -486,8 +597,21 @@ static void LogConnectFailure(bool manual_connection, const char* fmt, const Arg
}
}
+/**
+ * Try to connect to the specified service on the specified socket.
+ *
+ * @param addrConnect The service to which to connect.
+ * @param hSocket The socket on which to connect.
+ * @param nTimeout Wait this many milliseconds for the connection to be
+ * established.
+ * @param manual_connection Whether or not the connection was manually requested
+ *                          (e.g. through the addnode RPC)
+ *
+ * @returns Whether or not a connection was successfully made.
+ */
bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, int nTimeout, bool manual_connection)
{
+ // Create a sockaddr from the specified service.
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
if (hSocket == INVALID_SOCKET) {
@@ -498,12 +622,17 @@ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, i
LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToString());
return false;
}
+
+ // Connect to the addrConnect service on the hSocket socket.
if (connect(hSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR)
{
int nErr = WSAGetLastError();
// WSAEINVAL is here because some legacy version of winsock uses it
if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL)
{
+ // Connection didn't actually fail, but is being established
+            // asynchronously. Thus, use the async I/O API (select/poll)
+ // synchronously to check for successful connection with a timeout.
#ifdef USE_POLL
struct pollfd pollfd = {};
pollfd.fd = hSocket;
@@ -516,6 +645,10 @@ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, i
FD_SET(hSocket, &fdset);
int nRet = select(hSocket + 1, nullptr, &fdset, nullptr, &timeout);
#endif
+ // Upon successful completion, both select and poll return the total
+ // number of file descriptors that have been selected. A value of 0
+ // indicates that the call timed out and no file descriptors have
+ // been selected.
if (nRet == 0)
{
LogPrint(BCLog::NET, "connection to %s timeout\n", addrConnect.ToString());
@@ -526,6 +659,11 @@ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, i
LogPrintf("select() for %s failed: %s\n", addrConnect.ToString(), NetworkErrorString(WSAGetLastError()));
return false;
}
+
+ // Even if the select/poll was successful, the connect might not
+ // have been successful. The reason for this failure is hidden away
+ // in the SO_ERROR for the socket in modern systems. We read it into
+ // nRet here.
socklen_t nRetSize = sizeof(nRet);
if (getsockopt(hSocket, SOL_SOCKET, SO_ERROR, (sockopt_arg_type)&nRet, &nRetSize) == SOCKET_ERROR)
{
@@ -569,6 +707,22 @@ bool GetProxy(enum Network net, proxyType &proxyInfoOut) {
return true;
}
+/**
+ * Set the name proxy to use for all connections to nodes specified by a
+ * hostname. After setting this proxy, connecting to a node specified by a
+ * hostname no longer results in a local lookup of that hostname; instead, we
+ * ask the name proxy for a proxy connection to the hostname, effectively
+ * delegating the hostname lookup to the specified proxy.
+ *
+ * This delegation increases privacy for those who set the name proxy as they no
+ * longer leak their external hostname queries to their DNS servers.
+ *
+ * @returns Whether or not the operation succeeded.
+ *
+ * @note SOCKS5's support for UDP-over-SOCKS5 has been considered, but no SOCKS5
+ * server in common use (most notably Tor) actually implements UDP
+ * support, and a DNS resolver is beyond the scope of this project.
+ */
bool SetNameProxy(const proxyType &addrProxy) {
if (!addrProxy.IsValid())
return false;
@@ -599,6 +753,21 @@ bool IsProxy(const CNetAddr &addr) {
return false;
}
+/**
+ * Connect to a specified destination service through a SOCKS5 proxy by first
+ * connecting to the SOCKS5 proxy.
+ *
+ * @param proxy The SOCKS5 proxy.
+ * @param strDest The destination service to which to connect.
+ * @param port The destination port.
+ * @param hSocket The socket on which to connect to the SOCKS5 proxy.
+ * @param nTimeout Wait this many milliseconds for the connection to the SOCKS5
+ * proxy to be established.
+ * @param[out] outProxyConnectionFailed Whether or not the connection to the
+ * SOCKS5 proxy failed.
+ *
+ * @returns Whether or not the operation succeeded.
+ */
bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocket, int nTimeout, bool *outProxyConnectionFailed)
{
// first connect to proxy server
@@ -623,6 +792,17 @@ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int
return true;
}
+/**
+ * Parse and resolve a specified subnet string into the appropriate internal
+ * representation.
+ *
+ * @param pszName A string representation of a subnet of the form `network
+ * address [ "/", ( CIDR-style suffix | netmask ) ]`(e.g.
+ * `2001:db8::/32`, `192.0.2.0/255.255.255.0`, or `8.8.8.8`).
+ * @param ret The resulting internal representation of a subnet.
+ *
+ * @returns Whether the operation succeeded or not.
+ */
bool LookupSubNet(const char* pszName, CSubNet& ret)
{
std::string strSubnet(pszName);
@@ -630,6 +810,8 @@ bool LookupSubNet(const char* pszName, CSubNet& ret)
std::vector<CNetAddr> vIP;
std::string strAddress = strSubnet.substr(0, slash);
+ // TODO: Use LookupHost(const char *, CNetAddr&, bool) instead to just get
+ // one CNetAddr.
if (LookupHost(strAddress.c_str(), vIP, 1, false))
{
CNetAddr network = vIP[0];
@@ -637,8 +819,8 @@ bool LookupSubNet(const char* pszName, CSubNet& ret)
{
std::string strNetmask = strSubnet.substr(slash + 1);
int32_t n;
- // IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n
- if (ParseInt32(strNetmask, &n)) { // If valid number, assume /24 syntax
+ if (ParseInt32(strNetmask, &n)) {
+ // If valid number, assume CIDR variable-length subnet masking
ret = CSubNet(network, n);
return ret.IsValid();
}
diff --git a/src/protocol.h b/src/protocol.h
index a790a06906..91d043947b 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2018 The Bitcoin Core developers
+// Copyright (c) 2009-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -261,9 +261,6 @@ enum ServiceFlags : uint64_t {
// NODE_WITNESS indicates that a node can be asked for blocks and transactions including
// witness data.
NODE_WITNESS = (1 << 3),
- // NODE_XTHIN means the node supports Xtreme Thinblocks
- // If this is turned off then the node will not service nor make xthin requests
- NODE_XTHIN = (1 << 4),
// NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation of only
// serving the last 288 (2 day) blocks
// See BIP159 for details on how this is implemented.
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 3d68e9c5aa..dc1da7f8a9 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -11,10 +11,10 @@
#include <base58.h>
#include <chainparams.h>
-#include <primitives/transaction.h>
-#include <key_io.h>
#include <interfaces/node.h>
+#include <key_io.h>
#include <policy/policy.h>
+#include <primitives/transaction.h>
#include <protocol.h>
#include <script/script.h>
#include <script/standard.h>
@@ -841,9 +841,6 @@ QString formatServicesStr(quint64 mask)
case NODE_WITNESS:
strList.append("WITNESS");
break;
- case NODE_XTHIN:
- strList.append("XTHIN");
- break;
default:
strList.append(QString("%1[%2]").arg("UNKNOWN").arg(check));
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 289c96aa51..b7dcd59c6d 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1411,7 +1411,7 @@ static UniValue getchaintips(const JSONRPCRequest& request)
/*
* Idea: the set of chain tips is ::ChainActive().tip, plus orphan blocks which do not have another orphan building off of them.
* Algorithm:
- * - Make one pass through mapBlockIndex, picking out the orphan blocks, and also storing a set of the orphan block's pprev pointers.
+ * - Make one pass through g_blockman.m_block_index, picking out the orphan blocks, and also storing a set of the orphan block's pprev pointers.
* - Iterate through the orphan blocks. If the block isn't pointed to by another orphan, it is a chain tip.
* - add ::ChainActive().Tip()
*/
@@ -1419,7 +1419,7 @@ static UniValue getchaintips(const JSONRPCRequest& request)
std::set<const CBlockIndex*> setOrphans;
std::set<const CBlockIndex*> setPrevs;
- for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
+ for (const std::pair<const uint256, CBlockIndex*>& item : ::BlockIndex())
{
if (!::ChainActive().Contains(item.second)) {
setOrphans.insert(item.second);
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index a1c730ba08..3f40785c21 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -759,7 +759,9 @@ void InterruptTorControl()
{
if (gBase) {
LogPrintf("tor: Thread interrupt\n");
- event_base_loopbreak(gBase);
+ event_base_once(gBase, -1, EV_TIMEOUT, [](evutil_socket_t, short, void*) {
+ event_base_loopbreak(gBase);
+ }, nullptr, nullptr);
}
}
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 73fe2a8ee4..90b92969b9 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -250,7 +250,7 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams,
pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
- // Load mapBlockIndex
+ // Load m_block_index
while (pcursor->Valid()) {
boost::this_thread::interruption_point();
if (ShutdownRequested()) return false;
diff --git a/src/validation.cpp b/src/validation.cpp
index 262b6856a4..0bc6167bad 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -77,7 +77,11 @@ bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIn
return false;
}
-static CChainState g_chainstate;
+namespace {
+BlockManager g_blockman;
+} // anon namespace
+
+static CChainState g_chainstate(g_blockman);
CChainState& ChainstateActive() { return g_chainstate; }
@@ -95,7 +99,6 @@ CChain& ChainActive() { return g_chainstate.m_chain; }
*/
RecursiveMutex cs_main;
-BlockMap& mapBlockIndex = ::ChainstateActive().mapBlockIndex;
CBlockIndex *pindexBestHeader = nullptr;
Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
@@ -125,12 +128,7 @@ CScript COINBASE_FLAGS;
// Internal stuff
namespace {
- CBlockIndex *&pindexBestInvalid = ::ChainstateActive().pindexBestInvalid;
-
- /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
- * Pruned nodes may have entries where B is missing data.
- */
- std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = ::ChainstateActive().mapBlocksUnlinked;
+ CBlockIndex* pindexBestInvalid = nullptr;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
@@ -148,6 +146,13 @@ namespace {
std::set<int> setDirtyFileInfo;
} // anon namespace
+CBlockIndex* LookupBlockIndex(const uint256& hash)
+{
+ AssertLockHeld(cs_main);
+ BlockMap::const_iterator it = g_blockman.m_block_index.find(hash);
+ return it == g_blockman.m_block_index.end() ? nullptr : it->second;
+}
+
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
AssertLockHeld(cs_main);
@@ -1047,6 +1052,11 @@ bool CChainState::IsInitialBlockDownload() const
static CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
+BlockMap& BlockIndex()
+{
+ return g_blockman.m_block_index;
+}
+
static void AlertNotify(const std::string& strMessage)
{
uiInterface.NotifyAlertChanged();
@@ -1160,7 +1170,7 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
- m_failed_blocks.insert(pindex);
+ m_blockman.m_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
@@ -1695,8 +1705,8 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// relative to a piece of software is an objective fact these defaults can be easily reviewed.
// This setting doesn't force the selection of any particular chain but makes validating some faster by
// effectively caching the result of part of the verification.
- BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid);
- if (it != mapBlockIndex.end()) {
+ BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
+ if (it != m_blockman.m_block_index.end()) {
if (it->second->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->nChainWork >= nMinimumChainWork) {
@@ -2366,10 +2376,11 @@ CBlockIndex* CChainState::FindMostWorkChain() {
if (fFailedChain) {
pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
} else if (fMissingData) {
- // If we're missing data, then add back to mapBlocksUnlinked,
+ // If we're missing data, then add back to m_blocks_unlinked,
// so that if the block arrives in the future we can try adding
// to setBlockIndexCandidates again.
- mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
+ m_blockman.m_blocks_unlinked.insert(
+ std::make_pair(pindexFailed->pprev, pindexFailed));
}
setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
@@ -2720,12 +2731,12 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c
to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(to_mark_failed);
setBlockIndexCandidates.erase(to_mark_failed);
- m_failed_blocks.insert(to_mark_failed);
+ m_blockman.m_failed_blocks.insert(to_mark_failed);
// The resulting new best tip may not be in setBlockIndexCandidates anymore, so
// add it again.
- BlockMap::iterator it = mapBlockIndex.begin();
- while (it != mapBlockIndex.end()) {
+ BlockMap::iterator it = m_blockman.m_block_index.begin();
+ while (it != m_blockman.m_block_index.end()) {
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
setBlockIndexCandidates.insert(it->second);
}
@@ -2752,8 +2763,8 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
int nHeight = pindex->nHeight;
// Remove the invalidity flag from this block and all its descendants.
- BlockMap::iterator it = mapBlockIndex.begin();
- while (it != mapBlockIndex.end()) {
+ BlockMap::iterator it = m_blockman.m_block_index.begin();
+ while (it != m_blockman.m_block_index.end()) {
if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(it->second);
@@ -2764,7 +2775,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
// Reset invalid block marker if it was pointing to one of those.
pindexBestInvalid = nullptr;
}
- m_failed_blocks.erase(it->second);
+ m_blockman.m_failed_blocks.erase(it->second);
}
it++;
}
@@ -2774,7 +2785,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
if (pindex->nStatus & BLOCK_FAILED_MASK) {
pindex->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(pindex);
- m_failed_blocks.erase(pindex);
+ m_blockman.m_failed_blocks.erase(pindex);
}
pindex = pindex->pprev;
}
@@ -2784,14 +2795,14 @@ void ResetBlockFailureFlags(CBlockIndex *pindex) {
return ::ChainstateActive().ResetBlockFailureFlags(pindex);
}
-CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
{
AssertLockHeld(cs_main);
// Check for duplicate
uint256 hash = block.GetHash();
- BlockMap::iterator it = mapBlockIndex.find(hash);
- if (it != mapBlockIndex.end())
+ BlockMap::iterator it = m_block_index.find(hash);
+ if (it != m_block_index.end())
return it->second;
// Construct new block index object
@@ -2800,10 +2811,10 @@ CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
// to avoid miners withholding blocks but broadcasting headers, to get a
// competitive advantage.
pindexNew->nSequenceId = 0;
- BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
+ BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
- BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
- if (miPrev != mapBlockIndex.end())
+ BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
+ if (miPrev != m_block_index.end())
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
@@ -2852,17 +2863,17 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
- std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
while (range.first != range.second) {
std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
queue.push_back(it->second);
range.first++;
- mapBlocksUnlinked.erase(it);
+ m_blockman.m_blocks_unlinked.erase(it);
}
}
} else {
if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
- mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
+ m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
}
}
}
@@ -3117,7 +3128,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
if (fCheckpointsEnabled) {
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
- // MapBlockIndex.
+ // g_blockman.m_block_index.
CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
return state.Invalid(ValidationInvalidReason::BLOCK_CHECKPOINT, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
@@ -3230,15 +3241,15 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
return true;
}
-bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
+bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
AssertLockHeld(cs_main);
// Check for duplicate
uint256 hash = block.GetHash();
- BlockMap::iterator miSelf = mapBlockIndex.find(hash);
+ BlockMap::iterator miSelf = m_block_index.find(hash);
CBlockIndex *pindex = nullptr;
if (hash != chainparams.GetConsensus().hashGenesisBlock) {
- if (miSelf != mapBlockIndex.end()) {
+ if (miSelf != m_block_index.end()) {
// Block header is already known.
pindex = miSelf->second;
if (ppindex)
@@ -3253,8 +3264,8 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
// Get prev block index
CBlockIndex* pindexPrev = nullptr;
- BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
- if (mi == mapBlockIndex.end())
+ BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
+ if (mi == m_block_index.end())
return state.Invalid(ValidationInvalidReason::BLOCK_MISSING_PREV, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
@@ -3306,8 +3317,6 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
if (ppindex)
*ppindex = pindex;
- CheckBlockIndex(chainparams.GetConsensus());
-
return true;
}
@@ -3319,7 +3328,10 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
- if (!::ChainstateActive().AcceptBlockHeader(header, state, chainparams, &pindex)) {
+ bool accepted = g_blockman.AcceptBlockHeader(header, state, chainparams, &pindex);
+ ::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus());
+
+ if (!accepted) {
if (first_invalid) *first_invalid = header;
return false;
}
@@ -3362,7 +3374,10 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
CBlockIndex *pindexDummy = nullptr;
CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
- if (!AcceptBlockHeader(block, state, chainparams, &pindex))
+ bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
+ CheckBlockIndex(chainparams.GetConsensus());
+
+ if (!accepted_header)
return false;
// Try to process all requested blocks that we don't have, but only
@@ -3513,7 +3528,7 @@ void PruneOneBlockFile(const int fileNumber)
{
LOCK(cs_LastBlockFile);
- for (const auto& entry : mapBlockIndex) {
+ for (const auto& entry : g_blockman.m_block_index) {
CBlockIndex* pindex = entry.second;
if (pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
@@ -3523,16 +3538,16 @@ void PruneOneBlockFile(const int fileNumber)
pindex->nUndoPos = 0;
setDirtyBlockIndex.insert(pindex);
- // Prune from mapBlocksUnlinked -- any block we prune would have
+ // Prune from m_blocks_unlinked -- any block we prune would have
// to be downloaded again in order to consider its chain, at which
// point it would be considered as a candidate for
- // mapBlocksUnlinked or setBlockIndexCandidates.
- std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
+ // m_blocks_unlinked or setBlockIndexCandidates.
+ auto range = g_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
while (range.first != range.second) {
std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
range.first++;
if (_it->second == pindex) {
- mapBlocksUnlinked.erase(_it);
+ g_blockman.m_blocks_unlinked.erase(_it);
}
}
}
@@ -3681,7 +3696,7 @@ fs::path GetBlockPosFilename(const FlatFilePos &pos)
return BlockFileSeq().FileName(pos);
}
-CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
+CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
{
AssertLockHeld(cs_main);
@@ -3689,27 +3704,30 @@ CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
return nullptr;
// Return existing
- BlockMap::iterator mi = mapBlockIndex.find(hash);
- if (mi != mapBlockIndex.end())
+ BlockMap::iterator mi = m_block_index.find(hash);
+ if (mi != m_block_index.end())
return (*mi).second;
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
- mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
+ mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
return pindexNew;
}
-bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree)
+bool BlockManager::LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ CBlockTreeDB& blocktree,
+ std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
{
if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
return false;
// Calculate nChainWork
std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
- vSortedByHeight.reserve(mapBlockIndex.size());
- for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
+ vSortedByHeight.reserve(m_block_index.size());
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
{
CBlockIndex* pindex = item.second;
vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
@@ -3729,7 +3747,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
} else {
pindex->nChainTx = 0;
- mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
+ m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
}
} else {
pindex->nChainTx = pindex->nTx;
@@ -3739,8 +3757,9 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
pindex->nStatus |= BLOCK_FAILED_CHILD;
setDirtyBlockIndex.insert(pindex);
}
- if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))
- setBlockIndexCandidates.insert(pindex);
+ if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
+ block_index_candidates.insert(pindex);
+ }
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindex;
if (pindex->pprev)
@@ -3752,9 +3771,21 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
return true;
}
+void BlockManager::Unload() {
+ m_failed_blocks.clear();
+ m_blocks_unlinked.clear();
+
+ for (const BlockMap::value_type& entry : m_block_index) {
+ delete entry.second;
+ }
+
+ m_block_index.clear();
+}
+
bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- if (!::ChainstateActive().LoadBlockIndex(chainparams.GetConsensus(), *pblocktree))
+ if (!g_blockman.LoadBlockIndex(
+ chainparams.GetConsensus(), *pblocktree, ::ChainstateActive().setBlockIndexCandidates))
return false;
// Load block file info
@@ -3777,7 +3808,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
std::set<int> setBlkDataFiles;
- for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
+ for (const std::pair<const uint256, CBlockIndex*>& item : g_blockman.m_block_index)
{
CBlockIndex* pindex = item.second;
if (pindex->nStatus & BLOCK_HAVE_DATA) {
@@ -3976,16 +4007,16 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
const CBlockIndex* pindexNew; // New tip during the interrupted flush.
const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
- if (mapBlockIndex.count(hashHeads[0]) == 0) {
+ if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
return error("ReplayBlocks(): reorganization to unknown block requested");
}
- pindexNew = mapBlockIndex[hashHeads[0]];
+ pindexNew = m_blockman.m_block_index[hashHeads[0]];
if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
- if (mapBlockIndex.count(hashHeads[1]) == 0) {
+ if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
return error("ReplayBlocks(): reorganization from unknown block requested");
}
- pindexOld = mapBlockIndex[hashHeads[1]];
+ pindexOld = m_blockman.m_block_index[hashHeads[1]];
pindexFork = LastCommonAncestor(pindexOld, pindexNew);
assert(pindexFork != nullptr);
}
@@ -4051,10 +4082,10 @@ void CChainState::EraseBlockData(CBlockIndex* index)
setDirtyBlockIndex.insert(index);
// Update indexes
setBlockIndexCandidates.erase(index);
- std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(index->pprev);
+ auto ret = m_blockman.m_blocks_unlinked.equal_range(index->pprev);
while (ret.first != ret.second) {
if (ret.first->second == index) {
- mapBlocksUnlinked.erase(ret.first++);
+ m_blockman.m_blocks_unlinked.erase(ret.first++);
} else {
++ret.first;
}
@@ -4074,7 +4105,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
// blocks will be dealt with below (releasing cs_main in between).
{
LOCK(cs_main);
- for (const auto& entry : mapBlockIndex) {
+ for (const auto& entry : m_blockman.m_block_index) {
if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
EraseBlockData(entry.second);
}
@@ -4180,7 +4211,6 @@ bool RewindBlockIndex(const CChainParams& params) {
void CChainState::UnloadBlockIndex() {
nBlockSequenceId = 1;
- m_failed_blocks.clear();
setBlockIndexCandidates.clear();
}
@@ -4191,10 +4221,10 @@ void UnloadBlockIndex()
{
LOCK(cs_main);
::ChainActive().SetTip(nullptr);
+ g_blockman.Unload();
pindexBestInvalid = nullptr;
pindexBestHeader = nullptr;
mempool.clear();
- mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
setDirtyBlockIndex.clear();
@@ -4203,11 +4233,6 @@ void UnloadBlockIndex()
for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
warningcache[b].clear();
}
-
- for (const BlockMap::value_type& entry : mapBlockIndex) {
- delete entry.second;
- }
- mapBlockIndex.clear();
fHavePruned = false;
::ChainstateActive().UnloadBlockIndex();
@@ -4220,7 +4245,7 @@ bool LoadBlockIndex(const CChainParams& chainparams)
if (!fReindex) {
bool ret = LoadBlockIndexDB(chainparams);
if (!ret) return false;
- needs_init = mapBlockIndex.empty();
+ needs_init = g_blockman.m_block_index.empty();
}
if (needs_init) {
@@ -4240,10 +4265,10 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
LOCK(cs_main);
// Check whether we're already initialized by checking for genesis in
- // mapBlockIndex. Note that we can't use m_chain here, since it is
+ // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
// set based on the coins db, not the block index db, which is the only
// thing loaded at this point.
- if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
+ if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
return true;
try {
@@ -4251,7 +4276,7 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
if (blockPos.IsNull())
return error("%s: writing genesis block to disk failed", __func__);
- CBlockIndex *pindex = AddToBlockIndex(block);
+ CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
} catch (const std::runtime_error& e) {
return error("%s: failed to write genesis block: %s", __func__, e.what());
@@ -4396,20 +4421,20 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
LOCK(cs_main);
// During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
- // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
- // iterating the block tree require that m_chain has been initialized.)
+ // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
+ // tests when iterating the block tree require that m_chain has been initialized.)
if (m_chain.Height() < 0) {
- assert(mapBlockIndex.size() <= 1);
+ assert(m_blockman.m_block_index.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex*,CBlockIndex*> forward;
- for (const std::pair<const uint256, CBlockIndex*>& entry : mapBlockIndex) {
+ for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
forward.insert(std::make_pair(entry.second->pprev, entry.second));
}
- assert(forward.size() == mapBlockIndex.size());
+ assert(forward.size() == m_blockman.m_block_index.size());
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
CBlockIndex *pindex = rangeGenesis.first->second;
@@ -4463,7 +4488,7 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
- assert(pindexFirstNotTreeValid == nullptr); // All mapBlockIndex entries must at least be TREE valid
+ assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
@@ -4482,13 +4507,13 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
}
// If some parent is missing, then it could be that this block was in
// setBlockIndexCandidates but had to be removed because of the missing data.
- // In this case it must be in mapBlocksUnlinked -- see test below.
+ // In this case it must be in m_blocks_unlinked -- see test below.
}
} else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
- // Check whether this block is in mapBlocksUnlinked.
- std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
+ // Check whether this block is in m_blocks_unlinked.
+ std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
bool foundInUnlinked = false;
while (rangeUnlinked.first != rangeUnlinked.second) {
assert(rangeUnlinked.first->first == pindex->pprev);
@@ -4499,22 +4524,22 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
rangeUnlinked.first++;
}
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
- // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
+ // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
assert(foundInUnlinked);
}
- if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
- if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
+ if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
+ if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
// We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
assert(fHavePruned); // We must have pruned.
- // This block may have entered mapBlocksUnlinked if:
+ // This block may have entered m_blocks_unlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
// data for some intermediate block between m_chain and the
// tip.
// So if this block is itself better than m_chain.Tip() and it wasn't in
- // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
+ // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
@@ -4758,10 +4783,10 @@ public:
CMainCleanup() {}
~CMainCleanup() {
// block headers
- BlockMap::iterator it1 = mapBlockIndex.begin();
- for (; it1 != mapBlockIndex.end(); it1++)
+ BlockMap::iterator it1 = g_blockman.m_block_index.begin();
+ for (; it1 != g_blockman.m_block_index.end(); it1++)
delete (*it1).second;
- mapBlockIndex.clear();
+ g_blockman.m_block_index.clear();
}
};
static CMainCleanup instance_of_cmaincleanup;
diff --git a/src/validation.h b/src/validation.h
index 9573d62048..a1b8029e01 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -144,7 +144,6 @@ extern CCriticalSection cs_main;
extern CBlockPolicyEstimator feeEstimator;
extern CTxMemPool mempool;
typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
-extern BlockMap& mapBlockIndex GUARDED_BY(cs_main);
extern Mutex g_best_block_mutex;
extern std::condition_variable g_best_block_cv;
extern uint256 g_best_block;
@@ -406,12 +405,7 @@ public:
/** Replay blocks that aren't fully applied to the database. */
bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
-inline CBlockIndex* LookupBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- AssertLockHeld(cs_main);
- BlockMap::const_iterator it = mapBlockIndex.find(hash);
- return it == mapBlockIndex.end() ? nullptr : it->second;
-}
+CBlockIndex* LookupBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Find the last common block between the parameter chain and a locator. */
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -439,27 +433,90 @@ struct CBlockIndexWorkComparator
};
/**
- * CChainState stores and provides an API to update our local knowledge of the
- * current best chain and header tree.
+ * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
+ * to determine where the most-work tip is.
*
- * It generally provides access to the current block tree, as well as functions
- * to provide new data, which it will appropriately validate and incorporate in
- * its state as necessary.
+ * This data is used mostly in `CChainState` - information about, e.g.,
+ * candidate tips is not maintained here.
+ */
+class BlockManager {
+public:
+ BlockMap m_block_index GUARDED_BY(cs_main);
+
+ /** In order to efficiently track invalidity of headers, we keep the set of
+ * blocks which we tried to connect and found to be invalid here (ie which
+ * were set to BLOCK_FAILED_VALID since the last restart). We can then
+ * walk this set and check if a new header is a descendant of something in
+ * this set, preventing us from having to walk m_block_index when we try
+ * to connect a bad block and fail.
+ *
+ * While this is more complicated than marking everything which descends
+ * from an invalid block as invalid at the time we discover it to be
+ * invalid, doing so would require walking all of m_block_index to find all
+ * descendants. Since this case should be very rare, keeping track of all
+ * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
+ * well.
+ *
+ * Because we already walk m_block_index in height-order at startup, we go
+ * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
+ * instead of putting things in this set.
+ */
+ std::set<CBlockIndex*> m_failed_blocks;
+
+ /**
+ * All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
+ * Pruned nodes may have entries where B is missing data.
+ */
+ std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
+
+ /**
+ * Load the blocktree off disk and into memory. Populate certain metadata
+ * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
+ * collections like setDirtyBlockIndex.
+ *
+ * @param[out] block_index_candidates Fill this set with any valid blocks for
+ * which we've downloaded all transactions.
+ */
+ bool LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ CBlockTreeDB& blocktree,
+ std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
+ EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ /** Clear all data members. */
+ void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /** Create a new block index entry for a given block hash */
+ CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ /**
+ * If a block header hasn't already been seen, call CheckBlockHeader on it, ensure
+ * that it doesn't descend from an invalid block, and then add it to m_block_index.
+ */
+ bool AcceptBlockHeader(
+ const CBlockHeader& block,
+ CValidationState& state,
+ const CChainParams& chainparams,
+ CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+};
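To make the `m_failed_blocks` rationale above concrete, here is a small sketch of the ancestry check it enables (a hypothetical helper, not code from this change): rather than scanning all of `m_block_index`, only the handful of blocks that failed since startup are walked.

```
#include <set>

#include <chain.h>

// Hypothetical illustration of the check described for m_failed_blocks:
// returns true if the header being considered builds on a chain containing a
// block we already found to be invalid.
bool DescendsFromFailedBlock(const CBlockIndex* pindex_prev,
                             const std::set<CBlockIndex*>& failed_blocks)
{
    for (const CBlockIndex* failed : failed_blocks) {
        // pindex_prev descends from `failed` iff its ancestor at that height
        // is `failed` itself.
        if (pindex_prev->GetAncestor(failed->nHeight) == failed) {
            return true;
        }
    }
    return false;
}
```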
+
+/**
+ * CChainState stores and provides an API to update our local knowledge of the
+ * current best chain.
*
* Eventually, the API here is targeted at being exposed externally as a
* consumable libconsensus library, so any functions added must only call
* other class member functions, pure functions in other parts of the consensus
* library, callbacks via the validation interface, or read/write-to-disk
* functions (eventually this will also be via callbacks).
+ *
+ * Anything that is contingent on the current tip of the chain is stored here,
+ * whereas block information and metadata independent of the current tip is
+ * kept in `BlockManager`.
*/
class CChainState {
private:
- /**
- * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
- * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
- * missing the data for the block.
- */
- std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/**
* Every received block is assigned a unique and increasing identifier, so we
@@ -473,26 +530,6 @@ private:
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
- /** In order to efficiently track invalidity of headers, we keep the set of
- * blocks which we tried to connect and found to be invalid here (ie which
- * were set to BLOCK_FAILED_VALID since the last restart). We can then
- * walk this set and check if a new header is a descendant of something in
- * this set, preventing us from having to walk mapBlockIndex when we try
- * to connect a bad block and fail.
- *
- * While this is more complicated than marking everything which descends
- * from an invalid block as invalid at the time we discover it to be
- * invalid, doing so would require walking all of mapBlockIndex to find all
- * descendants. Since this case should be very rare, keeping track of all
- * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
- * well.
- *
- * Because we already walk mapBlockIndex in height-order at startup, we go
- * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
- * instead of putting things in this set.
- */
- std::set<CBlockIndex*> m_failed_blocks;
-
/**
* the ChainState CriticalSection
* A lock that must be held when modifying this ChainState - held in ActivateBestChain()
@@ -507,15 +544,23 @@ private:
*/
mutable std::atomic<bool> m_cached_finished_ibd{false};
+ //! Reference to a BlockManager instance which itself is shared across all
+ //! CChainState instances. Keeping a local reference allows us to test more
+ //! easily as opposed to referencing a global.
+ BlockManager& m_blockman;
+
public:
+ CChainState(BlockManager& blockman) : m_blockman(blockman) { }
+
//! The current chain of blockheaders we consult and build on.
//! @see CChain, CBlockIndex.
CChain m_chain;
- BlockMap mapBlockIndex GUARDED_BY(cs_main);
- std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
- CBlockIndex *pindexBestInvalid = nullptr;
-
- bool LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /**
+ * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
+ * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
+ * missing the data for the block.
+ */
+ std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/**
* Update the on-disk chain state.
@@ -541,11 +586,6 @@ public:
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) LOCKS_EXCLUDED(cs_main);
- /**
- * If a block header hasn't already been seen, call CheckBlockHeader on it, ensure
- * that it doesn't descend from an invalid block, and then add it to mapBlockIndex.
- */
- bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Block (dis)connection on a given view:
@@ -572,13 +612,6 @@ public:
/** Check whether we are doing an initial block download (synchronizing from disk or network) */
bool IsInitialBlockDownload() const;
-private:
- bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs);
- bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs);
-
- CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- /** Create a new block index entry for a given block hash */
- CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Make various assertions about the state of the block index.
*
@@ -586,6 +619,10 @@ private:
*/
void CheckBlockIndex(const Consensus::Params& consensusParams);
+private:
+ bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs);
+ bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs);
+
void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
CBlockIndex* FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -615,6 +652,9 @@ CChainState& ChainstateActive();
/** @returns the most-work chain. */
CChain& ChainActive();
+/** @returns the global block index map. */
+BlockMap& BlockIndex();
+
/** Global variable that points to the coins database (protected by cs_main) */
extern std::unique_ptr<CCoinsViewDB> pcoinsdbview;
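As the new constructor and the `m_blockman` comment above suggest, a `CChainState` now borrows a `BlockManager` rather than owning the block tree itself. A rough sketch of that wiring as it might appear in a test follows; it assumes the headers from this change, and `ExampleTestWiring` is a hypothetical name:

```
#include <cassert>

#include <sync.h>
#include <validation.h>

void ExampleTestWiring()
{
    // The BlockManager owns the header tree (m_block_index, m_blocks_unlinked,
    // m_failed_blocks); the CChainState keeps only a reference to it.
    BlockManager blockman;
    CChainState chainstate(blockman);

    LOCK(cs_main);
    // Tree-level queries go through the BlockManager instance, while the free
    // function LookupBlockIndex() wraps the global g_blockman in validation.cpp.
    assert(blockman.m_block_index.empty());
    assert(chainstate.m_chain.Tip() == nullptr);
}
```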
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index 0d07ae860c..619197a57a 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -272,18 +272,14 @@ Result CreateRateBumpTransaction(CWallet* wallet, const uint256& txid, const CCo
new_coin_control.m_min_depth = 1;
CTransactionRef tx_new = MakeTransactionRef();
- ReserveDestination reservedest(wallet);
CAmount fee_ret;
int change_pos_in_out = -1; // No requested location for change
std::string fail_reason;
- if (!wallet->CreateTransaction(*locked_chain, recipients, tx_new, reservedest, fee_ret, change_pos_in_out, fail_reason, new_coin_control, false)) {
+ if (!wallet->CreateTransaction(*locked_chain, recipients, tx_new, fee_ret, change_pos_in_out, fail_reason, new_coin_control, false)) {
errors.push_back("Unable to create transaction: " + fail_reason);
return Result::WALLET_ERROR;
}
- // If change key hasn't been ReturnKey'ed by this point, we take it out of keypool
- reservedest.KeepDestination();
-
// Write back new fee if successful
new_fee = fee_ret;
@@ -330,9 +326,8 @@ Result CommitTransaction(CWallet* wallet, const uint256& txid, CMutableTransacti
mapValue_t mapValue = oldWtx.mapValue;
mapValue["replaces_txid"] = oldWtx.GetHash().ToString();
- ReserveDestination reservedest(wallet);
CValidationState state;
- if (!wallet->CommitTransaction(tx, std::move(mapValue), oldWtx.vOrderForm, reservedest, state)) {
+ if (!wallet->CommitTransaction(tx, std::move(mapValue), oldWtx.vOrderForm, state)) {
// NOTE: CommitTransaction never returns false, so this should never happen.
errors.push_back(strprintf("The transaction was rejected: %s", FormatStateMessage(state)));
return Result::WALLET_ERROR;
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 19becf93c7..ab732dc0d8 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -317,7 +317,6 @@ static CTransactionRef SendMoney(interfaces::Chain::Lock& locked_chain, CWallet
CScript scriptPubKey = GetScriptForDestination(address);
// Create and send the transaction
- ReserveDestination reservedest(pwallet);
CAmount nFeeRequired;
std::string strError;
std::vector<CRecipient> vecSend;
@@ -325,13 +324,13 @@ static CTransactionRef SendMoney(interfaces::Chain::Lock& locked_chain, CWallet
CRecipient recipient = {scriptPubKey, nValue, fSubtractFeeFromAmount};
vecSend.push_back(recipient);
CTransactionRef tx;
- if (!pwallet->CreateTransaction(locked_chain, vecSend, tx, reservedest, nFeeRequired, nChangePosRet, strError, coin_control)) {
+ if (!pwallet->CreateTransaction(locked_chain, vecSend, tx, nFeeRequired, nChangePosRet, strError, coin_control)) {
if (!fSubtractFeeFromAmount && nValue + nFeeRequired > curBalance)
strError = strprintf("Error: This transaction requires a transaction fee of at least %s", FormatMoney(nFeeRequired));
throw JSONRPCError(RPC_WALLET_ERROR, strError);
}
CValidationState state;
- if (!pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */, reservedest, state)) {
+ if (!pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */, state)) {
strError = strprintf("Error: The transaction was rejected! Reason given: %s", FormatStateMessage(state));
throw JSONRPCError(RPC_WALLET_ERROR, strError);
}
@@ -915,16 +914,15 @@ static UniValue sendmany(const JSONRPCRequest& request)
std::shuffle(vecSend.begin(), vecSend.end(), FastRandomContext());
// Send
- ReserveDestination changedest(pwallet);
CAmount nFeeRequired = 0;
int nChangePosRet = -1;
std::string strFailReason;
CTransactionRef tx;
- bool fCreated = pwallet->CreateTransaction(*locked_chain, vecSend, tx, changedest, nFeeRequired, nChangePosRet, strFailReason, coin_control);
+ bool fCreated = pwallet->CreateTransaction(*locked_chain, vecSend, tx, nFeeRequired, nChangePosRet, strFailReason, coin_control);
if (!fCreated)
throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, strFailReason);
CValidationState state;
- if (!pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */, changedest, state)) {
+ if (!pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */, state)) {
strFailReason = strprintf("Transaction commit failed:: %s", FormatStateMessage(state));
throw JSONRPCError(RPC_WALLET_ERROR, strFailReason);
}
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 609ae9a5e4..8af05dea45 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -272,7 +272,7 @@ static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64
if (blockTime > 0) {
auto locked_chain = wallet.chain().lock();
LockAssertion lock(::cs_main);
- auto inserted = mapBlockIndex.emplace(GetRandHash(), new CBlockIndex);
+ auto inserted = ::BlockIndex().emplace(GetRandHash(), new CBlockIndex);
assert(inserted.second);
const uint256& hash = inserted.first->first;
block = inserted.first->second;
@@ -361,17 +361,16 @@ public:
CWalletTx& AddTx(CRecipient recipient)
{
CTransactionRef tx;
- ReserveDestination reservedest(wallet.get());
CAmount fee;
int changePos = -1;
std::string error;
CCoinControl dummy;
{
auto locked_chain = m_chain->lock();
- BOOST_CHECK(wallet->CreateTransaction(*locked_chain, {recipient}, tx, reservedest, fee, changePos, error, dummy));
+ BOOST_CHECK(wallet->CreateTransaction(*locked_chain, {recipient}, tx, fee, changePos, error, dummy));
}
CValidationState state;
- BOOST_CHECK(wallet->CommitTransaction(tx, {}, {}, reservedest, state));
+ BOOST_CHECK(wallet->CommitTransaction(tx, {}, {}, state));
CMutableTransaction blocktx;
{
LOCK(wallet->cs_wallet);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 07fc8c35d5..452d4f7a6a 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2779,17 +2779,13 @@ bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nC
auto locked_chain = chain().lock();
LOCK(cs_wallet);
- ReserveDestination reservedest(this);
CTransactionRef tx_new;
- if (!CreateTransaction(*locked_chain, vecSend, tx_new, reservedest, nFeeRet, nChangePosInOut, strFailReason, coinControl, false)) {
+ if (!CreateTransaction(*locked_chain, vecSend, tx_new, nFeeRet, nChangePosInOut, strFailReason, coinControl, false)) {
return false;
}
if (nChangePosInOut != -1) {
tx.vout.insert(tx.vout.begin() + nChangePosInOut, tx_new->vout[nChangePosInOut]);
- // We don't have the normal Create/Commit cycle, and don't want to risk
- // reusing change, so just remove the key from the keypool here.
- reservedest.KeepDestination();
}
// Copy output sizes from new transaction; they may have had the fee
@@ -2900,10 +2896,11 @@ OutputType CWallet::TransactionChangeType(OutputType change_type, const std::vec
return m_default_address_type;
}
-bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std::vector<CRecipient>& vecSend, CTransactionRef& tx, ReserveDestination& reservedest, CAmount& nFeeRet,
+bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet,
int& nChangePosInOut, std::string& strFailReason, const CCoinControl& coin_control, bool sign)
{
CAmount nValue = 0;
+ ReserveDestination reservedest(this);
int nChangePosRequest = nChangePosInOut;
unsigned int nSubtractFeeFromAmount = 0;
for (const auto& recipient : vecSend)
@@ -3183,8 +3180,6 @@ bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std
}
}
- if (nChangePosInOut == -1) reservedest.ReturnDestination(); // Return any reserved address if we don't have change
-
// Shuffle selected coins and fill in final vin
txNew.vin.clear();
std::vector<CInputCoin> selected_coins(setCoins.begin(), setCoins.end());
@@ -3247,6 +3242,10 @@ bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std
}
}
+ // Before we return success, we assume any change key will be used to prevent
+ // accidental re-use.
+ reservedest.KeepDestination();
+
WalletLogPrintf("Fee Calculation: Fee:%d Bytes:%u Needed:%d Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
nFeeRet, nBytes, nFeeNeeded, feeCalc.returnedTarget, feeCalc.desiredTarget, StringForFeeReason(feeCalc.reason), feeCalc.est.decay,
feeCalc.est.pass.start, feeCalc.est.pass.end,
@@ -3261,7 +3260,7 @@ bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std
/**
* Call after CreateTransaction unless you want to abort
*/
-bool CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm, ReserveDestination& reservedest, CValidationState& state)
+bool CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm, CValidationState& state)
{
{
auto locked_chain = chain().lock();
@@ -3275,8 +3274,6 @@ bool CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::ve
WalletLogPrintf("CommitTransaction:\n%s", wtxNew.tx->ToString()); /* Continued */
{
- // Take key pair from key pool so it won't be used again
- reservedest.KeepDestination();
// Add tx to wallet, because if it has change it's also ours,
// otherwise just for transaction history.
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 5f226cf5c0..6a7097bf44 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -266,8 +266,8 @@ public:
/** A wrapper to reserve an address from a wallet
*
- * ReserveDestination is used to reserve an address. It is passed around
- * during the CreateTransaction/CommitTransaction procedure.
+ * ReserveDestination is used to reserve an address.
+ * It is currently only used inside of CreateTransaction.
*
* Instantiating a ReserveDestination does not reserve an address. To do so,
* GetReservedDestination() needs to be called on the object. Once an address has been
@@ -1137,9 +1137,9 @@ public:
* selected by SelectCoins(); Also create the change output, when needed
* @note passing nChangePosInOut as -1 will result in setting a random position
*/
- bool CreateTransaction(interfaces::Chain::Lock& locked_chain, const std::vector<CRecipient>& vecSend, CTransactionRef& tx, ReserveDestination& reservedest, CAmount& nFeeRet, int& nChangePosInOut,
+ bool CreateTransaction(interfaces::Chain::Lock& locked_chain, const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet, int& nChangePosInOut,
std::string& strFailReason, const CCoinControl& coin_control, bool sign = true);
- bool CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm, ReserveDestination& reservedest, CValidationState& state);
+ bool CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm, CValidationState& state);
bool DummySignTx(CMutableTransaction &txNew, const std::set<CTxOut> &txouts, bool use_max_sig = false) const
{
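With `ReserveDestination` handled inside the wallet, callers of `CreateTransaction`/`CommitTransaction` shrink to the pattern below. This is a hedged sketch against the signatures declared above; `SendExample` is a hypothetical helper and error handling is reduced to the essentials:

```
#include <string>
#include <vector>

#include <consensus/validation.h>
#include <wallet/coincontrol.h>
#include <wallet/wallet.h>

bool SendExample(CWallet& wallet, interfaces::Chain::Lock& locked_chain,
                 const std::vector<CRecipient>& recipients, const CCoinControl& coin_control)
{
    CTransactionRef tx;
    CAmount fee_ret = 0;
    int change_pos = -1;            // -1: let the wallet pick a random change position
    std::string fail_reason;

    // The wallet now reserves (and keeps) any change destination internally.
    if (!wallet.CreateTransaction(locked_chain, recipients, tx, fee_ret,
                                  change_pos, fail_reason, coin_control)) {
        return false;               // fail_reason explains why
    }

    CValidationState state;
    return wallet.CommitTransaction(tx, {} /* mapValue */, {} /* orderForm */, state);
}
```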
diff --git a/test/README.md b/test/README.md
index ecea3213ab..8f08b7afe4 100644
--- a/test/README.md
+++ b/test/README.md
@@ -49,6 +49,29 @@ You can run any combination (incl. duplicates) of tests by calling:
test/functional/test_runner.py <testname1> <testname2> <testname3> ...
```
+Wildcard test names can be passed, provided the paths are given relative to the
+directory the test runner is invoked from and the shell (e.g. `bash`) performs
+the globbing. For example, to run all the wallet tests:
+
+```
+test/functional/test_runner.py test/functional/wallet*
+functional/test_runner.py functional/wallet* (called from the test/ directory)
+test_runner.py wallet* (called from the test/functional/ directory)
+```
+
+but not
+
+```
+test/functional/test_runner.py wallet*
+```
+
+Combinations of wildcards can be passed:
+
+```
+test/functional/test_runner.py ./test/functional/tool* test/functional/mempool*
+test_runner.py tool* mempool*
+```
+
Run the regression test suite with:
```
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index d38eca6cbe..f0bf09e172 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -29,7 +29,10 @@ NOT_FINAL_ERROR = "non-BIP68-final (code 64)"
class BIP68Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
- self.extra_args = [[], ["-acceptnonstdtxn=0"]]
+ self.extra_args = [
+ ["-acceptnonstdtxn=1"],
+ ["-acceptnonstdtxn=0"],
+ ]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 3ad83cd2b3..b5eac88ba7 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -78,7 +78,7 @@ class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [[]]
+ self.extra_args = [['-acceptnonstdtxn=1']] # This is a consensus block test, we don't care about tx policy
def run_test(self):
node = self.nodes[0] # convenience reference to the node
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 7712e8bdf6..af34f9f0db 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -57,7 +57,11 @@ def cltv_validate(node, tx, height):
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
- self.extra_args = [['-whitelist=127.0.0.1', '-par=1']] # Use only one script thread to get the exact reject reason for testing
+ self.extra_args = [[
+ '-whitelist=127.0.0.1',
+ '-par=1', # Use only one script thread to get the exact reject reason for testing
+ '-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
+ ]]
self.setup_clean_chain = True
self.rpc_timeout = 120
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index f0d6bc21e6..7d5893641e 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -40,6 +40,11 @@ class ConfArgsTest(BitcoinTestFramework):
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on regtest network when in [regtest] section.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
+ conf.write('regtest=0\n') # mainnet
+ conf.write('acceptnonstdtxn=1\n')
+ self.nodes[0].assert_start_raises_init_error(expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
+
+ with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 87c318de9a..180ea0e51d 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -35,7 +35,7 @@ class MaxUploadTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
- self.extra_args = [["-maxuploadtarget=800"]]
+ self.extra_args = [["-maxuploadtarget=800", "-acceptnonstdtxn=1"]]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index 1496c5d958..fd79df0b07 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -67,8 +67,8 @@ class ReplaceByFeeTest(BitcoinTestFramework):
self.num_nodes = 1
self.extra_args = [
[
+ "-acceptnonstdtxn=1",
"-maxorphantx=1000",
- "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 2d4dd96a1d..a71d4071d5 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -53,17 +53,20 @@ class SegWitTest(BitcoinTestFramework):
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
[
+ "-acceptnonstdtxn=1",
"-rpcserialversion=0",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
],
[
+ "-acceptnonstdtxn=1",
"-blockversion=4",
"-rpcserialversion=1",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
],
[
+ "-acceptnonstdtxn=1",
"-blockversion=536870915",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 2bb5d8ab7d..209a222004 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -36,7 +36,6 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
self.num_nodes = 1
self.extra_args = [[
'-txindex',
- '-acceptnonstdtxn=0', # Try to mimic main-net
]] * self.num_nodes
def skip_test_if_missing_module(self):
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
index 351b27e94a..edf2069933 100755
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -13,7 +13,11 @@ class MempoolLimitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
- self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
+ self.extra_args = [[
+ "-acceptnonstdtxn=1",
+ "-maxmempool=5",
+ "-spendzeroconfchange=0",
+ ]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
index b0a069be81..7e05a8e6c8 100755
--- a/test/functional/mining_prioritisetransaction.py
+++ b/test/functional/mining_prioritisetransaction.py
@@ -14,7 +14,10 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
- self.extra_args = [["-printpriority=1"], ["-printpriority=1"]]
+ self.extra_args = [[
+ "-printpriority=1",
+ "-acceptnonstdtxn=1",
+ ]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 0994857912..eb3336bd3b 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -95,6 +95,9 @@ class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
+ self.extra_args = [[
+ "-acceptnonstdtxn=1",
+ ]]
self.utxos = []
def skip_test_if_missing_module(self):
diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py
index 1b18dd3e58..3cca2d78db 100755
--- a/test/functional/p2p_invalid_tx.py
+++ b/test/functional/p2p_invalid_tx.py
@@ -25,6 +25,9 @@ from data import invalid_txs
class InvalidTxRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ self.extra_args = [[
+ "-acceptnonstdtxn=1",
+ ]]
self.setup_clean_chain = True
def bootstrap_p2p(self, *, num_connections=1):
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index b7fa42f593..dca71aec43 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -184,7 +184,11 @@ class SegWitTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
- self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
+ self.extra_args = [
+ ["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-vbparams=segwit:0:999999999999"],
+ ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"],
+ ["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-vbparams=segwit:0:0"],
+ ]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py
index 102dd22594..8bbb3c04fa 100755
--- a/test/functional/rpc_users.py
+++ b/test/functional/rpc_users.py
@@ -20,6 +20,17 @@ import string
import configparser
import sys
+def call_with_auth(node, user, password):
+ url = urllib.parse.urlparse(node.url)
+ headers = {"Authorization": "Basic " + str_to_b64str('{}:{}'.format(user, password))}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ conn.close()
+ return resp
+
class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
@@ -28,15 +39,24 @@ class HTTPBasicsTest(BitcoinTestFramework):
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
+ self.rtpassword = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
- rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
- rpcuser = "rpcuser=rpcuser💻"
- rpcpassword = "rpcpassword=rpcpassword🔑"
- self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
+ self.rpcuser = "rpcuser💻"
+ self.rpcpassword = "rpcpassword🔑"
+
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
+
+ # Generate RPCAUTH with specified password
+ self.rt2password = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
+ p = subprocess.Popen([sys.executable, gen_rpcauth, 'rt2', self.rt2password], stdout=subprocess.PIPE, universal_newlines=True)
+ lines = p.stdout.read().splitlines()
+ rpcauth2 = lines[1]
+
+ # Generate RPCAUTH without specifying password
+ self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
@@ -47,160 +67,40 @@ class HTTPBasicsTest(BitcoinTestFramework):
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "bitcoin.conf"), 'a', encoding='utf8') as f:
- f.write(rpcuser+"\n")
- f.write(rpcpassword+"\n")
-
- def run_test(self):
-
- ##################################################
- # Check correctness of the rpcauth config option #
- ##################################################
- url = urllib.parse.urlparse(self.nodes[0].url)
-
- #Old authpair
- authpair = url.username + ':' + url.password
-
- #New authpair generated via share/rpcauth tool
- password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
-
- #Second authpair with different username
- password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
- authpairnew = "rt:"+password
+ f.write("rpcuser={}\n".format(self.rpcuser))
+ f.write("rpcpassword={}\n".format(self.rpcpassword))
+ def test_auth(self, node, user, password):
self.log.info('Correct...')
- headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+ assert_equal(200, call_with_auth(node, user, password).status)
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 200)
- conn.close()
-
- #Use new authpair to confirm both work
- self.log.info('Correct...')
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 200)
- conn.close()
-
- #Wrong login name with rt's password
self.log.info('Wrong...')
- authpairnew = "rtwrong:"+password
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 401)
- conn.close()
+ assert_equal(401, call_with_auth(node, user, password+'wrong').status)
- #Wrong password for rt
self.log.info('Wrong...')
- authpairnew = "rt:"+password+"wrong"
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+ assert_equal(401, call_with_auth(node, user+'wrong', password).status)
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 401)
- conn.close()
-
- #Correct for rt2
- self.log.info('Correct...')
- authpairnew = "rt2:"+password2
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 200)
- conn.close()
-
- #Wrong password for rt2
self.log.info('Wrong...')
- authpairnew = "rt2:"+password2+"wrong"
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 401)
- conn.close()
+ assert_equal(401, call_with_auth(node, user+'wrong', password+'wrong').status)
- #Correct for randomly generated user
- self.log.info('Correct...')
- authpairnew = self.user+":"+self.password
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 200)
- conn.close()
+ def run_test(self):
- #Wrong password for randomly generated user
- self.log.info('Wrong...')
- authpairnew = self.user+":"+self.password+"Wrong"
- headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+ ##################################################
+ # Check correctness of the rpcauth config option #
+ ##################################################
+ url = urllib.parse.urlparse(self.nodes[0].url)
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 401)
- conn.close()
+ self.test_auth(self.nodes[0], url.username, url.password)
+ self.test_auth(self.nodes[0], 'rt', self.rtpassword)
+ self.test_auth(self.nodes[0], 'rt2', self.rt2password)
+ self.test_auth(self.nodes[0], self.user, self.password)
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
- # rpcuser and rpcpassword authpair
- self.log.info('Correct...')
- rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
-
- headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 200)
- conn.close()
-
- #Wrong login name with rpcuser's password
- rpcuserauthpair = "rpcuserwrong:rpcpassword"
- headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 401)
- conn.close()
-
- #Wrong password for rpcuser
- self.log.info('Wrong...')
- rpcuserauthpair = "rpcuser:rpcpasswordwrong"
- headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
-
- conn = http.client.HTTPConnection(url.hostname, url.port)
- conn.connect()
- conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
- resp = conn.getresponse()
- assert_equal(resp.status, 401)
- conn.close()
-
+ self.test_auth(self.nodes[1], self.rpcuser, self.rpcpassword)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
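The refactor funnels every credential check through call_with_auth(), which issues a single POST with an HTTP Basic Authorization header and hands back the response; test_auth() then asserts 200 for the correct pair and 401 when either the user or the password is wrong. A self-contained sketch of the same probe using only the standard library (host, port and the request body here are placeholders, not values taken from the test framework):

    import base64
    import http.client

    def basic_auth_status(host, port, user, password):
        # HTTP Basic auth is simply "user:password", base64-encoded, carried in
        # the Authorization header of the request.
        token = base64.b64encode('{}:{}'.format(user, password).encode('utf-8')).decode('ascii')
        conn = http.client.HTTPConnection(host, port)
        try:
            conn.request('POST', '/', '{"method": "getbestblockhash"}',
                         {"Authorization": "Basic " + token})
            return conn.getresponse().status  # 200 for valid credentials, 401 otherwise
        finally:
            conn.close()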
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 26215083fb..efd962ea93 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -19,6 +19,7 @@ import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
+from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
@@ -515,14 +516,13 @@ def gen_return_txouts():
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
- txouts = "81"
+ txouts = []
+ from .messages import CTxOut
+ txout = CTxOut()
+ txout.nValue = 0
+ txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for k in range(128):
- # add txout value
- txouts = txouts + "0000000000000000"
- # add length of script_pubkey
- txouts = txouts + "fd0402"
- # add script_pubkey
- txouts = txouts + script_pubkey
+ txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
@@ -530,6 +530,7 @@ def gen_return_txouts():
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
+ from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
@@ -537,9 +538,11 @@ def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
- newtx = rawtx[0:92]
- newtx = newtx + txouts
- newtx = newtx + rawtx[94:]
+ tx = CTransaction()
+ tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
+ for txout in txouts:
+ tx.vout.append(txout)
+ newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
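Replacing the hand-rolled hex splicing with CTxOut/CTransaction round-tripping means the padding no longer depends on hard-coded byte offsets into the raw transaction (the old rawtx[0:92] / rawtx[94:] slicing). The amount of padding itself is unchanged; a rough size check, assuming each padded scriptPubKey is 516 bytes long (which is what the removed "fd0402" compact-size prefix encodes):

    script_len = 516                 # 0x0204 bytes per padded scriptPubKey ("fd0402")
    txout_size = 8 + 3 + script_len  # 8-byte value + 3-byte length prefix + script
    padding = 128 * txout_size       # bytes appended to each transaction
    print(padding)                   # 67456, i.e. roughly 66 KiB of padding per tx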
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 462547f44f..71738be1d0 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -234,6 +234,7 @@ def main():
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
+ parser.add_argument('--filter', help='filter scripts to run by regular expression')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
@@ -269,11 +270,22 @@ def main():
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
- # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
- tests = [test + ".py" if ".py" not in test else test for test in tests]
+ # in the ALL_SCRIPTS list. Accept names with or without a .py extension.
+ # Specified tests can contain wildcards, but in that case the supplied
+ # paths should be coherent, e.g. the same path as that provided to call
+ # test_runner.py. Examples:
+ # `test/functional/test_runner.py test/functional/wallet*`
+ # `test/functional/test_runner.py ./test/functional/wallet*`
+ # `test_runner.py wallet*`
+ # but not:
+ # `test/functional/test_runner.py wallet*`
+ # Multiple wildcards can be passed:
+ # `test_runner.py tool* mempool*`
for test in tests:
- if test in ALL_SCRIPTS:
- test_list.append(test)
+ script = test.split("/")[-1]
+ script = script + ".py" if ".py" not in script else script
+ if script in ALL_SCRIPTS:
+ test_list.append(script)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
@@ -294,6 +306,9 @@ def main():
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
+ if args.filter:
+ test_list = list(filter(re.compile(args.filter).search, test_list))
+
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index daa834b5b8..34e84fcf55 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -20,6 +20,9 @@ from test_framework.util import (
class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
+ self.extra_args = [[
+ "-acceptnonstdtxn=1",
+ ]] * self.num_nodes
self.setup_clean_chain = True
def skip_test_if_missing_module(self):