204 files changed, 8614 insertions, 2209 deletions
diff --git a/.travis.yml b/.travis.yml index b1bbfb091d..42fa653d8c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -132,6 +132,7 @@ jobs: - berkeley-db4 - miniupnpc - qrencode + - sqlite - ccache - zeromq env: >- diff --git a/build_msvc/libsecp256k1/libsecp256k1.vcxproj b/build_msvc/libsecp256k1/libsecp256k1.vcxproj index 99fb63fb02..c42918d6e1 100644 --- a/build_msvc/libsecp256k1/libsecp256k1.vcxproj +++ b/build_msvc/libsecp256k1/libsecp256k1.vcxproj @@ -12,7 +12,7 @@ </ItemGroup> <ItemDefinitionGroup> <ClCompile> - <PreprocessorDefinitions>ENABLE_MODULE_ECDH;ENABLE_MODULE_RECOVERY;%(PreprocessorDefinitions)</PreprocessorDefinitions> + <PreprocessorDefinitions>ENABLE_MODULE_ECDH;ENABLE_MODULE_RECOVERY;ENABLE_MODULE_EXTRAKEYS;ENABLE_MODULE_SCHNORRSIG;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\src\secp256k1;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> </ClCompile> </ItemDefinitionGroup> diff --git a/build_msvc/vcpkg.json b/build_msvc/vcpkg.json index 5d5f94227f..dfd3929c44 100644 --- a/build_msvc/vcpkg.json +++ b/build_msvc/vcpkg.json @@ -9,6 +9,7 @@ "boost-signals2", "boost-test", "boost-thread", + "sqlite3", "double-conversion", { "name": "libevent", @@ -16,4 +17,4 @@ }, "zeromq" ] -}
\ No newline at end of file +} diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 251ece7984..191b8049b0 100644 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_asan -export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev" +export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev libsqlite3-dev" export DOCKER_NAME_TAG=ubuntu:20.04 export NO_DEPENDS=1 export GOAL="install" diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 6a4979990b..b88ee2b50f 100644 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -15,7 +15,7 @@ export BDB_PREFIX="${BASE_ROOT_DIR}/db4" export CONTAINER_NAME="ci_native_msan" export PACKAGES="clang-9 llvm-9 cmake" -export DEP_OPTS="NO_WALLET=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' boost_cxxflags='-std=c++11 -fvisibility=hidden -fPIC ${MSAN_AND_LIBCXX_FLAGS}' zeromq_cxxflags='-std=c++11 ${MSAN_AND_LIBCXX_FLAGS}'" +export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' boost_cxxflags='-std=c++11 -fvisibility=hidden -fPIC ${MSAN_AND_LIBCXX_FLAGS}' zeromq_cxxflags='-std=c++11 ${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="install" export BITCOIN_CONFIG="--enable-wallet --with-sanitizers=memory --with-asm=no --prefix=${BASE_ROOT_DIR}/depends/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' BDB_LIBS='-L${BDB_PREFIX}/lib -ldb_cxx-4.8' BDB_CFLAGS='-I${BDB_PREFIX}/include'" export USE_MEMORY_SANITIZER="true" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh index 710d9e1011..bfaea13a25 100644 --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_valgrind -export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev" +export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libsqlite3-dev" export USE_VALGRIND=1 export NO_DEPENDS=1 export TEST_RUNNER_EXTRA="--exclude rpc_bind" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh index d3566914ac..632bccf574 100755 --- a/ci/test/04_install.sh +++ b/ci/test/04_install.sh @@ -81,11 +81,10 @@ else fi if [ ! 
-d ${DIR_QA_ASSETS} ]; then - if [ "$RUN_FUZZ_TESTS" = "true" ]; then - DOCKER_EXEC git clone https://github.com/bitcoin-core/qa-assets ${DIR_QA_ASSETS} - fi + DOCKER_EXEC git clone --depth=1 https://github.com/bitcoin-core/qa-assets ${DIR_QA_ASSETS} fi export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/ +export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/ DOCKER_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/" diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh index 6c14a3dfbe..607a2820dd 100755 --- a/ci/test/06_script_b.sh +++ b/ci/test/06_script_b.sh @@ -23,13 +23,13 @@ fi if [ "$RUN_UNIT_TESTS" = "true" ]; then BEGIN_FOLD unit-tests - DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib make $MAKEJOBS check VERBOSE=1 + DOCKER_EXEC DIR_UNIT_TEST_DATA=${DIR_UNIT_TEST_DATA} LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib make $MAKEJOBS check VERBOSE=1 END_FOLD fi if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then BEGIN_FOLD unit-tests-seq - DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib "${BASE_BUILD_DIR}/bitcoin-*/src/test/test_bitcoin*" --catch_system_errors=no -l test_suite + DOCKER_EXEC DIR_UNIT_TEST_DATA=${DIR_UNIT_TEST_DATA} LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib "${BASE_BUILD_DIR}/bitcoin-*/src/test/test_bitcoin*" --catch_system_errors=no -l test_suite END_FOLD fi diff --git a/configure.ac b/configure.ac index 674ed1ee73..ab40991ce4 100644 --- a/configure.ac +++ b/configure.ac @@ -190,6 +190,16 @@ AC_ARG_ENABLE([ccache], [use_ccache=$enableval], [use_ccache=auto]) +dnl Suppress warnings from external headers (e.g. Boost, Qt). +dnl May be useful if warnings from external headers clutter the build output +dnl too much, so that it becomes difficult to spot Bitcoin Core warnings +dnl or if they cause a build failure with --enable-werror. +AC_ARG_ENABLE([suppress-external-warnings], + [AS_HELP_STRING([--enable-suppress-external-warnings], + [Suppress warnings from external headers (default is no)])], + [suppress_external_warnings=$enableval], + [suppress_external_warnings=no]) + AC_ARG_ENABLE([lcov], [AS_HELP_STRING([--enable-lcov], [enable lcov testing (default is no)])], @@ -1149,6 +1159,18 @@ AC_SUBST(LEVELDB_CPPFLAGS) AC_SUBST(LIBLEVELDB) AC_SUBST(LIBMEMENV) +dnl SUPPRESSED_CPPFLAGS=SUPPRESS_WARNINGS([$SOME_CPPFLAGS]) +dnl Replace -I with -isystem in $SOME_CPPFLAGS to suppress warnings from +dnl headers from its include directories and return the result. +dnl See -isystem documentation: +dnl https://gcc.gnu.org/onlinedocs/gcc/Directory-Options.html +dnl https://clang.llvm.org/docs/ClangCommandLineReference.html#cmdoption-clang-isystem-directory +dnl Do not change "-I/usr/include" to "-isystem /usr/include" because that +dnl is not necessary (/usr/include is already a system directory) and because +dnl it would break GCC's #include_next. 
+AC_DEFUN([SUPPRESS_WARNINGS], + [$(echo $1 |${SED} -E -e 's/(^| )-I/\1-isystem /g' -e 's;-isystem /usr/include([/ ]|$);-I/usr/include\1;g')]) + dnl enable-fuzz should disable all other targets if test "x$enable_fuzz" = "xyes"; then AC_MSG_WARN(enable-fuzz will disable all other targets) @@ -1184,11 +1206,25 @@ else dnl sets $bitcoin_enable_qt, $bitcoin_enable_qt_test, $bitcoin_enable_qt_dbus BITCOIN_QT_CONFIGURE([5.5.1]) + + dnl Keep a copy of the original $QT_INCLUDES and use it when invoking qt's moc + QT_INCLUDES_UNSUPPRESSED=$QT_INCLUDES + if test x$suppress_external_warnings != xno ; then + QT_INCLUDES=SUPPRESS_WARNINGS($QT_INCLUDES) + QT_DBUS_INCLUDES=SUPPRESS_WARNINGS($QT_DBUS_INCLUDES) + QT_TEST_INCLUDES=SUPPRESS_WARNINGS($QT_TEST_INCLUDES) + fi fi if test x$enable_wallet != xno; then dnl Check for libdb_cxx only if wallet enabled BITCOIN_FIND_BDB48 + if test x$suppress_external_warnings != xno ; then + BDB_CPPFLAGS=SUPPRESS_WARNINGS($BDB_CPPFLAGS) + fi + + dnl Check for sqlite3 + PKG_CHECK_MODULES([SQLITE], [sqlite3 >= 3.7.17], , [AC_MSG_ERROR([sqlite3 not found.])]) fi dnl Check for libminiupnpc (optional) @@ -1243,6 +1279,10 @@ AX_BOOST_THREAD dnl Opt-in to boost-process AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] ) +if test x$suppress_external_warnings != xno; then + BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) +fi + dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic dnl counter implementations. In 1.63 and later the std::atomic approach is default. m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro @@ -1606,6 +1646,7 @@ AC_SUBST(LIBTOOL_APP_LDFLAGS) AC_SUBST(USE_UPNP) AC_SUBST(USE_QRCODE) AC_SUBST(BOOST_LIBS) +AC_SUBST(SQLITE_LIBS) AC_SUBST(TESTDEFS) AC_SUBST(MINIUPNPC_CPPFLAGS) AC_SUBST(MINIUPNPC_LIBS) @@ -1659,7 +1700,7 @@ if test x$need_bundled_univalue = xyes; then AC_CONFIG_SUBDIRS([src/univalue]) fi -ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --with-bignum=no --enable-module-recovery" +ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --with-bignum=no --enable-module-recovery --enable-module-schnorrsig --enable-experimental" AC_CONFIG_SUBDIRS([src/secp256k1]) AC_OUTPUT diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index e86ff83798..65f9a2e5c9 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -78,7 +78,7 @@ script: | echo "REAL=\`which -a ${i}-${prog}-8 | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> $WRAP_DIR/${i}-${prog} + echo "\$REAL \"\$@\"" >> $WRAP_DIR/${i}-${prog} chmod +x ${WRAP_DIR}/${i}-${prog} fi done diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml index d05b6d426d..5f671b95ce 100644 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -81,7 +81,7 @@ script: | echo "REAL=\`which -a ${i}-${prog}-posix | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog} echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog} echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog} - echo "\$REAL \$@" >> 
$WRAP_DIR/${i}-${prog} + echo "\$REAL \"\$@\"" >> $WRAP_DIR/${i}-${prog} chmod +x ${WRAP_DIR}/${i}-${prog} done done diff --git a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py index a00acb1f41..49320d92e6 100755 --- a/contrib/testgen/gen_key_io_test_vectors.py +++ b/contrib/testgen/gen_key_io_test_vectors.py @@ -15,8 +15,7 @@ import os from itertools import islice from base58 import b58encode_chk, b58decode_chk, b58chars import random -from binascii import b2a_hex -from segwit_addr import bech32_encode, decode, convertbits, CHARSET +from segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET # key types PUBKEY_ADDRESS = 0 @@ -109,7 +108,7 @@ def is_valid(v): def is_valid_bech32(v): '''Check vector v for bech32 validity''' for hrp in ['bc', 'tb', 'bcrt']: - if decode(hrp, v) != (None, None): + if decode_segwit_address(hrp, v) != (None, None): return True return False @@ -141,9 +140,7 @@ def gen_valid_vectors(): rv, payload = valid_vector_generator(template) assert is_valid(rv) metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None} - hexrepr = b2a_hex(payload) - if isinstance(hexrepr, bytes): - hexrepr = hexrepr.decode('utf8') + hexrepr = payload.hex() yield (rv, hexrepr, metadata) def gen_invalid_base58_vector(template): diff --git a/depends/Makefile b/depends/Makefile index 2bc5df974a..1ad21f6821 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -134,7 +134,10 @@ qrencode_packages_$(NO_QR) = $(qrencode_packages) qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_) -wallet_packages_$(NO_WALLET) = $(wallet_packages) +bdb_packages_$(NO_BDB) = $(bdb_packages) +sqlite_packages_$(NO_SQLITE) = $(sqlite_packages) +wallet_packages_$(NO_WALLET) = $(bdb_packages_) $(sqlite_packages_) + upnp_packages_$(NO_UPNP) = $(upnp_packages) zmq_packages_$(NO_ZMQ) = $(zmq_packages) multiprocess_packages_$(MULTIPROCESS) = $(multiprocess_packages) diff --git a/depends/README.md b/depends/README.md index 2356e8be59..869ebe12fd 100644 --- a/depends/README.md +++ b/depends/README.md @@ -99,6 +99,10 @@ The following can be set when running make: `make FOO=bar` <dd>Don't download/build/cache packages needed for enabling zeromq</dd> <dt>NO_WALLET</dt> <dd>Don't download/build/cache libs needed to enable the wallet</dd> +<dt>NO_BDB</dt> +<dd>Don't download/build/cache BerkeleyDB</dd> +<dt>NO_SQLITE</dt> +<dd>Don't download/build/cache SQLite</dd> <dt>NO_UPNP</dt> <dd>Don't download/build/cache packages needed for enabling upnp</dd> <dt>MULTIPROCESS</dt> diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index 8fe2c771c9..4627acb521 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -10,7 +10,8 @@ qt_android_packages=qt qt_darwin_packages=qt qt_mingw32_packages=qt -wallet_packages=bdb +bdb_packages=bdb +sqlite_packages=sqlite zmq_packages=zeromq diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk new file mode 100644 index 0000000000..5b3a61b239 --- /dev/null +++ b/depends/packages/sqlite.mk @@ -0,0 +1,26 @@ +package=sqlite +$(package)_version=3320100 +$(package)_download_path=https://sqlite.org/2020/ +$(package)_file_name=sqlite-autoconf-$($(package)_version).tar.gz +$(package)_sha256_hash=486748abfb16abd8af664e3a5f03b228e5f124682b0c942e157644bf6fff7d10 + +define $(package)_set_vars +$(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-extensions 
--enable-option-checking +$(package)_config_opts_linux=--with-pic +endef + +define $(package)_config_cmds + $($(package)_autoconf) +endef + +define $(package)_build_cmds + $(MAKE) libsqlite3.la +endef + +define $(package)_stage_cmds + $(MAKE) DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-includeHEADERS install-pkgconfigDATA +endef + +define $(package)_postprocess_cmds + rm lib/*.la +endef diff --git a/doc/bips.md b/doc/bips.md index ad6f7a0767..8c20533c9b 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -45,3 +45,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): * [`BIP 176`](https://github.com/bitcoin/bips/blob/master/bip-0176.mediawiki): Bits Denomination [QT only] is supported as of **v0.16.0** ([PR 12035](https://github.com/bitcoin/bitcoin/pull/12035)). * [`BIP 325`](https://github.com/bitcoin/bips/blob/master/bip-0325.mediawiki): Signet test network is supported as of **v0.21.0** ([PR 18267](https://github.com/bitcoin/bitcoin/pull/18267)). * [`BIP 339`](https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki): Relay of transactions by wtxid is supported as of **v0.21.0** ([PR 18044](https://github.com/bitcoin/bitcoin/pull/18044)). +* [`BIP 340`](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) [`341`](https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki) [`342`](https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki): Validation rules for Taproot (including Schnorr signatures and Tapscript leaves) are implemented as of **v0.21.0** ([PR 19953](https://github.com/bitcoin/bitcoin/pull/19953)), without mainnet activation. diff --git a/doc/build-osx.md b/doc/build-osx.md index 7b76117c8b..2a7d71eea6 100644 --- a/doc/build-osx.md +++ b/doc/build-osx.md @@ -19,7 +19,7 @@ Then install [Homebrew](https://brew.sh). ## Dependencies ```shell -brew install automake berkeley-db4 libtool boost miniupnpc pkg-config python qt libevent qrencode +brew install automake berkeley-db4 libtool boost miniupnpc pkg-config python qt libevent qrencode sqlite ``` If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting). @@ -79,7 +79,7 @@ compiled in `disable-wallet` mode with: ./configure --disable-wallet ``` -In this case there is no dependency on Berkeley DB 4.8. +In this case there is no dependency on Berkeley DB 4.8 and SQLite. Mining is also possible in disable-wallet mode using the `getblocktemplate` RPC call. diff --git a/doc/build-unix.md b/doc/build-unix.md index 6b51db5f55..c076fb6fff 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -46,6 +46,7 @@ Optional dependencies: libqrencode | QR codes in GUI | Optional for generating QR codes (only needed when GUI enabled) univalue | Utility | JSON parsing and encoding (bundled version will be used unless --with-system-univalue passed to configure) libzmq3 | ZMQ notification | Optional, allows generating ZMQ notifications (requires ZMQ version >= 4.0.0) + sqlite3 | SQLite DB | Wallet storage (only needed when wallet enabled) For the versions used, see [dependencies.md](dependencies.md) @@ -91,6 +92,10 @@ pass `--with-incompatible-bdb` to configure. Otherwise, you can build from self-compiled `depends` (see above). 
+SQLite is required for the wallet: + + sudo apt install libsqlite3-dev + To build Bitcoin Core without wallet, see [*Disable-wallet mode*](/doc/build-unix.md#disable-wallet-mode) @@ -144,6 +149,10 @@ libqrencode (optional) can be installed with: sudo dnf install qrencode-devel +SQLite can be installed with: + + sudo dnf install sqlite-devel + Notes ----- The release is built with GCC and then "strip bitcoind" to strip the debug @@ -238,7 +247,7 @@ disable-wallet mode with: ./configure --disable-wallet -In this case there is no dependency on Berkeley DB 4.8. +In this case there is no dependency on Berkeley DB 4.8 and SQLite. Mining is also possible in disable-wallet mode using the `getblocktemplate` RPC call. diff --git a/doc/dependencies.md b/doc/dependencies.md index 92dea65309..ddd50ef296 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -21,6 +21,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct | Python (tests) | | [3.5](https://www.python.org/downloads) | | | | | qrencode | [3.4.4](https://fukuchi.org/works/qrencode) | | No | | | | Qt | [5.9.8](https://download.qt.io/official_releases/qt/) | [5.5.1](https://github.com/bitcoin/bitcoin/issues/13478) | No | | | +| SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | | | XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) | | xkbcommon | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) | | ZeroMQ | [4.3.1](https://github.com/zeromq/libzmq/releases) | 4.0.0 | No | | | @@ -33,6 +34,7 @@ Some dependencies are not needed in all configurations. The following are some f #### Options passed to `./configure` * MiniUPnPc is not needed with `--with-miniupnpc=no`. * Berkeley DB is not needed with `--disable-wallet`. +* SQLite is not needed with `--disable-wallet`. * Qt is not needed with `--without-gui`. * If the qrencode dependency is absent, QR support won't be added. To force an error when that happens, pass `--with-qrencode`. * ZeroMQ is needed only with the `--with-zmq` option. diff --git a/doc/files.md b/doc/files.md index 64cff43d3f..65f98e6986 100644 --- a/doc/files.md +++ b/doc/files.md @@ -50,6 +50,7 @@ Subdirectory | File(s) | Description `indexes/blockfilter/basic/db/` | LevelDB database | Blockfilter index LevelDB database for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `indexes/blockfilter/basic/` | `fltrNNNNN.dat`<sup>[\[2\]](#note2)</sup> | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, wallets reside in the [data directory](#data-directory-location) +`./` | `anchors.dat` | Anchor IP address database, created on shutdown and deleted at startup. Anchors are last known outgoing block-relay-only peers that are tried to re-connect to on startup `./` | `banlist.dat` | Stores the IPs/subnets of banned nodes `./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. 
Path can be specified by `-conf` option `./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option @@ -72,8 +73,9 @@ Subdirectory | File(s) | Description -------------|-------------------|------------ `database/` | BDB logging files | Part of BDB environment; created at start and deleted on shutdown; a user *must keep it as safe* as personal wallet `wallet.dat` `./` | `db.log` | BDB error file -`./` | `wallet.dat` | Personal wallet (BDB) with keys and transactions +`./` | `wallet.dat` | Personal wallet with keys and transactions. May be either a Berkeley DB or SQLite database file. `./` | `.walletlock` | Wallet lock file +`./` | `wallet.dat-journal` | SQLite Rollback Journal file for `wallet.dat`. Usually created at start and deleted on shutdown. A user *must keep it as safe* as the `wallet.dat` file. 1. Each user-defined wallet named "wallet_name" resides in `wallets/wallet_name/` subdirectory. diff --git a/doc/release-notes.md b/doc/release-notes.md index aef021a29d..4c69c61390 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -82,6 +82,14 @@ P2P and network changes node using P2P relay. This version reduces the initial broadcast guarantees for wallet transactions submitted via P2P to a node running the wallet. (#18038) +- The size of the set of transactions that peers have announced and we consider + for requests has been reduced from 100000 to 5000 (per peer), and further + announcements will be ignored when that limit is reached. If you need to dump + (very) large batches of transactions, exceptions can be made for trusted + peers using the "relay" network permission. For localhost for example it can + be enabled using the command line option `-whitelist=relay@127.0.0.1`. + (#19988) + - The Tor onion service that is automatically created by setting the `-listenonion` configuration parameter will now be created as a Tor v3 service instead of Tor v2. The private key that was used for Tor v2 (if any) will be @@ -138,8 +146,8 @@ will trigger BIP 125 (replace-by-fee) opt-in. (#11413) option `-deprecatedrpc=banscore` is used. The `banscore` field will be fully removed in the next major release. (#19469) -- The `testmempoolaccept` RPC returns `vsize` and a `fee` object with the `base` fee - if the transaction passes validation. (#19940) +- The `testmempoolaccept` RPC returns `vsize` and a `fees` object with the `base` fee + if the transaction would pass validation. (#19940) - The `getpeerinfo` RPC now returns a `connection_type` field. This indicates the type of connection established with the peer. It will return one of six @@ -448,6 +456,16 @@ RPC - Fee estimation failed - Transaction has too long of a mempool chain +- The `sendrawtransaction` error code for exceeding `maxfeerate` has been changed from + `-26` to `-25`. The error string has been changed from "absurdly-high-fee" to + "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)." The + `testmempoolaccept` RPC returns `max-fee-exceeded` rather than `absurdly-high-fee` + as the `reject-reason`. (#19339) + +- To make wallet and rawtransaction RPCs more consistent, the error message for + exceeding maximum feerate has been changed to "Fee exceeds maximum configured by user + (e.g. -maxtxfee, maxfeerate)." 
(#19339) + Tests ----- diff --git a/src/Makefile.am b/src/Makefile.am index aa63b5f516..b0d36717ce 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -151,7 +151,6 @@ BITCOIN_CORE_H = \ interfaces/wallet.h \ key.h \ key_io.h \ - limitedmap.h \ logging.h \ logging/timer.h \ memusage.h \ @@ -215,6 +214,7 @@ BITCOIN_CORE_H = \ timedata.h \ torcontrol.h \ txdb.h \ + txrequest.h \ txmempool.h \ undo.h \ util/asmap.h \ @@ -257,6 +257,7 @@ BITCOIN_CORE_H = \ wallet/rpcwallet.h \ wallet/salvage.h \ wallet/scriptpubkeyman.h \ + wallet/sqlite.h \ wallet/wallet.h \ wallet/walletdb.h \ wallet/wallettool.h \ @@ -327,6 +328,7 @@ libbitcoin_server_a_SOURCES = \ timedata.cpp \ torcontrol.cpp \ txdb.cpp \ + txrequest.cpp \ txmempool.cpp \ validation.cpp \ validationinterface.cpp \ @@ -370,6 +372,7 @@ libbitcoin_wallet_a_SOURCES = \ wallet/rpcwallet.cpp \ wallet/salvage.cpp \ wallet/scriptpubkeyman.cpp \ + wallet/sqlite.cpp \ wallet/wallet.cpp \ wallet/walletdb.cpp \ wallet/walletutil.cpp \ @@ -589,7 +592,7 @@ bitcoin_bin_ldadd = \ $(LIBMEMENV) \ $(LIBSECP256K1) -bitcoin_bin_ldadd += $(BOOST_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(ZMQ_LIBS) +bitcoin_bin_ldadd += $(BOOST_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(ZMQ_LIBS) $(SQLITE_LIBS) bitcoind_SOURCES = $(bitcoin_daemon_sources) bitcoind_CPPFLAGS = $(bitcoin_bin_cppflags) diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index bd9143a381..beb3f8dfd2 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -74,7 +74,7 @@ bench_bench_bitcoin_SOURCES += bench/coin_selection.cpp bench_bench_bitcoin_SOURCES += bench/wallet_balance.cpp endif -bench_bench_bitcoin_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(MINIUPNPC_LIBS) +bench_bench_bitcoin_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(MINIUPNPC_LIBS) $(SQLITE_LIBS) bench_bench_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) CLEAN_BITCOIN_BENCH = bench/*.gcda bench/*.gcno $(GENERATED_BENCH_FILES) diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 69ff0f0251..f46310a603 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -321,7 +321,7 @@ bitcoin_qt_ldadd += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS) endif bitcoin_qt_ldadd += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \ $(BOOST_LIBS) $(QT_LIBS) $(QT_DBUS_LIBS) $(QR_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \ - $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) + $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(SQLITE_LIBS) bitcoin_qt_ldflags = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) bitcoin_qt_libtoolflags = $(AM_LIBTOOLFLAGS) --tag CXX @@ -379,11 +379,11 @@ ui_%.h: %.ui $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(UIC) -o $@ $< || (echo "Error creating $@"; false) %.moc: %.cpp - $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(MOC) $(DEFAULT_INCLUDES) $(QT_INCLUDES) $(MOC_DEFS) $< | \ + $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(MOC) $(DEFAULT_INCLUDES) $(QT_INCLUDES_UNSUPPRESSED) $(MOC_DEFS) $< | \ $(SED) -e '/^\*\*.*Created:/d' -e '/^\*\*.*by:/d' > $@ moc_%.cpp: %.h - $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(MOC) $(DEFAULT_INCLUDES) $(QT_INCLUDES) $(MOC_DEFS) $< | \ + $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(MOC) $(DEFAULT_INCLUDES) $(QT_INCLUDES_UNSUPPRESSED) $(MOC_DEFS) $< | \ $(SED) -e '/^\*\*.*Created:/d' -e 
'/^\*\*.*by:/d' > $@ %.qm: %.ts diff --git a/src/Makefile.qttest.include b/src/Makefile.qttest.include index d300398b25..c05dd38737 100644 --- a/src/Makefile.qttest.include +++ b/src/Makefile.qttest.include @@ -56,7 +56,7 @@ endif qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) \ $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(QT_DBUS_LIBS) $(QT_TEST_LIBS) $(QT_LIBS) \ $(QR_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \ - $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) + $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(SQLITE_LIBS) qt_test_test_bitcoin_qt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) qt_test_test_bitcoin_qt_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS) diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 06dde87ddd..7fac78f973 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -129,6 +129,7 @@ FUZZ_TARGETS = \ test/fuzz/script_deserialize \ test/fuzz/script_flags \ test/fuzz/script_interpreter \ + test/fuzz/script_assets_test_minimizer \ test/fuzz/script_ops \ test/fuzz/script_sigcache \ test/fuzz/script_sign \ @@ -151,6 +152,7 @@ FUZZ_TARGETS = \ test/fuzz/tx_in_deserialize \ test/fuzz/tx_out \ test/fuzz/txoutcompressor_deserialize \ + test/fuzz/txrequest \ test/fuzz/txundo_deserialize \ test/fuzz/uint160_deserialize \ test/fuzz/uint256_deserialize @@ -235,7 +237,6 @@ BITCOIN_TESTS =\ test/interfaces_tests.cpp \ test/key_io_tests.cpp \ test/key_tests.cpp \ - test/limitedmap_tests.cpp \ test/logging_tests.cpp \ test/dbwrapper_tests.cpp \ test/validation_tests.cpp \ @@ -275,6 +276,7 @@ BITCOIN_TESTS =\ test/torcontrol_tests.cpp \ test/transaction_tests.cpp \ test/txindex_tests.cpp \ + test/txrequest_tests.cpp \ test/txvalidation_tests.cpp \ test/txvalidationcache_tests.cpp \ test/uint256_tests.cpp \ @@ -315,7 +317,7 @@ test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_C $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS) test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) -test_test_bitcoin_LDADD += $(BDB_LIBS) $(MINIUPNPC_LIBS) +test_test_bitcoin_LDADD += $(BDB_LIBS) $(MINIUPNPC_LIBS) $(SQLITE_LIBS) test_test_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) -static if ENABLE_ZMQ @@ -1082,6 +1084,12 @@ test_fuzz_script_interpreter_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_script_interpreter_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) test_fuzz_script_interpreter_SOURCES = test/fuzz/script_interpreter.cpp +test_fuzz_script_assets_test_minimizer_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_script_assets_test_minimizer_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_script_assets_test_minimizer_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_script_assets_test_minimizer_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_script_assets_test_minimizer_SOURCES = test/fuzz/script_assets_test_minimizer.cpp + test_fuzz_script_ops_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_script_ops_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_script_ops_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -1214,6 +1222,12 @@ test_fuzz_txoutcompressor_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_txoutcompressor_deserialize_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) test_fuzz_txoutcompressor_deserialize_SOURCES = 
test/fuzz/deserialize.cpp +test_fuzz_txrequest_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_txrequest_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_txrequest_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_txrequest_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) +test_fuzz_txrequest_SOURCES = test/fuzz/txrequest.cpp + test_fuzz_txundo_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTXUNDO_DESERIALIZE=1 test_fuzz_txundo_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_txundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) diff --git a/src/addrdb.cpp b/src/addrdb.cpp index f3e8a19de2..27f22826a9 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -10,6 +10,7 @@ #include <clientversion.h> #include <cstdint> #include <hash.h> +#include <logging/timer.h> #include <random.h> #include <streams.h> #include <tinyformat.h> @@ -156,3 +157,22 @@ bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers) } return ret; } + +void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors) +{ + LOG_TIME_SECONDS(strprintf("Flush %d outbound block-relay-only peer addresses to anchors.dat", anchors.size())); + SerializeFileDB("anchors", anchors_db_path, anchors); +} + +std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path) +{ + std::vector<CAddress> anchors; + if (DeserializeFileDB(anchors_db_path, anchors)) { + LogPrintf("Loaded %i addresses from %s\n", anchors.size(), anchors_db_path.filename()); + } else { + anchors.clear(); + } + + fs::remove(anchors_db_path); + return anchors; +} diff --git a/src/addrdb.h b/src/addrdb.h index 8410c3776c..4ac0e3e1b5 100644 --- a/src/addrdb.h +++ b/src/addrdb.h @@ -11,9 +11,9 @@ #include <serialize.h> #include <string> -#include <map> +#include <vector> -class CSubNet; +class CAddress; class CAddrMan; class CDataStream; @@ -73,4 +73,20 @@ public: bool Read(banmap_t& banSet); }; +/** + * Dump the anchor IP address database (anchors.dat) + * + * Anchors are last known outgoing block-relay-only peers that are + * tried to re-connect to on startup. + */ +void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors); + +/** + * Read the anchor IP address database (anchors.dat) + * + * Deleting anchors.dat is intentional as it avoids renewed peering to anchors after + * an unclean shutdown and thus potential exploitation of the anchor peer policy. + */ +std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path); + #endif // BITCOIN_ADDRDB_H diff --git a/src/banman.cpp b/src/banman.cpp index 8752185a60..995fef3d07 100644 --- a/src/banman.cpp +++ b/src/banman.cpp @@ -184,7 +184,7 @@ void BanMan::SweepBanned() while (it != m_banned.end()) { CSubNet sub_net = (*it).first; CBanEntry ban_entry = (*it).second; - if (now > ban_entry.nBanUntil) { + if (!sub_net.IsValid() || now > ban_entry.nBanUntil) { m_banned.erase(it++); m_is_dirty = true; notify_ui = true; diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 28394e7041..ed58f1bbab 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -39,8 +39,6 @@ static const char DEFAULT_RPCCONNECT[] = "127.0.0.1"; static const int DEFAULT_HTTP_CLIENT_TIMEOUT=900; static const bool DEFAULT_NAMED=false; static const int CONTINUE_EXECUTION=-1; -static const std::string ONION{".onion"}; -static const size_t ONION_LEN{ONION.size()}; /** Default number of blocks to generate for RPC generatetoaddress. 
*/ static const std::string DEFAULT_NBLOCKS = "1"; @@ -298,30 +296,24 @@ public: class NetinfoRequestHandler : public BaseRequestHandler { private: - bool IsAddrIPv6(const std::string& addr) const + static constexpr int8_t UNKNOWN_NETWORK{-1}; + static constexpr size_t m_networks_size{3}; + const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion"}}; + std::array<std::array<uint16_t, m_networks_size + 2>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total/block-relay) + int8_t NetworkStringToId(const std::string& str) const { - return !addr.empty() && addr.front() == '['; - } - bool IsInboundOnion(const std::string& addr_local, int mapped_as) const - { - return mapped_as == 0 && addr_local.find(ONION) != std::string::npos; - } - bool IsOutboundOnion(const std::string& addr, int mapped_as) const - { - const size_t addr_len{addr.size()}; - const size_t onion_pos{addr.rfind(ONION)}; - return mapped_as == 0 && onion_pos != std::string::npos && addr_len > ONION_LEN && - (onion_pos == addr_len - ONION_LEN || onion_pos == addr.find_last_of(":") - ONION_LEN); + for (size_t i = 0; i < m_networks_size; ++i) { + if (str == m_networks.at(i)) return i; + } + return UNKNOWN_NETWORK; } uint8_t m_details_level{0}; //!< Optional user-supplied arg to set dashboard details level bool DetailsRequested() const { return m_details_level > 0 && m_details_level < 5; } bool IsAddressSelected() const { return m_details_level == 2 || m_details_level == 4; } bool IsVersionSelected() const { return m_details_level == 3 || m_details_level == 4; } - enum struct NetType { - ipv4, - ipv6, - onion, - }; + bool m_is_asmap_on{false}; + size_t m_max_addr_length{0}; + size_t m_max_id_length{2}; struct Peer { int id; int mapped_as; @@ -334,30 +326,24 @@ private: double min_ping; double ping; std::string addr; + std::string network; std::string sub_version; - NetType net_type; bool is_block_relay; bool is_outbound; bool operator<(const Peer& rhs) const { return std::tie(is_outbound, min_ping) < std::tie(rhs.is_outbound, rhs.min_ping); } }; - std::string NetTypeEnumToString(NetType t) - { - switch (t) { - case NetType::ipv4: return "ipv4"; - case NetType::ipv6: return "ipv6"; - case NetType::onion: return "onion"; - } // no default case, so the compiler can warn about missing cases - assert(false); - } + std::vector<Peer> m_peers; std::string ChainToString() const { if (gArgs.GetChainName() == CBaseChainParams::TESTNET) return " testnet"; if (gArgs.GetChainName() == CBaseChainParams::REGTEST) return " regtest"; return ""; } + const int64_t m_time_now{GetSystemTimeInSeconds()}; + public: - const int ID_PEERINFO = 0; - const int ID_NETWORKINFO = 1; + static constexpr int ID_PEERINFO = 0; + static constexpr int ID_NETWORKINFO = 1; UniValue PrepareRequest(const std::string& method, const std::vector<std::string>& args) override { @@ -385,49 +371,25 @@ public: } // Count peer connection totals, and if DetailsRequested(), store peer data in a vector of structs. 
- const int64_t time_now{GetSystemTimeInSeconds()}; - int ipv4_i{0}, ipv6_i{0}, onion_i{0}, block_relay_i{0}, total_i{0}; // inbound conn counters - int ipv4_o{0}, ipv6_o{0}, onion_o{0}, block_relay_o{0}, total_o{0}; // outbound conn counters - size_t max_peer_id_length{2}, max_addr_length{0}; - bool is_asmap_on{false}; - std::vector<Peer> peers; - const UniValue& getpeerinfo{batch[ID_PEERINFO]["result"]}; - - for (const UniValue& peer : getpeerinfo.getValues()) { - const std::string addr{peer["addr"].get_str()}; - const std::string addr_local{peer["addrlocal"].isNull() ? "" : peer["addrlocal"].get_str()}; - const int mapped_as{peer["mapped_as"].isNull() ? 0 : peer["mapped_as"].get_int()}; + for (const UniValue& peer : batch[ID_PEERINFO]["result"].getValues()) { + const std::string network{peer["network"].get_str()}; + const int8_t network_id{NetworkStringToId(network)}; + if (network_id == UNKNOWN_NETWORK) continue; + const bool is_outbound{!peer["inbound"].get_bool()}; const bool is_block_relay{!peer["relaytxes"].get_bool()}; - const bool is_inbound{peer["inbound"].get_bool()}; - NetType net_type{NetType::ipv4}; - if (is_inbound) { - if (IsAddrIPv6(addr)) { - net_type = NetType::ipv6; - ++ipv6_i; - } else if (IsInboundOnion(addr_local, mapped_as)) { - net_type = NetType::onion; - ++onion_i; - } else { - ++ipv4_i; - } - if (is_block_relay) ++block_relay_i; - } else { - if (IsAddrIPv6(addr)) { - net_type = NetType::ipv6; - ++ipv6_o; - } else if (IsOutboundOnion(addr, mapped_as)) { - net_type = NetType::onion; - ++onion_o; - } else { - ++ipv4_o; - } - if (is_block_relay) ++block_relay_o; + ++m_counts.at(is_outbound).at(network_id); // in/out by network + ++m_counts.at(is_outbound).at(m_networks_size); // in/out overall + ++m_counts.at(2).at(network_id); // total by network + ++m_counts.at(2).at(m_networks_size); // total overall + if (is_block_relay) { + ++m_counts.at(is_outbound).at(m_networks_size + 1); // in/out block-relay + ++m_counts.at(2).at(m_networks_size + 1); // total block-relay } if (DetailsRequested()) { // Push data for this peer to the peers vector. const int peer_id{peer["id"].get_int()}; + const int mapped_as{peer["mapped_as"].isNull() ? 0 : peer["mapped_as"].get_int()}; const int version{peer["version"].get_int()}; - const std::string sub_version{peer["subver"].get_str()}; const int64_t conn_time{peer["conntime"].get_int64()}; const int64_t last_blck{peer["last_block"].get_int64()}; const int64_t last_recv{peer["lastrecv"].get_int64()}; @@ -435,10 +397,12 @@ public: const int64_t last_trxn{peer["last_transaction"].get_int64()}; const double min_ping{peer["minping"].isNull() ? -1 : peer["minping"].get_real()}; const double ping{peer["pingtime"].isNull() ? 
-1 : peer["pingtime"].get_real()}; - peers.push_back({peer_id, mapped_as, version, conn_time, last_blck, last_recv, last_send, last_trxn, min_ping, ping, addr, sub_version, net_type, is_block_relay, !is_inbound}); - max_peer_id_length = std::max(ToString(peer_id).length(), max_peer_id_length); - max_addr_length = std::max(addr.length() + 1, max_addr_length); - is_asmap_on |= (mapped_as != 0); + const std::string addr{peer["addr"].get_str()}; + const std::string sub_version{peer["subver"].get_str()}; + m_peers.push_back({peer_id, mapped_as, version, conn_time, last_blck, last_recv, last_send, last_trxn, min_ping, ping, addr, network, sub_version, is_block_relay, is_outbound}); + m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length); + m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length); + m_is_asmap_on |= (mapped_as != 0); } } @@ -446,30 +410,30 @@ public: std::string result{strprintf("%s %s%s - %i%s\n\n", PACKAGE_NAME, FormatFullVersion(), ChainToString(), networkinfo["protocolversion"].get_int(), networkinfo["subversion"].get_str())}; // Report detailed peer connections list sorted by direction and minimum ping time. - if (DetailsRequested() && !peers.empty()) { - std::sort(peers.begin(), peers.end()); + if (DetailsRequested() && !m_peers.empty()) { + std::sort(m_peers.begin(), m_peers.end()); result += "Peer connections sorted by direction and min ping\n<-> relay net mping ping send recv txn blk uptime "; - if (is_asmap_on) result += " asmap "; - result += strprintf("%*s %-*s%s\n", max_peer_id_length, "id", IsAddressSelected() ? max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? "version" : ""); - for (const Peer& peer : peers) { + if (m_is_asmap_on) result += " asmap "; + result += strprintf("%*s %-*s%s\n", m_max_id_length, "id", IsAddressSelected() ? m_max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? "version" : ""); + for (const Peer& peer : m_peers) { std::string version{ToString(peer.version) + peer.sub_version}; result += strprintf( "%3s %5s %5s%6s%7s%5s%5s%5s%5s%7s%*i %*s %-*s%s\n", peer.is_outbound ? "out" : "in", peer.is_block_relay ? "block" : "full", - NetTypeEnumToString(peer.net_type), + peer.network, peer.min_ping == -1 ? "" : ToString(round(1000 * peer.min_ping)), peer.ping == -1 ? "" : ToString(round(1000 * peer.ping)), - peer.last_send == 0 ? "" : ToString(time_now - peer.last_send), - peer.last_recv == 0 ? "" : ToString(time_now - peer.last_recv), - peer.last_trxn == 0 ? "" : ToString((time_now - peer.last_trxn) / 60), - peer.last_blck == 0 ? "" : ToString((time_now - peer.last_blck) / 60), - peer.conn_time == 0 ? "" : ToString((time_now - peer.conn_time) / 60), - is_asmap_on ? 7 : 0, // variable spacing - is_asmap_on && peer.mapped_as != 0 ? ToString(peer.mapped_as) : "", - max_peer_id_length, // variable spacing + peer.last_send == 0 ? "" : ToString(m_time_now - peer.last_send), + peer.last_recv == 0 ? "" : ToString(m_time_now - peer.last_recv), + peer.last_trxn == 0 ? "" : ToString((m_time_now - peer.last_trxn) / 60), + peer.last_blck == 0 ? "" : ToString((m_time_now - peer.last_blck) / 60), + peer.conn_time == 0 ? "" : ToString((m_time_now - peer.conn_time) / 60), + m_is_asmap_on ? 7 : 0, // variable spacing + m_is_asmap_on && peer.mapped_as != 0 ? ToString(peer.mapped_as) : "", + m_max_id_length, // variable spacing peer.id, - IsAddressSelected() ? max_addr_length : 0, // variable spacing + IsAddressSelected() ? 
m_max_addr_length : 0, // variable spacing IsAddressSelected() ? peer.addr : "", IsVersionSelected() && version != "0" ? version : ""); } @@ -477,12 +441,11 @@ public: } // Report peer connection totals by type. - total_i = ipv4_i + ipv6_i + onion_i; - total_o = ipv4_o + ipv6_o + onion_o; result += " ipv4 ipv6 onion total block-relay\n"; - result += strprintf("in %5i %5i %5i %5i %5i\n", ipv4_i, ipv6_i, onion_i, total_i, block_relay_i); - result += strprintf("out %5i %5i %5i %5i %5i\n", ipv4_o, ipv6_o, onion_o, total_o, block_relay_o); - result += strprintf("total %5i %5i %5i %5i %5i\n", ipv4_i + ipv4_o, ipv6_i + ipv6_o, onion_i + onion_o, total_i + total_o, block_relay_i + block_relay_o); + const std::array<std::string, 3> rows{{"in", "out", "total"}}; + for (size_t i = 0; i < m_networks_size; ++i) { + result += strprintf("%-5s %5i %5i %5i %5i %5i\n", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2), m_counts.at(i).at(m_networks_size), m_counts.at(i).at(m_networks_size + 1)); + } // Report local addresses, ports, and scores. result += "\nLocal addresses"; diff --git a/src/chainparams.cpp b/src/chainparams.cpp index ef501e9de2..be2f5b53a6 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -86,10 +86,12 @@ public: consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008 consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008 - // The best chain should have at least this much work. - consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000e1ab5ec9348e9f4b8eb8154"); + // Deployment of Taproot (BIPs 340-342) + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].bit = 2; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = 1199145601; // January 1, 2008 + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1230767999; // December 31, 2008 - // By default assume that the signatures in ancestors of this block are valid. + consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000e1ab5ec9348e9f4b8eb8154"); consensus.defaultAssumeValid = uint256S("0x0000000000000000000f2adce67e49b0b6bdeb9de8b7c3d7e93b21e7fc1e819d"); // 623950 /** @@ -197,10 +199,12 @@ public: consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008 consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008 - // The best chain should have at least this much work. - consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000001495c1d5a01e2af8a23"); + // Deployment of Taproot (BIPs 340-342) + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].bit = 2; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = 1199145601; // January 1, 2008 + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1230767999; // December 31, 2008 - // By default assume that the signatures in ancestors of this block are valid. 
+ consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000001495c1d5a01e2af8a23"); consensus.defaultAssumeValid = uint256S("0x000000000000056c49030c174179b52a928c870e6e8a822c75973b7970cfbd01"); // 1692000 pchMessageStart[0] = 0x0b; @@ -287,6 +291,8 @@ public: } bin = ParseHex(signet_challenge[0]); + consensus.nMinimumChainWork = uint256{}; + consensus.defaultAssumeValid = uint256{}; m_assumed_blockchain_size = 0; m_assumed_chain_state_size = 0; chainTxData = ChainTxData{ @@ -305,7 +311,9 @@ public: consensus.signet_blocks = true; consensus.signet_challenge.assign(bin.begin(), bin.end()); consensus.nSubsidyHalvingInterval = 210000; + consensus.BIP16Exception = uint256{}; consensus.BIP34Height = 1; + consensus.BIP34Hash = uint256{}; consensus.BIP65Height = 1; consensus.BIP66Height = 1; consensus.CSVHeight = 1; @@ -316,11 +324,17 @@ public: consensus.fPowNoRetargeting = false; consensus.nRuleChangeActivationThreshold = 1916; consensus.nMinerConfirmationWindow = 2016; + consensus.MinBIP9WarningHeight = 0; consensus.powLimit = uint256S("00000377ae000000000000000000000000000000000000000000000000000000"); consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008 consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008 + // Activation of Taproot (BIPs 340-342) + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].bit = 2; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = Consensus::BIP9Deployment::ALWAYS_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + // message start is defined as the first 4 bytes of the sha256d of the block script CHashWriter h(SER_DISK, 0); h << consensus.signet_challenge; @@ -380,12 +394,12 @@ public: consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 0; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].bit = 2; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = Consensus::BIP9Deployment::ALWAYS_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; - // The best chain should have at least this much work. - consensus.nMinimumChainWork = uint256S("0x00"); - - // By default assume that the signatures in ancestors of this block are valid. 
- consensus.defaultAssumeValid = uint256S("0x00"); + consensus.nMinimumChainWork = uint256{}; + consensus.defaultAssumeValid = uint256{}; pchMessageStart[0] = 0xfa; pchMessageStart[1] = 0xbf; diff --git a/src/consensus/params.h b/src/consensus/params.h index 85ab3f61ef..0983595c6a 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -14,6 +14,7 @@ namespace Consensus { enum DeploymentPos { DEPLOYMENT_TESTDUMMY, + DEPLOYMENT_TAPROOT, // Deployment of Schnorr/Taproot (BIPs 340-342) // NOTE: Also add new deployments to VersionBitsDeploymentInfo in versionbits.cpp MAX_VERSION_BITS_DEPLOYMENTS }; @@ -78,7 +79,9 @@ struct Params { int64_t nPowTargetSpacing; int64_t nPowTargetTimespan; int64_t DifficultyAdjustmentInterval() const { return nPowTargetTimespan / nPowTargetSpacing; } + /** The best chain should have at least this much work */ uint256 nMinimumChainWork; + /** By default assume that the signatures in ancestors of this block are valid */ uint256 defaultAssumeValid; /** diff --git a/src/core_read.cpp b/src/core_read.cpp index 1c0a8a096d..121e62457c 100644 --- a/src/core_read.cpp +++ b/src/core_read.cpp @@ -117,19 +117,14 @@ static bool CheckTxScriptsSanity(const CMutableTransaction& tx) return true; } -bool DecodeHexTx(CMutableTransaction& tx, const std::string& hex_tx, bool try_no_witness, bool try_witness) +static bool DecodeTx(CMutableTransaction& tx, const std::vector<unsigned char>& tx_data, bool try_no_witness, bool try_witness) { - if (!IsHex(hex_tx)) { - return false; - } - - std::vector<unsigned char> txData(ParseHex(hex_tx)); - - if (try_no_witness) { - CDataStream ssData(txData, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS); + if (try_witness) { + CDataStream ssData(tx_data, SER_NETWORK, PROTOCOL_VERSION); try { ssData >> tx; - if (ssData.eof() && (!try_witness || CheckTxScriptsSanity(tx))) { + // If transaction looks sane, we don't try other mode even if requested + if (ssData.empty() && (!try_no_witness || CheckTxScriptsSanity(tx))) { return true; } } catch (const std::exception&) { @@ -137,8 +132,8 @@ bool DecodeHexTx(CMutableTransaction& tx, const std::string& hex_tx, bool try_no } } - if (try_witness) { - CDataStream ssData(txData, SER_NETWORK, PROTOCOL_VERSION); + if (try_no_witness) { + CDataStream ssData(tx_data, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS); try { ssData >> tx; if (ssData.empty()) { @@ -152,6 +147,16 @@ bool DecodeHexTx(CMutableTransaction& tx, const std::string& hex_tx, bool try_no return false; } +bool DecodeHexTx(CMutableTransaction& tx, const std::string& hex_tx, bool try_no_witness, bool try_witness) +{ + if (!IsHex(hex_tx)) { + return false; + } + + std::vector<unsigned char> txData(ParseHex(hex_tx)); + return DecodeTx(tx, txData, try_no_witness, try_witness); +} + bool DecodeHexBlockHeader(CBlockHeader& header, const std::string& hex_header) { if (!IsHex(hex_header)) return false; diff --git a/src/hash.cpp b/src/hash.cpp index 83b90ae063..3657b38639 100644 --- a/src/hash.cpp +++ b/src/hash.cpp @@ -6,6 +6,7 @@ #include <crypto/common.h> #include <crypto/hmac_sha512.h> +#include <string> inline uint32_t ROTL32(uint32_t x, int8_t r) { @@ -84,3 +85,12 @@ uint256 SHA256Uint256(const uint256& input) CSHA256().Write(input.begin(), 32).Finalize(result.begin()); return result; } + +CHashWriter TaggedHash(const std::string& tag) +{ + CHashWriter writer(SER_GETHASH, 0); + uint256 taghash; + CSHA256().Write((const unsigned char*)tag.data(), tag.size()).Finalize(taghash.begin()); + writer << taghash << 
taghash; + return writer; +} diff --git a/src/hash.h b/src/hash.h index c16bbb48ce..6d876076ee 100644 --- a/src/hash.h +++ b/src/hash.h @@ -15,6 +15,7 @@ #include <uint256.h> #include <version.h> +#include <string> #include <vector> typedef uint256 ChainCode; @@ -202,4 +203,12 @@ unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vData void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char header, const unsigned char data[32], unsigned char output[64]); +/** Return a CHashWriter primed for tagged hashes (as specified in BIP 340). + * + * The returned object will have SHA256(tag) written to it twice (= 64 bytes). + * A tagged hash can be computed by feeding the message into this object, and + * then calling CHashWriter::GetSHA256(). + */ +CHashWriter TaggedHash(const std::string& tag); + #endif // BITCOIN_HASH_H diff --git a/src/limitedmap.h b/src/limitedmap.h deleted file mode 100644 index 7d66964e36..0000000000 --- a/src/limitedmap.h +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2012-2018 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_LIMITEDMAP_H -#define BITCOIN_LIMITEDMAP_H - -#include <assert.h> -#include <map> - -/** STL-like map container that only keeps the N elements with the highest value. */ -template <typename K, typename V> -class limitedmap -{ -public: - typedef K key_type; - typedef V mapped_type; - typedef std::pair<const key_type, mapped_type> value_type; - typedef typename std::map<K, V>::const_iterator const_iterator; - typedef typename std::map<K, V>::size_type size_type; - -protected: - std::map<K, V> map; - typedef typename std::map<K, V>::iterator iterator; - std::multimap<V, iterator> rmap; - typedef typename std::multimap<V, iterator>::iterator rmap_iterator; - size_type nMaxSize; - -public: - explicit limitedmap(size_type nMaxSizeIn) - { - assert(nMaxSizeIn > 0); - nMaxSize = nMaxSizeIn; - } - const_iterator begin() const { return map.begin(); } - const_iterator end() const { return map.end(); } - size_type size() const { return map.size(); } - bool empty() const { return map.empty(); } - const_iterator find(const key_type& k) const { return map.find(k); } - size_type count(const key_type& k) const { return map.count(k); } - void insert(const value_type& x) - { - std::pair<iterator, bool> ret = map.insert(x); - if (ret.second) { - if (map.size() > nMaxSize) { - map.erase(rmap.begin()->second); - rmap.erase(rmap.begin()); - } - rmap.insert(make_pair(x.second, ret.first)); - } - } - void erase(const key_type& k) - { - iterator itTarget = map.find(k); - if (itTarget == map.end()) - return; - std::pair<rmap_iterator, rmap_iterator> itPair = rmap.equal_range(itTarget->second); - for (rmap_iterator it = itPair.first; it != itPair.second; ++it) - if (it->second == itTarget) { - rmap.erase(it); - map.erase(itTarget); - return; - } - // Shouldn't ever get here - assert(0); - } - void update(const_iterator itIn, const mapped_type& v) - { - // Using map::erase() with empty range instead of map::find() to get a non-const iterator, - // since it is a constant time operation in C++11. 
For more details, see - // https://stackoverflow.com/questions/765148/how-to-remove-constness-of-const-iterator - iterator itTarget = map.erase(itIn, itIn); - - if (itTarget == map.end()) - return; - std::pair<rmap_iterator, rmap_iterator> itPair = rmap.equal_range(itTarget->second); - for (rmap_iterator it = itPair.first; it != itPair.second; ++it) - if (it->second == itTarget) { - rmap.erase(it); - itTarget->second = v; - rmap.insert(make_pair(v, itTarget)); - return; - } - // Shouldn't ever get here - assert(0); - } - size_type max_size() const { return nMaxSize; } - size_type max_size(size_type s) - { - assert(s > 0); - while (map.size() > s) { - map.erase(rmap.begin()->second); - rmap.erase(rmap.begin()); - } - nMaxSize = s; - return nMaxSize; - } -}; - -#endif // BITCOIN_LIMITEDMAP_H diff --git a/src/net.cpp b/src/net.cpp index 54d572c68c..e8a27c3530 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -47,6 +47,12 @@ static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed" #include <math.h> +/** Maximum number of block-relay-only anchor connections */ +static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2; +static_assert (MAX_BLOCK_RELAY_ONLY_ANCHORS <= static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS), "MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed MAX_BLOCK_RELAY_ONLY_CONNECTIONS."); +/** Anchor IP address database file name */ +const char* const ANCHORS_DATABASE_FILENAME = "anchors.dat"; + // How often to dump addresses to peers.dat static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15}; @@ -552,6 +558,7 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) X(nServices); X(addr); X(addrBind); + stats.m_network = GetNetworkName(ConnectedThroughNetwork()); stats.m_mapped_as = addr.GetMappedAS(m_asmap); if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_filter); @@ -1932,10 +1939,12 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY; int64_t nTime = GetTimeMicros(); + bool anchor = false; bool fFeeler = false; // Determine what type of connection to open. Opening - // OUTBOUND_FULL_RELAY connections gets the highest priority until we + // BLOCK_RELAY connections to addresses from anchors.dat gets the highest + // priority. Then we open OUTBOUND_FULL_RELAY priority until we // meet our full-relay capacity. Then we open BLOCK_RELAY connection // until we hit our block-relay-only peer limit. // GetTryNewOutboundPeer() gets set when a stale tip is detected, so we @@ -1943,7 +1952,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // these conditions are met, check the nNextFeeler timer to decide if // we should open a FEELER. 
- if (nOutboundFullRelay < m_max_outbound_full_relay) { + if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) { + conn_type = ConnectionType::BLOCK_RELAY; + anchor = true; + } else if (nOutboundFullRelay < m_max_outbound_full_relay) { // OUTBOUND_FULL_RELAY } else if (nOutboundBlockRelay < m_max_outbound_block_relay) { conn_type = ConnectionType::BLOCK_RELAY; @@ -1964,6 +1976,24 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) int nTries = 0; while (!interruptNet) { + if (anchor && !m_anchors.empty()) { + const CAddress addr = m_anchors.back(); + m_anchors.pop_back(); + if (!addr.IsValid() || IsLocal(addr) || !IsReachable(addr) || + !HasAllDesirableServiceFlags(addr.nServices) || + setConnected.count(addr.GetGroup(addrman.m_asmap))) continue; + addrConnect = addr; + LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToString()); + break; + } + + // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman, + // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates + // already-connected network ranges, ...) before trying new addrman addresses. + nTries++; + if (nTries > 100) + break; + CAddrInfo addr = addrman.SelectTriedCollision(); // SelectTriedCollision returns an invalid address if it is empty. @@ -1981,13 +2011,6 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) break; } - // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman, - // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates - // already-connected network ranges, ...) before trying new addrman addresses. - nTries++; - if (nTries > 100) - break; - if (!IsReachable(addr)) continue; @@ -2027,6 +2050,19 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) } } +std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const +{ + std::vector<CAddress> ret; + LOCK(cs_vNodes); + for (const CNode* pnode : vNodes) { + if (pnode->IsBlockOnlyConn()) { + ret.push_back(pnode->addr); + } + } + + return ret; +} + std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() { std::vector<AddedNodeInfo> ret; @@ -2426,6 +2462,15 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) } } + if (m_use_addrman_outgoing) { + // Load addresses from anchors.dat + m_anchors = ReadAnchors(GetDataDir() / ANCHORS_DATABASE_FILENAME); + if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) { + m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS); + } + LogPrintf("%i block-relay-only anchors will be tried for connections.\n", m_anchors.size()); + } + uiInterface.InitMessage(_("Starting network threads...").translated); fAddressesInitialized = true; @@ -2541,6 +2586,15 @@ void CConnman::StopNodes() if (fAddressesInitialized) { DumpAddresses(); fAddressesInitialized = false; + + if (m_use_addrman_outgoing) { + // Anchor connections are only dumped during clean shutdown. 
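As a rough sketch of the anchor-candidate loop above: candidates are popped from the back of the saved anchor list and skipped unless they pass the usability checks (valid, not local, reachable, desirable services, netgroup not already connected). The predicate below stands in for those checks; all names and the std::string address type are illustrative.

    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    std::optional<std::string> NextUsableAnchor(std::vector<std::string>& anchors,
                                                const std::function<bool(const std::string&)>& usable)
    {
        while (!anchors.empty()) {
            std::string addr = anchors.back();
            anchors.pop_back();                 // each stored anchor is tried at most once
            if (usable(addr)) return addr;      // connect to this one as a block-relay-only peer
        }
        return std::nullopt;                    // exhausted: fall back to normal addrman selection
    }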
+ std::vector<CAddress> anchors_to_dump = GetCurrentBlockRelayOnlyConns(); + if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) { + anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS); + } + DumpAnchors(GetDataDir() / ANCHORS_DATABASE_FILENAME, anchors_to_dump); + } } // Close sockets @@ -14,7 +14,6 @@ #include <compat.h> #include <crypto/siphash.h> #include <hash.h> -#include <limitedmap.h> #include <net_permissions.h> #include <netaddress.h> #include <optional.h> @@ -116,17 +115,12 @@ struct CSerializedNetMsg std::string m_type; }; -const std::vector<std::string> CONNECTION_TYPE_DOC{ - "outbound-full-relay (default automatic connections)", - "block-relay-only (does not relay transactions or addresses)", - "inbound (initiated by the peer)", - "manual (added via addnode RPC or -addnode/-connect configuration options)", - "addr-fetch (short-lived automatic connection for soliciting addresses)", - "feeler (short-lived automatic connection for testing addresses)"}; - /** Different types of connections to a peer. This enum encapsulates the * information we have available at the time of opening or accepting the - * connection. Aside from INBOUND, all types are initiated by us. */ + * connection. Aside from INBOUND, all types are initiated by us. + * + * If adding or removing types, please update CONNECTION_TYPE_DOC in + * src/rpc/net.cpp. */ enum class ConnectionType { /** * Inbound connections are those initiated by a peer. This is the only @@ -174,7 +168,9 @@ enum class ConnectionType { * attacks. By not relaying transactions or addresses, these connections * are harder to detect by a third party, thus helping obfuscate the * network topology. We automatically attempt to open - * MAX_BLOCK_RELAY_ONLY_CONNECTIONS using addresses from our AddrMan. + * MAX_BLOCK_RELAY_ONLY_ANCHORS using addresses from our anchors.dat. Then + * addresses from our AddrMan if MAX_BLOCK_RELAY_ONLY_CONNECTIONS + * isn't reached yet. */ BLOCK_RELAY, @@ -461,6 +457,11 @@ private: void RecordBytesRecv(uint64_t bytes); void RecordBytesSent(uint64_t bytes); + /** + * Return vector of current BLOCK_RELAY peers. + */ + std::vector<CAddress> GetCurrentBlockRelayOnlyConns() const; + // Whether the node should be passed out in ForEach* callbacks static bool NodeFullyConnected(const CNode* pnode); @@ -562,6 +563,12 @@ private: /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ BanMan* m_banman; + /** + * Addresses that were saved during the previous clean shutdown. We'll + * attempt to make block-relay-only connections to them. + */ + std::vector<CAddress> m_anchors; + /** SipHasher seeds for deterministic randomness */ const uint64_t nSeed0, nSeed1; @@ -706,6 +713,8 @@ public: CAddress addr; // Bind address of our side of the connection CAddress addrBind; + // Name of the network the peer connected through + std::string m_network; uint32_t m_mapped_as; std::string m_conn_type_string; }; @@ -848,7 +857,6 @@ public: RecursiveMutex cs_sendProcessing; - std::deque<CInv> vRecvGetData; uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic<int64_t> nLastSend{0}; @@ -1042,8 +1050,6 @@ public: // Whether a ping is requested. 
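The clean-shutdown path above reduces to: collect the addresses of the current block-relay-only peers and truncate to the anchor cap before writing anchors.dat. A toy restatement, with placeholder types rather than the real CNode/CAddress interfaces:

    #include <string>
    #include <vector>

    constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2;

    struct PeerInfo {
        std::string addr;
        bool block_relay_only;
    };

    // Mirrors GetCurrentBlockRelayOnlyConns() followed by the resize in StopNodes().
    std::vector<std::string> AnchorsToDump(const std::vector<PeerInfo>& peers)
    {
        std::vector<std::string> out;
        for (const PeerInfo& peer : peers) {
            if (peer.block_relay_only) out.push_back(peer.addr);
        }
        if (out.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) out.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
        return out;
    }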
std::atomic<bool> fPingQueued{false}; - std::set<uint256> orphan_work_set; - CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in, bool inbound_onion = false); ~CNode(); CNode(const CNode&) = delete; diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp index 53648deb40..d40fdfb113 100644 --- a/src/net_permissions.cpp +++ b/src/net_permissions.cpp @@ -12,7 +12,7 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{ "bloomfilter (allow requesting BIP37 filtered blocks and transactions)", "noban (do not ban for misbehavior; implies download)", "forcerelay (relay transactions that are already in the mempool; implies relay)", - "relay (relay even in -blocksonly mode)", + "relay (relay even in -blocksonly mode, and unlimited transaction announcements)", "mempool (allow requesting BIP35 mempool contents)", "download (allow getheaders during IBD, no disconnect after maxuploadtarget limit)", "addr (responses to GETADDR avoid hitting the cache and contain random records with the most up-to-date info)" diff --git a/src/net_permissions.h b/src/net_permissions.h index 5b68f635a7..bba0ea1695 100644 --- a/src/net_permissions.h +++ b/src/net_permissions.h @@ -19,6 +19,7 @@ enum NetPermissionFlags { // Can query bloomfilter even if -peerbloomfilters is false PF_BLOOMFILTER = (1U << 1), // Relay and accept transactions from this peer, even if -blocksonly is true + // This peer is also not subject to limits on how many transaction INVs are tracked PF_RELAY = (1U << 3), // Always relay transactions from this peer, even if already in mempool // Keep parameter interaction: forcerelay implies relay diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 9ad3f5d6f4..94d4052fa1 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -72,22 +72,22 @@ static constexpr std::chrono::minutes PING_INTERVAL{2}; static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of entries in an 'inv' protocol message */ static const unsigned int MAX_INV_SZ = 50000; -/** Maximum number of in-flight transactions from a peer */ -static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100; -/** Maximum number of announced transactions from a peer */ -static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ; -/** How many microseconds to delay requesting transactions via txids, if we have wtxid-relaying peers */ -static constexpr std::chrono::microseconds TXID_RELAY_DELAY{std::chrono::seconds{2}}; -/** How many microseconds to delay requesting transactions from inbound peers */ -static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}}; +/** Maximum number of in-flight transaction requests from a peer. It is not a hard limit, but the threshold at which + * point the OVERLOADED_PEER_TX_DELAY kicks in. */ +static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100; +/** Maximum number of transactions to consider for requesting, per peer. It provides a reasonable DoS limit to + * per-peer memory usage spent on announcements, while covering peers continuously sending INVs at the maximum + * rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for several minutes, while not receiving + * the actual transaction (from any peer) in response to requests for them. 
*/ +static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000; +/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */ +static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2}; +/** How long to delay requesting transactions from non-preferred peers */ +static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2}; +/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */ +static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2}; /** How long to wait (in microseconds) before downloading a transaction from an additional peer */ static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}}; -/** Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others. */ -static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{std::chrono::seconds{2}}; -/** How long to wait (in microseconds) before expiring an in-flight getdata request to a peer */ -static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{GETDATA_TX_INTERVAL * 10}; -static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY, -"To preserve security, MAX_GETDATA_RANDOM_DELAY should not exceed INBOUND_PEER_DELAY"); /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */ static const unsigned int MAX_GETDATA_SZ = 1000; /** Number of blocks that can be requested at any given time from a single peer. */ @@ -375,69 +375,6 @@ struct CNodeState { //! Time of last new block announcement int64_t m_last_block_announcement; - /* - * State associated with transaction download. - * - * Tx download algorithm: - * - * When inv comes in, queue up (process_time, txid) inside the peer's - * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer - * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS). - * - * The process_time for a transaction is set to nNow for outbound peers, - * nNow + 2 seconds for inbound peers. This is the time at which we'll - * consider trying to request the transaction from the peer in - * SendMessages(). The delay for inbound peers is to allow outbound peers - * a chance to announce before we request from inbound peers, to prevent - * an adversary from using inbound connections to blind us to a - * transaction (InvBlock). - * - * When we call SendMessages() for a given peer, - * we will loop over the transactions in m_tx_process_time, looking - * at the transactions whose process_time <= nNow. We'll request each - * such transaction that we don't have already and that hasn't been - * requested from another peer recently, up until we hit the - * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update - * g_already_asked_for for each requested txid, storing the time of the - * GETDATA request. We use g_already_asked_for to coordinate transaction - * requests amongst our peers. - * - * For transactions that we still need but we have already recently - * requested from some other peer, we'll reinsert (process_time, txid) - * back into the peer's m_tx_process_time at the point in the future at - * which the most recent GETDATA request would time out (ie - * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for). - * We add an additional delay for inbound peers, again to prefer - * attempting download from outbound peers first. - * We also add an extra small random delay up to 2 seconds - * to avoid biasing some peers over others. 
(e.g., due to fixed ordering - * of peer processing in ThreadMessageHandler). - * - * When we receive a transaction from a peer, we remove the txid from the - * peer's m_tx_in_flight set and from their recently announced set - * (m_tx_announced). We also clear g_already_asked_for for that entry, so - * that if somehow the transaction is not accepted but also not added to - * the reject filter, then we will eventually redownload from other - * peers. - */ - struct TxDownloadState { - /* Track when to attempt download of announced transactions (process - * time in micros -> txid) - */ - std::multimap<std::chrono::microseconds, GenTxid> m_tx_process_time; - - //! Store all the transactions a peer has recently announced - std::set<uint256> m_tx_announced; - - //! Store transactions which were requested by us, with timestamp - std::map<uint256, std::chrono::microseconds> m_tx_in_flight; - - //! Periodically check for stuck getdata requests - std::chrono::microseconds m_check_expiry_timer{0}; - }; - - TxDownloadState m_tx_download; - //! Whether this peer is an inbound connection bool m_is_inbound; @@ -478,9 +415,6 @@ struct CNodeState { } }; -// Keeps track of the time (in microseconds) when transactions were requested last time -limitedmap<uint256, std::chrono::microseconds> g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ); - /** Map maintaining per-node state. */ static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main); @@ -512,6 +446,14 @@ struct Peer { /** Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). */ bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; + /** Set of txids to reconsider once their parent transactions have been accepted **/ + std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans); + + /** Protects m_getdata_requests **/ + Mutex m_getdata_requests_mutex; + /** Work queue of items requested by this peer **/ + std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); + Peer(NodeId id) : m_id(id) {} }; @@ -817,73 +759,35 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec } } -void EraseTxRequest(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - g_already_asked_for.erase(gtxid.GetHash()); -} - -std::chrono::microseconds GetTxRequestTime(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - auto it = g_already_asked_for.find(gtxid.GetHash()); - if (it != g_already_asked_for.end()) { - return it->second; - } - return {}; -} - -void UpdateTxRequestTime(const GenTxid& gtxid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - auto it = g_already_asked_for.find(gtxid.GetHash()); - if (it == g_already_asked_for.end()) { - g_already_asked_for.insert(std::make_pair(gtxid.GetHash(), request_time)); - } else { - g_already_asked_for.update(it, request_time); - } -} - -std::chrono::microseconds CalculateTxGetDataTime(const GenTxid& gtxid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - std::chrono::microseconds process_time; - const auto last_request_time = GetTxRequestTime(gtxid); - // First time requesting this tx - if (last_request_time.count() == 0) { - process_time = current_time; - } else { - // Randomize the delay to avoid biasing some peers over others (such as due to - // fixed ordering of peer processing in ThreadMessageHandler) - process_time = last_request_time + GETDATA_TX_INTERVAL + 
GetRandMicros(MAX_GETDATA_RANDOM_DELAY); - } - - // We delay processing announcements from inbound peers - if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY; - - // We delay processing announcements from peers that use txid-relay (instead of wtxid) - if (use_txid_delay) process_time += TXID_RELAY_DELAY; - - return process_time; -} +} // namespace -void RequestTx(CNodeState* state, const GenTxid& gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManager::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time) { - CNodeState::TxDownloadState& peer_download_state = state->m_tx_download; - if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS || - peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS || - peer_download_state.m_tx_announced.count(gtxid.GetHash())) { - // Too many queued announcements from this peer, or we already have - // this announcement + AssertLockHeld(::cs_main); // For m_txrequest + NodeId nodeid = node.GetId(); + if (!node.HasPermission(PF_RELAY) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) { + // Too many queued announcements from this peer return; } - peer_download_state.m_tx_announced.insert(gtxid.GetHash()); - - // Calculate the time to try requesting this transaction. Use - // fPreferredDownload as a proxy for outbound peers. - const auto process_time = CalculateTxGetDataTime(gtxid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0); - - peer_download_state.m_tx_process_time.emplace(process_time, gtxid); + const CNodeState* state = State(nodeid); + + // Decide the TxRequestTracker parameters for this announcement: + // - "preferred": if fPreferredDownload is set (= outbound, or PF_NOBAN permission) + // - "reqtime": current time plus delays for: + // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections + // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available + // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least + // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have PF_RELAY). 
+ auto delay = std::chrono::microseconds{0}; + const bool preferred = state->fPreferredDownload; + if (!preferred) delay += NONPREF_PEER_TX_DELAY; + if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY; + const bool overloaded = !node.HasPermission(PF_RELAY) && + m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; + if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; + m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay); } -} // namespace - // This function is used for testing the stale tip eviction logic, see // denialofservice_tests.cpp void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) @@ -900,6 +804,7 @@ void PeerManager::InitializeNode(CNode *pnode) { { LOCK(cs_main); mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn())); + assert(m_txrequest.Count(nodeid) == 0); } { PeerRef peer = std::make_shared<Peer>(nodeid); @@ -957,6 +862,7 @@ void PeerManager::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) { mapBlocksInFlight.erase(entry.hash); } EraseOrphansFor(nodeid); + m_txrequest.DisconnectedPeer(nodeid); nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); @@ -974,6 +880,7 @@ void PeerManager::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) { assert(nPeersWithValidatedDownloads == 0); assert(g_outbound_peers_with_protect_from_disconnect == 0); assert(g_wtxid_relay_peers == 0); + assert(m_txrequest.Size() == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } @@ -1286,7 +1193,8 @@ PeerManager::PeerManager(const CChainParams& chainparams, CConnman& connman, Ban /** * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected - * block. Also save the time of the last tip update. + * block, remember the recently confirmed transactions, and delete tracked + * announcements for them. Also save the time of the last tip update. 
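A worked restatement of the reqtime computation above, with this patch's constants inlined: an inbound (non-preferred) peer without PF_RELAY that announces by txid while wtxid peers exist, and that already has 100 requests in flight, is delayed by 2s + 2s + 2s = 6 seconds. AnnouncementDelay is a standalone illustrative function, not the PeerManager method.

    #include <chrono>

    using namespace std::chrono_literals;

    std::chrono::microseconds AnnouncementDelay(bool preferred, bool is_wtxid, bool have_wtxid_peers,
                                                bool has_relay_permission, int peer_requests_in_flight)
    {
        constexpr auto NONPREF_PEER_TX_DELAY = 2s;
        constexpr auto TXID_RELAY_DELAY = 2s;
        constexpr auto OVERLOADED_PEER_TX_DELAY = 2s;
        constexpr int MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;

        std::chrono::microseconds delay{0};
        if (!preferred) delay += NONPREF_PEER_TX_DELAY;
        if (!is_wtxid && have_wtxid_peers) delay += TXID_RELAY_DELAY;
        if (!has_relay_permission && peer_requests_in_flight >= MAX_PEER_TX_REQUEST_IN_FLIGHT) {
            delay += OVERLOADED_PEER_TX_DELAY;
        }
        return delay;
    }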
*/ void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex) { @@ -1330,6 +1238,13 @@ void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, co } } } + { + LOCK(cs_main); + for (const auto& ptx : pblock->vtx) { + m_txrequest.ForgetTxHash(ptx->GetHash()); + m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); + } + } } void PeerManager::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) @@ -1747,11 +1662,11 @@ static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& return {}; } -void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main) +void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex) { AssertLockNotHeld(cs_main); - std::deque<CInv>::iterator it = pfrom.vRecvGetData.begin(); + std::deque<CInv>::iterator it = peer.m_getdata_requests.begin(); std::vector<CInv> vNotFound; const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); @@ -1763,7 +1678,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm // Process as many TX items from the front of the getdata queue as // possible, since they're common and it's efficient to batch process // them. - while (it != pfrom.vRecvGetData.end() && it->IsGenTxMsg()) { + while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) { if (interruptMsgProc) return; // The send buffer provides backpressure. If there's no space in // the buffer, pause processing until the next call. @@ -1811,7 +1726,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm // Only process one BLOCK item per call, since they're uncommon and can be // expensive to process. - if (it != pfrom.vRecvGetData.end() && !pfrom.fPauseSend) { + if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { const CInv &inv = *it++; if (inv.IsGenBlkMsg()) { ProcessGetBlockData(pfrom, chainparams, inv, connman); @@ -1820,7 +1735,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm // and continue processing the queue on the next call. } - pfrom.vRecvGetData.erase(pfrom.vRecvGetData.begin(), it); + peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it); if (!vNotFound.empty()) { // Let the peer know that we didn't find what it asked for, so it doesn't @@ -2363,6 +2278,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat return; } + PeerRef peer = GetPeerRef(pfrom.GetId()); + if (peer == nullptr) return; if (msg_type == NetMsgType::VERSION) { // Each connection can only send one version message @@ -2555,8 +2472,9 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat // At this point, the outgoing message serialization version can't change. const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); - if (msg_type == NetMsgType::VERACK) - { + if (msg_type == NetMsgType::VERACK) { + if (pfrom.fSuccessfullyConnected) return; + if (!pfrom.IsInboundConn()) { // Mark this node as currently connected, so we update its timestamp later. 
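The per-peer m_getdata_requests handling above follows a drain-with-backpressure pattern: process items from the front of the queue, stop early when the send buffer is full, and erase only what was actually handled so the remainder is retried on the next call. A generic sketch, where handle() stands in for the TX/BLOCK item processing and returns false to pause:

    #include <deque>
    #include <functional>

    template <typename Item>
    void DrainGetData(std::deque<Item>& queue, const std::function<bool(const Item&)>& handle)
    {
        auto it = queue.begin();
        while (it != queue.end()) {
            if (!handle(*it)) break;         // backpressure: resume from this item next time
            ++it;
        }
        queue.erase(queue.begin(), it);      // drop only the items that were handled
    }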
LOCK(cs_main); @@ -2770,7 +2688,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat pfrom.fDisconnect = true; return; } else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { - RequestTx(State(pfrom.GetId()), gtxid, current_time); + AddTxAnnouncement(pfrom, gtxid, current_time); } } else { LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); @@ -2800,8 +2718,12 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); } - pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(), vInv.end()); - ProcessGetData(pfrom, m_chainparams, m_connman, m_mempool, interruptMsgProc); + { + LOCK(peer->m_getdata_requests_mutex); + peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end()); + ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc); + } + return; } @@ -2889,36 +2811,38 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat return; } - LOCK(cs_main); + { + LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(req.blockhash); - if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { - LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); - return; - } + const CBlockIndex* pindex = LookupBlockIndex(req.blockhash); + if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { + LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); + return; + } - if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) { - // If an older block is requested (should never happen in practice, - // but can happen in tests) send a block response instead of a - // blocktxn response. Sending a full block response instead of a - // small blocktxn response is preferable in the case where a peer - // might maliciously send lots of getblocktxn requests to trigger - // expensive disk reads, because it will require the peer to - // actually receive all the data read from disk over the network. - LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); - CInv inv; - inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK; - inv.hash = req.blockhash; - pfrom.vRecvGetData.push_back(inv); - // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main) - return; - } + if (pindex->nHeight >= ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) { + CBlock block; + bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus()); + assert(ret); - CBlock block; - bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus()); - assert(ret); + SendBlockTransactions(pfrom, block, req); + return; + } + } - SendBlockTransactions(pfrom, block, req); + // If an older block is requested (should never happen in practice, + // but can happen in tests) send a block response instead of a + // blocktxn response. Sending a full block response instead of a + // small blocktxn response is preferable in the case where a peer + // might maliciously send lots of getblocktxn requests to trigger + // expensive disk reads, because it will require the peer to + // actually receive all the data read from disk over the network. 
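The restructured getblocktxn handling above boils down to a depth check: recent blocks get a blocktxn response, older ones are converted into a full-block getdata item so the requester bears the full download cost. A hypothetical restatement with illustrative names:

    enum class BlockTxnResponse { BLOCKTXN, FULL_BLOCK };

    BlockTxnResponse ClassifyGetBlockTxn(int block_height, int tip_height, int max_blocktxn_depth)
    {
        return (block_height >= tip_height - max_blocktxn_depth) ? BlockTxnResponse::BLOCKTXN
                                                                 : BlockTxnResponse::FULL_BLOCK;
    }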
+ LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); + CInv inv; + WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK); + inv.hash = req.blockhash; + WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv)); + // The message processing loop will go around again (without pausing) and we'll respond then return; } @@ -3024,11 +2948,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat TxValidationState state; - for (const GenTxid& gtxid : {GenTxid(false, txid), GenTxid(true, wtxid)}) { - nodestate->m_tx_download.m_tx_announced.erase(gtxid.GetHash()); - nodestate->m_tx_download.m_tx_in_flight.erase(gtxid.GetHash()); - EraseTxRequest(gtxid); - } + m_txrequest.ReceivedResponse(pfrom.GetId(), txid); + if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid); std::list<CTransactionRef> lRemovedTxn; @@ -3047,12 +2968,16 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat if (!AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool) && AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) { m_mempool.check(&::ChainstateActive().CoinsTip()); + // As this version of the transaction was acceptable, we can forget about any + // requests for it. + m_txrequest.ForgetTxHash(tx.GetHash()); + m_txrequest.ForgetTxHash(tx.GetWitnessHash()); RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman); for (unsigned int i = 0; i < tx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto& elem : it_by_prev->second) { - pfrom.orphan_work_set.insert(elem->first); + peer->m_orphan_work_set.insert(elem->first); } } } @@ -3069,7 +2994,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat } // Recursively process any orphan transactions that depended on this one - ProcessOrphanTx(pfrom.orphan_work_set); + ProcessOrphanTx(peer->m_orphan_work_set); } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { @@ -3102,10 +3027,14 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat // protocol for getting all unconfirmed parents. const GenTxid gtxid{/* is_wtxid=*/false, parent_txid}; pfrom.AddKnownTx(parent_txid); - if (!AlreadyHaveTx(gtxid, m_mempool)) RequestTx(State(pfrom.GetId()), gtxid, current_time); + if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time); } AddOrphanTx(ptx, pfrom.GetId()); + // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore. + m_txrequest.ForgetTxHash(tx.GetHash()); + m_txrequest.ForgetTxHash(tx.GetWitnessHash()); + // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789) unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); @@ -3122,6 +3051,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat // from any of our non-wtxidrelay peers. 
recentRejects->insert(tx.GetHash()); recentRejects->insert(tx.GetWitnessHash()); + m_txrequest.ForgetTxHash(tx.GetHash()); + m_txrequest.ForgetTxHash(tx.GetWitnessHash()); } } else { if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { @@ -3140,6 +3071,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat // if we start doing this too early. assert(recentRejects); recentRejects->insert(tx.GetWitnessHash()); + m_txrequest.ForgetTxHash(tx.GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, // then we know that the witness was irrelevant to the policy // failure, since this check depends only on the txid @@ -3150,6 +3082,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat // parent-fetching by txid via the orphan-handling logic). if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) { recentRejects->insert(tx.GetHash()); + m_txrequest.ForgetTxHash(tx.GetHash()); } if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); @@ -3790,24 +3723,15 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat } if (msg_type == NetMsgType::NOTFOUND) { - // Remove the NOTFOUND transactions from the peer - LOCK(cs_main); - CNodeState *state = State(pfrom.GetId()); std::vector<CInv> vInv; vRecv >> vInv; - if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (vInv.size() <= MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + LOCK(::cs_main); for (CInv &inv : vInv) { if (inv.IsGenTxMsg()) { - // If we receive a NOTFOUND message for a txid we requested, erase - // it from our data structures for this peer. - auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash); - if (in_flight_it == state->m_tx_download.m_tx_in_flight.end()) { - // Skip any further work if this is a spurious NOTFOUND - // message. - continue; - } - state->m_tx_download.m_tx_in_flight.erase(in_flight_it); - state->m_tx_download.m_tx_announced.erase(inv.hash); + // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as + // completed in TxRequestTracker. 
+ m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash); } } } @@ -3865,21 +3789,37 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP { bool fMoreWork = false; - if (!pfrom->vRecvGetData.empty()) - ProcessGetData(*pfrom, m_chainparams, m_connman, m_mempool, interruptMsgProc); + PeerRef peer = GetPeerRef(pfrom->GetId()); + if (peer == nullptr) return false; + + { + LOCK(peer->m_getdata_requests_mutex); + if (!peer->m_getdata_requests.empty()) { + ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc); + } + } - if (!pfrom->orphan_work_set.empty()) { + { LOCK2(cs_main, g_cs_orphans); - ProcessOrphanTx(pfrom->orphan_work_set); + if (!peer->m_orphan_work_set.empty()) { + ProcessOrphanTx(peer->m_orphan_work_set); + } } if (pfrom->fDisconnect) return false; // this maintains the order of responses - // and prevents vRecvGetData to grow unbounded - if (!pfrom->vRecvGetData.empty()) return true; - if (!pfrom->orphan_work_set.empty()) return true; + // and prevents m_getdata_requests to grow unbounded + { + LOCK(peer->m_getdata_requests_mutex); + if (!peer->m_getdata_requests.empty()) return true; + } + + { + LOCK(g_cs_orphans); + if (!peer->m_orphan_work_set.empty()) return true; + } // Don't bother if send buffer is too full to respond anyway if (pfrom->fPauseSend) @@ -3906,10 +3846,11 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP try { ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc); - if (interruptMsgProc) - return false; - if (!pfrom->vRecvGetData.empty()) - fMoreWork = true; + if (interruptMsgProc) return false; + { + LOCK(peer->m_getdata_requests_mutex); + if (!peer->m_getdata_requests.empty()) fMoreWork = true; + } } catch (const std::exception& e) { LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name()); } catch (...) { @@ -4582,67 +4523,26 @@ bool PeerManager::SendMessages(CNode* pto) // // Message: getdata (non-blocks) // - - // For robustness, expire old requests after a long timeout, so that - // we can resume downloading transactions from a peer even if they - // were unresponsive in the past. - // Eventually we should consider disconnecting peers, but this is - // conservative. - if (state.m_tx_download.m_check_expiry_timer <= current_time) { - for (auto it=state.m_tx_download.m_tx_in_flight.begin(); it != state.m_tx_download.m_tx_in_flight.end();) { - if (it->second <= current_time - TX_EXPIRY_INTERVAL) { - LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n", it->first.ToString(), pto->GetId()); - state.m_tx_download.m_tx_announced.erase(it->first); - state.m_tx_download.m_tx_in_flight.erase(it++); - } else { - ++it; - } - } - // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize - // so that we're not doing this for all peers at the same time. - state.m_tx_download.m_check_expiry_timer = current_time + TX_EXPIRY_INTERVAL / 2 + GetRandMicros(TX_EXPIRY_INTERVAL); - } - - auto& tx_process_time = state.m_tx_download.m_tx_process_time; - while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) { - const GenTxid gtxid = tx_process_time.begin()->second; - // Erase this entry from tx_process_time (it may be added back for - // processing at a later time, see below) - tx_process_time.erase(tx_process_time.begin()); - CInv inv(gtxid.IsWtxid() ? 
MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); - if (!AlreadyHaveTx(ToGenTxid(inv), m_mempool)) { - // If this transaction was last requested more than 1 minute ago, - // then request. - const auto last_request_time = GetTxRequestTime(gtxid); - if (last_request_time <= current_time - GETDATA_TX_INTERVAL) { - LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId()); - vGetData.push_back(inv); - if (vGetData.size() >= MAX_GETDATA_SZ) { - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); - vGetData.clear(); - } - UpdateTxRequestTime(gtxid, current_time); - state.m_tx_download.m_tx_in_flight.emplace(gtxid.GetHash(), current_time); - } else { - // This transaction is in flight from someone else; queue - // up processing to happen after the download times out - // (with a slight delay for inbound peers, to prefer - // requests to outbound peers). - // Don't apply the txid-delay to re-requests of a - // transaction; the heuristic of delaying requests to - // txid-relay peers is to save bandwidth on initial - // announcement of a transaction, and doesn't make sense - // for a followup request if our first peer times out (and - // would open us up to an attacker using inbound - // wtxid-relay to prevent us from requesting transactions - // from outbound txid-relay peers). - const auto next_process_time = CalculateTxGetDataTime(gtxid, current_time, !state.fPreferredDownload, false); - tx_process_time.emplace(next_process_time, gtxid); + std::vector<std::pair<NodeId, GenTxid>> expired; + auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired); + for (const auto& entry : expired) { + LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx", + entry.second.GetHash().ToString(), entry.first); + } + for (const GenTxid& gtxid : requestable) { + if (!AlreadyHaveTx(gtxid, m_mempool)) { + LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", + gtxid.GetHash().ToString(), pto->GetId()); + vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); + if (vGetData.size() >= MAX_GETDATA_SZ) { + m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + vGetData.clear(); } + m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL); } else { - // We have already seen this transaction, no need to download. - state.m_tx_download.m_tx_announced.erase(gtxid.GetHash()); - state.m_tx_download.m_tx_in_flight.erase(gtxid.GetHash()); + // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as + // this should already be called whenever a transaction becomes AlreadyHaveTx(). + m_txrequest.ForgetTxHash(gtxid.GetHash()); } } diff --git a/src/net_processing.h b/src/net_processing.h index 946a5f4715..578660355a 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -9,6 +9,7 @@ #include <consensus/params.h> #include <net.h> #include <sync.h> +#include <txrequest.h> #include <validationinterface.h> class BlockTransactionsRequest; @@ -127,12 +128,19 @@ private: void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req); + /** Register with TxRequestTracker that an INV has been received from a + * peer. The announcement parameters are decided in PeerManager and then + * passed to TxRequestTracker. 
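The batching in the new getdata loop above can be sketched as follows. send_getdata and mark_requested stand in for PushMessage(GETDATA, ...) and m_txrequest.RequestedTx(); the real loop additionally skips entries that are AlreadyHaveTx() and sends any final partial batch later in SendMessages() together with other getdata items.

    #include <functional>
    #include <string>
    #include <vector>

    void BatchGetData(const std::vector<std::string>& requestable, size_t max_getdata_sz,
                      const std::function<void(const std::vector<std::string>&)>& send_getdata,
                      const std::function<void(const std::string&)>& mark_requested)
    {
        std::vector<std::string> batch;
        for (const std::string& hash : requestable) {
            batch.push_back(hash);
            if (batch.size() >= max_getdata_sz) {   // flush a full GETDATA message
                send_getdata(batch);
                batch.clear();
            }
            mark_requested(hash);                   // record the request and its expiry
        }
        if (!batch.empty()) send_getdata(batch);    // for the sketch, flush the remainder here
    }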
*/ + void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + const CChainParams& m_chainparams; CConnman& m_connman; /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ BanMan* const m_banman; ChainstateManager& m_chainman; CTxMemPool& m_mempool; + TxRequestTracker m_txrequest GUARDED_BY(::cs_main); int64_t m_stale_tip_check_time; //!< Next time to check for stale tip }; diff --git a/src/netaddress.cpp b/src/netaddress.cpp index 6695ec3700..c0193fa2e9 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -1109,6 +1109,17 @@ bool CSubNet::IsValid() const return valid; } +bool CSubNet::SanityCheck() const +{ + if (!(network.IsIPv4() || network.IsIPv6())) return false; + + for (size_t x = 0; x < network.m_addr.size(); ++x) { + if (network.m_addr[x] & ~netmask[x]) return false; + } + + return true; +} + bool operator==(const CSubNet& a, const CSubNet& b) { return a.valid == b.valid && a.network == b.network && !memcmp(a.netmask, b.netmask, 16); diff --git a/src/netaddress.h b/src/netaddress.h index 9c8148e33e..f35b01d202 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -459,6 +459,8 @@ class CSubNet /// Is this value valid? (only used to signal parse errors) bool valid; + bool SanityCheck() const; + public: CSubNet(); CSubNet(const CNetAddr& addr, uint8_t mask); @@ -476,7 +478,23 @@ class CSubNet friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); } friend bool operator<(const CSubNet& a, const CSubNet& b); - SERIALIZE_METHODS(CSubNet, obj) { READWRITE(obj.network, obj.netmask, obj.valid); } + SERIALIZE_METHODS(CSubNet, obj) + { + READWRITE(obj.network); + if (obj.network.IsIPv4()) { + // Before commit 102867c587f5f7954232fb8ed8e85cda78bb4d32, CSubNet used the last 4 bytes of netmask + // to store the relevant bytes for an IPv4 mask. For compatiblity reasons, keep doing so in + // serialized form. + unsigned char dummy[12] = {0}; + READWRITE(dummy); + READWRITE(MakeSpan(obj.netmask).first(4)); + } else { + READWRITE(obj.netmask); + } + READWRITE(obj.valid); + // Mark invalid if the result doesn't pass sanity checking. + SER_READ(obj, if (obj.valid) obj.valid = obj.SanityCheck()); + } }; /** A combination of a network address (CNetAddr) and a (TCP) port */ diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index b72f7b70e9..97d5aad8e4 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -13,7 +13,8 @@ #include <future> -static TransactionError HandleATMPError(const TxValidationState& state, std::string& err_string_out) { +static TransactionError HandleATMPError(const TxValidationState& state, std::string& err_string_out) +{ err_string_out = state.ToString(); if (state.IsInvalid()) { if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { @@ -50,10 +51,10 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t if (!node.mempool->exists(hashTx)) { // Transaction is not already in the mempool. TxValidationState state; - CAmount fee{0}; - if (max_tx_fee) { + if (max_tx_fee > 0) { // First, call ATMP with test_accept and check the fee. If ATMP // fails here, return error immediately. 
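To make the CSubNet serialization compatibility note above concrete: for IPv4 subnets the four relevant netmask bytes are written into the last 4 bytes of a 16-byte field, preceded by 12 zero bytes, so the encoding matches what pre-change code produced. An illustrative helper, not the SERIALIZE_METHODS implementation:

    #include <array>
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> SerializeIPv4Netmask(const std::array<uint8_t, 4>& mask)
    {
        std::vector<uint8_t> out(12, 0x00);               // legacy zero padding
        out.insert(out.end(), mask.begin(), mask.end());  // mask occupies bytes 12..15
        return out;
    }

For example, a /24 mask serializes as twelve 0x00 bytes followed by ff ff ff 00.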
+ CAmount fee{0}; if (!AcceptToMemoryPool(*node.mempool, state, tx, nullptr /* plTxnReplaced */, false /* bypass_limits */, /* test_accept */ true, &fee)) { return HandleATMPError(state, err_string); diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 0e9820da1e..69f2b456f1 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -9,7 +9,7 @@ #include <consensus/validation.h> #include <coins.h> - +#include <span.h> CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn) { @@ -206,6 +206,7 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) // get the scriptPubKey corresponding to this input: CScript prevScript = prev.scriptPubKey; + bool p2sh = false; if (prevScript.IsPayToScriptHash()) { std::vector <std::vector<unsigned char> > stack; // If the scriptPubKey is P2SH, we try to extract the redeemScript casually by converting the scriptSig @@ -216,6 +217,7 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) if (stack.empty()) return false; prevScript = CScript(stack.back().begin(), stack.back().end()); + p2sh = true; } int witnessversion = 0; @@ -237,6 +239,36 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) return false; } } + + // Check policy limits for Taproot spends: + // - MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE limit for stack item size + // - No annexes + if (witnessversion == 1 && witnessprogram.size() == WITNESS_V1_TAPROOT_SIZE && !p2sh) { + // Taproot spend (non-P2SH-wrapped, version 1, witness program size 32; see BIP 341) + auto stack = MakeSpan(tx.vin[i].scriptWitness.stack); + if (stack.size() >= 2 && !stack.back().empty() && stack.back()[0] == ANNEX_TAG) { + // Annexes are nonstandard as long as no semantics are defined for them. + return false; + } + if (stack.size() >= 2) { + // Script path spend (2 or more stack elements after removing optional annex) + const auto& control_block = SpanPopBack(stack); + SpanPopBack(stack); // Ignore script + if (control_block.empty()) return false; // Empty control block is invalid + if ((control_block[0] & TAPROOT_LEAF_MASK) == TAPROOT_LEAF_TAPSCRIPT) { + // Leaf version 0xc0 (aka Tapscript, see BIP 342) + for (const auto& item : stack) { + if (item.size() > MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE) return false; + } + } + } else if (stack.size() == 1) { + // Key path spend (1 stack element after removing optional annex) + // (no policy rules apply) + } else { + // 0 stack elements; this is already invalid by consensus rules + return false; + } + } } return true; } diff --git a/src/policy/policy.h b/src/policy/policy.h index 7f168ee20f..51d67b9390 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -40,6 +40,8 @@ static const bool DEFAULT_PERMIT_BAREMULTISIG = true; static const unsigned int MAX_STANDARD_P2WSH_STACK_ITEMS = 100; /** The maximum size of each witness stack item in a standard P2WSH script */ static const unsigned int MAX_STANDARD_P2WSH_STACK_ITEM_SIZE = 80; +/** The maximum size of each witness stack item in a standard BIP 342 script (Taproot, leaf version 0xc0) */ +static const unsigned int MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE = 80; /** The maximum size of a standard witnessScript */ static const unsigned int MAX_STANDARD_P2WSH_SCRIPT_SIZE = 3600; /** Min feerate for defining dust. 
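The Taproot spend policy added to IsWitnessStandard() above can be restated over a bare witness stack as below, assuming a native (non-P2SH-wrapped) version-1, 32-byte witness program. The constants mirror BIP 341/342 (annex tag 0x50, leaf mask 0xfe, Tapscript leaf version 0xc0); the function is an illustration, not the production check.

    #include <cstddef>
    #include <vector>

    constexpr unsigned char ANNEX_TAG = 0x50;
    constexpr unsigned char TAPROOT_LEAF_MASK = 0xfe;
    constexpr unsigned char TAPROOT_LEAF_TAPSCRIPT = 0xc0;
    constexpr size_t MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE = 80;

    bool IsStandardTaprootWitness(std::vector<std::vector<unsigned char>> stack)
    {
        // An annex (final element starting with 0x50 when two or more elements are present)
        // is nonstandard as long as no semantics are defined for it.
        if (stack.size() >= 2 && !stack.back().empty() && stack.back()[0] == ANNEX_TAG) return false;
        if (stack.empty()) return false;        // already invalid by consensus rules
        if (stack.size() == 1) return true;     // key path spend: no extra policy rules
        // Script path spend: last element is the control block, the one before it the script.
        const auto control_block = stack.back();
        stack.pop_back();
        stack.pop_back();                       // ignore the script itself
        if (control_block.empty()) return false;
        if ((control_block[0] & TAPROOT_LEAF_MASK) == TAPROOT_LEAF_TAPSCRIPT) {
            for (const auto& item : stack) {    // Tapscript: cap each remaining stack item at 80 bytes
                if (item.size() > MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE) return false;
            }
        }
        return true;
    }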
Historically this has been based on the @@ -68,7 +70,11 @@ static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS = MANDATORY_SCRIPT_VE SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE | - SCRIPT_VERIFY_CONST_SCRIPTCODE; + SCRIPT_VERIFY_CONST_SCRIPTCODE | + SCRIPT_VERIFY_TAPROOT | + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | + SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS | + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE; /** For convenience, standard but not mandatory verify flags. */ static constexpr unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS = STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS; diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index 77cb1781a4..00544f64fe 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -399,8 +399,8 @@ template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txI /** A generic txid reference (txid or wtxid). */ class GenTxid { - const bool m_is_wtxid; - const uint256 m_hash; + bool m_is_wtxid; + uint256 m_hash; public: GenTxid(bool is_wtxid, const uint256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {} bool IsWtxid() const { return m_is_wtxid; } diff --git a/src/pubkey.cpp b/src/pubkey.cpp index fc14f41a0c..4d734fc891 100644 --- a/src/pubkey.cpp +++ b/src/pubkey.cpp @@ -7,6 +7,7 @@ #include <secp256k1.h> #include <secp256k1_recovery.h> +#include <secp256k1_schnorrsig.h> namespace { @@ -166,6 +167,27 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_ return 1; } +XOnlyPubKey::XOnlyPubKey(Span<const unsigned char> bytes) +{ + assert(bytes.size() == 32); + std::copy(bytes.begin(), bytes.end(), m_keydata.begin()); +} + +bool XOnlyPubKey::VerifySchnorr(const uint256& msg, Span<const unsigned char> sigbytes) const +{ + assert(sigbytes.size() == 64); + secp256k1_xonly_pubkey pubkey; + if (!secp256k1_xonly_pubkey_parse(secp256k1_context_verify, &pubkey, m_keydata.data())) return false; + return secp256k1_schnorrsig_verify(secp256k1_context_verify, sigbytes.data(), msg.begin(), &pubkey); +} + +bool XOnlyPubKey::CheckPayToContract(const XOnlyPubKey& base, const uint256& hash, bool parity) const +{ + secp256k1_xonly_pubkey base_point; + if (!secp256k1_xonly_pubkey_parse(secp256k1_context_verify, &base_point, base.data())) return false; + return secp256k1_xonly_pubkey_tweak_add_check(secp256k1_context_verify, m_keydata.begin(), parity, &base_point, hash.begin()); +} + bool CPubKey::Verify(const uint256 &hash, const std::vector<unsigned char>& vchSig) const { if (!IsValid()) return false; diff --git a/src/pubkey.h b/src/pubkey.h index fcbc7e8416..0f784b86e4 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -9,6 +9,7 @@ #include <hash.h> #include <serialize.h> +#include <span.h> #include <uint256.h> #include <stdexcept> @@ -169,7 +170,7 @@ public: /* * Check syntactic correctness. * - * Note that this is consensus critical as CheckSig() calls it! + * Note that this is consensus critical as CheckECDSASignature() calls it! */ bool IsValid() const { @@ -206,6 +207,27 @@ public: bool Derive(CPubKey& pubkeyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode& cc) const; }; +class XOnlyPubKey +{ +private: + uint256 m_keydata; + +public: + /** Construct an x-only pubkey from exactly 32 bytes. */ + XOnlyPubKey(Span<const unsigned char> bytes); + + /** Verify a Schnorr signature against this public key. + * + * sigbytes must be exactly 64 bytes. 
+ */ + bool VerifySchnorr(const uint256& msg, Span<const unsigned char> sigbytes) const; + bool CheckPayToContract(const XOnlyPubKey& base, const uint256& hash, bool parity) const; + + const unsigned char& operator[](int pos) const { return *(m_keydata.begin() + pos); } + const unsigned char* data() const { return m_keydata.begin(); } + size_t size() const { return m_keydata.size(); } +}; + struct CExtPubKey { unsigned char nDepth; unsigned char vchFingerprint[4]; diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 97fb88d71c..50a1ea6936 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -28,6 +28,8 @@ #include <wallet/fees.h> #include <wallet/wallet.h> +#include <validation.h> + #include <QFontMetrics> #include <QScrollBar> #include <QSettings> @@ -134,7 +136,7 @@ void SendCoinsDialog::setClientModel(ClientModel *_clientModel) this->clientModel = _clientModel; if (_clientModel) { - connect(_clientModel, &ClientModel::numBlocksChanged, this, &SendCoinsDialog::updateSmartFeeLabel); + connect(_clientModel, &ClientModel::numBlocksChanged, this, &SendCoinsDialog::updateNumberOfBlocks); } } @@ -744,6 +746,12 @@ void SendCoinsDialog::updateCoinControlState(CCoinControl& ctrl) ctrl.fAllowWatchOnly = model->wallet().privateKeysDisabled(); } +void SendCoinsDialog::updateNumberOfBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers, SynchronizationState sync_state) { + if (sync_state == SynchronizationState::POST_INIT) { + updateSmartFeeLabel(); + } +} + void SendCoinsDialog::updateSmartFeeLabel() { if(!model || !model->getOptionsModel()) diff --git a/src/qt/sendcoinsdialog.h b/src/qt/sendcoinsdialog.h index 6961aa7821..8519f1f65b 100644 --- a/src/qt/sendcoinsdialog.h +++ b/src/qt/sendcoinsdialog.h @@ -17,6 +17,7 @@ class ClientModel; class PlatformStyle; class SendCoinsEntry; class SendCoinsRecipient; +enum class SynchronizationState; namespace Ui { class SendCoinsDialog; @@ -98,6 +99,7 @@ private Q_SLOTS: void coinControlClipboardLowOutput(); void coinControlClipboardChange(); void updateFeeSectionControls(); + void updateNumberOfBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers, SynchronizationState sync_state); void updateSmartFeeLabel(); Q_SIGNALS: diff --git a/src/randomenv.cpp b/src/randomenv.cpp index 073d82b491..07122b7f6d 100644 --- a/src/randomenv.cpp +++ b/src/randomenv.cpp @@ -67,7 +67,8 @@ void RandAddSeedPerfmon(CSHA512& hasher) #ifdef WIN32 // Seed with the entire set of perfmon data - // This can take up to 2 seconds, so only do it every 10 minutes + // This can take up to 2 seconds, so only do it every 10 minutes. + // Initialize last_perfmon to 0 seconds, we don't skip the first call. 
static std::atomic<std::chrono::seconds> last_perfmon{std::chrono::seconds{0}}; auto last_time = last_perfmon.load(); auto current_time = GetTime<std::chrono::seconds>(); @@ -83,7 +84,7 @@ void RandAddSeedPerfmon(CSHA512& hasher) ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", nullptr, nullptr, vData.data(), &nSize); if (ret != ERROR_MORE_DATA || vData.size() >= nMaxSize) break; - vData.resize(std::max((vData.size() * 3) / 2, nMaxSize)); // Grow size of buffer exponentially + vData.resize(std::min((vData.size() * 3) / 2, nMaxSize)); // Grow size of buffer exponentially } RegCloseKey(HKEY_PERFORMANCE_DATA); if (ret == ERROR_SUCCESS) { diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 1fbddc1ceb..a162c1ee70 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1354,6 +1354,7 @@ RPCHelpMan getblockchaininfo() BuriedForkDescPushBack(softforks, "csv", consensusParams.CSVHeight); BuriedForkDescPushBack(softforks, "segwit", consensusParams.SegwitHeight); BIP9SoftForkDescPushBack(softforks, "testdummy", consensusParams, Consensus::DEPLOYMENT_TESTDUMMY); + BIP9SoftForkDescPushBack(softforks, "taproot", consensusParams, Consensus::DEPLOYMENT_TAPROOT); obj.pushKV("softforks", softforks); obj.pushKV("warnings", GetWarnings(false).original); diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index b04e106b2d..a561b7e93c 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -352,7 +352,7 @@ static RPCHelpMan generateblock() txs.push_back(MakeTransactionRef(std::move(mtx))); } else { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("Transaction decode failed for %s", str)); + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("Transaction decode failed for %s. Make sure the tx has at least one input.", str)); } } diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index def21b119e..b81e6414a5 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -29,6 +29,15 @@ #include <univalue.h> +const std::vector<std::string> CONNECTION_TYPE_DOC{ + "outbound-full-relay (default automatic connections)", + "block-relay-only (does not relay transactions or addresses)", + "inbound (initiated by the peer)", + "manual (added via addnode RPC or -addnode/-connect configuration options)", + "addr-fetch (short-lived automatic connection for soliciting addresses)", + "feeler (short-lived automatic connection for testing addresses)" +}; + static RPCHelpMan getconnectioncount() { return RPCHelpMan{"getconnectioncount", @@ -94,6 +103,7 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"}, {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"}, {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"}, + {RPCResult::Type::STR, "network", "Network (ipv4, ipv6, or onion) the peer connected through"}, {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n" "peer selection (only available if the asmap config flag is set)"}, {RPCResult::Type::STR_HEX, "services", "The services offered"}, @@ -118,7 +128,9 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"}, {RPCResult::Type::BOOL, "addnode", "Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n" "(DEPRECATED, returned only if the config option -deprecatedrpc=getpeerinfo_addnode is passed)"}, - {RPCResult::Type::STR, "connection_type", "Type of connection: \n" + 
Join(CONNECTION_TYPE_DOC, ",\n") + "."}, + {RPCResult::Type::STR, "connection_type", "Type of connection: \n" + Join(CONNECTION_TYPE_DOC, ",\n") + ".\n" + "Please note this output is unlikely to be stable in upcoming releases as we iterate to\n" + "best capture connection behaviors."}, {RPCResult::Type::NUM, "startingheight", "The starting height (block) of the peer"}, {RPCResult::Type::NUM, "banscore", "The ban score (DEPRECATED, returned only if config option -deprecatedrpc=banscore is passed)"}, {RPCResult::Type::NUM, "synced_headers", "The last header we have in common with this peer"}, @@ -127,7 +139,8 @@ static RPCHelpMan getpeerinfo() { {RPCResult::Type::NUM, "n", "The heights of blocks we're currently asking from this peer"}, }}, - {RPCResult::Type::BOOL, "whitelisted", "Whether the peer is whitelisted"}, + {RPCResult::Type::BOOL, "whitelisted", /* optional */ true, "Whether the peer is whitelisted with default permissions\n" + "(DEPRECATED, returned only if config option -deprecatedrpc=whitelisted is passed)"}, {RPCResult::Type::NUM, "minfeefilter", "The minimum fee rate for transactions this peer accepts"}, {RPCResult::Type::OBJ_DYN, "bytessent_per_msg", "", { @@ -139,7 +152,8 @@ static RPCHelpMan getpeerinfo() { {RPCResult::Type::NUM, "msg", "The total bytes received aggregated by message type\n" "When a message type is not listed in this json object, the bytes received are 0.\n" - "Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'."} + "Only known message types can appear as keys in the object and all bytes received\n" + "of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'."} }}, }}, }}, @@ -165,10 +179,13 @@ static RPCHelpMan getpeerinfo() bool fStateStats = GetNodeStateStats(stats.nodeid, statestats); obj.pushKV("id", stats.nodeid); obj.pushKV("addr", stats.addrName); - if (!(stats.addrLocal.empty())) - obj.pushKV("addrlocal", stats.addrLocal); - if (stats.addrBind.IsValid()) + if (stats.addrBind.IsValid()) { obj.pushKV("addrbind", stats.addrBind.ToString()); + } + if (!(stats.addrLocal.empty())) { + obj.pushKV("addrlocal", stats.addrLocal); + } + obj.pushKV("network", stats.m_network); if (stats.m_mapped_as != 0) { obj.pushKV("mapped_as", uint64_t(stats.m_mapped_as)); } @@ -216,7 +233,10 @@ static RPCHelpMan getpeerinfo() } obj.pushKV("inflight", heights); } - obj.pushKV("whitelisted", stats.m_legacyWhitelisted); + if (IsDeprecatedRPCEnabled("whitelisted")) { + // whitelisted is deprecated in v0.21 for removal in v0.22 + obj.pushKV("whitelisted", stats.m_legacyWhitelisted); + } UniValue permissions(UniValue::VARR); for (const auto& permission : NetPermissions::ToStrings(stats.m_permissionFlags)) { permissions.push_back(permission); diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 7a6b605ec3..c6d7fea443 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -656,8 +656,8 @@ static RPCHelpMan combinerawtransaction() std::vector<CMutableTransaction> txVariants(txs.size()); for (unsigned int idx = 0; idx < txs.size(); idx++) { - if (!DecodeHexTx(txVariants[idx], txs[idx].get_str(), true)) { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("TX decode failed for tx %d", idx)); + if (!DecodeHexTx(txVariants[idx], txs[idx].get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("TX decode failed for tx %d. 
Make sure the tx has at least one input.", idx)); } } @@ -780,8 +780,8 @@ static RPCHelpMan signrawtransactionwithkey() RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VARR, UniValue::VARR, UniValue::VSTR}, true); CMutableTransaction mtx; - if (!DecodeHexTx(mtx, request.params[0].get_str(), true)) { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); + if (!DecodeHexTx(mtx, request.params[0].get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input."); } FillableSigningProvider keystore; @@ -847,10 +847,10 @@ static RPCHelpMan sendrawtransaction() UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() }); - // parse hex string from parameter CMutableTransaction mtx; - if (!DecodeHexTx(mtx, request.params[0].get_str())) - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); + if (!DecodeHexTx(mtx, request.params[0].get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input."); + } CTransactionRef tx(MakeTransactionRef(std::move(mtx))); const CFeeRate max_raw_tx_fee_rate = request.params[1].isNull() ? @@ -928,7 +928,7 @@ static RPCHelpMan testmempoolaccept() CMutableTransaction mtx; if (!DecodeHexTx(mtx, request.params[0].get_array()[0].get_str())) { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input."); } CTransactionRef tx(MakeTransactionRef(std::move(mtx))); const uint256& tx_hash = tx->GetHash(); diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 50a6192476..5735e7df66 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -342,13 +342,10 @@ public: }; } -/** Helper for OP_CHECKSIG and OP_CHECKSIGVERIFY - * - * A return value of false means the script fails entirely. When true is returned, the - * fSuccess variable indicates whether the signature check itself succeeded. 
- */ -static bool EvalChecksig(const valtype& vchSig, const valtype& vchPubKey, CScript::const_iterator pbegincodehash, CScript::const_iterator pend, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror, bool& fSuccess) +static bool EvalChecksigPreTapscript(const valtype& vchSig, const valtype& vchPubKey, CScript::const_iterator pbegincodehash, CScript::const_iterator pend, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror, bool& fSuccess) { + assert(sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0); + // Subset of script starting at the most recent codeseparator CScript scriptCode(pbegincodehash, pend); @@ -363,7 +360,7 @@ static bool EvalChecksig(const valtype& vchSig, const valtype& vchPubKey, CScrip //serror is set return false; } - fSuccess = checker.CheckSig(vchSig, vchPubKey, scriptCode, sigversion); + fSuccess = checker.CheckECDSASignature(vchSig, vchPubKey, scriptCode, sigversion); if (!fSuccess && (flags & SCRIPT_VERIFY_NULLFAIL) && vchSig.size()) return set_error(serror, SCRIPT_ERR_SIG_NULLFAIL); @@ -371,7 +368,67 @@ static bool EvalChecksig(const valtype& vchSig, const valtype& vchPubKey, CScrip return true; } -bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror) +static bool EvalChecksigTapscript(const valtype& sig, const valtype& pubkey, ScriptExecutionData& execdata, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror, bool& success) +{ + assert(sigversion == SigVersion::TAPSCRIPT); + + /* + * The following validation sequence is consensus critical. Please note how -- + * upgradable public key versions precede other rules; + * the script execution fails when using empty signature with invalid public key; + * the script execution fails when using non-empty invalid signature. + */ + success = !sig.empty(); + if (success) { + // Implement the sigops/witnesssize ratio test. + // Passing with an upgradable public key version is also counted. + assert(execdata.m_validation_weight_left_init); + execdata.m_validation_weight_left -= VALIDATION_WEIGHT_PER_SIGOP_PASSED; + if (execdata.m_validation_weight_left < 0) { + return set_error(serror, SCRIPT_ERR_TAPSCRIPT_VALIDATION_WEIGHT); + } + } + if (pubkey.size() == 0) { + return set_error(serror, SCRIPT_ERR_PUBKEYTYPE); + } else if (pubkey.size() == 32) { + if (success && !checker.CheckSchnorrSignature(sig, pubkey, sigversion, execdata, serror)) { + return false; // serror is set + } + } else { + /* + * New public key version softforks should be defined before this `else` block. + * Generally, the new code should not do anything but failing the script execution. To avoid + * consensus bugs, it should not modify any existing values (including `success`). + */ + if ((flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE) != 0) { + return set_error(serror, SCRIPT_ERR_DISCOURAGE_UPGRADABLE_PUBKEYTYPE); + } + } + + return true; +} + +/** Helper for OP_CHECKSIG, OP_CHECKSIGVERIFY, and (in Tapscript) OP_CHECKSIGADD. + * + * A return value of false means the script fails entirely. When true is returned, the + * success variable indicates whether the signature check itself succeeded. 
+ */ +static bool EvalChecksig(const valtype& sig, const valtype& pubkey, CScript::const_iterator pbegincodehash, CScript::const_iterator pend, ScriptExecutionData& execdata, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror, bool& success) +{ + switch (sigversion) { + case SigVersion::BASE: + case SigVersion::WITNESS_V0: + return EvalChecksigPreTapscript(sig, pubkey, pbegincodehash, pend, flags, checker, sigversion, serror, success); + case SigVersion::TAPSCRIPT: + return EvalChecksigTapscript(sig, pubkey, execdata, flags, checker, sigversion, serror, success); + case SigVersion::TAPROOT: + // Key path spending in Taproot has no script, so this is unreachable. + break; + } + assert(false); +} + +bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) { static const CScriptNum bnZero(0); static const CScriptNum bnOne(1); @@ -381,6 +438,9 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& // static const valtype vchZero(0); static const valtype vchTrue(1, 1); + // sigversion cannot be TAPROOT here, as it admits no script execution. + assert(sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0 || sigversion == SigVersion::TAPSCRIPT); + CScript::const_iterator pc = script.begin(); CScript::const_iterator pend = script.end(); CScript::const_iterator pbegincodehash = script.begin(); @@ -389,15 +449,18 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& ConditionStack vfExec; std::vector<valtype> altstack; set_error(serror, SCRIPT_ERR_UNKNOWN_ERROR); - if (script.size() > MAX_SCRIPT_SIZE) + if ((sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0) && script.size() > MAX_SCRIPT_SIZE) { return set_error(serror, SCRIPT_ERR_SCRIPT_SIZE); + } int nOpCount = 0; bool fRequireMinimal = (flags & SCRIPT_VERIFY_MINIMALDATA) != 0; + uint32_t opcode_pos = 0; + execdata.m_codeseparator_pos = 0xFFFFFFFFUL; + execdata.m_codeseparator_pos_init = true; try { - while (pc < pend) - { + for (; pc < pend; ++opcode_pos) { bool fExec = vfExec.all_true(); // @@ -408,9 +471,12 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& if (vchPushValue.size() > MAX_SCRIPT_ELEMENT_SIZE) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); - // Note how OP_RESERVED does not count towards the opcode limit. - if (opcode > OP_16 && ++nOpCount > MAX_OPS_PER_SCRIPT) - return set_error(serror, SCRIPT_ERR_OP_COUNT); + if (sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0) { + // Note how OP_RESERVED does not count towards the opcode limit. + if (opcode > OP_16 && ++nOpCount > MAX_OPS_PER_SCRIPT) { + return set_error(serror, SCRIPT_ERR_OP_COUNT); + } + } if (opcode == OP_CAT || opcode == OP_SUBSTR || @@ -568,6 +634,15 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& if (stack.size() < 1) return set_error(serror, SCRIPT_ERR_UNBALANCED_CONDITIONAL); valtype& vch = stacktop(-1); + // Tapscript requires minimal IF/NOTIF inputs as a consensus rule. + if (sigversion == SigVersion::TAPSCRIPT) { + // The input argument to the OP_IF and OP_NOTIF opcodes must be either + // exactly 0 (the empty vector) or exactly 1 (the one-byte vector with value 1). 
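                    // Annotation (not part of the patch): concretely, the stack element consumed by
                    // OP_IF/OP_NOTIF here must be exactly {} (false) or {0x01} (true); inputs such as
                    // {0x02} or {0x01,0x00} fail with SCRIPT_ERR_TAPSCRIPT_MINIMALIF. Under witness v0
                    // the equivalent check (just below) only runs when the policy flag
                    // SCRIPT_VERIFY_MINIMALIF is set.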
+ if (vch.size() > 1 || (vch.size() == 1 && vch[0] != 1)) { + return set_error(serror, SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } + } + // Under witness v0 rules it is only a policy rule, enabled through SCRIPT_VERIFY_MINIMALIF. if (sigversion == SigVersion::WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { if (vch.size() > 1) return set_error(serror, SCRIPT_ERR_MINIMALIF); @@ -1001,6 +1076,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& // Hash starts after the code separator pbegincodehash = pc; + execdata.m_codeseparator_pos = opcode_pos; } break; @@ -1015,7 +1091,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& valtype& vchPubKey = stacktop(-1); bool fSuccess = true; - if (!EvalChecksig(vchSig, vchPubKey, pbegincodehash, pend, flags, checker, sigversion, serror, fSuccess)) return false; + if (!EvalChecksig(vchSig, vchPubKey, pbegincodehash, pend, execdata, flags, checker, sigversion, serror, fSuccess)) return false; popstack(stack); popstack(stack); stack.push_back(fSuccess ? vchTrue : vchFalse); @@ -1029,9 +1105,32 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& } break; + case OP_CHECKSIGADD: + { + // OP_CHECKSIGADD is only available in Tapscript + if (sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0) return set_error(serror, SCRIPT_ERR_BAD_OPCODE); + + // (sig num pubkey -- num) + if (stack.size() < 3) return set_error(serror, SCRIPT_ERR_INVALID_STACK_OPERATION); + + const valtype& sig = stacktop(-3); + const CScriptNum num(stacktop(-2), fRequireMinimal); + const valtype& pubkey = stacktop(-1); + + bool success = true; + if (!EvalChecksig(sig, pubkey, pbegincodehash, pend, execdata, flags, checker, sigversion, serror, success)) return false; + popstack(stack); + popstack(stack); + popstack(stack); + stack.push_back((num + (success ? 1 : 0)).getvch()); + } + break; + case OP_CHECKMULTISIG: case OP_CHECKMULTISIGVERIFY: { + if (sigversion == SigVersion::TAPSCRIPT) return set_error(serror, SCRIPT_ERR_TAPSCRIPT_CHECKMULTISIG); + // ([sig ...] num_of_signatures [pubkey ...] num_of_pubkeys -- bool) int i = 1; @@ -1089,7 +1188,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& } // Check signature - bool fOk = checker.CheckSig(vchSig, vchPubKey, scriptCode, sigversion); + bool fOk = checker.CheckECDSASignature(vchSig, vchPubKey, scriptCode, sigversion); if (fOk) { isig++; @@ -1159,6 +1258,12 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& return set_success(serror); } +bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror) +{ + ScriptExecutionData execdata; + return EvalScript(stack, script, flags, checker, sigversion, execdata, serror); +} + namespace { /** @@ -1291,35 +1396,183 @@ uint256 GetOutputsSHA256(const T& txTo) return ss.GetSHA256(); } +/** Compute the (single) SHA256 of the concatenation of all amounts spent by a tx. */ +uint256 GetSpentAmountsSHA256(const std::vector<CTxOut>& outputs_spent) +{ + CHashWriter ss(SER_GETHASH, 0); + for (const auto& txout : outputs_spent) { + ss << txout.nValue; + } + return ss.GetSHA256(); +} + +/** Compute the (single) SHA256 of the concatenation of all scriptPubKeys spent by a tx. 
*/ +uint256 GetSpentScriptsSHA256(const std::vector<CTxOut>& outputs_spent) +{ + CHashWriter ss(SER_GETHASH, 0); + for (const auto& txout : outputs_spent) { + ss << txout.scriptPubKey; + } + return ss.GetSHA256(); +} + + } // namespace template <class T> -void PrecomputedTransactionData::Init(const T& txTo) +void PrecomputedTransactionData::Init(const T& txTo, std::vector<CTxOut>&& spent_outputs) { - assert(!m_ready); + assert(!m_spent_outputs_ready); - // Cache is calculated only for transactions with witness - if (txTo.HasWitness()) { - hashPrevouts = SHA256Uint256(GetPrevoutsSHA256(txTo)); - hashSequence = SHA256Uint256(GetSequencesSHA256(txTo)); - hashOutputs = SHA256Uint256(GetOutputsSHA256(txTo)); + m_spent_outputs = std::move(spent_outputs); + if (!m_spent_outputs.empty()) { + assert(m_spent_outputs.size() == txTo.vin.size()); + m_spent_outputs_ready = true; } - m_ready = true; + // Determine which precomputation-impacting features this transaction uses. + bool uses_bip143_segwit = false; + bool uses_bip341_taproot = false; + for (size_t inpos = 0; inpos < txTo.vin.size(); ++inpos) { + if (!txTo.vin[inpos].scriptWitness.IsNull()) { + if (m_spent_outputs_ready && m_spent_outputs[inpos].scriptPubKey.size() == 2 + WITNESS_V1_TAPROOT_SIZE && + m_spent_outputs[inpos].scriptPubKey[0] == OP_1) { + // Treat every witness-bearing spend with 34-byte scriptPubKey that starts with OP_1 as a Taproot + // spend. This only works if spent_outputs was provided as well, but if it wasn't, actual validation + // will fail anyway. Note that this branch may trigger for scriptPubKeys that aren't actually segwit + // but in that case validation will fail as SCRIPT_ERR_WITNESS_UNEXPECTED anyway. + uses_bip341_taproot = true; + } else { + // Treat every spend that's not known to native witness v1 as a Witness v0 spend. This branch may + // also be taken for unknown witness versions, but it is harmless, and being precise would require + // P2SH evaluation to find the redeemScript. + uses_bip143_segwit = true; + } + } + if (uses_bip341_taproot && uses_bip143_segwit) break; // No need to scan further if we already need all. + } + + if (uses_bip143_segwit || uses_bip341_taproot) { + // Computations shared between both sighash schemes. 
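        // Annotation (not part of the patch): the single-SHA256 values assigned below are the
        // intermediates that BIP341 signature hashes commit to directly; the BIP143
        // hashPrevouts/hashSequence/hashOutputs are then derived from the same intermediates with one
        // extra SHA256 pass, so the shared work is done only once per transaction.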
+ m_prevouts_single_hash = GetPrevoutsSHA256(txTo); + m_sequences_single_hash = GetSequencesSHA256(txTo); + m_outputs_single_hash = GetOutputsSHA256(txTo); + } + if (uses_bip143_segwit) { + hashPrevouts = SHA256Uint256(m_prevouts_single_hash); + hashSequence = SHA256Uint256(m_sequences_single_hash); + hashOutputs = SHA256Uint256(m_outputs_single_hash); + m_bip143_segwit_ready = true; + } + if (uses_bip341_taproot) { + m_spent_amounts_single_hash = GetSpentAmountsSHA256(m_spent_outputs); + m_spent_scripts_single_hash = GetSpentScriptsSHA256(m_spent_outputs); + m_bip341_taproot_ready = true; + } } template <class T> PrecomputedTransactionData::PrecomputedTransactionData(const T& txTo) { - Init(txTo); + Init(txTo, {}); } // explicit instantiation -template void PrecomputedTransactionData::Init(const CTransaction& txTo); -template void PrecomputedTransactionData::Init(const CMutableTransaction& txTo); +template void PrecomputedTransactionData::Init(const CTransaction& txTo, std::vector<CTxOut>&& spent_outputs); +template void PrecomputedTransactionData::Init(const CMutableTransaction& txTo, std::vector<CTxOut>&& spent_outputs); template PrecomputedTransactionData::PrecomputedTransactionData(const CTransaction& txTo); template PrecomputedTransactionData::PrecomputedTransactionData(const CMutableTransaction& txTo); +static const CHashWriter HASHER_TAPSIGHASH = TaggedHash("TapSighash"); +static const CHashWriter HASHER_TAPLEAF = TaggedHash("TapLeaf"); +static const CHashWriter HASHER_TAPBRANCH = TaggedHash("TapBranch"); +static const CHashWriter HASHER_TAPTWEAK = TaggedHash("TapTweak"); + +template<typename T> +bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache) +{ + uint8_t ext_flag, key_version; + switch (sigversion) { + case SigVersion::TAPROOT: + ext_flag = 0; + // key_version is not used and left uninitialized. + break; + case SigVersion::TAPSCRIPT: + ext_flag = 1; + // key_version must be 0 for now, representing the current version of + // 32-byte public keys in the tapscript signature opcode execution. + // An upgradable public key version (with a size not 32-byte) may + // request a different key_version with a new sigversion. + key_version = 0; + break; + default: + assert(false); + } + assert(in_pos < tx_to.vin.size()); + assert(cache.m_bip341_taproot_ready && cache.m_spent_outputs_ready); + + CHashWriter ss = HASHER_TAPSIGHASH; + + // Epoch + static constexpr uint8_t EPOCH = 0; + ss << EPOCH; + + // Hash type + const uint8_t output_type = (hash_type == SIGHASH_DEFAULT) ? SIGHASH_ALL : (hash_type & SIGHASH_OUTPUT_MASK); // Default (no sighash byte) is equivalent to SIGHASH_ALL + const uint8_t input_type = hash_type & SIGHASH_INPUT_MASK; + if (!(hash_type <= 0x03 || (hash_type >= 0x81 && hash_type <= 0x83))) return false; + ss << hash_type; + + // Transaction level data + ss << tx_to.nVersion; + ss << tx_to.nLockTime; + if (input_type != SIGHASH_ANYONECANPAY) { + ss << cache.m_prevouts_single_hash; + ss << cache.m_spent_amounts_single_hash; + ss << cache.m_spent_scripts_single_hash; + ss << cache.m_sequences_single_hash; + } + if (output_type == SIGHASH_ALL) { + ss << cache.m_outputs_single_hash; + } + + // Data about the input/prevout being spent + assert(execdata.m_annex_init); + const bool have_annex = execdata.m_annex_present; + const uint8_t spend_type = (ext_flag << 1) + (have_annex ? 
1 : 0); // The low bit indicates whether an annex is present. + ss << spend_type; + if (input_type == SIGHASH_ANYONECANPAY) { + ss << tx_to.vin[in_pos].prevout; + ss << cache.m_spent_outputs[in_pos]; + ss << tx_to.vin[in_pos].nSequence; + } else { + ss << in_pos; + } + if (have_annex) { + ss << execdata.m_annex_hash; + } + + // Data about the output (if only one). + if (output_type == SIGHASH_SINGLE) { + if (in_pos >= tx_to.vout.size()) return false; + CHashWriter sha_single_output(SER_GETHASH, 0); + sha_single_output << tx_to.vout[in_pos]; + ss << sha_single_output.GetSHA256(); + } + + // Additional data for BIP 342 signatures + if (sigversion == SigVersion::TAPSCRIPT) { + assert(execdata.m_tapleaf_hash_init); + ss << execdata.m_tapleaf_hash; + ss << key_version; + assert(execdata.m_codeseparator_pos_init); + ss << execdata.m_codeseparator_pos; + } + + hash_out = ss.GetSHA256(); + return true; +} + template <class T> uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache) { @@ -1329,7 +1582,7 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn uint256 hashPrevouts; uint256 hashSequence; uint256 hashOutputs; - const bool cacheready = cache && cache->m_ready; + const bool cacheready = cache && cache->m_bip143_segwit_ready; if (!(nHashType & SIGHASH_ANYONECANPAY)) { hashPrevouts = cacheready ? cache->hashPrevouts : SHA256Uint256(GetPrevoutsSHA256(txTo)); @@ -1389,13 +1642,19 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn } template <class T> -bool GenericTransactionSignatureChecker<T>::VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const +bool GenericTransactionSignatureChecker<T>::VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const { return pubkey.Verify(sighash, vchSig); } template <class T> -bool GenericTransactionSignatureChecker<T>::CheckSig(const std::vector<unsigned char>& vchSigIn, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const +bool GenericTransactionSignatureChecker<T>::VerifySchnorrSignature(Span<const unsigned char> sig, const XOnlyPubKey& pubkey, const uint256& sighash) const +{ + return pubkey.VerifySchnorr(sighash, sig); +} + +template <class T> +bool GenericTransactionSignatureChecker<T>::CheckECDSASignature(const std::vector<unsigned char>& vchSigIn, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const { CPubKey pubkey(vchPubKey); if (!pubkey.IsValid()) @@ -1410,13 +1669,41 @@ bool GenericTransactionSignatureChecker<T>::CheckSig(const std::vector<unsigned uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata); - if (!VerifySignature(vchSig, pubkey, sighash)) + if (!VerifyECDSASignature(vchSig, pubkey, sighash)) return false; return true; } template <class T> +bool GenericTransactionSignatureChecker<T>::CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey_in, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror) const +{ + assert(sigversion == SigVersion::TAPROOT || sigversion == SigVersion::TAPSCRIPT); + // Schnorr signatures have 32-byte public keys. The caller is responsible for enforcing this. 
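    // Annotation (not part of the patch): the length rules enforced a few lines below are that a
    // 64-byte signature implies SIGHASH_DEFAULT (hashed like SIGHASH_ALL), while a 65-byte signature
    // carries an explicit sighash byte; explicitly encoding 0x00 in the 65-byte form is rejected so
    // that each (message, hash type) pair has a single valid encoding.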
+ assert(pubkey_in.size() == 32); + // Note that in Tapscript evaluation, empty signatures are treated specially (invalid signature that does not + // abort script execution). This is implemented in EvalChecksigTapscript, which won't invoke + // CheckSchnorrSignature in that case. In other contexts, they are invalid like every other signature with + // size different from 64 or 65. + if (sig.size() != 64 && sig.size() != 65) return set_error(serror, SCRIPT_ERR_SCHNORR_SIG_SIZE); + + XOnlyPubKey pubkey{pubkey_in}; + + uint8_t hashtype = SIGHASH_DEFAULT; + if (sig.size() == 65) { + hashtype = SpanPopBack(sig); + if (hashtype == SIGHASH_DEFAULT) return set_error(serror, SCRIPT_ERR_SCHNORR_SIG_HASHTYPE); + } + uint256 sighash; + assert(this->txdata); + if (!SignatureHashSchnorr(sighash, execdata, *txTo, nIn, hashtype, sigversion, *this->txdata)) { + return set_error(serror, SCRIPT_ERR_SCHNORR_SIG_HASHTYPE); + } + if (!VerifySchnorrSignature(sig, pubkey, sighash)) return set_error(serror, SCRIPT_ERR_SCHNORR_SIG); + return true; +} + +template <class T> bool GenericTransactionSignatureChecker<T>::CheckLockTime(const CScriptNum& nLockTime) const { // There are two kinds of nLockTime: lock-by-blockheight @@ -1504,17 +1791,39 @@ bool GenericTransactionSignatureChecker<T>::CheckSequence(const CScriptNum& nSeq template class GenericTransactionSignatureChecker<CTransaction>; template class GenericTransactionSignatureChecker<CMutableTransaction>; -static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CScript& scriptPubKey, unsigned int flags, SigVersion sigversion, const BaseSignatureChecker& checker, ScriptError* serror) +static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CScript& scriptPubKey, unsigned int flags, SigVersion sigversion, const BaseSignatureChecker& checker, ScriptExecutionData& execdata, ScriptError* serror) { std::vector<valtype> stack{stack_span.begin(), stack_span.end()}; + if (sigversion == SigVersion::TAPSCRIPT) { + // OP_SUCCESSx processing overrides everything, including stack element size limits + CScript::const_iterator pc = scriptPubKey.begin(); + while (pc < scriptPubKey.end()) { + opcodetype opcode; + if (!scriptPubKey.GetOp(pc, opcode)) { + // Note how this condition would not be reached if an unknown OP_SUCCESSx was found + return set_error(serror, SCRIPT_ERR_BAD_OPCODE); + } + // New opcodes will be listed here. May use a different sigversion to modify existing opcodes. + if (IsOpSuccess(opcode)) { + if (flags & SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS) { + return set_error(serror, SCRIPT_ERR_DISCOURAGE_OP_SUCCESS); + } + return set_success(serror); + } + } + + // Tapscript enforces initial stack size limits (altstack is empty here) + if (stack.size() > MAX_STACK_SIZE) return set_error(serror, SCRIPT_ERR_STACK_SIZE); + } + // Disallow stack item size > MAX_SCRIPT_ELEMENT_SIZE in witness stack for (const valtype& elem : stack) { if (elem.size() > MAX_SCRIPT_ELEMENT_SIZE) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); } // Run the script interpreter. 
- if (!EvalScript(stack, scriptPubKey, flags, checker, sigversion, serror)) return false; + if (!EvalScript(stack, scriptPubKey, flags, checker, sigversion, execdata, serror)) return false; // Scripts inside witness implicitly require cleanstack behaviour if (stack.size() != 1) return set_error(serror, SCRIPT_ERR_CLEANSTACK); @@ -1522,40 +1831,104 @@ static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CS return true; } -static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, const std::vector<unsigned char>& program, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror) +static bool VerifyTaprootCommitment(const std::vector<unsigned char>& control, const std::vector<unsigned char>& program, const CScript& script, uint256& tapleaf_hash) { - CScript scriptPubKey; + const int path_len = (control.size() - TAPROOT_CONTROL_BASE_SIZE) / TAPROOT_CONTROL_NODE_SIZE; + const XOnlyPubKey p{uint256(std::vector<unsigned char>(control.begin() + 1, control.begin() + TAPROOT_CONTROL_BASE_SIZE))}; + const XOnlyPubKey q{uint256(program)}; + tapleaf_hash = (CHashWriter(HASHER_TAPLEAF) << uint8_t(control[0] & TAPROOT_LEAF_MASK) << script).GetSHA256(); + uint256 k = tapleaf_hash; + for (int i = 0; i < path_len; ++i) { + CHashWriter ss_branch{HASHER_TAPBRANCH}; + Span<const unsigned char> node(control.data() + TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * i, TAPROOT_CONTROL_NODE_SIZE); + if (std::lexicographical_compare(k.begin(), k.end(), node.begin(), node.end())) { + ss_branch << k << node; + } else { + ss_branch << node << k; + } + k = ss_branch.GetSHA256(); + } + k = (CHashWriter(HASHER_TAPTWEAK) << MakeSpan(p) << k).GetSHA256(); + return q.CheckPayToContract(p, k, control[0] & 1); +} + +static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, const std::vector<unsigned char>& program, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror, bool is_p2sh) +{ + CScript exec_script; //!< Actually executed script (last stack item in P2WSH; implied P2PKH script in P2WPKH; leaf script in P2TR) Span<const valtype> stack{witness.stack}; + ScriptExecutionData execdata; if (witversion == 0) { if (program.size() == WITNESS_V0_SCRIPTHASH_SIZE) { - // Version 0 segregated witness program: SHA256(CScript) inside the program, CScript + inputs in witness + // BIP141 P2WSH: 32-byte witness v0 program (which encodes SHA256(script)) if (stack.size() == 0) { return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_WITNESS_EMPTY); } const valtype& script_bytes = SpanPopBack(stack); - scriptPubKey = CScript(script_bytes.begin(), script_bytes.end()); - uint256 hashScriptPubKey; - CSHA256().Write(&scriptPubKey[0], scriptPubKey.size()).Finalize(hashScriptPubKey.begin()); - if (memcmp(hashScriptPubKey.begin(), program.data(), 32)) { + exec_script = CScript(script_bytes.begin(), script_bytes.end()); + uint256 hash_exec_script; + CSHA256().Write(&exec_script[0], exec_script.size()).Finalize(hash_exec_script.begin()); + if (memcmp(hash_exec_script.begin(), program.data(), 32)) { return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH); } - return ExecuteWitnessScript(stack, scriptPubKey, flags, SigVersion::WITNESS_V0, checker, serror); + return ExecuteWitnessScript(stack, exec_script, flags, SigVersion::WITNESS_V0, checker, execdata, serror); } else if (program.size() == WITNESS_V0_KEYHASH_SIZE) { - // Special case for pay-to-pubkeyhash; signature + pubkey in witness + // BIP141 P2WPKH: 20-byte witness v0 
program (which encodes Hash160(pubkey)) if (stack.size() != 2) { return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH); // 2 items in witness } - scriptPubKey << OP_DUP << OP_HASH160 << program << OP_EQUALVERIFY << OP_CHECKSIG; - return ExecuteWitnessScript(stack, scriptPubKey, flags, SigVersion::WITNESS_V0, checker, serror); + exec_script << OP_DUP << OP_HASH160 << program << OP_EQUALVERIFY << OP_CHECKSIG; + return ExecuteWitnessScript(stack, exec_script, flags, SigVersion::WITNESS_V0, checker, execdata, serror); } else { return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_WRONG_LENGTH); } + } else if (witversion == 1 && program.size() == WITNESS_V1_TAPROOT_SIZE && !is_p2sh) { + // BIP341 Taproot: 32-byte non-P2SH witness v1 program (which encodes a P2C-tweaked pubkey) + if (!(flags & SCRIPT_VERIFY_TAPROOT)) return set_success(serror); + if (stack.size() == 0) return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_WITNESS_EMPTY); + if (stack.size() >= 2 && !stack.back().empty() && stack.back()[0] == ANNEX_TAG) { + // Drop annex (this is non-standard; see IsWitnessStandard) + const valtype& annex = SpanPopBack(stack); + execdata.m_annex_hash = (CHashWriter(SER_GETHASH, 0) << annex).GetSHA256(); + execdata.m_annex_present = true; + } else { + execdata.m_annex_present = false; + } + execdata.m_annex_init = true; + if (stack.size() == 1) { + // Key path spending (stack size is 1 after removing optional annex) + if (!checker.CheckSchnorrSignature(stack.front(), program, SigVersion::TAPROOT, execdata, serror)) { + return false; // serror is set + } + return set_success(serror); + } else { + // Script path spending (stack size is >1 after removing optional annex) + const valtype& control = SpanPopBack(stack); + const valtype& script_bytes = SpanPopBack(stack); + exec_script = CScript(script_bytes.begin(), script_bytes.end()); + if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { + return set_error(serror, SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE); + } + if (!VerifyTaprootCommitment(control, program, exec_script, execdata.m_tapleaf_hash)) { + return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH); + } + execdata.m_tapleaf_hash_init = true; + if ((control[0] & TAPROOT_LEAF_MASK) == TAPROOT_LEAF_TAPSCRIPT) { + // Tapscript (leaf version 0xc0) + execdata.m_validation_weight_left = ::GetSerializeSize(witness.stack, PROTOCOL_VERSION) + VALIDATION_WEIGHT_OFFSET; + execdata.m_validation_weight_left_init = true; + return ExecuteWitnessScript(stack, exec_script, flags, SigVersion::TAPSCRIPT, checker, execdata, serror); + } + if (flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION) { + return set_error(serror, SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION); + } + return set_success(serror); + } } else { if (flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) { return set_error(serror, SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM); } - // Higher version witness scripts return true for future softfork compatibility + // Other version/size/p2sh combinations return true for future softfork compatibility return true; } // There is intentionally no return statement here, to be able to use "control reaches end of non-void function" warnings to detect gaps in the logic above. 
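The commitment check implemented by VerifyTaprootCommitment above can be summarized with a minimal
sketch (illustrative only, not part of the patch): it assumes the file-local tagged-hash writers and
the XOnlyPubKey helpers introduced earlier in this diff are in scope, uses a hypothetical function
name, and covers only the degenerate single-leaf case (empty Merkle path).

static bool ExampleSingleLeafCommitment(const std::vector<unsigned char>& control,
                                        const std::vector<unsigned char>& program,
                                        const CScript& script)
{
    // Internal key P comes from control[1..32]; the output key Q is the 32-byte witness program.
    const XOnlyPubKey p{uint256(std::vector<unsigned char>(control.begin() + 1, control.begin() + TAPROOT_CONTROL_BASE_SIZE))};
    const XOnlyPubKey q{uint256(program)};
    // With no path nodes, the Merkle root is simply the tapleaf hash of (leaf version, script).
    const uint256 merkle_root = (CHashWriter(HASHER_TAPLEAF) << uint8_t(control[0] & TAPROOT_LEAF_MASK) << script).GetSHA256();
    // Tweak t = H_TapTweak(P || merkle_root); the spend commits correctly if Q = P + t*G, with the
    // output key's Y parity taken from the low bit of the control byte.
    const uint256 tweak = (CHashWriter(HASHER_TAPTWEAK) << MakeSpan(p) << merkle_root).GetSHA256();
    return q.CheckPayToContract(p, tweak, control[0] & 1);
}

The full function generalizes this by folding each 32-byte control-block node into the running hash
with HASHER_TAPBRANCH, sorting the two inputs lexicographically at every step.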
@@ -1601,7 +1974,7 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C // The scriptSig must be _exactly_ CScript(), otherwise we reintroduce malleability. return set_error(serror, SCRIPT_ERR_WITNESS_MALLEATED); } - if (!VerifyWitnessProgram(*witness, witnessversion, witnessprogram, flags, checker, serror)) { + if (!VerifyWitnessProgram(*witness, witnessversion, witnessprogram, flags, checker, serror, /* is_p2sh */ false)) { return false; } // Bypass the cleanstack check at the end. The actual stack is obviously not clean @@ -1646,7 +2019,7 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C // reintroduce malleability. return set_error(serror, SCRIPT_ERR_WITNESS_MALLEATED_P2SH); } - if (!VerifyWitnessProgram(*witness, witnessversion, witnessprogram, flags, checker, serror)) { + if (!VerifyWitnessProgram(*witness, witnessversion, witnessprogram, flags, checker, serror, /* is_p2sh */ true)) { return false; } // Bypass the cleanstack check at the end. The actual stack is obviously not clean diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 71f2436369..c0c2b012c6 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -7,14 +7,17 @@ #define BITCOIN_SCRIPT_INTERPRETER_H #include <script/script_error.h> +#include <span.h> #include <primitives/transaction.h> #include <vector> #include <stdint.h> class CPubKey; +class XOnlyPubKey; class CScript; class CTransaction; +class CTxOut; class uint256; /** Signature hash types/flags */ @@ -24,6 +27,10 @@ enum SIGHASH_NONE = 2, SIGHASH_SINGLE = 3, SIGHASH_ANYONECANPAY = 0x80, + + SIGHASH_DEFAULT = 0, //!< Taproot only; implied when sighash byte is missing, and equivalent to SIGHASH_ALL + SIGHASH_OUTPUT_MASK = 3, + SIGHASH_INPUT_MASK = 0x80, }; /** Script verification flags. @@ -79,6 +86,8 @@ enum // "Exactly one stack element must remain, and when interpreted as a boolean, it must be true". // (BIP62 rule 6) // Note: CLEANSTACK should never be used without P2SH or WITNESS. + // Note: WITNESS_V0 and TAPSCRIPT script execution have behavior similar to CLEANSTACK as part of their + // consensus rules. It is automatic there and does not need this flag. SCRIPT_VERIFY_CLEANSTACK = (1U << 8), // Verify CHECKLOCKTIMEVERIFY @@ -101,6 +110,8 @@ enum // Segwit script only: Require the argument of OP_IF/NOTIF to be exactly 0x01 or empty vector // + // Note: TAPSCRIPT script execution has behavior similar to MINIMALIF as part of its consensus + // rules. It is automatic there and does not depend on this flag. SCRIPT_VERIFY_MINIMALIF = (1U << 13), // Signature(s) must be empty vector if a CHECK(MULTI)SIG operation failed @@ -114,19 +125,49 @@ enum // Making OP_CODESEPARATOR and FindAndDelete fail any non-segwit scripts // SCRIPT_VERIFY_CONST_SCRIPTCODE = (1U << 16), + + // Taproot/Tapscript validation (BIPs 341 & 342) + // + SCRIPT_VERIFY_TAPROOT = (1U << 17), + + // Making unknown Taproot leaf versions non-standard + // + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION = (1U << 18), + + // Making unknown OP_SUCCESS non-standard + SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS = (1U << 19), + + // Making unknown public key versions (in BIP 342 scripts) non-standard + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE = (1U << 20), }; bool CheckSignatureEncoding(const std::vector<unsigned char> &vchSig, unsigned int flags, ScriptError* serror); struct PrecomputedTransactionData { + // BIP341 precomputed data. 
+ // These are single-SHA256, see https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-15. + uint256 m_prevouts_single_hash; + uint256 m_sequences_single_hash; + uint256 m_outputs_single_hash; + uint256 m_spent_amounts_single_hash; + uint256 m_spent_scripts_single_hash; + //! Whether the 5 fields above are initialized. + bool m_bip341_taproot_ready = false; + + // BIP143 precomputed data (double-SHA256). uint256 hashPrevouts, hashSequence, hashOutputs; - bool m_ready = false; + //! Whether the 3 fields above are initialized. + bool m_bip143_segwit_ready = false; + + std::vector<CTxOut> m_spent_outputs; + //! Whether m_spent_outputs is initialized. + bool m_spent_outputs_ready = false; PrecomputedTransactionData() = default; template <class T> - void Init(const T& tx); + void Init(const T& tx, std::vector<CTxOut>&& spent_outputs); template <class T> explicit PrecomputedTransactionData(const T& tx); @@ -134,13 +175,48 @@ struct PrecomputedTransactionData enum class SigVersion { - BASE = 0, - WITNESS_V0 = 1, + BASE = 0, //!< Bare scripts and BIP16 P2SH-wrapped redeemscripts + WITNESS_V0 = 1, //!< Witness v0 (P2WPKH and P2WSH); see BIP 141 + TAPROOT = 2, //!< Witness v1 with 32-byte program, not BIP16 P2SH-wrapped, key path spending; see BIP 341 + TAPSCRIPT = 3, //!< Witness v1 with 32-byte program, not BIP16 P2SH-wrapped, script path spending, leaf version 0xc0; see BIP 342 +}; + +struct ScriptExecutionData +{ + //! Whether m_tapleaf_hash is initialized. + bool m_tapleaf_hash_init = false; + //! The tapleaf hash. + uint256 m_tapleaf_hash; + + //! Whether m_codeseparator_pos is initialized. + bool m_codeseparator_pos_init = false; + //! Opcode position of the last executed OP_CODESEPARATOR (or 0xFFFFFFFF if none executed). + uint32_t m_codeseparator_pos; + + //! Whether m_annex_present and (when needed) m_annex_hash are initialized. + bool m_annex_init = false; + //! Whether an annex is present. + bool m_annex_present; + //! Hash of the annex data. + uint256 m_annex_hash; + + //! Whether m_validation_weight_left is initialized. + bool m_validation_weight_left_init = false; + //! How much validation weight is left (decremented for every successful non-empty signature check). 
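    //! Annotation (not part of the patch): VerifyWitnessProgram seeds this budget with the serialized
    //! witness size plus VALIDATION_WEIGHT_OFFSET, and EvalChecksigTapscript deducts
    //! VALIDATION_WEIGHT_PER_SIGOP_PASSED for every non-empty signature; a negative balance fails the
    //! script with SCRIPT_ERR_TAPSCRIPT_VALIDATION_WEIGHT.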
+ int64_t m_validation_weight_left; }; /** Signature hash sizes */ static constexpr size_t WITNESS_V0_SCRIPTHASH_SIZE = 32; static constexpr size_t WITNESS_V0_KEYHASH_SIZE = 20; +static constexpr size_t WITNESS_V1_TAPROOT_SIZE = 32; + +static constexpr uint8_t TAPROOT_LEAF_MASK = 0xfe; +static constexpr uint8_t TAPROOT_LEAF_TAPSCRIPT = 0xc0; +static constexpr size_t TAPROOT_CONTROL_BASE_SIZE = 33; +static constexpr size_t TAPROOT_CONTROL_NODE_SIZE = 32; +static constexpr size_t TAPROOT_CONTROL_MAX_NODE_COUNT = 128; +static constexpr size_t TAPROOT_CONTROL_MAX_SIZE = TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT; template <class T> uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr); @@ -148,7 +224,12 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn class BaseSignatureChecker { public: - virtual bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const + virtual bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const + { + return false; + } + + virtual bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const { return false; } @@ -176,12 +257,14 @@ private: const PrecomputedTransactionData* txdata; protected: - virtual bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const; + virtual bool VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const; + virtual bool VerifySchnorrSignature(Span<const unsigned char> sig, const XOnlyPubKey& pubkey, const uint256& sighash) const; public: GenericTransactionSignatureChecker(const T* txToIn, unsigned int nInIn, const CAmount& amountIn) : txTo(txToIn), nIn(nInIn), amount(amountIn), txdata(nullptr) {} GenericTransactionSignatureChecker(const T* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData& txdataIn) : txTo(txToIn), nIn(nInIn), amount(amountIn), txdata(&txdataIn) {} - bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override; + bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override; + bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override; bool CheckLockTime(const CScriptNum& nLockTime) const override; bool CheckSequence(const CScriptNum& nSequence) const override; }; @@ -189,6 +272,7 @@ public: using TransactionSignatureChecker = GenericTransactionSignatureChecker<CTransaction>; using MutableTransactionSignatureChecker = GenericTransactionSignatureChecker<CMutableTransaction>; +bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptExecutionData& 
execdata, ScriptError* error = nullptr); bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* error = nullptr); bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const CScriptWitness* witness, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror = nullptr); diff --git a/src/script/script.cpp b/src/script/script.cpp index 92c6fe7785..f31472e42d 100644 --- a/src/script/script.cpp +++ b/src/script/script.cpp @@ -140,6 +140,9 @@ std::string GetOpName(opcodetype opcode) case OP_NOP9 : return "OP_NOP9"; case OP_NOP10 : return "OP_NOP10"; + // Opcode added by BIP 342 (Tapscript) + case OP_CHECKSIGADD : return "OP_CHECKSIGADD"; + case OP_INVALIDOPCODE : return "OP_INVALIDOPCODE"; default: @@ -328,3 +331,11 @@ bool GetScriptOp(CScriptBase::const_iterator& pc, CScriptBase::const_iterator en opcodeRet = static_cast<opcodetype>(opcode); return true; } + +bool IsOpSuccess(const opcodetype& opcode) +{ + return opcode == 80 || opcode == 98 || (opcode >= 126 && opcode <= 129) || + (opcode >= 131 && opcode <= 134) || (opcode >= 137 && opcode <= 138) || + (opcode >= 141 && opcode <= 142) || (opcode >= 149 && opcode <= 153) || + (opcode >= 187 && opcode <= 254); +} diff --git a/src/script/script.h b/src/script/script.h index c1f2b66921..974cde4984 100644 --- a/src/script/script.h +++ b/src/script/script.h @@ -44,6 +44,17 @@ static const unsigned int LOCKTIME_THRESHOLD = 500000000; // Tue Nov 5 00:53:20 // SEQUENCE_FINAL). static const uint32_t LOCKTIME_MAX = 0xFFFFFFFFU; +// Tag for input annex. If there are at least two witness elements for a transaction input, +// and the first byte of the last element is 0x50, this last element is called annex, and +// has meanings independent of the script +static constexpr unsigned int ANNEX_TAG = 0x50; + +// Validation weight per passing signature (Tapscript only, see BIP 342). +static constexpr uint64_t VALIDATION_WEIGHT_PER_SIGOP_PASSED = 50; + +// How much weight budget is added to the witness size (Tapscript only, see BIP 342). +static constexpr uint64_t VALIDATION_WEIGHT_OFFSET = 50; + template <typename T> std::vector<unsigned char> ToByteVector(const T& in) { @@ -187,6 +198,9 @@ enum opcodetype OP_NOP9 = 0xb8, OP_NOP10 = 0xb9, + // Opcode added by BIP 342 (Tapscript) + OP_CHECKSIGADD = 0xba, + OP_INVALIDOPCODE = 0xff, }; @@ -555,4 +569,7 @@ struct CScriptWitness std::string ToString() const; }; +/** Test for OP_SUCCESSx opcodes as defined by BIP342. 
*/ +bool IsOpSuccess(const opcodetype& opcode); + #endif // BITCOIN_SCRIPT_SCRIPT_H diff --git a/src/script/script_error.cpp b/src/script/script_error.cpp index 3b383393f9..fadc04262c 100644 --- a/src/script/script_error.cpp +++ b/src/script/script_error.cpp @@ -73,6 +73,12 @@ std::string ScriptErrorString(const ScriptError serror) return "NOPx reserved for soft-fork upgrades"; case SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM: return "Witness version reserved for soft-fork upgrades"; + case SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION: + return "Taproot version reserved for soft-fork upgrades"; + case SCRIPT_ERR_DISCOURAGE_OP_SUCCESS: + return "OP_SUCCESSx reserved for soft-fork upgrades"; + case SCRIPT_ERR_DISCOURAGE_UPGRADABLE_PUBKEYTYPE: + return "Public key version reserved for soft-fork upgrades"; case SCRIPT_ERR_PUBKEYTYPE: return "Public key is neither compressed or uncompressed"; case SCRIPT_ERR_CLEANSTACK: @@ -91,6 +97,20 @@ std::string ScriptErrorString(const ScriptError serror) return "Witness provided for non-witness script"; case SCRIPT_ERR_WITNESS_PUBKEYTYPE: return "Using non-compressed keys in segwit"; + case SCRIPT_ERR_SCHNORR_SIG_SIZE: + return "Invalid Schnorr signature size"; + case SCRIPT_ERR_SCHNORR_SIG_HASHTYPE: + return "Invalid Schnorr signature hash type"; + case SCRIPT_ERR_SCHNORR_SIG: + return "Invalid Schnorr signature"; + case SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE: + return "Invalid Taproot control block size"; + case SCRIPT_ERR_TAPSCRIPT_VALIDATION_WEIGHT: + return "Too much signature validation relative to witness weight"; + case SCRIPT_ERR_TAPSCRIPT_CHECKMULTISIG: + return "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"; + case SCRIPT_ERR_TAPSCRIPT_MINIMALIF: + return "OP_IF/NOTIF argument must be minimal in tapscript"; case SCRIPT_ERR_OP_CODESEPARATOR: return "Using OP_CODESEPARATOR in non-witness script"; case SCRIPT_ERR_SIG_FINDANDDELETE: diff --git a/src/script/script_error.h b/src/script/script_error.h index 2978c147e1..b071681613 100644 --- a/src/script/script_error.h +++ b/src/script/script_error.h @@ -56,6 +56,9 @@ typedef enum ScriptError_t /* softfork safeness */ SCRIPT_ERR_DISCOURAGE_UPGRADABLE_NOPS, SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM, + SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION, + SCRIPT_ERR_DISCOURAGE_OP_SUCCESS, + SCRIPT_ERR_DISCOURAGE_UPGRADABLE_PUBKEYTYPE, /* segregated witness */ SCRIPT_ERR_WITNESS_PROGRAM_WRONG_LENGTH, @@ -66,6 +69,15 @@ typedef enum ScriptError_t SCRIPT_ERR_WITNESS_UNEXPECTED, SCRIPT_ERR_WITNESS_PUBKEYTYPE, + /* Taproot */ + SCRIPT_ERR_SCHNORR_SIG_SIZE, + SCRIPT_ERR_SCHNORR_SIG_HASHTYPE, + SCRIPT_ERR_SCHNORR_SIG, + SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE, + SCRIPT_ERR_TAPSCRIPT_VALIDATION_WEIGHT, + SCRIPT_ERR_TAPSCRIPT_CHECKMULTISIG, + SCRIPT_ERR_TAPSCRIPT_MINIMALIF, + /* Constant scriptCode */ SCRIPT_ERR_OP_CODESEPARATOR, SCRIPT_ERR_SIG_FINDANDDELETE, diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp index aaecab1ef2..c1786140de 100644 --- a/src/script/sigcache.cpp +++ b/src/script/sigcache.cpp @@ -22,8 +22,9 @@ namespace { class CSignatureCache { private: - //! Entries are SHA256(nonce || signature hash || public key || signature): - CSHA256 m_salted_hasher; + //! 
Entries are SHA256(nonce || 'E' or 'S' || 31 zero bytes || signature hash || public key || signature): + CSHA256 m_salted_hasher_ecdsa; + CSHA256 m_salted_hasher_schnorr; typedef CuckooCache::cache<uint256, SignatureCacheHasher> map_type; map_type setValid; boost::shared_mutex cs_sigcache; @@ -34,18 +35,30 @@ public: uint256 nonce = GetRandHash(); // We want the nonce to be 64 bytes long to force the hasher to process // this chunk, which makes later hash computations more efficient. We - // just write our 32-byte entropy twice to fill the 64 bytes. - m_salted_hasher.Write(nonce.begin(), 32); - m_salted_hasher.Write(nonce.begin(), 32); + // just write our 32-byte entropy, and then pad with 'E' for ECDSA and + // 'S' for Schnorr (followed by 0 bytes). + static constexpr unsigned char PADDING_ECDSA[32] = {'E'}; + static constexpr unsigned char PADDING_SCHNORR[32] = {'S'}; + m_salted_hasher_ecdsa.Write(nonce.begin(), 32); + m_salted_hasher_ecdsa.Write(PADDING_ECDSA, 32); + m_salted_hasher_schnorr.Write(nonce.begin(), 32); + m_salted_hasher_schnorr.Write(PADDING_SCHNORR, 32); } void - ComputeEntry(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) + ComputeEntryECDSA(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) const { - CSHA256 hasher = m_salted_hasher; + CSHA256 hasher = m_salted_hasher_ecdsa; hasher.Write(hash.begin(), 32).Write(&pubkey[0], pubkey.size()).Write(&vchSig[0], vchSig.size()).Finalize(entry.begin()); } + void + ComputeEntrySchnorr(uint256& entry, const uint256 &hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey) const + { + CSHA256 hasher = m_salted_hasher_schnorr; + hasher.Write(hash.begin(), 32).Write(&pubkey[0], pubkey.size()).Write(sig.data(), sig.size()).Finalize(entry.begin()); + } + bool Get(const uint256& entry, const bool erase) { @@ -85,15 +98,25 @@ void InitSignatureCache() (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems); } -bool CachingTransactionSignatureChecker::VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const +bool CachingTransactionSignatureChecker::VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const { uint256 entry; - signatureCache.ComputeEntry(entry, sighash, vchSig, pubkey); + signatureCache.ComputeEntryECDSA(entry, sighash, vchSig, pubkey); if (signatureCache.Get(entry, !store)) return true; - if (!TransactionSignatureChecker::VerifySignature(vchSig, pubkey, sighash)) + if (!TransactionSignatureChecker::VerifyECDSASignature(vchSig, pubkey, sighash)) return false; if (store) signatureCache.Set(entry); return true; } + +bool CachingTransactionSignatureChecker::VerifySchnorrSignature(Span<const unsigned char> sig, const XOnlyPubKey& pubkey, const uint256& sighash) const +{ + uint256 entry; + signatureCache.ComputeEntrySchnorr(entry, sighash, sig, pubkey); + if (signatureCache.Get(entry, !store)) return true; + if (!TransactionSignatureChecker::VerifySchnorrSignature(sig, pubkey, sighash)) return false; + if (store) signatureCache.Set(entry); + return true; +} diff --git a/src/script/sigcache.h b/src/script/sigcache.h index 807b61b542..00534f9758 100644 --- a/src/script/sigcache.h +++ b/src/script/sigcache.h @@ -7,6 +7,7 @@ #define BITCOIN_SCRIPT_SIGCACHE_H #include <script/interpreter.h> +#include <span.h> #include <vector> @@ -48,7 +49,8 @@ private: public: CachingTransactionSignatureChecker(const 
CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, bool storeIn, PrecomputedTransactionData& txdataIn) : TransactionSignatureChecker(txToIn, nInIn, amountIn, txdataIn), store(storeIn) {} - bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const override; + bool VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const override; + bool VerifySchnorrSignature(Span<const unsigned char> sig, const XOnlyPubKey& pubkey, const uint256& sighash) const override; }; void InitSignatureCache(); diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 9b3f94f14d..0e6864d547 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -111,6 +111,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: + case TxoutType::WITNESS_V1_TAPROOT: return false; case TxoutType::PUBKEY: if (!CreateSig(creator, sigdata, provider, sig, CPubKey(vSolutions[0]), scriptPubKey, sigversion)) return false; @@ -260,9 +261,9 @@ private: public: SignatureExtractorChecker(SignatureData& sigdata, BaseSignatureChecker& checker) : sigdata(sigdata), checker(checker) {} - bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override + bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { - if (checker.CheckSig(scriptSig, vchPubKey, scriptCode, sigversion)) { + if (checker.CheckECDSASignature(scriptSig, vchPubKey, scriptCode, sigversion)) { CPubKey pubkey(vchPubKey); sigdata.signatures.emplace(pubkey.GetID(), SigPair(pubkey, scriptSig)); return true; @@ -339,7 +340,7 @@ SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nI for (unsigned int i = last_success_key; i < num_pubkeys; ++i) { const valtype& pubkey = solutions[i+1]; // We either have a signature for this pubkey, or we have found a signature and it is valid - if (data.signatures.count(CPubKey(pubkey).GetID()) || extractor_checker.CheckSig(sig, pubkey, next_script, sigversion)) { + if (data.signatures.count(CPubKey(pubkey).GetID()) || extractor_checker.CheckECDSASignature(sig, pubkey, next_script, sigversion)) { last_success_key = i + 1; break; } @@ -400,7 +401,7 @@ class DummySignatureChecker final : public BaseSignatureChecker { public: DummySignatureChecker() {} - bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; } + bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; } }; const DummySignatureChecker DUMMY_CHECKER; diff --git a/src/script/sign.h b/src/script/sign.h index b77d26c0d7..a1cfe1574d 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -11,6 +11,7 @@ #include <pubkey.h> #include <script/interpreter.h> #include <script/keyorigin.h> +#include <span.h> #include <streams.h> class CKey; diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 96a3d311a6..f2f81664f6 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -55,6 +55,7 @@ std::string 
GetTxnOutputType(TxoutType t) case TxoutType::NULL_DATA: return "nulldata"; case TxoutType::WITNESS_V0_KEYHASH: return "witness_v0_keyhash"; case TxoutType::WITNESS_V0_SCRIPTHASH: return "witness_v0_scripthash"; + case TxoutType::WITNESS_V1_TAPROOT: return "witness_v1_taproot"; case TxoutType::WITNESS_UNKNOWN: return "witness_unknown"; } // no default case, so the compiler can warn about missing cases assert(false); @@ -130,6 +131,11 @@ TxoutType Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned c vSolutionsRet.push_back(witnessprogram); return TxoutType::WITNESS_V0_SCRIPTHASH; } + if (witnessversion == 1 && witnessprogram.size() == WITNESS_V1_TAPROOT_SIZE) { + vSolutionsRet.push_back(std::vector<unsigned char>{(unsigned char)witnessversion}); + vSolutionsRet.push_back(std::move(witnessprogram)); + return TxoutType::WITNESS_V1_TAPROOT; + } if (witnessversion != 0) { vSolutionsRet.push_back(std::vector<unsigned char>{(unsigned char)witnessversion}); vSolutionsRet.push_back(std::move(witnessprogram)); @@ -203,7 +209,7 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) std::copy(vSolutions[0].begin(), vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TxoutType::WITNESS_UNKNOWN) { + } else if (whichType == TxoutType::WITNESS_UNKNOWN || whichType == TxoutType::WITNESS_V1_TAPROOT) { WitnessUnknown unk; unk.version = vSolutions[0][0]; std::copy(vSolutions[1].begin(), vSolutions[1].end(), unk.program); diff --git a/src/script/standard.h b/src/script/standard.h index 6dbcd04968..721203385e 100644 --- a/src/script/standard.h +++ b/src/script/standard.h @@ -129,6 +129,7 @@ enum class TxoutType { NULL_DATA, //!< unspendable OP_RETURN script that carries data WITNESS_V0_SCRIPTHASH, WITNESS_V0_KEYHASH, + WITNESS_V1_TAPROOT, WITNESS_UNKNOWN, //!< Only for Witness versions not already defined above }; @@ -206,7 +207,8 @@ struct WitnessUnknown * * ScriptHash: TxoutType::SCRIPTHASH destination (P2SH) * * WitnessV0ScriptHash: TxoutType::WITNESS_V0_SCRIPTHASH destination (P2WSH) * * WitnessV0KeyHash: TxoutType::WITNESS_V0_KEYHASH destination (P2WPKH) - * * WitnessUnknown: TxoutType::WITNESS_UNKNOWN destination (P2W???) + * * WitnessUnknown: TxoutType::WITNESS_UNKNOWN/WITNESS_V1_TAPROOT destination (P2W???) 
+ * (taproot outputs do not require their own type as long as no wallet support exists) * A CTxDestination is the internal data type encoded in a bitcoin address */ typedef boost::variant<CNoDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, WitnessUnknown> CTxDestination; diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml index e1a88c4051..bcc8c210f5 100644 --- a/src/secp256k1/.travis.yml +++ b/src/secp256k1/.travis.yml @@ -17,33 +17,29 @@ compiler: - gcc env: global: - - WIDEMUL=auto BIGNUM=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2 + - WIDEMUL=auto BIGNUM=auto STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check WITH_VALGRIND=yes RUN_VALGRIND=no EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2 matrix: - WIDEMUL=int64 RECOVERY=yes - WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - WIDEMUL=int64 ENDOMORPHISM=yes - WIDEMUL=int128 - WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - WIDEMUL=int128 ENDOMORPHISM=yes - - WIDEMUL=int128 ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes + - WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes - WIDEMUL=int128 ASM=x86_64 - - WIDEMUL=int128 ENDOMORPHISM=yes ASM=x86_64 - BIGNUM=no - - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes + - BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes - BIGNUM=no STATICPRECOMPUTATION=no - - BUILD=distcheck CTIMETEST= BENCH= + - BUILD=distcheck WITH_VALGRIND=no CTIMETEST=no BENCH=no - CPPFLAGS=-DDETERMINISTIC - - CFLAGS=-O0 CTIMETEST= + - CFLAGS=-O0 CTIMETEST=no - ECMULTGENPRECISION=2 - ECMULTGENPRECISION=8 - - VALGRIND=yes ENDOMORPHISM=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD= - - VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD= + - RUN_VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" BUILD= matrix: fast_finish: true include: - compiler: clang os: linux - env: HOST=i686-linux-gnu ENDOMORPHISM=yes + env: HOST=i686-linux-gnu addons: apt: packages: @@ -63,7 +59,7 @@ matrix: - libtool-bin - libc6-dbg:i386 - compiler: gcc - env: HOST=i686-linux-gnu ENDOMORPHISM=yes + env: HOST=i686-linux-gnu os: linux addons: apt: diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md index 434178b372..2602475787 100644 --- a/src/secp256k1/README.md +++ b/src/secp256k1/README.md @@ -48,7 +48,7 @@ Implementation details * Use wNAF notation for point multiplicands. * Use a much larger window for multiples of G, using precomputed multiples. * Use Shamir's trick to do the multiplication with the public key and the generator simultaneously. - * Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. + * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. * Point multiplication for signing * Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions. 
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains) diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac index 6fe8984f4d..5a078e6c81 100644 --- a/src/secp256k1/configure.ac +++ b/src/secp256k1/configure.ac @@ -67,7 +67,7 @@ esac CFLAGS="-W $CFLAGS" -warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings" +warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings" saved_CFLAGS="$CFLAGS" CFLAGS="$warn_CFLAGS $CFLAGS" AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}]) @@ -116,11 +116,6 @@ AC_ARG_ENABLE(exhaustive_tests, [use_exhaustive_tests=$enableval], [use_exhaustive_tests=yes]) -AC_ARG_ENABLE(endomorphism, - AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]), - [use_endomorphism=$enableval], - [use_endomorphism=no]) - AC_ARG_ENABLE(ecmult_static_precomputation, AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]), [use_ecmult_static_precomputation=$enableval], @@ -164,8 +159,7 @@ AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto], AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto], [window size for ecmult precomputation for verification, specified as integer in range [2..24].] [Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.] -[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.] -[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.] +[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.] ["auto" is a reasonable setting for desktop machines (currently 15). 
[default=auto]] )], [req_ecmult_window=$withval], [req_ecmult_window=auto]) @@ -178,7 +172,21 @@ AC_ARG_WITH([ecmult-gen-precision], [AS_HELP_STRING([--with-ecmult-gen-precision )], [req_ecmult_gen_precision=$withval], [req_ecmult_gen_precision=auto]) -AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [enable_valgrind=no], []) +AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto], +[Build with extra checks for running inside Valgrind [default=auto]] +)], +[req_valgrind=$withval], [req_valgrind=auto]) + +if test x"$req_valgrind" = x"no"; then + enable_valgrind=no +else + AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [ + if test x"$req_valgrind" = x"yes"; then + AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available]) + fi + enable_valgrind=no + ], []) +fi AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"]) if test x"$enable_coverage" = x"yes"; then @@ -415,10 +423,6 @@ if test x"$set_bignum" = x"gmp"; then SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS" fi -if test x"$use_endomorphism" = x"yes"; then - AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization]) -fi - if test x"$set_precomp" = x"yes"; then AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table]) fi @@ -500,7 +504,6 @@ AC_OUTPUT echo echo "Build Options:" -echo " with endomorphism = $use_endomorphism" echo " with ecmult precomp = $set_precomp" echo " with external callbacks = $use_external_default_callbacks" echo " with benchmarks = $use_benchmark" diff --git a/src/secp256k1/contrib/travis.sh b/src/secp256k1/contrib/travis.sh index b0b55b44b8..24cc9315cb 100755 --- a/src/secp256k1/contrib/travis.sh +++ b/src/secp256k1/contrib/travis.sh @@ -13,27 +13,28 @@ then fi ./configure \ - --enable-experimental="$EXPERIMENTAL" --enable-endomorphism="$ENDOMORPHISM" \ + --enable-experimental="$EXPERIMENTAL" \ --with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \ --enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \ --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \ --enable-module-schnorrsig="$SCHNORRSIG" \ + --with-valgrind="$WITH_VALGRIND" \ --host="$HOST" $EXTRAFLAGS if [ -n "$BUILD" ] then make -j2 "$BUILD" fi -if [ -n "$VALGRIND" ] +if [ "$RUN_VALGRIND" = "yes" ] then make -j2 # the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (http://valgrind.org/docs/manual/manual-core.html) valgrind --error-exitcode=42 ./tests 16 valgrind --error-exitcode=42 ./exhaustive_tests fi -if [ -n "$BENCH" ] +if [ "$BENCH" = "yes" ] then - if [ -n "$VALGRIND" ] + if [ "$RUN_VALGRIND" = "yes" ] then # Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool EXEC='./libtool --mode=execute valgrind --error-exitcode=42' @@ -56,8 +57,12 @@ then then $EXEC ./bench_ecdh >> bench.log 2>&1 fi + if [ "$SCHNORRSIG" = "yes" ] + then + $EXEC ./bench_schnorrsig >> bench.log 2>&1 + fi fi -if [ -n "$CTIMETEST" ] +if [ "$CTIMETEST" = "yes" ] then ./libtool --mode=execute valgrind --error-exitcode=42 ./valgrind_ctime_test > valgrind_ctime_test.log 2>&1 fi diff --git a/src/secp256k1/sage/gen_exhaustive_groups.sage b/src/secp256k1/sage/gen_exhaustive_groups.sage new file mode 100644 index 0000000000..3c3c984811 --- /dev/null +++ b/src/secp256k1/sage/gen_exhaustive_groups.sage @@ -0,0 
+1,129 @@ +# Define field size and field +P = 2^256 - 2^32 - 977 +F = GF(P) +BETA = F(0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee) + +assert(BETA != F(1) and BETA^3 == F(1)) + +orders_done = set() +results = {} +first = True +for b in range(1, P): + # There are only 6 curves (up to isomorphism) of the form y^2=x^3+B. Stop once we have tried all. + if len(orders_done) == 6: + break + + E = EllipticCurve(F, [0, b]) + print("Analyzing curve y^2 = x^3 + %i" % b) + n = E.order() + # Skip curves with an order we've already tried + if n in orders_done: + print("- Isomorphic to earlier curve") + continue + orders_done.add(n) + # Skip curves isomorphic to the real secp256k1 + if n.is_pseudoprime(): + print(" - Isomorphic to secp256k1") + continue + + print("- Finding subgroups") + + # Find what prime subgroups exist + for f, _ in n.factor(): + print("- Analyzing subgroup of order %i" % f) + # Skip subgroups of order >1000 + if f < 4 or f > 1000: + print(" - Bad size") + continue + + # Iterate over X coordinates until we find one that is on the curve, has order f, + # and for which curve isomorphism exists that maps it to X coordinate 1. + for x in range(1, P): + # Skip X coordinates not on the curve, and construct the full point otherwise. + if not E.is_x_coord(x): + continue + G = E.lift_x(F(x)) + + print(" - Analyzing (multiples of) point with X=%i" % x) + + # Skip points whose order is not a multiple of f. Project the point to have + # order f otherwise. + if (G.order() % f): + print(" - Bad order") + continue + G = G * (G.order() // f) + + # Find lambda for endomorphism. Skip if none can be found. + lam = None + for l in Integers(f)(1).nth_root(3, all=True): + if int(l)*G == E(BETA*G[0], G[1]): + lam = int(l) + break + if lam is None: + print(" - No endomorphism for this subgroup") + break + + # Now look for an isomorphism of the curve that gives this point an X + # coordinate equal to 1. + # If (x,y) is on y^2 = x^3 + b, then (a^2*x, a^3*y) is on y^2 = x^3 + a^6*b. + # So look for m=a^2=1/x. + m = F(1)/G[0] + if not m.is_square(): + print(" - No curve isomorphism maps it to a point with X=1") + continue + a = m.sqrt() + rb = a^6*b + RE = EllipticCurve(F, [0, rb]) + + # Use as generator twice the image of G under the above isormorphism. + # This means that generator*(1/2 mod f) will have X coordinate 1. + RG = RE(1, a^3*G[1]) * 2 + # And even Y coordinate. + if int(RG[1]) % 2: + RG = -RG + assert(RG.order() == f) + assert(lam*RG == RE(BETA*RG[0], RG[1])) + + # We have found curve RE:y^2=x^3+rb with generator RG of order f. 
Remember it + results[f] = {"b": rb, "G": RG, "lambda": lam} + print(" - Found solution") + break + + print("") + +print("") +print("") +print("/* To be put in src/group_impl.h: */") +first = True +for f in sorted(results.keys()): + b = results[f]["b"] + G = results[f]["G"] + print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f)) + first = False + print("static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(") + print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) + print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) + print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) + print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) + print(");") + print("static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(") + print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4))) + print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8))) + print(");") +print("# else") +print("# error No known generator for the specified exhaustive test group order.") +print("# endif") + +print("") +print("") +print("/* To be put in src/scalar_impl.h: */") +first = True +for f in sorted(results.keys()): + lam = results[f]["lambda"] + print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f)) + first = False + print("# define EXHAUSTIVE_TEST_LAMBDA %i" % lam) +print("# else") +print("# error No known lambda for the specified exhaustive test group order.") +print("# endif") +print("") diff --git a/src/secp256k1/src/assumptions.h b/src/secp256k1/src/assumptions.h index f9d4e8e793..77204de2b8 100644 --- a/src/secp256k1/src/assumptions.h +++ b/src/secp256k1/src/assumptions.h @@ -7,6 +7,8 @@ #ifndef SECP256K1_ASSUMPTIONS_H #define SECP256K1_ASSUMPTIONS_H +#include <limits.h> + #include "util.h" /* This library, like most software, relies on a number of compiler implementation defined (but not undefined) @@ -19,7 +21,11 @@ struct secp256k1_assumption_checker { allowed. */ int dummy_array[( /* Bytes are 8 bits. */ - CHAR_BIT == 8 && + (CHAR_BIT == 8) && + + /* No integer promotion for uint32_t. This ensures that we can multiply uintXX_t values where XX >= 32 + without signed overflow, which would be undefined behaviour. */ + (UINT_MAX <= UINT32_MAX) && /* Conversions from unsigned to signed outside of the bounds of the signed type are implementation-defined. 
Verify that they function as reinterpreting the lower diff --git a/src/secp256k1/src/basic-config.h b/src/secp256k1/src/basic-config.h index 83dbe6f25b..b0d82e89b4 100644 --- a/src/secp256k1/src/basic-config.h +++ b/src/secp256k1/src/basic-config.h @@ -11,7 +11,6 @@ #undef USE_ASM_X86_64 #undef USE_ECMULT_STATIC_PRECOMPUTATION -#undef USE_ENDOMORPHISM #undef USE_EXTERNAL_ASM #undef USE_EXTERNAL_DEFAULT_CALLBACKS #undef USE_FIELD_INV_BUILTIN diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c index 9687fe4482..5f2b7a9759 100644 --- a/src/secp256k1/src/bench_internal.c +++ b/src/secp256k1/src/bench_internal.c @@ -117,7 +117,6 @@ void bench_scalar_mul(void* arg, int iters) { } } -#ifdef USE_ENDOMORPHISM void bench_scalar_split(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; @@ -128,7 +127,6 @@ void bench_scalar_split(void* arg, int iters) { } CHECK(j <= iters); } -#endif void bench_scalar_inverse(void* arg, int iters) { int i, j = 0; @@ -397,9 +395,7 @@ int main(int argc, char **argv) { if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10); -#ifdef USE_ENDOMORPHISM if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters); -#endif if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000); diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h index c9b198239d..09e8146414 100644 --- a/src/secp256k1/src/ecmult.h +++ b/src/secp256k1/src/ecmult.h @@ -15,9 +15,7 @@ typedef struct { /* For accelerating the computation of a*P + b*G: */ secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */ -#ifdef USE_ENDOMORPHISM secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ -#endif } secp256k1_ecmult_context; static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h index 55b61e4937..bb9511108b 100644 --- a/src/secp256k1/src/ecmult_const_impl.h +++ b/src/secp256k1/src/ecmult_const_impl.h @@ -140,19 +140,16 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons secp256k1_fe Z; int skew_1; -#ifdef USE_ENDOMORPHISM secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; int skew_lam; secp256k1_scalar q_1, q_lam; -#endif int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; int i; /* build wnaf representation for q. 
*/ int rsize = size; -#ifdef USE_ENDOMORPHISM if (size > 128) { rsize = 128; /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ @@ -160,12 +157,9 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); } else -#endif { skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); -#ifdef USE_ENDOMORPHISM skew_lam = 0; -#endif } /* Calculate odd multiples of a. @@ -179,14 +173,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_fe_normalize_weak(&pre_a[i].y); } -#ifdef USE_ENDOMORPHISM if (size > 128) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); } } -#endif /* first loop iteration (separated out so we can directly set r, rather * than having it start at infinity, get doubled several times, then have @@ -195,14 +187,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); secp256k1_gej_set_ge(r, &tmpa); -#ifdef USE_ENDOMORPHISM if (size > 128) { i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); secp256k1_gej_add_ge(r, r, &tmpa); } -#endif /* remaining loop iterations */ for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) { int n; @@ -215,14 +205,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); VERIFY_CHECK(n != 0); secp256k1_gej_add_ge(r, r, &tmpa); -#ifdef USE_ENDOMORPHISM if (size > 128) { n = wnaf_lam[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); VERIFY_CHECK(n != 0); secp256k1_gej_add_ge(r, r, &tmpa); } -#endif } secp256k1_fe_mul(&r->z, &r->z, &Z); @@ -231,43 +219,35 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons /* Correct for wNAF skew */ secp256k1_ge correction = *a; secp256k1_ge_storage correction_1_stor; -#ifdef USE_ENDOMORPHISM secp256k1_ge_storage correction_lam_stor; -#endif secp256k1_ge_storage a2_stor; secp256k1_gej tmpj; secp256k1_gej_set_ge(&tmpj, &correction); secp256k1_gej_double_var(&tmpj, &tmpj, NULL); secp256k1_ge_set_gej(&correction, &tmpj); secp256k1_ge_to_storage(&correction_1_stor, a); -#ifdef USE_ENDOMORPHISM if (size > 128) { secp256k1_ge_to_storage(&correction_lam_stor, a); } -#endif secp256k1_ge_to_storage(&a2_stor, &correction); /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); -#ifdef USE_ENDOMORPHISM if (size > 128) { secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); } -#endif /* Apply the correction */ secp256k1_ge_from_storage(&correction, &correction_1_stor); secp256k1_ge_neg(&correction, &correction); secp256k1_gej_add_ge(r, r, &correction); -#ifdef USE_ENDOMORPHISM if (size > 128) { secp256k1_ge_from_storage(&correction, &correction_lam_stor); secp256k1_ge_neg(&correction, &correction); secp256k1_ge_mul_lambda(&correction, &correction); secp256k1_gej_add_ge(r, r, &correction); } -#endif } } diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h index f03fa9469d..057a69cf73 100644 --- 
a/src/secp256k1/src/ecmult_impl.h +++ b/src/secp256k1/src/ecmult_impl.h @@ -38,8 +38,8 @@ * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes, * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can * be larger due to platform-specific padding and alignment. - * If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM) - * two tables of this size are used instead of only one. + * Two tables of this size are used (due to the endomorphism + * optimization). */ # define WINDOW_G ECMULT_WINDOW_SIZE #endif @@ -59,11 +59,7 @@ # error Set ECMULT_WINDOW_SIZE to an integer in range [2..24]. #endif -#ifdef USE_ENDOMORPHISM - #define WNAF_BITS 128 -#else - #define WNAF_BITS 256 -#endif +#define WNAF_BITS 128 #define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w)) #define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w) @@ -77,17 +73,9 @@ #define PIPPENGER_MAX_BUCKET_WINDOW 12 /* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */ -#ifdef USE_ENDOMORPHISM - #define ECMULT_PIPPENGER_THRESHOLD 88 -#else - #define ECMULT_PIPPENGER_THRESHOLD 160 -#endif +#define ECMULT_PIPPENGER_THRESHOLD 88 -#ifdef USE_ENDOMORPHISM - #define ECMULT_MAX_POINTS_PER_BATCH 5000000 -#else - #define ECMULT_MAX_POINTS_PER_BATCH 10000000 -#endif +#define ECMULT_MAX_POINTS_PER_BATCH 5000000 /** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain * the values [1*a,3*a,...,(2*n-1)*a], so it space for n values. zr[0] will @@ -313,16 +301,12 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25 static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) -#ifdef USE_ENDOMORPHISM + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) -#endif ; static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { ctx->pre_g = NULL; -#ifdef USE_ENDOMORPHISM ctx->pre_g_128 = NULL; -#endif } static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) { @@ -347,7 +331,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void * /* precompute the tables with odd multiples */ secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); -#ifdef USE_ENDOMORPHISM { secp256k1_gej g_128j; int i; @@ -364,7 +347,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void * } secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); } -#endif } static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) { @@ -372,11 +354,9 @@ static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *d /* We cast to void* first to suppress a -Wcast-align warning. 
*/ dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); } -#ifdef USE_ENDOMORPHISM if (src->pre_g_128 != NULL) { dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); } -#endif } static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) { @@ -447,16 +427,11 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, } struct secp256k1_strauss_point_state { -#ifdef USE_ENDOMORPHISM secp256k1_scalar na_1, na_lam; - int wnaf_na_1[130]; - int wnaf_na_lam[130]; + int wnaf_na_1[129]; + int wnaf_na_lam[129]; int bits_na_1; int bits_na_lam; -#else - int wnaf_na[256]; - int bits_na; -#endif size_t input_pos; }; @@ -464,26 +439,19 @@ struct secp256k1_strauss_state { secp256k1_gej* prej; secp256k1_fe* zr; secp256k1_ge* pre_a; -#ifdef USE_ENDOMORPHISM secp256k1_ge* pre_a_lam; -#endif struct secp256k1_strauss_point_state* ps; }; static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { secp256k1_ge tmpa; secp256k1_fe Z; -#ifdef USE_ENDOMORPHISM /* Splitted G factors. */ secp256k1_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; int bits_ng_128 = 0; -#else - int wnaf_ng[256]; - int bits_ng = 0; -#endif int i; int bits = 0; int np; @@ -494,28 +462,20 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c continue; } state->ps[no].input_pos = np; -#ifdef USE_ENDOMORPHISM /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. */ - state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A); - state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A); - VERIFY_CHECK(state->ps[no].bits_na_1 <= 130); - VERIFY_CHECK(state->ps[no].bits_na_lam <= 130); + state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A); + state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A); + VERIFY_CHECK(state->ps[no].bits_na_1 <= 129); + VERIFY_CHECK(state->ps[no].bits_na_lam <= 129); if (state->ps[no].bits_na_1 > bits) { bits = state->ps[no].bits_na_1; } if (state->ps[no].bits_na_lam > bits) { bits = state->ps[no].bits_na_lam; } -#else - /* build wnaf representation for na. 
*/ - state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A); - if (state->ps[no].bits_na > bits) { - bits = state->ps[no].bits_na; - } -#endif ++no; } @@ -547,7 +507,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c secp256k1_fe_set_int(&Z, 1); } -#ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); @@ -568,21 +527,12 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c bits = bits_ng_128; } } -#else - if (ng) { - bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); - if (bits_ng > bits) { - bits = bits_ng; - } - } -#endif secp256k1_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; secp256k1_gej_double_var(r, r, NULL); -#ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); @@ -601,18 +551,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); } -#else - for (np = 0; np < no; ++np) { - if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) { - ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); - } - } - if (i < bits_ng && (n = wnaf_ng[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); - } -#endif } if (!r->infinity) { @@ -625,27 +563,19 @@ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; struct secp256k1_strauss_point_state ps[1]; -#ifdef USE_ENDOMORPHISM secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; -#endif struct secp256k1_strauss_state state; state.prej = prej; state.zr = zr; state.pre_a = pre_a; -#ifdef USE_ENDOMORPHISM state.pre_a_lam = pre_a_lam; -#endif state.ps = ps; secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); } static size_t secp256k1_strauss_scratch_size(size_t n_points) { -#ifdef USE_ENDOMORPHISM static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar); -#else - static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar); -#endif return n_points*point_size; } @@ -665,12 +595,8 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callba scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar)); state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej)); state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe)); -#ifdef USE_ENDOMORPHISM state.pre_a = 
(secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A); -#else - state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); -#endif state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state)); if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) { @@ -868,7 +794,6 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi * set of buckets) for a given number of points. */ static int secp256k1_pippenger_bucket_window(size_t n) { -#ifdef USE_ENDOMORPHISM if (n <= 1) { return 1; } else if (n <= 4) { @@ -892,33 +817,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) { } else { return PIPPENGER_MAX_BUCKET_WINDOW; } -#else - if (n <= 1) { - return 1; - } else if (n <= 11) { - return 2; - } else if (n <= 45) { - return 3; - } else if (n <= 100) { - return 4; - } else if (n <= 275) { - return 5; - } else if (n <= 625) { - return 6; - } else if (n <= 1850) { - return 7; - } else if (n <= 3400) { - return 8; - } else if (n <= 9630) { - return 9; - } else if (n <= 17900) { - return 10; - } else if (n <= 32800) { - return 11; - } else { - return PIPPENGER_MAX_BUCKET_WINDOW; - } -#endif } /** @@ -926,7 +824,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) { */ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) { switch(bucket_window) { -#ifdef USE_ENDOMORPHISM case 1: return 1; case 2: return 4; case 3: return 20; @@ -939,26 +836,11 @@ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) { case 10: return 7880; case 11: return 16050; case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX; -#else - case 1: return 1; - case 2: return 11; - case 3: return 45; - case 4: return 100; - case 5: return 275; - case 6: return 625; - case 7: return 1850; - case 8: return 3400; - case 9: return 9630; - case 10: return 17900; - case 11: return 32800; - case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX; -#endif } return 0; } -#ifdef USE_ENDOMORPHISM SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) { secp256k1_scalar tmp = *s1; secp256k1_scalar_split_lambda(s1, s2, &tmp); @@ -973,32 +855,23 @@ SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, s secp256k1_ge_neg(p2, p2); } } -#endif /** * Returns the scratch size required for a given number of points (excluding * base point G) without considering alignment. 
*/ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) { -#ifdef USE_ENDOMORPHISM size_t entries = 2*n_points + 2; -#else - size_t entries = n_points + 1; -#endif size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size; } static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); - /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch + /* Use 2(n+1) with the endomorphism, when calculating batch * sizes. The reason for +1 is that we add the G scalar to the list of * other scalars. */ -#ifdef USE_ENDOMORPHISM size_t entries = 2*n_points + 2; -#else - size_t entries = n_points + 1; -#endif secp256k1_ge *points; secp256k1_scalar *scalars; secp256k1_gej *buckets; @@ -1035,10 +908,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call scalars[0] = *inp_g_sc; points[0] = secp256k1_ge_const_g; idx++; -#ifdef USE_ENDOMORPHISM secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); idx++; -#endif } while (point_idx < n_points) { @@ -1047,10 +918,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call return 0; } idx++; -#ifdef USE_ENDOMORPHISM secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); idx++; -#endif point_idx++; } @@ -1093,9 +962,7 @@ static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_cal size_t space_overhead; size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); -#ifdef USE_ENDOMORPHISM entry_size = 2*entry_size; -#endif space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state); if (space_overhead > max_alloc) { break; diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h index 6185be052d..36e39ecf0f 100644 --- a/src/secp256k1/src/group.h +++ b/src/secp256k1/src/group.h @@ -59,6 +59,7 @@ static int secp256k1_ge_is_infinity(const secp256k1_ge *a); /** Check whether a group element is valid (i.e., on the curve). */ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a); +/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); /** Set a group element equal to another which is given in jacobian coordinates */ @@ -115,10 +116,8 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv); -#ifdef USE_ENDOMORPHISM /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. 
*/ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a); -#endif /** Clear a secp256k1_gej to prevent leaking sensitive information. */ static void secp256k1_gej_clear(secp256k1_gej *r); @@ -138,4 +137,15 @@ static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_g /** Rescale a jacobian point by b which must be non-zero. Constant-time. */ static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b); +/** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve. + * + * In normal mode, the used group is secp256k1, which has cofactor=1 meaning that every point on the curve is in the + * group, and this function returns always true. + * + * When compiling in exhaustive test mode, a slightly different curve equation is used, leading to a group with a + * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this + * function checks whether a point that is on the curve is in fact also in that subgroup. + */ +static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge); + #endif /* SECP256K1_GROUP_H */ diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h index ccd93d3483..a5fbc91a0f 100644 --- a/src/secp256k1/src/group_impl.h +++ b/src/secp256k1/src/group_impl.h @@ -11,49 +11,38 @@ #include "field.h" #include "group.h" -/* These points can be generated in sage as follows: +/* These exhaustive group test orders and generators are chosen such that: + * - The field size is equal to that of secp256k1, so field code is the same. + * - The curve equation is of the form y^2=x^3+B for some constant B. + * - The subgroup has a generator 2*P, where P.x=1. + * - The subgroup has size less than 1000 to permit exhaustive testing. + * - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y). * - * 0. Setup a worksheet with the following parameters. - * b = 4 # whatever CURVE_B will be set to - * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) - * C = EllipticCurve ([F (0), F (b)]) - * - * 1. Determine all the small orders available to you. (If there are - * no satisfactory ones, go back and change b.) - * print C.order().factor(limit=1000) - * - * 2. Choose an order as one of the prime factors listed in the above step. - * (You can also multiply some to get a composite order, though the - * tests will crash trying to invert scalars during signing.) We take a - * random point and scale it to drop its order to the desired value. - * There is some probability this won't work; just try again. - * order = 199 - * P = C.random_point() - * P = (int(P.order()) / int(order)) * P - * assert(P.order() == order) - * - * 3. Print the values. You'll need to use a vim macro or something to - * split the hex output into 4-byte chunks. - * print "%x %x" % P.xy() + * These parameters are generated using sage/gen_exhaustive_groups.sage. 
*/ #if defined(EXHAUSTIVE_TEST_ORDER) -# if EXHAUSTIVE_TEST_ORDER == 199 +# if EXHAUSTIVE_TEST_ORDER == 13 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( - 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, - 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, - 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, - 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED + 0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f, + 0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb, + 0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2, + 0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24 ); - -static const int CURVE_B = 4; -# elif EXHAUSTIVE_TEST_ORDER == 13 +static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( + 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc, + 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417 +); +# elif EXHAUSTIVE_TEST_ORDER == 199 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( - 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, - 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, - 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, - 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac + 0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9, + 0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18, + 0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c, + 0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae +); +static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( + 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1, + 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb ); -static const int CURVE_B = 2; # else # error No known generator for the specified exhaustive test group order. # endif @@ -68,7 +57,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL ); -static const int CURVE_B = 7; +static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); #endif static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { @@ -219,14 +208,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) { } static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { - secp256k1_fe x2, x3, c; + secp256k1_fe x2, x3; r->x = *x; secp256k1_fe_sqr(&x2, x); secp256k1_fe_mul(&x3, x, &x2); r->infinity = 0; - secp256k1_fe_set_int(&c, CURVE_B); - secp256k1_fe_add(&c, &x3); - return secp256k1_fe_sqrt(&r->y, &c); + secp256k1_fe_add(&x3, &secp256k1_fe_const_b); + return secp256k1_fe_sqrt(&r->y, &x3); } static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) { @@ -269,36 +257,15 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej *a) { return a->infinity; } -static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) { - secp256k1_fe y2, x3, z2, z6; - if (a->infinity) { - return 0; - } - /** y^2 = x^3 + 7 - * (Y/Z^3)^2 = (X/Z^2)^3 + 7 - * Y^2 / Z^6 = X^3 / Z^6 + 7 - * Y^2 = X^3 + 7*Z^6 - */ - secp256k1_fe_sqr(&y2, &a->y); - secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2); - secp256k1_fe_mul_int(&z6, CURVE_B); - secp256k1_fe_add(&x3, &z6); - secp256k1_fe_normalize_weak(&x3); - return secp256k1_fe_equal_var(&y2, &x3); -} - static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) { - secp256k1_fe y2, x3, c; + secp256k1_fe y2, x3; if (a->infinity) { return 0; } /* y^2 = x^3 + 7 */ secp256k1_fe_sqr(&y2, &a->y); secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_set_int(&c, CURVE_B); - 
secp256k1_fe_add(&x3, &c); + secp256k1_fe_add(&x3, &secp256k1_fe_const_b); secp256k1_fe_normalize_weak(&x3); return secp256k1_fe_equal_var(&y2, &x3); } @@ -679,7 +646,6 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, secp256k1_fe_storage_cmov(&r->y, &a->y, flag); } -#ifdef USE_ENDOMORPHISM static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { static const secp256k1_fe beta = SECP256K1_FE_CONST( 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, @@ -688,7 +654,6 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { *r = *a; secp256k1_fe_mul(&r->x, &r->x, &beta); } -#endif static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { secp256k1_fe yz; @@ -704,4 +669,25 @@ static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { return secp256k1_fe_is_quad_var(&yz); } +static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) { +#ifdef EXHAUSTIVE_TEST_ORDER + secp256k1_gej out; + int i; + + /* A very simple EC multiplication ladder that avoids a dependecy on ecmult. */ + secp256k1_gej_set_infinity(&out); + for (i = 0; i < 32; ++i) { + secp256k1_gej_double_var(&out, &out, NULL); + if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) { + secp256k1_gej_add_ge_var(&out, &out, ge, NULL); + } + } + return secp256k1_gej_is_infinity(&out); +#else + (void)ge; + /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */ + return 1; +#endif +} + #endif /* SECP256K1_GROUP_IMPL_H */ diff --git a/src/secp256k1/src/modules/ecdh/tests_impl.h b/src/secp256k1/src/modules/ecdh/tests_impl.h index fe26e8fb69..e8d2aeab9a 100644 --- a/src/secp256k1/src/modules/ecdh/tests_impl.h +++ b/src/secp256k1/src/modules/ecdh/tests_impl.h @@ -80,7 +80,7 @@ void test_ecdh_generator_basepoint(void) { /* compute "explicitly" */ CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1); /* compare */ - CHECK(memcmp(output_ecdh, point_ser, 65) == 0); + CHECK(secp256k1_memcmp_var(output_ecdh, point_ser, 65) == 0); /* compute using ECDH function with default hash function */ CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1); @@ -90,7 +90,7 @@ void test_ecdh_generator_basepoint(void) { secp256k1_sha256_write(&sha, point_ser, point_ser_len); secp256k1_sha256_finalize(&sha, output_ser); /* compare */ - CHECK(memcmp(output_ecdh, output_ser, 32) == 0); + CHECK(secp256k1_memcmp_var(output_ecdh, output_ser, 32) == 0); } } diff --git a/src/secp256k1/src/modules/extrakeys/Makefile.am.include b/src/secp256k1/src/modules/extrakeys/Makefile.am.include index 8515f92e7a..0d901ec1f4 100644 --- a/src/secp256k1/src/modules/extrakeys/Makefile.am.include +++ b/src/secp256k1/src/modules/extrakeys/Makefile.am.include @@ -1,3 +1,4 @@ include_HEADERS += include/secp256k1_extrakeys.h noinst_HEADERS += src/modules/extrakeys/tests_impl.h +noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h noinst_HEADERS += src/modules/extrakeys/main_impl.h diff --git a/src/secp256k1/src/modules/extrakeys/main_impl.h b/src/secp256k1/src/modules/extrakeys/main_impl.h index d319215355..5378d2f301 100644 --- a/src/secp256k1/src/modules/extrakeys/main_impl.h +++ b/src/secp256k1/src/modules/extrakeys/main_impl.h @@ -33,6 +33,9 @@ int secp256k1_xonly_pubkey_parse(const secp256k1_context* ctx, secp256k1_xonly_p if (!secp256k1_ge_set_xo_var(&pk, &x, 0)) { return 0; } + if (!secp256k1_ge_is_in_correct_subgroup(&pk)) { + return 0; + } 
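For reference: the secp256k1_ge_is_in_correct_subgroup helper added to group_impl.h above (and called here from secp256k1_xonly_pubkey_parse) multiplies the candidate point by EXHAUSTIVE_TEST_ORDER with a 32-step double-and-add ladder and accepts the point only if the result is the point at infinity; in non-exhaustive builds it returns 1 unconditionally, since secp256k1 has cofactor 1. A minimal, self-contained sketch of the same ladder, using integer addition modulo 13 as a stand-in for the curve group (all names below are illustrative and not part of libsecp256k1):

#include <stdio.h>
#include <stdint.h>

/* Same bit-by-bit ladder as secp256k1_ge_is_in_correct_subgroup, but over the
 * additive group of integers mod `order`: "double" is x+x, "add" is +g. */
static uint32_t ladder_mul(uint32_t scalar, uint32_t g, uint32_t order) {
    uint32_t out = 0; /* the identity, analogous to the point at infinity */
    int i;
    for (i = 0; i < 32; i++) {
        out = (out + out) % order;       /* double */
        if ((scalar >> (31 - i)) & 1) {
            out = (out + g) % order;     /* conditional add of the input element */
        }
    }
    return out;
}

int main(void) {
    uint32_t g;
    /* Multiplying any element of a group of order 13 by 13 yields the identity,
     * which is exactly the membership condition the ladder tests for. */
    for (g = 0; g < 13; g++) {
        printf("13 * %u == %u\n", (unsigned)g, (unsigned)ladder_mul(13, g, 13));
    }
    return 0;
}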
secp256k1_xonly_pubkey_save(pubkey, &pk); return 1; } @@ -121,7 +124,7 @@ int secp256k1_xonly_pubkey_tweak_add_check(const secp256k1_context* ctx, const u secp256k1_fe_normalize_var(&pk.y); secp256k1_fe_get_b32(pk_expected32, &pk.x); - return memcmp(&pk_expected32, tweaked_pubkey32, 32) == 0 + return secp256k1_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0 && secp256k1_fe_is_odd(&pk.y) == tweaked_pk_parity; } diff --git a/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h new file mode 100644 index 0000000000..0e29bc6b09 --- /dev/null +++ b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h @@ -0,0 +1,68 @@ +/********************************************************************** + * Copyright (c) 2020 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_ +#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_ + +#include "src/modules/extrakeys/main_impl.h" +#include "include/secp256k1_extrakeys.h" + +static void test_exhaustive_extrakeys(const secp256k1_context *ctx, const secp256k1_ge* group) { + secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + secp256k1_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1]; + secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; + int parities[EXHAUSTIVE_TEST_ORDER - 1]; + unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; + int i; + + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { + secp256k1_fe fe; + secp256k1_scalar scalar_i; + unsigned char buf[33]; + int parity; + + secp256k1_scalar_set_int(&scalar_i, i); + secp256k1_scalar_get_b32(buf, &scalar_i); + + /* Construct pubkey and keypair. */ + CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey[i - 1], buf)); + + /* Construct serialized xonly_pubkey from keypair. */ + CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1])); + CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + + /* Parse the xonly_pubkey back and verify it matches the previously serialized value. */ + CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1])); + CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + + /* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */ + CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1])); + CHECK(parity == parities[i - 1]); + CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1])); + CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0); + + /* Compare the xonly_pubkey bytes against the precomputed group. */ + secp256k1_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]); + CHECK(secp256k1_fe_equal_var(&fe, &group[i].x)); + + /* Check the parity against the precomputed group. */ + fe = group[i].y; + secp256k1_fe_normalize_var(&fe); + CHECK(secp256k1_fe_is_odd(&fe) == parities[i - 1]); + + /* Verify that the higher half is identical to the lower half mirrored. 
*/ + if (i > EXHAUSTIVE_TEST_ORDER / 2) { + CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0); + CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]); + } + } + + /* TODO: keypair/xonly_pubkey tweak tests */ +} + +#endif diff --git a/src/secp256k1/src/modules/extrakeys/tests_impl.h b/src/secp256k1/src/modules/extrakeys/tests_impl.h index fc9d40eda1..5ee135849e 100644 --- a/src/secp256k1/src/modules/extrakeys/tests_impl.h +++ b/src/secp256k1/src/modules/extrakeys/tests_impl.h @@ -35,9 +35,9 @@ void test_xonly_pubkey(void) { secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount); secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); - secp256k1_rand256(sk); + secp256k1_testrand256(sk); memset(ones32, 0xFF, 32); - secp256k1_rand256(xy_sk); + secp256k1_testrand256(xy_sk); CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1); CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); @@ -60,7 +60,7 @@ void test_xonly_pubkey(void) { sk[0] = 1; CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1); CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(memcmp(&pk, &xonly_pk, sizeof(pk)) == 0); + CHECK(secp256k1_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0); CHECK(pk_parity == 0); /* Choose a secret key such that pubkey and xonly_pubkey are each others @@ -68,7 +68,7 @@ void test_xonly_pubkey(void) { sk[0] = 2; CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1); CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1); - CHECK(memcmp(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); + CHECK(secp256k1_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0); CHECK(pk_parity == 1); secp256k1_pubkey_load(ctx, &pk1, &pk); secp256k1_pubkey_load(ctx, &pk2, (secp256k1_pubkey *) &xonly_pk); @@ -81,7 +81,7 @@ void test_xonly_pubkey(void) { CHECK(secp256k1_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0); CHECK(ecount == 1); CHECK(secp256k1_xonly_pubkey_serialize(none, buf32, NULL) == 0); - CHECK(memcmp(buf32, zeros64, 32) == 0); + CHECK(secp256k1_memcmp_var(buf32, zeros64, 32) == 0); CHECK(ecount == 2); { /* A pubkey filled with 0s will fail to serialize due to pubkey_load @@ -104,28 +104,28 @@ void test_xonly_pubkey(void) { CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1); CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1); CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1); - CHECK(memcmp(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); + CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0); /* Test parsing invalid field elements */ memset(&xonly_pk, 1, sizeof(xonly_pk)); /* Overflowing field element */ CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0); - CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); memset(&xonly_pk, 1, sizeof(xonly_pk)); /* There's no point with x-coordinate 0 on secp256k1 */ CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0); - CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); /* If a random 32-byte string can not be parsed with ec_pubkey_parse * (because interpreted as X coordinate it does not correspond to a point on * the curve) then xonly_pubkey_parse should fail as well. 
*/ for (i = 0; i < count; i++) { unsigned char rand33[33]; - secp256k1_rand256(&rand33[1]); + secp256k1_testrand256(&rand33[1]); rand33[0] = SECP256K1_TAG_PUBKEY_EVEN; if (!secp256k1_ec_pubkey_parse(ctx, &pk, rand33, 33)) { memset(&xonly_pk, 1, sizeof(xonly_pk)); CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0); - CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); + CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0); } else { CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1); } @@ -154,8 +154,8 @@ void test_xonly_pubkey_tweak(void) { secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); memset(overflows, 0xff, sizeof(overflows)); - secp256k1_rand256(tweak); - secp256k1_rand256(sk); + secp256k1_testrand256(tweak); + secp256k1_testrand256(sk); CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1); CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); @@ -170,15 +170,15 @@ void test_xonly_pubkey_tweak(void) { CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0); CHECK(ecount == 4); /* NULL internal_xonly_pk zeroes the output_pk */ - CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0); CHECK(ecount == 5); /* NULL tweak zeroes the output_pk */ - CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* Invalid tweak zeroes the output_pk */ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); /* A zero tweak is fine */ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1); @@ -193,16 +193,16 @@ void test_xonly_pubkey_tweak(void) { secp256k1_scalar_get_b32(tweak, &scalar_tweak); CHECK((secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0) || (secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0)); - CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); } /* Invalid pk with a valid tweak */ memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk)); - secp256k1_rand256(tweak); + secp256k1_testrand256(tweak); ecount = 0; CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0); CHECK(ecount == 1); - CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); secp256k1_context_destroy(none); secp256k1_context_destroy(sign); @@ -228,8 +228,8 @@ void test_xonly_pubkey_tweak_check(void) { secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); memset(overflows, 0xff, sizeof(overflows)); - secp256k1_rand256(tweak); - secp256k1_rand256(sk); + secp256k1_testrand256(tweak); + secp256k1_testrand256(sk); CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1); CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1); @@ -268,7 +268,7 @@ void test_xonly_pubkey_tweak_check(void) { /* Overflowing tweak not allowed */ 
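Throughout these test hunks, plain memcmp on byte buffers and structs is swapped for secp256k1_memcmp_var. The wrapper's definition is not part of this excerpt; presumably it is a small variable-time byte-wise comparison kept under the library's control (one common motivation for such wrappers is sidestepping problematic memcmp code generation in some toolchains, though the patch text shown here does not state the rationale). A plausible sketch of such a helper, offered only as an assumption about its shape:

#include <stddef.h>

/* Illustrative byte-wise comparison; the real secp256k1_memcmp_var may differ. */
int memcmp_var_sketch(const void *s1, const void *s2, size_t n) {
    const unsigned char *p1 = s1, *p2 = s2;
    size_t i;
    for (i = 0; i < n; i++) {
        int diff = (int)p1[i] - (int)p2[i];
        if (diff != 0) {
            return diff;  /* sign conveys ordering, matching memcmp's contract */
        }
    }
    return 0;             /* all n bytes equal */
}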
CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0); CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0); - CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0); CHECK(ecount == 5); secp256k1_context_destroy(none); @@ -287,7 +287,7 @@ void test_xonly_pubkey_tweak_recursive(void) { unsigned char tweak[N_PUBKEYS - 1][32]; int i; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_ec_pubkey_create(ctx, &pk[0], sk) == 1); /* Add tweaks */ for (i = 0; i < N_PUBKEYS - 1; i++) { @@ -327,51 +327,51 @@ void test_keypair(void) { /* Test keypair_create */ ecount = 0; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(none, &keypair, sk) == 0); - CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 1); CHECK(secp256k1_keypair_create(verify, &keypair, sk) == 0); - CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 2); CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1); CHECK(secp256k1_keypair_create(sign, NULL, sk) == 0); CHECK(ecount == 3); CHECK(secp256k1_keypair_create(sign, &keypair, NULL) == 0); - CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(ecount == 4); /* Invalid secret key */ CHECK(secp256k1_keypair_create(sign, &keypair, zeros96) == 0); - CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); CHECK(secp256k1_keypair_create(sign, &keypair, overflows) == 0); - CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0); /* Test keypair_pub */ ecount = 0; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1); CHECK(secp256k1_keypair_pub(none, NULL, &keypair) == 0); CHECK(ecount == 1); CHECK(secp256k1_keypair_pub(none, &pk, NULL) == 0); CHECK(ecount == 2); - CHECK(memcmp(zeros96, &pk, sizeof(pk)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* Using an invalid keypair is fine for keypair_pub */ memset(&keypair, 0, sizeof(keypair)); CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1); - CHECK(memcmp(zeros96, &pk, sizeof(pk)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0); /* keypair holds the same pubkey as pubkey_create */ CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1); CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1); CHECK(secp256k1_keypair_pub(none, &pk_tmp, &keypair) == 1); - CHECK(memcmp(&pk, &pk_tmp, sizeof(pk)) == 0); + CHECK(secp256k1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0); /** Test keypair_xonly_pub **/ ecount = 0; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); CHECK(secp256k1_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0); @@ -379,13 +379,13 @@ void test_keypair(void) { CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1); CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0); 
CHECK(ecount == 2); - CHECK(memcmp(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); /* Using an invalid keypair will set the xonly_pk to 0 (first reset * xonly_pk). */ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1); memset(&keypair, 0, sizeof(keypair)); CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0); - CHECK(memcmp(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); + CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0); CHECK(ecount == 3); /** keypair holds the same xonly pubkey as pubkey_create **/ @@ -393,7 +393,7 @@ void test_keypair(void) { CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1); CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1); CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1); - CHECK(memcmp(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); + CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0); CHECK(pk_parity == pk_parity_tmp); secp256k1_context_destroy(none); @@ -414,8 +414,8 @@ void test_keypair_add(void) { secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount); CHECK(sizeof(zeros96) == sizeof(keypair)); - secp256k1_rand256(sk); - secp256k1_rand256(tweak); + secp256k1_testrand256(sk); + secp256k1_testrand256(tweak); memset(overflows, 0xFF, 32); CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); @@ -429,12 +429,12 @@ void test_keypair_add(void) { CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0); CHECK(ecount == 4); /* This does not set the keypair to zeroes */ - CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) != 0); + CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0); /* Invalid tweak zeroes the keypair */ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0); - CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* A zero tweak is fine */ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); @@ -444,7 +444,7 @@ void test_keypair_add(void) { for (i = 0; i < count; i++) { secp256k1_scalar scalar_tweak; secp256k1_keypair keypair_tmp; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); memcpy(&keypair_tmp, &keypair, sizeof(keypair)); /* Because sk may be negated before adding, we need to try with tweak = @@ -454,17 +454,17 @@ void test_keypair_add(void) { secp256k1_scalar_get_b32(tweak, &scalar_tweak); CHECK((secp256k1_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0) || (secp256k1_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0)); - CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0 - || memcmp(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); + CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0 + || secp256k1_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0); } /* Invalid keypair with a valid tweak */ memset(&keypair, 0, sizeof(keypair)); - secp256k1_rand256(tweak); + secp256k1_testrand256(tweak); ecount = 0; CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0); CHECK(ecount == 1); - CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0); + CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0); /* Only seckey part of keypair invalid */ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); 
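Editor's note on the mechanical change running through all of these test hunks: direct memcmp calls on library structs and buffers are replaced by secp256k1_memcmp_var. The wrapper itself is not part of this excerpt; conceptually it is a variable-time, byte-wise comparison with memcmp semantics, something along these lines (assumed shape, not the exact upstream code):

    /* Sketch only: behaves like memcmp but compares byte by byte, avoiding
     * compiler behavior that is specific to the memcmp builtin. */
    static SECP256K1_INLINE int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n) {
        const unsigned char *p1 = s1, *p2 = s2;
        size_t i;
        for (i = 0; i < n; i++) {
            int diff = p1[i] - p2[i];
            if (diff != 0) {
                return diff;
            }
        }
        return 0;
    }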
memset(&keypair, 0, 32); @@ -486,7 +486,7 @@ void test_keypair_add(void) { unsigned char pk32[32]; int pk_parity; - secp256k1_rand256(tweak); + secp256k1_testrand256(tweak); CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1); CHECK(secp256k1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1); @@ -498,11 +498,11 @@ void test_keypair_add(void) { /* Check that the resulting pubkey matches xonly_pubkey_tweak_add */ CHECK(secp256k1_keypair_pub(ctx, &output_pk_xy, &keypair) == 1); CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1); - CHECK(memcmp(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); /* Check that the secret key in the keypair is tweaked correctly */ CHECK(secp256k1_ec_pubkey_create(ctx, &output_pk_expected, &keypair.data[0]) == 1); - CHECK(memcmp(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); + CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0); } secp256k1_context_destroy(none); secp256k1_context_destroy(sign); diff --git a/src/secp256k1/src/modules/recovery/Makefile.am.include b/src/secp256k1/src/modules/recovery/Makefile.am.include index bf23c26e71..e2d3f1248d 100644 --- a/src/secp256k1/src/modules/recovery/Makefile.am.include +++ b/src/secp256k1/src/modules/recovery/Makefile.am.include @@ -1,6 +1,7 @@ include_HEADERS += include/secp256k1_recovery.h noinst_HEADERS += src/modules/recovery/main_impl.h noinst_HEADERS += src/modules/recovery/tests_impl.h +noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h if USE_BENCHMARK noinst_PROGRAMS += bench_recover bench_recover_SOURCES = src/bench_recover.c diff --git a/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h new file mode 100644 index 0000000000..a2f381d77a --- /dev/null +++ b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h @@ -0,0 +1,149 @@ +/********************************************************************** + * Copyright (c) 2016 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H +#define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H + +#include "src/modules/recovery/main_impl.h" +#include "include/secp256k1_recovery.h" + +void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group) { + int i, j, k; + uint64_t iter = 0; + + /* Loop */ + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */ + for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */ + if (skip_section(&iter)) continue; + for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ + const int starting_k = k; + secp256k1_fe r_dot_y_normalized; + secp256k1_ecdsa_recoverable_signature rsig; + secp256k1_ecdsa_signature sig; + secp256k1_scalar sk, msg, r, s, expected_r; + unsigned char sk32[32], msg32[32]; + int expected_recid; + int recid; + int overflow; + secp256k1_scalar_set_int(&msg, i); + secp256k1_scalar_set_int(&sk, j); + secp256k1_scalar_get_b32(sk32, &sk); + secp256k1_scalar_get_b32(msg32, &msg); + + secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, 
&k); + + /* Check directly */ + secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); + r_from_k(&expected_r, group, k, &overflow); + CHECK(r == expected_r); + CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER || + (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER); + /* The recid's second bit is for conveying overflow (R.x value >= group order). + * In the actual secp256k1 this is an astronomically unlikely event, but in the + * small group used here, it will be the case for all points except the ones where + * R.x=1 (which the group is specifically selected to have). + * Note that this isn't actually useful; full recovery would need to convey + * floor(R.x / group_order), but only one bit is used as that is sufficient + * in the real group. */ + expected_recid = overflow ? 2 : 0; + r_dot_y_normalized = group[k].y; + secp256k1_fe_normalize(&r_dot_y_normalized); + /* Also the recovery id is flipped depending if we hit the low-s branch */ + if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) { + expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized); + } else { + expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized); + } + CHECK(recid == expected_recid); + + /* Convert to a standard sig then check */ + secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); + /* Note that we compute expected_r *after* signing -- this is important + * because our nonce-computing function function might change k during + * signing. */ + r_from_k(&expected_r, group, k, NULL); + CHECK(r == expected_r); + CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER || + (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER); + + /* Overflow means we've tried every possible nonce */ + if (k < starting_k) { + break; + } + } + } + } +} + +void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group) { + /* This is essentially a copy of test_exhaustive_verify, with recovery added */ + int s, r, msg, key; + uint64_t iter = 0; + for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) { + for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { + for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { + for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { + secp256k1_ge nonconst_ge; + secp256k1_ecdsa_recoverable_signature rsig; + secp256k1_ecdsa_signature sig; + secp256k1_pubkey pk; + secp256k1_scalar sk_s, msg_s, r_s, s_s; + secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; + int recid = 0; + int k, should_verify; + unsigned char msg32[32]; + + if (skip_section(&iter)) continue; + + secp256k1_scalar_set_int(&s_s, s); + secp256k1_scalar_set_int(&r_s, r); + secp256k1_scalar_set_int(&msg_s, msg); + secp256k1_scalar_set_int(&sk_s, key); + secp256k1_scalar_get_b32(msg32, &msg_s); + + /* Verify by hand */ + /* Run through every k value that gives us this r and check that *one* works. + * Note there could be none, there could be multiple, ECDSA is weird. 
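Editor's sketch, for context on the recovery id checked here: its low bit carries the parity of R.y and its second bit the (normally negligible) overflow of R.x past the group order, which together let a verifier reconstruct R from the 32-byte r value. Applications never decode it by hand; they go through the public API, roughly as follows (sig64, recid, msg32 and expected_pubkey are assumed to come from the signer):

    secp256k1_ecdsa_recoverable_signature rsig;
    secp256k1_pubkey recovered;
    CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, recid) == 1);
    CHECK(secp256k1_ecdsa_recover(ctx, &recovered, &rsig, msg32) == 1);
    /* The recovered key must match the signer's public key. */
    CHECK(secp256k1_memcmp_var(&recovered, &expected_pubkey, sizeof(recovered)) == 0);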
*/ + should_verify = 0; + for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { + secp256k1_scalar check_x_s; + r_from_k(&check_x_s, group, k, NULL); + if (r_s == check_x_s) { + secp256k1_scalar_set_int(&s_times_k_s, k); + secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + } + } + /* nb we have a "high s" rule */ + should_verify &= !secp256k1_scalar_is_high(&s_s); + + /* We would like to try recovering the pubkey and checking that it matches, + * but pubkey recovery is impossible in the exhaustive tests (the reason + * being that there are 12 nonzero r values, 12 nonzero points, and no + * overlap between the sets, so there are no valid signatures). */ + + /* Verify by converting to a standard signature and calling verify */ + secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); + secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); + secp256k1_pubkey_save(&pk, &nonconst_ge); + CHECK(should_verify == + secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); + } + } + } + } +} + +static void test_exhaustive_recovery(const secp256k1_context *ctx, const secp256k1_ge *group) { + test_exhaustive_recovery_sign(ctx, group); + test_exhaustive_recovery_verify(ctx, group); +} + +#endif /* SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H */ diff --git a/src/secp256k1/src/modules/recovery/tests_impl.h b/src/secp256k1/src/modules/recovery/tests_impl.h index 38a533a755..09cae38403 100644 --- a/src/secp256k1/src/modules/recovery/tests_impl.h +++ b/src/secp256k1/src/modules/recovery/tests_impl.h @@ -25,7 +25,7 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c } /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */ memset(nonce32, 1, 32); - return secp256k1_rand_bits(1); + return secp256k1_testrand_bits(1); } void test_ecdsa_recovery_api(void) { @@ -184,7 +184,7 @@ void test_ecdsa_recovery_end_to_end(void) { CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1); CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(memcmp(&signature[4], &signature[0], 64) == 0); + CHECK(secp256k1_memcmp_var(&signature[4], &signature[0], 64) == 0); CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); memset(&rsignature[4], 0, sizeof(rsignature[4])); CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); @@ -193,16 +193,16 @@ void test_ecdsa_recovery_end_to_end(void) { /* Parse compact (with recovery id) and recover. */ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1); - CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0); /* Serialize/destroy/parse signature and verify again. 
*/ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255); + sig[secp256k1_testrand_bits(6)] += 1 + secp256k1_testrand_int(255); CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0); /* Recover again */ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 || - memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0); + secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0); } /* Tests several edge cases. */ diff --git a/src/secp256k1/src/modules/schnorrsig/Makefile.am.include b/src/secp256k1/src/modules/schnorrsig/Makefile.am.include index a82bafe43f..568bcc3523 100644 --- a/src/secp256k1/src/modules/schnorrsig/Makefile.am.include +++ b/src/secp256k1/src/modules/schnorrsig/Makefile.am.include @@ -1,6 +1,7 @@ include_HEADERS += include/secp256k1_schnorrsig.h noinst_HEADERS += src/modules/schnorrsig/main_impl.h noinst_HEADERS += src/modules/schnorrsig/tests_impl.h +noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h if USE_BENCHMARK noinst_PROGRAMS += bench_schnorrsig bench_schnorrsig_SOURCES = src/bench_schnorrsig.c diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h index a0218f881a..b0d8481f9b 100644 --- a/src/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h @@ -68,7 +68,7 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms /* Tag the hash with algo16 which is important to avoid nonce reuse across * algorithms. If this nonce function is used in BIP-340 signing as defined * in the spec, an optimized tagging implementation is used. */ - if (memcmp(algo16, bip340_algo16, 16) == 0) { + if (secp256k1_memcmp_var(algo16, bip340_algo16, 16) == 0) { secp256k1_nonce_function_bip340_sha256_tagged(&sha); } else { int algo16_len = 16; @@ -108,6 +108,22 @@ static void secp256k1_schnorrsig_sha256_tagged(secp256k1_sha256 *sha) { sha->bytes = 64; } +static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32) +{ + unsigned char buf[32]; + secp256k1_sha256 sha; + + /* tagged hash(r.x, pk.x, msg32) */ + secp256k1_schnorrsig_sha256_tagged(&sha); + secp256k1_sha256_write(&sha, r32, 32); + secp256k1_sha256_write(&sha, pubkey32, 32); + secp256k1_sha256_write(&sha, msg32, 32); + secp256k1_sha256_finalize(&sha, buf); + /* Set scalar e to the challenge hash modulo the curve order as per + * BIP340. 
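Editor's sketch: the new secp256k1_schnorrsig_challenge helper centralizes the BIP340 challenge computation, and secp256k1_schnorrsig_sha256_tagged merely pre-loads the SHA256 midstate for the "BIP0340/challenge" tag. Spelled out without the midstate shortcut, the tagged hash it implements is SHA256(SHA256(tag) || SHA256(tag) || data); an unoptimized equivalent using the library's internal SHA256 helpers (not the code path the library actually takes):

    /* Unoptimized BIP340 tagged hash: SHA256(SHA256(tag) || SHA256(tag) || data). */
    static void tagged_hash_sketch(unsigned char *out32,
                                   const unsigned char *tag, size_t taglen,
                                   const unsigned char *data, size_t datalen) {
        unsigned char taghash[32];
        secp256k1_sha256 sha;
        secp256k1_sha256_initialize(&sha);
        secp256k1_sha256_write(&sha, tag, taglen);
        secp256k1_sha256_finalize(&sha, taghash);
        secp256k1_sha256_initialize(&sha);
        secp256k1_sha256_write(&sha, taghash, 32);
        secp256k1_sha256_write(&sha, taghash, 32);
        secp256k1_sha256_write(&sha, data, datalen);
        secp256k1_sha256_finalize(&sha, out32);
    }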
*/ + secp256k1_scalar_set_b32(e, buf, NULL); +} + int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) { secp256k1_scalar sk; secp256k1_scalar e; @@ -115,7 +131,6 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64 secp256k1_gej rj; secp256k1_ge pk; secp256k1_ge r; - secp256k1_sha256 sha; unsigned char buf[32] = { 0 }; unsigned char pk_buf[32]; unsigned char seckey[32]; @@ -159,16 +174,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64 secp256k1_fe_normalize_var(&r.x); secp256k1_fe_get_b32(&sig64[0], &r.x); - /* tagged hash(r.x, pk.x, msg32) */ - secp256k1_schnorrsig_sha256_tagged(&sha); - secp256k1_sha256_write(&sha, &sig64[0], 32); - secp256k1_sha256_write(&sha, pk_buf, sizeof(pk_buf)); - secp256k1_sha256_write(&sha, msg32, 32); - secp256k1_sha256_finalize(&sha, buf); - - /* Set scalar e to the challenge hash modulo the curve order as per - * BIP340. */ - secp256k1_scalar_set_b32(&e, buf, NULL); + secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf); secp256k1_scalar_mul(&e, &e, &sk); secp256k1_scalar_add(&e, &e, &k); secp256k1_scalar_get_b32(&sig64[32], &e); @@ -189,7 +195,6 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha secp256k1_gej pkj; secp256k1_fe rx; secp256k1_ge r; - secp256k1_sha256 sha; unsigned char buf[32]; int overflow; @@ -212,13 +217,9 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha return 0; } - secp256k1_schnorrsig_sha256_tagged(&sha); - secp256k1_sha256_write(&sha, &sig64[0], 32); + /* Compute e. */ secp256k1_fe_get_b32(buf, &pk.x); - secp256k1_sha256_write(&sha, buf, sizeof(buf)); - secp256k1_sha256_write(&sha, msg32, 32); - secp256k1_sha256_finalize(&sha, buf); - secp256k1_scalar_set_b32(&e, buf, NULL); + secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, buf); /* Compute rj = s*G + (-e)*pkj */ secp256k1_scalar_negate(&e, &e); diff --git a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h new file mode 100644 index 0000000000..4bf0bc1680 --- /dev/null +++ b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h @@ -0,0 +1,206 @@ +/********************************************************************** + * Copyright (c) 2020 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_ +#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_ + +#include "include/secp256k1_schnorrsig.h" +#include "src/modules/schnorrsig/main_impl.h" + +static const unsigned char invalid_pubkey_bytes[][32] = { + /* 0 */ + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }, + /* 2 */ + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 + }, + /* order */ + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 24) & 0xFF, + ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 16) & 0xFF, + ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 8) & 0xFF, + (EXHAUSTIVE_TEST_ORDER + 0UL) & 0xFF + }, + /* order + 1 */ + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 24) & 0xFF, + ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 16) & 0xFF, + ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 8) & 0xFF, + (EXHAUSTIVE_TEST_ORDER + 1UL) & 0xFF + }, + /* field size */ + { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F + }, + /* field size + 1 (note that 1 is legal) */ + { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30 + }, + /* 2^256 - 1 */ + { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + } +}; + +#define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0])) + +static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, + const unsigned char *key32, const unsigned char *xonly_pk32, + const unsigned char *algo16, void* data) { + secp256k1_scalar s; + int *idata = data; + (void)msg32; + (void)key32; + (void)xonly_pk32; + (void)algo16; + secp256k1_scalar_set_int(&s, *idata); + secp256k1_scalar_get_b32(nonce32, &s); + return 1; +} + +static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, const secp256k1_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) { + int d; + uint64_t iter = 0; + /* Iterate over the possible public keys to verify against (through their corresponding DL d). */ + for (d = 1; d <= EXHAUSTIVE_TEST_ORDER / 2; ++d) { + int actual_d; + unsigned k; + unsigned char pk32[32]; + memcpy(pk32, xonly_pubkey_bytes[d - 1], 32); + actual_d = parities[d - 1] ? EXHAUSTIVE_TEST_ORDER - d : d; + /* Iterate over the possible valid first 32 bytes in the signature, through their corresponding DL k. + Values above EXHAUSTIVE_TEST_ORDER/2 refer to the entries in invalid_pubkey_bytes. */ + for (k = 1; k <= EXHAUSTIVE_TEST_ORDER / 2 + NUM_INVALID_KEYS; ++k) { + unsigned char sig64[64]; + int actual_k = -1; + int e_done[EXHAUSTIVE_TEST_ORDER] = {0}; + int e_count_done = 0; + if (skip_section(&iter)) continue; + if (k <= EXHAUSTIVE_TEST_ORDER / 2) { + memcpy(sig64, xonly_pubkey_bytes[k - 1], 32); + actual_k = parities[k - 1] ? EXHAUSTIVE_TEST_ORDER - k : k; + } else { + memcpy(sig64, invalid_pubkey_bytes[k - 1 - EXHAUSTIVE_TEST_ORDER / 2], 32); + } + /* Randomly generate messages until all challenges have been hit. */ + while (e_count_done < EXHAUSTIVE_TEST_ORDER) { + secp256k1_scalar e; + unsigned char msg32[32]; + secp256k1_testrand256(msg32); + secp256k1_schnorrsig_challenge(&e, sig64, msg32, pk32); + /* Only do work if we hit a challenge we haven't tried before. */ + if (!e_done[e]) { + /* Iterate over the possible valid last 32 bytes in the signature. 
+ 0..order=that s value; order+1=random bytes */ + int count_valid = 0, s; + for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) { + int expect_valid, valid; + if (s <= EXHAUSTIVE_TEST_ORDER) { + secp256k1_scalar s_s; + secp256k1_scalar_set_int(&s_s, s); + secp256k1_scalar_get_b32(sig64 + 32, &s_s); + expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER && + (s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER); + } else { + secp256k1_testrand256(sig64 + 32); + expect_valid = 0; + } + valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]); + CHECK(valid == expect_valid); + count_valid += valid; + } + /* Exactly one s value must verify, unless R is illegal. */ + CHECK(count_valid == (actual_k != -1)); + /* Don't retry other messages that result in the same challenge. */ + e_done[e] = 1; + ++e_count_done; + } + } + } + } +} + +static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const secp256k1_keypair* keypairs, const int* parities) { + int d, k; + uint64_t iter = 0; + /* Loop over keys. */ + for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) { + int actual_d = d; + if (parities[d - 1]) actual_d = EXHAUSTIVE_TEST_ORDER - d; + /* Loop over nonces. */ + for (k = 1; k < EXHAUSTIVE_TEST_ORDER; ++k) { + int e_done[EXHAUSTIVE_TEST_ORDER] = {0}; + int e_count_done = 0; + unsigned char msg32[32]; + unsigned char sig64[64]; + int actual_k = k; + if (skip_section(&iter)) continue; + if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k; + /* Generate random messages until all challenges have been tried. */ + while (e_count_done < EXHAUSTIVE_TEST_ORDER) { + secp256k1_scalar e; + secp256k1_testrand256(msg32); + secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]); + /* Only do work if we hit a challenge we haven't tried before. */ + if (!e_done[e]) { + secp256k1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; + unsigned char expected_s_bytes[32]; + secp256k1_scalar_get_b32(expected_s_bytes, &expected_s); + /* Invoke the real function to construct a signature. */ + CHECK(secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], secp256k1_hardened_nonce_function_smallint, &k)); + /* The first 32 bytes must match the xonly pubkey for the specified k. */ + CHECK(secp256k1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); + /* The last 32 bytes must match the expected s value. */ + CHECK(secp256k1_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0); + /* Don't retry other messages that result in the same challenge. */ + e_done[e] = 1; + ++e_count_done; + } + } + } + } +} + +static void test_exhaustive_schnorrsig(const secp256k1_context *ctx) { + secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; + secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1]; + int parity[EXHAUSTIVE_TEST_ORDER - 1]; + unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32]; + unsigned i; + + /* Verify that all invalid_pubkey_bytes are actually invalid. */ + for (i = 0; i < NUM_INVALID_KEYS; ++i) { + secp256k1_xonly_pubkey pk; + CHECK(!secp256k1_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i])); + } + + /* Construct keypairs and xonly-pubkeys for the entire group. 
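Editor's aside: both exhaustive loops above lean on the same fact, namely that for a fixed secret key d, nonce k and challenge e, the Schnorr signing equation s = k + e*d (mod n) admits exactly one valid s. In the order-13 test group this is easy to sanity-check with plain integers (the values below are arbitrary examples):

    #include <stdio.h>
    #define ORDER 13
    int main(void) {
        int d = 5, k = 7, e = 11, s, valid_count = 0; /* arbitrary key, nonce, challenge */
        for (s = 0; s < ORDER; s++) {
            valid_count += (s == (k + e * d) % ORDER);
        }
        printf("valid s values: %d\n", valid_count); /* prints 1 */
        return 0;
    }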
*/ + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) { + secp256k1_scalar scalar_i; + unsigned char buf[32]; + secp256k1_scalar_set_int(&scalar_i, i); + secp256k1_scalar_get_b32(buf, &scalar_i); + CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf)); + CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1])); + CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1])); + } + + test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity); + test_exhaustive_schnorrsig_verify(ctx, xonly_pubkey, xonly_pubkey_bytes, parity); +} + +#endif diff --git a/src/secp256k1/src/modules/schnorrsig/tests_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_impl.h index 88d8f56404..f522fcb320 100644 --- a/src/secp256k1/src/modules/schnorrsig/tests_impl.h +++ b/src/secp256k1/src/modules/schnorrsig/tests_impl.h @@ -15,9 +15,9 @@ void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) { unsigned char nonces[2][32]; CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1); - secp256k1_rand_flip(args[n_flip], n_bytes); + secp256k1_testrand_flip(args[n_flip], n_bytes); CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1); - CHECK(memcmp(nonces[0], nonces[1], 32) != 0); + CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0); } /* Tests for the equality of two sha256 structs. This function only produces a @@ -28,7 +28,7 @@ void test_sha256_eq(const secp256k1_sha256 *sha1, const secp256k1_sha256 *sha2) CHECK((sha1->bytes & 0x3F) == 0); CHECK(sha1->bytes == sha2->bytes); - CHECK(memcmp(sha1->s, sha2->s, sizeof(sha1->s)) == 0); + CHECK(secp256k1_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0); } void run_nonce_function_bip340_tests(void) { @@ -59,10 +59,10 @@ void run_nonce_function_bip340_tests(void) { secp256k1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized); test_sha256_eq(&sha, &sha_optimized); - secp256k1_rand256(msg); - secp256k1_rand256(key); - secp256k1_rand256(pk); - secp256k1_rand256(aux_rand); + secp256k1_testrand256(msg); + secp256k1_testrand256(key); + secp256k1_testrand256(pk); + secp256k1_testrand256(aux_rand); /* Check that a bitflip in an argument results in different nonces. 
*/ args[0] = msg; @@ -124,10 +124,10 @@ void test_schnorrsig_api(void) { secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); - secp256k1_rand256(sk1); - secp256k1_rand256(sk2); - secp256k1_rand256(sk3); - secp256k1_rand256(msg); + secp256k1_testrand256(sk1); + secp256k1_testrand256(sk2); + secp256k1_testrand256(sk3); + secp256k1_testrand256(msg); CHECK(secp256k1_keypair_create(ctx, &keypairs[0], sk1) == 1); CHECK(secp256k1_keypair_create(ctx, &keypairs[1], sk2) == 1); CHECK(secp256k1_keypair_create(ctx, &keypairs[2], sk3) == 1); @@ -197,11 +197,11 @@ void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const un CHECK(secp256k1_keypair_create(ctx, &keypair, sk)); CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand)); - CHECK(memcmp(sig, expected_sig, 64) == 0); + CHECK(secp256k1_memcmp_var(sig, expected_sig, 64) == 0); CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); - CHECK(memcmp(&pk, &pk_expected, sizeof(pk)) == 0); + CHECK(secp256k1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk)); } @@ -675,19 +675,19 @@ void test_schnorrsig_sign(void) { unsigned char sig[64]; unsigned char zeros64[64] = { 0 }; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(ctx, &keypair, sk)); CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); /* Test different nonce functions */ memset(sig, 1, sizeof(sig)); CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0); - CHECK(memcmp(sig, zeros64, sizeof(sig)) == 0); + CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0); - CHECK(memcmp(sig, zeros64, sizeof(sig)) == 0); + CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0); CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1); - CHECK(memcmp(sig, zeros64, sizeof(sig)) != 0); + CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) != 0); } #define N_SIGS 3 @@ -703,12 +703,12 @@ void test_schnorrsig_sign_verify(void) { secp256k1_xonly_pubkey pk; secp256k1_scalar s; - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(ctx, &keypair, sk)); CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); for (i = 0; i < N_SIGS; i++) { - secp256k1_rand256(msg[i]); + secp256k1_testrand256(msg[i]); CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL)); CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], &pk)); } @@ -716,19 +716,19 @@ void test_schnorrsig_sign_verify(void) { { /* Flip a few bits in the signature and in the message and check that * verify and verify_batch (TODO) fail */ - size_t sig_idx = secp256k1_rand_int(N_SIGS); - size_t byte_idx = secp256k1_rand_int(32); - unsigned char xorbyte = secp256k1_rand_int(254)+1; + size_t sig_idx = secp256k1_testrand_int(N_SIGS); + size_t byte_idx = secp256k1_testrand_int(32); + unsigned char xorbyte = secp256k1_testrand_int(254)+1; sig[sig_idx][byte_idx] ^= xorbyte; CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); sig[sig_idx][byte_idx] ^= xorbyte; - byte_idx = secp256k1_rand_int(32); + byte_idx = secp256k1_testrand_int(32); 
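Editor's sketch: the taproot-style test in the next hunk ties the schnorrsig and tweak pieces together. A condensed version of that key-spend flow, assuming ctx plus 32-byte sk, msg and tweak buffers are already set up (in real taproot the tweak would be the BIP341 hash committing to the internal key, as the test's own comment notes):

    secp256k1_keypair keypair;
    secp256k1_xonly_pubkey internal_pk, output_pk;
    unsigned char output_pk_bytes[32], sig[64];
    CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
    CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
    /* Tweak the keypair; the wallet signs with the tweaked secret key. */
    CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
    CHECK(secp256k1_keypair_xonly_pub(ctx, &output_pk, NULL, &keypair) == 1);
    CHECK(secp256k1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1);
    /* Key spend: sign with the tweaked keypair, verify against the output key alone. */
    CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
    CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);
    CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1);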
sig[sig_idx][32+byte_idx] ^= xorbyte; CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); sig[sig_idx][32+byte_idx] ^= xorbyte; - byte_idx = secp256k1_rand_int(32); + byte_idx = secp256k1_testrand_int(32); msg[sig_idx][byte_idx] ^= xorbyte; CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); msg[sig_idx][byte_idx] ^= xorbyte; @@ -766,7 +766,7 @@ void test_schnorrsig_taproot(void) { unsigned char sig[64]; /* Create output key */ - secp256k1_rand256(sk); + secp256k1_testrand256(sk); CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1); CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1); /* In actual taproot the tweak would be hash of internal_pk */ @@ -776,7 +776,7 @@ void test_schnorrsig_taproot(void) { CHECK(secp256k1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1); /* Key spend */ - secp256k1_rand256(msg); + secp256k1_testrand256(msg); CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); /* Verify key spend */ CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h index 95d3e326c9..fb3fb187ce 100644 --- a/src/secp256k1/src/scalar.h +++ b/src/secp256k1/src/scalar.h @@ -102,12 +102,11 @@ static void secp256k1_scalar_order_get_num(secp256k1_num *r); /** Compare two scalars. */ static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b); -#ifdef USE_ENDOMORPHISM -/** Find r1 and r2 such that r1+r2*2^128 = a. */ -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a); -/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a); -#endif +/** Find r1 and r2 such that r1+r2*2^128 = k. */ +static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k); +/** Find r1 and r2 such that r1+r2*lambda = k, + * where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */ +static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k); /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. 
*/ static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift); diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h index 7f39927861..73cbd5e18a 100644 --- a/src/secp256k1/src/scalar_4x64_impl.h +++ b/src/secp256k1/src/scalar_4x64_impl.h @@ -912,18 +912,16 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) secp256k1_scalar_reduce_512(r, l); } -#ifdef USE_ENDOMORPHISM -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - r1->d[0] = a->d[0]; - r1->d[1] = a->d[1]; +static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { + r1->d[0] = k->d[0]; + r1->d[1] = k->d[1]; r1->d[2] = 0; r1->d[3] = 0; - r2->d[0] = a->d[2]; - r2->d[1] = a->d[3]; + r2->d[0] = k->d[2]; + r2->d[1] = k->d[3]; r2->d[2] = 0; r2->d[3] = 0; } -#endif SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; diff --git a/src/secp256k1/src/scalar_8x32_impl.h b/src/secp256k1/src/scalar_8x32_impl.h index f8c7fa7efa..6853f79ecc 100644 --- a/src/secp256k1/src/scalar_8x32_impl.h +++ b/src/secp256k1/src/scalar_8x32_impl.h @@ -672,26 +672,24 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) secp256k1_scalar_reduce_512(r, l); } -#ifdef USE_ENDOMORPHISM -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - r1->d[0] = a->d[0]; - r1->d[1] = a->d[1]; - r1->d[2] = a->d[2]; - r1->d[3] = a->d[3]; +static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { + r1->d[0] = k->d[0]; + r1->d[1] = k->d[1]; + r1->d[2] = k->d[2]; + r1->d[3] = k->d[3]; r1->d[4] = 0; r1->d[5] = 0; r1->d[6] = 0; r1->d[7] = 0; - r2->d[0] = a->d[4]; - r2->d[1] = a->d[5]; - r2->d[2] = a->d[6]; - r2->d[3] = a->d[7]; + r2->d[0] = k->d[4]; + r2->d[1] = k->d[5]; + r2->d[2] = k->d[6]; + r2->d[3] = k->d[7]; r2->d[4] = 0; r2->d[5] = 0; r2->d[6] = 0; r2->d[7] = 0; } -#endif SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h index 2ec04b1ae9..fc75891818 100644 --- a/src/secp256k1/src/scalar_impl.h +++ b/src/secp256k1/src/scalar_impl.h @@ -7,6 +7,10 @@ #ifndef SECP256K1_SCALAR_IMPL_H #define SECP256K1_SCALAR_IMPL_H +#ifdef VERIFY +#include <string.h> +#endif + #include "scalar.h" #include "util.h" @@ -252,37 +256,65 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc #endif } -#ifdef USE_ENDOMORPHISM +/* These parameters are generated using sage/gen_exhaustive_groups.sage. */ #if defined(EXHAUSTIVE_TEST_ORDER) +# if EXHAUSTIVE_TEST_ORDER == 13 +# define EXHAUSTIVE_TEST_LAMBDA 9 +# elif EXHAUSTIVE_TEST_ORDER == 199 +# define EXHAUSTIVE_TEST_LAMBDA 92 +# else +# error No known lambda for the specified exhaustive test group order. 
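Editor's aside: the exhaustive-test lambdas defined just above (9 for order 13, 92 for order 199) have to be nontrivial cube roots of unity modulo their group orders, mirroring the real curve's lambda described below. That is quick to confirm with plain integer arithmetic:

    #include <stdio.h>
    int main(void) {
        unsigned long cases[2][2] = { {13, 9}, {199, 92} }; /* {order, lambda} */
        int i;
        for (i = 0; i < 2; i++) {
            unsigned long n = cases[i][0], lam = cases[i][1];
            /* Both lines print 1, so lambda^3 == 1 (mod n) while lambda != 1. */
            printf("order %lu: lambda = %lu, lambda^3 mod order = %lu\n",
                   n, lam, ((lam * lam) % n) * lam % n);
        }
        return 0;
    }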
+# endif + /** - * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the - * full case we don't bother making k1 and k2 be small, we just want them to be + * Find r1 and r2 given k, such that r1 + r2 * lambda == k mod n; unlike in the + * full case we don't bother making r1 and r2 be small, we just want them to be * nontrivial to get full test coverage for the exhaustive tests. We therefore - * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda. + * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n). */ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER; - *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; +static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { + *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER; + *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; } #else /** * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where - * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, - * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72} + * lambda is: */ +static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST( + 0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL, + 0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL +); + +#ifdef VERIFY +static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k); +#endif + +/* + * Both lambda and beta are primitive cube roots of unity. That is lamba^3 == 1 mod n and + * beta^3 == 1 mod p, where n is the curve order and p is the field order. * - * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm - * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1 - * and k2 have a small size. - * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are: + * Futhermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are + * roots of X^2 + X + 1. Therefore lambda^2 + lamba == -1 mod n and beta^2 + beta == -1 mod p. + * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.) + * + * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring + * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi + * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a + * reduced basis {a1 + b1*l, a2 + b2*l} where * * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3} * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8} * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} * - * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives + * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm + * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1 + * and k2 are small in absolute value. 
+ * + * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and - * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2. + * compute r2 = k2 mod n, and r1 = k1 mod n = (k - r2 * lambda) mod n, avoiding the need for + * the constants a1 and a2. * * g1, g2 are precomputed constants used to replace division with a rounded multiplication * when decomposing the scalar for an endomorphism-based point multiplication. @@ -294,21 +326,21 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez), * Section 4.3 (here we use a somewhat higher-precision estimate): * d = a1*b2 - b1*a2 - * g1 = round((2^272)*b2/d) - * g2 = round((2^272)*b1/d) + * g1 = round(2^384 * b2/d) + * g2 = round(2^384 * (-b1)/d) * - * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found - * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda'). + * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2] + * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda). * - * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order). + * The function below splits k into r1 and r2, such that + * - r1 + lambda * r2 == k (mod n) + * - either r1 < 2^128 or -r1 mod n < 2^128 + * - either r2 < 2^128 or -r2 mod n < 2^128 + * + * See proof below. */ - -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { +static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { secp256k1_scalar c1, c2; - static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST( - 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL, - 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL - ); static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL @@ -318,25 +350,167 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL ); static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL, - 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL + 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL, + 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL ); static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL, - 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL + 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL, + 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL ); - VERIFY_CHECK(r1 != a); - VERIFY_CHECK(r2 != a); + VERIFY_CHECK(r1 != k); + VERIFY_CHECK(r2 != k); /* these _var calls are constant time since the shift amount is constant */ - secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272); - secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272); + secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384); + secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384); secp256k1_scalar_mul(&c1, &c1, &minus_b1); secp256k1_scalar_mul(&c2, &c2, &minus_b2); secp256k1_scalar_add(r2, &c1, &c2); - secp256k1_scalar_mul(r1, r2, &minus_lambda); - 
secp256k1_scalar_add(r1, r1, a); -} -#endif + secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda); + secp256k1_scalar_negate(r1, r1); + secp256k1_scalar_add(r1, r1, k); + +#ifdef VERIFY + secp256k1_scalar_split_lambda_verify(r1, r2, k); #endif +} + +#ifdef VERIFY +/* + * Proof for secp256k1_scalar_split_lambda's bounds. + * + * Let + * - epsilon1 = 2^256 * |g1/2^384 - b2/d| + * - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d| + * - c1 = round(k*g1/2^384) + * - c2 = round(k*g2/2^384) + * + * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1 + * + * |c1 - k*b2/d| + * = + * |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d| + * <= {triangle inequality} + * |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d| + * = + * |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d| + * < {rounding in c1 and 0 <= k < 2^256} + * 2^-1 + 2^256 * |g1/2^384 - b2/d| + * = {definition of epsilon1} + * 2^-1 + epsilon1 + * + * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2 + * + * |c2 - k*(-b1)/d| + * = + * |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d| + * <= {triangle inequality} + * |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d| + * = + * |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d| + * < {rounding in c2 and 0 <= k < 2^256} + * 2^-1 + 2^256 * |g2/2^384 - (-b1)/d| + * = {definition of epsilon2} + * 2^-1 + epsilon2 + * + * Let + * - k1 = k - c1*a1 - c2*a2 + * - k2 = - c1*b1 - c2*b2 + * + * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128 + * + * |k1| + * = {definition of k1} + * |k - c1*a1 - c2*a2| + * = {(a1*b2 - b1*a2)/n = 1} + * |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2| + * = + * |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)| + * <= {triangle inequality} + * a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2| + * < {Lemma 1 and Lemma 2} + * a1*(2^-1 + epslion1) + a2*(2^-1 + epsilon2) + * < {rounding up to an integer} + * (a1 + a2 + 1)/2 + * < {rounding up to a power of 2} + * 2^128 + * + * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128 + * + * |k2| + * = {definition of k2} + * |- c1*a1 - c2*a2| + * = {(b1*b2 - b1*b2)/n = 0} + * |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2| + * = + * |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)| + * <= {triangle inequality} + * (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2| + * < {Lemma 1 and Lemma 2} + * (-b1)*(2^-1 + epslion1) + b2*(2^-1 + epsilon2) + * < {rounding up to an integer} + * (-b1 + b2)/2 + 1 + * < {rounding up to a power of 2} + * 2^128 + * + * Let + * - r2 = k2 mod n + * - r1 = k - r2*lambda mod n. + * + * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n). + * + * Lemma 5: r1 == k1 mod n. + * + * r1 + * == {definition of r1 and r2} + * k - k2*lambda + * == {definition of k2} + * k - (- c1*b1 - c2*b2)*lambda + * == + * k + c1*b1*lambda + c2*b2*lambda + * == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n} + * k - c1*a1 - c2*a2 + * == {definition of k1} + * k1 + * + * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that + * + * - either r1 < 2^128 or -r1 mod n < 2^128 + * - either r2 < 2^128 or -r2 mod n < 2^128. + * + * Q.E.D. 
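Editor's sketch: the bound proven above is what makes the split useful, since k*P can then be evaluated as r1*P + r2*(lambda*P) with two half-length scalars, and lambda*P costs only a field multiplication of the x coordinate by beta. A spot check of the defining identity from the test side might look like this (random_scalar_order_test is assumed to be the helper already used in tests.c):

    secp256k1_scalar k, r1, r2, check;
    random_scalar_order_test(&k);
    secp256k1_scalar_split_lambda(&r1, &r2, &k);
    secp256k1_scalar_mul(&check, &secp256k1_const_lambda, &r2);
    secp256k1_scalar_add(&check, &check, &r1);
    /* r1 + lambda*r2 == k (mod n); VERIFY builds re-check this internally via
     * secp256k1_scalar_split_lambda_verify. */
    CHECK(secp256k1_scalar_eq(&check, &k));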
+ */ +static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) { + secp256k1_scalar s; + unsigned char buf1[32]; + unsigned char buf2[32]; + + /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */ + static const unsigned char k1_bound[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77 + }; + + /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */ + static const unsigned char k2_bound[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed + }; + + secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2); + secp256k1_scalar_add(&s, &s, r1); + VERIFY_CHECK(secp256k1_scalar_eq(&s, k)); + + secp256k1_scalar_negate(&s, r1); + secp256k1_scalar_get_b32(buf1, r1); + secp256k1_scalar_get_b32(buf2, &s); + VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0); + + secp256k1_scalar_negate(&s, r2); + secp256k1_scalar_get_b32(buf1, r2); + secp256k1_scalar_get_b32(buf2, &s); + VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0); +} +#endif /* VERIFY */ +#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */ #endif /* SECP256K1_SCALAR_IMPL_H */ diff --git a/src/secp256k1/src/scalar_low_impl.h b/src/secp256k1/src/scalar_low_impl.h index b79cf1ff6c..a615ec074b 100644 --- a/src/secp256k1/src/scalar_low_impl.h +++ b/src/secp256k1/src/scalar_low_impl.h @@ -48,14 +48,17 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int } static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { - const int base = 0x100 % EXHAUSTIVE_TEST_ORDER; int i; + int over = 0; *r = 0; for (i = 0; i < 32; i++) { - *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER; + *r = (*r * 0x100) + b32[i]; + if (*r >= EXHAUSTIVE_TEST_ORDER) { + over = 1; + *r %= EXHAUSTIVE_TEST_ORDER; + } } - /* just deny overflow, it basically always happens */ - if (overflow) *overflow = 0; + if (overflow) *overflow = over; } static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { diff --git a/src/secp256k1/src/scratch_impl.h b/src/secp256k1/src/scratch_impl.h index b205620224..f381e2e322 100644 --- a/src/secp256k1/src/scratch_impl.h +++ b/src/secp256k1/src/scratch_impl.h @@ -26,7 +26,7 @@ static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* err static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) { if (scratch != NULL) { VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */ - if (memcmp(scratch->magic, "scratch", 8) != 0) { + if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) { secp256k1_callback_call(error_callback, "invalid scratch space"); return; } @@ -36,7 +36,7 @@ static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, } static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) { - if (memcmp(scratch->magic, "scratch", 8) != 0) { + if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) { secp256k1_callback_call(error_callback, "invalid scratch space"); return 0; } @@ -44,7 +44,7 
@@ static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callb } static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) { - if (memcmp(scratch->magic, "scratch", 8) != 0) { + if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) { secp256k1_callback_call(error_callback, "invalid scratch space"); return; } @@ -56,7 +56,7 @@ static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_c } static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) { - if (memcmp(scratch->magic, "scratch", 8) != 0) { + if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) { secp256k1_callback_call(error_callback, "invalid scratch space"); return 0; } @@ -81,7 +81,7 @@ static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, s } size = rounded_size; - if (memcmp(scratch->magic, "scratch", 8) != 0) { + if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) { secp256k1_callback_call(error_callback, "invalid scratch space"); return NULL; } diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c index eaafb3a21d..dae506d08c 100644 --- a/src/secp256k1/src/secp256k1.c +++ b/src/secp256k1/src/secp256k1.c @@ -284,6 +284,9 @@ int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pu if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) { return 0; } + if (!secp256k1_ge_is_in_correct_subgroup(&Q)) { + return 0; + } secp256k1_pubkey_save(pubkey, &Q); secp256k1_ge_clear(&Q); return 1; diff --git a/src/secp256k1/src/selftest.h b/src/secp256k1/src/selftest.h index 885983aa20..0e37510c1e 100644 --- a/src/secp256k1/src/selftest.h +++ b/src/secp256k1/src/selftest.h @@ -22,7 +22,7 @@ static int secp256k1_selftest_sha256(void) { secp256k1_sha256_initialize(&hasher); secp256k1_sha256_write(&hasher, (const unsigned char*)input63, 63); secp256k1_sha256_finalize(&hasher, out); - return memcmp(out, output32, 32) == 0; + return secp256k1_memcmp_var(out, output32, 32) == 0; } static int secp256k1_selftest(void) { diff --git a/src/secp256k1/src/testrand.h b/src/secp256k1/src/testrand.h index bcbe15a6f1..a76003d5b8 100644 --- a/src/secp256k1/src/testrand.h +++ b/src/secp256k1/src/testrand.h @@ -14,28 +14,34 @@ /* A non-cryptographic RNG used only for test infrastructure. */ /** Seed the pseudorandom number generator for testing. */ -SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16); +SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16); /** Generate a pseudorandom number in the range [0..2**32-1]. */ -static uint32_t secp256k1_rand32(void); +static uint32_t secp256k1_testrand32(void); /** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or * more. */ -static uint32_t secp256k1_rand_bits(int bits); +static uint32_t secp256k1_testrand_bits(int bits); /** Generate a pseudorandom number in the range [0..range-1]. */ -static uint32_t secp256k1_rand_int(uint32_t range); +static uint32_t secp256k1_testrand_int(uint32_t range); /** Generate a pseudorandom 32-byte array. */ -static void secp256k1_rand256(unsigned char *b32); +static void secp256k1_testrand256(unsigned char *b32); /** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. 
*/ -static void secp256k1_rand256_test(unsigned char *b32); +static void secp256k1_testrand256_test(unsigned char *b32); /** Generate pseudorandom bytes with long sequences of zero and one bits. */ -static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len); +static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len); /** Flip a single random bit in a byte array */ -static void secp256k1_rand_flip(unsigned char *b, size_t len); +static void secp256k1_testrand_flip(unsigned char *b, size_t len); + +/** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */ +static void secp256k1_testrand_init(const char* hexseed); + +/** Print final test information. */ +static void secp256k1_testrand_finish(void); #endif /* SECP256K1_TESTRAND_H */ diff --git a/src/secp256k1/src/testrand_impl.h b/src/secp256k1/src/testrand_impl.h index dfb658d9c6..3392566329 100644 --- a/src/secp256k1/src/testrand_impl.h +++ b/src/secp256k1/src/testrand_impl.h @@ -8,6 +8,7 @@ #define SECP256K1_TESTRAND_IMPL_H #include <stdint.h> +#include <stdio.h> #include <string.h> #include "testrand.h" @@ -19,11 +20,11 @@ static int secp256k1_test_rng_precomputed_used = 8; static uint64_t secp256k1_test_rng_integer; static int secp256k1_test_rng_integer_bits_left = 0; -SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) { +SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16) { secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16); } -SECP256K1_INLINE static uint32_t secp256k1_rand32(void) { +SECP256K1_INLINE static uint32_t secp256k1_testrand32(void) { if (secp256k1_test_rng_precomputed_used == 8) { secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed)); secp256k1_test_rng_precomputed_used = 0; @@ -31,10 +32,10 @@ SECP256K1_INLINE static uint32_t secp256k1_rand32(void) { return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++]; } -static uint32_t secp256k1_rand_bits(int bits) { +static uint32_t secp256k1_testrand_bits(int bits) { uint32_t ret; if (secp256k1_test_rng_integer_bits_left < bits) { - secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left); + secp256k1_test_rng_integer |= (((uint64_t)secp256k1_testrand32()) << secp256k1_test_rng_integer_bits_left); secp256k1_test_rng_integer_bits_left += 32; } ret = secp256k1_test_rng_integer; @@ -44,7 +45,7 @@ static uint32_t secp256k1_rand_bits(int bits) { return ret; } -static uint32_t secp256k1_rand_int(uint32_t range) { +static uint32_t secp256k1_testrand_int(uint32_t range) { /* We want a uniform integer between 0 and range-1, inclusive. * B is the smallest number such that range <= 2**B. * two mechanisms implemented here: @@ -76,25 +77,25 @@ static uint32_t secp256k1_rand_int(uint32_t range) { mult = 1; } while(1) { - uint32_t x = secp256k1_rand_bits(bits); + uint32_t x = secp256k1_testrand_bits(bits); if (x < trange) { return (mult == 1) ? 
x : (x % range); } } } -static void secp256k1_rand256(unsigned char *b32) { +static void secp256k1_testrand256(unsigned char *b32) { secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32); } -static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) { +static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len) { size_t bits = 0; memset(bytes, 0, len); while (bits < len * 8) { int now; uint32_t val; - now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31; - val = secp256k1_rand_bits(1); + now = 1 + (secp256k1_testrand_bits(6) * secp256k1_testrand_bits(5) + 16) / 31; + val = secp256k1_testrand_bits(1); while (now > 0 && bits < len * 8) { bytes[bits / 8] |= val << (bits % 8); now--; @@ -103,12 +104,55 @@ static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) { } } -static void secp256k1_rand256_test(unsigned char *b32) { - secp256k1_rand_bytes_test(b32, 32); +static void secp256k1_testrand256_test(unsigned char *b32) { + secp256k1_testrand_bytes_test(b32, 32); } -static void secp256k1_rand_flip(unsigned char *b, size_t len) { - b[secp256k1_rand_int(len)] ^= (1 << secp256k1_rand_int(8)); +static void secp256k1_testrand_flip(unsigned char *b, size_t len) { + b[secp256k1_testrand_int(len)] ^= (1 << secp256k1_testrand_int(8)); +} + +static void secp256k1_testrand_init(const char* hexseed) { + unsigned char seed16[16] = {0}; + if (hexseed && strlen(hexseed) != 0) { + int pos = 0; + while (pos < 16 && hexseed[0] != 0 && hexseed[1] != 0) { + unsigned short sh; + if ((sscanf(hexseed, "%2hx", &sh)) == 1) { + seed16[pos] = sh; + } else { + break; + } + hexseed += 2; + pos++; + } + } else { + FILE *frand = fopen("/dev/urandom", "r"); + if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) { + uint64_t t = time(NULL) * (uint64_t)1337; + fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n"); + seed16[0] ^= t; + seed16[1] ^= t >> 8; + seed16[2] ^= t >> 16; + seed16[3] ^= t >> 24; + seed16[4] ^= t >> 32; + seed16[5] ^= t >> 40; + seed16[6] ^= t >> 48; + seed16[7] ^= t >> 56; + } + if (frand) { + fclose(frand); + } + } + + printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); + secp256k1_testrand_seed(seed16); +} + +static void secp256k1_testrand_finish(void) { + unsigned char run32[32]; + secp256k1_testrand256(run32); + printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); } #endif /* SECP256K1_TESTRAND_IMPL_H */ diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c index 4780e9319b..bb4b5b4c07 100644 --- a/src/secp256k1/src/tests.c +++ b/src/secp256k1/src/tests.c @@ -54,7 +54,7 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) { void random_field_element_test(secp256k1_fe *fe) { do { unsigned char b32[32]; - secp256k1_rand256_test(b32); + secp256k1_testrand256_test(b32); if (secp256k1_fe_set_b32(fe, b32)) { break; } @@ -63,7 +63,7 @@ void random_field_element_test(secp256k1_fe *fe) { void random_field_element_magnitude(secp256k1_fe *fe) { secp256k1_fe zero; - int n = secp256k1_rand_int(9); + int n = 
secp256k1_testrand_int(9); secp256k1_fe_normalize(fe); if (n == 0) { return; @@ -81,11 +81,12 @@ void random_group_element_test(secp256k1_ge *ge) { secp256k1_fe fe; do { random_field_element_test(&fe); - if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) { + if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_testrand_bits(1))) { secp256k1_fe_normalize(&ge->y); break; } } while(1); + ge->infinity = 0; } void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) { @@ -107,7 +108,7 @@ void random_scalar_order_test(secp256k1_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - secp256k1_rand256_test(b32); + secp256k1_testrand256_test(b32); secp256k1_scalar_set_b32(num, b32, &overflow); if (overflow || secp256k1_scalar_is_zero(num)) { continue; @@ -120,7 +121,7 @@ void random_scalar_order(secp256k1_scalar *num) { do { unsigned char b32[32]; int overflow = 0; - secp256k1_rand256(b32); + secp256k1_testrand256(b32); secp256k1_scalar_set_b32(num, b32, &overflow); if (overflow || secp256k1_scalar_is_zero(num)) { continue; @@ -441,14 +442,14 @@ void run_sha256_tests(void) { secp256k1_sha256_initialize(&hasher); secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); secp256k1_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); + CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = secp256k1_rand_int(strlen(inputs[i])); + int split = secp256k1_testrand_int(strlen(inputs[i])); secp256k1_sha256_initialize(&hasher); secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); secp256k1_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); + CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -485,14 +486,14 @@ void run_hmac_sha256_tests(void) { secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); secp256k1_hmac_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); + CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { - int split = secp256k1_rand_int(strlen(inputs[i])); + int split = secp256k1_testrand_int(strlen(inputs[i])); secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); secp256k1_hmac_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); + CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); } } } @@ -519,21 +520,21 @@ void run_rfc6979_hmac_sha256_tests(void) { secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64); for (i = 0; i < 3; i++) { secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(memcmp(out, out1[i], 32) == 0); + CHECK(secp256k1_memcmp_var(out, out1[i], 32) == 0); } secp256k1_rfc6979_hmac_sha256_finalize(&rng); secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65); for (i = 0; i < 3; i++) { secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(memcmp(out, out1[i], 32) != 0); + CHECK(secp256k1_memcmp_var(out, out1[i], 32) != 0); } secp256k1_rfc6979_hmac_sha256_finalize(&rng); secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64); for (i = 0; i < 3; 
i++) { secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(memcmp(out, out2[i], 32) == 0); + CHECK(secp256k1_memcmp_var(out, out2[i], 32) == 0); } secp256k1_rfc6979_hmac_sha256_finalize(&rng); } @@ -557,7 +558,7 @@ void test_rand_bits(int rand32, int bits) { /* Multiply the output of all rand calls with the odd number m, which should not change the uniformity of its distribution. */ for (i = 0; i < rounds[usebits]; i++) { - uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits)); + uint32_t r = (rand32 ? secp256k1_testrand32() : secp256k1_testrand_bits(bits)); CHECK((((uint64_t)r) >> bits) == 0); for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { uint32_t rm = r * mults[m]; @@ -582,7 +583,7 @@ void test_rand_int(uint32_t range, uint32_t subrange) { uint64_t x = 0; CHECK((range % subrange) == 0); for (i = 0; i < rounds; i++) { - uint32_t r = secp256k1_rand_int(range); + uint32_t r = secp256k1_testrand_int(range); CHECK(r < range); r = r % subrange; x |= (((uint64_t)1) << r); @@ -614,7 +615,7 @@ void run_rand_int(void) { #ifndef USE_NUM_NONE void random_num_negate(secp256k1_num *num) { - if (secp256k1_rand_bits(1)) { + if (secp256k1_testrand_bits(1)) { secp256k1_num_negate(num); } } @@ -658,11 +659,11 @@ void test_num_add_sub(void) { secp256k1_num n2; secp256k1_num n1p2, n2p1, n1m2, n2m1; random_num_order_test(&n1); /* n1 = R1 */ - if (secp256k1_rand_bits(1)) { + if (secp256k1_testrand_bits(1)) { random_num_negate(&n1); } random_num_order_test(&n2); /* n2 = R2 */ - if (secp256k1_rand_bits(1)) { + if (secp256k1_testrand_bits(1)) { random_num_negate(&n2); } secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ @@ -853,7 +854,7 @@ void scalar_test(void) { while (i < 256) { secp256k1_scalar t; int j; - int now = secp256k1_rand_int(15) + 1; + int now = secp256k1_testrand_int(15) + 1; if (now + i > 256) { now = 256 - i; } @@ -930,7 +931,7 @@ void scalar_test(void) { secp256k1_num rnum; secp256k1_num rnum2; unsigned char cone[1] = {0x01}; - unsigned int shift = 256 + secp256k1_rand_int(257); + unsigned int shift = 256 + secp256k1_testrand_int(257); secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift); secp256k1_num_mul(&rnum, &s1num, &s2num); secp256k1_num_shift(&rnum, shift - 1); @@ -948,7 +949,7 @@ void scalar_test(void) { random_scalar_order_test(&r); for (i = 0; i < 100; ++i) { int low; - int shift = 1 + secp256k1_rand_int(15); + int shift = 1 + secp256k1_testrand_int(15); int expected = r.d[0] % (1 << shift); low = secp256k1_scalar_shr_int(&r, shift); CHECK(expected == low); @@ -996,7 +997,7 @@ void scalar_test(void) { secp256k1_scalar b; int i; /* Test add_bit. */ - int bit = secp256k1_rand_bits(8); + int bit = secp256k1_testrand_bits(8); secp256k1_scalar_set_int(&b, 1); CHECK(secp256k1_scalar_is_one(&b)); for (i = 0; i < bit; i++) { @@ -1157,7 +1158,7 @@ void run_scalar_tests(void) { secp256k1_scalar_set_b32(&scalar, bin, &overflow); CHECK(overflow == 0); secp256k1_scalar_get_b32(bin_tmp, &scalar); - CHECK(memcmp(bin, bin_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(bin, bin_tmp, 32) == 0); /* A scalar set to all 1s should overflow. 
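The scalar_low_impl.h hunk earlier in this section changes secp256k1_scalar_set_b32 for the exhaustive-test scalar type so that it reports overflow while reducing, instead of always reporting none; that is what checks like the "all 1s should overflow" case above rely on. A standalone sketch of the same reduce-and-flag loop over a toy order (13 here, purely illustrative; the real code uses EXHAUSTIVE_TEST_ORDER):

#include <stdio.h>
#include <stdint.h>

#define TOY_ORDER 13u  /* stand-in for EXHAUSTIVE_TEST_ORDER, illustrative only */

/* Parse 32 big-endian bytes into a value mod TOY_ORDER, flagging whether any
 * reduction was needed along the way (i.e. the input was >= the order). */
static uint32_t toy_scalar_set_b32(const unsigned char *b32, int *overflow) {
    uint32_t r = 0;
    int over = 0;
    int i;
    for (i = 0; i < 32; i++) {
        r = (r * 0x100) + b32[i];
        if (r >= TOY_ORDER) {
            over = 1;
            r %= TOY_ORDER;
        }
    }
    if (overflow) *overflow = over;
    return r;
}

int main(void) {
    unsigned char all_ones[32];
    unsigned char small[32] = {0};
    int over, i;
    uint32_t s;
    for (i = 0; i < 32; i++) all_ones[i] = 0xFF;
    small[31] = 7;
    s = toy_scalar_set_b32(all_ones, &over);
    printf("all-ones -> %u, overflow=%d\n", s, over);  /* overflow=1 */
    s = toy_scalar_set_b32(small, &over);
    printf("7        -> %u, overflow=%d\n", s, over);  /* 7, overflow=0 */
    return 0;
}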
*/ memset(bin, 0xFF, 32); @@ -1767,7 +1768,7 @@ void run_scalar_tests(void) { void random_fe(secp256k1_fe *x) { unsigned char bin[32]; do { - secp256k1_rand256(bin); + secp256k1_testrand256(bin); if (secp256k1_fe_set_b32(x, bin)) { return; } @@ -1777,7 +1778,7 @@ void random_fe(secp256k1_fe *x) { void random_fe_test(secp256k1_fe *x) { unsigned char bin[32]; do { - secp256k1_rand256_test(bin); + secp256k1_testrand256_test(bin); if (secp256k1_fe_set_b32(x, bin)) { return; } @@ -1845,18 +1846,18 @@ void run_field_convert(void) { CHECK(secp256k1_fe_equal_var(&fe, &fe2)); /* Check conversion from fe. */ secp256k1_fe_get_b32(b322, &fe); - CHECK(memcmp(b322, b32, 32) == 0); + CHECK(secp256k1_memcmp_var(b322, b32, 32) == 0); secp256k1_fe_to_storage(&fes2, &fe); - CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0); + CHECK(secp256k1_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); } -int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) { +int fe_secp256k1_memcmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { secp256k1_fe t = *b; #ifdef VERIFY t.magnitude = a->magnitude; t.normalized = a->normalized; #endif - return memcmp(a, &t, sizeof(secp256k1_fe)); + return secp256k1_memcmp_var(a, &t, sizeof(secp256k1_fe)); } void run_field_misc(void) { @@ -1882,13 +1883,13 @@ void run_field_misc(void) { CHECK(x.normalized && x.magnitude == 1); #endif secp256k1_fe_cmov(&x, &x, 1); - CHECK(fe_memcmp(&x, &z) != 0); - CHECK(fe_memcmp(&x, &q) == 0); + CHECK(fe_secp256k1_memcmp_var(&x, &z) != 0); + CHECK(fe_secp256k1_memcmp_var(&x, &q) == 0); secp256k1_fe_cmov(&q, &z, 1); #ifdef VERIFY CHECK(!q.normalized && q.magnitude == z.magnitude); #endif - CHECK(fe_memcmp(&q, &z) == 0); + CHECK(fe_secp256k1_memcmp_var(&q, &z) == 0); secp256k1_fe_normalize_var(&x); secp256k1_fe_normalize_var(&z); CHECK(!secp256k1_fe_equal_var(&x, &z)); @@ -1912,9 +1913,9 @@ void run_field_misc(void) { secp256k1_fe_to_storage(&zs, &z); secp256k1_fe_storage_cmov(&zs, &xs, 0); secp256k1_fe_storage_cmov(&zs, &zs, 1); - CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0); + CHECK(secp256k1_memcmp_var(&xs, &zs, sizeof(xs)) != 0); secp256k1_fe_storage_cmov(&ys, &xs, 1); - CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0); + CHECK(secp256k1_memcmp_var(&xs, &ys, sizeof(xs)) == 0); secp256k1_fe_from_storage(&x, &xs); secp256k1_fe_from_storage(&y, &ys); secp256k1_fe_from_storage(&z, &zs); @@ -1970,7 +1971,7 @@ void run_field_inv_all_var(void) { secp256k1_fe_inv_all_var(xi, x, 0); for (i = 0; i < count; i++) { size_t j; - size_t len = secp256k1_rand_int(15) + 1; + size_t len = secp256k1_testrand_int(15) + 1; for (j = 0; j < len; j++) { random_fe_non_zero(&x[j]); } @@ -2101,17 +2102,12 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { void test_ge(void) { int i, i1; -#ifdef USE_ENDOMORPHISM int runs = 6; -#else - int runs = 4; -#endif - /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4). - * The second in each pair of identical points uses a random Z coordinate in the Jacobian form. - * All magnitudes are randomized. - * All 17*17 combinations of points are added to each other, using all applicable methods. - * - * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well. + /* 25 points are used: + * - infinity + * - for each of four random points p1 p2 p3 p4, we add the point, its + * negation, and then those two again but with randomized Z coordinate. + * - The same is then done for lambda*p1 and lambda^2*p1. 
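Throughout these test hunks, plain memcmp on byte buffers and structs is swapped for secp256k1_memcmp_var. Its definition is not part of this section; going only by the library's naming convention, a variable-time byte-wise comparison with memcmp-style semantics would look roughly like the following sketch (not the library's actual code):

#include <stddef.h>
#include <stdio.h>

/* Variable-time comparison with memcmp-style ordering semantics (sketch only). */
static int memcmp_var_sketch(const void *s1, const void *s2, size_t n) {
    const unsigned char *p1 = s1, *p2 = s2;
    size_t i;
    for (i = 0; i < n; i++) {
        int diff = p1[i] - p2[i];
        if (diff != 0) {
            return diff;
        }
    }
    return 0;
}

int main(void) {
    unsigned char a[4] = {1, 2, 3, 4};
    unsigned char b[4] = {1, 2, 9, 4};
    printf("%d\n", memcmp_var_sketch(a, a, 4));  /* 0: equal */
    printf("%d\n", memcmp_var_sketch(a, b, 4));  /* negative: a sorts before b */
    return 0;
}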
*/ secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs)); secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs)); @@ -2126,14 +2122,12 @@ void test_ge(void) { int j; secp256k1_ge g; random_group_element_test(&g); -#ifdef USE_ENDOMORPHISM if (i >= runs - 2) { secp256k1_ge_mul_lambda(&g, &ge[1]); } if (i >= runs - 1) { secp256k1_ge_mul_lambda(&g, &g); } -#endif ge[1 + 4 * i] = g; ge[2 + 4 * i] = g; secp256k1_ge_neg(&ge[3 + 4 * i], &g); @@ -2262,7 +2256,7 @@ void test_ge(void) { gej_shuffled[i] = gej[i]; } for (i = 0; i < 4 * runs + 1; i++) { - int swap = i + secp256k1_rand_int(4 * runs + 1 - i); + int swap = i + secp256k1_testrand_int(4 * runs + 1 - i); if (swap != i) { secp256k1_gej t = gej_shuffled[i]; gej_shuffled[i] = gej_shuffled[swap]; @@ -2448,7 +2442,7 @@ void test_ec_combine(void) { secp256k1_ge_set_gej(&Q, &Qj); secp256k1_pubkey_save(&sd, &Q); CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1); - CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0); + CHECK(secp256k1_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); } } @@ -2614,7 +2608,6 @@ void test_point_times_order(const secp256k1_gej *point) { secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ secp256k1_gej_add_var(&res1, &res1, &res2, NULL); CHECK(secp256k1_gej_is_infinity(&res1)); - CHECK(secp256k1_gej_is_valid_var(&res1) == 0); secp256k1_ge_set_gej(&res3, &res1); CHECK(secp256k1_ge_is_infinity(&res3)); CHECK(secp256k1_ge_is_valid_var(&res3) == 0); @@ -2633,6 +2626,87 @@ void test_point_times_order(const secp256k1_gej *point) { ge_equals_ge(&res3, &secp256k1_ge_const_g); } +/* These scalars reach large (in absolute value) outputs when fed to secp256k1_scalar_split_lambda. 
+ * + * They are computed as: + * - For a in [-2, -1, 0, 1, 2]: + * - For b in [-3, -1, 1, 3]: + * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER + */ +static const secp256k1_scalar scalars_near_split_bounds[20] = { + SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fc), + SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fd), + SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fe), + SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6ff), + SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632d), + SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632e), + SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632f), + SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf76330), + SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b209f), + SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a0), + SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a1), + SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a2), + SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede11), + SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede12), + SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede13), + SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede14), + SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a42), + SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a43), + SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a44), + SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45) +}; + +void test_ecmult_target(const secp256k1_scalar* target, int mode) { + /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */ + secp256k1_scalar n1, n2; + secp256k1_ge p; + secp256k1_gej pj, p1j, p2j, ptj; + static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); + + /* Generate random n1,n2 such that n1+n2 = -target. */ + random_scalar_order_test(&n1); + secp256k1_scalar_add(&n2, &n1, target); + secp256k1_scalar_negate(&n2, &n2); + + /* Generate a random input point. 
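test_ecmult_target above splits the troublesome scalar into two shares: n1 is random and n2 is chosen so that n1 + n2 = -target, which forces n1*P + n2*P + target*P onto the point at infinity. The share construction itself is ordinary modular arithmetic; a sketch with a small stand-in order (the real test of course uses the group order and EC point additions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_ORDER 1000003u  /* stand-in order, illustrative only */

static uint32_t neg_mod(uint32_t a) { return (TOY_ORDER - a % TOY_ORDER) % TOY_ORDER; }

int main(void) {
    uint32_t target = 123456 % TOY_ORDER;
    uint32_t n1 = (uint32_t)rand() % TOY_ORDER;         /* random share */
    uint32_t n2 = neg_mod((n1 + target) % TOY_ORDER);   /* n2 = -(n1 + target) */
    uint32_t sum = (uint32_t)(((uint64_t)n1 + n2 + target) % TOY_ORDER);
    /* sum is always 0, mirroring the is-infinity check at the end of the test. */
    printf("n1=%u n2=%u target=%u sum=%u\n", n1, n2, target, sum);
    return sum == 0 ? 0 : 1;
}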
*/ + if (mode != 0) { + random_group_element_test(&p); + secp256k1_gej_set_ge(&pj, &p); + } + + /* EC multiplications */ + if (mode == 0) { + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1); + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2); + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target); + } else if (mode == 1) { + secp256k1_ecmult(&ctx->ecmult_ctx, &p1j, &pj, &n1, &zero); + secp256k1_ecmult(&ctx->ecmult_ctx, &p2j, &pj, &n2, &zero); + secp256k1_ecmult(&ctx->ecmult_ctx, &ptj, &pj, target, &zero); + } else { + secp256k1_ecmult_const(&p1j, &p, &n1, 256); + secp256k1_ecmult_const(&p2j, &p, &n2, 256); + secp256k1_ecmult_const(&ptj, &p, target, 256); + } + + /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n1-n1-n2)*P = 0. */ + secp256k1_gej_add_var(&ptj, &ptj, &p1j, NULL); + secp256k1_gej_add_var(&ptj, &ptj, &p2j, NULL); + CHECK(secp256k1_gej_is_infinity(&ptj)); +} + +void run_ecmult_near_split_bound(void) { + int i; + unsigned j; + for (i = 0; i < 4*count; ++i) { + for (j = 0; j < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++j) { + test_ecmult_target(&scalars_near_split_bounds[j], 0); + test_ecmult_target(&scalars_near_split_bounds[j], 1); + test_ecmult_target(&scalars_near_split_bounds[j], 2); + } + } +} + void run_point_times_order(void) { int i; secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); @@ -2646,7 +2720,6 @@ void run_point_times_order(void) { secp256k1_gej j; CHECK(secp256k1_ge_is_valid_var(&p)); secp256k1_gej_set_ge(&j, &p); - CHECK(secp256k1_gej_is_valid_var(&j)); test_point_times_order(&j); } secp256k1_fe_sqr(&x, &x); @@ -3042,12 +3115,10 @@ void test_secp256k1_pippenger_bucket_window_inv(void) { CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0); for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) { -#ifdef USE_ENDOMORPHISM /* Bucket_window of 8 is not used with endo */ if (i == 8) { continue; } -#endif CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i); if (i != PIPPENGER_MAX_BUCKET_WINDOW) { CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i); @@ -3060,7 +3131,7 @@ void test_secp256k1_pippenger_bucket_window_inv(void) { * for a given scratch space. 
*/ void test_ecmult_multi_pippenger_max_points(void) { - size_t scratch_size = secp256k1_rand_int(256); + size_t scratch_size = secp256k1_testrand_int(256); size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); secp256k1_scratch *scratch; size_t n_points_supported; @@ -3290,13 +3361,10 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) { secp256k1_scalar_set_int(&x, 0); secp256k1_scalar_set_int(&shift, 1 << w); - /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ -#ifdef USE_ENDOMORPHISM for (i = 0; i < 16; ++i) { secp256k1_scalar_shr_int(&num, 8); } bits = 128; -#endif skew = secp256k1_wnaf_const(wnaf, &num, w, bits); for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { @@ -3331,12 +3399,9 @@ void test_fixed_wnaf(const secp256k1_scalar *number, int w) { secp256k1_scalar_set_int(&x, 0); secp256k1_scalar_set_int(&shift, 1 << w); - /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ -#ifdef USE_ENDOMORPHISM for (i = 0; i < 16; ++i) { secp256k1_scalar_shr_int(&num, 8); } -#endif skew = secp256k1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { @@ -3520,7 +3585,7 @@ void test_ecmult_gen_blind(void) { secp256k1_ge pge; random_scalar_order_test(&key); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); - secp256k1_rand256(seed32); + secp256k1_testrand256(seed32); b = ctx->ecmult_gen_ctx.blind; i = ctx->ecmult_gen_ctx.initial; secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); @@ -3552,16 +3617,18 @@ void run_ecmult_gen_blind(void) { } } -#ifdef USE_ENDOMORPHISM /***** ENDOMORPHISH TESTS *****/ -void test_scalar_split(void) { - secp256k1_scalar full; - secp256k1_scalar s1, slam; +void test_scalar_split(const secp256k1_scalar* full) { + secp256k1_scalar s, s1, slam; const unsigned char zero[32] = {0}; unsigned char tmp[32]; - random_scalar_order_test(&full); - secp256k1_scalar_split_lambda(&s1, &slam, &full); + secp256k1_scalar_split_lambda(&s1, &slam, full); + + /* check slam*lambda + s1 == full */ + secp256k1_scalar_mul(&s, &secp256k1_const_lambda, &slam); + secp256k1_scalar_add(&s, &s, &s1); + CHECK(secp256k1_scalar_eq(&s, full)); /* check that both are <= 128 bits in size */ if (secp256k1_scalar_is_high(&s1)) { @@ -3572,15 +3639,32 @@ void test_scalar_split(void) { } secp256k1_scalar_get_b32(tmp, &s1); - CHECK(memcmp(zero, tmp, 16) == 0); + CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0); secp256k1_scalar_get_b32(tmp, &slam); - CHECK(memcmp(zero, tmp, 16) == 0); + CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0); } + void run_endomorphism_tests(void) { - test_scalar_split(); + unsigned i; + static secp256k1_scalar s; + test_scalar_split(&secp256k1_scalar_zero); + test_scalar_split(&secp256k1_scalar_one); + secp256k1_scalar_negate(&s,&secp256k1_scalar_one); + test_scalar_split(&s); + test_scalar_split(&secp256k1_const_lambda); + secp256k1_scalar_add(&s, &secp256k1_const_lambda, &secp256k1_scalar_one); + test_scalar_split(&s); + + for (i = 0; i < 100U * count; ++i) { + secp256k1_scalar full; + random_scalar_order_test(&full); + test_scalar_split(&full); + } + for (i = 0; i < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++i) { + test_scalar_split(&scalars_near_split_bounds[i]); + } } -#endif void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { unsigned char pubkeyc[65]; @@ -3622,7 +3706,7 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali 
CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 33); - CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0); + CHECK(secp256k1_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0])); if (ypass) { /* This test isn't always done because we decode with alternative signs, so the y won't match. */ @@ -3638,7 +3722,7 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali VG_CHECK(pubkeyo, outl); CHECK(outl == 65); CHECK(pubkeyo[0] == 4); - CHECK(memcmp(&pubkeyo[1], input, 64) == 0); + CHECK(secp256k1_memcmp_var(&pubkeyo[1], input, 64) == 0); } CHECK(ecount == 0); } else { @@ -4007,7 +4091,7 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* Maximum value is too large, reject. */ memset(ctmp, 255, 32); CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); @@ -4015,7 +4099,7 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* Zero is too small, reject. */ memset(ctmp, 0, 32); CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); @@ -4023,7 +4107,7 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* One must be accepted. */ ctmp[31] = 0x01; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); @@ -4031,7 +4115,7 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); pubkey_one = pubkey; /* Group order + 1 is too large, reject. */ memcpy(ctmp, orderc, 32); @@ -4041,7 +4125,7 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* -1 must be accepted. */ ctmp[31] = 0x40; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); @@ -4049,20 +4133,20 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); pubkey_negone = pubkey; /* Tweak of zero leaves the value unchanged. 
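The key-creation checks above encode the rule that a secret key is valid exactly when it lies in [1, order-1]: zero and anything at or above the group order are rejected, while 1 and order-1 (the "-1" key) are accepted. A toy range check making that rule explicit (hypothetical helper over a small stand-in order):

#include <stdio.h>
#include <stdint.h>

#define TOY_ORDER 0xFFFFFFEFu  /* stand-in group order, illustrative only */

/* Valid secret keys are exactly the scalars in [1, order-1]. */
static int toy_seckey_verify(uint32_t k) {
    return k != 0 && k < TOY_ORDER;
}

int main(void) {
    printf("0       -> %d\n", toy_seckey_verify(0));              /* rejected */
    printf("1       -> %d\n", toy_seckey_verify(1));              /* accepted */
    printf("order-1 -> %d\n", toy_seckey_verify(TOY_ORDER - 1));  /* accepted: the "-1" key */
    printf("order   -> %d\n", toy_seckey_verify(TOY_ORDER));      /* rejected */
    printf("order+1 -> %d\n", toy_seckey_verify(TOY_ORDER + 1));  /* rejected */
    return 0;
}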
*/ memset(ctmp2, 0, 32); CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1); - CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); + CHECK(secp256k1_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); memcpy(&pubkey2, &pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Multiply tweak of zero zeroizes the output. */ CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); + CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing seckey, the seckey is zeroized. */ @@ -4072,29 +4156,29 @@ void run_eckey_edge_case_test(void) { CHECK(secp256k1_ec_seckey_verify(ctx, ctmp2) == 1); CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); + CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); + CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing tweak, the seckey is zeroized. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); + CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); + CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; /* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing tweak, the pubkey is zeroized. */ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If the resulting key in secp256k1_ec_seckey_tweak_add and * secp256k1_ec_pubkey_tweak_add is 0 the functions fail and in the latter @@ -4104,25 +4188,25 @@ void run_eckey_edge_case_test(void) { memset(ctmp2, 0, 32); ctmp2[31] = 1; CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0); - CHECK(memcmp(zeros, ctmp2, 32) == 0); + CHECK(secp256k1_memcmp_var(zeros, ctmp2, 32) == 0); ctmp2[31] = 1; CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Tweak computation wraps and results in a key of 1. 
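The wrap test introduced by the comment just above starts from the secret key order-1 and adds a tweak of 2, expecting the result 1, because tweak addition on secret keys is addition modulo the group order. The same wrap in toy modular arithmetic (the real seckey_tweak_add additionally fails and zeroizes when the sum is 0 mod order, which the ctmp2[31] = 1 case just above exercises):

#include <stdio.h>
#include <stdint.h>

#define TOY_ORDER 1000003u  /* stand-in group order, illustrative only */

/* Tweak addition on secret keys is addition modulo the group order. */
static uint32_t toy_seckey_tweak_add(uint32_t k, uint32_t tweak) {
    return (uint32_t)(((uint64_t)k + tweak) % TOY_ORDER);
}

int main(void) {
    uint32_t k = TOY_ORDER - 1;  /* the "-1" key from the test above */
    printf("(order-1) + 2 mod order = %u\n", toy_seckey_tweak_add(k, 2));  /* prints 1 */
    return 0;
}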
*/ ctmp2[31] = 2; CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1); - CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); + CHECK(secp256k1_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); ctmp2[31] = 2; CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 1; CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Tweak mul * 2 = 1+1. */ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 2; CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Test argument errors. */ ecount = 0; secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); @@ -4131,12 +4215,12 @@ void run_eckey_edge_case_test(void) { memset(&pubkey, 0, 32); CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(ecount == 1); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); memset(&pubkey2, 0, 32); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); CHECK(ecount == 2); - CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); /* Plain argument errors. */ ecount = 0; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); @@ -4176,7 +4260,7 @@ void run_eckey_edge_case_test(void) { memset(&pubkey, 1, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* secp256k1_ec_pubkey_combine tests. 
*/ ecount = 0; pubkeys[0] = &pubkey_one; @@ -4187,28 +4271,28 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 1); CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 2); memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 3); pubkeys[0] = &pubkey_negone; memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); CHECK(ecount == 3); len = 33; CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); - CHECK(memcmp(ctmp, ctmp2, 33) == 0); + CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0); /* Result is infinity. */ pubkeys[0] = &pubkey_one; pubkeys[1] = &pubkey_negone; @@ -4216,7 +4300,7 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 3); /* Passes through infinity but comes out one. */ pubkeys[2] = &pubkey_one; @@ -4224,19 +4308,19 @@ void run_eckey_edge_case_test(void) { VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); CHECK(ecount == 3); len = 33; CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); - CHECK(memcmp(ctmp, ctmp2, 33) == 0); + CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0); /* Adds to two. 
*/ pubkeys[1] = &pubkey_one; memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); + CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); CHECK(ecount == 3); secp256k1_context_set_illegal_callback(ctx, NULL, NULL); } @@ -4250,21 +4334,21 @@ void run_eckey_negate_test(void) { /* Verify negation changes the key and changes it back */ CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1); - CHECK(memcmp(seckey, seckey_tmp, 32) != 0); + CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) != 0); CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1); - CHECK(memcmp(seckey, seckey_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Check that privkey alias gives same result */ CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1); CHECK(secp256k1_ec_privkey_negate(ctx, seckey_tmp) == 1); - CHECK(memcmp(seckey, seckey_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating all 0s fails */ memset(seckey, 0, 32); memset(seckey_tmp, 0, 32); CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0); /* Check that seckey is not modified */ - CHECK(memcmp(seckey, seckey_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating an overflowing seckey fails and the seckey is zeroed. In this * test, the seckey has 16 random bytes to ensure that ec_seckey_negate @@ -4273,7 +4357,7 @@ void run_eckey_negate_test(void) { memset(seckey, 0xFF, 16); memset(seckey_tmp, 0, 32); CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0); - CHECK(memcmp(seckey, seckey_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); } void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) { @@ -4295,7 +4379,7 @@ void test_ecdsa_sign_verify(void) { random_scalar_order_test(&key); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); secp256k1_ge_set_gej(&pub, &pubj); - getrec = secp256k1_rand_bits(1); + getrec = secp256k1_testrand_bits(1); random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL); if (getrec) { CHECK(recid >= 0 && recid < 4); @@ -4362,7 +4446,7 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char int is_empty_signature(const secp256k1_ecdsa_signature *sig) { static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0}; - return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0; + return secp256k1_memcmp_var(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0; } void test_ecdsa_end_to_end(void) { @@ -4395,31 +4479,31 @@ void test_ecdsa_end_to_end(void) { CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Verify exporting and importing public key. */ - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); + CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_testrand_bits(1) == 1 ? 
SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); memset(&pubkey, 0, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); /* Verify negation changes the key and changes it back */ memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); - CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); + CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); - CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); /* Verify private key import and export. */ - CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1)); + CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_testrand_bits(1) == 1)); CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); - CHECK(memcmp(privkey, privkey2, 32) == 0); + CHECK(secp256k1_memcmp_var(privkey, privkey2, 32) == 0); /* Optionally tweak the keys using addition. */ - if (secp256k1_rand_int(3) == 0) { + if (secp256k1_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; secp256k1_pubkey pubkey2; - secp256k1_rand256_test(rnd); + secp256k1_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); ret1 = secp256k1_ec_seckey_tweak_add(ctx, privkey, rnd); ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd); @@ -4430,20 +4514,20 @@ void test_ecdsa_end_to_end(void) { if (ret1 == 0) { return; } - CHECK(memcmp(privkey, privkey_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Optionally tweak the keys using multiplication. */ - if (secp256k1_rand_int(3) == 0) { + if (secp256k1_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; secp256k1_pubkey pubkey2; - secp256k1_rand256_test(rnd); + secp256k1_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); ret1 = secp256k1_ec_seckey_tweak_mul(ctx, privkey, rnd); ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); @@ -4454,9 +4538,9 @@ void test_ecdsa_end_to_end(void) { if (ret1 == 0) { return; } - CHECK(memcmp(privkey, privkey_tmp, 32) == 0); + CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); + CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Sign. 
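The optional tweak blocks above check that tweaking the secret key and then deriving the public key gives the same public key as tweaking the already-derived public key. That consistency is a generic cyclic-group property; as an analogy only (not the EC code), the same commutativity shows up in the multiplicative group mod a small prime, where "deriving" is modular exponentiation and an additive tweak on the exponent corresponds to multiplying the public value by g^t:

#include <stdio.h>
#include <stdint.h>

#define P 1000003u   /* small prime, stand-in group modulus */
#define G 2u         /* base, illustrative only */
#define ORDER (P - 1u)

static uint32_t pow_mod(uint32_t b, uint32_t e, uint32_t m) {
    uint64_t r = 1, base = b % m;
    while (e > 0) {
        if (e & 1) r = (r * base) % m;
        base = (base * base) % m;
        e >>= 1;
    }
    return (uint32_t)r;
}

int main(void) {
    uint32_t k = 123457, t = 98765;      /* secret key and additive tweak */
    uint32_t pub = pow_mod(G, k, P);     /* "derive" the public value */
    uint32_t tweak_then_derive = pow_mod(G, (k + t) % ORDER, P);
    uint32_t derive_then_tweak = (uint32_t)(((uint64_t)pub * pow_mod(G, t, P)) % P);
    printf("%u %u equal=%d\n", tweak_then_derive, derive_then_tweak,
           tweak_then_derive == derive_then_tweak);  /* equal=1 */
    return 0;
}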
*/ @@ -4468,13 +4552,13 @@ void test_ecdsa_end_to_end(void) { extra[31] = 0; extra[0] = 1; CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); - CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0); - CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0); + CHECK(secp256k1_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); + CHECK(secp256k1_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); + CHECK(secp256k1_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); + CHECK(secp256k1_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); + CHECK(secp256k1_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); + CHECK(secp256k1_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); + CHECK(secp256k1_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); /* Verify. */ CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); @@ -4495,7 +4579,7 @@ void test_ecdsa_end_to_end(void) { secp256k1_ecdsa_signature_save(&signature[5], &r, &s); CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - CHECK(memcmp(&signature[5], &signature[0], 64) == 0); + CHECK(secp256k1_memcmp_var(&signature[5], &signature[0], 64) == 0); /* Serialize/parse DER and verify again */ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); @@ -4505,7 +4589,7 @@ void test_ecdsa_end_to_end(void) { /* Serialize/destroy/parse DER and verify again. */ siglen = 74; CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); - sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255); + sig[secp256k1_testrand_int(siglen)] += 1 + secp256k1_testrand_int(255); CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); } @@ -4515,23 +4599,23 @@ void test_random_pubkeys(void) { secp256k1_ge elem2; unsigned char in[65]; /* Generate some randomly sized pubkeys. */ - size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33; - if (secp256k1_rand_bits(2) == 0) { - len = secp256k1_rand_bits(6); + size_t len = secp256k1_testrand_bits(2) == 0 ? 65 : 33; + if (secp256k1_testrand_bits(2) == 0) { + len = secp256k1_testrand_bits(6); } if (len == 65) { - in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7); + in[0] = secp256k1_testrand_bits(1) ? 4 : (secp256k1_testrand_bits(1) ? 6 : 7); } else { - in[0] = secp256k1_rand_bits(1) ? 2 : 3; + in[0] = secp256k1_testrand_bits(1) ? 
2 : 3; } - if (secp256k1_rand_bits(3) == 0) { - in[0] = secp256k1_rand_bits(8); + if (secp256k1_testrand_bits(3) == 0) { + in[0] = secp256k1_testrand_bits(8); } if (len > 1) { - secp256k1_rand256(&in[1]); + secp256k1_testrand256(&in[1]); } if (len > 33) { - secp256k1_rand256(&in[33]); + secp256k1_testrand256(&in[33]); } if (secp256k1_eckey_pubkey_parse(&elem, in, len)) { unsigned char out[65]; @@ -4542,7 +4626,7 @@ void test_random_pubkeys(void) { /* If the pubkey can be parsed, it should round-trip... */ CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33)); CHECK(size == len); - CHECK(memcmp(&in[1], &out[1], len-1) == 0); + CHECK(secp256k1_memcmp_var(&in[1], &out[1], len-1) == 0); /* ... except for the type of hybrid inputs. */ if ((in[0] != 6) && (in[0] != 7)) { CHECK(in[0] == out[0]); @@ -4553,7 +4637,7 @@ void test_random_pubkeys(void) { CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size)); ge_equals_ge(&elem,&elem2); /* Check that the X9.62 hybrid type is checked. */ - in[0] = secp256k1_rand_bits(1) ? 6 : 7; + in[0] = secp256k1_testrand_bits(1) ? 6 : 7; res = secp256k1_eckey_pubkey_parse(&elem2, in, size); if (firstb == 2 || firstb == 3) { if (in[0] == firstb + 4) { @@ -4565,7 +4649,7 @@ void test_random_pubkeys(void) { if (res) { ge_equals_ge(&elem,&elem2); CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0)); - CHECK(memcmp(&in[1], &out[1], 64) == 0); + CHECK(secp256k1_memcmp_var(&in[1], &out[1], 64) == 0); } } } @@ -4621,21 +4705,21 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); if (parsed_der) { ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; - valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0); + valid_der = (secp256k1_memcmp_var(compact_der, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der + 32, zeroes, 32) != 0); } if (valid_der) { ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; - roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0; + roundtrips_der = (len_der == siglen) && secp256k1_memcmp_var(roundtrip_der, sig, siglen) == 0; } parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); if (parsed_der_lax) { ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; - valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0); + valid_der_lax = (secp256k1_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); } if (valid_der_lax) { ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; - roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0; + roundtrips_der_lax = (len_der_lax == siglen) && secp256k1_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; } if (certainly_der) { @@ -4651,7 +4735,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ if (valid_der) { ret |= (!roundtrips_der_lax) << 12; ret |= (len_der != len_der_lax) << 13; - ret |= ((len_der != len_der_lax) || (memcmp(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; + ret |= ((len_der != len_der_lax) || (secp256k1_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; } ret |= (roundtrips_der != 
roundtrips_der_lax) << 15; if (parsed_der) { @@ -4668,19 +4752,19 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ if (valid_openssl) { unsigned char tmp[32] = {0}; BN_bn2bin(r, tmp + 32 - BN_num_bytes(r)); - valid_openssl = memcmp(tmp, max_scalar, 32) < 0; + valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0; } if (valid_openssl) { unsigned char tmp[32] = {0}; BN_bn2bin(s, tmp + 32 - BN_num_bytes(s)); - valid_openssl = memcmp(tmp, max_scalar, 32) < 0; + valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0; } } len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL); if (len_openssl <= 2048) { unsigned char *ptr = roundtrip_openssl; CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl); - roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (memcmp(roundtrip_openssl, sig, siglen) == 0); + roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (secp256k1_memcmp_var(roundtrip_openssl, sig, siglen) == 0); } else { len_openssl = 0; } @@ -4692,7 +4776,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_ ret |= (roundtrips_der != roundtrips_openssl) << 7; if (roundtrips_openssl) { ret |= (len_der != (size_t)len_openssl) << 8; - ret |= ((len_der != (size_t)len_openssl) || (memcmp(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9; + ret |= ((len_der != (size_t)len_openssl) || (secp256k1_memcmp_var(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9; } #endif return ret; @@ -4712,27 +4796,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { static void damage_array(unsigned char *sig, size_t *len) { int pos; - int action = secp256k1_rand_bits(3); + int action = secp256k1_testrand_bits(3); if (action < 1 && *len > 3) { /* Delete a byte. */ - pos = secp256k1_rand_int(*len); + pos = secp256k1_testrand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; } else if (action < 2 && *len < 2048) { /* Insert a byte. */ - pos = secp256k1_rand_int(1 + *len); + pos = secp256k1_testrand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); - sig[pos] = secp256k1_rand_bits(8); + sig[pos] = secp256k1_testrand_bits(8); (*len)++; return; } else if (action < 4) { /* Modify a byte. */ - sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255); + sig[secp256k1_testrand_int(*len)] += 1 + secp256k1_testrand_int(255); return; } else { /* action < 8 */ /* Modify a bit. */ - sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3); + sig[secp256k1_testrand_int(*len)] ^= 1 << secp256k1_testrand_bits(3); return; } } @@ -4745,23 +4829,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly int n; *len = 0; - der = secp256k1_rand_bits(2) == 0; + der = secp256k1_testrand_bits(2) == 0; *certainly_der = der; *certainly_not_der = 0; - indet = der ? 0 : secp256k1_rand_int(10) == 0; + indet = der ? 0 : secp256k1_testrand_int(10) == 0; for (n = 0; n < 2; n++) { /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ - nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0); + nlow[n] = der ? 1 : (secp256k1_testrand_bits(3) != 0); /* The length of the number in bytes (the first byte of which will always be nonzero) */ - nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8; + nlen[n] = nlow[n] ? 
secp256k1_testrand_int(33) : 32 + secp256k1_testrand_int(200) * secp256k1_testrand_int(8) / 8; CHECK(nlen[n] <= 232); /* The top bit of the number. */ - nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1)); + nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_testrand_bits(1)); /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ - nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127)); + nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_testrand_bits(7) : 1 + secp256k1_testrand_int(127)); /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ - nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8); + nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_testrand_int(3) : secp256k1_testrand_int(300 - nlen[n]) * secp256k1_testrand_int(8) / 8); if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) { *certainly_not_der = 1; } @@ -4770,7 +4854,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); if (!der) { /* nlenlen[n] max 127 bytes */ - int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256; + int add = secp256k1_testrand_int(127 - nlenlen[n]) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256; nlenlen[n] += add; if (add != 0) { *certainly_not_der = 1; @@ -4784,7 +4868,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 856); /* The length of the garbage inside the tuple. */ - elen = (der || indet) ? 0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8; + elen = (der || indet) ? 0 : secp256k1_testrand_int(980 - tlen) * secp256k1_testrand_int(8) / 8; if (elen != 0) { *certainly_not_der = 1; } @@ -4792,7 +4876,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen <= 980); /* The length of the garbage after the end of the tuple. */ - glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8; + glen = der ? 0 : secp256k1_testrand_int(990 - tlen) * secp256k1_testrand_int(8) / 8; if (glen != 0) { *certainly_not_der = 1; } @@ -4807,7 +4891,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly } else { int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2); if (!der) { - int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256; + int add = secp256k1_testrand_int(127 - tlenlen) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256; tlenlen += add; if (add != 0) { *certainly_not_der = 1; @@ -4858,13 +4942,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly nlen[n]--; } /* Generate remaining random bytes of number */ - secp256k1_rand_bytes_test(sig + *len, nlen[n]); + secp256k1_testrand_bytes_test(sig + *len, nlen[n]); *len += nlen[n]; nlen[n] = 0; } /* Generate random garbage inside tuple. */ - secp256k1_rand_bytes_test(sig + *len, elen); + secp256k1_testrand_bytes_test(sig + *len, elen); *len += elen; /* Generate end-of-contents bytes. 
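random_ber_signature above builds (r, s) tuples with deliberately varied lengths, zero padding, indefinite-length markers, and trailing garbage to probe the strict DER parser against the lax one. For reference, a strictly-DER signature is just a SEQUENCE of two positive INTEGERs with minimal-length contents; a sketch of that encoding for values small enough to fit in a uint32 (real signatures carry 256-bit r and s):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Append a DER INTEGER for a small non-negative value: big-endian, minimal
 * length, with a leading 0x00 if the top bit would make it look negative. */
static size_t der_put_uint(unsigned char *out, uint32_t v) {
    unsigned char tmp[5];
    size_t n = 0, i;
    do { tmp[n++] = (unsigned char)(v & 0xFF); v >>= 8; } while (v != 0);
    if (tmp[n - 1] & 0x80) tmp[n++] = 0x00;
    out[0] = 0x02;                 /* INTEGER tag */
    out[1] = (unsigned char)n;     /* short-form length */
    for (i = 0; i < n; i++) out[2 + i] = tmp[n - 1 - i];
    return 2 + n;
}

static size_t der_encode_sig(unsigned char *out, uint32_t r, uint32_t s) {
    unsigned char body[16];
    size_t blen = der_put_uint(body, r), i;
    blen += der_put_uint(body + blen, s);
    out[0] = 0x30;                 /* SEQUENCE tag */
    out[1] = (unsigned char)blen;  /* short-form length; fine for small values */
    for (i = 0; i < blen; i++) out[2 + i] = body[i];
    return 2 + blen;
}

int main(void) {
    unsigned char sig[32];
    size_t len = der_encode_sig(sig, 0x7F, 0x81), i;
    for (i = 0; i < len; i++) printf("%02x ", sig[i]);
    printf("\n");  /* 30 07 02 01 7f 02 02 00 81 */
    return 0;
}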
*/ @@ -4876,7 +4960,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly CHECK(tlen + glen <= 1121); /* Generate random garbage outside tuple. */ - secp256k1_rand_bytes_test(sig + *len, glen); + secp256k1_testrand_bytes_test(sig + *len, glen); *len += glen; tlen += glen; CHECK(tlen <= 1121); @@ -5208,11 +5292,11 @@ void test_ecdsa_edge_cases(void) { CHECK(!is_empty_signature(&sig)); CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0); + CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function is deterministic. */ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); - CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0); + CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function changes output with different messages. */ for(i = 0; i < 256; i++) { int j; @@ -5259,12 +5343,12 @@ void test_ecdsa_edge_cases(void) { VG_CHECK(nonce3,32); CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1); VG_CHECK(nonce4,32); - CHECK(memcmp(nonce, nonce2, 32) != 0); - CHECK(memcmp(nonce, nonce3, 32) != 0); - CHECK(memcmp(nonce, nonce4, 32) != 0); - CHECK(memcmp(nonce2, nonce3, 32) != 0); - CHECK(memcmp(nonce2, nonce4, 32) != 0); - CHECK(memcmp(nonce3, nonce4, 32) != 0); + CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0); + CHECK(secp256k1_memcmp_var(nonce, nonce3, 32) != 0); + CHECK(secp256k1_memcmp_var(nonce, nonce4, 32) != 0); + CHECK(secp256k1_memcmp_var(nonce2, nonce3, 32) != 0); + CHECK(secp256k1_memcmp_var(nonce2, nonce4, 32) != 0); + CHECK(secp256k1_memcmp_var(nonce3, nonce4, 32) != 0); } @@ -5293,7 +5377,7 @@ EC_KEY *get_openssl_key(const unsigned char *key32) { unsigned char privkey[300]; size_t privkeylen; const unsigned char* pbegin = privkey; - int compr = secp256k1_rand_bits(1); + int compr = secp256k1_testrand_bits(1); EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1); CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr)); CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen)); @@ -5314,7 +5398,7 @@ void test_ecdsa_openssl(void) { unsigned char message[32]; unsigned char signature[80]; unsigned char key32[32]; - secp256k1_rand256_test(message); + secp256k1_testrand256_test(message); secp256k1_scalar_set_b32(&msg, message, NULL); random_scalar_order_test(&key); secp256k1_scalar_get_b32(key32, &key); @@ -5367,12 +5451,12 @@ void run_memczero_test(void) { /* memczero(..., ..., 0) is a noop. */ memcpy(buf2, buf1, sizeof(buf1)); memczero(buf1, sizeof(buf1), 0); - CHECK(memcmp(buf1, buf2, sizeof(buf1)) == 0); + CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); /* memczero(..., ..., 1) zeros the buffer. 
*/ memset(buf2, 0, sizeof(buf2)); memczero(buf1, sizeof(buf1) , 1); - CHECK(memcmp(buf1, buf2, sizeof(buf1)) == 0); + CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); } void int_cmov_test(void) { @@ -5411,23 +5495,23 @@ void fe_cmov_test(void) { secp256k1_fe a = zero; secp256k1_fe_cmov(&r, &a, 0); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_fe_cmov(&r, &a, 1); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_fe_cmov(&r, &a, 1); - CHECK(memcmp(&r, &zero, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_fe_cmov(&r, &a, 1); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_fe_cmov(&r, &a, 0); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void fe_storage_cmov_test(void) { @@ -5441,23 +5525,23 @@ void fe_storage_cmov_test(void) { secp256k1_fe_storage a = zero; secp256k1_fe_storage_cmov(&r, &a, 0); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_fe_storage_cmov(&r, &a, 1); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_fe_storage_cmov(&r, &a, 1); - CHECK(memcmp(&r, &zero, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_fe_storage_cmov(&r, &a, 1); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_fe_storage_cmov(&r, &a, 0); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void scalar_cmov_test(void) { @@ -5471,23 +5555,23 @@ void scalar_cmov_test(void) { secp256k1_scalar a = zero; secp256k1_scalar_cmov(&r, &a, 0); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_scalar_cmov(&r, &a, 1); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_scalar_cmov(&r, &a, 1); - CHECK(memcmp(&r, &zero, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_scalar_cmov(&r, &a, 1); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_scalar_cmov(&r, &a, 0); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void ge_storage_cmov_test(void) { @@ -5503,23 +5587,23 @@ void ge_storage_cmov_test(void) { secp256k1_ge_storage a = zero; secp256k1_ge_storage_cmov(&r, &a, 0); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_ge_storage_cmov(&r, &a, 1); - CHECK(memcmp(&r, &max, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_ge_storage_cmov(&r, &a, 1); - CHECK(memcmp(&r, &zero, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_ge_storage_cmov(&r, &a, 1); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_ge_storage_cmov(&r, &a, 0); - CHECK(memcmp(&r, &one, sizeof(r)) == 0); + CHECK(secp256k1_memcmp_var(&r, 
&one, sizeof(r)) == 0); } void run_cmov_tests(void) { @@ -5531,9 +5615,6 @@ void run_cmov_tests(void) { } int main(int argc, char **argv) { - unsigned char seed16[16] = {0}; - unsigned char run32[32] = {0}; - /* Disable buffering for stdout to improve reliability of getting * diagnostic information. Happens right at the start of main because * setbuf must be used before any other operation on the stream. */ @@ -5546,52 +5627,20 @@ int main(int argc, char **argv) { if (argc > 1) { count = strtol(argv[1], NULL, 0); } + printf("test count = %i\n", count); /* find random seed */ - if (argc > 2) { - int pos = 0; - const char* ch = argv[2]; - while (pos < 16 && ch[0] != 0 && ch[1] != 0) { - unsigned short sh; - if ((sscanf(ch, "%2hx", &sh)) == 1) { - seed16[pos] = sh; - } else { - break; - } - ch += 2; - pos++; - } - } else { - FILE *frand = fopen("/dev/urandom", "r"); - if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) { - uint64_t t = time(NULL) * (uint64_t)1337; - fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n"); - seed16[0] ^= t; - seed16[1] ^= t >> 8; - seed16[2] ^= t >> 16; - seed16[3] ^= t >> 24; - seed16[4] ^= t >> 32; - seed16[5] ^= t >> 40; - seed16[6] ^= t >> 48; - seed16[7] ^= t >> 56; - } - if (frand) { - fclose(frand); - } - } - secp256k1_rand_seed(seed16); - - printf("test count = %i\n", count); - printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); + secp256k1_testrand_init(argc > 2 ? argv[2] : NULL); /* initialize */ run_context_tests(0); run_context_tests(1); run_scratch_tests(); ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - if (secp256k1_rand_bits(1)) { - secp256k1_rand256(run32); - CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL)); + if (secp256k1_testrand_bits(1)) { + unsigned char rand32[32]; + secp256k1_testrand256(rand32); + CHECK(secp256k1_context_randomize(ctx, secp256k1_testrand_bits(1) ? 
rand32 : NULL)); } run_rand_bits(); @@ -5625,6 +5674,7 @@ int main(int argc, char **argv) { /* ecmult tests */ run_wnaf(); run_point_times_order(); + run_ecmult_near_split_bound(); run_ecmult_chain(); run_ecmult_constants(); run_ecmult_gen_blind(); @@ -5633,9 +5683,7 @@ int main(int argc, char **argv) { run_ec_combine(); /* endomorphism tests */ -#ifdef USE_ENDOMORPHISM run_endomorphism_tests(); -#endif /* EC point parser test */ run_ec_pubkey_parse_test(); @@ -5679,8 +5727,7 @@ int main(int argc, char **argv) { run_cmov_tests(); - secp256k1_rand256(run32); - printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); + secp256k1_testrand_finish(); /* shutdown */ secp256k1_context_destroy(ctx); diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c index 681ed80bd0..f4d5b8e176 100644 --- a/src/secp256k1/src/tests_exhaustive.c +++ b/src/secp256k1/src/tests_exhaustive.c @@ -18,7 +18,6 @@ #ifndef EXHAUSTIVE_TEST_ORDER /* see group_impl.h for allowable values */ #define EXHAUSTIVE_TEST_ORDER 13 -#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */ #endif #include "include/secp256k1.h" @@ -27,10 +26,7 @@ #include "secp256k1.c" #include "testrand_impl.h" -#ifdef ENABLE_MODULE_RECOVERY -#include "src/modules/recovery/main_impl.h" -#include "include/secp256k1_recovery.h" -#endif +static int count = 2; /** stolen from tests.c */ void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { @@ -62,7 +58,7 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { void random_fe(secp256k1_fe *x) { unsigned char bin[32]; do { - secp256k1_rand256(bin); + secp256k1_testrand256(bin); if (secp256k1_fe_set_b32(x, bin)) { return; } @@ -70,6 +66,15 @@ void random_fe(secp256k1_fe *x) { } /** END stolen from tests.c */ +static uint32_t num_cores = 1; +static uint32_t this_core = 0; + +SECP256K1_INLINE static int skip_section(uint64_t* iter) { + if (num_cores == 1) return 0; + *iter += 0xe7037ed1a0b428dbULL; + return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core; +} + int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int attempt) { @@ -90,91 +95,93 @@ int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned cha return 1; } -#ifdef USE_ENDOMORPHISM -void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) { +void test_exhaustive_endomorphism(const secp256k1_ge *group) { int i; - for (i = 0; i < order; i++) { + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_ge res; secp256k1_ge_mul_lambda(&res, &group[i]); ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); } } -#endif -void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { +void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) { int i, j; + uint64_t iter = 0; /* Sanity-check (and check infinity functions) */ CHECK(secp256k1_ge_is_infinity(&group[0])); CHECK(secp256k1_gej_is_infinity(&groupj[0])); - for (i = 1; i < order; i++) { + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { CHECK(!secp256k1_ge_is_infinity(&group[i])); CHECK(!secp256k1_gej_is_infinity(&groupj[i])); } /* Check all addition formulae */ - for (j = 0; j < order; 
j++) { + for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { secp256k1_fe fe_inv; + if (skip_section(&iter)) continue; secp256k1_fe_inv(&fe_inv, &groupj[j].z); - for (i = 0; i < order; i++) { + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_ge zless_gej; secp256k1_gej tmp; /* add_var */ secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); - ge_equals_gej(&group[(i + j) % order], &tmp); + ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_ge */ if (j > 0) { secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]); - ge_equals_gej(&group[(i + j) % order], &tmp); + ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* add_ge_var */ secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); - ge_equals_gej(&group[(i + j) % order], &tmp); + ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_zinv_var */ zless_gej.infinity = groupj[j].infinity; zless_gej.x = groupj[j].x; zless_gej.y = groupj[j].y; secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); - ge_equals_gej(&group[(i + j) % order], &tmp); + ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } /* Check doubling */ - for (i = 0; i < order; i++) { + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_gej tmp; secp256k1_gej_double(&tmp, &groupj[i]); - ge_equals_gej(&group[(2 * i) % order], &tmp); + ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); secp256k1_gej_double_var(&tmp, &groupj[i], NULL); - ge_equals_gej(&group[(2 * i) % order], &tmp); + ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* Check negation */ - for (i = 1; i < order; i++) { + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_ge tmp; secp256k1_gej tmpj; secp256k1_ge_neg(&tmp, &group[i]); - ge_equals_ge(&group[order - i], &tmp); + ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp); secp256k1_gej_neg(&tmpj, &groupj[i]); - ge_equals_gej(&group[order - i], &tmpj); + ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj); } } -void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { +void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj) { int i, j, r_log; - for (r_log = 1; r_log < order; r_log++) { - for (j = 0; j < order; j++) { - for (i = 0; i < order; i++) { + uint64_t iter = 0; + for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) { + for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { + if (skip_section(&iter)) continue; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_gej tmp; secp256k1_scalar na, ng; secp256k1_scalar_set_int(&na, i); secp256k1_scalar_set_int(&ng, j); secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); - ge_equals_gej(&group[(i * r_log + j) % order], &tmp); + ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp); if (i > 0) { secp256k1_ecmult_const(&tmp, &group[i], &ng, 256); - ge_equals_gej(&group[(i * j) % order], &tmp); + ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } } @@ -193,14 +200,16 @@ static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t return 1; } -void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group) { int i, j, k, x, y; + uint64_t iter = 0; secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096); - for (i = 0; i < order; i++) { - for (j = 0; j < 
order; j++) { - for (k = 0; k < order; k++) { - for (x = 0; x < order; x++) { - for (y = 0; y < order; y++) { + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { + for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { + for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { + for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) { + if (skip_section(&iter)) continue; + for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) { secp256k1_gej tmp; secp256k1_scalar g_sc; ecmult_multi_data data; @@ -212,7 +221,7 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ data.pt[1] = group[y]; secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); - ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp); + ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp); } } } @@ -221,22 +230,23 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ secp256k1_scratch_destroy(&ctx->error_callback, scratch); } -void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) { +void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k, int* overflow) { secp256k1_fe x; unsigned char x_bin[32]; k %= EXHAUSTIVE_TEST_ORDER; x = group[k].x; secp256k1_fe_normalize(&x); secp256k1_fe_get_b32(x_bin, &x); - secp256k1_scalar_set_b32(r, x_bin, NULL); + secp256k1_scalar_set_b32(r, x_bin, overflow); } -void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group) { int s, r, msg, key; - for (s = 1; s < order; s++) { - for (r = 1; r < order; r++) { - for (msg = 1; msg < order; msg++) { - for (key = 1; key < order; key++) { + uint64_t iter = 0; + for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) { + for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { + for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { + for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { secp256k1_ge nonconst_ge; secp256k1_ecdsa_signature sig; secp256k1_pubkey pk; @@ -245,6 +255,8 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr int k, should_verify; unsigned char msg32[32]; + if (skip_section(&iter)) continue; + secp256k1_scalar_set_int(&s_s, s); secp256k1_scalar_set_int(&r_s, r); secp256k1_scalar_set_int(&msg_s, msg); @@ -254,9 +266,9 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. 
*/ should_verify = 0; - for (k = 0; k < order; k++) { + for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { secp256k1_scalar check_x_s; - r_from_k(&check_x_s, group, k); + r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { secp256k1_scalar_set_int(&s_times_k_s, k); secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); @@ -281,13 +293,15 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr } } -void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { +void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group) { int i, j, k; + uint64_t iter = 0; /* Loop */ - for (i = 1; i < order; i++) { /* message */ - for (j = 1; j < order; j++) { /* key */ - for (k = 1; k < order; k++) { /* nonce */ + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */ + for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */ + if (skip_section(&iter)) continue; + for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; secp256k1_ecdsa_signature sig; secp256k1_scalar sk, msg, r, s, expected_r; @@ -303,10 +317,10 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function function might change k during * signing. */ - r_from_k(&expected_r, group, k); + r_from_k(&expected_r, group, k, NULL); CHECK(r == expected_r); - CHECK((k * s) % order == (i + r * j) % order || - (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); + CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER || + (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER); /* Overflow means we've tried every possible nonce */ if (k < starting_k) { @@ -327,184 +341,114 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou } #ifdef ENABLE_MODULE_RECOVERY -void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { - int i, j, k; - - /* Loop */ - for (i = 1; i < order; i++) { /* message */ - for (j = 1; j < order; j++) { /* key */ - for (k = 1; k < order; k++) { /* nonce */ - const int starting_k = k; - secp256k1_fe r_dot_y_normalized; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - secp256k1_scalar sk, msg, r, s, expected_r; - unsigned char sk32[32], msg32[32]; - int expected_recid; - int recid; - secp256k1_scalar_set_int(&msg, i); - secp256k1_scalar_set_int(&sk, j); - secp256k1_scalar_get_b32(sk32, &sk); - secp256k1_scalar_get_b32(msg32, &msg); - - secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k); +#include "src/modules/recovery/tests_exhaustive_impl.h" +#endif - /* Check directly */ - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); - r_from_k(&expected_r, group, k); - CHECK(r == expected_r); - CHECK((k * s) % order == (i + r * j) % order || - (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); - /* In computing the recid, there is an overflow condition that is disabled in - * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value - * will exceed the group order, and our signing code always holds out for r - * values that don't overflow, so with a proper overflow check the tests would - * loop indefinitely. 
*/ - r_dot_y_normalized = group[k].y; - secp256k1_fe_normalize(&r_dot_y_normalized); - /* Also the recovery id is flipped depending if we hit the low-s branch */ - if ((k * s) % order == (i + r * j) % order) { - expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0; - } else { - expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1; - } - CHECK(recid == expected_recid); +#ifdef ENABLE_MODULE_EXTRAKEYS +#include "src/modules/extrakeys/tests_exhaustive_impl.h" +#endif - /* Convert to a standard sig then check */ - secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); - secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); - /* Note that we compute expected_r *after* signing -- this is important - * because our nonce-computing function function might change k during - * signing. */ - r_from_k(&expected_r, group, k); - CHECK(r == expected_r); - CHECK((k * s) % order == (i + r * j) % order || - (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); +#ifdef ENABLE_MODULE_SCHNORRSIG +#include "src/modules/schnorrsig/tests_exhaustive_impl.h" +#endif - /* Overflow means we've tried every possible nonce */ - if (k < starting_k) { - break; - } - } +int main(int argc, char** argv) { + int i; + secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; + secp256k1_ge group[EXHAUSTIVE_TEST_ORDER]; + unsigned char rand32[32]; + secp256k1_context *ctx; + + /* Disable buffering for stdout to improve reliability of getting + * diagnostic information. Happens right at the start of main because + * setbuf must be used before any other operation on the stream. */ + setbuf(stdout, NULL); + /* Also disable buffering for stderr because it's not guaranteed that it's + * unbuffered on all systems. */ + setbuf(stderr, NULL); + + printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER); + + /* find iteration count */ + if (argc > 1) { + count = strtol(argv[1], NULL, 0); + } + printf("test count = %i\n", count); + + /* find random seed */ + secp256k1_testrand_init(argc > 2 ? 
argv[2] : NULL); + + /* set up split processing */ + if (argc > 4) { + num_cores = strtol(argv[3], NULL, 0); + this_core = strtol(argv[4], NULL, 0); + if (num_cores < 1 || this_core >= num_cores) { + fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]); + return 1; } + printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1); } -} - -void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { - /* This is essentially a copy of test_exhaustive_verify, with recovery added */ - int s, r, msg, key; - for (s = 1; s < order; s++) { - for (r = 1; r < order; r++) { - for (msg = 1; msg < order; msg++) { - for (key = 1; key < order; key++) { - secp256k1_ge nonconst_ge; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pk; - secp256k1_scalar sk_s, msg_s, r_s, s_s; - secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; - int recid = 0; - int k, should_verify; - unsigned char msg32[32]; - secp256k1_scalar_set_int(&s_s, s); - secp256k1_scalar_set_int(&r_s, r); - secp256k1_scalar_set_int(&msg_s, msg); - secp256k1_scalar_set_int(&sk_s, key); - secp256k1_scalar_get_b32(msg32, &msg_s); + while (count--) { + /* Build context */ + ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + secp256k1_testrand256(rand32); + CHECK(secp256k1_context_randomize(ctx, rand32)); + + /* Generate the entire group */ + secp256k1_gej_set_infinity(&groupj[0]); + secp256k1_ge_set_gej(&group[0], &groupj[0]); + for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { + secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g); + secp256k1_ge_set_gej(&group[i], &groupj[i]); + if (count != 0) { + /* Set a different random z-value for each Jacobian point, except z=1 + is used in the last iteration. */ + secp256k1_fe z; + random_fe(&z); + secp256k1_gej_rescale(&groupj[i], &z); + } - /* Verify by hand */ - /* Run through every k value that gives us this r and check that *one* works. - * Note there could be none, there could be multiple, ECDSA is weird. */ - should_verify = 0; - for (k = 0; k < order; k++) { - secp256k1_scalar check_x_s; - r_from_k(&check_x_s, group, k); - if (r_s == check_x_s) { - secp256k1_scalar_set_int(&s_times_k_s, k); - secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); - } - } - /* nb we have a "high s" rule */ - should_verify &= !secp256k1_scalar_is_high(&s_s); + /* Verify against ecmult_gen */ + { + secp256k1_scalar scalar_i; + secp256k1_gej generatedj; + secp256k1_ge generated; - /* We would like to try recovering the pubkey and checking that it matches, - * but pubkey recovery is impossible in the exhaustive tests (the reason - * being that there are 12 nonzero r values, 12 nonzero points, and no - * overlap between the sets, so there are no valid signatures). 
*/ + secp256k1_scalar_set_int(&scalar_i, i); + secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); + secp256k1_ge_set_gej(&generated, &generatedj); - /* Verify by converting to a standard signature and calling verify */ - secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); - secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); - memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - secp256k1_pubkey_save(&pk, &nonconst_ge); - CHECK(should_verify == - secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); - } + CHECK(group[i].infinity == 0); + CHECK(generated.infinity == 0); + CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x)); + CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y)); } } - } -} -#endif - -int main(void) { - int i; - secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; - secp256k1_ge group[EXHAUSTIVE_TEST_ORDER]; - /* Build context */ - secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + /* Run the tests */ + test_exhaustive_endomorphism(group); + test_exhaustive_addition(group, groupj); + test_exhaustive_ecmult(ctx, group, groupj); + test_exhaustive_ecmult_multi(ctx, group); + test_exhaustive_sign(ctx, group); + test_exhaustive_verify(ctx, group); - /* TODO set z = 1, then do num_tests runs with random z values */ +#ifdef ENABLE_MODULE_RECOVERY + test_exhaustive_recovery(ctx, group); +#endif +#ifdef ENABLE_MODULE_EXTRAKEYS + test_exhaustive_extrakeys(ctx, group); +#endif +#ifdef ENABLE_MODULE_SCHNORRSIG + test_exhaustive_schnorrsig(ctx); +#endif - /* Generate the entire group */ - secp256k1_gej_set_infinity(&groupj[0]); - secp256k1_ge_set_gej(&group[0], &groupj[0]); - for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - /* Set a different random z-value for each Jacobian point */ - secp256k1_fe z; - random_fe(&z); - - secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g); - secp256k1_ge_set_gej(&group[i], &groupj[i]); - secp256k1_gej_rescale(&groupj[i], &z); - - /* Verify against ecmult_gen */ - { - secp256k1_scalar scalar_i; - secp256k1_gej generatedj; - secp256k1_ge generated; - - secp256k1_scalar_set_int(&scalar_i, i); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); - secp256k1_ge_set_gej(&generated, &generatedj); - - CHECK(group[i].infinity == 0); - CHECK(generated.infinity == 0); - CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x)); - CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y)); - } + secp256k1_context_destroy(ctx); } - /* Run the tests */ -#ifdef USE_ENDOMORPHISM - test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER); -#endif - test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); + secp256k1_testrand_finish(); -#ifdef ENABLE_MODULE_RECOVERY - test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); -#endif - - secp256k1_context_destroy(ctx); + printf("no problems found\n"); return 0; } - diff --git a/src/secp256k1/src/util.h b/src/secp256k1/src/util.h index a5cbe03ef5..3a88a41bc6 100644 --- a/src/secp256k1/src/util.h +++ b/src/secp256k1/src/util.h @@ -216,6 +216,24 @@ static SECP256K1_INLINE void memczero(void *s, size_t len, int flag) { } } +/** Semantics like memcmp. 
Variable-time. + * + * We use this to avoid possible compiler bugs with memcmp, e.g. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189 + */ +static SECP256K1_INLINE int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n) { + const unsigned char *p1 = s1, *p2 = s2; + size_t i; + + for (i = 0; i < n; i++) { + int diff = p1[i] - p2[i]; + if (diff != 0) { + return diff; + } + } + return 0; +} + /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/ static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag) { unsigned int mask0, mask1, r_masked, a_masked; diff --git a/src/secp256k1/src/valgrind_ctime_test.c b/src/secp256k1/src/valgrind_ctime_test.c index e676a8326c..3169e3651c 100644 --- a/src/secp256k1/src/valgrind_ctime_test.c +++ b/src/secp256k1/src/valgrind_ctime_test.c @@ -9,19 +9,19 @@ #include "assumptions.h" #include "util.h" -#if ENABLE_MODULE_ECDH +#ifdef ENABLE_MODULE_ECDH # include "include/secp256k1_ecdh.h" #endif -#if ENABLE_MODULE_RECOVERY +#ifdef ENABLE_MODULE_RECOVERY # include "include/secp256k1_recovery.h" #endif -#if ENABLE_MODULE_EXTRAKEYS +#ifdef ENABLE_MODULE_EXTRAKEYS # include "include/secp256k1_extrakeys.h" #endif -#if ENABLE_MODULE_SCHNORRSIG +#ifdef ENABLE_MODULE_SCHNORRSIG #include "include/secp256k1_schnorrsig.h" #endif @@ -37,11 +37,11 @@ int main(void) { unsigned char key[32]; unsigned char sig[74]; unsigned char spubkey[33]; -#if ENABLE_MODULE_RECOVERY +#ifdef ENABLE_MODULE_RECOVERY secp256k1_ecdsa_recoverable_signature recoverable_signature; int recid; #endif -#if ENABLE_MODULE_EXTRAKEYS +#ifdef ENABLE_MODULE_EXTRAKEYS secp256k1_keypair keypair; #endif @@ -81,7 +81,7 @@ int main(void) { CHECK(ret); CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature)); -#if ENABLE_MODULE_ECDH +#ifdef ENABLE_MODULE_ECDH /* Test ECDH. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); ret = secp256k1_ecdh(ctx, msg, &pubkey, key, NULL, NULL); @@ -89,7 +89,7 @@ int main(void) { CHECK(ret == 1); #endif -#if ENABLE_MODULE_RECOVERY +#ifdef ENABLE_MODULE_RECOVERY /* Test signing a recoverable signature. */ VALGRIND_MAKE_MEM_UNDEFINED(key, 32); ret = secp256k1_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL); @@ -129,7 +129,7 @@ int main(void) { CHECK(ret); /* Test keypair_create and keypair_xonly_tweak_add. */ -#if ENABLE_MODULE_EXTRAKEYS +#ifdef ENABLE_MODULE_EXTRAKEYS VALGRIND_MAKE_MEM_UNDEFINED(key, 32); ret = secp256k1_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); @@ -142,7 +142,7 @@ int main(void) { CHECK(ret == 1); #endif -#if ENABLE_MODULE_SCHNORRSIG +#ifdef ENABLE_MODULE_SCHNORRSIG VALGRIND_MAKE_MEM_UNDEFINED(key, 32); ret = secp256k1_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 712567ac0d..6743dc0070 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -88,7 +88,6 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) dummyNode1.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); - dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; // This test requires that we have a chain with non-zero work. 
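/* An illustrative sketch, not something the patch adds: the "#if ENABLE_MODULE_..."
 * to "#ifdef ENABLE_MODULE_..." switch in valgrind_ctime_test.c above presumably
 * matches the #ifdef checks used elsewhere in the library. The practical
 * difference: "#ifdef" only asks whether the macro is defined, while "#if"
 * evaluates it as an integer expression -- it treats an absent macro as 0 (and
 * warns under -Wundef), and it is a hard error if the macro is defined with an
 * empty value. The demo macro below is hypothetical. */
#include <cstdio>

#define DEMO_MODULE            /* defined, but with no value: "#if DEMO_MODULE" would not compile */

int main() {
#ifdef DEMO_MODULE
    std::puts("module enabled");   /* taken, because the macro is defined */
#else
    std::puts("module disabled");
#endif
    return 0;
}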
@@ -141,7 +140,6 @@ static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerManager &pee node.SetCommonVersion(PROTOCOL_VERSION); peerLogic.InitializeNode(&node); - node.nVersion = 1; node.fSuccessfullyConnected = true; connman->AddNode(node); @@ -231,7 +229,6 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND); dummyNode1.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); - dummyNode1.nVersion = 1; dummyNode1.fSuccessfullyConnected = true; peerLogic->Misbehaving(dummyNode1.GetId(), DISCOURAGEMENT_THRESHOLD, /* message */ ""); // Should be discouraged { @@ -245,7 +242,6 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND); dummyNode2.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode2); - dummyNode2.nVersion = 1; dummyNode2.fSuccessfullyConnected = true; peerLogic->Misbehaving(dummyNode2.GetId(), DISCOURAGEMENT_THRESHOLD - 1, /* message */ ""); { @@ -282,7 +278,6 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND); dummyNode.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode); - dummyNode.nVersion = 1; dummyNode.fSuccessfullyConnected = true; peerLogic->Misbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD, /* message */ ""); diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp new file mode 100644 index 0000000000..d20fa43d68 --- /dev/null +++ b/src/test/fuzz/script_assets_test_minimizer.cpp @@ -0,0 +1,200 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <test/fuzz/fuzz.h> + +#include <primitives/transaction.h> +#include <pubkey.h> +#include <script/interpreter.h> +#include <serialize.h> +#include <streams.h> +#include <univalue.h> +#include <util/strencodings.h> + +#include <boost/algorithm/string.hpp> +#include <cstdint> +#include <string> +#include <vector> + +// This fuzz "test" can be used to minimize test cases for script_assets_test in +// src/test/script_tests.cpp. While it written as a fuzz test, and can be used as such, +// fuzzing the inputs is unlikely to construct useful test cases. +// +// Instead, it is primarily intended to be run on a test set that was generated +// externally, for example using test/functional/feature_taproot.py's --dumptests mode. +// The minimized set can then be concatenated together, surrounded by '[' and ']', +// and used as the script_assets_test.json input to the script_assets_test unit test: +// +// (normal build) +// $ mkdir dump +// $ for N in $(seq 1 10); do TEST_DUMP_DIR=dump test/functional/feature_taproot --dumptests; done +// $ ... 
+// +// (fuzz test build) +// $ mkdir dump-min +// $ ./src/test/fuzz/script_assets_test_minimizer -merge=1 dump-min/ dump/ +// $ (echo -en '[\n'; cat dump-min/* | head -c -2; echo -en '\n]') >script_assets_test.json + +namespace { + +std::vector<unsigned char> CheckedParseHex(const std::string& str) +{ + if (str.size() && !IsHex(str)) throw std::runtime_error("Non-hex input '" + str + "'"); + return ParseHex(str); +} + +CScript ScriptFromHex(const std::string& str) +{ + std::vector<unsigned char> data = CheckedParseHex(str); + return CScript(data.begin(), data.end()); +} + +CMutableTransaction TxFromHex(const std::string& str) +{ + CMutableTransaction tx; + try { + VectorReader(SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, CheckedParseHex(str), 0) >> tx; + } catch (const std::ios_base::failure&) { + throw std::runtime_error("Tx deserialization failure"); + } + return tx; +} + +std::vector<CTxOut> TxOutsFromJSON(const UniValue& univalue) +{ + if (!univalue.isArray()) throw std::runtime_error("Prevouts must be array"); + std::vector<CTxOut> prevouts; + for (size_t i = 0; i < univalue.size(); ++i) { + CTxOut txout; + try { + VectorReader(SER_DISK, 0, CheckedParseHex(univalue[i].get_str()), 0) >> txout; + } catch (const std::ios_base::failure&) { + throw std::runtime_error("Prevout invalid format"); + } + prevouts.push_back(std::move(txout)); + } + return prevouts; +} + +CScriptWitness ScriptWitnessFromJSON(const UniValue& univalue) +{ + if (!univalue.isArray()) throw std::runtime_error("Script witness is not array"); + CScriptWitness scriptwitness; + for (size_t i = 0; i < univalue.size(); ++i) { + auto bytes = CheckedParseHex(univalue[i].get_str()); + scriptwitness.stack.push_back(std::move(bytes)); + } + return scriptwitness; +} + +const std::map<std::string, unsigned int> FLAG_NAMES = { + {std::string("P2SH"), (unsigned int)SCRIPT_VERIFY_P2SH}, + {std::string("DERSIG"), (unsigned int)SCRIPT_VERIFY_DERSIG}, + {std::string("NULLDUMMY"), (unsigned int)SCRIPT_VERIFY_NULLDUMMY}, + {std::string("CHECKLOCKTIMEVERIFY"), (unsigned int)SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY}, + {std::string("CHECKSEQUENCEVERIFY"), (unsigned int)SCRIPT_VERIFY_CHECKSEQUENCEVERIFY}, + {std::string("WITNESS"), (unsigned int)SCRIPT_VERIFY_WITNESS}, + {std::string("TAPROOT"), (unsigned int)SCRIPT_VERIFY_TAPROOT}, +}; + +std::vector<unsigned int> AllFlags() +{ + std::vector<unsigned int> ret; + + for (unsigned int i = 0; i < 128; ++i) { + unsigned int flag = 0; + if (i & 1) flag |= SCRIPT_VERIFY_P2SH; + if (i & 2) flag |= SCRIPT_VERIFY_DERSIG; + if (i & 4) flag |= SCRIPT_VERIFY_NULLDUMMY; + if (i & 8) flag |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; + if (i & 16) flag |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; + if (i & 32) flag |= SCRIPT_VERIFY_WITNESS; + if (i & 64) flag |= SCRIPT_VERIFY_TAPROOT; + + // SCRIPT_VERIFY_WITNESS requires SCRIPT_VERIFY_P2SH + if (flag & SCRIPT_VERIFY_WITNESS && !(flag & SCRIPT_VERIFY_P2SH)) continue; + // SCRIPT_VERIFY_TAPROOT requires SCRIPT_VERIFY_WITNESS + if (flag & SCRIPT_VERIFY_TAPROOT && !(flag & SCRIPT_VERIFY_WITNESS)) continue; + + ret.push_back(flag); + } + + return ret; +} + +const std::vector<unsigned int> ALL_FLAGS = AllFlags(); + +unsigned int ParseScriptFlags(const std::string& str) +{ + if (str.empty()) return 0; + + unsigned int flags = 0; + std::vector<std::string> words; + boost::algorithm::split(words, str, boost::algorithm::is_any_of(",")); + + for (const std::string& word : words) + { + auto it = FLAG_NAMES.find(word); + if (it == FLAG_NAMES.end()) throw std::runtime_error("Unknown 
verification flag " + word); + flags |= it->second; + } + + return flags; +} + +void Test(const std::string& str) +{ + UniValue test; + if (!test.read(str) || !test.isObject()) throw std::runtime_error("Non-object test input"); + + CMutableTransaction tx = TxFromHex(test["tx"].get_str()); + const std::vector<CTxOut> prevouts = TxOutsFromJSON(test["prevouts"]); + if (prevouts.size() != tx.vin.size()) throw std::runtime_error("Incorrect number of prevouts"); + size_t idx = test["index"].get_int64(); + if (idx >= tx.vin.size()) throw std::runtime_error("Invalid index"); + unsigned int test_flags = ParseScriptFlags(test["flags"].get_str()); + bool final = test.exists("final") && test["final"].get_bool(); + + if (test.exists("success")) { + tx.vin[idx].scriptSig = ScriptFromHex(test["success"]["scriptSig"].get_str()); + tx.vin[idx].scriptWitness = ScriptWitnessFromJSON(test["success"]["witness"]); + PrecomputedTransactionData txdata; + txdata.Init(tx, std::vector<CTxOut>(prevouts)); + MutableTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, txdata); + for (const auto flags : ALL_FLAGS) { + // "final": true tests are valid for all flags. Others are only valid with flags that are + // a subset of test_flags. + if (final || ((flags & test_flags) == flags)) { + (void)VerifyScript(tx.vin[idx].scriptSig, prevouts[idx].scriptPubKey, &tx.vin[idx].scriptWitness, flags, txcheck, nullptr); + } + } + } + + if (test.exists("failure")) { + tx.vin[idx].scriptSig = ScriptFromHex(test["failure"]["scriptSig"].get_str()); + tx.vin[idx].scriptWitness = ScriptWitnessFromJSON(test["failure"]["witness"]); + PrecomputedTransactionData txdata; + txdata.Init(tx, std::vector<CTxOut>(prevouts)); + MutableTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, txdata); + for (const auto flags : ALL_FLAGS) { + // If a test is supposed to fail with test_flags, it should also fail with any superset thereof. + if ((flags & test_flags) == test_flags) { + (void)VerifyScript(tx.vin[idx].scriptSig, prevouts[idx].scriptPubKey, &tx.vin[idx].scriptWitness, flags, txcheck, nullptr); + } + } + } +} + +ECCVerifyHandle handle; + +} + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + if (buffer.size() < 2 || buffer.back() != '\n' || buffer[buffer.size() - 2] != ',') return; + const std::string str((const char*)buffer.data(), buffer.size() - 2); + try { + Test(str); + } catch (const std::runtime_error&) {} +} diff --git a/src/test/fuzz/script_sigcache.cpp b/src/test/fuzz/script_sigcache.cpp index 434a47b702..87af71897b 100644 --- a/src/test/fuzz/script_sigcache.cpp +++ b/src/test/fuzz/script_sigcache.cpp @@ -35,11 +35,19 @@ void test_one_input(const std::vector<uint8_t>& buffer) const bool store = fuzzed_data_provider.ConsumeBool(); PrecomputedTransactionData tx_data; CachingTransactionSignatureChecker caching_transaction_signature_checker{mutable_transaction ? 
&tx : nullptr, n_in, amount, store, tx_data}; - const std::optional<CPubKey> pub_key = ConsumeDeserializable<CPubKey>(fuzzed_data_provider); - if (pub_key) { - const std::vector<uint8_t> random_bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider); - if (!random_bytes.empty()) { - (void)caching_transaction_signature_checker.VerifySignature(random_bytes, *pub_key, ConsumeUInt256(fuzzed_data_provider)); + if (fuzzed_data_provider.ConsumeBool()) { + const auto random_bytes = fuzzed_data_provider.ConsumeBytes<unsigned char>(64); + const XOnlyPubKey pub_key(ConsumeUInt256(fuzzed_data_provider)); + if (random_bytes.size() == 64) { + (void)caching_transaction_signature_checker.VerifySchnorrSignature(random_bytes, pub_key, ConsumeUInt256(fuzzed_data_provider)); + } + } else { + const auto random_bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider); + const auto pub_key = ConsumeDeserializable<CPubKey>(fuzzed_data_provider); + if (pub_key) { + if (!random_bytes.empty()) { + (void)caching_transaction_signature_checker.VerifyECDSASignature(random_bytes, *pub_key, ConsumeUInt256(fuzzed_data_provider)); + } } } } diff --git a/src/test/fuzz/signature_checker.cpp b/src/test/fuzz/signature_checker.cpp index 3aaeb66649..e121c89665 100644 --- a/src/test/fuzz/signature_checker.cpp +++ b/src/test/fuzz/signature_checker.cpp @@ -28,7 +28,12 @@ public: { } - bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override + bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override + { + return m_fuzzed_data_provider.ConsumeBool(); + } + + bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override { return m_fuzzed_data_provider.ConsumeBool(); } diff --git a/src/test/fuzz/txrequest.cpp b/src/test/fuzz/txrequest.cpp new file mode 100644 index 0000000000..9529ad3274 --- /dev/null +++ b/src/test/fuzz/txrequest.cpp @@ -0,0 +1,374 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <crypto/common.h> +#include <crypto/sha256.h> +#include <crypto/siphash.h> +#include <primitives/transaction.h> +#include <test/fuzz/fuzz.h> +#include <txrequest.h> + +#include <bitset> +#include <cstdint> +#include <queue> +#include <vector> + +namespace { + +constexpr int MAX_TXHASHES = 16; +constexpr int MAX_PEERS = 16; + +//! Randomly generated GenTxids used in this test (length is MAX_TXHASHES). +uint256 TXHASHES[MAX_TXHASHES]; + +//! Precomputed random durations (positive and negative, each ~exponentially distributed). +std::chrono::microseconds DELAYS[256]; + +struct Initializer +{ + Initializer() + { + for (uint8_t txhash = 0; txhash < MAX_TXHASHES; txhash += 1) { + CSHA256().Write(&txhash, 1).Finalize(TXHASHES[txhash].begin()); + } + int i = 0; + // DELAYS[N] for N=0..15 is just N microseconds. + for (; i < 16; ++i) { + DELAYS[i] = std::chrono::microseconds{i}; + } + // DELAYS[N] for N=16..127 has randomly-looking but roughly exponentially increasing values up to + // 198.416453 seconds. 
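/* A minimal sketch, not part of the patch: it illustrates two things from the
 * fuzz harness above -- the global-initializer idiom (a namespace-scope object
 * whose constructor precomputes tables once, before test_one_input ever runs)
 * and the mirroring rule for DELAYS[128..255], which are the negatives of
 * DELAYS[0..127] so a single input byte can move simulated time in either
 * direction. The quadratic growth below is a stand-in for the SipHash-derived
 * steps, not the real construction. */
#include <cassert>
#include <chrono>

namespace {

std::chrono::microseconds delays[256];

struct TableInitializer {
    TableInitializer() {
        int i = 0;
        /* non-negative, growing delays in the lower half (stand-in values) */
        for (; i < 128; ++i) delays[i] = std::chrono::microseconds{i * i};
        /* upper half mirrors the lower half with negative sign */
        for (; i < 256; ++i) delays[i] = -delays[255 - i];
    }
} g_table_initializer;

} // namespace

int main() {
    assert(delays[200] == -delays[55]);                   /* 255 - 200 == 55 */
    assert(delays[255] == std::chrono::microseconds{0});  /* mirror of delays[0] */
    return 0;
}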
+ for (; i < 128; ++i) { + int diff_bits = ((i - 10) * 2) / 9; + uint64_t diff = 1 + (CSipHasher(0, 0).Write(i).Finalize() >> (64 - diff_bits)); + DELAYS[i] = DELAYS[i - 1] + std::chrono::microseconds{diff}; + } + // DELAYS[N] for N=128..255 are negative delays with the same magnitude as N=0..127. + for (; i < 256; ++i) { + DELAYS[i] = -DELAYS[255 - i]; + } + } +} g_initializer; + +/** Tester class for TxRequestTracker + * + * It includes a naive reimplementation of its behavior, for a limited set + * of MAX_TXHASHES distinct txids, and MAX_PEERS peer identifiers. + * + * All of the public member functions perform the same operation on + * an actual TxRequestTracker and on the state of the reimplementation. + * The output of GetRequestable is compared with the expected value + * as well. + * + * Check() calls the TxRequestTracker's sanity check, plus compares the + * output of the constant accessors (Size(), CountLoad(), CountTracked()) + * with expected values. + */ +class Tester +{ + //! TxRequestTracker object being tested. + TxRequestTracker m_tracker; + + //! States for txid/peer combinations in the naive data structure. + enum class State { + NOTHING, //!< Absence of this txid/peer combination + + // Note that this implementation does not distinguish between DELAYED/READY/BEST variants of CANDIDATE. + CANDIDATE, + REQUESTED, + COMPLETED, + }; + + //! Sequence numbers, incremented whenever a new CANDIDATE is added. + uint64_t m_current_sequence{0}; + + //! List of future 'events' (all inserted reqtimes/exptimes). This is used to implement AdvanceToEvent. + std::priority_queue<std::chrono::microseconds, std::vector<std::chrono::microseconds>, + std::greater<std::chrono::microseconds>> m_events; + + //! Information about a txhash/peer combination. + struct Announcement + { + std::chrono::microseconds m_time; + uint64_t m_sequence; + State m_state{State::NOTHING}; + bool m_preferred; + bool m_is_wtxid; + uint64_t m_priority; //!< Precomputed priority. + }; + + //! Information about all txhash/peer combination. + Announcement m_announcements[MAX_TXHASHES][MAX_PEERS]; + + //! The current time; can move forward and backward. + std::chrono::microseconds m_now{244466666}; + + //! Delete txhashes whose only announcements are COMPLETED. + void Cleanup(int txhash) + { + bool all_nothing = true; + for (int peer = 0; peer < MAX_PEERS; ++peer) { + const Announcement& ann = m_announcements[txhash][peer]; + if (ann.m_state != State::NOTHING) { + if (ann.m_state != State::COMPLETED) return; + all_nothing = false; + } + } + if (all_nothing) return; + for (int peer = 0; peer < MAX_PEERS; ++peer) { + m_announcements[txhash][peer].m_state = State::NOTHING; + } + } + + //! Find the current best peer to request from for a txhash (or -1 if none). + int GetSelected(int txhash) const + { + int ret = -1; + uint64_t ret_priority = 0; + for (int peer = 0; peer < MAX_PEERS; ++peer) { + const Announcement& ann = m_announcements[txhash][peer]; + // Return -1 if there already is a (non-expired) in-flight request. + if (ann.m_state == State::REQUESTED) return -1; + // If it's a viable candidate, see if it has lower priority than the best one so far. 
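/* A small sketch, not part of the patch, of the idiom behind the m_events member
 * above: std::priority_queue is a max-heap by default, so instantiating it with
 * std::greater turns it into a min-heap, which is what lets AdvanceToEvent()
 * jump directly to the earliest pending reqtime or exptime. */
#include <cassert>
#include <chrono>
#include <functional>
#include <queue>
#include <vector>

int main() {
    using std::chrono::microseconds;
    std::priority_queue<microseconds, std::vector<microseconds>,
                        std::greater<microseconds>> events;
    events.push(microseconds{500});
    events.push(microseconds{100});
    events.push(microseconds{300});
    assert(events.top() == microseconds{100}); /* earliest event comes out first */
    events.pop();
    assert(events.top() == microseconds{300});
    return 0;
}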
+ if (ann.m_state == State::CANDIDATE && ann.m_time <= m_now) { + if (ret == -1 || ann.m_priority > ret_priority) { + std::tie(ret, ret_priority) = std::tie(peer, ann.m_priority); + } + } + } + return ret; + } + +public: + Tester() : m_tracker(true) {} + + std::chrono::microseconds Now() const { return m_now; } + + void AdvanceTime(std::chrono::microseconds offset) + { + m_now += offset; + while (!m_events.empty() && m_events.top() <= m_now) m_events.pop(); + } + + void AdvanceToEvent() + { + while (!m_events.empty() && m_events.top() <= m_now) m_events.pop(); + if (!m_events.empty()) { + m_now = m_events.top(); + m_events.pop(); + } + } + + void DisconnectedPeer(int peer) + { + // Apply to naive structure: all announcements for that peer are wiped. + for (int txhash = 0; txhash < MAX_TXHASHES; ++txhash) { + if (m_announcements[txhash][peer].m_state != State::NOTHING) { + m_announcements[txhash][peer].m_state = State::NOTHING; + Cleanup(txhash); + } + } + + // Call TxRequestTracker's implementation. + m_tracker.DisconnectedPeer(peer); + } + + void ForgetTxHash(int txhash) + { + // Apply to naive structure: all announcements for that txhash are wiped. + for (int peer = 0; peer < MAX_PEERS; ++peer) { + m_announcements[txhash][peer].m_state = State::NOTHING; + } + Cleanup(txhash); + + // Call TxRequestTracker's implementation. + m_tracker.ForgetTxHash(TXHASHES[txhash]); + } + + void ReceivedInv(int peer, int txhash, bool is_wtxid, bool preferred, std::chrono::microseconds reqtime) + { + // Apply to naive structure: if no announcement for txidnum/peer combination + // already, create a new CANDIDATE; otherwise do nothing. + Announcement& ann = m_announcements[txhash][peer]; + if (ann.m_state == State::NOTHING) { + ann.m_preferred = preferred; + ann.m_state = State::CANDIDATE; + ann.m_time = reqtime; + ann.m_is_wtxid = is_wtxid; + ann.m_sequence = m_current_sequence++; + ann.m_priority = m_tracker.ComputePriority(TXHASHES[txhash], peer, ann.m_preferred); + + // Add event so that AdvanceToEvent can quickly jump to the point where its reqtime passes. + if (reqtime > m_now) m_events.push(reqtime); + } + + // Call TxRequestTracker's implementation. + m_tracker.ReceivedInv(peer, GenTxid{is_wtxid, TXHASHES[txhash]}, preferred, reqtime); + } + + void RequestedTx(int peer, int txhash, std::chrono::microseconds exptime) + { + // Apply to naive structure: if a CANDIDATE announcement exists for peer/txhash, + // convert it to REQUESTED, and change any existing REQUESTED announcement for the same txhash to COMPLETED. + if (m_announcements[txhash][peer].m_state == State::CANDIDATE) { + for (int peer2 = 0; peer2 < MAX_PEERS; ++peer2) { + if (m_announcements[txhash][peer2].m_state == State::REQUESTED) { + m_announcements[txhash][peer2].m_state = State::COMPLETED; + } + } + m_announcements[txhash][peer].m_state = State::REQUESTED; + m_announcements[txhash][peer].m_time = exptime; + } + + // Add event so that AdvanceToEvent can quickly jump to the point where its exptime passes. + if (exptime > m_now) m_events.push(exptime); + + // Call TxRequestTracker's implementation. + m_tracker.RequestedTx(peer, TXHASHES[txhash], exptime); + } + + void ReceivedResponse(int peer, int txhash) + { + // Apply to naive structure: convert anything to COMPLETED. + if (m_announcements[txhash][peer].m_state != State::NOTHING) { + m_announcements[txhash][peer].m_state = State::COMPLETED; + Cleanup(txhash); + } + + // Call TxRequestTracker's implementation. 
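/* A minimal sketch, not part of the patch, of the pattern every public method
 * of the Tester above follows: apply each operation both to a naive in-memory
 * model and to the real object under test, then compare observable state. Any
 * divergence points at a bug in the implementation (or in the model). The two
 * toy classes below are hypothetical stand-ins, not Bitcoin Core types. */
#include <cassert>
#include <cstddef>
#include <set>

class RealImpl {                   /* stand-in for TxRequestTracker */
    std::set<int> m_items;
public:
    void Add(int x) { m_items.insert(x); }
    void Remove(int x) { m_items.erase(x); }
    std::size_t Size() const { return m_items.size(); }
};

class MiniTester {
    bool m_naive[16] = {};         /* naive model: a plain array of flags */
    RealImpl m_real;               /* implementation being exercised */
public:
    void Add(int x) { m_naive[x] = true; m_real.Add(x); }
    void Remove(int x) { m_naive[x] = false; m_real.Remove(x); }
    void Check() const {           /* compare the two views of the state */
        std::size_t expected = 0;
        for (bool b : m_naive) expected += b;
        assert(m_real.Size() == expected);
    }
};

int main() {
    MiniTester t;
    t.Add(3);
    t.Add(7);
    t.Remove(3);
    t.Check();                     /* both sides agree: exactly one item left */
    return 0;
}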
+ m_tracker.ReceivedResponse(peer, TXHASHES[txhash]); + } + + void GetRequestable(int peer) + { + // Implement using naive structure: + + //! list of (sequence number, txhash, is_wtxid) tuples. + std::vector<std::tuple<uint64_t, int, bool>> result; + std::vector<std::pair<NodeId, GenTxid>> expected_expired; + for (int txhash = 0; txhash < MAX_TXHASHES; ++txhash) { + // Mark any expired REQUESTED announcements as COMPLETED. + for (int peer2 = 0; peer2 < MAX_PEERS; ++peer2) { + Announcement& ann2 = m_announcements[txhash][peer2]; + if (ann2.m_state == State::REQUESTED && ann2.m_time <= m_now) { + expected_expired.emplace_back(peer2, GenTxid{ann2.m_is_wtxid, TXHASHES[txhash]}); + ann2.m_state = State::COMPLETED; + break; + } + } + // And delete txids with only COMPLETED announcements left. + Cleanup(txhash); + // CANDIDATEs for which this announcement has the highest priority get returned. + const Announcement& ann = m_announcements[txhash][peer]; + if (ann.m_state == State::CANDIDATE && GetSelected(txhash) == peer) { + result.emplace_back(ann.m_sequence, txhash, ann.m_is_wtxid); + } + } + // Sort the results by sequence number. + std::sort(result.begin(), result.end()); + std::sort(expected_expired.begin(), expected_expired.end()); + + // Compare with TxRequestTracker's implementation. + std::vector<std::pair<NodeId, GenTxid>> expired; + const auto actual = m_tracker.GetRequestable(peer, m_now, &expired); + std::sort(expired.begin(), expired.end()); + assert(expired == expected_expired); + + m_tracker.PostGetRequestableSanityCheck(m_now); + assert(result.size() == actual.size()); + for (size_t pos = 0; pos < actual.size(); ++pos) { + assert(TXHASHES[std::get<1>(result[pos])] == actual[pos].GetHash()); + assert(std::get<2>(result[pos]) == actual[pos].IsWtxid()); + } + } + + void Check() + { + // Compare CountTracked and CountLoad with naive structure. + size_t total = 0; + for (int peer = 0; peer < MAX_PEERS; ++peer) { + size_t tracked = 0; + size_t inflight = 0; + size_t candidates = 0; + for (int txhash = 0; txhash < MAX_TXHASHES; ++txhash) { + tracked += m_announcements[txhash][peer].m_state != State::NOTHING; + inflight += m_announcements[txhash][peer].m_state == State::REQUESTED; + candidates += m_announcements[txhash][peer].m_state == State::CANDIDATE; + } + assert(m_tracker.Count(peer) == tracked); + assert(m_tracker.CountInFlight(peer) == inflight); + assert(m_tracker.CountCandidates(peer) == candidates); + total += tracked; + } + // Compare Size. + assert(m_tracker.Size() == total); + + // Invoke internal consistency check of TxRequestTracker object. + m_tracker.SanityCheck(); + } +}; +} // namespace + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + // Tester object (which encapsulates a TxRequestTracker). + Tester tester; + + // Decode the input as a sequence of instructions with parameters + auto it = buffer.begin(); + while (it != buffer.end()) { + int cmd = *(it++) % 11; + int peer, txidnum, delaynum; + switch (cmd) { + case 0: // Make time jump to the next event (m_time of CANDIDATE or REQUESTED) + tester.AdvanceToEvent(); + break; + case 1: // Change time + delaynum = it == buffer.end() ? 0 : *(it++); + tester.AdvanceTime(DELAYS[delaynum]); + break; + case 2: // Query for requestable txs + peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS; + tester.GetRequestable(peer); + break; + case 3: // Peer went offline + peer = it == buffer.end() ? 
0 : *(it++) % MAX_PEERS; + tester.DisconnectedPeer(peer); + break; + case 4: // No longer need tx + txidnum = it == buffer.end() ? 0 : *(it++); + tester.ForgetTxHash(txidnum % MAX_TXHASHES); + break; + case 5: // Received immediate preferred inv + case 6: // Same, but non-preferred. + peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS; + txidnum = it == buffer.end() ? 0 : *(it++); + tester.ReceivedInv(peer, txidnum % MAX_TXHASHES, (txidnum / MAX_TXHASHES) & 1, cmd & 1, + std::chrono::microseconds::min()); + break; + case 7: // Received delayed preferred inv + case 8: // Same, but non-preferred. + peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS; + txidnum = it == buffer.end() ? 0 : *(it++); + delaynum = it == buffer.end() ? 0 : *(it++); + tester.ReceivedInv(peer, txidnum % MAX_TXHASHES, (txidnum / MAX_TXHASHES) & 1, cmd & 1, + tester.Now() + DELAYS[delaynum]); + break; + case 9: // Requested tx from peer + peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS; + txidnum = it == buffer.end() ? 0 : *(it++); + delaynum = it == buffer.end() ? 0 : *(it++); + tester.RequestedTx(peer, txidnum % MAX_TXHASHES, tester.Now() + DELAYS[delaynum]); + break; + case 10: // Received response + peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS; + txidnum = it == buffer.end() ? 0 : *(it++); + tester.ReceivedResponse(peer, txidnum % MAX_TXHASHES); + break; + default: + assert(false); + } + } + tester.Check(); +} diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp index 4e4c44266a..3362b8d17c 100644 --- a/src/test/key_tests.cpp +++ b/src/test/key_tests.cpp @@ -264,4 +264,32 @@ BOOST_AUTO_TEST_CASE(pubkey_unserialize) } } +BOOST_AUTO_TEST_CASE(bip340_test_vectors) +{ + static const std::vector<std::pair<std::array<std::string, 3>, bool>> VECTORS = { + {{"F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9", "0000000000000000000000000000000000000000000000000000000000000000", "E907831F80848D1069A5371B402410364BDF1C5F8307B0084C55F1CE2DCA821525F66A4A85EA8B71E482A74F382D2CE5EBEEE8FDB2172F477DF4900D310536C0"}, true}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "6896BD60EEAE296DB48A229FF71DFE071BDE413E6D43F917DC8DCF8C78DE33418906D11AC976ABCCB20B091292BFF4EA897EFCB639EA871CFA95F6DE339E4B0A"}, true}, + {{"DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8", "7E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C", "5831AAEED7B44BB74E5EAB94BA9D4294C49BCF2A60728D8B4C200F50DD313C1BAB745879A5AD954A72C45A91C3A51D3C7ADEA98D82F8481E0E1E03674A6F3FB7"}, true}, + {{"25D1DFF95105F5253C4022F628A996AD3A0D95FBF21D468A1B33F8C160D8F517", "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", "7EB0509757E246F19449885651611CB965ECC1A187DD51B64FDA1EDC9637D5EC97582B9CB13DB3933705B32BA982AF5AF25FD78881EBB32771FC5922EFC66EA3"}, true}, + {{"D69C3509BB99E412E68B0FE8544E72837DFA30746D8BE2AA65975F29D22DC7B9", "4DF3C3F68FCC83B27E9D42C90431A72499F17875C81A599B566C9889B9696703", "00000000000000000000003B78CE563F89A0ED9414F5AA28AD0D96D6795F9C6376AFB1548AF603B3EB45C9F8207DEE1060CB71C04E80F593060B07D28308D7F4"}, true}, + {{"EEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", 
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "FFF97BD5755EEEA420453A14355235D382F6472F8568A18B2F057A14602975563CC27944640AC607CD107AE10923D9EF7A73C643E166BE5EBEAFA34B1AC553E2"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "1FA62E331EDBC21C394792D2AB1100A7B432B013DF3F6FF4F99FCB33E0E1515F28890B3EDB6E7189B630448B515CE4F8622A954CFE545735AAEA5134FCCDB2BD"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769961764B3AA9B2FFCB6EF947B6887A226E8D7C93E00C5ED0C1834FF0D0C2E6DA6"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "0000000000000000000000000000000000000000000000000000000000000000123DDA8328AF9C23A94C1FEECFD123BA4FB73476F0D594DCB65C6425BD186051"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "00000000000000000000000000000000000000000000000000000000000000017615FBAF5AE28864013C099742DEADB4DBA87F11AC6754F93780D5A1837CF197"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "4A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1D69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B"}, false}, + {{"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"}, false}, + {{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B"}, false} + }; + + for (const auto& test : VECTORS) { + auto pubkey = ParseHex(test.first[0]); + auto msg = ParseHex(test.first[1]); + auto sig = ParseHex(test.first[2]); + BOOST_CHECK_EQUAL(XOnlyPubKey(pubkey).VerifySchnorr(uint256(msg), sig), test.second); + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/limitedmap_tests.cpp b/src/test/limitedmap_tests.cpp deleted file mode 100644 index ea18debbd3..0000000000 --- a/src/test/limitedmap_tests.cpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2012-2019 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -#include <limitedmap.h> - -#include <test/util/setup_common.h> - -#include <boost/test/unit_test.hpp> - -BOOST_FIXTURE_TEST_SUITE(limitedmap_tests, BasicTestingSetup) - -BOOST_AUTO_TEST_CASE(limitedmap_test) -{ - // create a limitedmap capped at 10 items - limitedmap<int, int> map(10); - - // check that the max size is 10 - BOOST_CHECK(map.max_size() == 10); - - // check that it's empty - BOOST_CHECK(map.size() == 0); - - // insert (-1, -1) - map.insert(std::pair<int, int>(-1, -1)); - - // make sure that the size is updated - BOOST_CHECK(map.size() == 1); - - // make sure that the new item is in the map - BOOST_CHECK(map.count(-1) == 1); - - // insert 10 new items - for (int i = 0; i < 10; i++) { - map.insert(std::pair<int, int>(i, i + 1)); - } - - // make sure that the map now contains 10 items... - BOOST_CHECK(map.size() == 10); - - // ...and that the first item has been discarded - BOOST_CHECK(map.count(-1) == 0); - - // iterate over the map, both with an index and an iterator - limitedmap<int, int>::const_iterator it = map.begin(); - for (int i = 0; i < 10; i++) { - // make sure the item is present - BOOST_CHECK(map.count(i) == 1); - - // use the iterator to check for the expected key and value - BOOST_CHECK(it->first == i); - BOOST_CHECK(it->second == i + 1); - - // use find to check for the value - BOOST_CHECK(map.find(i)->second == i + 1); - - // update and recheck - map.update(it, i + 2); - BOOST_CHECK(map.find(i)->second == i + 2); - - it++; - } - - // check that we've exhausted the iterator - BOOST_CHECK(it == map.end()); - - // resize the map to 5 items - map.max_size(5); - - // check that the max size and size are now 5 - BOOST_CHECK(map.max_size() == 5); - BOOST_CHECK(map.size() == 5); - - // check that items less than 5 have been discarded - // and items greater than 5 are retained - for (int i = 0; i < 10; i++) { - if (i < 5) { - BOOST_CHECK(map.count(i) == 0); - } else { - BOOST_CHECK(map.count(i) == 1); - } - } - - // erase some items not in the map - for (int i = 100; i < 1000; i += 100) { - map.erase(i); - } - - // check that the size is unaffected - BOOST_CHECK(map.size() == 5); - - // erase the remaining elements - for (int i = 5; i < 10; i++) { - map.erase(i); - } - - // check that the map is now empty - BOOST_CHECK(map.empty()); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp index eb0d95a373..f5d26fafef 100644 --- a/src/test/netbase_tests.cpp +++ b/src/test/netbase_tests.cpp @@ -452,17 +452,17 @@ BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters) static const std::vector<CAddress> fixture_addresses({ CAddress( - CService(CNetAddr(in6addr_loopback), 0 /* port */), + CService(CNetAddr(in6_addr(IN6ADDR_LOOPBACK_INIT)), 0 /* port */), NODE_NONE, 0x4966bc61U /* Fri Jan 9 02:54:25 UTC 2009 */ ), CAddress( - CService(CNetAddr(in6addr_loopback), 0x00f1 /* port */), + CService(CNetAddr(in6_addr(IN6ADDR_LOOPBACK_INIT)), 0x00f1 /* port */), NODE_NETWORK, 0x83766279U /* Tue Nov 22 11:22:33 UTC 2039 */ ), CAddress( - CService(CNetAddr(in6addr_loopback), 0xf1f2 /* port */), + CService(CNetAddr(in6_addr(IN6ADDR_LOOPBACK_INIT)), 0xf1f2 /* port */), static_cast<ServiceFlags>(NODE_WITNESS | NODE_COMPACT_FILTERS | NODE_NETWORK_LIMITED), 0xffffffffU /* Sun Feb 7 06:28:15 UTC 2106 */ ) diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index 0830743d61..7c53bd0002 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -5,10 +5,12 @@ #include 
<test/data/script_tests.json.h> #include <core_io.h> +#include <fs.h> #include <key.h> #include <rpc/util.h> #include <script/script.h> #include <script/script_error.h> +#include <script/sigcache.h> #include <script/sign.h> #include <script/signingprovider.h> #include <streams.h> @@ -1339,14 +1341,12 @@ BOOST_AUTO_TEST_CASE(script_GetScriptAsm) BOOST_CHECK_EQUAL(derSig + "83 " + pubKey, ScriptToAsmStr(CScript() << ToByteVector(ParseHex(derSig + "83")) << vchPubKey)); } -static CScript -ScriptFromHex(const char* hex) +static CScript ScriptFromHex(const std::string& str) { - std::vector<unsigned char> data = ParseHex(hex); + std::vector<unsigned char> data = ParseHex(str); return CScript(data.begin(), data.end()); } - BOOST_AUTO_TEST_CASE(script_FindAndDelete) { // Exercise the FindAndDelete functionality @@ -1472,6 +1472,36 @@ BOOST_AUTO_TEST_CASE(script_HasValidOps) #if defined(HAVE_CONSENSUS_LIB) +static CMutableTransaction TxFromHex(const std::string& str) +{ + CMutableTransaction tx; + VectorReader(SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str), 0) >> tx; + return tx; +} + +static std::vector<CTxOut> TxOutsFromJSON(const UniValue& univalue) +{ + assert(univalue.isArray()); + std::vector<CTxOut> prevouts; + for (size_t i = 0; i < univalue.size(); ++i) { + CTxOut txout; + VectorReader(SER_DISK, 0, ParseHex(univalue[i].get_str()), 0) >> txout; + prevouts.push_back(std::move(txout)); + } + return prevouts; +} + +static CScriptWitness ScriptWitnessFromJSON(const UniValue& univalue) +{ + assert(univalue.isArray()); + CScriptWitness scriptwitness; + for (size_t i = 0; i < univalue.size(); ++i) { + auto bytes = ParseHex(univalue[i].get_str()); + scriptwitness.stack.push_back(std::move(bytes)); + } + return scriptwitness; +} + /* Test simple (successful) usage of bitcoinconsensus_verify_script */ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_returns_true) { @@ -1610,5 +1640,107 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_invalid_flags) BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_INVALID_FLAGS); } +static std::vector<unsigned int> AllConsensusFlags() +{ + std::vector<unsigned int> ret; + + for (unsigned int i = 0; i < 128; ++i) { + unsigned int flag = 0; + if (i & 1) flag |= SCRIPT_VERIFY_P2SH; + if (i & 2) flag |= SCRIPT_VERIFY_DERSIG; + if (i & 4) flag |= SCRIPT_VERIFY_NULLDUMMY; + if (i & 8) flag |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; + if (i & 16) flag |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; + if (i & 32) flag |= SCRIPT_VERIFY_WITNESS; + if (i & 64) flag |= SCRIPT_VERIFY_TAPROOT; + + // SCRIPT_VERIFY_WITNESS requires SCRIPT_VERIFY_P2SH + if (flag & SCRIPT_VERIFY_WITNESS && !(flag & SCRIPT_VERIFY_P2SH)) continue; + // SCRIPT_VERIFY_TAPROOT requires SCRIPT_VERIFY_WITNESS + if (flag & SCRIPT_VERIFY_TAPROOT && !(flag & SCRIPT_VERIFY_WITNESS)) continue; + + ret.push_back(flag); + } + + return ret; +} + +/** Precomputed list of all valid combinations of consensus-relevant script validation flags. 
*/ +static const std::vector<unsigned int> ALL_CONSENSUS_FLAGS = AllConsensusFlags(); + +static void AssetTest(const UniValue& test) +{ + BOOST_CHECK(test.isObject()); + + CMutableTransaction mtx = TxFromHex(test["tx"].get_str()); + const std::vector<CTxOut> prevouts = TxOutsFromJSON(test["prevouts"]); + BOOST_CHECK(prevouts.size() == mtx.vin.size()); + size_t idx = test["index"].get_int64(); + unsigned int test_flags = ParseScriptFlags(test["flags"].get_str()); + bool fin = test.exists("final") && test["final"].get_bool(); + + if (test.exists("success")) { + mtx.vin[idx].scriptSig = ScriptFromHex(test["success"]["scriptSig"].get_str()); + mtx.vin[idx].scriptWitness = ScriptWitnessFromJSON(test["success"]["witness"]); + CTransaction tx(mtx); + PrecomputedTransactionData txdata; + txdata.Init(tx, std::vector<CTxOut>(prevouts)); + CachingTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, true, txdata); + for (const auto flags : ALL_CONSENSUS_FLAGS) { + // "final": true tests are valid for all flags. Others are only valid with flags that are + // a subset of test_flags. + if (fin || ((flags & test_flags) == flags)) { + bool ret = VerifyScript(tx.vin[idx].scriptSig, prevouts[idx].scriptPubKey, &tx.vin[idx].scriptWitness, flags, txcheck, nullptr); + BOOST_CHECK(ret); + } + } + } + + if (test.exists("failure")) { + mtx.vin[idx].scriptSig = ScriptFromHex(test["failure"]["scriptSig"].get_str()); + mtx.vin[idx].scriptWitness = ScriptWitnessFromJSON(test["failure"]["witness"]); + CTransaction tx(mtx); + PrecomputedTransactionData txdata; + txdata.Init(tx, std::vector<CTxOut>(prevouts)); + CachingTransactionSignatureChecker txcheck(&tx, idx, prevouts[idx].nValue, true, txdata); + for (const auto flags : ALL_CONSENSUS_FLAGS) { + // If a test is supposed to fail with test_flags, it should also fail with any superset thereof. + if ((flags & test_flags) == test_flags) { + bool ret = VerifyScript(tx.vin[idx].scriptSig, prevouts[idx].scriptPubKey, &tx.vin[idx].scriptWitness, flags, txcheck, nullptr); + BOOST_CHECK(!ret); + } + } + } +} + +BOOST_AUTO_TEST_CASE(script_assets_test) +{ + // See src/test/fuzz/script_assets_test_minimizer.cpp for information on how to generate + // the script_assets_test.json file used by this test. 
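    // Example invocation (illustrative; assumes the usual Boost.Test runner and that the generated JSON file has
    // been placed in some directory):
    //
    //   DIR_UNIT_TEST_DATA=/path/to/dir ./src/test/test_bitcoin --run_test=script_tests/script_assets_test
    //
    // The test is only compiled when HAVE_CONSENSUS_LIB is defined, and as the code below shows it is skipped
    // with a warning (rather than failed) when the variable or the file is missing.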
+ + const char* dir = std::getenv("DIR_UNIT_TEST_DATA"); + BOOST_WARN_MESSAGE(dir != nullptr, "Variable DIR_UNIT_TEST_DATA unset, skipping script_assets_test"); + if (dir == nullptr) return; + auto path = fs::path(dir) / "script_assets_test.json"; + bool exists = fs::exists(path); + BOOST_WARN_MESSAGE(exists, "File $DIR_UNIT_TEST_DATA/script_assets_test.json not found, skipping script_assets_test"); + if (!exists) return; + fs::ifstream file(path); + BOOST_CHECK(file.is_open()); + file.seekg(0, std::ios::end); + size_t length = file.tellg(); + file.seekg(0, std::ios::beg); + std::string data(length, '\0'); + file.read(&data[0], data.size()); + UniValue tests = read_json(data); + BOOST_CHECK(tests.isArray()); + BOOST_CHECK(tests.size() > 0); + + for (size_t i = 0; i < tests.size(); i++) { + AssetTest(tests[i]); + } + file.close(); +} + #endif BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 94b5dba913..b7ee280336 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -57,6 +57,7 @@ static std::map<std::string, unsigned int> mapFlagNames = { {std::string("DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM}, {std::string("WITNESS_PUBKEYTYPE"), (unsigned int)SCRIPT_VERIFY_WITNESS_PUBKEYTYPE}, {std::string("CONST_SCRIPTCODE"), (unsigned int)SCRIPT_VERIFY_CONST_SCRIPTCODE}, + {std::string("TAPROOT"), (unsigned int)SCRIPT_VERIFY_TAPROOT}, }; unsigned int ParseScriptFlags(std::string strFlags) diff --git a/src/test/txrequest_tests.cpp b/src/test/txrequest_tests.cpp new file mode 100644 index 0000000000..1d137b03b1 --- /dev/null +++ b/src/test/txrequest_tests.cpp @@ -0,0 +1,738 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + + +#include <txrequest.h> +#include <uint256.h> + +#include <test/util/setup_common.h> + +#include <algorithm> +#include <functional> +#include <vector> + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(txrequest_tests, BasicTestingSetup) + +namespace { + +constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min(); +constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max(); +constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1}; +constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0}; + +/** An Action is a function to call at a particular (simulated) timestamp. */ +using Action = std::pair<std::chrono::microseconds, std::function<void()>>; + +/** Object that stores actions from multiple interleaved scenarios, and data shared across them. + * + * The Scenario below is used to fill this. + */ +struct Runner +{ + /** The TxRequestTracker being tested. */ + TxRequestTracker txrequest; + + /** List of actions to be executed (in order of increasing timestamp). */ + std::vector<Action> actions; + + /** Which node ids have been assigned already (to prevent reuse). */ + std::set<NodeId> peerset; + + /** Which txhashes have been assigned already (to prevent reuse). */ + std::set<uint256> txhashset; + + /** Which (peer, gtxid) combinations are known to be expired. These need to be accumulated here instead of + * checked directly in the GetRequestable return value to avoid introducing a dependency between the various + * parallel tests. 
*/ + std::multiset<std::pair<NodeId, GenTxid>> expired; +}; + +std::chrono::microseconds RandomTime8s() { return std::chrono::microseconds{1 + InsecureRandBits(23)}; } +std::chrono::microseconds RandomTime1y() { return std::chrono::microseconds{1 + InsecureRandBits(45)}; } + +/** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker. + * + * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a + * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed + * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not + * reused in other tests, and thus they should be independent from each other. Running them in parallel however + * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data + * structure is more complicated due to the presence of other tests. + */ +class Scenario +{ + Runner& m_runner; + std::chrono::microseconds m_now; + std::string m_testname; + +public: + Scenario(Runner& runner, std::chrono::microseconds starttime) : m_runner(runner), m_now(starttime) {} + + /** Set a name for the current test, to give more clear error messages. */ + void SetTestName(std::string testname) + { + m_testname = std::move(testname); + } + + /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */ + void AdvanceTime(std::chrono::microseconds amount) + { + assert(amount.count() >= 0); + m_now += amount; + } + + /** Schedule a ForgetTxHash call at the Scheduler's current time. */ + void ForgetTxHash(const uint256& txhash) + { + auto& runner = m_runner; + runner.actions.emplace_back(m_now, [=,&runner]() { + runner.txrequest.ForgetTxHash(txhash); + runner.txrequest.SanityCheck(); + }); + } + + /** Schedule a ReceivedInv call at the Scheduler's current time. */ + void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime) + { + auto& runner = m_runner; + runner.actions.emplace_back(m_now, [=,&runner]() { + runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime); + runner.txrequest.SanityCheck(); + }); + } + + /** Schedule a DisconnectedPeer call at the Scheduler's current time. */ + void DisconnectedPeer(NodeId peer) + { + auto& runner = m_runner; + runner.actions.emplace_back(m_now, [=,&runner]() { + runner.txrequest.DisconnectedPeer(peer); + runner.txrequest.SanityCheck(); + }); + } + + /** Schedule a RequestedTx call at the Scheduler's current time. */ + void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime) + { + auto& runner = m_runner; + runner.actions.emplace_back(m_now, [=,&runner]() { + runner.txrequest.RequestedTx(peer, txhash, exptime); + runner.txrequest.SanityCheck(); + }); + } + + /** Schedule a ReceivedResponse call at the Scheduler's current time. */ + void ReceivedResponse(NodeId peer, const uint256& txhash) + { + auto& runner = m_runner; + runner.actions.emplace_back(m_now, [=,&runner]() { + runner.txrequest.ReceivedResponse(peer, txhash); + runner.txrequest.SanityCheck(); + }); + } + + /** Schedule calls to verify the TxRequestTracker's state at the Scheduler's current time. + * + * @param peer The peer whose state will be inspected. 
+ * @param expected The expected return value for GetRequestable(peer) + * @param candidates The expected return value of CountCandidates(peer) + * @param inflight The expected return value of CountInFlight(peer) + * @param completed The expected return value of Count(peer), minus candidates and inflight. + * @param checkname An arbitrary string to include in error messages, for test identification. + * @param offset Offset relative to the current time to use (must be <= 0). This allows simulations of time going + * backwards (but note that the ordering of this event only follows the scenario's m_now). + */ + void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight, + size_t completed, const std::string& checkname, + std::chrono::microseconds offset = std::chrono::microseconds{0}) + { + const auto comment = m_testname + " " + checkname; + auto& runner = m_runner; + const auto now = m_now; + assert(offset.count() <= 0); + runner.actions.emplace_back(m_now, [=,&runner]() { + std::vector<std::pair<NodeId, GenTxid>> expired_now; + auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now); + for (const auto& entry : expired_now) runner.expired.insert(entry); + runner.txrequest.SanityCheck(); + runner.txrequest.PostGetRequestableSanityCheck(now + offset); + size_t total = candidates + inflight + completed; + size_t real_total = runner.txrequest.Count(peer); + size_t real_candidates = runner.txrequest.CountCandidates(peer); + size_t real_inflight = runner.txrequest.CountInFlight(peer); + BOOST_CHECK_MESSAGE(real_total == total, strprintf("[" + comment + "] total %i (%i expected)", real_total, total)); + BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[" + comment + "] inflight %i (%i expected)", real_inflight, inflight)); + BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[" + comment + "] candidates %i (%i expected)", real_candidates, candidates)); + BOOST_CHECK_MESSAGE(ret == expected, "[" + comment + "] mismatching requestables"); + }); + } + + /** Verify that an announcement for gtxid by peer has expired some time before this check is scheduled. + * + * Every expected expiration should be accounted for through exactly one call to this function. + */ + void CheckExpired(NodeId peer, GenTxid gtxid) + { + const auto& testname = m_testname; + auto& runner = m_runner; + runner.actions.emplace_back(m_now, [=,&runner]() { + auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid}); + BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration"); + if (it != runner.expired.end()) runner.expired.erase(it); + }); + } + + /** Generate a random txhash, whose priorities for certain peers are constrained. + * + * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both: + * - priority(p1,T) > priority(p2,T) > priority(p3,T) + * - priority(p2,T) > priority(p4,T) > priority(p5,T) + * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements + * are within the same preferredness class.
+ */ + uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {}) + { + uint256 ret; + bool ok; + do { + ret = InsecureRand256(); + ok = true; + for (const auto& order : orders) { + for (size_t pos = 1; pos < order.size(); ++pos) { + uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true); + uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true); + if (prio_prev <= prio_cur) { + ok = false; + break; + } + } + if (!ok) break; + } + if (ok) { + ok = m_runner.txhashset.insert(ret).second; + } + } while(!ok); + return ret; + } + + /** Generate a random GenTxid; the txhash follows NewTxHash; the is_wtxid flag is random. */ + GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {}) + { + return {InsecureRandBool(), NewTxHash(orders)}; + } + + /** Generate a new random NodeId to use as peer. The same NodeId is never returned twice + * (across all Scenarios combined). */ + NodeId NewPeer() + { + bool ok; + NodeId ret; + do { + ret = InsecureRandBits(63); + ok = m_runner.peerset.insert(ret).second; + } while(!ok); + return ret; + } + + std::chrono::microseconds Now() const { return m_now; } +}; + +/** Add to scenario a test with a single tx announced by a single peer. + * + * config is an integer in [0, 32), which controls which variant of the test is used. + */ +void BuildSingleTest(Scenario& scenario, int config) +{ + auto peer = scenario.NewPeer(); + auto gtxid = scenario.NewGTxid(); + bool immediate = config & 1; + bool preferred = config & 2; + auto delay = immediate ? NO_TIME : RandomTime8s(); + + scenario.SetTestName(strprintf("Single(config=%i)", config)); + + // Receive an announcement, either immediately requestable or delayed. + scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay); + if (immediate) { + scenario.Check(peer, {gtxid}, 1, 0, 0, "s1"); + } else { + scenario.Check(peer, {}, 1, 0, 0, "s2"); + scenario.AdvanceTime(delay - MICROSECOND); + scenario.Check(peer, {}, 1, 0, 0, "s3"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(peer, {gtxid}, 1, 0, 0, "s4"); + } + + if (config >> 3) { // We'll request the transaction + scenario.AdvanceTime(RandomTime8s()); + auto expiry = RandomTime8s(); + scenario.Check(peer, {gtxid}, 1, 0, 0, "s5"); + scenario.RequestedTx(peer, gtxid.GetHash(), scenario.Now() + expiry); + scenario.Check(peer, {}, 0, 1, 0, "s6"); + + if ((config >> 3) == 1) { // The request will time out + scenario.AdvanceTime(expiry - MICROSECOND); + scenario.Check(peer, {}, 0, 1, 0, "s7"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(peer, {}, 0, 0, 0, "s8"); + scenario.CheckExpired(peer, gtxid); + return; + } else { + scenario.AdvanceTime(std::chrono::microseconds{InsecureRandRange(expiry.count())}); + scenario.Check(peer, {}, 0, 1, 0, "s9"); + if ((config >> 3) == 3) { // A response will arrive for the transaction + scenario.ReceivedResponse(peer, gtxid.GetHash()); + scenario.Check(peer, {}, 0, 0, 0, "s10"); + return; + } + } + } + + if (config & 4) { // The peer will go offline + scenario.DisconnectedPeer(peer); + } else { // The transaction is no longer needed + scenario.ForgetTxHash(gtxid.GetHash()); + } + scenario.Check(peer, {}, 0, 0, 0, "s11"); +} + +/** Add to scenario a test with a single tx announced by two peers, to verify the + * right peer is selected for requests. + * + * config is an integer in [0, 32), which controls which variant of the test is used. 
+ */ +void BuildPriorityTest(Scenario& scenario, int config) +{ + scenario.SetTestName(strprintf("Priority(config=%i)", config)); + + // Two peers. They will announce in order {peer1, peer2}. + auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer(); + // Construct a transaction that under random rules would be preferred by peer2 or peer1, + // depending on configuration. + bool prio1 = config & 1; + auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}}); + bool pref1 = config & 2, pref2 = config & 4; + + scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME); + scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1"); + if (InsecureRandBool()) { + scenario.AdvanceTime(RandomTime8s()); + scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2"); + } + + scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME); + bool stage2_prio = + // At this point, peer2 will be given priority if: + // - It is preferred and peer1 is not + (pref2 && !pref1) || + // - They're in the same preference class, + // and the randomized priority favors peer2 over peer1. + (pref1 == pref2 && !prio1); + NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2; + scenario.Check(otherpeer, {}, 1, 0, 0, "p3"); + scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4"); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.Check(otherpeer, {}, 1, 0, 0, "p5"); + scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6"); + + // We possibly request from the selected peer. + if (config & 8) { + scenario.RequestedTx(priopeer, gtxid.GetHash(), MAX_TIME); + scenario.Check(priopeer, {}, 0, 1, 0, "p7"); + scenario.Check(otherpeer, {}, 1, 0, 0, "p8"); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + } + + // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them. + if (config & 16) { + scenario.DisconnectedPeer(priopeer); + } else { + scenario.ReceivedResponse(priopeer, gtxid.GetHash()); + } + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8"); + scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9"); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + + // Now the other peer goes offline. + scenario.DisconnectedPeer(otherpeer); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.Check(peer1, {}, 0, 0, 0, "p10"); + scenario.Check(peer2, {}, 0, 0, 0, "p11"); +} + +/** Add to scenario a randomized test in which N peers announce the same transaction, to verify + * the order in which they are requested. */ +void BuildBigPriorityTest(Scenario& scenario, int peers) +{ + scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers)); + + // We will have N peers announce the same transaction. + std::map<NodeId, bool> preferred; + std::vector<NodeId> pref_peers, npref_peers; + int num_pref = InsecureRandRange(peers + 1) ; // Some preferred, ... + int num_npref = peers - num_pref; // some not preferred. + for (int i = 0; i < num_pref; ++i) { + pref_peers.push_back(scenario.NewPeer()); + preferred[pref_peers.back()] = true; + } + for (int i = 0; i < num_npref; ++i) { + npref_peers.push_back(scenario.NewPeer()); + preferred[npref_peers.back()] = false; + } + // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers). 
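    // (Preferred peers come first because preferredness sets the top bit of the priority computed by
    // PriorityComputer; within each class, the NewGTxid call below constrains the random priorities to match
    // this intended order.)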
+ std::vector<NodeId> request_order; + for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]); + for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]); + + // Determine the announcement order randomly. + std::vector<NodeId> announce_order = request_order; + Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx); + + // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and + // within npref_peers. + auto gtxid = scenario.NewGTxid({pref_peers, npref_peers}); + + // Decide reqtimes in opposite order of the expected request order. This means that as time passes we expect the + // to-be-requested-from-peer will change every time a subsequent reqtime is passed. + std::map<NodeId, std::chrono::microseconds> reqtimes; + auto reqtime = scenario.Now(); + for (int i = peers - 1; i >= 0; --i) { + reqtime += RandomTime8s(); + reqtimes[request_order[i]] = reqtime; + } + + // Actually announce from all peers simultaneously (but in announce_order). + for (const auto peer : announce_order) { + scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]); + } + for (const auto peer : announce_order) { + scenario.Check(peer, {}, 1, 0, 0, "b1"); + } + + // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from + // high priority to low priority within each class. + for (int i = peers - 1; i >= 0; --i) { + scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND); + scenario.Check(request_order[i], {}, 1, 0, 0, "b2"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3"); + } + + // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from + // peer should be the best remaining one, so verify this after every response. + for (int i = 0; i < peers; ++i) { + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + const int pos = InsecureRandRange(request_order.size()); + const auto peer = request_order[pos]; + request_order.erase(request_order.begin() + pos); + if (InsecureRandBool()) { + scenario.DisconnectedPeer(peer); + scenario.Check(peer, {}, 0, 0, 0, "b4"); + } else { + scenario.ReceivedResponse(peer, gtxid.GetHash()); + scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5"); + } + if (request_order.size()) { + scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6"); + } + } + + // Everything is gone in the end. + for (const auto peer : announce_order) { + scenario.Check(peer, {}, 0, 0, 0, "b7"); + } +} + +/** Add to scenario a test with one peer announcing two transactions, to verify they are + * fetched in announcement order. + * + * config is an integer in [0, 4) inclusive, and selects the variant of the test. + */ +void BuildRequestOrderTest(Scenario& scenario, int config) +{ + scenario.SetTestName(strprintf("RequestOrder(config=%i)", config)); + + auto peer = scenario.NewPeer(); + auto gtxid1 = scenario.NewGTxid(); + auto gtxid2 = scenario.NewGTxid(); + + auto reqtime2 = scenario.Now() + RandomTime8s(); + auto reqtime1 = reqtime2 + RandomTime8s(); + + scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1); + // Simulate time going backwards by giving the second announcement an earlier reqtime. 
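    // (reqtime1 == reqtime2 + RandomTime8s(), so gtxid2 becomes requestable before gtxid1 even though it is
    // announced second; the checks below verify that once both are due, GetRequestable still returns them in
    // announcement order, i.e. {gtxid1, gtxid2}.)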
+ scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2); + + scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now()); + scenario.Check(peer, {}, 2, 0, 0, "o1"); + scenario.AdvanceTime(MICROSECOND); + scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2"); + scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now()); + scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3"); + scenario.AdvanceTime(MICROSECOND); + // Even with time going backwards in between announcements, the return value of GetRequestable is in + // announcement order. + scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4"); + + scenario.DisconnectedPeer(peer); + scenario.Check(peer, {}, 0, 0, 0, "o5"); +} + +/** Add to scenario a test that verifies behavior related to both txid and wtxid with the same + * hash being announced. + * + * config is an integer in [0, 4) inclusive, and selects the variant of the test used. +*/ +void BuildWtxidTest(Scenario& scenario, int config) +{ + scenario.SetTestName(strprintf("Wtxid(config=%i)", config)); + + auto peerT = scenario.NewPeer(); + auto peerW = scenario.NewPeer(); + auto txhash = scenario.NewTxHash(); + GenTxid txid{false, txhash}; + GenTxid wtxid{true, txhash}; + + auto reqtimeT = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s(); + auto reqtimeW = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s(); + + // Announce txid first or wtxid first. + if (config & 1) { + scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW); + } else { + scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW); + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT); + } + + // Let time pass if needed, and check that the preferred announcement (txid or wtxid) + // is correctly to-be-requested (and with the correct wtxidness). + auto max_reqtime = std::max(reqtimeT, reqtimeW); + if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now()); + if (config & 2) { + scenario.Check(peerT, {txid}, 1, 0, 0, "w1"); + scenario.Check(peerW, {}, 1, 0, 0, "w2"); + } else { + scenario.Check(peerT, {}, 1, 0, 0, "w3"); + scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4"); + } + + // Let the preferred announcement be requested. It's not going to be delivered. + auto expiry = RandomTime8s(); + if (config & 2) { + scenario.RequestedTx(peerT, txid.GetHash(), scenario.Now() + expiry); + scenario.Check(peerT, {}, 0, 1, 0, "w5"); + scenario.Check(peerW, {}, 1, 0, 0, "w6"); + } else { + scenario.RequestedTx(peerW, wtxid.GetHash(), scenario.Now() + expiry); + scenario.Check(peerT, {}, 1, 0, 0, "w7"); + scenario.Check(peerW, {}, 0, 1, 0, "w8"); + } + + // After reaching expiration time of the preferred announcement, verify that the + // remaining one is requestable + scenario.AdvanceTime(expiry); + if (config & 2) { + scenario.Check(peerT, {}, 0, 0, 1, "w9"); + scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10"); + scenario.CheckExpired(peerT, txid); + } else { + scenario.Check(peerT, {txid}, 1, 0, 0, "w11"); + scenario.Check(peerW, {}, 0, 0, 1, "w12"); + scenario.CheckExpired(peerW, wtxid); + } + + // If a good transaction with either that hash as wtxid or txid arrives, both + // announcements are gone. 
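    // (ForgetTxHash is keyed by the hash alone, so the single call below is expected to drop both the txid
    // announcement tracked for peerT and the wtxid announcement tracked for peerW; checks w13/w14 confirm it.)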
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ForgetTxHash(txhash); + scenario.Check(peerT, {}, 0, 0, 0, "w13"); + scenario.Check(peerW, {}, 0, 0, 0, "w14"); +} + +/** Add to scenario a test that exercises clocks that go backwards. */ +void BuildTimeBackwardsTest(Scenario& scenario) +{ + auto peer1 = scenario.NewPeer(); + auto peer2 = scenario.NewPeer(); + auto gtxid = scenario.NewGTxid({{peer1, peer2}}); + + // Announce from peer2. + auto reqtime = scenario.Now() + RandomTime8s(); + scenario.ReceivedInv(peer2, gtxid, true, reqtime); + scenario.Check(peer2, {}, 1, 0, 0, "r1"); + scenario.AdvanceTime(reqtime - scenario.Now()); + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2"); + // Check that if the clock goes backwards by 1us, the transaction would stop being requested. + scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND); + // But it reverts to being requested if time goes forward again. + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4"); + + // Announce from peer1. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME); + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5"); + scenario.Check(peer1, {}, 1, 0, 0, "r6"); + + // Request from peer1. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + auto expiry = scenario.Now() + RandomTime8s(); + scenario.RequestedTx(peer1, gtxid.GetHash(), expiry); + scenario.Check(peer1, {}, 0, 1, 0, "r7"); + scenario.Check(peer2, {}, 1, 0, 0, "r8"); + + // Expiration passes. + scenario.AdvanceTime(expiry - scenario.Now()); + scenario.Check(peer1, {}, 0, 0, 1, "r9"); + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2. + scenario.CheckExpired(peer1, gtxid); + scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire. + scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND); + + // Peer2 goes offline, meaning no viable announcements remain. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.DisconnectedPeer(peer2); + scenario.Check(peer1, {}, 0, 0, 0, "r13"); + scenario.Check(peer2, {}, 0, 0, 0, "r14"); +} + +/** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */ +void BuildWeirdRequestsTest(Scenario& scenario) +{ + auto peer1 = scenario.NewPeer(); + auto peer2 = scenario.NewPeer(); + auto gtxid1 = scenario.NewGTxid({{peer1, peer2}}); + auto gtxid2 = scenario.NewGTxid({{peer2, peer1}}); + + // Announce gtxid1 by peer1. + scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME); + scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1"); + + // Announce gtxid2 by peer2. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME); + scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3"); + + // We request gtxid2 from *peer1* - no effect. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME); + scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5"); + + // Now request gtxid1 from peer1 - marks it as REQUESTED. 
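    // (Unlike the gtxid2 request above, which had no effect, this matches what GetRequestable returned for
    // peer1, so the announcement is expected to move to the REQUESTED state; checks q6/q7 below verify this.)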
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + auto expiryA = scenario.Now() + RandomTime8s(); + scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryA); + scenario.Check(peer1, {}, 0, 1, 0, "q6"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7"); + + // Request it a second time - nothing happens, as it's already REQUESTED. + auto expiryB = expiryA + RandomTime8s(); + scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryB); + scenario.Check(peer1, {}, 0, 1, 0, "q8"); + scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9"); + + // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires. + scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME); + scenario.Check(peer1, {}, 0, 1, 0, "q10"); + scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11"); + + // When reaching expiryA, it expires (not expiryB, which is later). + scenario.AdvanceTime(expiryA - scenario.Now()); + scenario.Check(peer1, {}, 0, 0, 1, "q12"); + scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13"); + scenario.CheckExpired(peer1, gtxid1); + + // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer1, gtxid1.GetHash(), MAX_TIME); + scenario.Check(peer1, {}, 0, 0, 1, "q14"); + scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15"); + + // Now announce gtxid2 from peer1. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME); + scenario.Check(peer1, {}, 1, 0, 1, "q16"); + scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17"); + + // And request it from peer1 (weird as peer2 has the preference). + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME); + scenario.Check(peer1, {}, 0, 1, 1, "q18"); + scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19"); + + // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED. + if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s()); + scenario.RequestedTx(peer2, gtxid2.GetHash(), MAX_TIME); + scenario.Check(peer1, {}, 0, 0, 2, "q20"); + scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21"); + + // If peer2 goes offline, no viable announcements remain. + scenario.DisconnectedPeer(peer2); + scenario.Check(peer1, {}, 0, 0, 0, "q22"); + scenario.Check(peer2, {}, 0, 0, 0, "q23"); +} + +void TestInterleavedScenarios() +{ + // Create a list of functions which add tests to scenarios. + std::vector<std::function<void(Scenario&)>> builders; + // Add instances of every test, for every configuration. + for (int n = 0; n < 64; ++n) { + builders.emplace_back([n](Scenario& scenario){ BuildWtxidTest(scenario, n); }); + builders.emplace_back([n](Scenario& scenario){ BuildRequestOrderTest(scenario, n & 3); }); + builders.emplace_back([n](Scenario& scenario){ BuildSingleTest(scenario, n & 31); }); + builders.emplace_back([n](Scenario& scenario){ BuildPriorityTest(scenario, n & 31); }); + builders.emplace_back([n](Scenario& scenario){ BuildBigPriorityTest(scenario, (n & 7) + 1); }); + builders.emplace_back([](Scenario& scenario){ BuildTimeBackwardsTest(scenario); }); + builders.emplace_back([](Scenario& scenario){ BuildWeirdRequestsTest(scenario); }); + } + // Randomly shuffle all those functions. 
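    // (The shuffle changes which builders get grouped into the same Scenario below, so every run exercises a
    // different assignment of tests to scenarios and hence a different interleaving of their actions.)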
+ Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx); + + Runner runner; + auto starttime = RandomTime1y(); + // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each. + while (builders.size()) { + // Introduce some variation in the start time of each scenario, so they don't all start off + // concurrently, but get a more random interleaving. + auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s(); + Scenario scenario(runner, scenario_start); + for (int j = 0; builders.size() && j < 10; ++j) { + builders.back()(scenario); + builders.pop_back(); + } + } + // Sort all the actions from all those scenarios chronologically, resulting in the actions from + // distinct scenarios to become interleaved. Use stable_sort so that actions from one scenario + // aren't reordered w.r.t. each other. + std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) { + return a1.first < a2.first; + }); + + // Run all actions from all scenarios, in order. + for (auto& action : runner.actions) { + action.second(); + } + + BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U); + BOOST_CHECK(runner.expired.empty()); +} + +} // namespace + +BOOST_AUTO_TEST_CASE(TxRequestTest) +{ + for (int i = 0; i < 5; ++i) { + TestInterleavedScenarios(); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/txrequest.cpp b/src/txrequest.cpp new file mode 100644 index 0000000000..09eb78e927 --- /dev/null +++ b/src/txrequest.cpp @@ -0,0 +1,756 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <txrequest.h> + +#include <crypto/siphash.h> +#include <net.h> +#include <primitives/transaction.h> +#include <random.h> +#include <uint256.h> +#include <util/memory.h> + +#include <boost/multi_index_container.hpp> +#include <boost/multi_index/ordered_index.hpp> + +#include <chrono> +#include <unordered_map> +#include <utility> + +#include <assert.h> + +namespace { + +/** The various states a (txhash,peer) pair can be in. + * + * Note that CANDIDATE is split up into 3 substates (DELAYED, BEST, READY), allowing more efficient implementation. + * Also note that the sorting order of ByTxHashView relies on the specific order of values in this enum. + * + * Expected behaviour is: + * - When first announced by a peer, the state is CANDIDATE_DELAYED until reqtime is reached. + * - Announcements that have reached their reqtime but not been requested will be either CANDIDATE_READY or + * CANDIDATE_BEST. Neither of those has an expiration time; they remain in that state until they're requested or + * no longer needed. CANDIDATE_READY announcements are promoted to CANDIDATE_BEST when they're the best one left. + * - When requested, an announcement will be in state REQUESTED until expiry is reached. + * - If expiry is reached, or the peer replies to the request (either with NOTFOUND or the tx), the state becomes + * COMPLETED. + */ +enum class State : uint8_t { + /** A CANDIDATE announcement whose reqtime is in the future. */ + CANDIDATE_DELAYED, + /** A CANDIDATE announcement that's not CANDIDATE_DELAYED or CANDIDATE_BEST. */ + CANDIDATE_READY, + /** The best CANDIDATE for a given txhash; only if there is no REQUESTED announcement already for that txhash. + * The CANDIDATE_BEST is the highest-priority announcement among all CANDIDATE_READY (and _BEST) ones for that + * txhash. 
*/ + CANDIDATE_BEST, + /** A REQUESTED announcement. */ + REQUESTED, + /** A COMPLETED announcement. */ + COMPLETED, +}; + +//! Type alias for sequence numbers. +using SequenceNumber = uint64_t; + +/** An announcement. This is the data we track for each txid or wtxid that is announced to us by each peer. */ +struct Announcement { + /** Txid or wtxid that was announced. */ + const uint256 m_txhash; + /** For CANDIDATE_{DELAYED,BEST,READY} the reqtime; for REQUESTED the expiry. */ + std::chrono::microseconds m_time; + /** What peer the request was from. */ + const NodeId m_peer; + /** What sequence number this announcement has. */ + const SequenceNumber m_sequence : 59; + /** Whether the request is preferred. */ + const bool m_preferred : 1; + /** Whether this is a wtxid request. */ + const bool m_is_wtxid : 1; + + /** What state this announcement is in. + * This is a uint8_t instead of a State to silence a GCC warning in versions prior to 8.4 and 9.3. + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 */ + uint8_t m_state : 3; + + /** Convert m_state to a State enum. */ + State GetState() const { return static_cast<State>(m_state); } + + /** Convert a State enum to a uint8_t and store it in m_state. */ + void SetState(State state) { m_state = static_cast<uint8_t>(state); } + + /** Whether this announcement is selected. There can be at most 1 selected peer per txhash. */ + bool IsSelected() const + { + return GetState() == State::CANDIDATE_BEST || GetState() == State::REQUESTED; + } + + /** Whether this announcement is waiting for a certain time to pass. */ + bool IsWaiting() const + { + return GetState() == State::REQUESTED || GetState() == State::CANDIDATE_DELAYED; + } + + /** Whether this announcement can feasibly be selected if the current IsSelected() one disappears. */ + bool IsSelectable() const + { + return GetState() == State::CANDIDATE_READY || GetState() == State::CANDIDATE_BEST; + } + + /** Construct a new announcement from scratch, initially in CANDIDATE_DELAYED state. */ + Announcement(const GenTxid& gtxid, NodeId peer, bool preferred, std::chrono::microseconds reqtime, + SequenceNumber sequence) : + m_txhash(gtxid.GetHash()), m_time(reqtime), m_peer(peer), m_sequence(sequence), m_preferred(preferred), + m_is_wtxid(gtxid.IsWtxid()), m_state(static_cast<uint8_t>(State::CANDIDATE_DELAYED)) {} +}; + +//! Type alias for priorities. +using Priority = uint64_t; + +/** A functor with embedded salt that computes priority of an announcement. + * + * Higher priorities are selected first. + */ +class PriorityComputer { + const uint64_t m_k0, m_k1; +public: + explicit PriorityComputer(bool deterministic) : + m_k0{deterministic ? 0 : GetRand(0xFFFFFFFFFFFFFFFF)}, + m_k1{deterministic ? 0 : GetRand(0xFFFFFFFFFFFFFFFF)} {} + + Priority operator()(const uint256& txhash, NodeId peer, bool preferred) const + { + uint64_t low_bits = CSipHasher(m_k0, m_k1).Write(txhash.begin(), txhash.size()).Write(peer).Finalize() >> 1; + return low_bits | uint64_t{preferred} << 63; + } + + Priority operator()(const Announcement& ann) const + { + return operator()(ann.m_txhash, ann.m_peer, ann.m_preferred); + } +}; + +// Definitions for the 3 indexes used in the main data structure. +// +// Each index has a By* type to identify it, a By*View data type to represent the view of announcement it is sorted +// by, and an By*ViewExtractor type to convert an announcement into the By*View type. 
+// See https://www.boost.org/doc/libs/1_58_0/libs/multi_index/doc/reference/key_extraction.html#key_extractors +// for more information about the key extraction concept. + +// The ByPeer index is sorted by (peer, state == CANDIDATE_BEST, txhash) +// +// Uses: +// * Looking up existing announcements by peer/txhash, by checking both (peer, false, txhash) and +// (peer, true, txhash). +// * Finding all CANDIDATE_BEST announcements for a given peer in GetRequestable. +struct ByPeer {}; +using ByPeerView = std::tuple<NodeId, bool, const uint256&>; +struct ByPeerViewExtractor +{ + using result_type = ByPeerView; + result_type operator()(const Announcement& ann) const + { + return ByPeerView{ann.m_peer, ann.GetState() == State::CANDIDATE_BEST, ann.m_txhash}; + } +}; + +// The ByTxHash index is sorted by (txhash, state, priority). +// +// Note: priority == 0 whenever state != CANDIDATE_READY. +// +// Uses: +// * Deleting all announcements with a given txhash in ForgetTxHash. +// * Finding the best CANDIDATE_READY to convert to CANDIDATE_BEST, when no other CANDIDATE_READY or REQUESTED +// announcement exists for that txhash. +// * Determining when no more non-COMPLETED announcements for a given txhash exist, so the COMPLETED ones can be +// deleted. +struct ByTxHash {}; +using ByTxHashView = std::tuple<const uint256&, State, Priority>; +class ByTxHashViewExtractor { + const PriorityComputer& m_computer; +public: + ByTxHashViewExtractor(const PriorityComputer& computer) : m_computer(computer) {} + using result_type = ByTxHashView; + result_type operator()(const Announcement& ann) const + { + const Priority prio = (ann.GetState() == State::CANDIDATE_READY) ? m_computer(ann) : 0; + return ByTxHashView{ann.m_txhash, ann.GetState(), prio}; + } +}; + +enum class WaitState { + //! Used for announcements that need efficient testing of "is their timestamp in the future?". + FUTURE_EVENT, + //! Used for announcements whose timestamp is not relevant. + NO_EVENT, + //! Used for announcements that need efficient testing of "is their timestamp in the past?". + PAST_EVENT, +}; + +WaitState GetWaitState(const Announcement& ann) +{ + if (ann.IsWaiting()) return WaitState::FUTURE_EVENT; + if (ann.IsSelectable()) return WaitState::PAST_EVENT; + return WaitState::NO_EVENT; +} + +// The ByTime index is sorted by (wait_state, time). +// +// All announcements with a timestamp in the future can be found by iterating the index forward from the beginning. +// All announcements with a timestamp in the past can be found by iterating the index backwards from the end. +// +// Uses: +// * Finding CANDIDATE_DELAYED announcements whose reqtime has passed, and REQUESTED announcements whose expiry has +// passed. +// * Finding CANDIDATE_READY/BEST announcements whose reqtime is in the future (when the clock time went backwards). +struct ByTime {}; +using ByTimeView = std::pair<WaitState, std::chrono::microseconds>; +struct ByTimeViewExtractor +{ + using result_type = ByTimeView; + result_type operator()(const Announcement& ann) const + { + return ByTimeView{GetWaitState(ann), ann.m_time}; + } +}; + +/** Data type for the main data structure (Announcement objects with ByPeer/ByTxHash/ByTime indexes). 
*/ +using Index = boost::multi_index_container< + Announcement, + boost::multi_index::indexed_by< + boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor>, + boost::multi_index::ordered_non_unique<boost::multi_index::tag<ByTxHash>, ByTxHashViewExtractor>, + boost::multi_index::ordered_non_unique<boost::multi_index::tag<ByTime>, ByTimeViewExtractor> + > +>; + +/** Helper type to simplify syntax of iterator types. */ +template<typename Tag> +using Iter = typename Index::index<Tag>::type::iterator; + +/** Per-peer statistics object. */ +struct PeerInfo { + size_t m_total = 0; //!< Total number of announcements for this peer. + size_t m_completed = 0; //!< Number of COMPLETED announcements for this peer. + size_t m_requested = 0; //!< Number of REQUESTED announcements for this peer. +}; + +/** Per-txhash statistics object. Only used for sanity checking. */ +struct TxHashInfo +{ + //! Number of CANDIDATE_DELAYED announcements for this txhash. + size_t m_candidate_delayed = 0; + //! Number of CANDIDATE_READY announcements for this txhash. + size_t m_candidate_ready = 0; + //! Number of CANDIDATE_BEST announcements for this txhash (at most one). + size_t m_candidate_best = 0; + //! Number of REQUESTED announcements for this txhash (at most one; mutually exclusive with CANDIDATE_BEST). + size_t m_requested = 0; + //! The priority of the CANDIDATE_BEST announcement if one exists, or max() otherwise. + Priority m_priority_candidate_best = std::numeric_limits<Priority>::max(); + //! The highest priority of all CANDIDATE_READY announcements (or min() if none exist). + Priority m_priority_best_candidate_ready = std::numeric_limits<Priority>::min(); + //! All peers we have an announcement for this txhash for. + std::vector<NodeId> m_peers; +}; + +/** Compare two PeerInfo objects. Only used for sanity checking. */ +bool operator==(const PeerInfo& a, const PeerInfo& b) +{ + return std::tie(a.m_total, a.m_completed, a.m_requested) == + std::tie(b.m_total, b.m_completed, b.m_requested); +}; + +/** (Re)compute the PeerInfo map from the index. Only used for sanity checking. */ +std::unordered_map<NodeId, PeerInfo> RecomputePeerInfo(const Index& index) +{ + std::unordered_map<NodeId, PeerInfo> ret; + for (const Announcement& ann : index) { + PeerInfo& info = ret[ann.m_peer]; + ++info.m_total; + info.m_requested += (ann.GetState() == State::REQUESTED); + info.m_completed += (ann.GetState() == State::COMPLETED); + } + return ret; +} + +/** Compute the TxHashInfo map. Only used for sanity checking. */ +std::map<uint256, TxHashInfo> ComputeTxHashInfo(const Index& index, const PriorityComputer& computer) +{ + std::map<uint256, TxHashInfo> ret; + for (const Announcement& ann : index) { + TxHashInfo& info = ret[ann.m_txhash]; + // Classify how many announcements of each state we have for this txhash. + info.m_candidate_delayed += (ann.GetState() == State::CANDIDATE_DELAYED); + info.m_candidate_ready += (ann.GetState() == State::CANDIDATE_READY); + info.m_candidate_best += (ann.GetState() == State::CANDIDATE_BEST); + info.m_requested += (ann.GetState() == State::REQUESTED); + // And track the priority of the best CANDIDATE_READY/CANDIDATE_BEST announcements. 
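    // (SanityCheck uses these two values to assert that any CANDIDATE_BEST announcement has a priority at
    // least as high as the best CANDIDATE_READY one for the same txhash.)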
+ if (ann.GetState() == State::CANDIDATE_BEST) { + info.m_priority_candidate_best = computer(ann); + } + if (ann.GetState() == State::CANDIDATE_READY) { + info.m_priority_best_candidate_ready = std::max(info.m_priority_best_candidate_ready, computer(ann)); + } + // Also keep track of which peers this txhash has an announcement for (so we can detect duplicates). + info.m_peers.push_back(ann.m_peer); + } + return ret; +} + +GenTxid ToGenTxid(const Announcement& ann) +{ + return {ann.m_is_wtxid, ann.m_txhash}; +} + +} // namespace + +/** Actual implementation for TxRequestTracker's data structure. */ +class TxRequestTracker::Impl { + //! The current sequence number. Increases for every announcement. This is used to sort txhashes returned by + //! GetRequestable in announcement order. + SequenceNumber m_current_sequence{0}; + + //! This tracker's priority computer. + const PriorityComputer m_computer; + + //! This tracker's main data structure. See SanityCheck() for the invariants that apply to it. + Index m_index; + + //! Map with this tracker's per-peer statistics. + std::unordered_map<NodeId, PeerInfo> m_peerinfo; + +public: + void SanityCheck() const + { + // Recompute m_peerinfo from m_index. This verifies the data in it, as it should just be caching statistics + // on m_index. It also verifies the invariant that no PeerInfo entries with m_total==0 exist. + assert(m_peerinfo == RecomputePeerInfo(m_index)); + + // Calculate per-txhash statistics from m_index, and validate invariants. + for (auto& item : ComputeTxHashInfo(m_index, m_computer)) { + TxHashInfo& info = item.second; + + // Cannot have only COMPLETED announcements (txhash should have been forgotten already) + assert(info.m_candidate_delayed + info.m_candidate_ready + info.m_candidate_best + info.m_requested > 0); + + // Can have at most 1 CANDIDATE_BEST/REQUESTED peer + assert(info.m_candidate_best + info.m_requested <= 1); + + // If there are any CANDIDATE_READY announcements, there must be exactly one CANDIDATE_BEST or REQUESTED + // announcement. + if (info.m_candidate_ready > 0) { + assert(info.m_candidate_best + info.m_requested == 1); + } + + // If there is both a CANDIDATE_READY and a CANDIDATE_BEST announcement, the CANDIDATE_BEST one must be + // at least as good (equal or higher priority) as the best CANDIDATE_READY. + if (info.m_candidate_ready && info.m_candidate_best) { + assert(info.m_priority_candidate_best >= info.m_priority_best_candidate_ready); + } + + // No txhash can have been announced by the same peer twice. + std::sort(info.m_peers.begin(), info.m_peers.end()); + assert(std::adjacent_find(info.m_peers.begin(), info.m_peers.end()) == info.m_peers.end()); + } + } + + void PostGetRequestableSanityCheck(std::chrono::microseconds now) const + { + for (const Announcement& ann : m_index) { + if (ann.IsWaiting()) { + // REQUESTED and CANDIDATE_DELAYED must have a time in the future (they should have been converted + // to COMPLETED/CANDIDATE_READY respectively). + assert(ann.m_time > now); + } else if (ann.IsSelectable()) { + // CANDIDATE_READY and CANDIDATE_BEST cannot have a time in the future (they should have remained + // CANDIDATE_DELAYED, or should have been converted back to it if time went backwards). + assert(ann.m_time <= now); + } + } + } + +private: + //! Wrapper around Index::...::erase that keeps m_peerinfo up to date.
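    //! Erasing the last announcement for a peer also removes that peer's PeerInfo entry, preserving the
    //! invariant (checked in SanityCheck above) that no PeerInfo with m_total == 0 is kept around.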
+ template<typename Tag> + Iter<Tag> Erase(Iter<Tag> it) + { + auto peerit = m_peerinfo.find(it->m_peer); + peerit->second.m_completed -= it->GetState() == State::COMPLETED; + peerit->second.m_requested -= it->GetState() == State::REQUESTED; + if (--peerit->second.m_total == 0) m_peerinfo.erase(peerit); + return m_index.get<Tag>().erase(it); + } + + //! Wrapper around Index::...::modify that keeps m_peerinfo up to date. + template<typename Tag, typename Modifier> + void Modify(Iter<Tag> it, Modifier modifier) + { + auto peerit = m_peerinfo.find(it->m_peer); + peerit->second.m_completed -= it->GetState() == State::COMPLETED; + peerit->second.m_requested -= it->GetState() == State::REQUESTED; + m_index.get<Tag>().modify(it, std::move(modifier)); + peerit->second.m_completed += it->GetState() == State::COMPLETED; + peerit->second.m_requested += it->GetState() == State::REQUESTED; + } + + //! Convert a CANDIDATE_DELAYED announcement into a CANDIDATE_READY. If this makes it the new best + //! CANDIDATE_READY (and no REQUESTED exists) and better than the CANDIDATE_BEST (if any), it becomes the new + //! CANDIDATE_BEST. + void PromoteCandidateReady(Iter<ByTxHash> it) + { + assert(it != m_index.get<ByTxHash>().end()); + assert(it->GetState() == State::CANDIDATE_DELAYED); + // Convert CANDIDATE_DELAYED to CANDIDATE_READY first. + Modify<ByTxHash>(it, [](Announcement& ann){ ann.SetState(State::CANDIDATE_READY); }); + // The following code relies on the fact that the ByTxHash is sorted by txhash, and then by state (first + // _DELAYED, then _READY, then _BEST/REQUESTED). Within the _READY announcements, the best one (highest + // priority) comes last. Thus, if an existing _BEST exists for the same txhash that this announcement may + // be preferred over, it must immediately follow the newly created _READY. + auto it_next = std::next(it); + if (it_next == m_index.get<ByTxHash>().end() || it_next->m_txhash != it->m_txhash || + it_next->GetState() == State::COMPLETED) { + // This is the new best CANDIDATE_READY, and there is no IsSelected() announcement for this txhash + // already. + Modify<ByTxHash>(it, [](Announcement& ann){ ann.SetState(State::CANDIDATE_BEST); }); + } else if (it_next->GetState() == State::CANDIDATE_BEST) { + Priority priority_old = m_computer(*it_next); + Priority priority_new = m_computer(*it); + if (priority_new > priority_old) { + // There is a CANDIDATE_BEST announcement already, but this one is better. + Modify<ByTxHash>(it_next, [](Announcement& ann){ ann.SetState(State::CANDIDATE_READY); }); + Modify<ByTxHash>(it, [](Announcement& ann){ ann.SetState(State::CANDIDATE_BEST); }); + } + } + } + + //! Change the state of an announcement to something non-IsSelected(). If it was IsSelected(), the next best + //! announcement will be marked CANDIDATE_BEST. + void ChangeAndReselect(Iter<ByTxHash> it, State new_state) + { + assert(new_state == State::COMPLETED || new_state == State::CANDIDATE_DELAYED); + assert(it != m_index.get<ByTxHash>().end()); + if (it->IsSelected() && it != m_index.get<ByTxHash>().begin()) { + auto it_prev = std::prev(it); + // The next best CANDIDATE_READY, if any, immediately precedes the REQUESTED or CANDIDATE_BEST + // announcement in the ByTxHash index. + if (it_prev->m_txhash == it->m_txhash && it_prev->GetState() == State::CANDIDATE_READY) { + // If one such CANDIDATE_READY exists (for this txhash), convert it to CANDIDATE_BEST. 
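// Aside: a minimal sketch (plain standard containers, made-up values) of the bookkeeping idea
// behind the Erase()/Modify() wrappers above: cached per-key statistics are adjusted around
// every mutation of the main container, so they never need to be recomputed from scratch.
#include <cassert>
#include <map>
#include <set>

int main()
{
    std::multiset<int> index{7, 7, 9};                 // stand-in for m_index
    std::map<int, int> per_key_count{{7, 2}, {9, 1}};  // stand-in for m_peerinfo
    auto it = index.find(9);
    const int key = *it;
    index.erase(it);                                   // mutate the index...
    auto cit = per_key_count.find(key);
    if (--cit->second == 0) per_key_count.erase(cit);  // ...and keep the cache in sync
    assert(per_key_count.count(9) == 0 && per_key_count.at(7) == 2);
    return 0;
}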
+ Modify<ByTxHash>(it_prev, [](Announcement& ann){ ann.SetState(State::CANDIDATE_BEST); }); + } + } + Modify<ByTxHash>(it, [new_state](Announcement& ann){ ann.SetState(new_state); }); + } + + //! Check if 'it' is the only announcement for a given txhash that isn't COMPLETED. + bool IsOnlyNonCompleted(Iter<ByTxHash> it) + { + assert(it != m_index.get<ByTxHash>().end()); + assert(it->GetState() != State::COMPLETED); // Not allowed to call this on COMPLETED announcements. + + // This announcement has a predecessor that belongs to the same txhash. Due to ordering, and the + // fact that 'it' is not COMPLETED, its predecessor cannot be COMPLETED here. + if (it != m_index.get<ByTxHash>().begin() && std::prev(it)->m_txhash == it->m_txhash) return false; + + // This announcement has a successor that belongs to the same txhash, and is not COMPLETED. + if (std::next(it) != m_index.get<ByTxHash>().end() && std::next(it)->m_txhash == it->m_txhash && + std::next(it)->GetState() != State::COMPLETED) return false; + + return true; + } + + /** Convert any announcement to a COMPLETED one. If there are no non-COMPLETED announcements left for this + * txhash, they are deleted. If this was a REQUESTED announcement, and there are other CANDIDATEs left, the + * best one is made CANDIDATE_BEST. Returns whether the announcement still exists. */ + bool MakeCompleted(Iter<ByTxHash> it) + { + assert(it != m_index.get<ByTxHash>().end()); + + // Nothing to be done if it's already COMPLETED. + if (it->GetState() == State::COMPLETED) return true; + + if (IsOnlyNonCompleted(it)) { + // This is the last non-COMPLETED announcement for this txhash. Delete all. + uint256 txhash = it->m_txhash; + do { + it = Erase<ByTxHash>(it); + } while (it != m_index.get<ByTxHash>().end() && it->m_txhash == txhash); + return false; + } + + // Mark the announcement COMPLETED, and select the next best announcement (the first CANDIDATE_READY) if + // needed. + ChangeAndReselect(it, State::COMPLETED); + + return true; + } + + //! Make the data structure consistent with a given point in time: + //! - REQUESTED annoucements with expiry <= now are turned into COMPLETED. + //! - CANDIDATE_DELAYED announcements with reqtime <= now are turned into CANDIDATE_{READY,BEST}. + //! - CANDIDATE_{READY,BEST} announcements with reqtime > now are turned into CANDIDATE_DELAYED. + void SetTimePoint(std::chrono::microseconds now, std::vector<std::pair<NodeId, GenTxid>>* expired) + { + if (expired) expired->clear(); + + // Iterate over all CANDIDATE_DELAYED and REQUESTED from old to new, as long as they're in the past, + // and convert them to CANDIDATE_READY and COMPLETED respectively. + while (!m_index.empty()) { + auto it = m_index.get<ByTime>().begin(); + if (it->GetState() == State::CANDIDATE_DELAYED && it->m_time <= now) { + PromoteCandidateReady(m_index.project<ByTxHash>(it)); + } else if (it->GetState() == State::REQUESTED && it->m_time <= now) { + if (expired) expired->emplace_back(it->m_peer, ToGenTxid(*it)); + MakeCompleted(m_index.project<ByTxHash>(it)); + } else { + break; + } + } + + while (!m_index.empty()) { + // If time went backwards, we may need to demote CANDIDATE_BEST and CANDIDATE_READY announcements back + // to CANDIDATE_DELAYED. This is an unusual edge case, and unlikely to matter in production. However, + // it makes it much easier to specify and test TxRequestTracker::Impl's behaviour. 
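// Aside: a simplified sketch of the first SetTimePoint() loop above: walk a time-sorted index
// from the front, act on every entry whose time has passed, and stop at the first entry still
// in the future. The entries and actions are illustrative; the real code changes announcement
// states in place rather than erasing entries.
#include <chrono>
#include <cstdio>
#include <map>
#include <string>

int main()
{
    using std::chrono::microseconds;
    std::multimap<microseconds, std::string> by_time{
        {microseconds{1}, "CANDIDATE_DELAYED -> CANDIDATE_READY"},
        {microseconds{2}, "REQUESTED -> COMPLETED (expired)"},
        {microseconds{9}, "still waiting"},
    };
    const microseconds now{5};
    while (!by_time.empty() && by_time.begin()->first <= now) {
        std::printf("due: %s\n", by_time.begin()->second.c_str());
        by_time.erase(by_time.begin());
    }
    return by_time.size() == 1 ? 0 : 1; // only the future entry remains
}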
+ auto it = std::prev(m_index.get<ByTime>().end()); + if (it->IsSelectable() && it->m_time > now) { + ChangeAndReselect(m_index.project<ByTxHash>(it), State::CANDIDATE_DELAYED); + } else { + break; + } + } + } + +public: + Impl(bool deterministic) : + m_computer(deterministic), + // Explicitly initialize m_index as we need to pass a reference to m_computer to ByTxHashViewExtractor. + m_index(boost::make_tuple( + boost::make_tuple(ByPeerViewExtractor(), std::less<ByPeerView>()), + boost::make_tuple(ByTxHashViewExtractor(m_computer), std::less<ByTxHashView>()), + boost::make_tuple(ByTimeViewExtractor(), std::less<ByTimeView>()) + )) {} + + // Disable copying and assigning (a default copy won't work due the stateful ByTxHashViewExtractor). + Impl(const Impl&) = delete; + Impl& operator=(const Impl&) = delete; + + void DisconnectedPeer(NodeId peer) + { + auto& index = m_index.get<ByPeer>(); + auto it = index.lower_bound(ByPeerView{peer, false, uint256::ZERO}); + while (it != index.end() && it->m_peer == peer) { + // Check what to continue with after this iteration. 'it' will be deleted in what follows, so we need to + // decide what to continue with afterwards. There are a number of cases to consider: + // - std::next(it) is end() or belongs to a different peer. In that case, this is the last iteration + // of the loop (denote this by setting it_next to end()). + // - 'it' is not the only non-COMPLETED announcement for its txhash. This means it will be deleted, but + // no other Announcement objects will be modified. Continue with std::next(it) if it belongs to the + // same peer, but decide this ahead of time (as 'it' may change position in what follows). + // - 'it' is the only non-COMPLETED announcement for its txhash. This means it will be deleted along + // with all other announcements for the same txhash - which may include std::next(it). However, other + // than 'it', no announcements for the same peer can be affected (due to (peer, txhash) uniqueness). + // In other words, the situation where std::next(it) is deleted can only occur if std::next(it) + // belongs to a different peer but the same txhash as 'it'. This is covered by the first bulletpoint + // already, and we'll have set it_next to end(). + auto it_next = (std::next(it) == index.end() || std::next(it)->m_peer != peer) ? index.end() : + std::next(it); + // If the announcement isn't already COMPLETED, first make it COMPLETED (which will mark other + // CANDIDATEs as CANDIDATE_BEST, or delete all of a txhash's announcements if no non-COMPLETED ones are + // left). + if (MakeCompleted(m_index.project<ByTxHash>(it))) { + // Then actually delete the announcement (unless it was already deleted by MakeCompleted). + Erase<ByPeer>(it); + } + it = it_next; + } + } + + void ForgetTxHash(const uint256& txhash) + { + auto it = m_index.get<ByTxHash>().lower_bound(ByTxHashView{txhash, State::CANDIDATE_DELAYED, 0}); + while (it != m_index.get<ByTxHash>().end() && it->m_txhash == txhash) { + it = Erase<ByTxHash>(it); + } + } + + void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred, + std::chrono::microseconds reqtime) + { + // Bail out if we already have a CANDIDATE_BEST announcement for this (txhash, peer) combination. The case + // where there is a non-CANDIDATE_BEST announcement already will be caught by the uniqueness property of the + // ByPeer index when we try to emplace the new object below. 
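// Aside: a minimal sketch (plain std::map, made-up values) of the iteration pattern used by
// DisconnectedPeer() above: the next iterator is decided before the current element can be
// erased, so the loop is unaffected by deletions that would invalidate 'it'.
#include <cassert>
#include <iterator>
#include <map>

int main()
{
    std::map<int, int> m{{1, 10}, {2, 20}, {3, 30}};
    for (auto it = m.begin(); it != m.end();) {
        auto it_next = std::next(it);      // fixed before any erase can invalidate 'it'
        if (it->second == 20) m.erase(it);
        it = it_next;
    }
    assert(m.size() == 2 && m.count(2) == 0);
    return 0;
}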
+ if (m_index.get<ByPeer>().count(ByPeerView{peer, true, gtxid.GetHash()})) return; + + // Try creating the announcement with CANDIDATE_DELAYED state (which will fail due to the uniqueness + // of the ByPeer index if a non-CANDIDATE_BEST announcement already exists with the same txhash and peer). + // Bail out in that case. + auto ret = m_index.get<ByPeer>().emplace(gtxid, peer, preferred, reqtime, m_current_sequence); + if (!ret.second) return; + + // Update accounting metadata. + ++m_peerinfo[peer].m_total; + ++m_current_sequence; + } + + //! Find the GenTxids to request now from peer. + std::vector<GenTxid> GetRequestable(NodeId peer, std::chrono::microseconds now, + std::vector<std::pair<NodeId, GenTxid>>* expired) + { + // Move time. + SetTimePoint(now, expired); + + // Find all CANDIDATE_BEST announcements for this peer. + std::vector<const Announcement*> selected; + auto it_peer = m_index.get<ByPeer>().lower_bound(ByPeerView{peer, true, uint256::ZERO}); + while (it_peer != m_index.get<ByPeer>().end() && it_peer->m_peer == peer && + it_peer->GetState() == State::CANDIDATE_BEST) { + selected.emplace_back(&*it_peer); + ++it_peer; + } + + // Sort by sequence number. + std::sort(selected.begin(), selected.end(), [](const Announcement* a, const Announcement* b) { + return a->m_sequence < b->m_sequence; + }); + + // Convert to GenTxid and return. + std::vector<GenTxid> ret; + ret.reserve(selected.size()); + std::transform(selected.begin(), selected.end(), std::back_inserter(ret), [](const Announcement* ann) { + return ToGenTxid(*ann); + }); + return ret; + } + + void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds expiry) + { + auto it = m_index.get<ByPeer>().find(ByPeerView{peer, true, txhash}); + if (it == m_index.get<ByPeer>().end()) { + // There is no CANDIDATE_BEST announcement, look for a _READY or _DELAYED instead. If the caller only + // ever invokes RequestedTx with the values returned by GetRequestable, and no other non-const functions + // other than ForgetTxHash and GetRequestable in between, this branch will never execute (as txhashes + // returned by GetRequestable always correspond to CANDIDATE_BEST announcements). + + it = m_index.get<ByPeer>().find(ByPeerView{peer, false, txhash}); + if (it == m_index.get<ByPeer>().end() || (it->GetState() != State::CANDIDATE_DELAYED && + it->GetState() != State::CANDIDATE_READY)) { + // There is no CANDIDATE announcement tracked for this peer, so we have nothing to do. Either this + // txhash wasn't tracked at all (and the caller should have called ReceivedInv), or it was already + // requested and/or completed for other reasons and this is just a superfluous RequestedTx call. + return; + } + + // Look for an existing CANDIDATE_BEST or REQUESTED with the same txhash. We only need to do this if the + // found announcement had a different state than CANDIDATE_BEST. If it did, invariants guarantee that no + // other CANDIDATE_BEST or REQUESTED can exist. + auto it_old = m_index.get<ByTxHash>().lower_bound(ByTxHashView{txhash, State::CANDIDATE_BEST, 0}); + if (it_old != m_index.get<ByTxHash>().end() && it_old->m_txhash == txhash) { + if (it_old->GetState() == State::CANDIDATE_BEST) { + // The data structure's invariants require that there can be at most one CANDIDATE_BEST or one + // REQUESTED announcement per txhash (but not both simultaneously), so we have to convert any + // existing CANDIDATE_BEST to another CANDIDATE_* when constructing another REQUESTED. 
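// Aside: a simplified sketch of the select / sort-by-sequence / convert steps of
// GetRequestable() above, with a made-up Ann struct standing in for Announcement.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

struct Ann { uint64_t m_sequence; char m_label; };

int main()
{
    const std::vector<Ann> anns{{5, 'a'}, {2, 'b'}, {9, 'c'}};
    std::vector<const Ann*> selected;
    for (const Ann& ann : anns) selected.push_back(&ann);  // pretend all are CANDIDATE_BEST
    // Results come back in announcement order, i.e. ascending sequence number.
    std::sort(selected.begin(), selected.end(),
              [](const Ann* a, const Ann* b) { return a->m_sequence < b->m_sequence; });
    std::vector<char> ret;
    ret.reserve(selected.size());
    std::transform(selected.begin(), selected.end(), std::back_inserter(ret),
                   [](const Ann* ann) { return ann->m_label; });
    return (ret.front() == 'b' && ret.back() == 'c') ? 0 : 1;
}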
+ // It doesn't matter whether we pick CANDIDATE_READY or _DELAYED here, as SetTimePoint() + // will correct it at GetRequestable() time. If time only goes forward, it will always be + // _READY, so pick that to avoid extra work in SetTimePoint(). + Modify<ByTxHash>(it_old, [](Announcement& ann) { ann.SetState(State::CANDIDATE_READY); }); + } else if (it_old->GetState() == State::REQUESTED) { + // As we're no longer waiting for a response to the previous REQUESTED announcement, convert it + // to COMPLETED. This also helps guaranteeing progress. + Modify<ByTxHash>(it_old, [](Announcement& ann) { ann.SetState(State::COMPLETED); }); + } + } + } + + Modify<ByPeer>(it, [expiry](Announcement& ann) { + ann.SetState(State::REQUESTED); + ann.m_time = expiry; + }); + } + + void ReceivedResponse(NodeId peer, const uint256& txhash) + { + // We need to search the ByPeer index for both (peer, false, txhash) and (peer, true, txhash). + auto it = m_index.get<ByPeer>().find(ByPeerView{peer, false, txhash}); + if (it == m_index.get<ByPeer>().end()) { + it = m_index.get<ByPeer>().find(ByPeerView{peer, true, txhash}); + } + if (it != m_index.get<ByPeer>().end()) MakeCompleted(m_index.project<ByTxHash>(it)); + } + + size_t CountInFlight(NodeId peer) const + { + auto it = m_peerinfo.find(peer); + if (it != m_peerinfo.end()) return it->second.m_requested; + return 0; + } + + size_t CountCandidates(NodeId peer) const + { + auto it = m_peerinfo.find(peer); + if (it != m_peerinfo.end()) return it->second.m_total - it->second.m_requested - it->second.m_completed; + return 0; + } + + size_t Count(NodeId peer) const + { + auto it = m_peerinfo.find(peer); + if (it != m_peerinfo.end()) return it->second.m_total; + return 0; + } + + //! Count how many announcements are being tracked in total across all peers and transactions. + size_t Size() const { return m_index.size(); } + + uint64_t ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const + { + // Return Priority as a uint64_t as Priority is internal. 
+ return uint64_t{m_computer(txhash, peer, preferred)}; + } + +}; + +TxRequestTracker::TxRequestTracker(bool deterministic) : + m_impl{MakeUnique<TxRequestTracker::Impl>(deterministic)} {} + +TxRequestTracker::~TxRequestTracker() = default; + +void TxRequestTracker::ForgetTxHash(const uint256& txhash) { m_impl->ForgetTxHash(txhash); } +void TxRequestTracker::DisconnectedPeer(NodeId peer) { m_impl->DisconnectedPeer(peer); } +size_t TxRequestTracker::CountInFlight(NodeId peer) const { return m_impl->CountInFlight(peer); } +size_t TxRequestTracker::CountCandidates(NodeId peer) const { return m_impl->CountCandidates(peer); } +size_t TxRequestTracker::Count(NodeId peer) const { return m_impl->Count(peer); } +size_t TxRequestTracker::Size() const { return m_impl->Size(); } +void TxRequestTracker::SanityCheck() const { m_impl->SanityCheck(); } + +void TxRequestTracker::PostGetRequestableSanityCheck(std::chrono::microseconds now) const +{ + m_impl->PostGetRequestableSanityCheck(now); +} + +void TxRequestTracker::ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred, + std::chrono::microseconds reqtime) +{ + m_impl->ReceivedInv(peer, gtxid, preferred, reqtime); +} + +void TxRequestTracker::RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds expiry) +{ + m_impl->RequestedTx(peer, txhash, expiry); +} + +void TxRequestTracker::ReceivedResponse(NodeId peer, const uint256& txhash) +{ + m_impl->ReceivedResponse(peer, txhash); +} + +std::vector<GenTxid> TxRequestTracker::GetRequestable(NodeId peer, std::chrono::microseconds now, + std::vector<std::pair<NodeId, GenTxid>>* expired) +{ + return m_impl->GetRequestable(peer, now, expired); +} + +uint64_t TxRequestTracker::ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const +{ + return m_impl->ComputePriority(txhash, peer, preferred); +} diff --git a/src/txrequest.h b/src/txrequest.h new file mode 100644 index 0000000000..cd3042c87e --- /dev/null +++ b/src/txrequest.h @@ -0,0 +1,211 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_TXREQUEST_H +#define BITCOIN_TXREQUEST_H + +#include <primitives/transaction.h> +#include <net.h> // For NodeId +#include <uint256.h> + +#include <chrono> +#include <vector> + +#include <stdint.h> + +/** Data structure to keep track of, and schedule, transaction downloads from peers. + * + * === Specification === + * + * We keep track of which peers have announced which transactions, and use that to determine which requests + * should go to which peer, when, and in what order. + * + * The following information is tracked per peer/tx combination ("announcement"): + * - Which peer announced it (through their NodeId) + * - The txid or wtxid of the transaction (collectively called "txhash" in what follows) + * - Whether it was a tx or wtx announcement (see BIP339). + * - What the earliest permitted time is that that transaction can be requested from that peer (called "reqtime"). + * - Whether it's from a "preferred" peer or not. Which announcements get this flag is determined by the caller, but + * this is designed for outbound peers, or other peers that we have a higher level of trust in. Even when the + * peers' preferredness changes, the preferred flag of existing announcements from that peer won't change. + * - Whether or not the transaction was requested already, and if so, when it times out (called "expiry"). 
+ * - Whether or not the transaction request failed already (timed out, or invalid transaction or NOTFOUND was + * received). + * + * Transaction requests are then assigned to peers, following these rules: + * + * - No transaction is requested as long as another request for the same txhash is outstanding (it needs to fail + * first by passing expiry, or a NOTFOUND or invalid transaction has to be received for it). + * + * Rationale: to avoid wasting bandwidth on multiple copies of the same transaction. Note that this only works + * per txhash, so if the same transaction is announced both through txid and wtxid, we have no means + * to prevent fetching both (the caller can however mitigate this by delaying one, see further). + * + * - The same transaction is never requested twice from the same peer, unless the announcement was forgotten in + * between, and re-announced. Announcements are forgotten only: + * - If a peer goes offline, all its announcements are forgotten. + * - If a transaction has been successfully received, or is otherwise no longer needed, the caller can call + * ForgetTxHash, which removes all announcements across all peers with the specified txhash. + * - If for a given txhash only already-failed announcements remain, they are all forgotten. + * + * Rationale: giving a peer multiple chances to announce a transaction would allow them to bias requests in their + * favor, worsening transaction censoring attacks. The flip side is that as long as an attacker manages + * to prevent us from receiving a transaction, failed announcements (including those from honest peers) + * will linger longer, increasing memory usage somewhat. The impact of this is limited by imposing a + * cap on the number of tracked announcements per peer. As failed requests in response to announcements + * from honest peers should be rare, this almost solely hinders attackers. + * Transaction censoring attacks can be done by announcing transactions quickly while not answering + * requests for them. See https://allquantor.at/blockchainbib/pdf/miller2015topology.pdf for more + * information. + * + * - Transactions are not requested from a peer until its reqtime has passed. + * + * Rationale: enable the calling code to define a delay for less-than-ideal peers, so that (presumed) better + * peers have a chance to give their announcement first. + * + * - If multiple viable candidate peers exist according to the above rules, pick a peer as follows: + * + * - If any preferred peers are available, non-preferred peers are not considered for what follows. + * + * Rationale: preferred peers are more trusted by us, so are less likely to be under attacker control. + * + * - Pick a uniformly random peer among the candidates. + * + * Rationale: random assignments are hard to influence for attackers. + * + * Together these rules strike a balance between being fast in non-adverserial conditions and minimizing + * susceptibility to censorship attacks. An attacker that races the network: + * - Will be unsuccessful if all preferred connections are honest (and there is at least one preferred connection). + * - If there are P preferred connections of which Ph>=1 are honest, the attacker can delay us from learning + * about a transaction by k expiration periods, where k ~ 1 + NHG(N=P-1,K=P-Ph-1,r=1), which has mean + * P/(Ph+1) (where NHG stands for Negative Hypergeometric distribution). 
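A quick check of the two quoted means, assuming the common parameterizations in which
NHG(N, K, r) has mean rK/(N - K + 1) and NB(p, r) counts successes of probability p before the
r-th failure, with mean rp/(1 - p):

\[
\mathbb{E}[k] = 1 + \left.\frac{rK}{N-K+1}\right|_{N=P-1,\;K=P-P_h-1,\;r=1}
             = 1 + \frac{P-P_h-1}{P_h+1} = \frac{P}{P_h+1}
\]
\[
\mathbb{E}[k] = P + \left.\frac{rp}{1-p}\right|_{p=1-\mathit{NP}_h/\mathit{NP},\;r=1}
             = P - 1 + \frac{\mathit{NP}}{\mathit{NP}_h}
\]

(the second line corresponding to the all-preferred-connections-to-the-attacker case described
in the next bullet).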
The "1 +" is due to the fact that the + * attacker can be the first to announce through a preferred connection in this scenario, which very likely means + * they get the first request. + * - If all P preferred connections are to the attacker, and there are NP non-preferred connections of which NPh>=1 + * are honest, where we assume that the attacker can disconnect and reconnect those connections, the distribution + * becomes k ~ P + NB(p=1-NPh/NP,r=1) (where NB stands for Negative Binomial distribution), which has mean + * P-1+NP/NPh. + * + * Complexity: + * - Memory usage is proportional to the total number of tracked announcements (Size()) plus the number of + * peers with a nonzero number of tracked announcements. + * - CPU usage is generally logarithmic in the total number of tracked announcements, plus the number of + * announcements affected by an operation (amortized O(1) per announcement). + */ +class TxRequestTracker { + // Avoid littering this header file with implementation details. + class Impl; + const std::unique_ptr<Impl> m_impl; + +public: + //! Construct a TxRequestTracker. + explicit TxRequestTracker(bool deterministic = false); + ~TxRequestTracker(); + + // Conceptually, the data structure consists of a collection of "announcements", one for each peer/txhash + // combination: + // + // - CANDIDATE announcements represent transactions that were announced by a peer, and that become available for + // download after their reqtime has passed. + // + // - REQUESTED announcements represent transactions that have been requested, and which we're awaiting a + // response for from that peer. Their expiry value determines when the request times out. + // + // - COMPLETED announcements represent transactions that have been requested from a peer, and a NOTFOUND or a + // transaction was received in response (valid or not), or they timed out. They're only kept around to + // prevent requesting them again. If only COMPLETED announcements for a given txhash remain (so no CANDIDATE + // or REQUESTED ones), all of them are deleted (this is an invariant, and maintained by all operations below). + // + // The operations below manipulate the data structure. + + /** Adds a new CANDIDATE announcement. + * + * Does nothing if one already exists for that (txhash, peer) combination (whether it's CANDIDATE, REQUESTED, or + * COMPLETED). Note that the txid/wtxid property is ignored for determining uniqueness, so if an announcement + * is added for a wtxid H, while one for txid H from the same peer already exists, it will be ignored. This is + * harmless as the txhashes being equal implies it is a non-segwit transaction, so it doesn't matter how it is + * fetched. The new announcement is given the specified preferred and reqtime values, and takes its is_wtxid + * from the specified gtxid. + */ + void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred, + std::chrono::microseconds reqtime); + + /** Deletes all announcements for a given peer. + * + * It should be called when a peer goes offline. + */ + void DisconnectedPeer(NodeId peer); + + /** Deletes all announcements for a given txhash (both txid and wtxid ones). + * + * This should be called when a transaction is no longer needed. The caller should ensure that new announcements + * for the same txhash will not trigger new ReceivedInv calls, at least in the short term after this call. + */ + void ForgetTxHash(const uint256& txhash); + + /** Find the txids to request now from peer. 
+ * + * It does the following: + * - Convert all REQUESTED announcements (for all txhashes/peers) with (expiry <= now) to COMPLETED ones. + * These are returned in expired, if non-nullptr. + * - Requestable announcements are selected: CANDIDATE announcements from the specified peer with + * (reqtime <= now) for which no existing REQUESTED announcement with the same txhash from a different peer + * exists, and for which the specified peer is the best choice among all (reqtime <= now) CANDIDATE + * announcements with the same txhash (subject to preferredness rules, and tiebreaking using a deterministic + * salted hash of peer and txhash). + * - The selected announcements are converted to GenTxids using their is_wtxid flag, and returned in + * announcement order (even if multiple were added at the same time, or when the clock went backwards while + * they were being added). This is done to minimize disruption from dependent transactions being requested + * out of order: if multiple dependent transactions are announced simultaneously by one peer, and end up + * being requested from them, the requests will happen in announcement order. + */ + std::vector<GenTxid> GetRequestable(NodeId peer, std::chrono::microseconds now, + std::vector<std::pair<NodeId, GenTxid>>* expired = nullptr); + + /** Marks a transaction as requested, with a specified expiry. + * + * If no CANDIDATE announcement for the provided peer and txhash exists, this call has no effect. Otherwise: + * - That announcement is converted to REQUESTED. + * - If any other REQUESTED announcement for the same txhash already existed, it means an unexpected request + * was made (GetRequestable will never advise doing so). In this case it is converted to COMPLETED, as we're + * no longer waiting for a response to it. + */ + void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds expiry); + + /** Converts a CANDIDATE or REQUESTED announcement to a COMPLETED one. If no such announcement exists for the + * provided peer and txhash, nothing happens. + * + * It should be called whenever a transaction or NOTFOUND was received from a peer. When the transaction is + * not needed entirely anymore, ForgetTxhash should be called instead of, or in addition to, this call. + */ + void ReceivedResponse(NodeId peer, const uint256& txhash); + + // The operations below inspect the data structure. + + /** Count how many REQUESTED announcements a peer has. */ + size_t CountInFlight(NodeId peer) const; + + /** Count how many CANDIDATE announcements a peer has. */ + size_t CountCandidates(NodeId peer) const; + + /** Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined). */ + size_t Count(NodeId peer) const; + + /** Count how many announcements are being tracked in total across all peers and transaction hashes. */ + size_t Size() const; + + /** Access to the internal priority computation (testing only) */ + uint64_t ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const; + + /** Run internal consistency check (testing only). */ + void SanityCheck() const; + + /** Run a time-dependent internal consistency check (testing only). + * + * This can only be called immediately after GetRequestable, with the same 'now' parameter. 
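// Aside: a hypothetical end-to-end usage sketch of the interface documented above, assuming the
// in-tree headers are available; the peer id, txhash, times and the deterministic flag are made up.
#include <txrequest.h>
#include <uint256.h>

#include <cassert>
#include <chrono>

int main()
{
    TxRequestTracker tracker(/* deterministic */ true);
    const NodeId peer{1};
    const GenTxid gtxid{/* is_wtxid */ true, uint256::ONE};
    // Announce, then ask what is requestable once reqtime has passed.
    tracker.ReceivedInv(peer, gtxid, /* preferred */ true, std::chrono::microseconds{10});
    const auto requestable = tracker.GetRequestable(peer, std::chrono::microseconds{20});
    assert(requestable.size() == 1);
    // Mark it requested with an expiry, then complete and forget it.
    tracker.RequestedTx(peer, gtxid.GetHash(), std::chrono::microseconds{120});
    assert(tracker.CountInFlight(peer) == 1);
    tracker.ReceivedResponse(peer, gtxid.GetHash());
    tracker.ForgetTxHash(gtxid.GetHash());
    assert(tracker.Size() == 0);
    return 0;
}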
+ */ + void PostGetRequestableSanityCheck(std::chrono::microseconds now) const; +}; + +#endif // BITCOIN_TXREQUEST_H diff --git a/src/uint256.cpp b/src/uint256.cpp index d074df2f20..f358b62903 100644 --- a/src/uint256.cpp +++ b/src/uint256.cpp @@ -80,4 +80,5 @@ template std::string base_blob<256>::ToString() const; template void base_blob<256>::SetHex(const char*); template void base_blob<256>::SetHex(const std::string&); +const uint256 uint256::ZERO(0); const uint256 uint256::ONE(1); diff --git a/src/uint256.h b/src/uint256.h index c55cb31456..ceae70707e 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -126,6 +126,7 @@ public: constexpr uint256() {} constexpr explicit uint256(uint8_t v) : base_blob<256>(v) {} explicit uint256(const std::vector<unsigned char>& vch) : base_blob<256>(vch) {} + static const uint256 ZERO; static const uint256 ONE; }; diff --git a/src/util/system.cpp b/src/util/system.cpp index a411b73a16..9f8035948b 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -427,6 +427,14 @@ bool ArgsManager::ReadSettingsFile(std::vector<std::string>* errors) SaveErrors(read_errors, errors); return false; } + for (const auto& setting : m_settings.rw_settings) { + std::string section; + std::string key = setting.first; + (void)InterpretOption(section, key, /* value */ {}); // Split setting key into section and argname + if (!GetArgFlags('-' + key)) { + LogPrintf("Ignoring unknown rw_settings value %s\n", setting.first); + } + } return true; } diff --git a/src/validation.cpp b/src/validation.cpp index 06b9b36fca..423b93479a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1532,14 +1532,21 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const C return true; } - if (!txdata.m_ready) { - txdata.Init(tx); + if (!txdata.m_spent_outputs_ready) { + std::vector<CTxOut> spent_outputs; + spent_outputs.reserve(tx.vin.size()); + + for (const auto& txin : tx.vin) { + const COutPoint& prevout = txin.prevout; + const Coin& coin = inputs.AccessCoin(prevout); + assert(!coin.IsSpent()); + spent_outputs.emplace_back(coin.out); + } + txdata.Init(tx, std::move(spent_outputs)); } + assert(txdata.m_spent_outputs.size() == tx.vin.size()); for (unsigned int i = 0; i < tx.vin.size(); i++) { - const COutPoint &prevout = tx.vin[i].prevout; - const Coin& coin = inputs.AccessCoin(prevout); - assert(!coin.IsSpent()); // We very carefully only pass in things to CScriptCheck which // are clearly committed to by tx' witness hash. This provides @@ -1548,7 +1555,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const C // spent being checked as a part of CScriptCheck. // Verify signature - CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata); + CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata); if (pvChecks) { pvChecks->push_back(CScriptCheck()); check.swap(pvChecks->back()); @@ -1562,7 +1569,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const C // splitting the network between upgraded and // non-upgraded nodes by banning CONSENSUS-failing // data providers. 
- CScriptCheck check2(coin.out, tx, i, + CScriptCheck check2(txdata.m_spent_outputs[i], tx, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); if (check2()) return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); @@ -1907,6 +1914,11 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } + // Start enforcing Taproot using versionbits logic. + if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_TAPROOT, versionbitscache) == ThresholdState::ACTIVE) { + flags |= SCRIPT_VERIFY_TAPROOT; + } + // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit) if (IsWitnessEnabled(pindex->pprev, consensusparams)) { flags |= SCRIPT_VERIFY_NULLDUMMY; diff --git a/src/versionbitsinfo.cpp b/src/versionbitsinfo.cpp index 20297b9f9d..20dfc044ca 100644 --- a/src/versionbitsinfo.cpp +++ b/src/versionbitsinfo.cpp @@ -11,4 +11,8 @@ const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_B /*.name =*/ "testdummy", /*.gbt_force =*/ true, }, + { + /*.name =*/ "taproot", + /*.gbt_force =*/ true, + }, }; diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index fbb3d2cac5..85aae0170d 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -305,17 +305,16 @@ BerkeleyDatabase::~BerkeleyDatabase() } } -BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database) +BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database) { database.AddRef(); - database.Open(pszMode); - fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w')); + database.Open(); + fReadOnly = read_only; fFlushOnClose = fFlushOnCloseIn; env = database.env.get(); pdb = database.m_db.get(); strFile = database.strFile; - bool fCreate = strchr(pszMode, 'c') != nullptr; - if (fCreate && !Exists(std::string("version"))) { + if (!Exists(std::string("version"))) { bool fTmp = fReadOnly; fReadOnly = false; Write(std::string("version"), CLIENT_VERSION); @@ -323,12 +322,9 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo } } -void BerkeleyDatabase::Open(const char* pszMode) +void BerkeleyDatabase::Open() { - bool fCreate = strchr(pszMode, 'c') != nullptr; - unsigned int nFlags = DB_THREAD; - if (fCreate) - nFlags |= DB_CREATE; + unsigned int nFlags = DB_THREAD | DB_CREATE; { LOCK(cs_db); @@ -468,7 +464,7 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip) LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile); std::string strFileRes = strFile + ".rewrite"; { // surround usage of db with extra {} - BerkeleyBatch db(*this, "r"); + BerkeleyBatch db(*this, true); std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer @@ -807,9 +803,9 @@ void BerkeleyDatabase::RemoveRef() if (env) env->m_db_in_use.notify_all(); } -std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(const char* mode, bool flush_on_close) +std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(bool flush_on_close) { - return MakeUnique<BerkeleyBatch>(*this, mode, flush_on_close); + return MakeUnique<BerkeleyBatch>(*this, false, flush_on_close); } bool ExistsBerkeleyDatabase(const fs::path& path) @@ 
-817,7 +813,7 @@ bool ExistsBerkeleyDatabase(const fs::path& path) fs::path env_directory; std::string data_filename; SplitWalletPath(path, env_directory, data_filename); - return IsBerkeleyBtree(env_directory / data_filename); + return IsBDBFile(env_directory / data_filename); } std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error) @@ -843,3 +839,28 @@ std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, con status = DatabaseStatus::SUCCESS; return db; } + +bool IsBDBFile(const fs::path& path) +{ + if (!fs::exists(path)) return false; + + // A Berkeley DB Btree file has at least 4K. + // This check also prevents opening lock files. + boost::system::error_code ec; + auto size = fs::file_size(path, ec); + if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string()); + if (size < 4096) return false; + + fsbridge::ifstream file(path, std::ios::binary); + if (!file.is_open()) return false; + + file.seekg(12, std::ios::beg); // Magic bytes start at offset 12 + uint32_t data = 0; + file.read((char*) &data, sizeof(data)); // Read 4 bytes of file to compare against magic + + // Berkeley DB Btree magic bytes, from: + // https://github.com/file/file/blob/5824af38469ec1ca9ac3ffd251e7afe9dc11e227/magic/Magdir/database#L74-L75 + // - big endian systems - 00 05 31 62 + // - little endian systems - 62 31 05 00 + return data == 0x00053162 || data == 0x62310500; +} diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index fd5a49acc3..9073c1b6b3 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -87,7 +87,7 @@ public: std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename); /** Check format of database file */ -bool IsBerkeleyBtree(const fs::path& path); +bool IsBDBFile(const fs::path& path); class BerkeleyBatch; @@ -109,9 +109,8 @@ public: ~BerkeleyDatabase() override; - /** Open the database if it is not already opened. - * Dummy function, doesn't do anything right now, but is needed for class abstraction */ - void Open(const char* mode) override; + /** Open the database if it is not already opened. */ + void Open() override; /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero */ @@ -147,6 +146,7 @@ public: /** Return path to main database filename */ std::string Filename() override { return (env->Directory() / strFile).string(); } + std::string Format() override { return "bdb"; } /** * Pointer to shared database environment. 
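// Aside: a small sketch of why IsBDBFile() above accepts both 0x00053162 and 0x62310500: the
// same four on-disk magic bytes land in a uint32_t differently depending on host byte order.
// The buffer below is illustrative, not read from a real wallet file.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    const unsigned char magic[4] = {0x00, 0x05, 0x31, 0x62}; // bytes at offset 12 of a Btree file
    uint32_t data = 0;
    std::memcpy(&data, magic, sizeof(data));
    // Little-endian hosts see 0x62310500, big-endian hosts see 0x00053162.
    std::printf("0x%08x\n", static_cast<unsigned>(data));
    return (data == 0x00053162 || data == 0x62310500) ? 0 : 1;
}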
* @@ -164,7 +164,7 @@ public: std::string strFile; /** Make a BerkeleyBatch connected to this database */ - std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override; + std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override; }; /** RAII class that provides access to a Berkeley database */ @@ -207,7 +207,7 @@ protected: BerkeleyDatabase& m_database; public: - explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true); + explicit BerkeleyBatch(BerkeleyDatabase& database, const bool fReadOnly, bool fFlushOnCloseIn=true); ~BerkeleyBatch() override; BerkeleyBatch(const BerkeleyBatch&) = delete; diff --git a/src/wallet/db.h b/src/wallet/db.h index 617ed46141..940d1cd242 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -8,6 +8,7 @@ #include <clientversion.h> #include <fs.h> +#include <optional.h> #include <streams.h> #include <support/allocators/secure.h> #include <util/memory.h> @@ -108,7 +109,7 @@ public: virtual ~WalletDatabase() {}; /** Open the database if it is not already opened. */ - virtual void Open(const char* mode) = 0; + virtual void Open() = 0; //! Counts the number of active database users to be sure that the database is not closed while someone is using it std::atomic<int> m_refcount{0}; @@ -143,13 +144,15 @@ public: /** Return path to main database file for logs and error messages. */ virtual std::string Filename() = 0; + virtual std::string Format() = 0; + std::atomic<unsigned int> nUpdateCounter; unsigned int nLastSeen; unsigned int nLastFlushed; int64_t nLastWalletUpdate; /** Make a DatabaseBatch connected to this database */ - virtual std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) = 0; + virtual std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) = 0; }; /** RAII class that provides access to a DummyDatabase. Never fails. */ @@ -178,7 +181,7 @@ public: class DummyDatabase : public WalletDatabase { public: - void Open(const char* mode) override {}; + void Open() override {}; void AddRef() override {} void RemoveRef() override {} bool Rewrite(const char* pszSkip=nullptr) override { return true; } @@ -189,16 +192,19 @@ public: void IncrementUpdateCounter() override { ++nUpdateCounter; } void ReloadDbEnv() override {} std::string Filename() override { return "dummy"; } - std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override { return MakeUnique<DummyBatch>(); } + std::string Format() override { return "dummy"; } + std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override { return MakeUnique<DummyBatch>(); } }; enum class DatabaseFormat { BERKELEY, + SQLITE, }; struct DatabaseOptions { bool require_existing = false; bool require_create = false; + Optional<DatabaseFormat> require_format; uint64_t create_flags = 0; SecureString create_passphrase; bool verify = true; diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 9e36a09780..884ab58497 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -340,8 +340,9 @@ RPCHelpMan importprunedfunds() CWallet* const pwallet = wallet.get(); CMutableTransaction tx; - if (!DecodeHexTx(tx, request.params[0].get_str())) - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); + if (!DecodeHexTx(tx, request.params[0].get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. 
Make sure the tx has at least one input."); + } uint256 hashTx = tx.GetHash(); CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION); @@ -932,6 +933,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d return "unspendable script"; case TxoutType::NONSTANDARD: case TxoutType::WITNESS_UNKNOWN: + case TxoutType::WITNESS_V1_TAPROOT: default: return "unrecognized script"; } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 23291e3a48..29b55ec130 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -2422,6 +2422,7 @@ static RPCHelpMan getwalletinfo() { {RPCResult::Type::STR, "walletname", "the wallet name"}, {RPCResult::Type::NUM, "walletversion", "the wallet version"}, + {RPCResult::Type::STR, "format", "the database format (bdb or sqlite)"}, {RPCResult::Type::STR_AMOUNT, "balance", "DEPRECATED. Identical to getbalances().mine.trusted"}, {RPCResult::Type::STR_AMOUNT, "unconfirmed_balance", "DEPRECATED. Identical to getbalances().mine.untrusted_pending"}, {RPCResult::Type::STR_AMOUNT, "immature_balance", "DEPRECATED. Identical to getbalances().mine.immature"}, @@ -2465,6 +2466,7 @@ static RPCHelpMan getwalletinfo() int64_t kp_oldest = pwallet->GetOldestKeyPoolTime(); obj.pushKV("walletname", pwallet->GetName()); obj.pushKV("walletversion", pwallet->GetVersion()); + obj.pushKV("format", pwallet->GetDatabase().Format()); obj.pushKV("balance", ValueFromAmount(bal.m_mine_trusted)); obj.pushKV("unconfirmed_balance", ValueFromAmount(bal.m_mine_untrusted_pending)); obj.pushKV("immature_balance", ValueFromAmount(bal.m_mine_immature)); @@ -3331,8 +3333,8 @@ RPCHelpMan signrawtransactionwithwallet() RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VARR, UniValue::VSTR}, true); CMutableTransaction mtx; - if (!DecodeHexTx(mtx, request.params[0].get_str(), true)) { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed"); + if (!DecodeHexTx(mtx, request.params[0].get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. 
Make sure the tx has at least one input."); } // Sign the transaction diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 435716e56a..188289b010 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -96,6 +96,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: + case TxoutType::WITNESS_V1_TAPROOT: break; case TxoutType::PUBKEY: keyID = CPubKey(vSolutions[0]).GetID(); @@ -452,7 +453,7 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, bilingual_str& error) hd_upgrade = true; } // Upgrade to HD chain split if necessary - if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT) && CHDChain::VERSION_HD_CHAIN_SPLIT) { + if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) { WalletLogPrintf("Upgrading wallet to use HD chain split\n"); m_storage.SetMinVersion(FEATURE_PRE_SPLIT_KEYPOOL); split_upgrade = FEATURE_HD_SPLIT > prev_version; diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 14fb1fa89f..63c10b7a0d 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -33,7 +33,7 @@ class WalletStorage public: virtual ~WalletStorage() = default; virtual const std::string GetDisplayName() const = 0; - virtual WalletDatabase& GetDatabase() = 0; + virtual WalletDatabase& GetDatabase() const = 0; virtual bool IsWalletFlagSet(uint64_t) const = 0; virtual void UnsetBlankWalletFlag(WalletBatch&) = 0; virtual bool CanSupportFeature(enum WalletFeature) const = 0; diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp new file mode 100644 index 0000000000..02a161ecbd --- /dev/null +++ b/src/wallet/sqlite.cpp @@ -0,0 +1,629 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <wallet/sqlite.h> + +#include <chainparams.h> +#include <crypto/common.h> +#include <logging.h> +#include <sync.h> +#include <util/memory.h> +#include <util/strencodings.h> +#include <util/system.h> +#include <util/translation.h> +#include <wallet/db.h> + +#include <sqlite3.h> +#include <stdint.h> + +static const char* const DATABASE_FILENAME = "wallet.dat"; +static constexpr int32_t WALLET_SCHEMA_VERSION = 0; + +static Mutex g_sqlite_mutex; +static int g_sqlite_count GUARDED_BY(g_sqlite_mutex) = 0; + +static void ErrorLogCallback(void* arg, int code, const char* msg) +{ + // From sqlite3_config() documentation for the SQLITE_CONFIG_LOG option: + // "The void pointer that is the second argument to SQLITE_CONFIG_LOG is passed through as + // the first parameter to the application-defined logger function whenever that function is + // invoked." + // Assert that this is the case: + assert(arg == nullptr); + LogPrintf("SQLite Error. Code: %d. 
Message: %s\n", code, msg); +} + +SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, bool mock) + : WalletDatabase(), m_mock(mock), m_dir_path(dir_path.string()), m_file_path(file_path.string()) +{ + { + LOCK(g_sqlite_mutex); + LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion()); + LogPrintf("Using wallet %s\n", m_dir_path); + + if (++g_sqlite_count == 1) { + // Setup logging + int ret = sqlite3_config(SQLITE_CONFIG_LOG, ErrorLogCallback, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to setup error log: %s\n", sqlite3_errstr(ret))); + } + // Force serialized threading mode + ret = sqlite3_config(SQLITE_CONFIG_SERIALIZED); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to configure serialized threading mode: %s\n", sqlite3_errstr(ret))); + } + } + int ret = sqlite3_initialize(); // This is a no-op if sqlite3 is already initialized + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to initialize SQLite: %s\n", sqlite3_errstr(ret))); + } + } + + try { + Open(); + } catch (const std::runtime_error&) { + // If open fails, cleanup this object and rethrow the exception + Cleanup(); + throw; + } +} + +void SQLiteBatch::SetupSQLStatements() +{ + int res; + if (!m_read_stmt) { + if ((res = sqlite3_prepare_v2(m_database.m_db, "SELECT value FROM main WHERE key = ?", -1, &m_read_stmt, nullptr)) != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to setup SQL statements: %s\n", sqlite3_errstr(res))); + } + } + if (!m_insert_stmt) { + if ((res = sqlite3_prepare_v2(m_database.m_db, "INSERT INTO main VALUES(?, ?)", -1, &m_insert_stmt, nullptr)) != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to setup SQL statements: %s\n", sqlite3_errstr(res))); + } + } + if (!m_overwrite_stmt) { + if ((res = sqlite3_prepare_v2(m_database.m_db, "INSERT or REPLACE into main values(?, ?)", -1, &m_overwrite_stmt, nullptr)) != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to setup SQL statements: %s\n", sqlite3_errstr(res))); + } + } + if (!m_delete_stmt) { + if ((res = sqlite3_prepare_v2(m_database.m_db, "DELETE FROM main WHERE key = ?", -1, &m_delete_stmt, nullptr)) != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to setup SQL statements: %s\n", sqlite3_errstr(res))); + } + } + if (!m_cursor_stmt) { + if ((res = sqlite3_prepare_v2(m_database.m_db, "SELECT key, value FROM main", -1, &m_cursor_stmt, nullptr)) != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to setup SQL statements : %s\n", sqlite3_errstr(res))); + } + } +} + +SQLiteDatabase::~SQLiteDatabase() +{ + Cleanup(); +} + +void SQLiteDatabase::Cleanup() noexcept +{ + Close(); + + LOCK(g_sqlite_mutex); + if (--g_sqlite_count == 0) { + int ret = sqlite3_shutdown(); + if (ret != SQLITE_OK) { + LogPrintf("SQLiteDatabase: Failed to shutdown SQLite: %s\n", sqlite3_errstr(ret)); + } + } +} + +bool SQLiteDatabase::Verify(bilingual_str& error) +{ + assert(m_db); + + // Check the application ID matches our network magic + sqlite3_stmt* app_id_stmt{nullptr}; + int ret = sqlite3_prepare_v2(m_db, "PRAGMA application_id", -1, &app_id_stmt, nullptr); + if (ret != SQLITE_OK) { + sqlite3_finalize(app_id_stmt); + error = strprintf(_("SQLiteDatabase: Failed to prepare the statement to fetch the application id: %s"), sqlite3_errstr(ret)); + return false; + } + ret = 
sqlite3_step(app_id_stmt); + if (ret != SQLITE_ROW) { + sqlite3_finalize(app_id_stmt); + error = strprintf(_("SQLiteDatabase: Failed to fetch the application id: %s"), sqlite3_errstr(ret)); + return false; + } + uint32_t app_id = static_cast<uint32_t>(sqlite3_column_int(app_id_stmt, 0)); + sqlite3_finalize(app_id_stmt); + uint32_t net_magic = ReadBE32(Params().MessageStart()); + if (app_id != net_magic) { + error = strprintf(_("SQLiteDatabase: Unexpected application id. Expected %u, got %u"), net_magic, app_id); + return false; + } + + // Check our schema version + sqlite3_stmt* user_ver_stmt{nullptr}; + ret = sqlite3_prepare_v2(m_db, "PRAGMA user_version", -1, &user_ver_stmt, nullptr); + if (ret != SQLITE_OK) { + sqlite3_finalize(user_ver_stmt); + error = strprintf(_("SQLiteDatabase: Failed to prepare the statement to fetch sqlite wallet schema version: %s"), sqlite3_errstr(ret)); + return false; + } + ret = sqlite3_step(user_ver_stmt); + if (ret != SQLITE_ROW) { + sqlite3_finalize(user_ver_stmt); + error = strprintf(_("SQLiteDatabase: Failed to fetch sqlite wallet schema version: %s"), sqlite3_errstr(ret)); + return false; + } + int32_t user_ver = sqlite3_column_int(user_ver_stmt, 0); + sqlite3_finalize(user_ver_stmt); + if (user_ver != WALLET_SCHEMA_VERSION) { + error = strprintf(_("SQLiteDatabase: Unknown sqlite wallet schema version %d. Only version %d is supported"), user_ver, WALLET_SCHEMA_VERSION); + return false; + } + + sqlite3_stmt* stmt{nullptr}; + ret = sqlite3_prepare_v2(m_db, "PRAGMA integrity_check", -1, &stmt, nullptr); + if (ret != SQLITE_OK) { + sqlite3_finalize(stmt); + error = strprintf(_("SQLiteDatabase: Failed to prepare statement to verify database: %s"), sqlite3_errstr(ret)); + return false; + } + while (true) { + ret = sqlite3_step(stmt); + if (ret == SQLITE_DONE) { + break; + } + if (ret != SQLITE_ROW) { + error = strprintf(_("SQLiteDatabase: Failed to execute statement to verify database: %s"), sqlite3_errstr(ret)); + break; + } + const char* msg = (const char*)sqlite3_column_text(stmt, 0); + if (!msg) { + error = strprintf(_("SQLiteDatabase: Failed to read database verification error: %s"), sqlite3_errstr(ret)); + break; + } + std::string str_msg(msg); + if (str_msg == "ok") { + continue; + } + if (error.empty()) { + error = _("Failed to verify database") + Untranslated("\n"); + } + error += Untranslated(strprintf("%s\n", str_msg)); + } + sqlite3_finalize(stmt); + return error.empty(); +} + +void SQLiteDatabase::Open() +{ + int flags = SQLITE_OPEN_FULLMUTEX | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE; + if (m_mock) { + flags |= SQLITE_OPEN_MEMORY; // In memory database for mock db + } + + if (m_db == nullptr) { + TryCreateDirectories(m_dir_path); + int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to open database: %s\n", sqlite3_errstr(ret))); + } + } + + if (sqlite3_db_readonly(m_db, "main") != 0) { + throw std::runtime_error("SQLiteDatabase: Database opened in readonly mode but read-write permissions are needed"); + } + + // Acquire an exclusive lock on the database + // First change the locking mode to exclusive + int ret = sqlite3_exec(m_db, "PRAGMA locking_mode = exclusive", nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Unable to change database locking mode to exclusive: %s\n", sqlite3_errstr(ret))); + } + // Now begin a transaction to acquire the exclusive lock. 
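// Aside: a minimal standalone sketch of the prepare / step / column / finalize cycle that
// Verify() above uses for its PRAGMA queries, run against a throwaway in-memory database.
#include <sqlite3.h>
#include <cstdio>

int main()
{
    sqlite3* db = nullptr;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
    sqlite3_stmt* stmt = nullptr;
    if (sqlite3_prepare_v2(db, "PRAGMA user_version", -1, &stmt, nullptr) != SQLITE_OK) return 1;
    if (sqlite3_step(stmt) == SQLITE_ROW) {  // PRAGMA queries return their result as a row
        std::printf("user_version = %d\n", sqlite3_column_int(stmt, 0));
    }
    sqlite3_finalize(stmt);                  // every prepared statement must be finalized
    sqlite3_close(db);
    return 0;
}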
This lock won't be released until we close because of the exclusive locking mode. + ret = sqlite3_exec(m_db, "BEGIN EXCLUSIVE TRANSACTION", nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error("SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?\n"); + } + ret = sqlite3_exec(m_db, "COMMIT", nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Unable to end exclusive lock transaction: %s\n", sqlite3_errstr(ret))); + } + + // Enable fullfsync for the platforms that use it + ret = sqlite3_exec(m_db, "PRAGMA fullfsync = true", nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to enable fullfsync: %s\n", sqlite3_errstr(ret))); + } + + // Make the table for our key-value pairs + // First check that the main table exists + sqlite3_stmt* check_main_stmt{nullptr}; + ret = sqlite3_prepare_v2(m_db, "SELECT name FROM sqlite_master WHERE type='table' AND name='main'", -1, &check_main_stmt, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to prepare statement to check table existence: %s\n", sqlite3_errstr(ret))); + } + ret = sqlite3_step(check_main_stmt); + if (sqlite3_finalize(check_main_stmt) != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to finalize statement checking table existence: %s\n", sqlite3_errstr(ret))); + } + bool table_exists; + if (ret == SQLITE_DONE) { + table_exists = false; + } else if (ret == SQLITE_ROW) { + table_exists = true; + } else { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to execute statement to check table existence: %s\n", sqlite3_errstr(ret))); + } + + // Do the db setup things because the table doesn't exist only when we are creating a new wallet + if (!table_exists) { + ret = sqlite3_exec(m_db, "CREATE TABLE main(key BLOB PRIMARY KEY NOT NULL, value BLOB NOT NULL)", nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to create new database: %s\n", sqlite3_errstr(ret))); + } + + // Set the application id + uint32_t app_id = ReadBE32(Params().MessageStart()); + std::string set_app_id = strprintf("PRAGMA application_id = %d", static_cast<int32_t>(app_id)); + ret = sqlite3_exec(m_db, set_app_id.c_str(), nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to set the application id: %s\n", sqlite3_errstr(ret))); + } + + // Set the user version + std::string set_user_ver = strprintf("PRAGMA user_version = %d", WALLET_SCHEMA_VERSION); + ret = sqlite3_exec(m_db, set_user_ver.c_str(), nullptr, nullptr, nullptr); + if (ret != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to set the wallet schema version: %s\n", sqlite3_errstr(ret))); + } + } +} + +bool SQLiteDatabase::Rewrite(const char* skip) +{ + // Rewrite the database using the VACUUM command: https://sqlite.org/lang_vacuum.html + int ret = sqlite3_exec(m_db, "VACUUM", nullptr, nullptr, nullptr); + return ret == SQLITE_OK; +} + +bool SQLiteDatabase::Backup(const std::string& dest) const +{ + sqlite3* db_copy; + int res = sqlite3_open(dest.c_str(), &db_copy); + if (res != SQLITE_OK) { + sqlite3_close(db_copy); + return false; + } + sqlite3_backup* backup = sqlite3_backup_init(db_copy, "main", m_db, "main"); + if (!backup) { + LogPrintf("%s: Unable to begin backup: %s\n", __func__, 
sqlite3_errmsg(m_db)); + sqlite3_close(db_copy); + return false; + } + // Specifying -1 will copy all of the pages + res = sqlite3_backup_step(backup, -1); + if (res != SQLITE_DONE) { + LogPrintf("%s: Unable to backup: %s\n", __func__, sqlite3_errstr(res)); + sqlite3_backup_finish(backup); + sqlite3_close(db_copy); + return false; + } + res = sqlite3_backup_finish(backup); + sqlite3_close(db_copy); + return res == SQLITE_OK; +} + +void SQLiteDatabase::Close() +{ + int res = sqlite3_close(m_db); + if (res != SQLITE_OK) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to close database: %s\n", sqlite3_errstr(res))); + } + m_db = nullptr; +} + +std::unique_ptr<DatabaseBatch> SQLiteDatabase::MakeBatch(bool flush_on_close) +{ + // We ignore flush_on_close because we don't do manual flushing for SQLite + return MakeUnique<SQLiteBatch>(*this); +} + +SQLiteBatch::SQLiteBatch(SQLiteDatabase& database) + : m_database(database) +{ + // Make sure we have a db handle + assert(m_database.m_db); + + SetupSQLStatements(); +} + +void SQLiteBatch::Close() +{ + // If m_db is in a transaction (i.e. not in autocommit mode), then abort the transaction in progress + if (m_database.m_db && sqlite3_get_autocommit(m_database.m_db) == 0) { + if (TxnAbort()) { + LogPrintf("SQLiteBatch: Batch closed unexpectedly without the transaction being explicitly committed or aborted\n"); + } else { + LogPrintf("SQLiteBatch: Batch closed and failed to abort transaction\n"); + } + } + + // Free all of the prepared statements + int ret = sqlite3_finalize(m_read_stmt); + if (ret != SQLITE_OK) { + LogPrintf("SQLiteBatch: Batch closed but could not finalize read statement: %s\n", sqlite3_errstr(ret)); + } + ret = sqlite3_finalize(m_insert_stmt); + if (ret != SQLITE_OK) { + LogPrintf("SQLiteBatch: Batch closed but could not finalize insert statement: %s\n", sqlite3_errstr(ret)); + } + ret = sqlite3_finalize(m_overwrite_stmt); + if (ret != SQLITE_OK) { + LogPrintf("SQLiteBatch: Batch closed but could not finalize overwrite statement: %s\n", sqlite3_errstr(ret)); + } + ret = sqlite3_finalize(m_delete_stmt); + if (ret != SQLITE_OK) { + LogPrintf("SQLiteBatch: Batch closed but could not finalize delete statement: %s\n", sqlite3_errstr(ret)); + } + ret = sqlite3_finalize(m_cursor_stmt); + if (ret != SQLITE_OK) { + LogPrintf("SQLiteBatch: Batch closed but could not finalize cursor statement: %s\n", sqlite3_errstr(ret)); + } + m_read_stmt = nullptr; + m_insert_stmt = nullptr; + m_overwrite_stmt = nullptr; + m_delete_stmt = nullptr; + m_cursor_stmt = nullptr; +} + +bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value) +{ + if (!m_database.m_db) return false; + assert(m_read_stmt); + + // Bind: leftmost parameter in statement is index 1 + int res = sqlite3_bind_blob(m_read_stmt, 1, key.data(), key.size(), SQLITE_STATIC); + if (res != SQLITE_OK) { + LogPrintf("%s: Unable to bind statement: %s\n", __func__, sqlite3_errstr(res)); + sqlite3_clear_bindings(m_read_stmt); + sqlite3_reset(m_read_stmt); + return false; + } + res = sqlite3_step(m_read_stmt); + if (res != SQLITE_ROW) { + if (res != SQLITE_DONE) { + // SQLITE_DONE means "not found", don't log an error in that case. 
+ LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res)); + } + sqlite3_clear_bindings(m_read_stmt); + sqlite3_reset(m_read_stmt); + return false; + } + // Leftmost column in result is index 0 + const char* data = reinterpret_cast<const char*>(sqlite3_column_blob(m_read_stmt, 0)); + int data_size = sqlite3_column_bytes(m_read_stmt, 0); + value.write(data, data_size); + + sqlite3_clear_bindings(m_read_stmt); + sqlite3_reset(m_read_stmt); + return true; +} + +bool SQLiteBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite) +{ + if (!m_database.m_db) return false; + assert(m_insert_stmt && m_overwrite_stmt); + + sqlite3_stmt* stmt; + if (overwrite) { + stmt = m_overwrite_stmt; + } else { + stmt = m_insert_stmt; + } + + // Bind: leftmost parameter in statement is index 1 + // Insert index 1 is key, 2 is value + int res = sqlite3_bind_blob(stmt, 1, key.data(), key.size(), SQLITE_STATIC); + if (res != SQLITE_OK) { + LogPrintf("%s: Unable to bind key to statement: %s\n", __func__, sqlite3_errstr(res)); + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + return false; + } + res = sqlite3_bind_blob(stmt, 2, value.data(), value.size(), SQLITE_STATIC); + if (res != SQLITE_OK) { + LogPrintf("%s: Unable to bind value to statement: %s\n", __func__, sqlite3_errstr(res)); + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + return false; + } + + // Execute + res = sqlite3_step(stmt); + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + if (res != SQLITE_DONE) { + LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res)); + } + return res == SQLITE_DONE; +} + +bool SQLiteBatch::EraseKey(CDataStream&& key) +{ + if (!m_database.m_db) return false; + assert(m_delete_stmt); + + // Bind: leftmost parameter in statement is index 1 + int res = sqlite3_bind_blob(m_delete_stmt, 1, key.data(), key.size(), SQLITE_STATIC); + if (res != SQLITE_OK) { + LogPrintf("%s: Unable to bind statement: %s\n", __func__, sqlite3_errstr(res)); + sqlite3_clear_bindings(m_delete_stmt); + sqlite3_reset(m_delete_stmt); + return false; + } + + // Execute + res = sqlite3_step(m_delete_stmt); + sqlite3_clear_bindings(m_delete_stmt); + sqlite3_reset(m_delete_stmt); + if (res != SQLITE_DONE) { + LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res)); + } + return res == SQLITE_DONE; +} + +bool SQLiteBatch::HasKey(CDataStream&& key) +{ + if (!m_database.m_db) return false; + assert(m_read_stmt); + + // Bind: leftmost parameter in statement is index 1 + bool ret = false; + int res = sqlite3_bind_blob(m_read_stmt, 1, key.data(), key.size(), SQLITE_STATIC); + if (res == SQLITE_OK) { + res = sqlite3_step(m_read_stmt); + if (res == SQLITE_ROW) { + ret = true; + } + } + + sqlite3_clear_bindings(m_read_stmt); + sqlite3_reset(m_read_stmt); + return ret; +} + +bool SQLiteBatch::StartCursor() +{ + assert(!m_cursor_init); + if (!m_database.m_db) return false; + m_cursor_init = true; + return true; +} + +bool SQLiteBatch::ReadAtCursor(CDataStream& key, CDataStream& value, bool& complete) +{ + complete = false; + + if (!m_cursor_init) return false; + + int res = sqlite3_step(m_cursor_stmt); + if (res == SQLITE_DONE) { + complete = true; + return true; + } + if (res != SQLITE_ROW) { + LogPrintf("SQLiteBatch::ReadAtCursor: Unable to execute cursor step: %s\n", sqlite3_errstr(res)); + return false; + } + + // Leftmost column in result is index 0 + const char* key_data = reinterpret_cast<const char*>(sqlite3_column_blob(m_cursor_stmt, 0)); + int 
key_data_size = sqlite3_column_bytes(m_cursor_stmt, 0); + key.write(key_data, key_data_size); + const char* value_data = reinterpret_cast<const char*>(sqlite3_column_blob(m_cursor_stmt, 1)); + int value_data_size = sqlite3_column_bytes(m_cursor_stmt, 1); + value.write(value_data, value_data_size); + return true; +} + +void SQLiteBatch::CloseCursor() +{ + sqlite3_reset(m_cursor_stmt); + m_cursor_init = false; +} + +bool SQLiteBatch::TxnBegin() +{ + if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) == 0) return false; + int res = sqlite3_exec(m_database.m_db, "BEGIN TRANSACTION", nullptr, nullptr, nullptr); + if (res != SQLITE_OK) { + LogPrintf("SQLiteBatch: Failed to begin the transaction\n"); + } + return res == SQLITE_OK; +} + +bool SQLiteBatch::TxnCommit() +{ + if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) != 0) return false; + int res = sqlite3_exec(m_database.m_db, "COMMIT TRANSACTION", nullptr, nullptr, nullptr); + if (res != SQLITE_OK) { + LogPrintf("SQLiteBatch: Failed to commit the transaction\n"); + } + return res == SQLITE_OK; +} + +bool SQLiteBatch::TxnAbort() +{ + if (!m_database.m_db || sqlite3_get_autocommit(m_database.m_db) != 0) return false; + int res = sqlite3_exec(m_database.m_db, "ROLLBACK TRANSACTION", nullptr, nullptr, nullptr); + if (res != SQLITE_OK) { + LogPrintf("SQLiteBatch: Failed to abort the transaction\n"); + } + return res == SQLITE_OK; +} + +bool ExistsSQLiteDatabase(const fs::path& path) +{ + const fs::path file = path / DATABASE_FILENAME; + return fs::symlink_status(file).type() == fs::regular_file && IsSQLiteFile(file); +} + +std::unique_ptr<SQLiteDatabase> MakeSQLiteDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error) +{ + const fs::path file = path / DATABASE_FILENAME; + try { + auto db = MakeUnique<SQLiteDatabase>(path, file); + if (options.verify && !db->Verify(error)) { + status = DatabaseStatus::FAILED_VERIFY; + return nullptr; + } + return db; + } catch (const std::runtime_error& e) { + status = DatabaseStatus::FAILED_LOAD; + error.original = e.what(); + return nullptr; + } +} + +std::string SQLiteDatabaseVersion() +{ + return std::string(sqlite3_libversion()); +} + +bool IsSQLiteFile(const fs::path& path) +{ + if (!fs::exists(path)) return false; + + // A SQLite Database file is at least 512 bytes. + boost::system::error_code ec; + auto size = fs::file_size(path, ec); + if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string()); + if (size < 512) return false; + + fsbridge::ifstream file(path, std::ios::binary); + if (!file.is_open()) return false; + + // Magic is at beginning and is 16 bytes long + char magic[16]; + file.read(magic, 16); + + // Application id is at offset 68 and 4 bytes long + file.seekg(68, std::ios::beg); + char app_id[4]; + file.read(app_id, 4); + + file.close(); + + // Check the magic, see https://sqlite.org/fileformat2.html + std::string magic_str(magic); + if (magic_str != std::string("SQLite format 3")) { + return false; + } + + // Check the application id matches our network magic + return memcmp(Params().MessageStart(), app_id, 4) == 0; +} diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h new file mode 100644 index 0000000000..693a2ef55a --- /dev/null +++ b/src/wallet/sqlite.h @@ -0,0 +1,122 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
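The Open() and Verify() code above pins down the on-disk format of a descriptor wallet: one `main` table of raw key/value blobs, the network's message-start bytes stored in `PRAGMA application_id`, and the wallet schema version in `PRAGMA user_version`. A minimal sketch of that layout with Python's standard sqlite3 module (the file name, the example record, and the assumed schema version 0 are illustrative, not produced by this patch):

```python
import sqlite3

NET_MAGIC = 0xF9BEB4D9   # mainnet message-start bytes, read big-endian
SCHEMA_VERSION = 0       # assumed value of WALLET_SCHEMA_VERSION

def to_signed32(u: int) -> int:
    # application_id is written as a signed 32-bit value, mirroring the cast in Open()
    return u - (1 << 32) if u & 0x80000000 else u

con = sqlite3.connect("example-wallet.dat")  # hypothetical file for this sketch
con.execute("CREATE TABLE main(key BLOB PRIMARY KEY NOT NULL, value BLOB NOT NULL)")
con.execute("PRAGMA application_id = %d" % to_signed32(NET_MAGIC))
con.execute("PRAGMA user_version = %d" % SCHEMA_VERSION)
# Wallet records are opaque key/value blobs; this pair is purely illustrative.
con.execute("INSERT INTO main VALUES(?, ?)", (b"\x04name", b""))
con.commit()

# The same three checks that SQLiteDatabase::Verify() performs:
app_id = con.execute("PRAGMA application_id").fetchone()[0] & 0xFFFFFFFF
user_ver = con.execute("PRAGMA user_version").fetchone()[0]
assert app_id == NET_MAGIC and user_ver == SCHEMA_VERSION
assert con.execute("PRAGMA integrity_check").fetchall() == [("ok",)]
con.close()
```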
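IsSQLiteFile() above recognizes such a wallet from two fixed fields of the 100-byte SQLite database header: the 16-byte magic string at offset 0 and the 4-byte application id at offset 68 (see https://sqlite.org/fileformat2.html). The same check as a standalone sketch (the function name and the mainnet default are assumptions for illustration):

```python
import os

MAINNET_MAGIC = bytes.fromhex("f9beb4d9")  # Params().MessageStart() on mainnet

def looks_like_wallet_sqlite(path: str, net_magic: bytes = MAINNET_MAGIC) -> bool:
    """Mirror of IsSQLiteFile(): minimum size, SQLite magic, matching application id."""
    if not os.path.exists(path) or os.path.getsize(path) < 512:
        return False                      # a valid SQLite database is at least 512 bytes
    with open(path, "rb") as f:
        header = f.read(100)              # the header occupies the first 100 bytes
    return header[:16] == b"SQLite format 3\x00" and header[68:72] == net_magic
```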
+ +#ifndef BITCOIN_WALLET_SQLITE_H +#define BITCOIN_WALLET_SQLITE_H + +#include <wallet/db.h> + +#include <sqlite3.h> + +struct bilingual_str; +class SQLiteDatabase; + +/** RAII class that provides access to a WalletDatabase */ +class SQLiteBatch : public DatabaseBatch +{ +private: + SQLiteDatabase& m_database; + + bool m_cursor_init = false; + + sqlite3_stmt* m_read_stmt{nullptr}; + sqlite3_stmt* m_insert_stmt{nullptr}; + sqlite3_stmt* m_overwrite_stmt{nullptr}; + sqlite3_stmt* m_delete_stmt{nullptr}; + sqlite3_stmt* m_cursor_stmt{nullptr}; + + void SetupSQLStatements(); + + bool ReadKey(CDataStream&& key, CDataStream& value) override; + bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite = true) override; + bool EraseKey(CDataStream&& key) override; + bool HasKey(CDataStream&& key) override; + +public: + explicit SQLiteBatch(SQLiteDatabase& database); + ~SQLiteBatch() override { Close(); } + + /* No-op. See comment on SQLiteDatabase::Flush */ + void Flush() override {} + + void Close() override; + + bool StartCursor() override; + bool ReadAtCursor(CDataStream& key, CDataStream& value, bool& complete) override; + void CloseCursor() override; + bool TxnBegin() override; + bool TxnCommit() override; + bool TxnAbort() override; +}; + +/** An instance of this class represents one SQLite3 database. + **/ +class SQLiteDatabase : public WalletDatabase +{ +private: + const bool m_mock{false}; + + const std::string m_dir_path; + + const std::string m_file_path; + + void Cleanup() noexcept; + +public: + SQLiteDatabase() = delete; + + /** Create DB handle to real database */ + SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, bool mock = false); + + ~SQLiteDatabase(); + + bool Verify(bilingual_str& error); + + /** Open the database if it is not already opened */ + void Open() override; + + /** Close the database */ + void Close() override; + + /* These functions are unused */ + void AddRef() override { assert(false); } + void RemoveRef() override { assert(false); } + + /** Rewrite the entire database on disk */ + bool Rewrite(const char* skip = nullptr) override; + + /** Back up the entire database to a file. + */ + bool Backup(const std::string& dest) const override; + + /** No-ops + * + * SQLite always flushes everything to the database file after each transaction + * (each Read/Write/Erase that we do is its own transaction unless we called + * TxnBegin) so there is no need to have Flush or Periodic Flush.
+ * + * There is no DB env to reload, so ReloadDbEnv has nothing to do + */ + void Flush() override {} + bool PeriodicFlush() override { return false; } + void ReloadDbEnv() override {} + + void IncrementUpdateCounter() override { ++nUpdateCounter; } + + std::string Filename() override { return m_file_path; } + std::string Format() override { return "sqlite"; } + + /** Make a SQLiteBatch connected to this database */ + std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override; + + sqlite3* m_db{nullptr}; +}; + +bool ExistsSQLiteDatabase(const fs::path& path); +std::unique_ptr<SQLiteDatabase> MakeSQLiteDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error); + +std::string SQLiteDatabaseVersion(); +bool IsSQLiteFile(const fs::path& path); + +#endif // BITCOIN_WALLET_SQLITE_H diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 4d8c0b175b..6b7d05fdf3 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -243,11 +243,13 @@ std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string& return wallet; } -std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::string& name, Optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings) +std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::string& name, Optional<bool> load_on_start, DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings) { uint64_t wallet_creation_flags = options.create_flags; const SecureString& passphrase = options.create_passphrase; + if (wallet_creation_flags & WALLET_FLAG_DESCRIPTORS) options.require_format = DatabaseFormat::SQLITE; + // Indicate that the wallet is actually supposed to be blank and not just blank to make it encrypted bool create_blank = (wallet_creation_flags & WALLET_FLAG_BLANK_WALLET); @@ -791,7 +793,7 @@ bool CWallet::MarkReplaced(const uint256& originalHash, const uint256& newHash) wtx.mapValue["replaced_by_txid"] = newHash.ToString(); - WalletBatch batch(*database, "r+"); + WalletBatch batch(*database); bool success = true; if (!batch.WriteTx(wtx)) { @@ -863,7 +865,7 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const CWalletTx::Confirmatio { LOCK(cs_wallet); - WalletBatch batch(*database, "r+", fFlushOnClose); + WalletBatch batch(*database, fFlushOnClose); uint256 hash = tx->GetHash(); @@ -1062,7 +1064,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) { LOCK(cs_wallet); - WalletBatch batch(*database, "r+"); + WalletBatch batch(*database); std::set<uint256> todo; std::set<uint256> done; @@ -1125,7 +1127,7 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c return; // Do not flush the wallet here for performance reasons - WalletBatch batch(*database, "r+", false); + WalletBatch batch(*database, false); std::set<uint256> todo; std::set<uint256> done; @@ -3190,7 +3192,7 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) LOCK(cs_wallet); fFirstRunRet = false; - DBErrors nLoadWalletRet = WalletBatch(*database,"cr+").LoadWallet(this); + DBErrors nLoadWalletRet = WalletBatch(*database).LoadWallet(this); if (nLoadWalletRet == DBErrors::NEED_REWRITE) { if (database->Rewrite("\x04pool")) @@ -3217,7 +3219,7 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut) { 
AssertLockHeld(cs_wallet); - DBErrors nZapSelectTxRet = WalletBatch(*database, "cr+").ZapSelectTx(vHashIn, vHashOut); + DBErrors nZapSelectTxRet = WalletBatch(*database).ZapSelectTx(vHashIn, vHashOut); for (const uint256& hash : vHashOut) { const auto& it = mapWallet.find(hash); wtxOrdered.erase(it->second.m_it_wtxOrdered); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index fb08cb4085..74de55dcb5 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -55,7 +55,7 @@ bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, Optional<bool> load_on std::vector<std::shared_ptr<CWallet>> GetWallets(); std::shared_ptr<CWallet> GetWallet(const std::string& name); std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string& name, Optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings); -std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::string& name, Optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings); +std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::string& name, Optional<bool> load_on_start, DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings); std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet); std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error); @@ -739,7 +739,7 @@ public: { return *database; } - WalletDatabase& GetDatabase() override { return *database; } + WalletDatabase& GetDatabase() const override { return *database; } /** * Select a set of coins such that nValueRet >= nTargetValue and at least diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 5bf21eb91f..0092a29cb4 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -15,6 +15,7 @@ #include <util/time.h> #include <util/translation.h> #include <wallet/bdb.h> +#include <wallet/sqlite.h> #include <wallet/wallet.h> #include <atomic> @@ -1011,6 +1012,14 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas if (ExistsBerkeleyDatabase(path)) { format = DatabaseFormat::BERKELEY; } + if (ExistsSQLiteDatabase(path)) { + if (format) { + error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", path.string())); + status = DatabaseStatus::FAILED_BAD_FORMAT; + return nullptr; + } + format = DatabaseFormat::SQLITE; + } } else if (options.require_existing) { error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", path.string())); status = DatabaseStatus::FAILED_NOT_FOUND; @@ -1029,6 +1038,20 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas return nullptr; } + // A db already exists so format is set, but options also specifies the format, so make sure they agree + if (format && options.require_format && format != options.require_format) { + error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in required format.", path.string())); + status = DatabaseStatus::FAILED_BAD_FORMAT; + return nullptr; + } + + // Format is not set when a db doesn't already exist, so use the format specified by the options if it is set. 
+ if (!format && options.require_format) format = options.require_format; + + if (format && format == DatabaseFormat::SQLITE) { + return MakeSQLiteDatabase(path, options, status, error); + } + return MakeBerkeleyDatabase(path, options, status, error); } diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index eda810ed8a..7f1b86e458 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -204,8 +204,8 @@ private: } public: - explicit WalletBatch(WalletDatabase& database, const char* pszMode = "r+", bool _fFlushOnClose = true) : - m_batch(database.MakeBatch(pszMode, _fFlushOnClose)), + explicit WalletBatch(WalletDatabase &database, bool _fFlushOnClose = true) : + m_batch(database.MakeBatch(_fFlushOnClose)), m_database(database) { } diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index 4452840eb1..0e18d6a740 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -95,6 +95,9 @@ static void WalletShowInfo(CWallet* wallet_instance) LOCK(wallet_instance->cs_wallet); tfm::format(std::cout, "Wallet info\n===========\n"); + tfm::format(std::cout, "Name: %s\n", wallet_instance->GetName()); + tfm::format(std::cout, "Format: %s\n", wallet_instance->GetDatabase().Format()); + tfm::format(std::cout, "Descriptors: %s\n", wallet_instance->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS) ? "yes" : "no"); tfm::format(std::cout, "Encrypted: %s\n", wallet_instance->IsCrypted() ? "yes" : "no"); tfm::format(std::cout, "HD (hd seed available): %s\n", wallet_instance->IsHDEnabled() ? "yes" : "no"); tfm::format(std::cout, "Keypool Size: %u\n", wallet_instance->GetKeyPoolSize()); diff --git a/src/wallet/walletutil.cpp b/src/wallet/walletutil.cpp index e4c72aed98..a2a55f9751 100644 --- a/src/wallet/walletutil.cpp +++ b/src/wallet/walletutil.cpp @@ -7,6 +7,9 @@ #include <logging.h> #include <util/system.h> +bool ExistsBerkeleyDatabase(const fs::path& path); +bool ExistsSQLiteDatabase(const fs::path& path); + fs::path GetWalletDir() { fs::path path; @@ -29,31 +32,6 @@ fs::path GetWalletDir() return path; } -bool IsBerkeleyBtree(const fs::path& path) -{ - if (!fs::exists(path)) return false; - - // A Berkeley DB Btree file has at least 4K. - // This check also prevents opening lock files. - boost::system::error_code ec; - auto size = fs::file_size(path, ec); - if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string()); - if (size < 4096) return false; - - fsbridge::ifstream file(path, std::ios::binary); - if (!file.is_open()) return false; - - file.seekg(12, std::ios::beg); // Magic bytes start at offset 12 - uint32_t data = 0; - file.read((char*) &data, sizeof(data)); // Read 4 bytes of file to compare against magic - - // Berkeley DB Btree magic bytes, from: - // https://github.com/file/file/blob/5824af38469ec1ca9ac3ffd251e7afe9dc11e227/magic/Magdir/database#L74-L75 - // - big endian systems - 00 05 31 62 - // - little endian systems - 62 31 05 00 - return data == 0x00053162 || data == 0x62310500; -} - std::vector<fs::path> ListWalletDir() { const fs::path wallet_dir = GetWalletDir(); @@ -71,10 +49,11 @@ std::vector<fs::path> ListWalletDir() // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60. 
const fs::path path = it->path().string().substr(offset); - if (it->status().type() == fs::directory_file && IsBerkeleyBtree(it->path() / "wallet.dat")) { + if (it->status().type() == fs::directory_file && + (ExistsBerkeleyDatabase(it->path()) || ExistsSQLiteDatabase(it->path()))) { // Found a directory which contains wallet.dat btree file, add it as a wallet. paths.emplace_back(path); - } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && IsBerkeleyBtree(it->path())) { + } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && ExistsBerkeleyDatabase(it->path())) { if (it->path().filename() == "wallet.dat") { // Found top-level wallet.dat btree file, add top level directory "" // as a wallet. diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 3b9bd3048f..c28bb7115f 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -25,7 +25,6 @@ from test_framework.p2p import ( from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, ) # P2PInterface is a class containing callbacks to be executed when a P2P @@ -115,7 +114,7 @@ class ExampleTest(BitcoinTestFramework): # In this test, we're not connecting node2 to node0 or node1. Calls to # sync_all() should not include node2, since we're not expecting it to # sync. - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_all(self.nodes[0:2]) # Use setup_nodes() to customize the node start behaviour (for example if @@ -183,7 +182,7 @@ class ExampleTest(BitcoinTestFramework): self.nodes[1].waitforblockheight(11) self.log.info("Connect node2 and node1") - connect_nodes(self.nodes[1], 2) + self.connect_nodes(1, 2) self.log.info("Wait for node2 to receive all the blocks from node1") self.sync_all() diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py index 17fbf50cc8..8abfdef3a1 100755 --- a/test/functional/feature_abortnode.py +++ b/test/functional/feature_abortnode.py @@ -11,7 +11,7 @@ """ from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import get_datadir_path, connect_nodes +from test_framework.util import get_datadir_path import os @@ -36,7 +36,7 @@ class AbortNodeTest(BitcoinTestFramework): # attempt. 
self.nodes[1].generate(3) with self.nodes[0].assert_debug_log(["Failed to disconnect block"]): - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.nodes[1].generate(1) # Check that node0 aborted diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 1253c45418..60492350ee 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -6,7 +6,7 @@ import time -from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment +from test_framework.blocktools import create_block, NORMAL_GBT_REQUEST_PARAMS, add_witness_commitment from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -275,6 +275,8 @@ class BIP68Test(BitcoinTestFramework): # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time+600) + # Save block template now to use for the reorg later + tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS) self.nodes[0].generate(1) assert tx2.hash not in self.nodes[0].getrawmempool() @@ -318,16 +320,15 @@ class BIP68Test(BitcoinTestFramework): # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. - tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16) - height = self.nodes[0].getblockcount() for i in range(2): - block = create_block(tip, create_coinbase(height), cur_time) - block.nVersion = 3 + block = create_block(tmpl=tmpl, ntime=cur_time) block.rehash() block.solve() tip = block.sha256 - height += 1 assert_equal(None if i == 1 else 'inconclusive', self.nodes[0].submitblock(ToHex(block))) + tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS) + tmpl['previousblockhash'] = '%x' % tip + tmpl['transactions'] = [] cur_time += 1 mempool = self.nodes[0].getrawmempool() @@ -375,9 +376,7 @@ class BIP68Test(BitcoinTestFramework): assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) # make a block that violates bip68; ensure that the tip updates - tip = int(self.nodes[0].getbestblockhash(), 16) - block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1)) - block.nVersion = 3 + block = create_block(tmpl=self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)) block.vtx.extend([tx1, tx2, tx3]) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py index 702a1d9995..8a8a0c7614 100755 --- a/test/functional/feature_fee_estimation.py +++ b/test/functional/feature_fee_estimation.py @@ -13,7 +13,6 @@ from test_framework.util import ( assert_equal, assert_greater_than, assert_greater_than_or_equal, - connect_nodes, satoshi_round, ) @@ -232,9 +231,9 @@ class EstimateFeeTest(BitcoinTestFramework): # so the estimates would not be affected by the splitting transactions self.start_node(1) self.start_node(2) - connect_nodes(self.nodes[1], 0) - connect_nodes(self.nodes[0], 2) - connect_nodes(self.nodes[2], 1) + self.connect_nodes(1, 0) + self.connect_nodes(0, 2) + self.connect_nodes(2, 1) self.sync_all() diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py index dbff6f15f2..abf87e8f0c 100755 --- a/test/functional/feature_minchainwork.py +++ b/test/functional/feature_minchainwork.py @@ -18,7 +18,7 @@ 
only succeeds past a given node once its nMinimumChainWork has been exceeded. import time from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import connect_nodes, assert_equal +from test_framework.util import assert_equal # 2 hashes required per regtest block (with no difficulty adjustment) REGTEST_WORK_PER_BLOCK = 2 @@ -39,7 +39,7 @@ class MinimumChainWorkTest(BitcoinTestFramework): # block relay to inbound peers. self.setup_nodes() for i in range(self.num_nodes-1): - connect_nodes(self.nodes[i+1], i) + self.connect_nodes(i+1, i) def run_test(self): # Start building a chain on node0. node2 shouldn't be able to sync until node1's diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index 5522f2b7c6..cf102f321c 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -9,8 +9,6 @@ from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, - disconnect_nodes, hex_str_to_bytes, ) @@ -75,7 +73,7 @@ class NotificationsTest(BitcoinTestFramework): self.log.info("test -walletnotify after rescan") # restart node to rescan to force wallet notifications self.start_node(1) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) @@ -126,12 +124,12 @@ class NotificationsTest(BitcoinTestFramework): # Bump tx2 as bump2 and generate a block on node 0 while # disconnected, then reconnect and check for notifications on node 1 # about newly confirmed bump2 and newly conflicted tx2. - disconnect_nodes(self.nodes[0], 1) + self.disconnect_nodes(0, 1) bump2 = self.nodes[0].bumpfee(tx2)["txid"] self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE) assert_equal(self.nodes[0].gettransaction(bump2)["confirmations"], 1) assert_equal(tx2 in self.nodes[1].getrawmempool(), True) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks() self.expect_wallet_notify([bump2, tx2]) assert_equal(self.nodes[1].gettransaction(bump2)["confirmations"], 1) diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index ff55cb76d9..d1196a4bbd 100755 --- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -14,7 +14,7 @@ Generate 427 more blocks. """ import time -from test_framework.blocktools import create_coinbase, create_block, create_transaction, add_witness_commitment +from test_framework.blocktools import NORMAL_GBT_REQUEST_PARAMS, create_block, create_transaction, add_witness_commitment from test_framework.messages import CTransaction from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework @@ -37,14 +37,15 @@ def trueDummy(tx): class NULLDUMMYTest(BitcoinTestFramework): def set_test_params(self): - self.num_nodes = 1 + # Need two nodes only so GBT doesn't complain that it's not connected + self.num_nodes = 2 self.setup_clean_chain = True # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through # normal segwit activation here (and don't use the default always-on behaviour). 
self.extra_args = [[ '-segwitheight=432', '-addresstype=legacy', - ]] + ]] * 2 def skip_test_if_missing_module(self): self.skip_if_no_wallet() @@ -61,7 +62,6 @@ class NULLDUMMYTest(BitcoinTestFramework): coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) self.nodes[0].generate(427) # Block 429 self.lastblockhash = self.nodes[0].getbestblockhash() - self.tip = int("0x" + self.lastblockhash, 0) self.lastblockheight = 429 self.lastblocktime = int(time.time()) + 429 @@ -102,8 +102,10 @@ class NULLDUMMYTest(BitcoinTestFramework): self.block_submit(self.nodes[0], test6txs, True, True) def block_submit(self, node, txs, witness=False, accept=False): - block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1) - block.nVersion = 4 + tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS) + assert_equal(tmpl['previousblockhash'], self.lastblockhash) + assert_equal(tmpl['height'], self.lastblockheight + 1) + block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1) for tx in txs: tx.rehash() block.vtx.append(tx) @@ -114,7 +116,6 @@ class NULLDUMMYTest(BitcoinTestFramework): assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex())) if (accept): assert_equal(node.getbestblockhash(), block.hash) - self.tip = block.sha256 self.lastblockhash = block.hash self.lastblocktime += 1 self.lastblockheight += 1 diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index be323d355e..dfae58e860 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -18,8 +18,9 @@ Test plan: - proxy on IPv6 - Create various proxies (as threads) -- Create bitcoinds that connect to them -- Manipulate the bitcoinds using addnode (onetry) an observe effects +- Create nodes that connect to them +- Manipulate the peer connections using addnode (onetry) and observe effects +- Test the getpeerinfo `network` field for the peer addnode connect to IPv4 addnode connect to IPv6 @@ -40,6 +41,12 @@ from test_framework.util import ( from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports +# From GetNetworkName() in netbase.cpp: +NET_UNROUTABLE = "" +NET_IPV4 = "ipv4" +NET_IPV6 = "ipv6" +NET_ONION = "onion" + class ProxyTest(BitcoinTestFramework): def set_test_params(self): @@ -90,10 +97,16 @@ class ProxyTest(BitcoinTestFramework): self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() + def network_test(self, node, addr, network): + for peer in node.getpeerinfo(): + if peer["addr"] == addr: + assert_equal(peer["network"], network) + def node_test(self, node, proxies, auth, test_onion=True): rv = [] - # Test: outgoing IPv4 connection through node - node.addnode("15.61.23.23:1234", "onetry") + addr = "15.61.23.23:1234" + self.log.debug("Test: outgoing IPv4 connection through node for address {}".format(addr)) + node.addnode(addr, "onetry") cmd = proxies[0].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 @@ -104,10 +117,12 @@ class ProxyTest(BitcoinTestFramework): assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) + self.network_test(node, addr, network=NET_IPV4) if self.have_ipv6: - # Test: outgoing IPv6 connection through node - node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") + addr = "[1233:3432:2434:2343:3234:2345:6546:4534]:5443" + self.log.debug("Test: 
outgoing IPv6 connection through node for address {}".format(addr)) + node.addnode(addr, "onetry") cmd = proxies[1].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 @@ -118,10 +133,12 @@ class ProxyTest(BitcoinTestFramework): assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) + self.network_test(node, addr, network=NET_IPV6) if test_onion: - # Test: outgoing onion connection through node - node.addnode("bitcoinostk4e4re.onion:8333", "onetry") + addr = "bitcoinostk4e4re.onion:8333" + self.log.debug("Test: outgoing onion connection through node for address {}".format(addr)) + node.addnode(addr, "onetry") cmd = proxies[2].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) @@ -131,9 +148,11 @@ class ProxyTest(BitcoinTestFramework): assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) + self.network_test(node, addr, network=NET_ONION) - # Test: outgoing DNS name connection through node - node.addnode("node.noumenon:8333", "onetry") + addr = "node.noumenon:8333" + self.log.debug("Test: outgoing DNS name connection through node for address {}".format(addr)) + node.addnode(addr, "onetry") cmd = proxies[3].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) @@ -143,6 +162,7 @@ class ProxyTest(BitcoinTestFramework): assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) + self.network_test(node, addr, network=NET_UNROUTABLE) return rv @@ -197,5 +217,6 @@ class ProxyTest(BitcoinTestFramework): assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) + if __name__ == '__main__': ProxyTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index e370e11a3e..f09bffe2d4 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -18,8 +18,6 @@ from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, - connect_nodes, - disconnect_nodes, ) # Rescans start at the earliest block up to 2 hours before a key timestamp, so @@ -102,11 +100,11 @@ class PruneTest(BitcoinTestFramework): self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '') - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[0], 4) + self.connect_nodes(0, 1) + self.connect_nodes(1, 2) + self.connect_nodes(0, 2) + self.connect_nodes(0, 3) + self.connect_nodes(0, 4) self.sync_blocks(self.nodes[0:5]) def setup_nodes(self): @@ -148,8 +146,8 @@ class PruneTest(BitcoinTestFramework): for _ in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects - disconnect_nodes(self.nodes[0], 1) - disconnect_nodes(self.nodes[0], 2) + self.disconnect_nodes(0, 1) + self.disconnect_nodes(0, 2) # Mine 24 blocks in node 1 mine_large_blocks(self.nodes[1], 24) @@ -157,8 +155,8 @@ class PruneTest(BitcoinTestFramework): mine_large_blocks(self.nodes[0], 25) # Create connections in the order so both nodes can see the reorg at the same time - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 1) + 
self.connect_nodes(0, 2) self.sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir)) @@ -187,15 +185,15 @@ class PruneTest(BitcoinTestFramework): self.log.info("New best height: %d" % self.nodes[1].getblockcount()) # Disconnect node1 and generate the new chain - disconnect_nodes(self.nodes[0], 1) - disconnect_nodes(self.nodes[1], 2) + self.disconnect_nodes(0, 1) + self.disconnect_nodes(1, 2) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) + self.connect_nodes(0, 1) + self.connect_nodes(1, 2) self.sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount()) @@ -336,7 +334,7 @@ class PruneTest(BitcoinTestFramework): # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") - connect_nodes(self.nodes[0], 5) + self.connect_nodes(0, 5) nds = [self.nodes[0], self.nodes[5]] self.sync_blocks(nds, wait=5, timeout=300) self.restart_node(5, extra_args=["-prune=550"]) # restart to trigger rescan diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index 0842972779..7bd2fc7847 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -22,7 +22,6 @@ from test_framework.util import ( assert_equal, assert_is_hex_string, assert_raises_rpc_error, - connect_nodes, hex_str_to_bytes, try_rpc, ) @@ -61,14 +60,12 @@ class SegWitTest(BitcoinTestFramework): ], [ "-acceptnonstdtxn=1", - "-blockversion=4", "-rpcserialversion=1", "-segwitheight=432", "-addresstype=legacy", ], [ "-acceptnonstdtxn=1", - "-blockversion=536870915", "-segwitheight=432", "-addresstype=legacy", ], @@ -80,7 +77,7 @@ class SegWitTest(BitcoinTestFramework): def setup_network(self): super().setup_network() - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) self.sync_all() def success_mine(self, node, txid, sign, redeem_script=""): diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py index 3705caf4ff..5a0236401d 100755 --- a/test/functional/feature_settings.py +++ b/test/functional/feature_settings.py @@ -31,19 +31,25 @@ class SettingsTest(BitcoinTestFramework): # Assert settings are parsed and logged with settings.open("w") as fp: - json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}, fp) + json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}, fp) with node.assert_debug_log(expected_msgs=[ + 'Ignoring unknown rw_settings value bool', + 'Ignoring unknown rw_settings value list', + 'Ignoring unknown rw_settings value null', + 'Ignoring unknown rw_settings value num', + 'Ignoring unknown rw_settings value string', 'Setting file arg: string = "string"', 'Setting file arg: num = 5', 'Setting file arg: bool = true', 'Setting file arg: null = null', - 'Setting file arg: list = [6,7]']): + 'Setting file arg: list = [6,7]', + ]): self.start_node(0) self.stop_node(0) # Assert settings are unchanged after shutdown with settings.open() as fp: - assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}) + assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}) # Test invalid json with settings.open("w") as fp: diff --git 
a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py new file mode 100755 index 0000000000..7b534c1c2f --- /dev/null +++ b/test/functional/feature_taproot.py @@ -0,0 +1,1458 @@ +#!/usr/bin/env python3 +# Copyright (c) 2019-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# Test Taproot softfork (BIPs 340-342) + +from test_framework.blocktools import ( + create_coinbase, + create_block, + add_witness_commitment, + MAX_BLOCK_SIGOPS_WEIGHT, + WITNESS_SCALE_FACTOR, +) +from test_framework.messages import ( + COutPoint, + CTransaction, + CTxIn, + CTxInWitness, + CTxOut, + ToHex, +) +from test_framework.script import ( + ANNEX_TAG, + CScript, + CScriptNum, + CScriptOp, + LEAF_VERSION_TAPSCRIPT, + LegacySignatureHash, + LOCKTIME_THRESHOLD, + MAX_SCRIPT_ELEMENT_SIZE, + OP_0, + OP_1, + OP_2, + OP_3, + OP_4, + OP_5, + OP_6, + OP_7, + OP_8, + OP_9, + OP_10, + OP_11, + OP_12, + OP_16, + OP_2DROP, + OP_2DUP, + OP_CHECKMULTISIG, + OP_CHECKMULTISIGVERIFY, + OP_CHECKSIG, + OP_CHECKSIGADD, + OP_CHECKSIGVERIFY, + OP_CODESEPARATOR, + OP_DROP, + OP_DUP, + OP_ELSE, + OP_ENDIF, + OP_EQUAL, + OP_EQUALVERIFY, + OP_HASH160, + OP_IF, + OP_NOP, + OP_NOT, + OP_NOTIF, + OP_PUSHDATA1, + OP_RETURN, + OP_SWAP, + OP_VERIFY, + SIGHASH_DEFAULT, + SIGHASH_ALL, + SIGHASH_NONE, + SIGHASH_SINGLE, + SIGHASH_ANYONECANPAY, + SegwitV0SignatureHash, + TaprootSignatureHash, + is_op_success, + taproot_construct, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_raises_rpc_error, assert_equal +from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey +from test_framework.address import ( + hash160, + sha256, +) +from collections import OrderedDict, namedtuple +from io import BytesIO +import json +import hashlib +import os +import random + +# === Framework for building spending transactions. === +# +# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that +# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing +# process can be overridden. +# +# Specifically, a context object is a dict that maps names to compositions of: +# - values +# - lists of values +# - callables which, when fed the context object as argument, produce any of these +# +# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs. +# +# The get(ctx, name) function can evaluate a name, and cache its result in the context. +# getter(name) can be used to construct a callable that evaluates name. For example: +# +# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']} +# +# creates a context where the script inputs are a signature plus the bytes 0x01. +# +# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively +# modified context. For example: +# +# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)} +# +# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from +# +# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT} +# +# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature) +# while ctx2 only uses the modified hashtype inside the sighash calculation. 
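A toy illustration of the lazy-lookup, caching, and override mechanics described above, using the get/getter/override/deep_eval helpers defined next (the dictionary and its values are made up for the example; real contexts are built from DEFAULT_CONTEXT below):

```python
# "sum" is an unevaluated expression over two plain values in the same context.
toy = {
    "a": 2,
    "b": 3,
    "sum": lambda c: get(c, "a") + get(c, "b"),
}
assert get(dict(toy), "sum") == 5           # evaluated on demand, then cached as Final(5)

# override() re-evaluates the same expression in a selectively patched context; this is
# how failure cases can e.g. corrupt only the sighash that feeds the signature.
patched = override(getter("sum"), a=40)
assert deep_eval(dict(toy), patched) == 43  # "a" is replaced for this evaluation only

# dict(toy) copies are used because get() caches results by mutating the context it is given.
```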
+ +def deep_eval(ctx, expr): + """Recursively replace any callables c in expr (including inside lists) with c(ctx).""" + while callable(expr): + expr = expr(ctx) + if isinstance(expr, list): + expr = [deep_eval(ctx, x) for x in expr] + return expr + +# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them). +Final = namedtuple("Final", "value") + +def get(ctx, name): + """Evaluate name in context ctx.""" + assert name in ctx, "Missing '%s' in context" % name + expr = ctx[name] + if not isinstance(expr, Final): + # Evaluate and cache the result. + expr = Final(deep_eval(ctx, expr)) + ctx[name] = expr + return expr.value + +def getter(name): + """Return a callable that evaluates name in its passed context.""" + return lambda ctx: get(ctx, name) + +def override(expr, **kwargs): + """Return a callable that evaluates expr in a modified context.""" + return lambda ctx: deep_eval({**ctx, **kwargs}, expr) + +# === Implementations for the various default expressions in DEFAULT_CONTEXT === + +def default_hashtype(ctx): + """Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise.""" + mode = get(ctx, "mode") + if mode == "taproot": + return SIGHASH_DEFAULT + else: + return SIGHASH_ALL + +def default_tapleaf(ctx): + """Default expression for "tapleaf": looking up leaf in tap[2].""" + return get(ctx, "tap").leaves[get(ctx, "leaf")] + +def default_script_taproot(ctx): + """Default expression for "script_taproot": tapleaf.script.""" + return get(ctx, "tapleaf").script + +def default_leafversion(ctx): + """Default expression for "leafversion": tapleaf.version""" + return get(ctx, "tapleaf").version + +def default_negflag(ctx): + """Default expression for "negflag": tap.negflag.""" + return get(ctx, "tap").negflag + +def default_pubkey_inner(ctx): + """Default expression for "pubkey_inner": tap.inner_pubkey.""" + return get(ctx, "tap").inner_pubkey + +def default_merklebranch(ctx): + """Default expression for "merklebranch": tapleaf.merklebranch.""" + return get(ctx, "tapleaf").merklebranch + +def default_controlblock(ctx): + """Default expression for "controlblock": combine leafversion, negflag, pubkey_inner, merklebranch.""" + return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_inner") + get(ctx, "merklebranch") + +def default_sighash(ctx): + """Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash.""" + tx = get(ctx, "tx") + idx = get(ctx, "idx") + hashtype = get(ctx, "hashtype_actual") + mode = get(ctx, "mode") + if mode == "taproot": + # BIP341 signature hash + utxos = get(ctx, "utxos") + annex = get(ctx, "annex") + if get(ctx, "leaf") is not None: + codeseppos = get(ctx, "codeseppos") + leaf_ver = get(ctx, "leafversion") + script = get(ctx, "script_taproot") + return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex) + else: + return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex) + elif mode == "witv0": + # BIP143 signature hash + scriptcode = get(ctx, "scriptcode") + utxos = get(ctx, "utxos") + return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue) + else: + # Pre-segwit signature hash + scriptcode = get(ctx, "scriptcode") + return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0] + +def default_tweak(ctx): + """Default expression for "tweak": None if a leaf is specified, tap[0] otherwise.""" + if 
get(ctx, "leaf") is None: + return get(ctx, "tap").tweak + return None + +def default_key_tweaked(ctx): + """Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise.""" + key = get(ctx, "key") + tweak = get(ctx, "tweak") + if tweak is None: + return key + else: + return tweak_add_privkey(key, tweak) + +def default_signature(ctx): + """Default expression for "signature": BIP340 signature or ECDSA signature depending on mode.""" + sighash = get(ctx, "sighash") + if get(ctx, "mode") == "taproot": + key = get(ctx, "key_tweaked") + flip_r = get(ctx, "flag_flip_r") + flip_p = get(ctx, "flag_flip_p") + return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p) + else: + key = get(ctx, "key") + return key.sign_ecdsa(sighash) + +def default_hashtype_actual(ctx): + """Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot.""" + hashtype = get(ctx, "hashtype") + mode = get(ctx, "mode") + if mode != "taproot": + return hashtype + idx = get(ctx, "idx") + tx = get(ctx, "tx") + if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout): + return (hashtype & ~3) | SIGHASH_NONE + return hashtype + +def default_bytes_hashtype(ctx): + """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise.""" + return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0]) + +def default_sign(ctx): + """Default expression for "sign": concatenation of signature and bytes_hashtype.""" + return get(ctx, "signature") + get(ctx, "bytes_hashtype") + +def default_inputs_keypath(ctx): + """Default expression for "inputs_keypath": a signature.""" + return [get(ctx, "sign")] + +def default_witness_taproot(ctx): + """Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed.""" + annex = get(ctx, "annex") + suffix_annex = [] + if annex is not None: + suffix_annex = [annex] + if get(ctx, "leaf") is None: + return get(ctx, "inputs_keypath") + suffix_annex + else: + return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex + +def default_witness_witv0(ctx): + """Default expression for "witness_witv0", consisting of inputs and witness script, as needed.""" + script = get(ctx, "script_witv0") + inputs = get(ctx, "inputs") + if script is None: + return inputs + else: + return inputs + [script] + +def default_witness(ctx): + """Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed.""" + mode = get(ctx, "mode") + if mode == "taproot": + return get(ctx, "witness_taproot") + elif mode == "witv0": + return get(ctx, "witness_witv0") + else: + return [] + +def default_scriptsig(ctx): + """Default expression for "scriptsig", consisting of inputs and redeemscript, as needed.""" + scriptsig = [] + mode = get(ctx, "mode") + if mode == "legacy": + scriptsig = get(ctx, "inputs") + redeemscript = get(ctx, "script_p2sh") + if redeemscript is not None: + scriptsig += [bytes(redeemscript)] + return scriptsig + +# The default context object. +DEFAULT_CONTEXT = { + # == The main expressions to evaluate. Only override these for unusual or invalid spends. == + # The overall witness stack, as a list of bytes objects. + "witness": default_witness, + # The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed) + "scriptsig": default_scriptsig, + + # == Expressions you'll generally only override for intentionally invalid spends. 
== + # The witness stack for spending a taproot output. + "witness_taproot": default_witness_taproot, + # The witness stack for spending a P2WPKH/P2WSH output. + "witness_witv0": default_witness_witv0, + # The script inputs for a taproot key path spend. + "inputs_keypath": default_inputs_keypath, + # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed). + "hashtype_actual": default_hashtype_actual, + # The bytes object for a full signature (including hashtype byte, if needed). + "bytes_hashtype": default_bytes_hashtype, + # A full script signature (bytes including hashtype, if needed) + "sign": default_sign, + # An ECDSA or Schnorr signature (excluding hashtype byte). + "signature": default_signature, + # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends). + "key_tweaked": default_key_tweaked, + # The tweak to use (None for script path spends, the actual tweak for key path spends). + "tweak": default_tweak, + # The sighash value (32 bytes) + "sighash": default_sighash, + # The information about the chosen script path spend (TaprootLeafInfo object). + "tapleaf": default_tapleaf, + # The script to push, and include in the sighash, for a taproot script path spend. + "script_taproot": default_script_taproot, + # The inner pubkey for a taproot script path spend (32 bytes). + "pubkey_inner": default_pubkey_inner, + # The negation flag of the inner pubkey for a taproot script path spend. + "negflag": default_negflag, + # The leaf version to include in the sighash (this does not affect the one in the control block). + "leafversion": default_leafversion, + # The Merkle path to include in the control block for a script path spend. + "merklebranch": default_merklebranch, + # The control block to push for a taproot script path spend. + "controlblock": default_controlblock, + # Whether to produce signatures with invalid P sign (Schnorr signatures only). + "flag_flip_p": False, + # Whether to produce signatures with invalid R sign (Schnorr signatures only). + "flag_flip_r": False, + + # == Parameters that can be changed without invalidating, but do have a default: == + # The hashtype (as an integer). + "hashtype": default_hashtype, + # The annex (only when mode=="taproot"). + "annex": None, + # The codeseparator position (only when mode=="taproot"). + "codeseppos": -1, + # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH). + "script_p2sh": None, + # The script to add to the witness in (if P2WSH; None implies P2WPKH) + "script_witv0": None, + # The leaf to use in taproot spends (if script path spend; None implies key path spend). + "leaf": None, + # The input arguments to provide to the executed script + "inputs": [], + + # == Parameters to be set before evaluation: == + # - mode: what spending style to use ("taproot", "witv0", or "legacy"). + # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr). + # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot"). + # - tx: the transaction to sign. + # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot"). + # - idx: the input position being signed. + # - scriptcode: the scriptcode to include in legacy and witv0 sighashes. 
+} + +def flatten(lst): + ret = [] + for elem in lst: + if isinstance(elem, list): + ret += flatten(elem) + else: + ret.append(elem) + return ret + +def spend(tx, idx, utxos, **kwargs): + """Sign transaction input idx of tx, provided utxos is the list of outputs being spent. + + Additional arguments may be provided that override any aspect of the signing process. + See DEFAULT_CONTEXT above for what can be overridden, and what must be provided. + """ + + ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs} + + def to_script(elem): + """If fed a CScript, return it; if fed bytes, return a CScript that pushes it.""" + if isinstance(elem, CScript): + return elem + else: + return CScript([elem]) + + scriptsig_list = flatten(get(ctx, "scriptsig")) + scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list)) + witness_stack = flatten(get(ctx, "witness")) + return (scriptsig, witness_stack) + + +# === Spender objects === +# +# Each spender is a tuple of: +# - A scriptPubKey which is to be spent from (CScript) +# - A comment describing the test (string) +# - Whether the spending (on itself) is expected to be standard (bool) +# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs: +# - A transaction to sign (CTransaction) +# - An input position (int) +# - The spent UTXOs by this transaction (list of CTxOut) +# - Whether to produce a valid spend (bool) +# - A string with an expected error message for failure case if known +# - The (pre-taproot) sigops weight consumed by a successful spend +# - Whether this spend cannot fail +# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior) + +Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch") + +def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs): + """Helper for constructing Spender objects using the context signing framework. + + * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script) + * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh) + * script: the actual script executed (for bare/P2WSH/P2SH spending) + * pkh: the public key for P2PKH or P2WPKH spending + * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered) + * spk_mutate_pre_psh: a callable to be applied to the script (before potentially P2SH-wrapping it) + * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set) + * standard: whether the (valid version of) spending is expected to be standard + * err_msg: a string with an expected error message for failure (or None, if not cared about) + * sigops_weight: the pre-taproot sigops weight consumed by a successful spend + """ + + conf = dict() + + # Compute scriptPubKey and set useful defaults based on the inputs. 
+    if witv0:
+        assert tap is None
+        conf["mode"] = "witv0"
+        if pkh is not None:
+            # P2WPKH
+            assert script is None
+            pubkeyhash = hash160(pkh)
+            spk = CScript([OP_0, pubkeyhash])
+            conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
+            conf["script_witv0"] = None
+            conf["inputs"] = [getter("sign"), pkh]
+        elif script is not None:
+            # P2WSH
+            spk = CScript([OP_0, sha256(script)])
+            conf["scriptcode"] = script
+            conf["script_witv0"] = script
+        else:
+            assert False
+    elif tap is None:
+        conf["mode"] = "legacy"
+        if pkh is not None:
+            # P2PKH
+            assert script is None
+            pubkeyhash = hash160(pkh)
+            spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
+            conf["scriptcode"] = spk
+            conf["inputs"] = [getter("sign"), pkh]
+        elif script is not None:
+            # bare
+            spk = script
+            conf["scriptcode"] = script
+        else:
+            assert False
+    else:
+        assert script is None
+        conf["mode"] = "taproot"
+        conf["tap"] = tap
+        spk = tap.scriptPubKey
+
+    if spk_mutate_pre_p2sh is not None:
+        spk = spk_mutate_pre_p2sh(spk)
+
+    if p2sh:
+        # P2SH wrapper can be combined with anything else
+        conf["script_p2sh"] = spk
+        spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
+
+    conf = {**conf, **kwargs}
+
+    def sat_fn(tx, idx, utxos, valid):
+        if valid:
+            return spend(tx, idx, utxos, **conf)
+        else:
+            assert failure is not None
+            return spend(tx, idx, utxos, **{**conf, **failure})
+
+    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
+
+def add_spender(spenders, *args, **kwargs):
+    """Make a spender using make_spender, and add it to spenders."""
+    spenders.append(make_spender(*args, **kwargs))
+
+# === Helpers for the test ===
+
+def random_checksig_style(pubkey):
+    """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
+    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
+    if (opcode == OP_CHECKSIGVERIFY):
+        ret = CScript([pubkey, opcode, OP_1])
+    elif (opcode == OP_CHECKSIGADD):
+        num = random.choice([0, 0x7fffffff, -0x7fffffff])
+        ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
+    else:
+        ret = CScript([pubkey, opcode])
+    return bytes(ret)
+
+def random_bytes(n):
+    """Return a random bytes object of length n."""
+    return bytes(random.getrandbits(8) for i in range(n))
+
+def bitflipper(expr):
+    """Return a callable that evaluates expr and returns it with a random bitflip."""
+    def fn(ctx):
+        sub = deep_eval(ctx, expr)
+        assert isinstance(sub, bytes)
+        return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')
+    return fn
+
+def zero_appender(expr):
+    """Return a callable that evaluates expr and returns it with a zero added."""
+    return lambda ctx: deep_eval(ctx, expr) + b"\x00"
+
+def byte_popper(expr):
+    """Return a callable that evaluates expr and returns it with its last byte removed."""
+    return lambda ctx: deep_eval(ctx, expr)[:-1]
+
+# Expected error strings
+
+ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
+ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
+ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
+ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
+ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
+ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness
program hash mismatch"} +ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"} +ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"} +ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"} +ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"} +ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"} +ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"} +ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"} +ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"} +ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"} +ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"} +ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"} +ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"} +ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"} + +VALID_SIGHASHES_ECDSA = [ + SIGHASH_ALL, + SIGHASH_NONE, + SIGHASH_SINGLE, + SIGHASH_ANYONECANPAY + SIGHASH_ALL, + SIGHASH_ANYONECANPAY + SIGHASH_NONE, + SIGHASH_ANYONECANPAY + SIGHASH_SINGLE +] + +VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA + +VALID_SIGHASHES_TAPROOT_SINGLE = [ + SIGHASH_SINGLE, + SIGHASH_ANYONECANPAY + SIGHASH_SINGLE +] + +VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE] + +SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}} +SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}} +SINGLE_SIG = {"inputs": [getter("sign")]} +SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}} + +DUST_LIMIT = 600 +MIN_FEE = 50000 + +# === Actual test cases === + + +def spenders_taproot_active(): + """Return a list of Spenders for testing post-Taproot activation behavior.""" + + secs = [generate_privkey() for _ in range(8)] + pubs = [compute_xonly_pubkey(sec)[0] for sec in secs] + + spenders = [] + + # == Tests for BIP340 signature validation. == + # These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp. + # Some things are tested programmatically as well here. + + tap = taproot_construct(pubs[0]) + # Test with key with bit flipped. + add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR) + # Test with sighash with bit flipped. + add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR) + # Test with invalid R sign. + add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR) + # Test with invalid P sign. + add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR) + # Test with signature with bit flipped. + add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR) + + # == Tests for signature hashing == + + # Run all tests once with no annex, and once with a valid random annex. 
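+    # (The annex, per BIP341, is an optional final witness stack element whose first byte is
+    # ANNEX_TAG (0x50); when present it is committed to by the signature hash. The lambda below
+    # builds one of random length; e.g. bytes([ANNEX_TAG]) + random_bytes(10) is a valid annex.)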
+ for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]: + # Non-empty annex is non-standard + no_annex = annex is None + + # Sighash mutation tests (test all sighash combinations) + for hashtype in VALID_SIGHASHES_TAPROOT: + common = {"annex": annex, "hashtype": hashtype, "standard": no_annex} + + # Pure pubkey + tap = taproot_construct(pubs[0]) + add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + + # Pubkey/P2PK script combination + scripts = [("s0", CScript(random_checksig_style(pubs[1])))] + tap = taproot_construct(pubs[0], scripts) + add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + + # Test SIGHASH_SINGLE behavior in combination with mismatching outputs + if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE: + add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True) + add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True) + + # Test OP_CODESEPARATOR impact on sighashing. + hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT) + common = {"annex": annex, "hashtype": hashtype, "standard": no_annex} + scripts = [ + ("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig + ("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig + ("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep + ] + random.shuffle(scripts) + tap = taproot_construct(pubs[0], scripts) + add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + + # Reusing the scripts above, test that various features affect the sighash. 
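+        # (Each failure case below recomputes the sighash with exactly one field changed, e.g.
+        # override(default_sighash, annex=None), so the signature commits to a different message
+        # than the one the interpreter hashes, and validation must fail.)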
+ add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR) + + # Test that invalid hashtypes don't work, both in key path and script path spends + hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT) + for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]: + add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE) + add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE) + + # Test that hashtype 0 cannot have a hashtype byte, and 1 must have one. + add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE) + add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE) + add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) + # Test that hashtype 0 and hashtype 1 cannot be transmuted into each other. 
+ add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) + add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) + + # Test aspects of signatures with unusual lengths + for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]: + scripts = [ + ("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])), + ("cs_pos", CScript([pubs[2], OP_CHECKSIG])), + ("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])), + ("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])), + ("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL])) + ] + random.shuffle(scripts) + tap = taproot_construct(pubs[3], scripts) + # Empty signatures + add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE) + add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY) + add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS) + add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS) + add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE) + add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE) + # Appending a zero byte to signatures invalidates them + add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) + add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) + add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) + add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) + add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) + add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else 
ERR_SIG_SIZE)) + # Removing the last byte from signatures invalidates them + add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) + add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) + add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) + add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) + add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) + add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) + # Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail. + add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR) + add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR) + + # == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH == + + for p2sh in [False, True]: + for witver in range(1, 17): + for witlen in [20, 31, 32, 33]: + def mutate(spk): + prog = spk[2:] + assert len(prog) == 32 + if witlen < 32: + prog = prog[0:witlen] + elif witlen > 32: + prog += bytes([0 for _ in range(witlen - 32)]) + return CScript([CScriptOp.encode_op_n(witver), prog]) + scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))] + tap = taproot_construct(pubs[1], scripts) + if not p2sh and witver == 1 and witlen == 32: + add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN) + else: + add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False) + add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False) + + # == Test various aspects of BIP341 spending paths == + + # A set of functions that compute the hashing partner in a Merkle tree, designed to exercise + # edge cases. This relies on the taproot_construct feature that a lambda can be passed in + # instead of a subtree, to compute the partner to be hashed with. 
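+    # (Illustrative shape of such a tree description, assuming script_a is some CScript:
+    #   scripts = [("leaf_a", script_a), lambda h: bytes(32)]
+    # i.e. the lambda receives the hash of its sibling and returns the 32-byte partner node
+    # to combine it with, instead of that partner being derived from a real subtree.)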
+ PARTNER_MERKLE_FN = [ + # Combine with itself + lambda h: h, + # Combine with hash 0 + lambda h: bytes([0 for _ in range(32)]), + # Combine with hash 2^256-1 + lambda h: bytes([0xff for _ in range(32)]), + # Combine with itself-1 (BE) + lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'), + # Combine with itself+1 (BE) + lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'), + # Combine with itself-1 (LE) + lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'), + # Combine with itself+1 (LE) + lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'), + # Combine with random bitflipped version of self. + lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little') + ] + # Start with a tree of that has depth 1 for "128deep" and depth 2 for "129deep". + scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]] + # Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths. + for _ in range(127): + scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] + tap = taproot_construct(pubs[0], scripts) + # Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it). + add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE) + # Test that flipping the negation bit invalidates spends. + add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH) + # Test that bitflips in the Merkle branch invalidate it. + add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH) + # Test that bitflips in the inner pubkey invalidate it. + add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_inner": bitflipper(default_pubkey_inner)}, **ERR_WITNESS_PROGRAM_MISMATCH) + # Test that empty witnesses are invalid. + add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS) + # Test that adding garbage to the control block invalidates it. + add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE) + # Test that truncating the control block invalidates it. + add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE) + + scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))] + tap = taproot_construct(pubs[1], scripts) + # Test that adding garbage to the control block invalidates it. + add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE) + # Test that truncating the control block invalidates it. 
+ add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE) + # Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it + add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE) + + # == Test BIP342 edge cases == + + csa_low_val = random.randrange(0, 17) # Within range for OP_n + csa_low_result = csa_low_val + 1 + + csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range + csa_high_result = csa_high_val + 1 + + OVERSIZE_NUMBER = 2**31 + assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6) + assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5) + + big_choices = [] + big_scriptops = [] + for i in range(1000): + r = random.randrange(len(pubs)) + big_choices.append(r) + big_scriptops += [pubs[r], OP_CHECKSIGVERIFY] + + + def big_spend_inputs(ctx): + """Helper function to construct the script input for t33/t34 below.""" + # Instead of signing 999 times, precompute signatures for every (key, hashtype) combination + sigs = {} + for ht in VALID_SIGHASHES_TAPROOT: + for k in range(len(pubs)): + sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx) + num = get(ctx, "num") + return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)] + + # Various BIP342 features + scripts = [ + # 0) drop stack element and OP_CHECKSIG + ("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])), + # 1) normal OP_CHECKSIG + ("t1", CScript([pubs[1], OP_CHECKSIG])), + # 2) normal OP_CHECKSIGVERIFY + ("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])), + # 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input + ("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])), + # 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input + ("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])), + # 5) OP_IF script that needs a true input + ("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])), + # 6) OP_NOTIF script that needs a true input + ("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])), + # 7) OP_CHECKSIG with an empty key + ("t7", CScript([OP_0, OP_CHECKSIG])), + # 8) OP_CHECKSIGVERIFY with an empty key + ("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])), + # 9) normal OP_CHECKSIGADD that also ensures return value is correct + ("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])), + # 10) OP_CHECKSIGADD with empty key + ("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])), + # 11) OP_CHECKSIGADD with missing counter stack element + ("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])), + # 12) OP_CHECKSIG that needs invalid signature + ("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])), + # 13) OP_CHECKSIG with empty key that needs invalid signature + ("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])), + # 14) OP_CHECKSIGADD that needs invalid signature + ("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])), + # 15) OP_CHECKSIGADD with empty key that needs invalid signature + ("t15", 
CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])), + # 16) OP_CHECKSIG with unknown pubkey type + ("t16", CScript([OP_1, OP_CHECKSIG])), + # 17) OP_CHECKSIGADD with unknown pubkey type + ("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])), + # 18) OP_CHECKSIGVERIFY with unknown pubkey type + ("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])), + # 19) script longer than 10000 bytes and over 201 non-push opcodes + ("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])), + # 20) OP_CHECKSIGVERIFY with empty key + ("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])), + # 21) Script that grows the stack to 1000 elements + ("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)), + # 22) Script that grows the stack to 1001 elements + ("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)), + # 23) Script that expects an input stack of 1000 elements + ("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])), + # 24) Script that expects an input stack of 1001 elements + ("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])), + # 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element + ("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])), + # 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element + ("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])), + # 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes + ("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])), + # 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result + ("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])), + # 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes + ("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])), + # 30) Variant of t1 with "normal" 33-byte pubkey + ("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])), + # 31) Variant of t2 with "normal" 33-byte pubkey + ("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])), + # 32) Variant of t28 with "normal" 33-byte pubkey + ("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])), + # 33) 999-of-999 multisig + ("t33", CScript(big_scriptops[:1998] + [OP_1])), + # 34) 1000-of-1000 multisig + ("t34", CScript(big_scriptops[:2000] + [OP_1])), + # 35) Variant of t9 that uses a non-minimally encoded input arg + ("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])), + # 36) Empty script + ("t36", CScript([])), + ] + # Add many dummies to test huge trees + for j in range(100000): + scripts.append((None, CScript([OP_RETURN, random.randrange(100000)]))) + random.shuffle(scripts) + tap = taproot_construct(pubs[0], scripts) + common = { + "hashtype": hashtype, + "key": secs[1], + "tap": tap, + } + # Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not). 
+ add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT) + add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)]) + add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)]) + # Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work. + add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG) + add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG) + # Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript) + add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF) + add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF) + add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF) + add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF) + # Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid. + add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY) + # Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case. + add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR) + add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR) + add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR) + # Test that 0-byte public keys are not acceptable. 
+ add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) + # Test that OP_CHECKSIGADD results are as expected + add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error") + add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error") + # Test that OP_CHECKSIGADD requires 3 stack elements. + add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY) + # Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY) + add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY) + add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY) + # Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable. + add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common) + # Test that a stack size of 1000 elements is permitted, but 1001 isn't. + add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE) + # Test that an input stack size of 1000 elements is permitted, but 1001 isn't. + add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE) + # Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not. + add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT) + # Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits) + add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE) + # Test that the CLEANSTACK rule is consensus critical in tapscript + add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK) + + # == Test for sigops ratio limit == + + # Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as + # input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and + # will execute sigops signature checks. + SIGOPS_RATIO_SCRIPTS = [ + # n OP_CHECKSIGVERFIYs and 1 OP_CHECKSIG. 
+ lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1), + # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY. + lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1), + # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG. + lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1), + # n OP_CHECKSIGVERFIYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD. + lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1), + # n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature. + lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1), + # n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature. + lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1), + ] + for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]: + for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]: + for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]: + for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS): + merkledepth = random.randrange(129) + + + def predict_sigops_ratio(n, dummy_size): + """Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test.""" + script, sigops = fn(n, pubkey) + # Predict the size of the witness for a given choice of n + stacklen_size = 1 + sig_size = 64 + (hashtype != SIGHASH_DEFAULT) + siglen_size = 1 + dummylen_size = 1 + 2 * (dummy_size >= 253) + script_size = len(script) + scriptlen_size = 1 + 2 * (script_size >= 253) + control_size = 33 + 32 * merkledepth + controllen_size = 1 + 2 * (control_size >= 253) + annex_size = 0 if annex is None else len(annex) + annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253) + witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size + # sigops ratio test + return witsize + 50 >= 50 * sigops + # Make sure n is high enough that with empty dummy, the script is not valid + n = 0 + while predict_sigops_ratio(n, 0): + n += 1 + # But allow picking a bit higher still + n += random.randrange(5) + # Now pick dummy size *just* large enough that the overall construction passes + dummylen = 0 + while not predict_sigops_ratio(n, dummylen): + dummylen += 1 + scripts = [("s", fn(n, pubkey)[0])] + for _ in range(merkledepth): + scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] + tap = taproot_construct(pubs[0], scripts) + standard = annex is None and dummylen <= 80 and len(pubkey) == 32 + add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, 
**ERR_SIGOPS_RATIO) + + # Future leaf versions + for leafver in range(0, 0x100, 2): + if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG: + # Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version + continue + scripts = [ + ("bare_c0", CScript([OP_NOP])), + ("bare_unkver", CScript([OP_NOP]), leafver), + ("return_c0", CScript([OP_RETURN])), + ("return_unkver", CScript([OP_RETURN]), leafver), + ("undecodable_c0", CScript([OP_PUSHDATA1])), + ("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver), + ("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])), + ("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver), + ("1001push_c0", CScript([OP_0] * 1001)), + ("1001push_unkver", CScript([OP_0] * 1001), leafver), + ] + random.shuffle(scripts) + tap = taproot_construct(pubs[0], scripts) + add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK) + add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN) + add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE) + add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT) + add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE) + add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE) + + # OP_SUCCESSx tests. 
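+    # (Per BIP342, an OP_SUCCESSx opcode encountered anywhere in a tapscript makes the script
+    # unconditionally valid, even if the remainder of the script is undecodable or would fail;
+    # the loop below exercises this for every opcode classified by is_op_success(), comparing
+    # against an otherwise identical script that uses OP_NOP instead.)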
+ hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT) + for opval in range(76, 0x100): + opcode = CScriptOp(opval) + if not is_op_success(opcode): + continue + scripts = [ + ("bare_success", CScript([opcode])), + ("bare_nop", CScript([OP_NOP])), + ("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])), + ("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])), + ("return_success", CScript([OP_RETURN, opcode])), + ("return_nop", CScript([OP_RETURN, OP_NOP])), + ("undecodable_success", CScript([opcode, OP_PUSHDATA1])), + ("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])), + ("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])), + ("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])), + ("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])), + ("1001push_success", CScript([OP_0] * 1001 + [opcode])), + ("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])), + ] + random.shuffle(scripts) + tap = taproot_construct(pubs[0], scripts) + add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK) + add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK) + add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN) + add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE) + add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE) + add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT) + add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE) + add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE) + + # Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx) + for opval in range(0, 0x100): + opcode = CScriptOp(opval) + if is_op_success(opcode): + continue + scripts = [ + ("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)), + ("op_success", CScript([OP_RETURN, CScriptOp(0x50)])) + ] + tap = taproot_construct(pubs[0], scripts) + add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode + + # == Legacy tests == + + # Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too. 
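+    # (Both compressed and uncompressed ECDSA keys are exercised; spending a witness v0 output
+    # with an uncompressed key is valid but nonstandard, which is why `standard` below requires
+    # `compressed or not witv0`.)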
+ for compressed in [False, True]: + eckey1 = ECKey() + eckey1.set(generate_privkey(), compressed) + pubkey1 = eckey1.get_pubkey().get_bytes() + eckey2 = ECKey() + eckey2.set(generate_privkey(), compressed) + for p2sh in [False, True]: + for witv0 in [False, True]: + for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]: + standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0) + add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS) + add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS) + + # Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic. + for p2sh in [False, True]: + for witv0 in [False, True]: + for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]: + standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0) + add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE) + + return spenders + +def spenders_taproot_inactive(): + """Spenders for testing that pre-activation Taproot rules don't apply.""" + + spenders = [] + + sec = generate_privkey() + pub, _ = compute_xonly_pubkey(sec) + scripts = [ + ("pk", CScript([pub, OP_CHECKSIG])), + ("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2), + ("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])), + ] + tap = taproot_construct(pub, scripts) + + # Test that keypath spending is valid & standard if compliant, but valid and nonstandard otherwise. + add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap) + add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash)) + add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[]) + + # Same for scriptpath spending (but using future features like annex, leaf versions, or OP_SUCCESS is nonstandard). 
+ add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", inputs=[getter("sign")]) + add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash)) + add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock)) + add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")]) + add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash)) + add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")]) + add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash)) + + return spenders + +# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix. +LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY" +# Consensus validation flags to use in dumps for all other tests. +TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT" + +def dump_json_test(tx, input_utxos, idx, success, failure): + spender = input_utxos[idx].spender + # Determine flags to dump + flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS + + fields = [ + ("tx", tx.serialize().hex()), + ("prevouts", [x.output.serialize().hex() for x in input_utxos]), + ("index", idx), + ("flags", flags), + ("comment", spender.comment) + ] + + # The "final" field indicates that a spend should be always valid, even with more validation flags enabled + # than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate). + if spender.is_standard: + fields.append(("final", True)) + + def dump_witness(wit): + return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])]) + if success is not None: + fields.append(("success", dump_witness(success))) + if failure is not None: + fields.append(("failure", dump_witness(failure))) + + # Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the + # file naming scheme compatible with fuzzing infrastructure). + dump = json.dumps(OrderedDict(fields)) + ",\n" + sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest() + dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0]) + os.makedirs(dirname, exist_ok=True) + with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f: + f.write(dump) + +# Data type to keep track of UTXOs, where they were created, and how to spend them. +UTXOData = namedtuple('UTXOData', 'outpoint,output,spender') + +class TaprootTest(BitcoinTestFramework): + def add_options(self, parser): + parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true", + help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable") + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + # Node 0 has Taproot inactive, Node 1 active. 
+ self.extra_args = [["-whitelist=127.0.0.1", "-par=1", "-vbparams=taproot:1:1"], ["-whitelist=127.0.0.1", "-par=1"]] + + def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False): + + # Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output. + # It is not impossible to fit enough tapscript sigops to hit the old 80k limit without + # busting txin-level limits. We simply have to account for the p2pk outputs in all + # transactions. + extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR)) + + block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1) + block.nVersion = 4 + for tx in txs: + tx.rehash() + block.vtx.append(tx) + block.hashMerkleRoot = block.calc_merkle_root() + witness and add_witness_commitment(block) + block.rehash() + block.solve() + block_response = node.submitblock(block.serialize(True).hex()) + if err_msg is not None: + assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg) + if (accept): + assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response) + self.tip = block.sha256 + self.lastblockhash = block.hash + self.lastblocktime += 1 + self.lastblockheight += 1 + else: + assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg + + def test_spenders(self, node, spenders, input_counts): + """Run randomized tests with a number of "spenders". + + Steps: + 1) Generate an appropriate UTXO for each spender to test spend conditions + 2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh + 3) Select random number of inputs from (1) + 4) Select random number of addresses from (2) as outputs + + Each spender embodies a test; in a large randomized test, it is verified + that toggling the valid argument to each lambda toggles the validity of + the transaction. This is accomplished by constructing transactions consisting + of all valid inputs, except one invalid one. + """ + + # Construct a bunch of sPKs that send coins back to the host wallet + self.log.info("- Constructing addresses for returning coins") + host_spks = [] + host_pubkeys = [] + for i in range(16): + addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"])) + info = node.getaddressinfo(addr) + spk = bytes.fromhex(info['scriptPubKey']) + host_spks.append(spk) + host_pubkeys.append(bytes.fromhex(info['pubkey'])) + + # Initialize variables used by block_submit(). + self.lastblockhash = node.getbestblockhash() + self.tip = int(self.lastblockhash, 16) + block = node.getblock(self.lastblockhash) + self.lastblockheight = block['height'] + self.lastblocktime = block['time'] + + # Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and + # one change output at the end. The transaction is constructed on the Python side to enable + # having multiple outputs to the same address and outputs with no assigned address. The wallet + # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the + # Python side (to bypass standardness rules). 
+ self.log.info("- Creating test UTXOs...") + random.shuffle(spenders) + normal_utxos = [] + mismatching_utxos = [] # UTXOs with input that requires mismatching output position + done = 0 + while done < len(spenders): + # Compute how many UTXOs to create with this transaction + count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000) + + fund_tx = CTransaction() + # Add the 50 highest-value inputs + unspents = node.listunspent() + random.shuffle(unspents) + unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True) + if len(unspents) > 50: + unspents = unspents[:50] + random.shuffle(unspents) + balance = 0 + for unspent in unspents: + balance += int(unspent["amount"] * 100000000) + txid = int(unspent["txid"], 16) + fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript())) + # Add outputs + cur_progress = done / len(spenders) + next_progress = (done + count_this_tx) / len(spenders) + change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance + self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001)) + for i in range(count_this_tx): + avg = (balance - change_goal) / (count_this_tx - i) + amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5) + balance -= amount + fund_tx.vout.append(CTxOut(amount, spenders[done + i].script)) + # Add change + fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks))) + # Ask the wallet to sign + ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"])) + fund_tx.deserialize(ss) + # Construct UTXOData entries + fund_tx.rehash() + for i in range(count_this_tx): + utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done]) + if utxodata.spender.need_vin_vout_mismatch: + mismatching_utxos.append(utxodata) + else: + normal_utxos.append(utxodata) + done += 1 + # Mine into a block + self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True) + + # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders. + self.log.info("- Running %i spending tests" % done) + random.shuffle(normal_utxos) + random.shuffle(mismatching_utxos) + assert done == len(normal_utxos) + len(mismatching_utxos) + + left = done + while left: + # Construct CTransaction with random nVersion, nLocktime + tx = CTransaction() + tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)]) + min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime + if random.choice([True, False]): + tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past + else: + tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past + + # Decide how many UTXOs to test with. + acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)] + num_inputs = random.choice(acceptable) + + # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those + # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one + # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs. 
+ input_utxos = [] + while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1): + input_utxos.append(mismatching_utxos.pop()) + left -= 1 + + # Top up until we hit num_inputs (but include at least one normal UTXO always). + for _ in range(max(1, num_inputs - len(input_utxos))): + input_utxos.append(normal_utxos.pop()) + left -= 1 + + # The first input cannot require a mismatching output (as there is at least one output). + while True: + random.shuffle(input_utxos) + if not input_utxos[0].spender.need_vin_vout_mismatch: + break + first_mismatch_input = None + for i in range(len(input_utxos)): + if input_utxos[i].spender.need_vin_vout_mismatch: + first_mismatch_input = i + assert first_mismatch_input is None or first_mismatch_input > 0 + + # Decide fee, and add CTxIns to tx. + amount = sum(utxo.output.nValue for utxo in input_utxos) + fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee + in_value = amount - fee + tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos] + tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))] + sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos) + self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos))) + + # Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs) + num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input))) + assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE + for i in range(num_outputs): + tx.vout.append(CTxOut()) + if in_value <= DUST_LIMIT: + tx.vout[-1].nValue = DUST_LIMIT + elif i < num_outputs - 1: + tx.vout[-1].nValue = in_value + else: + tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value) + in_value -= tx.vout[-1].nValue + tx.vout[-1].scriptPubKey = random.choice(host_spks) + sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR + fee += in_value + assert fee >= 0 + + # Select coinbase pubkey + cb_pubkey = random.choice(host_pubkeys) + sigops_weight += 1 * WITNESS_SCALE_FACTOR + + # Precompute one satisfying and one failing scriptSig/witness for each input. + input_data = [] + for i in range(len(input_utxos)): + fn = input_utxos[i].spender.sat_function + fail = None + success = fn(tx, i, [utxo.output for utxo in input_utxos], True) + if not input_utxos[i].spender.no_fail: + fail = fn(tx, i, [utxo.output for utxo in input_utxos], False) + input_data.append((fail, success)) + if self.options.dump_tests: + dump_json_test(tx, input_utxos, i, success, fail) + + # Sign each input incorrectly once on each complete signing pass, except the very last. + for fail_input in list(range(len(input_utxos))) + [None]: + # Skip trying to fail at spending something that can't be made to fail. 
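# (In the loop below, input_data[i][i != fail_input] uses the boolean comparison as an index into
# the (fail, success) pair precomputed above: index 0 selects the failing scriptSig/witness for the
# one input chosen to fail, and index 1 selects the satisfying data for every other input. Each
# non-final pass therefore invalidates exactly one input, while the final pass with
# fail_input is None leaves all inputs valid.)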
+ if fail_input is not None and input_utxos[fail_input].spender.no_fail: + continue + # Expected message with each input failure, may be None (which is ignored) + expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg + # Fill inputs/witnesses + for i in range(len(input_utxos)): + tx.vin[i].scriptSig = input_data[i][i != fail_input][0] + tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1] + # Submit to mempool to check standardness + is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2 + tx.rehash() + msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos)) + if is_standard_tx: + node.sendrawtransaction(tx.serialize().hex(), 0) + assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg + else: + assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0) + # Submit in a block + self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg) + + if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200: + self.log.info(" - %i tests done" % (len(spenders) - left)) + + assert left == 0 + assert len(normal_utxos) == 0 + assert len(mismatching_utxos) == 0 + self.log.info(" - Done") + + def run_test(self): + self.connect_nodes(0, 1) + + # Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot). + self.log.info("Post-activation tests...") + self.nodes[1].generate(101) + self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3]) + + # Transfer 70% of funds to the pre-taproot node. + addr = self.nodes[0].getnewaddress() + self.nodes[1].sendtoaddress(address=addr, amount=int(self.nodes[1].getbalance() * 70000000) / 100000000) + self.nodes[1].generate(1) + self.sync_blocks() + + # Pre-taproot activation tests.
+ self.log.info("Pre-activation tests...") + self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1, 2, 2, 2, 2, 3]) + + +if __name__ == '__main__': + TaprootTest().main() diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index a0bc937f75..d675ae174c 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -11,7 +11,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import CTransaction, hash256, FromHex from test_framework.util import ( assert_equal, - connect_nodes, assert_raises_rpc_error, ) from io import BytesIO @@ -102,7 +101,7 @@ class ZMQTest (BitcoinTestFramework): rawtx = subs[3] self.restart_node(0, ["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [hashblock, hashtx, rawblock, rawtx]]) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) for socket in sockets: socket.connect(address) @@ -207,7 +206,7 @@ class ZMQTest (BitcoinTestFramework): connect_blocks = self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_P2WSH_OP_TRUE) # nodes[0] will reorg chain after connecting back nodes[1] - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks() # tx in mempool valid but not advertised # Should receive nodes[1] tip @@ -264,7 +263,7 @@ class ZMQTest (BitcoinTestFramework): self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_P2WSH_OP_TRUE) # nodes[0] will reorg chain after connecting back nodes[1] - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) # Then we receive all block (dis)connect notifications for the 2 block reorg assert_equal((dc_block, "D", None), seq.receive_sequence()) @@ -406,7 +405,7 @@ class ZMQTest (BitcoinTestFramework): seq = ZMQSubscriber(socket, b'sequence') self.restart_node(0, ['-zmqpub%s=%s' % (seq.topic.decode(), address)]) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) socket.connect(address) # Relax so that the subscriber is ready before publishing zmq messages sleep(0.2) diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 57a059b7f7..fc4c775668 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -83,31 +83,31 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction not in the mempool') - fee = 0.00000700 + fee = Decimal('0.000007') raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction( inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later - outputs=[{node.getnewaddress(): 0.3 - fee}], + outputs=[{node.getnewaddress(): Decimal('0.3') - fee}], ))['hex'] tx = CTransaction() tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) txid_0 = tx.rehash() self.check_mempool_result( - result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': Decimal(str(fee))}}], + result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}], rawtxs=[raw_tx_0], ) self.log.info('A final transaction not in the mempool') coin = coins.pop() # Pick a random coin(base) to spend - output_amount = 0.025 + output_amount = Decimal('0.025') raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction( inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL outputs=[{node.getnewaddress(): output_amount}], locktime=node.getblockcount() + 2000, # Can be anything ))['hex'] tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_final))) - 
fee_expected = int(coin['amount']) - output_amount + fee_expected = coin['amount'] - output_amount self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': Decimal(str(fee_expected))}}], + result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}], rawtxs=[tx.serialize().hex()], maxfeerate=0, ) @@ -130,7 +130,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) txid_0 = tx.rehash() self.check_mempool_result( - result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': Decimal(str(2 * fee))}}], + result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}], rawtxs=[raw_tx_0], ) @@ -190,7 +190,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) # Reference tx should be valid on itself self.check_mempool_result( - result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal(str(0.1 - 0.05))}}], + result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal('0.1') - Decimal('0.05')}}], rawtxs=[tx.serialize().hex()], maxfeerate=0, ) diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index f73f1a02a2..70cd4ebb3b 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -45,8 +45,6 @@ from test_framework.util import ( assert_equal, assert_greater_than_or_equal, assert_raises_rpc_error, - connect_nodes, - disconnect_nodes, ) @@ -83,11 +81,11 @@ class MempoolPersistTest(BitcoinTestFramework): assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time) # disconnect nodes & make a txn that remains in the unbroadcast set. - disconnect_nodes(self.nodes[0], 1) + self.disconnect_nodes(0, 1) assert(len(self.nodes[0].getpeerinfo()) == 0) assert(len(self.nodes[0].p2ps) == 0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("12")) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. 
Verify that node2 calculates its balance correctly after loading wallet transactions.") self.stop_nodes() diff --git a/test/functional/mempool_unbroadcast.py b/test/functional/mempool_unbroadcast.py index abd5a03d95..b475b65e68 100755 --- a/test/functional/mempool_unbroadcast.py +++ b/test/functional/mempool_unbroadcast.py @@ -11,9 +11,7 @@ from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, create_confirmed_utxos, - disconnect_nodes, ) MAX_INITIAL_BROADCAST_DELAY = 15 * 60 # 15 minutes in seconds @@ -36,7 +34,7 @@ class MempoolUnbroadcastTest(BitcoinTestFramework): min_relay_fee = node.getnetworkinfo()["relayfee"] utxos = create_confirmed_utxos(min_relay_fee, node, 10) - disconnect_nodes(node, 1) + self.disconnect_nodes(0, 1) self.log.info("Generate transactions that only node 0 knows about") @@ -70,7 +68,7 @@ class MempoolUnbroadcastTest(BitcoinTestFramework): self.restart_node(0) self.log.info("Reconnect nodes & check if they are sent to node 1") - connect_nodes(node, 1) + self.connect_nodes(0, 1) # fast forward into the future & ensure that the second node has the txns node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY) @@ -91,7 +89,7 @@ class MempoolUnbroadcastTest(BitcoinTestFramework): time.sleep(2) # allow sufficient time for possibility of broadcast assert_equal(len(conn.get_invs()), 0) - disconnect_nodes(node, 1) + self.disconnect_nodes(0, 1) node.disconnect_p2ps() def test_txn_removal(self): diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py index 1b2c7644bd..80635e33c5 100755 --- a/test/functional/mining_basic.py +++ b/test/functional/mining_basic.py @@ -25,7 +25,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - connect_nodes, ) @@ -56,7 +55,7 @@ class MiningTest(BitcoinTestFramework): assert_equal(mining_info['currentblocktx'], 0) assert_equal(mining_info['currentblockweight'], 4000) self.restart_node(0) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) def run_test(self): self.mine_chain() diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py index 6d0b241e57..2adafb1fdb 100755 --- a/test/functional/mining_getblocktemplate_longpoll.py +++ b/test/functional/mining_getblocktemplate_longpoll.py @@ -5,11 +5,13 @@ """Test longpolling with getblocktemplate.""" from decimal import Decimal +import random +import threading from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import get_rpc_proxy, random_transaction +from test_framework.util import get_rpc_proxy +from test_framework.wallet import MiniWallet -import threading class LongpollThread(threading.Thread): def __init__(self, node): @@ -29,45 +31,48 @@ class GetBlockTemplateLPTest(BitcoinTestFramework): self.num_nodes = 2 self.supports_cli = False - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - def run_test(self): self.log.info("Warning: this test will take about 70 seconds in the best case. 
Be patient.") + self.log.info("Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens") self.nodes[0].generate(10) template = self.nodes[0].getblocktemplate({'rules': ['segwit']}) longpollid = template['longpollid'] - # longpollid should not change between successive invocations if nothing else happens template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']}) assert template2['longpollid'] == longpollid - # Test 1: test that the longpolling wait if we do nothing + self.log.info("Test that longpoll waits if we do nothing") thr = LongpollThread(self.nodes[0]) thr.start() # check that thread still lives thr.join(5) # wait 5 seconds or until thread exits assert thr.is_alive() - # Test 2: test that longpoll will terminate if another node generates a block - self.nodes[1].generate(1) # generate a block on another node + miniwallets = [ MiniWallet(node) for node in self.nodes ] + self.log.info("Test that longpoll will terminate if another node generates a block") + miniwallets[1].generate(1) # generate a block on another node # check that thread will exit now that new transaction entered mempool thr.join(5) # wait 5 seconds or until thread exits assert not thr.is_alive() - # Test 3: test that longpoll will terminate if we generate a block ourselves + self.log.info("Test that longpoll will terminate if we generate a block ourselves") thr = LongpollThread(self.nodes[0]) thr.start() - self.nodes[0].generate(1) # generate a block on another node + miniwallets[0].generate(1) # generate a block on own node thr.join(5) # wait 5 seconds or until thread exits assert not thr.is_alive() - # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll + # Add enough mature utxos to the wallets, so that all txs spend confirmed coins + self.nodes[0].generate(100) + self.sync_blocks() + + self.log.info("Test that introducing a new transaction into the mempool will terminate the longpoll") thr = LongpollThread(self.nodes[0]) thr.start() # generate a random transaction and submit it min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"] - # min_relay_fee is fee per 1000 bytes, which should be more than enough. 
- (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20) + fee_rate = min_relay_fee + Decimal('0.00000010') * random.randint(0,20) + miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes), + fee_rate=fee_rate) # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned thr.join(60 + 20) assert not thr.is_alive() diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index 84178d0dd7..3250cbecf9 100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -22,8 +22,6 @@ from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, - disconnect_nodes, ) class CFiltersClient(P2PInterface): @@ -61,7 +59,7 @@ class CompactFiltersTest(BitcoinTestFramework): self.sync_blocks(timeout=600) # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting - disconnect_nodes(self.nodes[0], 1) + self.disconnect_nodes(0, 1) self.nodes[0].generate(1) self.wait_until(lambda: self.nodes[0].getblockcount() == 1000) @@ -90,7 +88,7 @@ class CompactFiltersTest(BitcoinTestFramework): assert_equal(len(response.headers), 1) self.log.info("Reorg node 0 to a new chain.") - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks(timeout=600) main_block_hash = self.nodes[0].getblockhash(1000) diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py index 646baa1550..e80422d1cf 100755 --- a/test/functional/p2p_blocksonly.py +++ b/test/functional/p2p_blocksonly.py @@ -59,7 +59,7 @@ class P2PBlocksOnly(BitcoinTestFramework): self.log.info('Check that txs from peers with relay-permission are not rejected and relayed to others') self.log.info("Restarting node 0 with relay permission and blocksonly") - self.restart_node(0, ["-persistmempool=0", "-whitelist=relay@127.0.0.1", "-blocksonly"]) + self.restart_node(0, ["-persistmempool=0", "-whitelist=relay@127.0.0.1", "-blocksonly", '-deprecatedrpc=whitelisted']) assert_equal(self.nodes[0].getrawmempool(), []) first_peer = self.nodes[0].add_p2p_connection(P2PInterface()) second_peer = self.nodes[0].add_p2p_connection(P2PInterface()) diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 506b480039..611bffb25f 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -9,7 +9,7 @@ Version 2 compact blocks are post-segwit (wtxids) """ import random -from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment +from test_framework.blocktools import create_block, NORMAL_GBT_REQUEST_PARAMS, add_witness_commitment from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_BLOCK, MSG_CMPCT_BLOCK, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex from test_framework.p2p import p2p_lock, P2PInterface from test_framework.script import CScript, OP_TRUE, OP_DROP @@ -104,11 +104,7 @@ class CompactBlocksTest(BitcoinTestFramework): self.skip_if_no_wallet() def 
build_block_on_tip(self, node, segwit=False): - height = node.getblockcount() - tip = node.getbestblockhash() - mtp = node.getblockheader(tip)['mediantime'] - block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1) - block.nVersion = 4 + block = create_block(tmpl=node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)) if segwit: add_witness_commitment(block) block.solve() @@ -769,6 +765,9 @@ class CompactBlocksTest(BitcoinTestFramework): assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): + # Get the nodes out of IBD + self.nodes[0].generate(1) + # Setup the p2p connections self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2)) self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK) diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py index b7c2a306eb..3088a8aa46 100755 --- a/test/functional/p2p_disconnect_ban.py +++ b/test/functional/p2p_disconnect_ban.py @@ -9,7 +9,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - connect_nodes, ) class DisconnectBanTest(BitcoinTestFramework): @@ -19,8 +18,8 @@ class DisconnectBanTest(BitcoinTestFramework): def run_test(self): self.log.info("Connect nodes both way") - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 0) + self.connect_nodes(0, 1) + self.connect_nodes(1, 0) self.log.info("Test setban and listbanned RPCs") @@ -78,8 +77,8 @@ class DisconnectBanTest(BitcoinTestFramework): # Clear ban lists self.nodes[1].clearbanned() self.log.info("Connect nodes both way") - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 0) + self.connect_nodes(0, 1) + self.connect_nodes(1, 0) self.log.info("Test disconnectnode RPCs") @@ -98,7 +97,7 @@ class DisconnectBanTest(BitcoinTestFramework): assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] self.log.info("disconnectnode: successfully reconnect node") - connect_nodes(self.nodes[0], 1) # reconnect the node + self.connect_nodes(0, 1) # reconnect the node assert_equal(len(self.nodes[0].getpeerinfo()), 2) assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index ea066a984d..51b7623fe3 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -108,9 +108,10 @@ class FeeFilterTest(BitcoinTestFramework): # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. 
- txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node0)['wtxid'] for _ in range(3)] + txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node0)['wtxid'] for _ in range(1)] conn.wait_for_invs_to_match(txids) conn.clear_invs() + self.sync_mempools() # must be sure node 1 has received all txs self.log.info("Remove fee filter and check txs are received again") conn.send_and_ping(msg_feefilter(0)) diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index 2c9cbea5e4..b1a7ef6877 100755 --- a/test/functional/p2p_node_network_limited.py +++ b/test/functional/p2p_node_network_limited.py @@ -13,8 +13,6 @@ from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - disconnect_nodes, - connect_nodes, ) @@ -40,9 +38,9 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): self.extra_args = [['-prune=550', '-addrmantest'], [], []] def disconnect_all(self): - disconnect_nodes(self.nodes[0], 1) - disconnect_nodes(self.nodes[0], 2) - disconnect_nodes(self.nodes[1], 2) + self.disconnect_nodes(0, 1) + self.disconnect_nodes(0, 2) + self.disconnect_nodes(1, 2) def setup_network(self): self.add_nodes(self.num_nodes, self.extra_args) @@ -60,7 +58,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services) self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.") - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address) self.sync_blocks([self.nodes[0], self.nodes[1]]) @@ -85,7 +83,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) try: self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5) except: @@ -94,7 +92,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0) # now connect also to node 1 (non pruned) - connect_nodes(self.nodes[1], 2) + self.connect_nodes(1, 2) # sync must be possible self.sync_blocks() @@ -106,7 +104,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address) # connect node1 (non pruned) with node0 (pruned) and check if the can sync - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED) self.sync_blocks([self.nodes[0], self.nodes[1]]) diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index 3ec36edb41..653e3894af 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -22,7 +22,6 @@ from test_framework.test_node import ErrorMatch from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, p2p_port, ) @@ -43,6 +42,13 @@ class P2PPermissionsTests(BitcoinTestFramework): True) self.checkpermission( + # check without deprecatedrpc=whitelisted + ["-whitelist=127.0.0.1"], + # Make sure the default values in the command line 
documentation match the ones here + ["relay", "noban", "mempool", "download"], + None) + + self.checkpermission( # no permission (even with forcerelay) ["-whitelist=@127.0.0.1", "-whitelistforcerelay=1"], [], @@ -81,6 +87,12 @@ class P2PPermissionsTests(BitcoinTestFramework): False) self.checkpermission( + # check without deprecatedrpc=whitelisted + ["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"], + ["noban", "mempool", "download"], + None) + + self.checkpermission( # legacy whitelistforcerelay should be ignored ["-whitelist=noban,mempool@127.0.0.1", "-whitelistforcerelay"], ["noban", "mempool", "download"], @@ -133,7 +145,7 @@ class P2PPermissionsTests(BitcoinTestFramework): p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1]) self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool") - connect_nodes(self.nodes[1], 0) + self.connect_nodes(1, 0) with self.nodes[1].assert_debug_log(["Force relaying tx {} from peer=0".format(txid)]): p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1]) self.wait_until(lambda: txid in self.nodes[0].getrawmempool()) @@ -149,10 +161,15 @@ class P2PPermissionsTests(BitcoinTestFramework): ) def checkpermission(self, args, expectedPermissions, whitelisted): + if whitelisted is not None: + args = [*args, '-deprecatedrpc=whitelisted'] self.restart_node(1, args) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] - assert_equal(peerinfo['whitelisted'], whitelisted) + if whitelisted is None: + assert 'whitelisted' not in peerinfo + else: + assert_equal(peerinfo['whitelisted'], whitelisted) assert_equal(len(expectedPermissions), len(peerinfo['permissions'])) for p in expectedPermissions: if not p in peerinfo['permissions']: diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index d79ed449e5..5c15538418 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -55,6 +55,7 @@ from test_framework.script import ( MAX_SCRIPT_ELEMENT_SIZE, OP_0, OP_1, + OP_2, OP_16, OP_2DROP, OP_CHECKMULTISIG, @@ -80,8 +81,6 @@ from test_framework.script import ( from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, - disconnect_nodes, softfork_active, hex_str_to_bytes, assert_raises_rpc_error, @@ -232,8 +231,8 @@ class SegWitTest(BitcoinTestFramework): def setup_network(self): self.setup_nodes() - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 1) + self.connect_nodes(0, 2) self.sync_all() # Helper functions @@ -497,7 +496,7 @@ class SegWitTest(BitcoinTestFramework): # node2 doesn't need to be connected for this test. # (If it's connected, node0 may propagate an invalid block to it over # compact blocks and the nodes would have inconsistent tips.) - disconnect_nodes(self.nodes[0], 2) + self.disconnect_nodes(0, 2) # Create two outputs, a p2wsh and p2sh-p2wsh witness_program = CScript([OP_TRUE]) @@ -559,7 +558,7 @@ class SegWitTest(BitcoinTestFramework): # TODO: support multiple acceptable reject reasons. 
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) self.utxo.pop(0) self.utxo.append(UTXO(txid, 2, value)) @@ -1400,7 +1399,11 @@ class SegWitTest(BitcoinTestFramework): assert_equal(len(self.nodes[1].getrawmempool()), 0) for version in list(range(OP_1, OP_16 + 1)) + [OP_0]: # First try to spend to a future version segwit script_pubkey. - script_pubkey = CScript([CScriptOp(version), witness_hash]) + if version == OP_1: + # Don't use 32-byte v1 witness (used by Taproot; see BIP 341) + script_pubkey = CScript([CScriptOp(version), witness_hash + b'\x00']) + else: + script_pubkey = CScript([CScriptOp(version), witness_hash]) tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)] tx.rehash() @@ -1413,9 +1416,9 @@ class SegWitTest(BitcoinTestFramework): self.sync_blocks() assert len(self.nodes[0].getrawmempool()) == 0 - # Finally, verify that version 0 -> version 1 transactions + # Finally, verify that version 0 -> version 2 transactions # are standard - script_pubkey = CScript([CScriptOp(OP_1), witness_hash]) + script_pubkey = CScript([CScriptOp(OP_2), witness_hash]) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)] @@ -1940,7 +1943,7 @@ class SegWitTest(BitcoinTestFramework): """Test the behavior of starting up a segwit-aware node after the softfork has activated.""" self.restart_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)]) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) # We reconnect more than 100 blocks, give it plenty of time self.sync_blocks(timeout=240) diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 5c3f021b3f..16d9302db8 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -42,15 +42,15 @@ class TestP2PConn(P2PInterface): # Constants from net_processing GETDATA_TX_INTERVAL = 60 # seconds -MAX_GETDATA_RANDOM_DELAY = 2 # seconds INBOUND_PEER_TX_DELAY = 2 # seconds TXID_RELAY_DELAY = 2 # seconds +OVERLOADED_PEER_DELAY = 2 # seconds MAX_GETDATA_IN_FLIGHT = 100 -TX_EXPIRY_INTERVAL = GETDATA_TX_INTERVAL * 10 +MAX_PEER_TX_ANNOUNCEMENTS = 5000 # Python test constants NUM_INBOUND = 10 -MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY +MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY class TxDownloadTest(BitcoinTestFramework): @@ -121,14 +121,12 @@ class TxDownloadTest(BitcoinTestFramework): # * the first time it is re-requested from the outbound peer, plus # * 2 seconds to avoid races assert self.nodes[1].getpeerinfo()[0]['inbound'] == False - timeout = 2 + (MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY) + ( - GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY) + timeout = 2 + INBOUND_PEER_TX_DELAY + GETDATA_TX_INTERVAL self.log.info("Tx should be received at node 1 after {} seconds".format(timeout)) self.sync_mempools(timeout=timeout) def test_in_flight_max(self): - self.log.info("Test that we don't request more than {} transactions from any peer, every {} minutes".format( - MAX_GETDATA_IN_FLIGHT, TX_EXPIRY_INTERVAL / 60)) + self.log.info("Test that we don't load peers with more than {} transaction requests immediately".format(MAX_GETDATA_IN_FLIGHT)) txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)] p = 
self.nodes[0].p2ps[0] @@ -136,31 +134,120 @@ class TxDownloadTest(BitcoinTestFramework): with p2p_lock: p.tx_getdata_count = 0 - p.send_message(msg_inv([CInv(t=MSG_WTX, h=i) for i in txids])) + mock_time = int(time.time() + 1) + self.nodes[0].setmocktime(mock_time) + for i in range(MAX_GETDATA_IN_FLIGHT): + p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])])) + p.sync_with_ping() + mock_time += INBOUND_PEER_TX_DELAY + self.nodes[0].setmocktime(mock_time) p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT) + for i in range(MAX_GETDATA_IN_FLIGHT, len(txids)): + p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])])) + p.sync_with_ping() + self.log.info("No more than {} requests should be seen within {} seconds after announcement".format(MAX_GETDATA_IN_FLIGHT, INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1)) + self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1) + p.sync_with_ping() with p2p_lock: assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT) + self.log.info("If we wait {} seconds after announcement, we should eventually get more requests".format(INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY)) + self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY) + p.wait_until(lambda: p.tx_getdata_count == len(txids)) - self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request") - p.send_message(msg_notfound(vec=[CInv(t=MSG_WTX, h=txids[0])])) - p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10) + def test_expiry_fallback(self): + self.log.info('Check that expiry will select another peer for download') + WTXID = 0xffaa + peer1 = self.nodes[0].add_p2p_connection(TestP2PConn()) + peer2 = self.nodes[0].add_p2p_connection(TestP2PConn()) + for p in [peer1, peer2]: + p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)])) + # One of the peers is asked for the tx + peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1) with p2p_lock: - assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1) + peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1) + assert_equal(peer_fallback.tx_getdata_count, 0) + self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1) # Wait for request to peer_expiry to expire + peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1) + with p2p_lock: + assert_equal(peer_fallback.tx_getdata_count, 1) + self.restart_node(0) # reset mocktime - WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL - self.log.info("if we wait about {} minutes, we should eventually get more requests".format(WAIT_TIME / 60)) - self.nodes[0].setmocktime(int(time.time() + WAIT_TIME)) - p.wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2) - self.nodes[0].setmocktime(0) + def test_disconnect_fallback(self): + self.log.info('Check that disconnect will select another peer for download') + WTXID = 0xffbb + peer1 = self.nodes[0].add_p2p_connection(TestP2PConn()) + peer2 = self.nodes[0].add_p2p_connection(TestP2PConn()) + for p in [peer1, peer2]: + p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)])) + # One of the peers is asked for the tx + peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1) + with p2p_lock: + peer_disconnect, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1) + assert_equal(peer_fallback.tx_getdata_count, 0) + peer_disconnect.peer_disconnect() + 
peer_disconnect.wait_for_disconnect() + peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1) + with p2p_lock: + assert_equal(peer_fallback.tx_getdata_count, 1) + + def test_notfound_fallback(self): + self.log.info('Check that notfounds will select another peer for download immediately') + WTXID = 0xffdd + peer1 = self.nodes[0].add_p2p_connection(TestP2PConn()) + peer2 = self.nodes[0].add_p2p_connection(TestP2PConn()) + for p in [peer1, peer2]: + p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)])) + # One of the peers is asked for the tx + peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1) + with p2p_lock: + peer_notfound, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1) + assert_equal(peer_fallback.tx_getdata_count, 0) + peer_notfound.send_and_ping(msg_notfound(vec=[CInv(MSG_WTX, WTXID)])) # Send notfound, so that fallback peer is selected + peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1) + with p2p_lock: + assert_equal(peer_fallback.tx_getdata_count, 1) + + def test_preferred_inv(self): + self.log.info('Check that invs from preferred peers are downloaded immediately') + self.restart_node(0, extra_args=['-whitelist=noban@127.0.0.1']) + peer = self.nodes[0].add_p2p_connection(TestP2PConn()) + peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0xff00ff00)])) + peer.wait_until(lambda: peer.tx_getdata_count >= 1, timeout=1) + with p2p_lock: + assert_equal(peer.tx_getdata_count, 1) + + def test_large_inv_batch(self): + self.log.info('Test how large inv batches are handled with relay permission') + self.restart_node(0, extra_args=['-whitelist=relay@127.0.0.1']) + peer = self.nodes[0].add_p2p_connection(TestP2PConn()) + peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)])) + peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1) + + self.log.info('Test how large inv batches are handled without relay permission') + self.restart_node(0) + peer = self.nodes[0].add_p2p_connection(TestP2PConn()) + peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)])) + peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS) + peer.sync_with_ping() + with p2p_lock: + assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS) def test_spurious_notfound(self): self.log.info('Check that spurious notfound is ignored') self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)])) def run_test(self): + # Run tests without mocktime that only need one peer-connection first, to avoid restarting the nodes + self.test_expiry_fallback() + self.test_disconnect_fallback() + self.test_notfound_fallback() + self.test_preferred_inv() + self.test_large_inv_batch() + self.test_spurious_notfound() + # Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when # the next trickle relay event happens. 
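# (For reference, with the constants above an inbound peer's announcement at mock time T is not
# requested before T + INBOUND_PEER_TX_DELAY, and a peer that already has MAX_GETDATA_IN_FLIGHT
# requests outstanding gets a further OVERLOADED_PEER_DELAY; test_in_flight_max advances mocktime
# in exactly those increments to stay deterministic.)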
- for test in [self.test_spurious_notfound, self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]: + for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]: self.stop_nodes() self.start_nodes() self.connect_nodes(1, 0) diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index 36b434bce3..e7a05d8547 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -60,7 +60,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - connect_nodes, ) @@ -284,7 +283,7 @@ class AcceptBlockTest(BitcoinTestFramework): test_node.wait_for_disconnect() # 9. Connect node1 to node0 and ensure it is able to sync - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 35cea85c07..f965677408 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -146,7 +146,19 @@ class BlockchainTest(BitcoinTestFramework): 'possible': True, }, }, - 'active': False} + 'active': False + }, + 'taproot': { + 'type': 'bip9', + 'bip9': { + 'status': 'active', + 'start_time': -1, + 'timeout': 9223372036854775807, + 'since': 0 + }, + 'height': 0, + 'active': True + } }) def _test_getchaintxstats(self): diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index 7a729f7bc1..167c4671ef 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -12,7 +12,6 @@ from test_framework.util import ( assert_greater_than, assert_greater_than_or_equal, assert_raises_rpc_error, - connect_nodes, count_bytes, find_vout_for_address, ) @@ -38,10 +37,10 @@ class RawTransactionsTest(BitcoinTestFramework): def setup_network(self): self.setup_nodes() - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) - connect_nodes(self.nodes[0], 3) + self.connect_nodes(0, 1) + self.connect_nodes(1, 2) + self.connect_nodes(0, 2) + self.connect_nodes(0, 3) def run_test(self): self.log.info("Connect nodes, set fees, generate blocks, and sync") diff --git a/test/functional/rpc_getblockfilter.py b/test/functional/rpc_getblockfilter.py index 8fa36445cd..c3c3622cf9 100755 --- a/test/functional/rpc_getblockfilter.py +++ b/test/functional/rpc_getblockfilter.py @@ -7,7 +7,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_is_hex_string, assert_raises_rpc_error, - connect_nodes, disconnect_nodes ) FILTER_TYPES = ["basic"] @@ -20,7 +19,7 @@ class GetBlockFilterTest(BitcoinTestFramework): def run_test(self): # Create two chains by disconnecting nodes 0 & 1, mining, then reconnecting - disconnect_nodes(self.nodes[0], 1) + self.disconnect_nodes(0, 1) self.nodes[0].generate(3) self.nodes[1].generate(4) @@ -29,7 +28,7 @@ class GetBlockFilterTest(BitcoinTestFramework): chain0_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)] # Reorg node 0 to a new chain - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks() assert_equal(self.nodes[0].getblockcount(), 4) diff --git a/test/functional/rpc_getpeerinfo_deprecation.py b/test/functional/rpc_getpeerinfo_deprecation.py index 
287c40ae3e..340a66e12f 100755 --- a/test/functional/rpc_getpeerinfo_deprecation.py +++ b/test/functional/rpc_getpeerinfo_deprecation.py @@ -5,7 +5,6 @@ """Test deprecation of getpeerinfo RPC fields.""" from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import connect_nodes class GetpeerinfoDeprecationTest(BitcoinTestFramework): @@ -26,7 +25,7 @@ class GetpeerinfoDeprecationTest(BitcoinTestFramework): def test_addnode_deprecation(self): self.restart_node(1, ["-deprecatedrpc=getpeerinfo_addnode"]) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.log.info("Test getpeerinfo by default no longer returns an addnode field") assert "addnode" not in self.nodes[0].getpeerinfo()[0].keys() diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py index e788e75557..f884b8d293 100755 --- a/test/functional/rpc_invalidateblock.py +++ b/test/functional/rpc_invalidateblock.py @@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR from test_framework.util import ( assert_equal, - connect_nodes, ) @@ -32,7 +31,7 @@ class InvalidateTest(BitcoinTestFramework): assert_equal(self.nodes[1].getblockcount(), 6) self.log.info("Connect nodes to force a reorg") - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks(self.nodes[0:2]) assert_equal(self.nodes[0].getblockcount(), 6) badhash = self.nodes[1].getblockhash(2) @@ -43,7 +42,7 @@ class InvalidateTest(BitcoinTestFramework): assert_equal(self.nodes[0].getbestblockhash(), besthash_n0) self.log.info("Make sure we won't reorg to a lower work chain:") - connect_nodes(self.nodes[1], 2) + self.connect_nodes(1, 2) self.log.info("Sync node 2 to node 1 so both have 6 blocks") self.sync_blocks(self.nodes[1:3]) assert_equal(self.nodes[2].getblockcount(), 6) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index b8a04f494d..71e8e1b22a 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -24,7 +24,6 @@ from test_framework.util import ( assert_greater_than_or_equal, assert_greater_than, assert_raises_rpc_error, - connect_nodes, p2p_port, ) @@ -53,8 +52,9 @@ class NetTest(BitcoinTestFramework): # Get out of IBD for the minfeefilter and getpeerinfo tests. self.nodes[0].generate(101) # Connect nodes both ways. - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 0) + self.connect_nodes(0, 1) + self.connect_nodes(1, 0) + self.sync_all() self.test_connection_count() self.test_getpeerinfo() @@ -117,8 +117,8 @@ class NetTest(BitcoinTestFramework): with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): self.nodes[0].setnetworkactive(state=True) # Connect nodes both ways. 
- connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 0) + self.connect_nodes(0, 1) + self.connect_nodes(1, 0) info = self.nodes[0].getnetworkinfo() assert_equal(info['networkactive'], True) diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py index 8386e47411..04d55b103f 100755 --- a/test/functional/rpc_preciousblock.py +++ b/test/functional/rpc_preciousblock.py @@ -7,7 +7,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, ) def unidirectional_node_sync_via_rpc(node_src, node_dest): @@ -61,7 +60,7 @@ class PreciousTest(BitcoinTestFramework): self.log.info("Connect nodes and check no reorg occurs") # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync) node_sync_via_rpc(self.nodes[0:2]) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) assert_equal(self.nodes[0].getbestblockhash(), hashC) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G") @@ -98,8 +97,8 @@ class PreciousTest(BitcoinTestFramework): hashL = self.nodes[2].getbestblockhash() self.log.info("Connect nodes and check no reorg occurs") node_sync_via_rpc(self.nodes[1:3]) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(1, 2) + self.connect_nodes(0, 2) assert_equal(self.nodes[0].getbestblockhash(), hashH) assert_equal(self.nodes[1].getbestblockhash(), hashH) assert_equal(self.nodes[2].getbestblockhash(), hashL) diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 10aebc2b1d..32dc2f8644 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -12,8 +12,6 @@ from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, - connect_nodes, - disconnect_nodes, find_output, ) @@ -46,7 +44,7 @@ class PSBTTest(BitcoinTestFramework): # Disconnect offline node from others # Topology of test network is linear, so this one call is enough - disconnect_nodes(offline_node, 1) + self.disconnect_nodes(0, 1) # Create watchonly on online_node online_node.createwallet(wallet_name='wonline', disable_private_keys=True) @@ -80,8 +78,8 @@ class PSBTTest(BitcoinTestFramework): wonline.unloadwallet() # Reconnect - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 1) + self.connect_nodes(0, 2) def assert_change_type(self, psbtx, expected_type): """Assert that the given PSBT has a change output with the given type.""" diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index d74128b42d..326495843f 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -20,7 +20,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - connect_nodes, hex_str_to_bytes, ) @@ -60,7 +59,7 @@ class RawTransactionsTest(BitcoinTestFramework): def setup_network(self): super().setup_network() - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) def run_test(self): self.log.info('prepare some coins for multiple *rawtransaction commands') diff --git a/test/functional/rpc_setban.py b/test/functional/rpc_setban.py index 1cc1fb164b..bc48449084 100755 --- a/test/functional/rpc_setban.py +++ b/test/functional/rpc_setban.py @@ -6,7 +6,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util 
import ( - connect_nodes, p2p_port ) @@ -18,7 +17,7 @@ class SetBanTests(BitcoinTestFramework): def run_test(self): # Node 0 connects to Node 1, check that the noban permission is not granted - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] assert(not 'noban' in peerinfo['permissions']) @@ -32,14 +31,14 @@ class SetBanTests(BitcoinTestFramework): # However, node 0 should be able to reconnect if it has noban permission self.restart_node(1, ['-whitelist=127.0.0.1']) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] assert('noban' in peerinfo['permissions']) # If we remove the ban, Node 0 should be able to reconnect even without noban permission self.nodes[1].setban("127.0.0.1", "remove") self.restart_node(1, []) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] assert(not 'noban' in peerinfo['permissions']) diff --git a/test/functional/test_framework/bip340_test_vectors.csv b/test/functional/test_framework/bip340_test_vectors.csv new file mode 100644 index 0000000000..e068322deb --- /dev/null +++ b/test/functional/test_framework/bip340_test_vectors.csv @@ -0,0 +1,16 @@ +index,secret key,public key,aux_rand,message,signature,verification result,comment +0,0000000000000000000000000000000000000000000000000000000000000003,F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9,0000000000000000000000000000000000000000000000000000000000000000,0000000000000000000000000000000000000000000000000000000000000000,E907831F80848D1069A5371B402410364BDF1C5F8307B0084C55F1CE2DCA821525F66A4A85EA8B71E482A74F382D2CE5EBEEE8FDB2172F477DF4900D310536C0,TRUE, +1,B7E151628AED2A6ABF7158809CF4F3C762E7160F38B4DA56A784D9045190CFEF,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,0000000000000000000000000000000000000000000000000000000000000001,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6896BD60EEAE296DB48A229FF71DFE071BDE413E6D43F917DC8DCF8C78DE33418906D11AC976ABCCB20B091292BFF4EA897EFCB639EA871CFA95F6DE339E4B0A,TRUE, +2,C90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B14E5C9,DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8,C87AA53824B4D7AE2EB035A2B5BBBCCC080E76CDC6D1692C4B0B62D798E6D906,7E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C,5831AAEED7B44BB74E5EAB94BA9D4294C49BCF2A60728D8B4C200F50DD313C1BAB745879A5AD954A72C45A91C3A51D3C7ADEA98D82F8481E0E1E03674A6F3FB7,TRUE, +3,0B432B2677937381AEF05BB02A66ECD012773062CF3FA2549E44F58ED2401710,25D1DFF95105F5253C4022F628A996AD3A0D95FBF21D468A1B33F8C160D8F517,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,7EB0509757E246F19449885651611CB965ECC1A187DD51B64FDA1EDC9637D5EC97582B9CB13DB3933705B32BA982AF5AF25FD78881EBB32771FC5922EFC66EA3,TRUE,test fails if msg is reduced modulo p or n +4,,D69C3509BB99E412E68B0FE8544E72837DFA30746D8BE2AA65975F29D22DC7B9,,4DF3C3F68FCC83B27E9D42C90431A72499F17875C81A599B566C9889B9696703,00000000000000000000003B78CE563F89A0ED9414F5AA28AD0D96D6795F9C6376AFB1548AF603B3EB45C9F8207DEE1060CB71C04E80F593060B07D28308D7F4,TRUE, +5,,EEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key not on the curve 
+6,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFF97BD5755EEEA420453A14355235D382F6472F8568A18B2F057A14602975563CC27944640AC607CD107AE10923D9EF7A73C643E166BE5EBEAFA34B1AC553E2,FALSE,has_even_y(R) is false +7,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,1FA62E331EDBC21C394792D2AB1100A7B432B013DF3F6FF4F99FCB33E0E1515F28890B3EDB6E7189B630448B515CE4F8622A954CFE545735AAEA5134FCCDB2BD,FALSE,negated message +8,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769961764B3AA9B2FFCB6EF947B6887A226E8D7C93E00C5ED0C1834FF0D0C2E6DA6,FALSE,negated s value +9,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,0000000000000000000000000000000000000000000000000000000000000000123DDA8328AF9C23A94C1FEECFD123BA4FB73476F0D594DCB65C6425BD186051,FALSE,sG - eP is infinite. Test fails in single verification if has_even_y(inf) is defined as true and x(inf) as 0 +10,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,00000000000000000000000000000000000000000000000000000000000000017615FBAF5AE28864013C099742DEADB4DBA87F11AC6754F93780D5A1837CF197,FALSE,sG - eP is infinite. Test fails in single verification if has_even_y(inf) is defined as true and x(inf) as 1 +11,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,4A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1D69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is not an X coordinate on the curve +12,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is equal to field size +13,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,FALSE,sig[32:64] is equal to curve order +14,,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key is not a valid X coordinate because it exceeds the field size diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index afc1995009..0859380d06 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -4,6 +4,10 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Utilities for manipulating blocks and transactions.""" +from binascii import a2b_hex +import io +import struct +import time import unittest from .address import ( @@ -43,7 +47,9 @@ from .script import ( from .util import assert_equal from io import BytesIO +WITNESS_SCALE_FACTOR = 4 MAX_BLOCK_SIGOPS = 20000 +MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR # Genesis block time (regtest) TIME_GENESIS_BLOCK = 1296688602 @@ -51,19 +57,31 @@ TIME_GENESIS_BLOCK = 1296688602 # From BIP141 WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed" +NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]} -def create_block(hashprev, coinbase, ntime=None, *, version=1): + +def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None): """Create a block (with regtest difficulty).""" block = CBlock() - block.nVersion = version - if ntime is None: - import time - block.nTime = int(time.time() + 600) + if tmpl is None: + tmpl = {} + block.nVersion = version or tmpl.get('version') or 1 + block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600) + block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10) + if tmpl and not tmpl.get('bits') is None: + block.nBits = struct.unpack('>I', a2b_hex(tmpl['bits']))[0] else: - block.nTime = ntime - block.hashPrevBlock = hashprev - block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams + block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams + if coinbase is None: + coinbase = create_coinbase(height=tmpl['height']) block.vtx.append(coinbase) + if txlist: + for tx in txlist: + if not hasattr(tx, 'calc_sha256'): + txo = CTransaction() + txo.deserialize(io.BytesIO(tx)) + tx = txo + block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() return block @@ -101,22 +119,31 @@ def script_BIP34_coinbase_height(height): return CScript([CScriptNum(height)]) -def create_coinbase(height, pubkey=None): - """Create a coinbase transaction, assuming no miner fees. +def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0): + """Create a coinbase transaction. If pubkey is passed in, the coinbase output will be a P2PK output; - otherwise an anyone-can-spend output.""" + otherwise an anyone-can-spend output. + + If extra_output_script is given, make a 0-value output to that + script. This is useful to pad block weight/sigops as needed. 
""" coinbase = CTransaction() coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff)) coinbaseoutput = CTxOut() coinbaseoutput.nValue = 50 * COIN halvings = int(height / 150) # regtest coinbaseoutput.nValue >>= halvings - if (pubkey is not None): + coinbaseoutput.nValue += fees + if pubkey is not None: coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) coinbase.vout = [coinbaseoutput] + if extra_output_script is not None: + coinbaseoutput2 = CTxOut() + coinbaseoutput2.nValue = 0 + coinbaseoutput2.scriptPubKey = extra_output_script + coinbase.vout.append(coinbaseoutput2) coinbase.calc_sha256() return coinbase diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index adbffb7dc7..a6bc187985 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019 Pieter Wuille +# Copyright (c) 2019-2020 Pieter Wuille # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test-only secp256k1 elliptic curve implementation @@ -6,10 +6,24 @@ WARNING: This code is slow, uses bad randomness, does not properly protect keys, and is trivially vulnerable to side channel attacks. Do not use for anything but tests.""" +import csv +import hashlib +import os import random +import sys +import unittest from .util import modinv +def TaggedHash(tag, data): + ss = hashlib.sha256(tag.encode('utf-8')).digest() + ss += ss + ss += data + return hashlib.sha256(ss).digest() + +def xor_bytes(b0, b1): + return bytes(x ^ y for (x, y) in zip(b0, b1)) + def jacobi_symbol(n, k): """Compute the Jacobi symbol of n modulo k @@ -68,6 +82,10 @@ class EllipticCurve: inv_3 = (inv_2 * inv) % self.p return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1) + def has_even_y(self, p1): + """Whether the point p1 has an even Y coordinate when expressed in affine coordinates.""" + return not (p1[2] == 0 or self.affine(p1)[1] & 1) + def negate(self, p1): """Negate a Jacobian point tuple p1.""" x1, y1, z1 = p1 @@ -86,13 +104,13 @@ class EllipticCurve: return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1 def lift_x(self, x): - """Given an X coordinate on the curve, return a corresponding affine point.""" + """Given an X coordinate on the curve, return a corresponding affine point for which the Y coordinate is even.""" x_3 = pow(x, 3, self.p) v = x_3 + self.a * x + self.b y = modsqrt(v, self.p) if y is None: return None - return (x, y, 1) + return (x, self.p - y if y & 1 else y, 1) def double(self, p1): """Double a Jacobian tuple p1 @@ -197,7 +215,8 @@ class EllipticCurve: r = self.add(r, p) return r -SECP256K1 = EllipticCurve(2**256 - 2**32 - 977, 0, 7) +SECP256K1_FIELD_SIZE = 2**256 - 2**32 - 977 +SECP256K1 = EllipticCurve(SECP256K1_FIELD_SIZE, 0, 7) SECP256K1_G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, 1) SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 @@ -221,9 +240,9 @@ class ECPubKey(): x = int.from_bytes(data[1:33], 'big') if SECP256K1.is_x_coord(x): p = SECP256K1.lift_x(x) - # if the oddness of the y co-ord isn't correct, find the other - # valid y - if (p[1] & 1) != (data[0] & 1): + # Make the Y coordinate odd if required (lift_x always produces + # a point 
with an even Y coordinate). + if data[0] & 1: p = SECP256K1.negate(p) self.p = p self.valid = True @@ -307,6 +326,10 @@ class ECPubKey(): return False return True +def generate_privkey(): + """Generate a valid random 32-byte private key.""" + return random.randrange(1, SECP256K1_ORDER).to_bytes(32, 'big') + class ECKey(): """A secp256k1 private key""" @@ -324,7 +347,7 @@ class ECKey(): def generate(self, compressed=True): """Generate a random private key (compressed or uncompressed).""" - self.set(random.randrange(1, SECP256K1_ORDER).to_bytes(32, 'big'), compressed) + self.set(generate_privkey(), compressed) def get_bytes(self): """Retrieve the 32-byte representation of this key.""" @@ -369,3 +392,161 @@ class ECKey(): rb = r.to_bytes((r.bit_length() + 8) // 8, 'big') sb = s.to_bytes((s.bit_length() + 8) // 8, 'big') return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb + +def compute_xonly_pubkey(key): + """Compute an x-only (32 byte) public key from a (32 byte) private key. + + This also returns whether the resulting public key was negated. + """ + + assert len(key) == 32 + x = int.from_bytes(key, 'big') + if x == 0 or x >= SECP256K1_ORDER: + return (None, None) + P = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, x)])) + return (P[0].to_bytes(32, 'big'), not SECP256K1.has_even_y(P)) + +def tweak_add_privkey(key, tweak): + """Tweak a private key (after negating it if needed).""" + + assert len(key) == 32 + assert len(tweak) == 32 + + x = int.from_bytes(key, 'big') + if x == 0 or x >= SECP256K1_ORDER: + return None + if not SECP256K1.has_even_y(SECP256K1.mul([(SECP256K1_G, x)])): + x = SECP256K1_ORDER - x + t = int.from_bytes(tweak, 'big') + if t >= SECP256K1_ORDER: + return None + x = (x + t) % SECP256K1_ORDER + if x == 0: + return None + return x.to_bytes(32, 'big') + +def tweak_add_pubkey(key, tweak): + """Tweak a public key and return whether the result had to be negated.""" + + assert len(key) == 32 + assert len(tweak) == 32 + + x_coord = int.from_bytes(key, 'big') + if x_coord >= SECP256K1_FIELD_SIZE: + return None + P = SECP256K1.lift_x(x_coord) + if P is None: + return None + t = int.from_bytes(tweak, 'big') + if t >= SECP256K1_ORDER: + return None + Q = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, t), (P, 1)])) + if Q is None: + return None + return (Q[0].to_bytes(32, 'big'), not SECP256K1.has_even_y(Q)) + +def verify_schnorr(key, sig, msg): + """Verify a Schnorr signature (see BIP 340). + + - key is a 32-byte xonly pubkey (computed using compute_xonly_pubkey). 
+ - sig is a 64-byte Schnorr signature + - msg is a 32-byte message + """ + assert len(key) == 32 + assert len(msg) == 32 + assert len(sig) == 64 + + x_coord = int.from_bytes(key, 'big') + if x_coord == 0 or x_coord >= SECP256K1_FIELD_SIZE: + return False + P = SECP256K1.lift_x(x_coord) + if P is None: + return False + r = int.from_bytes(sig[0:32], 'big') + if r >= SECP256K1_FIELD_SIZE: + return False + s = int.from_bytes(sig[32:64], 'big') + if s >= SECP256K1_ORDER: + return False + e = int.from_bytes(TaggedHash("BIP0340/challenge", sig[0:32] + key + msg), 'big') % SECP256K1_ORDER + R = SECP256K1.mul([(SECP256K1_G, s), (P, SECP256K1_ORDER - e)]) + if not SECP256K1.has_even_y(R): + return False + if ((r * R[2] * R[2]) % SECP256K1_FIELD_SIZE) != R[0]: + return False + return True + +def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): + """Create a Schnorr signature (see BIP 340).""" + + if aux is None: + aux = bytes(32) + + assert len(key) == 32 + assert len(msg) == 32 + assert len(aux) == 32 + + sec = int.from_bytes(key, 'big') + if sec == 0 or sec >= SECP256K1_ORDER: + return None + P = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, sec)])) + if SECP256K1.has_even_y(P) == flip_p: + sec = SECP256K1_ORDER - sec + t = (sec ^ int.from_bytes(TaggedHash("BIP0340/aux", aux), 'big')).to_bytes(32, 'big') + kp = int.from_bytes(TaggedHash("BIP0340/nonce", t + P[0].to_bytes(32, 'big') + msg), 'big') % SECP256K1_ORDER + assert kp != 0 + R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, kp)])) + k = kp if SECP256K1.has_even_y(R) != flip_r else SECP256K1_ORDER - kp + e = int.from_bytes(TaggedHash("BIP0340/challenge", R[0].to_bytes(32, 'big') + P[0].to_bytes(32, 'big') + msg), 'big') % SECP256K1_ORDER + return R[0].to_bytes(32, 'big') + ((k + e * sec) % SECP256K1_ORDER).to_bytes(32, 'big') + +class TestFrameworkKey(unittest.TestCase): + def test_schnorr(self): + """Test the Python Schnorr implementation.""" + byte_arrays = [generate_privkey() for _ in range(3)] + [v.to_bytes(32, 'big') for v in [0, SECP256K1_ORDER - 1, SECP256K1_ORDER, 2**256 - 1]] + keys = {} + for privkey in byte_arrays: # build array of key/pubkey pairs + pubkey, _ = compute_xonly_pubkey(privkey) + if pubkey is not None: + keys[privkey] = pubkey + for msg in byte_arrays: # test every combination of message, signing key, verification key + for sign_privkey, sign_pubkey in keys.items(): + sig = sign_schnorr(sign_privkey, msg) + for verify_privkey, verify_pubkey in keys.items(): + if verify_privkey == sign_privkey: + self.assertTrue(verify_schnorr(verify_pubkey, sig, msg)) + sig = list(sig) + sig[random.randrange(64)] ^= (1 << (random.randrange(8))) # damaging signature should break things + sig = bytes(sig) + self.assertFalse(verify_schnorr(verify_pubkey, sig, msg)) + + def test_schnorr_testvectors(self): + """Implement the BIP340 test vectors (read from bip340_test_vectors.csv).""" + num_tests = 0 + with open(os.path.join(sys.path[0], 'test_framework', 'bip340_test_vectors.csv'), newline='', encoding='utf8') as csvfile: + reader = csv.reader(csvfile) + next(reader) + for row in reader: + (i_str, seckey_hex, pubkey_hex, aux_rand_hex, msg_hex, sig_hex, result_str, comment) = row + i = int(i_str) + pubkey = bytes.fromhex(pubkey_hex) + msg = bytes.fromhex(msg_hex) + sig = bytes.fromhex(sig_hex) + result = result_str == 'TRUE' + if seckey_hex != '': + seckey = bytes.fromhex(seckey_hex) + pubkey_actual = compute_xonly_pubkey(seckey)[0] + self.assertEqual(pubkey.hex(), pubkey_actual.hex(), "BIP340 test vector %i (%s): pubkey 
mismatch" % (i, comment)) + aux_rand = bytes.fromhex(aux_rand_hex) + try: + sig_actual = sign_schnorr(seckey, msg, aux_rand) + self.assertEqual(sig.hex(), sig_actual.hex(), "BIP340 test vector %i (%s): sig mismatch" % (i, comment)) + except RuntimeError as e: + self.fail("BIP340 test vector %i (%s): signing raised exception %s" % (i, comment, e)) + result_actual = verify_schnorr(pubkey, sig, msg) + if result: + self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification failed" % (i, comment)) + else: + self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification succeeded unexpectedly" % (i, comment)) + num_tests += 1 + self.assertTrue(num_tests >= 15) # expect at least 15 test vectors diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 5e35ba0fce..8e5848d493 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -6,11 +6,15 @@ This file is modified from python-bitcoinlib. """ + +from collections import namedtuple import hashlib import struct import unittest from typing import List, Dict +from .key import TaggedHash, tweak_add_pubkey + from .messages import ( CTransaction, CTxOut, @@ -22,8 +26,13 @@ from .messages import ( ) MAX_SCRIPT_ELEMENT_SIZE = 520 +LOCKTIME_THRESHOLD = 500000000 +ANNEX_TAG = 0x50 + OPCODE_NAMES = {} # type: Dict[CScriptOp, str] +LEAF_VERSION_TAPSCRIPT = 0xc0 + def hash160(s): return hashlib.new('ripemd160', sha256(s)).digest() @@ -239,11 +248,8 @@ OP_NOP8 = CScriptOp(0xb7) OP_NOP9 = CScriptOp(0xb8) OP_NOP10 = CScriptOp(0xb9) -# template matching params -OP_SMALLINTEGER = CScriptOp(0xfa) -OP_PUBKEYS = CScriptOp(0xfb) -OP_PUBKEYHASH = CScriptOp(0xfd) -OP_PUBKEY = CScriptOp(0xfe) +# BIP 342 opcodes (Tapscript) +OP_CHECKSIGADD = CScriptOp(0xba) OP_INVALIDOPCODE = CScriptOp(0xff) @@ -359,10 +365,7 @@ OPCODE_NAMES.update({ OP_NOP8: 'OP_NOP8', OP_NOP9: 'OP_NOP9', OP_NOP10: 'OP_NOP10', - OP_SMALLINTEGER: 'OP_SMALLINTEGER', - OP_PUBKEYS: 'OP_PUBKEYS', - OP_PUBKEYHASH: 'OP_PUBKEYHASH', - OP_PUBKEY: 'OP_PUBKEY', + OP_CHECKSIGADD: 'OP_CHECKSIGADD', OP_INVALIDOPCODE: 'OP_INVALIDOPCODE', }) @@ -593,6 +596,7 @@ class CScript(bytes): return n +SIGHASH_DEFAULT = 0 # Taproot-only default, semantics same as SIGHASH_ALL SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 @@ -615,7 +619,6 @@ def FindAndDelete(script, sig): r += script[last_sop_idx:] return CScript(r) - def LegacySignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash @@ -738,3 +741,113 @@ class TestFrameworkScript(unittest.TestCase): values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500] for value in values: self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value) + +def TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT): + assert (len(txTo.vin) == len(spent_utxos)) + assert (input_index < len(txTo.vin)) + out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3 + in_type = hash_type & SIGHASH_ANYONECANPAY + spk = spent_utxos[input_index].scriptPubKey + ss = bytes([0, hash_type]) # epoch, hash_type + ss += struct.pack("<i", txTo.nVersion) + ss += struct.pack("<I", txTo.nLockTime) + if in_type != SIGHASH_ANYONECANPAY: + ss += sha256(b"".join(i.prevout.serialize() for i in txTo.vin)) + ss += 
sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos)) + ss += sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos)) + ss += sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin)) + if out_type == SIGHASH_ALL: + ss += sha256(b"".join(o.serialize() for o in txTo.vout)) + spend_type = 0 + if annex is not None: + spend_type |= 1 + if (scriptpath): + spend_type |= 2 + ss += bytes([spend_type]) + if in_type == SIGHASH_ANYONECANPAY: + ss += txTo.vin[input_index].prevout.serialize() + ss += struct.pack("<q", spent_utxos[input_index].nValue) + ss += ser_string(spk) + ss += struct.pack("<I", txTo.vin[input_index].nSequence) + else: + ss += struct.pack("<I", input_index) + if (spend_type & 1): + ss += sha256(ser_string(annex)) + if out_type == SIGHASH_SINGLE: + if input_index < len(txTo.vout): + ss += sha256(txTo.vout[input_index].serialize()) + else: + ss += bytes(0 for _ in range(32)) + if (scriptpath): + ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script)) + ss += bytes([0]) + ss += struct.pack("<i", codeseparator_pos) + assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37 + return TaggedHash("TapSighash", ss) + +def taproot_tree_helper(scripts): + if len(scripts) == 0: + return ([], bytes(0 for _ in range(32))) + if len(scripts) == 1: + # One entry: treat as a leaf + script = scripts[0] + assert(not callable(script)) + if isinstance(script, list): + return taproot_tree_helper(script) + assert(isinstance(script, tuple)) + version = LEAF_VERSION_TAPSCRIPT + name = script[0] + code = script[1] + if len(script) == 3: + version = script[2] + assert version & 1 == 0 + assert isinstance(code, bytes) + h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code)) + if name is None: + return ([], h) + return ([(name, version, code, bytes())], h) + elif len(scripts) == 2 and callable(scripts[1]): + # Two entries, and the right one is a function + left, left_h = taproot_tree_helper(scripts[0:1]) + right_h = scripts[1](left_h) + left = [(name, version, script, control + right_h) for name, version, script, control in left] + right = [] + else: + # Two or more entries: descend into each side + split_pos = len(scripts) // 2 + left, left_h = taproot_tree_helper(scripts[0:split_pos]) + right, right_h = taproot_tree_helper(scripts[split_pos:]) + left = [(name, version, script, control + right_h) for name, version, script, control in left] + right = [(name, version, script, control + left_h) for name, version, script, control in right] + if right_h < left_h: + right_h, left_h = left_h, right_h + h = TaggedHash("TapBranch", left_h + right_h) + return (left + right, h) + +TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,inner_pubkey,negflag,tweak,leaves") +TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch") + +def taproot_construct(pubkey, scripts=None): + """Construct a tree of Taproot spending conditions + + pubkey: an ECPubKey object for the internal pubkey + scripts: a list of items; each item is either: + - a (name, CScript) tuple + - a (name, CScript, leaf version) tuple + - another list of items (with the same structure) + - a function, which specifies how to compute the hashing partner + in function of the hash of whatever it is combined with + + Returns: script (sPK or redeemScript), tweak, {name:(script, leaf version, negation flag, innerkey, merklepath), ...} + """ + if scripts is None: + scripts = [] + + 
ret, h = taproot_tree_helper(scripts) + tweak = TaggedHash("TapTweak", pubkey + h) + tweaked, negated = tweak_add_pubkey(pubkey, tweak) + leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret) + return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves) + +def is_op_success(o): + return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 2824d80434..20f608a9cf 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -27,10 +27,9 @@ from .util import ( PortSeed, assert_equal, check_json_precision, - connect_nodes, - disconnect_nodes, get_datadir_path, initialize_datadir, + p2p_port, wait_until_helper, ) @@ -103,7 +102,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.supports_cli = True self.bind_to_localhost_only = True self.parse_args() - self.default_wallet_name = "" + self.default_wallet_name = "default_wallet" if self.options.descriptors else "" self.wallet_data_filename = "wallet.dat" # Optional list of wallet names that can be set in set_test_params to # create and import keys to. If unset, default is len(nodes) * @@ -529,10 +528,49 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.nodes[i].process.wait(timeout) def connect_nodes(self, a, b): - connect_nodes(self.nodes[a], b) + def connect_nodes_helper(from_connection, node_num): + ip_port = "127.0.0.1:" + str(p2p_port(node_num)) + from_connection.addnode(ip_port, "onetry") + # poll until version handshake complete to avoid race conditions + # with transaction relaying + # See comments in net_processing: + # * Must have a version message before anything else + # * Must have a verack message before anything else + wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo())) + wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo())) + + connect_nodes_helper(self.nodes[a], b) def disconnect_nodes(self, a, b): - disconnect_nodes(self.nodes[a], b) + def disconnect_nodes_helper(from_connection, node_num): + def get_peer_ids(): + result = [] + for peer in from_connection.getpeerinfo(): + if "testnode{}".format(node_num) in peer['subver']: + result.append(peer['id']) + return result + + peer_ids = get_peer_ids() + if not peer_ids: + self.log.warning("disconnect_nodes: {} and {} were not connected".format( + from_connection.index, + node_num, + )) + return + for peer_id in peer_ids: + try: + from_connection.disconnectnode(nodeid=peer_id) + except JSONRPCException as e: + # If this node is disconnected between calculating the peer id + # and issuing the disconnect, don't worry about it. + # This avoids a race condition if we're mass-disconnecting peers. 
+ if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED + raise + + # wait to disconnect + wait_until_helper(lambda: not get_peer_ids(), timeout=5) + + disconnect_nodes_helper(self.nodes[a], b) def split_network(self): """ diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index af7f0b62f4..3356f1ab10 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -12,7 +12,6 @@ import inspect import json import logging import os -import random import re import time import unittest @@ -412,47 +411,6 @@ def set_node_times(nodes, t): node.setmocktime(t) -def disconnect_nodes(from_connection, node_num): - def get_peer_ids(): - result = [] - for peer in from_connection.getpeerinfo(): - if "testnode{}".format(node_num) in peer['subver']: - result.append(peer['id']) - return result - - peer_ids = get_peer_ids() - if not peer_ids: - logger.warning("disconnect_nodes: {} and {} were not connected".format( - from_connection.index, - node_num, - )) - return - for peer_id in peer_ids: - try: - from_connection.disconnectnode(nodeid=peer_id) - except JSONRPCException as e: - # If this node is disconnected between calculating the peer id - # and issuing the disconnect, don't worry about it. - # This avoids a race condition if we're mass-disconnecting peers. - if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED - raise - - # wait to disconnect - wait_until_helper(lambda: not get_peer_ids(), timeout=5) - - -def connect_nodes(from_connection, node_num): - ip_port = "127.0.0.1:" + str(p2p_port(node_num)) - from_connection.addnode(ip_port, "onetry") - # poll until version handshake complete to avoid race conditions - # with transaction relaying - # See comments in net_processing: - # * Must have a version message before anything else - # * Must have a verack message before anything else - wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo())) - wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo())) - - # Transaction/Block functions ############################# @@ -469,62 +427,6 @@ def find_output(node, txid, amount, *, blockhash=None): raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount))) -def gather_inputs(from_node, amount_needed, confirmations_required=1): - """ - Return a random set of unspent txouts that are enough to pay amount_needed - """ - assert confirmations_required >= 0 - utxo = from_node.listunspent(confirmations_required) - random.shuffle(utxo) - inputs = [] - total_in = Decimal("0.00000000") - while total_in < amount_needed and len(utxo) > 0: - t = utxo.pop() - total_in += t["amount"] - inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]}) - if total_in < amount_needed: - raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in)) - return (total_in, inputs) - - -def make_change(from_node, amount_in, amount_out, fee): - """ - Create change output(s), return them - """ - outputs = {} - amount = amount_out + fee - change = amount_in - amount - if change > amount * 2: - # Create an extra change output to break up big inputs - change_address = from_node.getnewaddress() - # Split change in two, being careful of rounding: - outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) - change = amount_in - amount - outputs[change_address] - if change > 0: - outputs[from_node.getnewaddress()] 
= change - return outputs - - -def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): - """ - Create a random transaction. - Returns (txid, hex-encoded-transaction-data, fee) - """ - from_node = random.choice(nodes) - to_node = random.choice(nodes) - fee = min_fee + fee_increment * random.randint(0, fee_variants) - - (total_in, inputs) = gather_inputs(from_node, amount + fee) - outputs = make_change(from_node, total_in, amount, fee) - outputs[to_node.getnewaddress()] = float(amount) - - rawtx = from_node.createrawtransaction(inputs, outputs) - signresult = from_node.signrawtransactionwithwallet(rawtx) - txid = from_node.sendrawtransaction(signresult["hex"], 0) - - return (txid, signresult["hex"], fee) - - # Helper to create at least "count" utxos # Pass in a fee that is sufficient for relay and mining new transactions. def create_confirmed_utxos(fee, node, count): diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 2e757d7090..8cd82649b6 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -70,6 +70,7 @@ TEST_FRAMEWORK_MODULES = [ "address", "blocktools", "muhash", + "key", "script", "segwit_addr", "util", @@ -88,6 +89,7 @@ BASE_SCRIPTS = [ 'wallet_hd.py', 'wallet_hd.py --descriptors', 'wallet_backup.py', + 'wallet_backup.py --descriptors', # vv Tests less than 5m vv 'mining_getblocktemplate_longpoll.py', 'feature_maxuploadtarget.py', @@ -106,6 +108,7 @@ BASE_SCRIPTS = [ 'mempool_updatefromblock.py', 'wallet_dump.py', 'wallet_listtransactions.py', + 'feature_taproot.py', # vv Tests less than 60s vv 'p2p_sendheaders.py', 'wallet_importmulti.py', @@ -140,6 +143,7 @@ BASE_SCRIPTS = [ 'mempool_reorg.py', 'mempool_persist.py', 'wallet_multiwallet.py', + 'wallet_multiwallet.py --descriptors', 'wallet_multiwallet.py --usecli', 'wallet_createwallet.py', 'wallet_createwallet.py --usecli', diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 3f25c58851..958341c691 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -98,6 +98,10 @@ class ToolWalletTest(BitcoinTestFramework): out = textwrap.dedent('''\ Wallet info =========== + Name: \ + + Format: bdb + Descriptors: no Encrypted: no HD (hd seed available): yes Keypool Size: 2 @@ -137,6 +141,10 @@ class ToolWalletTest(BitcoinTestFramework): out = textwrap.dedent('''\ Wallet info =========== + Name: \ + + Format: bdb + Descriptors: no Encrypted: no HD (hd seed available): yes Keypool Size: 2 @@ -164,6 +172,9 @@ class ToolWalletTest(BitcoinTestFramework): Topping up keypool... 
Wallet info =========== + Name: foo + Format: bdb + Descriptors: no Encrypted: no HD (hd seed available): yes Keypool Size: 2000 diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py index 8837e13005..2e0edcfa38 100755 --- a/test/functional/wallet_abandonconflict.py +++ b/test/functional/wallet_abandonconflict.py @@ -16,8 +16,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - connect_nodes, - disconnect_nodes, ) @@ -50,7 +48,7 @@ class AbandonConflictTest(BitcoinTestFramework): balance = newbalance # Disconnect nodes so node0's transactions don't get into node1's mempool - disconnect_nodes(self.nodes[0], 1) + self.disconnect_nodes(0, 1) # Identify the 10btc outputs nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10")) @@ -161,7 +159,7 @@ class AbandonConflictTest(BitcoinTestFramework): self.nodes[1].sendrawtransaction(signed["hex"]) self.nodes[1].generate(1) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks() # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py index bba0b8974d..6b3276e404 100755 --- a/test/functional/wallet_address_types.py +++ b/test/functional/wallet_address_types.py @@ -62,7 +62,6 @@ from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, - connect_nodes, ) class AddressTypeTest(BitcoinTestFramework): @@ -90,7 +89,7 @@ class AddressTypeTest(BitcoinTestFramework): # Fully mesh-connect nodes for faster mempool sync for i, j in itertools.product(range(self.num_nodes), repeat=2): if i > j: - connect_nodes(self.nodes[i], j) + self.connect_nodes(i, j) self.sync_all() def get_balances(self, key='trusted'): diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py index eddd938847..229c134a4b 100755 --- a/test/functional/wallet_avoidreuse.py +++ b/test/functional/wallet_avoidreuse.py @@ -9,7 +9,6 @@ from test_framework.util import ( assert_approx, assert_equal, assert_raises_rpc_error, - connect_nodes, ) def reset_balance(node, discardaddr): @@ -111,7 +110,7 @@ class AvoidReuseTest(BitcoinTestFramework): assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True) self.restart_node(1) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) # Flags should still be node1.avoid_reuse=false, node2.avoid_reuse=true assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False) diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 36049dcb45..c7bd2ea02b 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -39,7 +39,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - connect_nodes, ) @@ -62,10 +61,10 @@ class WalletBackupTest(BitcoinTestFramework): def setup_network(self): self.setup_nodes() - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[1], 3) - connect_nodes(self.nodes[2], 3) - connect_nodes(self.nodes[2], 0) + self.connect_nodes(0, 3) + self.connect_nodes(1, 3) + self.connect_nodes(2, 3) + self.connect_nodes(2, 0) self.sync_all() def one_send(self, from_node, to_address): @@ -96,10 +95,10 @@ class WalletBackupTest(BitcoinTestFramework): self.start_node(0) 
self.start_node(1) self.start_node(2) - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[1], 3) - connect_nodes(self.nodes[2], 3) - connect_nodes(self.nodes[2], 0) + self.connect_nodes(0, 3) + self.connect_nodes(1, 3) + self.connect_nodes(2, 3) + self.connect_nodes(2, 0) def stop_three(self): self.stop_node(0) @@ -135,11 +134,13 @@ class WalletBackupTest(BitcoinTestFramework): self.log.info("Backing up") self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak')) - self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump')) self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak')) - self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump')) self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak')) - self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump')) + + if not self.options.descriptors: + self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump')) + self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump')) + self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump')) self.log.info("More transactions") for _ in range(5): @@ -183,29 +184,30 @@ class WalletBackupTest(BitcoinTestFramework): assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) - self.log.info("Restoring using dumped wallet") - self.stop_three() - self.erase_three() + if not self.options.descriptors: + self.log.info("Restoring using dumped wallet") + self.stop_three() + self.erase_three() - #start node2 with no chain - shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks')) - shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate')) + #start node2 with no chain + shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks')) + shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate')) - self.start_three() + self.start_three() - assert_equal(self.nodes[0].getbalance(), 0) - assert_equal(self.nodes[1].getbalance(), 0) - assert_equal(self.nodes[2].getbalance(), 0) + assert_equal(self.nodes[0].getbalance(), 0) + assert_equal(self.nodes[1].getbalance(), 0) + assert_equal(self.nodes[2].getbalance(), 0) - self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump')) - self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump')) - self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump')) + self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump')) + self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump')) + self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump')) - self.sync_blocks() + self.sync_blocks() - assert_equal(self.nodes[0].getbalance(), balance0) - assert_equal(self.nodes[1].getbalance(), balance1) - assert_equal(self.nodes[2].getbalance(), balance2) + assert_equal(self.nodes[0].getbalance(), balance0) + assert_equal(self.nodes[1].getbalance(), balance1) + assert_equal(self.nodes[2].getbalance(), balance2) # Backup to source wallet file must fail sourcePaths = [ diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py index e4989b4fea..589fab9992 100755 --- a/test/functional/wallet_balance.py +++ b/test/functional/wallet_balance.py @@ -11,7 +11,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, - 
connect_nodes, ) @@ -262,7 +261,7 @@ class WalletTest(BitcoinTestFramework): # Now confirm tx_orig self.restart_node(1, ['-persistmempool=0']) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_blocks() self.nodes[1].sendrawtransaction(tx_orig) self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY) diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 689a0fa4df..d386d94e0c 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -11,7 +11,6 @@ from test_framework.util import ( assert_equal, assert_fee_amount, assert_raises_rpc_error, - connect_nodes, ) from test_framework.wallet_util import test_address @@ -32,9 +31,9 @@ class WalletTest(BitcoinTestFramework): self.setup_nodes() # Only need nodes 0-2 running at start of test self.stop_node(3) - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 1) + self.connect_nodes(1, 2) + self.connect_nodes(0, 2) self.sync_all(self.nodes[0:3]) def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size): @@ -281,7 +280,7 @@ class WalletTest(BitcoinTestFramework): assert_equal(self.nodes[0].getbalance(), node_0_bal) self.start_node(3, self.nodes[3].extra_args) - connect_nodes(self.nodes[0], 3) + self.connect_nodes(0, 3) self.sync_all() # check if we can list zero value tx as available coins @@ -316,9 +315,9 @@ class WalletTest(BitcoinTestFramework): self.start_node(0, ["-walletbroadcast=0"]) self.start_node(1, ["-walletbroadcast=0"]) self.start_node(2, ["-walletbroadcast=0"]) - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 1) + self.connect_nodes(1, 2) + self.connect_nodes(0, 2) self.sync_all(self.nodes[0:3]) txid_not_broadcast = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) @@ -343,9 +342,9 @@ class WalletTest(BitcoinTestFramework): self.start_node(0) self.start_node(1) self.start_node(2) - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 1) + self.connect_nodes(1, 2) + self.connect_nodes(0, 2) self.sync_blocks(self.nodes[0:3]) self.nodes[0].generate(1) diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py index 62eb15f87a..51e9ec8f40 100755 --- a/test/functional/wallet_descriptor.py +++ b/test/functional/wallet_descriptor.py @@ -21,6 +21,9 @@ class WalletDescriptorTest(BitcoinTestFramework): self.skip_if_no_wallet() def run_test(self): + wallet_info = self.nodes[0].getwalletinfo() + assert_equal(wallet_info['format'], 'bdb') + # Make a descriptor wallet self.log.info("Making a descriptor wallet") self.nodes[0].createwallet(wallet_name="desc1", descriptors=True) @@ -29,6 +32,7 @@ class WalletDescriptorTest(BitcoinTestFramework): # A descriptor wallet should have 100 addresses * 3 types = 300 keys self.log.info("Checking wallet info") wallet_info = self.nodes[0].getwalletinfo() + assert_equal(wallet_info['format'], 'sqlite') assert_equal(wallet_info['keypoolsize'], 300) assert_equal(wallet_info['keypoolsize_hd_internal'], 300) assert 'keypoololdest' not in wallet_info diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py index 5af14ecb8f..d45cf05689 100755 --- a/test/functional/wallet_hd.py +++ b/test/functional/wallet_hd.py @@ -10,7 +10,6 @@ import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( 
assert_equal, - connect_nodes, assert_raises_rpc_error, ) @@ -99,7 +98,7 @@ class WalletHDTest(BitcoinTestFramework): assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i) + "'") assert_equal(hd_info_2["hdmasterfingerprint"], hd_fingerprint) assert_equal(hd_add, hd_add_2) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_all() # Needs rescan @@ -115,7 +114,7 @@ class WalletHDTest(BitcoinTestFramework): os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename), ) self.start_node(1, extra_args=self.extra_args[1]) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) self.sync_all() # Wallet automatically scans blocks older than key on startup assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) @@ -183,7 +182,7 @@ class WalletHDTest(BitcoinTestFramework): # Restart node 1 with keypool of 3 and a different wallet self.nodes[1].createwallet(wallet_name='origin', blank=True) self.restart_node(1, extra_args=['-keypool=3', '-wallet=origin']) - connect_nodes(self.nodes[0], 1) + self.connect_nodes(0, 1) # sethdseed restoring and seeing txs to addresses out of the keypool origin_rpc = self.nodes[1].get_wallet_rpc('origin') diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py index 9d532742ee..aad112b499 100755 --- a/test/functional/wallet_import_rescan.py +++ b/test/functional/wallet_import_rescan.py @@ -22,7 +22,6 @@ happened previously. from test_framework.test_framework import BitcoinTestFramework from test_framework.address import AddressType from test_framework.util import ( - connect_nodes, assert_equal, set_node_times, ) @@ -165,7 +164,7 @@ class ImportRescanTest(BitcoinTestFramework): self.start_nodes() for i in range(1, self.num_nodes): - connect_nodes(self.nodes[i], 0) + self.connect_nodes(i, 0) def run_test(self): # Create one transaction on node 0 with a unique amount for @@ -182,6 +181,7 @@ class ImportRescanTest(BitcoinTestFramework): self.nodes[0].generate(1) # Generate one block for each send variant.confirmation_height = self.nodes[0].getblockcount() variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + self.sync_all() # Conclude sync before calling setmocktime to avoid timeouts # Generate a block further in the future (past the rescan window). 
assert_equal(self.nodes[0].getrawmempool(), []) diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py index 3f865b330c..78e06c5916 100755 --- a/test/functional/wallet_keypool_topup.py +++ b/test/functional/wallet_keypool_topup.py @@ -16,7 +16,6 @@ import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, ) @@ -38,9 +37,9 @@ class KeypoolRestoreTest(BitcoinTestFramework): self.stop_node(1) shutil.copyfile(wallet_path, wallet_backup_path) self.start_node(1, self.extra_args[1]) - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[0], 2) - connect_nodes(self.nodes[0], 3) + self.connect_nodes(0, 1) + self.connect_nodes(0, 2) + self.connect_nodes(0, 3) for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]): @@ -72,7 +71,7 @@ class KeypoolRestoreTest(BitcoinTestFramework): self.stop_node(idx) shutil.copyfile(wallet_backup_path, wallet_path) self.start_node(idx, self.extra_args[idx]) - connect_nodes(self.nodes[0], idx) + self.connect_nodes(0, idx) self.sync_all() self.log.info("Verify keypool is restored and balance is correct") diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py index d4131deabf..09a336b764 100755 --- a/test/functional/wallet_listsinceblock.py +++ b/test/functional/wallet_listsinceblock.py @@ -10,7 +10,6 @@ from test_framework.util import ( assert_array_result, assert_equal, assert_raises_rpc_error, - connect_nodes, ) from decimal import Decimal @@ -26,7 +25,7 @@ class ListSinceBlockTest(BitcoinTestFramework): def run_test(self): # All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have # only one connection. 
(See fPreferredDownload in net_processing) - connect_nodes(self.nodes[1], 2) + self.connect_nodes(1, 2) self.nodes[2].generate(101) self.sync_all() diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index a0787dd289..61791a756c 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -60,8 +60,10 @@ class MultiWalletTest(BitcoinTestFramework): wallet = lambda name: node.get_wallet_rpc(name) def wallet_file(name): + if name == self.default_wallet_name: + return wallet_dir(self.default_wallet_name, self.wallet_data_filename) if os.path.isdir(wallet_dir(name)): - return wallet_dir(name, self.wallet_data_filename) + return wallet_dir(name, "wallet.dat") return wallet_dir(name) assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] }) @@ -77,13 +79,18 @@ class MultiWalletTest(BitcoinTestFramework): # rename wallet.dat to make sure plain wallet file paths (as opposed to # directory paths) can be loaded - os.rename(wallet_dir(self.default_wallet_name, self.wallet_data_filename), wallet_dir("w8")) - # create another dummy wallet for use in testing backups later - self.start_node(0, ["-nowallet", "-wallet=" + self.default_wallet_name]) + self.start_node(0, ["-nowallet", "-wallet=empty", "-wallet=plain"]) + node.createwallet("created") self.stop_nodes() empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat') - os.rename(wallet_dir(self.default_wallet_name, self.wallet_data_filename), empty_wallet) + os.rename(wallet_file("empty"), empty_wallet) + shutil.rmtree(wallet_dir("empty")) + empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat') + os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet) + shutil.rmtree(wallet_dir("created")) + os.rename(wallet_file("plain"), wallet_dir("w8")) + shutil.rmtree(wallet_dir("plain")) # restart node with a mix of wallet names: # w1, w2, w3 - to verify new wallets created when non-existing paths specified @@ -151,7 +158,7 @@ class MultiWalletTest(BitcoinTestFramework): competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir') os.mkdir(competing_wallet_dir) self.restart_node(0, ['-walletdir=' + competing_wallet_dir]) - exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!" + exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!" self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.restart_node(0, extra_args) @@ -246,12 +253,13 @@ class MultiWalletTest(BitcoinTestFramework): assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets') # Fail to load duplicate wallets - path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", self.wallet_data_filename) + path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat") assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0]) # Fail to load duplicate wallets by different ways (directory and filepath) - path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", self.wallet_data_filename) - assert_raises_rpc_error(-4, "Wallet file verification failed. 
Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, self.wallet_data_filename) + if not self.options.descriptors: + path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat") + assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat') # Fail to load if one wallet is a copy of another assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy') @@ -334,9 +342,11 @@ class MultiWalletTest(BitcoinTestFramework): rpc = self.nodes[0].get_wallet_rpc(wallet_name) addr = rpc.getnewaddress() backup = os.path.join(self.options.tmpdir, 'backup.dat') + if os.path.exists(backup): + os.unlink(backup) rpc.backupwallet(backup) self.nodes[0].unloadwallet(wallet_name) - shutil.copyfile(empty_wallet, wallet_file(wallet_name)) + shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name)) self.nodes[0].loadwallet(wallet_name) assert_equal(rpc.getaddressinfo(addr)['ismine'], False) self.nodes[0].unloadwallet(wallet_name) @@ -348,7 +358,10 @@ class MultiWalletTest(BitcoinTestFramework): self.start_node(1) wallet = os.path.join(self.options.tmpdir, 'my_wallet') self.nodes[0].createwallet(wallet) - assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet) + if self.options.descriptors: + assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet) + else: + assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet) self.nodes[0].unloadwallet(wallet) self.nodes[1].loadwallet(wallet) diff --git a/test/functional/wallet_reorgsrestore.py b/test/functional/wallet_reorgsrestore.py index 5c24d466c3..a1d6b098ad 100755 --- a/test/functional/wallet_reorgsrestore.py +++ b/test/functional/wallet_reorgsrestore.py @@ -20,8 +20,6 @@ import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, - disconnect_nodes, ) class ReorgsRestoreTest(BitcoinTestFramework): @@ -38,9 +36,9 @@ class ReorgsRestoreTest(BitcoinTestFramework): self.sync_blocks() # Disconnect node1 from others to reorg its chain later - disconnect_nodes(self.nodes[0], 1) - disconnect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[0], 2) + self.disconnect_nodes(0, 1) + self.disconnect_nodes(1, 2) + self.connect_nodes(0, 2) # Send a tx to be unconfirmed later txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) @@ -50,7 +48,7 @@ class ReorgsRestoreTest(BitcoinTestFramework): assert_equal(tx_before_reorg["confirmations"], 4) # Disconnect node0 from node2 to broadcast a conflict on their respective chains - disconnect_nodes(self.nodes[0], 2) + self.disconnect_nodes(0, 2) nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10")) inputs = [] inputs.append({"txid": txid_conflict_from, "vout": nA}) @@ -69,7 +67,7 @@ class ReorgsRestoreTest(BitcoinTestFramework): self.nodes[2].generate(9) # Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted - connect_nodes(self.nodes[0], 2) + self.connect_nodes(0, 2) self.sync_blocks([self.nodes[0], self.nodes[2]]) conflicted = 
self.nodes[0].gettransaction(conflicted_txid) conflicting = self.nodes[0].gettransaction(conflicting_txid) diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py index 876eb7f29e..60ab2d457e 100755 --- a/test/functional/wallet_send.py +++ b/test/functional/wallet_send.py @@ -316,6 +316,7 @@ class WalletSendTest(BitcoinTestFramework): res = self.nodes[0].sendrawtransaction(hex) self.nodes[0].generate(1) assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 1) + self.sync_all() self.log.info("Lock unspents...") utxo1 = w0.listunspent()[0] diff --git a/test/functional/wallet_txn_clone.py b/test/functional/wallet_txn_clone.py index 33a2a9411e..bdbbb3e530 100755 --- a/test/functional/wallet_txn_clone.py +++ b/test/functional/wallet_txn_clone.py @@ -8,8 +8,6 @@ import io from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, - disconnect_nodes, ) from test_framework.messages import CTransaction, COIN @@ -30,7 +28,7 @@ class TxnMallTest(BitcoinTestFramework): def setup_network(self): # Start with split network: super().setup_network() - disconnect_nodes(self.nodes[1], 2) + self.disconnect_nodes(1, 2) def run_test(self): if self.options.segwit: @@ -118,7 +116,7 @@ class TxnMallTest(BitcoinTestFramework): self.nodes[2].generate(1) # Reconnect the split network, and sync chain: - connect_nodes(self.nodes[1], 2) + self.connect_nodes(1, 2) self.nodes[2].sendrawtransaction(node0_tx2["hex"]) self.nodes[2].sendrawtransaction(tx2["hex"]) self.nodes[2].generate(1) # Mine another block to make sure we sync diff --git a/test/functional/wallet_txn_doublespend.py b/test/functional/wallet_txn_doublespend.py index cac58aeaf2..42de131354 100755 --- a/test/functional/wallet_txn_doublespend.py +++ b/test/functional/wallet_txn_doublespend.py @@ -8,8 +8,6 @@ from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - connect_nodes, - disconnect_nodes, find_output, ) @@ -28,7 +26,7 @@ class TxnMallTest(BitcoinTestFramework): def setup_network(self): # Start with split network: super().setup_network() - disconnect_nodes(self.nodes[1], 2) + self.disconnect_nodes(1, 2) def run_test(self): # All nodes should start with 1,250 BTC: @@ -116,7 +114,7 @@ class TxnMallTest(BitcoinTestFramework): self.nodes[2].generate(1) # Reconnect the split network, and sync chain: - connect_nodes(self.nodes[1], 2) + self.connect_nodes(1, 2) self.nodes[2].generate(1) # Mine another block to make sure we sync self.sync_blocks() assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2) |
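
The reworked create_block()/create_coinbase() in blocktools.py above can be driven directly from a getblocktemplate result. A minimal usage sketch, not part of the diff itself; it assumes a regtest TestNode handle is passed in as node from a running functional test:

from test_framework.blocktools import NORMAL_GBT_REQUEST_PARAMS, create_block

def mine_next_block(node):
    # Fetch a segwit-rules template; height, previousblockhash, curtime and bits
    # are all read from it, and the coinbase defaults to
    # create_coinbase(height=tmpl['height']).
    tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
    block = create_block(tmpl=tmpl)
    block.solve()
    return node.submitblock(block.serialize().hex())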
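
The x-only key helpers added to test_framework/key.py stand alone. A minimal sign/verify round trip as a sketch, assuming it is run from test/functional so the package import resolves; the message here is just random bytes rather than a real sighash:

import os

from test_framework.key import (
    compute_xonly_pubkey,
    generate_privkey,
    sign_schnorr,
    verify_schnorr,
)

privkey = generate_privkey()               # random 32-byte secret key
pubkey, _ = compute_xonly_pubkey(privkey)  # 32-byte x-only public key (BIP 340)
msg = os.urandom(32)                       # any 32-byte message, normally a sighash
sig = sign_schnorr(privkey, msg)           # 64-byte BIP 340 signature
assert verify_schnorr(pubkey, sig, msg)
damaged_sig = bytes([sig[0] ^ 1]) + sig[1:]   # flip one bit, as the unit test above does
assert not verify_schnorr(pubkey, damaged_sig, msg)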
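
The Taproot helpers in test_framework/script.py compose with the key module. A sketch of a key-path spend sighash and signature; the transaction, outpoint, amounts and leaf name are invented for illustration, and the 32-byte x-only internal key bytes are what taproot_construct's tweak arithmetic operates on:

from test_framework.key import (
    compute_xonly_pubkey,
    generate_privkey,
    sign_schnorr,
    tweak_add_privkey,
    verify_schnorr,
)
from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.script import (
    CScript,
    OP_TRUE,
    SIGHASH_DEFAULT,
    TaprootSignatureHash,
    taproot_construct,
)

privkey = generate_privkey()
internal_key, _ = compute_xonly_pubkey(privkey)

# One tapscript leaf; "true" is only a label used as the key into info.leaves.
info = taproot_construct(internal_key, [("true", CScript([OP_TRUE]))])

# Dummy 1-in/1-out transaction spending the taproot output built above.
utxo = CTxOut(nValue=100000000, scriptPubKey=info.scriptPubKey)   # 1 BTC in satoshis
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(0xdeadbeef, 0))]
tx.vout = [CTxOut(99990000, CScript([OP_TRUE]))]

# Key-path spend: sign the BIP 341 sighash with the tweaked private key.
sighash = TaprootSignatureHash(tx, [utxo], SIGHASH_DEFAULT, input_index=0)
tweaked_privkey = tweak_add_privkey(privkey, info.tweak)
sig = sign_schnorr(tweaked_privkey, sighash)          # goes into the input witness
output_key = bytes(info.scriptPubKey)[2:34]           # the x-only key after OP_1
assert verify_schnorr(output_key, sig, sighash)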