Diffstat (limited to 'src/secp256k1')
-rw-r--r--  src/secp256k1/.gitignore | 4
-rw-r--r--  src/secp256k1/.travis.yml | 34
-rw-r--r--  src/secp256k1/Makefile.am | 12
-rw-r--r--  src/secp256k1/README.md | 2
-rw-r--r--  src/secp256k1/TODO | 3
-rw-r--r--  src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 5
-rw-r--r--  src/secp256k1/configure.ac | 164
-rw-r--r--  src/secp256k1/contrib/lax_der_parsing.c | 1
-rwxr-xr-x  src/secp256k1/contrib/travis.sh | 25
-rw-r--r--  src/secp256k1/include/secp256k1.h | 2
-rw-r--r--  src/secp256k1/include/secp256k1_extrakeys.h | 236
-rw-r--r--  src/secp256k1/include/secp256k1_schnorrsig.h | 111
-rw-r--r--  src/secp256k1/sage/gen_exhaustive_groups.sage | 129
-rw-r--r--  src/secp256k1/src/assumptions.h | 80
-rw-r--r--  src/secp256k1/src/basic-config.h | 10
-rw-r--r--  src/secp256k1/src/bench_internal.c | 178
-rw-r--r--  src/secp256k1/src/bench_schnorrsig.c | 102
-rw-r--r--  src/secp256k1/src/ecmult.h | 2
-rw-r--r--  src/secp256k1/src/ecmult_const_impl.h | 36
-rw-r--r--  src/secp256k1/src/ecmult_impl.h | 157
-rw-r--r--  src/secp256k1/src/field.h | 12
-rw-r--r--  src/secp256k1/src/field_5x52.h | 6
-rw-r--r--  src/secp256k1/src/field_impl.h | 8
-rw-r--r--  src/secp256k1/src/gen_context.c | 1
-rw-r--r--  src/secp256k1/src/group.h | 18
-rw-r--r--  src/secp256k1/src/group_impl.h | 131
-rw-r--r--  src/secp256k1/src/hash_impl.h | 18
-rw-r--r--  src/secp256k1/src/modules/ecdh/tests_impl.h | 4
-rw-r--r--  src/secp256k1/src/modules/extrakeys/Makefile.am.include | 4
-rw-r--r--  src/secp256k1/src/modules/extrakeys/main_impl.h | 251
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h | 68
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_impl.h | 524
-rw-r--r--  src/secp256k1/src/modules/recovery/Makefile.am.include | 1
-rw-r--r--  src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h | 149
-rw-r--r--  src/secp256k1/src/modules/recovery/tests_impl.h | 10
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/Makefile.am.include | 9
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/main_impl.h | 239
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h | 206
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_impl.h | 806
-rw-r--r--  src/secp256k1/src/scalar.h | 18
-rw-r--r--  src/secp256k1/src/scalar_4x64_impl.h | 32
-rw-r--r--  src/secp256k1/src/scalar_8x32_impl.h | 40
-rw-r--r--  src/secp256k1/src/scalar_impl.h | 256
-rw-r--r--  src/secp256k1/src/scalar_low_impl.h | 11
-rw-r--r--  src/secp256k1/src/scratch_impl.h | 25
-rw-r--r--  src/secp256k1/src/secp256k1.c | 81
-rw-r--r--  src/secp256k1/src/selftest.h | 32
-rw-r--r--  src/secp256k1/src/testrand.h | 23
-rw-r--r--  src/secp256k1/src/testrand_impl.h | 72
-rw-r--r--  src/secp256k1/src/tests.c | 668
-rw-r--r--  src/secp256k1/src/tests_exhaustive.c | 379
-rw-r--r--  src/secp256k1/src/util.h | 76
-rw-r--r--  src/secp256k1/src/valgrind_ctime_test.c | 50
53 files changed, 4402 insertions, 1119 deletions
diff --git a/src/secp256k1/.gitignore b/src/secp256k1/.gitignore
index cb4331aa90..ccdef02b29 100644
--- a/src/secp256k1/.gitignore
+++ b/src/secp256k1/.gitignore
@@ -1,9 +1,9 @@
bench_inv
bench_ecdh
bench_ecmult
+bench_schnorrsig
bench_sign
bench_verify
-bench_schnorr_verify
bench_recover
bench_internal
tests
@@ -31,6 +31,8 @@ libtool
*.lo
*.o
*~
+*.log
+*.trs
src/libsecp256k1-config.h
src/libsecp256k1-config.h.in
src/ecmult_static_context.h
diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml
index a6ad6fb27e..bcc8c210f5 100644
--- a/src/secp256k1/.travis.yml
+++ b/src/secp256k1/.travis.yml
@@ -17,33 +17,29 @@ compiler:
- gcc
env:
global:
- - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
+ - WIDEMUL=auto BIGNUM=auto STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check WITH_VALGRIND=yes RUN_VALGRIND=no EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
matrix:
- - SCALAR=32bit RECOVERY=yes
- - SCALAR=32bit FIELD=32bit ECDH=yes EXPERIMENTAL=yes
- - SCALAR=64bit
- - FIELD=64bit RECOVERY=yes
- - FIELD=64bit ENDOMORPHISM=yes
- - FIELD=64bit ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes
- - FIELD=64bit ASM=x86_64
- - FIELD=64bit ENDOMORPHISM=yes ASM=x86_64
- - FIELD=32bit ENDOMORPHISM=yes
+ - WIDEMUL=int64 RECOVERY=yes
+ - WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
+ - WIDEMUL=int128
+ - WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
+ - WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
+ - WIDEMUL=int128 ASM=x86_64
- BIGNUM=no
- - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes
+ - BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- - BUILD=distcheck CTIMETEST= BENCH=
+ - BUILD=distcheck WITH_VALGRIND=no CTIMETEST=no BENCH=no
- CPPFLAGS=-DDETERMINISTIC
- - CFLAGS=-O0 CTIMETEST=
+ - CFLAGS=-O0 CTIMETEST=no
- ECMULTGENPRECISION=2
- ECMULTGENPRECISION=8
- - VALGRIND=yes ENDOMORPHISM=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD=
- - VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD=
+ - RUN_VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" BUILD=
matrix:
fast_finish: true
include:
- compiler: clang
os: linux
- env: HOST=i686-linux-gnu ENDOMORPHISM=yes
+ env: HOST=i686-linux-gnu
addons:
apt:
packages:
@@ -63,7 +59,7 @@ matrix:
- libtool-bin
- libc6-dbg:i386
- compiler: gcc
- env: HOST=i686-linux-gnu ENDOMORPHISM=yes
+ env: HOST=i686-linux-gnu
os: linux
addons:
apt:
@@ -83,6 +79,10 @@ matrix:
- valgrind
- libtool-bin
- libc6-dbg:i386
+ # S390x build (big endian system)
+ - compiler: gcc
+ env: HOST=s390x-unknown-linux-gnu ECDH=yes RECOVERY=yes EXPERIMENTAL=yes CTIMETEST=
+ arch: s390x
# We use this to install macOS dependencies instead of the built in `homebrew` plugin,
# because in xcode earlier than 11 they have a bug requiring updating the system which overall takes ~8 minutes.
diff --git a/src/secp256k1/Makefile.am b/src/secp256k1/Makefile.am
index d8c1c79e8c..023fa6067f 100644
--- a/src/secp256k1/Makefile.am
+++ b/src/secp256k1/Makefile.am
@@ -34,9 +34,11 @@ noinst_HEADERS += src/field_5x52.h
noinst_HEADERS += src/field_5x52_impl.h
noinst_HEADERS += src/field_5x52_int128_impl.h
noinst_HEADERS += src/field_5x52_asm_impl.h
+noinst_HEADERS += src/assumptions.h
noinst_HEADERS += src/util.h
noinst_HEADERS += src/scratch.h
noinst_HEADERS += src/scratch_impl.h
+noinst_HEADERS += src/selftest.h
noinst_HEADERS += src/testrand.h
noinst_HEADERS += src/testrand_impl.h
noinst_HEADERS += src/hash.h
@@ -99,7 +101,7 @@ if VALGRIND_ENABLED
tests_CPPFLAGS += -DVALGRIND
noinst_PROGRAMS += valgrind_ctime_test
valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c
-valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
+valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
endif
if !ENABLE_COVERAGE
tests_CPPFLAGS += -DVERIFY
@@ -152,3 +154,11 @@ endif
if ENABLE_MODULE_RECOVERY
include src/modules/recovery/Makefile.am.include
endif
+
+if ENABLE_MODULE_EXTRAKEYS
+include src/modules/extrakeys/Makefile.am.include
+endif
+
+if ENABLE_MODULE_SCHNORRSIG
+include src/modules/schnorrsig/Makefile.am.include
+endif
diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md
index 434178b372..2602475787 100644
--- a/src/secp256k1/README.md
+++ b/src/secp256k1/README.md
@@ -48,7 +48,7 @@ Implementation details
* Use wNAF notation for point multiplicands.
* Use a much larger window for multiples of G, using precomputed multiples.
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
- * Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
+ * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Point multiplication for signing
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
diff --git a/src/secp256k1/TODO b/src/secp256k1/TODO
deleted file mode 100644
index a300e1c5eb..0000000000
--- a/src/secp256k1/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-* Unit tests for fieldelem/groupelem, including ones intended to
- trigger fieldelem's boundary cases.
-* Complete constant-time operations for signing/keygen
diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
index 1b2b71e6ab..57595f4499 100644
--- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4
+++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
@@ -1,8 +1,3 @@
-dnl libsecp25k1 helper checks
-AC_DEFUN([SECP_INT128_CHECK],[
-has_int128=$ac_cv_type___int128
-])
-
dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell.
AC_DEFUN([SECP_64BIT_ASM_CHECK],[
AC_MSG_CHECKING(for x86_64 assembly availability)
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 6021b760b5..5a078e6c81 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -67,7 +67,7 @@ esac
CFLAGS="-W $CFLAGS"
-warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
+warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
saved_CFLAGS="$CFLAGS"
CFLAGS="$warn_CFLAGS $CFLAGS"
AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}])
@@ -116,11 +116,6 @@ AC_ARG_ENABLE(exhaustive_tests,
[use_exhaustive_tests=$enableval],
[use_exhaustive_tests=yes])
-AC_ARG_ENABLE(endomorphism,
- AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
- [use_endomorphism=$enableval],
- [use_endomorphism=no])
-
AC_ARG_ENABLE(ecmult_static_precomputation,
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
[use_ecmult_static_precomputation=$enableval],
@@ -136,28 +131,35 @@ AC_ARG_ENABLE(module_recovery,
[enable_module_recovery=$enableval],
[enable_module_recovery=no])
+AC_ARG_ENABLE(module_extrakeys,
+ AS_HELP_STRING([--enable-module-extrakeys],[enable extrakeys module (experimental)]),
+ [enable_module_extrakeys=$enableval],
+ [enable_module_extrakeys=no])
+
+AC_ARG_ENABLE(module_schnorrsig,
+ AS_HELP_STRING([--enable-module-schnorrsig],[enable schnorrsig module (experimental)]),
+ [enable_module_schnorrsig=$enableval],
+ [enable_module_schnorrsig=no])
+
AC_ARG_ENABLE(external_default_callbacks,
AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions [default=no]]),
[use_external_default_callbacks=$enableval],
[use_external_default_callbacks=no])
-AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto],
-[finite field implementation to use [default=auto]])],[req_field=$withval], [req_field=auto])
+dnl Test-only override of the (autodetected by the C code) "widemul" setting.
+dnl Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default).
+AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto])
AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto])
-AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
-[scalar implementation to use [default=auto]])],[req_scalar=$withval], [req_scalar=auto])
-
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
-[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
-[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
+[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
)],
[req_ecmult_window=$withval], [req_ecmult_window=auto])
@@ -170,9 +172,21 @@ AC_ARG_WITH([ecmult-gen-precision], [AS_HELP_STRING([--with-ecmult-gen-precision
)],
[req_ecmult_gen_precision=$withval], [req_ecmult_gen_precision=auto])
-AC_CHECK_TYPES([__int128])
+AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto],
+[Build with extra checks for running inside Valgrind [default=auto]]
+)],
+[req_valgrind=$withval], [req_valgrind=auto])
-AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [enable_valgrind=no], [])
+if test x"$req_valgrind" = x"no"; then
+ enable_valgrind=no
+else
+ AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [
+ if test x"$req_valgrind" = x"yes"; then
+ AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available])
+ fi
+ enable_valgrind=no
+ ], [])
+fi
AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
if test x"$enable_coverage" = x"yes"; then
@@ -265,63 +279,6 @@ else
esac
fi
-if test x"$req_field" = x"auto"; then
- if test x"set_asm" = x"x86_64"; then
- set_field=64bit
- fi
- if test x"$set_field" = x; then
- SECP_INT128_CHECK
- if test x"$has_int128" = x"yes"; then
- set_field=64bit
- fi
- fi
- if test x"$set_field" = x; then
- set_field=32bit
- fi
-else
- set_field=$req_field
- case $set_field in
- 64bit)
- if test x"$set_asm" != x"x86_64"; then
- SECP_INT128_CHECK
- if test x"$has_int128" != x"yes"; then
- AC_MSG_ERROR([64bit field explicitly requested but neither __int128 support or x86_64 assembly available])
- fi
- fi
- ;;
- 32bit)
- ;;
- *)
- AC_MSG_ERROR([invalid field implementation selection])
- ;;
- esac
-fi
-
-if test x"$req_scalar" = x"auto"; then
- SECP_INT128_CHECK
- if test x"$has_int128" = x"yes"; then
- set_scalar=64bit
- fi
- if test x"$set_scalar" = x; then
- set_scalar=32bit
- fi
-else
- set_scalar=$req_scalar
- case $set_scalar in
- 64bit)
- SECP_INT128_CHECK
- if test x"$has_int128" != x"yes"; then
- AC_MSG_ERROR([64bit scalar explicitly requested but __int128 support not available])
- fi
- ;;
- 32bit)
- ;;
- *)
- AC_MSG_ERROR([invalid scalar implementation selected])
- ;;
- esac
-fi
-
if test x"$req_bignum" = x"auto"; then
SECP_GMP_CHECK
if test x"$has_gmp" = x"yes"; then
@@ -365,16 +322,18 @@ no)
;;
esac
-# select field implementation
-case $set_field in
-64bit)
- AC_DEFINE(USE_FIELD_5X52, 1, [Define this symbol to use the FIELD_5X52 implementation])
+# select wide multiplication implementation
+case $set_widemul in
+int128)
+ AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation])
+ ;;
+int64)
+ AC_DEFINE(USE_FORCE_WIDEMUL_INT64, 1, [Define this symbol to force the use of the (u)int64_t based wide multiplication implementation])
;;
-32bit)
- AC_DEFINE(USE_FIELD_10X26, 1, [Define this symbol to use the FIELD_10X26 implementation])
+auto)
;;
*)
- AC_MSG_ERROR([invalid field implementation])
+ AC_MSG_ERROR([invalid wide multiplication implementation])
;;
esac
@@ -396,19 +355,6 @@ no)
;;
esac
-#select scalar implementation
-case $set_scalar in
-64bit)
- AC_DEFINE(USE_SCALAR_4X64, 1, [Define this symbol to use the 4x64 scalar implementation])
- ;;
-32bit)
- AC_DEFINE(USE_SCALAR_8X32, 1, [Define this symbol to use the 8x32 scalar implementation])
- ;;
-*)
- AC_MSG_ERROR([invalid scalar implementation])
- ;;
-esac
-
#set ecmult window size
if test x"$req_ecmult_window" = x"auto"; then
set_ecmult_window=15
@@ -477,10 +423,6 @@ if test x"$set_bignum" = x"gmp"; then
SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
fi
-if test x"$use_endomorphism" = x"yes"; then
- AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
-fi
-
if test x"$set_precomp" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
@@ -493,7 +435,16 @@ if test x"$enable_module_recovery" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module])
fi
-AC_C_BIGENDIAN()
+if test x"$enable_module_schnorrsig" = x"yes"; then
+ AC_DEFINE(ENABLE_MODULE_SCHNORRSIG, 1, [Define this symbol to enable the schnorrsig module])
+ enable_module_extrakeys=yes
+fi
+
+# Test if extrakeys is set after the schnorrsig module to allow the schnorrsig
+# module to set enable_module_extrakeys=yes
+if test x"$enable_module_extrakeys" = x"yes"; then
+ AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module])
+fi
if test x"$use_external_asm" = x"yes"; then
AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
@@ -508,11 +459,19 @@ if test x"$enable_experimental" = x"yes"; then
AC_MSG_NOTICE([WARNING: experimental build])
AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.])
AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh])
+ AC_MSG_NOTICE([Building extrakeys module: $enable_module_extrakeys])
+ AC_MSG_NOTICE([Building schnorrsig module: $enable_module_schnorrsig])
AC_MSG_NOTICE([******])
else
if test x"$enable_module_ecdh" = x"yes"; then
AC_MSG_ERROR([ECDH module is experimental. Use --enable-experimental to allow.])
fi
+ if test x"$enable_module_extrakeys" = x"yes"; then
+ AC_MSG_ERROR([extrakeys module is experimental. Use --enable-experimental to allow.])
+ fi
+ if test x"$enable_module_schnorrsig" = x"yes"; then
+ AC_MSG_ERROR([schnorrsig module is experimental. Use --enable-experimental to allow.])
+ fi
if test x"$set_asm" = x"arm"; then
AC_MSG_ERROR([ARM assembly optimization is experimental. Use --enable-experimental to allow.])
fi
@@ -531,6 +490,8 @@ AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"])
AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"])
+AM_CONDITIONAL([ENABLE_MODULE_EXTRAKEYS], [test x"$enable_module_extrakeys" = x"yes"])
+AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" = x"yes"])
AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"])
AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"])
@@ -543,20 +504,23 @@ AC_OUTPUT
echo
echo "Build Options:"
-echo " with endomorphism = $use_endomorphism"
echo " with ecmult precomp = $set_precomp"
echo " with external callbacks = $use_external_default_callbacks"
echo " with benchmarks = $use_benchmark"
echo " with coverage = $enable_coverage"
echo " module ecdh = $enable_module_ecdh"
echo " module recovery = $enable_module_recovery"
+echo " module extrakeys = $enable_module_extrakeys"
+echo " module schnorrsig = $enable_module_schnorrsig"
echo
echo " asm = $set_asm"
echo " bignum = $set_bignum"
-echo " field = $set_field"
-echo " scalar = $set_scalar"
echo " ecmult window size = $set_ecmult_window"
echo " ecmult gen prec. bits = $set_ecmult_gen_precision"
+dnl Hide test-only options unless they're used.
+if test x"$set_widemul" != xauto; then
+echo " wide multiplication = $set_widemul"
+fi
echo
echo " valgrind = $enable_valgrind"
echo " CC = $CC"
diff --git a/src/secp256k1/contrib/lax_der_parsing.c b/src/secp256k1/contrib/lax_der_parsing.c
index e177a0562d..f71db4b535 100644
--- a/src/secp256k1/contrib/lax_der_parsing.c
+++ b/src/secp256k1/contrib/lax_der_parsing.c
@@ -112,7 +112,6 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
return 0;
}
spos = pos;
- pos += slen;
/* Ignore leading zeroes in R */
while (rlen > 0 && input[rpos] == 0) {
diff --git a/src/secp256k1/contrib/travis.sh b/src/secp256k1/contrib/travis.sh
index 3909d16a27..24cc9315cb 100755
--- a/src/secp256k1/contrib/travis.sh
+++ b/src/secp256k1/contrib/travis.sh
@@ -3,10 +3,6 @@
set -e
set -x
-if [ -n "$HOST" ]
-then
- export USE_HOST="--host=$HOST"
-fi
if [ "$HOST" = "i686-linux-gnu" ]
then
export CC="$CC -m32"
@@ -17,25 +13,28 @@ then
fi
./configure \
- --enable-experimental="$EXPERIMENTAL" --enable-endomorphism="$ENDOMORPHISM" \
- --with-field="$FIELD" --with-bignum="$BIGNUM" --with-asm="$ASM" --with-scalar="$SCALAR" \
+ --enable-experimental="$EXPERIMENTAL" \
+ --with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \
--enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \
- --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" "$EXTRAFLAGS" "$USE_HOST"
+ --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
+ --enable-module-schnorrsig="$SCHNORRSIG" \
+ --with-valgrind="$WITH_VALGRIND" \
+ --host="$HOST" $EXTRAFLAGS
if [ -n "$BUILD" ]
then
make -j2 "$BUILD"
fi
-if [ -n "$VALGRIND" ]
+if [ "$RUN_VALGRIND" = "yes" ]
then
make -j2
# the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (http://valgrind.org/docs/manual/manual-core.html)
valgrind --error-exitcode=42 ./tests 16
valgrind --error-exitcode=42 ./exhaustive_tests
fi
-if [ -n "$BENCH" ]
+if [ "$BENCH" = "yes" ]
then
- if [ -n "$VALGRIND" ]
+ if [ "$RUN_VALGRIND" = "yes" ]
then
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
EXEC='./libtool --mode=execute valgrind --error-exitcode=42'
@@ -58,8 +57,12 @@ then
then
$EXEC ./bench_ecdh >> bench.log 2>&1
fi
+ if [ "$SCHNORRSIG" = "yes" ]
+ then
+ $EXEC ./bench_schnorrsig >> bench.log 2>&1
+ fi
fi
-if [ -n "$CTIMETEST" ]
+if [ "$CTIMETEST" = "yes" ]
then
./libtool --mode=execute valgrind --error-exitcode=42 ./valgrind_ctime_test > valgrind_ctime_test.log 2>&1
fi
diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h
index 2ba2dca388..2178c8e2d6 100644
--- a/src/secp256k1/include/secp256k1.h
+++ b/src/secp256k1/include/secp256k1.h
@@ -134,7 +134,7 @@ typedef int (*secp256k1_nonce_function)(
# else
# define SECP256K1_API
# endif
-# elif defined(__GNUC__) && defined(SECP256K1_BUILD)
+# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(SECP256K1_BUILD)
# define SECP256K1_API __attribute__ ((visibility ("default")))
# else
# define SECP256K1_API
diff --git a/src/secp256k1/include/secp256k1_extrakeys.h b/src/secp256k1/include/secp256k1_extrakeys.h
new file mode 100644
index 0000000000..0c5dff2c94
--- /dev/null
+++ b/src/secp256k1/include/secp256k1_extrakeys.h
@@ -0,0 +1,236 @@
+#ifndef SECP256K1_EXTRAKEYS_H
+#define SECP256K1_EXTRAKEYS_H
+
+#include "secp256k1.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Opaque data structure that holds a parsed and valid "x-only" public key.
+ * An x-only pubkey encodes a point whose Y coordinate is even. It is
+ * serialized using only its X coordinate (32 bytes). See BIP-340 for more
+ * information about x-only pubkeys.
+ *
+ * The exact representation of data inside is implementation defined and not
+ * guaranteed to be portable between different platforms or versions. It is
+ * however guaranteed to be 64 bytes in size, and can be safely copied/moved.
+ * If you need to convert to a format suitable for storage, transmission, or
+ * comparison, use secp256k1_xonly_pubkey_serialize and
+ * secp256k1_xonly_pubkey_parse.
+ */
+typedef struct {
+ unsigned char data[64];
+} secp256k1_xonly_pubkey;
+
+/** Opaque data structure that holds a keypair consisting of a secret and a
+ * public key.
+ *
+ * The exact representation of data inside is implementation defined and not
+ * guaranteed to be portable between different platforms or versions. It is
+ * however guaranteed to be 96 bytes in size, and can be safely copied/moved.
+ */
+typedef struct {
+ unsigned char data[96];
+} secp256k1_keypair;
+
+/** Parse a 32-byte sequence into a xonly_pubkey object.
+ *
+ * Returns: 1 if the public key was fully valid.
+ * 0 if the public key could not be parsed or is invalid.
+ *
+ * Args: ctx: a secp256k1 context object (cannot be NULL).
+ * Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to a
+ * parsed version of input. If not, it's set to an invalid value.
+ * (cannot be NULL).
+ * In: input32: pointer to a serialized xonly_pubkey (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_xonly_pubkey_parse(
+ const secp256k1_context* ctx,
+ secp256k1_xonly_pubkey* pubkey,
+ const unsigned char *input32
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
+/** Serialize an xonly_pubkey object into a 32-byte sequence.
+ *
+ * Returns: 1 always.
+ *
+ * Args: ctx: a secp256k1 context object (cannot be NULL).
+ * Out: output32: a pointer to a 32-byte array to place the serialized key in
+ * (cannot be NULL).
+ * In: pubkey: a pointer to a secp256k1_xonly_pubkey containing an
+ * initialized public key (cannot be NULL).
+ */
+SECP256K1_API int secp256k1_xonly_pubkey_serialize(
+ const secp256k1_context* ctx,
+ unsigned char *output32,
+ const secp256k1_xonly_pubkey* pubkey
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
+/** Converts a secp256k1_pubkey into a secp256k1_xonly_pubkey.
+ *
+ * Returns: 1 if the public key was successfully converted
+ * 0 otherwise
+ *
+ * Args: ctx: pointer to a context object (cannot be NULL)
+ * Out: xonly_pubkey: pointer to an x-only public key object for placing the
+ * converted public key (cannot be NULL)
+ * pk_parity: pointer to an integer that will be set to 1 if the point
+ * encoded by xonly_pubkey is the negation of the pubkey and
+ * set to 0 otherwise. (can be NULL)
+ * In: pubkey: pointer to a public key that is converted (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_xonly_pubkey_from_pubkey(
+ const secp256k1_context* ctx,
+ secp256k1_xonly_pubkey *xonly_pubkey,
+ int *pk_parity,
+ const secp256k1_pubkey *pubkey
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
+
+/** Tweak an x-only public key by adding the generator multiplied with tweak32
+ * to it.
+ *
+ * Note that the resulting point can not in general be represented by an x-only
+ * pubkey because it may have an odd Y coordinate. Instead, the output_pubkey
+ * is a normal secp256k1_pubkey.
+ *
+ * Returns: 0 if the arguments are invalid or the resulting public key would be
+ * invalid (only when the tweak is the negation of the corresponding
+ * secret key). 1 otherwise.
+ *
+ * Args: ctx: pointer to a context object initialized for verification
+ * (cannot be NULL)
+ * Out: output_pubkey: pointer to a public key to store the result. Will be set
+ * to an invalid value if this function returns 0 (cannot
+ * be NULL)
+ * In: internal_pubkey: pointer to an x-only pubkey to apply the tweak to.
+ * (cannot be NULL).
+ * tweak32: pointer to a 32-byte tweak. If the tweak is invalid
+ * according to secp256k1_ec_seckey_verify, this function
+ * returns 0. For uniformly random 32-byte arrays the
+ * chance of being invalid is negligible (around 1 in
+ * 2^128) (cannot be NULL).
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_xonly_pubkey_tweak_add(
+ const secp256k1_context* ctx,
+ secp256k1_pubkey *output_pubkey,
+ const secp256k1_xonly_pubkey *internal_pubkey,
+ const unsigned char *tweak32
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
+
+/** Checks that a tweaked pubkey is the result of calling
+ * secp256k1_xonly_pubkey_tweak_add with internal_pubkey and tweak32.
+ *
+ * The tweaked pubkey is represented by its 32-byte x-only serialization and
+ * its pk_parity, which can both be obtained by converting the result of
+ * tweak_add to a secp256k1_xonly_pubkey.
+ *
+ * Note that this alone does _not_ verify that the tweaked pubkey is a
+ * commitment. If the tweak is not chosen in a specific way, the tweaked pubkey
+ * can easily be the result of a different internal_pubkey and tweak.
+ *
+ * Returns: 0 if the arguments are invalid or the tweaked pubkey is not the
+ * result of tweaking the internal_pubkey with tweak32. 1 otherwise.
+ * Args: ctx: pointer to a context object initialized for verification
+ * (cannot be NULL)
+ * In: tweaked_pubkey32: pointer to a serialized xonly_pubkey (cannot be NULL)
+ * tweaked_pk_parity: the parity of the tweaked pubkey (whose serialization
+ * is passed in as tweaked_pubkey32). This must match the
+ * pk_parity value that is returned when calling
+ * secp256k1_xonly_pubkey with the tweaked pubkey, or
+ * this function will fail.
+ * internal_pubkey: pointer to an x-only public key object to apply the
+ * tweak to (cannot be NULL)
+ * tweak32: pointer to a 32-byte tweak (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_xonly_pubkey_tweak_add_check(
+ const secp256k1_context* ctx,
+ const unsigned char *tweaked_pubkey32,
+ int tweaked_pk_parity,
+ const secp256k1_xonly_pubkey *internal_pubkey,
+ const unsigned char *tweak32
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5);
+
+/** Compute the keypair for a secret key.
+ *
+ * Returns: 1: secret was valid, keypair is ready to use
+ * 0: secret was invalid, try again with a different secret
+ * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL)
+ * Out: keypair: pointer to the created keypair (cannot be NULL)
+ * In: seckey: pointer to a 32-byte secret key (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_keypair_create(
+ const secp256k1_context* ctx,
+ secp256k1_keypair *keypair,
+ const unsigned char *seckey
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
+/** Get the public key from a keypair.
+ *
+ * Returns: 0 if the arguments are invalid. 1 otherwise.
+ * Args: ctx: pointer to a context object (cannot be NULL)
+ * Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to
+ * the keypair public key. If not, it's set to an invalid value.
+ * (cannot be NULL)
+ * In: keypair: pointer to a keypair (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_keypair_pub(
+ const secp256k1_context* ctx,
+ secp256k1_pubkey *pubkey,
+ const secp256k1_keypair *keypair
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
+/** Get the x-only public key from a keypair.
+ *
+ * This is the same as calling secp256k1_keypair_pub and then
+ * secp256k1_xonly_pubkey_from_pubkey.
+ *
+ * Returns: 0 if the arguments are invalid. 1 otherwise.
+ * Args: ctx: pointer to a context object (cannot be NULL)
+ * Out: pubkey: pointer to an xonly_pubkey object. If 1 is returned, it is set
+ * to the keypair public key after converting it to an
+ * xonly_pubkey. If not, it's set to an invalid value (cannot be
+ * NULL).
+ * pk_parity: pointer to an integer that will be set to the pk_parity
+ * argument of secp256k1_xonly_pubkey_from_pubkey (can be NULL).
+ * In: keypair: pointer to a keypair (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_keypair_xonly_pub(
+ const secp256k1_context* ctx,
+ secp256k1_xonly_pubkey *pubkey,
+ int *pk_parity,
+ const secp256k1_keypair *keypair
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
+
+/** Tweak a keypair by adding tweak32 to the secret key and updating the public
+ * key accordingly.
+ *
+ * Calling this function and then secp256k1_keypair_pub results in the same
+ * public key as calling secp256k1_keypair_xonly_pub and then
+ * secp256k1_xonly_pubkey_tweak_add.
+ *
+ * Returns: 0 if the arguments are invalid or the resulting keypair would be
+ * invalid (only when the tweak is the negation of the keypair's
+ * secret key). 1 otherwise.
+ *
+ * Args: ctx: pointer to a context object initialized for verification
+ * (cannot be NULL)
+ * In/Out: keypair: pointer to a keypair to apply the tweak to. Will be set to
+ * an invalid value if this function returns 0 (cannot be
+ * NULL).
+ * In: tweak32: pointer to a 32-byte tweak. If the tweak is invalid according
+ * to secp256k1_ec_seckey_verify, this function returns 0. For
+ * uniformly random 32-byte arrays the chance of being invalid
+ * is negligible (around 1 in 2^128) (cannot be NULL).
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_keypair_xonly_tweak_add(
+ const secp256k1_context* ctx,
+ secp256k1_keypair *keypair,
+ const unsigned char *tweak32
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SECP256K1_EXTRAKEYS_H */
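[Editor's note] A minimal usage sketch of the extrakeys API declared above (not part of the diff). It assumes secp256k1_context_create/secp256k1_context_destroy and the SECP256K1_CONTEXT_SIGN/SECP256K1_CONTEXT_VERIFY flags from secp256k1.h, which this section does not show; xonly_tweak_demo, seckey32 and tweak32 are hypothetical names for caller-provided 32-byte inputs, and error handling is reduced to early exits.

    #include "secp256k1.h"
    #include "secp256k1_extrakeys.h"

    /* Sketch: derive an x-only pubkey from a secret key, tweak it, and check the
       tweak commitment, using only functions declared in secp256k1_extrakeys.h. */
    int xonly_tweak_demo(const unsigned char *seckey32, const unsigned char *tweak32) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        secp256k1_keypair keypair;
        secp256k1_xonly_pubkey internal_pk, tweaked_pk;
        secp256k1_pubkey output_pk;
        unsigned char tweaked_pk32[32];
        int pk_parity, ok = 0;

        if (!secp256k1_keypair_create(ctx, &keypair, seckey32)) goto done;
        /* x-only view of the keypair's public key; the parity output is not needed here. */
        if (!secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair)) goto done;
        /* The tweaked result is a normal pubkey because its Y coordinate may be odd. */
        if (!secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_pk, tweak32)) goto done;
        /* Convert back to x-only form plus parity, then verify the tweak. */
        if (!secp256k1_xonly_pubkey_from_pubkey(ctx, &tweaked_pk, &pk_parity, &output_pk)) goto done;
        secp256k1_xonly_pubkey_serialize(ctx, tweaked_pk32, &tweaked_pk);
        ok = secp256k1_xonly_pubkey_tweak_add_check(ctx, tweaked_pk32, pk_parity, &internal_pk, tweak32);
    done:
        secp256k1_context_destroy(ctx);
        return ok;
    }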
diff --git a/src/secp256k1/include/secp256k1_schnorrsig.h b/src/secp256k1/include/secp256k1_schnorrsig.h
new file mode 100644
index 0000000000..0150cd3395
--- /dev/null
+++ b/src/secp256k1/include/secp256k1_schnorrsig.h
@@ -0,0 +1,111 @@
+#ifndef SECP256K1_SCHNORRSIG_H
+#define SECP256K1_SCHNORRSIG_H
+
+#include "secp256k1.h"
+#include "secp256k1_extrakeys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** This module implements a variant of Schnorr signatures compliant with
+ * Bitcoin Improvement Proposal 340 "Schnorr Signatures for secp256k1"
+ * (https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
+ */
+
+/** A pointer to a function to deterministically generate a nonce.
+ *
+ * Same as secp256k1_nonce function with the exception of accepting an
+ * additional pubkey argument and not requiring an attempt argument. The pubkey
+ * argument can protect signature schemes with key-prefixed challenge hash
+ * inputs against reusing the nonce when signing with the wrong precomputed
+ * pubkey.
+ *
+ * Returns: 1 if a nonce was successfully generated. 0 will cause signing to
+ * return an error.
+ * Out: nonce32: pointer to a 32-byte array to be filled by the function.
+ * In: msg32: the 32-byte message hash being verified (will not be NULL)
+ * key32: pointer to a 32-byte secret key (will not be NULL)
+ * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32
+ * (will not be NULL)
+ * algo16: pointer to a 16-byte array describing the signature
+ * algorithm (will not be NULL).
+ * data: Arbitrary data pointer that is passed through.
+ *
+ * Except for test cases, this function should compute some cryptographic hash of
+ * the message, the key, the pubkey, the algorithm description, and data.
+ */
+typedef int (*secp256k1_nonce_function_hardened)(
+ unsigned char *nonce32,
+ const unsigned char *msg32,
+ const unsigned char *key32,
+ const unsigned char *xonly_pk32,
+ const unsigned char *algo16,
+ void *data
+);
+
+/** An implementation of the nonce generation function as defined in Bitcoin
+ * Improvement Proposal 340 "Schnorr Signatures for secp256k1"
+ * (https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
+ *
+ * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
+ * auxiliary random data as defined in BIP-340. If the data pointer is NULL,
+ * schnorrsig_sign does not produce BIP-340 compliant signatures. The algo16
+ * argument must be non-NULL, otherwise the function will fail and return 0.
+ * The hash will be tagged with algo16 after removing all terminating null
+ * bytes. Therefore, to create BIP-340 compliant signatures, algo16 must be set
+ * to "BIP0340/nonce\0\0\0"
+ */
+SECP256K1_API extern const secp256k1_nonce_function_hardened secp256k1_nonce_function_bip340;
+
+/** Create a Schnorr signature.
+ *
+ * Does _not_ strictly follow BIP-340 because it does not verify the resulting
+ * signature. Instead, you can manually use secp256k1_schnorrsig_verify and
+ * abort if it fails.
+ *
+ * Otherwise BIP-340 compliant if the noncefp argument is NULL or
+ * secp256k1_nonce_function_bip340 and the ndata argument is 32-byte auxiliary
+ * randomness.
+ *
+ * Returns 1 on success, 0 on failure.
+ * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL)
+ * Out: sig64: pointer to a 64-byte array to store the serialized signature (cannot be NULL)
+ * In: msg32: the 32-byte message being signed (cannot be NULL)
+ * keypair: pointer to an initialized keypair (cannot be NULL)
+ * noncefp: pointer to a nonce generation function. If NULL, secp256k1_nonce_function_bip340 is used
+ * ndata: pointer to arbitrary data used by the nonce generation
+ * function (can be NULL). If it is non-NULL and
+ * secp256k1_nonce_function_bip340 is used, then ndata must be a
+ * pointer to 32-byte auxiliary randomness as per BIP-340.
+ */
+SECP256K1_API int secp256k1_schnorrsig_sign(
+ const secp256k1_context* ctx,
+ unsigned char *sig64,
+ const unsigned char *msg32,
+ const secp256k1_keypair *keypair,
+ secp256k1_nonce_function_hardened noncefp,
+ void *ndata
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
+
+/** Verify a Schnorr signature.
+ *
+ * Returns: 1: correct signature
+ * 0: incorrect signature
+ * Args: ctx: a secp256k1 context object, initialized for verification.
+ * In: sig64: pointer to the 64-byte signature to verify (cannot be NULL)
+ * msg32: the 32-byte message being verified (cannot be NULL)
+ * pubkey: pointer to an x-only public key to verify with (cannot be NULL)
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorrsig_verify(
+ const secp256k1_context* ctx,
+ const unsigned char *sig64,
+ const unsigned char *msg32,
+ const secp256k1_xonly_pubkey *pubkey
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SECP256K1_SCHNORRSIG_H */
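[Editor's note] A minimal sign-and-verify sketch showing how the schnorrsig API above combines with the extrakeys types (not part of the diff). schnorrsig_demo and its parameters are hypothetical names: seckey32 is a valid 32-byte secret key, msg32 a 32-byte message hash, and aux_rand32 fresh 32-byte auxiliary randomness; context creation again comes from secp256k1.h, which this section does not show.

    #include "secp256k1.h"
    #include "secp256k1_extrakeys.h"
    #include "secp256k1_schnorrsig.h"

    /* Sketch: create a BIP-340 signature and verify it against the x-only pubkey. */
    int schnorrsig_demo(const unsigned char *seckey32, const unsigned char *msg32,
                        unsigned char *aux_rand32) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        secp256k1_keypair keypair;
        secp256k1_xonly_pubkey pubkey;
        unsigned char sig64[64];
        int ok = 0;

        if (!secp256k1_keypair_create(ctx, &keypair, seckey32)) goto done;
        if (!secp256k1_keypair_xonly_pub(ctx, &pubkey, NULL, &keypair)) goto done;
        /* noncefp == NULL selects secp256k1_nonce_function_bip340; passing 32 bytes of
           auxiliary randomness as ndata makes the signature BIP-340 compliant. */
        if (!secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypair, NULL, aux_rand32)) goto done;
        ok = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkey);
    done:
        secp256k1_context_destroy(ctx);
        return ok;
    }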
diff --git a/src/secp256k1/sage/gen_exhaustive_groups.sage b/src/secp256k1/sage/gen_exhaustive_groups.sage
new file mode 100644
index 0000000000..3c3c984811
--- /dev/null
+++ b/src/secp256k1/sage/gen_exhaustive_groups.sage
@@ -0,0 +1,129 @@
+# Define field size and field
+P = 2^256 - 2^32 - 977
+F = GF(P)
+BETA = F(0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee)
+
+assert(BETA != F(1) and BETA^3 == F(1))
+
+orders_done = set()
+results = {}
+first = True
+for b in range(1, P):
+ # There are only 6 curves (up to isomorphism) of the form y^2=x^3+B. Stop once we have tried all.
+ if len(orders_done) == 6:
+ break
+
+ E = EllipticCurve(F, [0, b])
+ print("Analyzing curve y^2 = x^3 + %i" % b)
+ n = E.order()
+ # Skip curves with an order we've already tried
+ if n in orders_done:
+ print("- Isomorphic to earlier curve")
+ continue
+ orders_done.add(n)
+ # Skip curves isomorphic to the real secp256k1
+ if n.is_pseudoprime():
+ print(" - Isomorphic to secp256k1")
+ continue
+
+ print("- Finding subgroups")
+
+ # Find what prime subgroups exist
+ for f, _ in n.factor():
+ print("- Analyzing subgroup of order %i" % f)
+ # Skip subgroups of order >1000
+ if f < 4 or f > 1000:
+ print(" - Bad size")
+ continue
+
+ # Iterate over X coordinates until we find one that is on the curve, has order f,
+ # and for which curve isomorphism exists that maps it to X coordinate 1.
+ for x in range(1, P):
+ # Skip X coordinates not on the curve, and construct the full point otherwise.
+ if not E.is_x_coord(x):
+ continue
+ G = E.lift_x(F(x))
+
+ print(" - Analyzing (multiples of) point with X=%i" % x)
+
+ # Skip points whose order is not a multiple of f. Project the point to have
+ # order f otherwise.
+ if (G.order() % f):
+ print(" - Bad order")
+ continue
+ G = G * (G.order() // f)
+
+ # Find lambda for endomorphism. Skip if none can be found.
+ lam = None
+ for l in Integers(f)(1).nth_root(3, all=True):
+ if int(l)*G == E(BETA*G[0], G[1]):
+ lam = int(l)
+ break
+ if lam is None:
+ print(" - No endomorphism for this subgroup")
+ break
+
+ # Now look for an isomorphism of the curve that gives this point an X
+ # coordinate equal to 1.
+ # If (x,y) is on y^2 = x^3 + b, then (a^2*x, a^3*y) is on y^2 = x^3 + a^6*b.
+ # So look for m=a^2=1/x.
+ m = F(1)/G[0]
+ if not m.is_square():
+ print(" - No curve isomorphism maps it to a point with X=1")
+ continue
+ a = m.sqrt()
+ rb = a^6*b
+ RE = EllipticCurve(F, [0, rb])
+
+    # Use as generator twice the image of G under the above isomorphism.
+ # This means that generator*(1/2 mod f) will have X coordinate 1.
+ RG = RE(1, a^3*G[1]) * 2
+ # And even Y coordinate.
+ if int(RG[1]) % 2:
+ RG = -RG
+ assert(RG.order() == f)
+ assert(lam*RG == RE(BETA*RG[0], RG[1]))
+
+ # We have found curve RE:y^2=x^3+rb with generator RG of order f. Remember it
+ results[f] = {"b": rb, "G": RG, "lambda": lam}
+ print(" - Found solution")
+ break
+
+ print("")
+
+print("")
+print("")
+print("/* To be put in src/group_impl.h: */")
+first = True
+for f in sorted(results.keys()):
+ b = results[f]["b"]
+ G = results[f]["G"]
+ print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
+ first = False
+ print("static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(")
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
+ print(");")
+ print("static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(")
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
+ print(");")
+print("# else")
+print("# error No known generator for the specified exhaustive test group order.")
+print("# endif")
+
+print("")
+print("")
+print("/* To be put in src/scalar_impl.h: */")
+first = True
+for f in sorted(results.keys()):
+ lam = results[f]["lambda"]
+ print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
+ first = False
+ print("# define EXHAUSTIVE_TEST_LAMBDA %i" % lam)
+print("# else")
+print("# error No known lambda for the specified exhaustive test group order.")
+print("# endif")
+print("")
diff --git a/src/secp256k1/src/assumptions.h b/src/secp256k1/src/assumptions.h
new file mode 100644
index 0000000000..77204de2b8
--- /dev/null
+++ b/src/secp256k1/src/assumptions.h
@@ -0,0 +1,80 @@
+/**********************************************************************
+ * Copyright (c) 2020 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_ASSUMPTIONS_H
+#define SECP256K1_ASSUMPTIONS_H
+
+#include <limits.h>
+
+#include "util.h"
+
+/* This library, like most software, relies on a number of compiler implementation defined (but not undefined)
+ behaviours. Although the behaviours we require are essentially universal we test them specifically here to
+ reduce the odds of experiencing an unwelcome surprise.
+*/
+
+struct secp256k1_assumption_checker {
+ /* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not
+ allowed. */
+ int dummy_array[(
+ /* Bytes are 8 bits. */
+ (CHAR_BIT == 8) &&
+
+ /* No integer promotion for uint32_t. This ensures that we can multiply uintXX_t values where XX >= 32
+ without signed overflow, which would be undefined behaviour. */
+ (UINT_MAX <= UINT32_MAX) &&
+
+ /* Conversions from unsigned to signed outside of the bounds of the signed type are
+ implementation-defined. Verify that they function as reinterpreting the lower
+ bits of the input in two's complement notation. Do this for conversions:
+ - from uint(N)_t to int(N)_t with negative result
+ - from uint(2N)_t to int(N)_t with negative result
+ - from int(2N)_t to int(N)_t with negative result
+ - from int(2N)_t to int(N)_t with positive result */
+
+ /* To int8_t. */
+ ((int8_t)(uint8_t)0xAB == (int8_t)-(int8_t)0x55) &&
+ ((int8_t)(uint16_t)0xABCD == (int8_t)-(int8_t)0x33) &&
+ ((int8_t)(int16_t)(uint16_t)0xCDEF == (int8_t)(uint8_t)0xEF) &&
+ ((int8_t)(int16_t)(uint16_t)0x9234 == (int8_t)(uint8_t)0x34) &&
+
+ /* To int16_t. */
+ ((int16_t)(uint16_t)0xBCDE == (int16_t)-(int16_t)0x4322) &&
+ ((int16_t)(uint32_t)0xA1B2C3D4 == (int16_t)-(int16_t)0x3C2C) &&
+ ((int16_t)(int32_t)(uint32_t)0xC1D2E3F4 == (int16_t)(uint16_t)0xE3F4) &&
+ ((int16_t)(int32_t)(uint32_t)0x92345678 == (int16_t)(uint16_t)0x5678) &&
+
+ /* To int32_t. */
+ ((int32_t)(uint32_t)0xB2C3D4E5 == (int32_t)-(int32_t)0x4D3C2B1B) &&
+ ((int32_t)(uint64_t)0xA123B456C789D012ULL == (int32_t)-(int32_t)0x38762FEE) &&
+ ((int32_t)(int64_t)(uint64_t)0xC1D2E3F4A5B6C7D8ULL == (int32_t)(uint32_t)0xA5B6C7D8) &&
+ ((int32_t)(int64_t)(uint64_t)0xABCDEF0123456789ULL == (int32_t)(uint32_t)0x23456789) &&
+
+ /* To int64_t. */
+ ((int64_t)(uint64_t)0xB123C456D789E012ULL == (int64_t)-(int64_t)0x4EDC3BA928761FEEULL) &&
+#if defined(SECP256K1_WIDEMUL_INT128)
+ ((int64_t)(((uint128_t)0xA1234567B8901234ULL << 64) + 0xC5678901D2345678ULL) == (int64_t)-(int64_t)0x3A9876FE2DCBA988ULL) &&
+ (((int64_t)(int128_t)(((uint128_t)0xB1C2D3E4F5A6B7C8ULL << 64) + 0xD9E0F1A2B3C4D5E6ULL)) == (int64_t)(uint64_t)0xD9E0F1A2B3C4D5E6ULL) &&
+ (((int64_t)(int128_t)(((uint128_t)0xABCDEF0123456789ULL << 64) + 0x0123456789ABCDEFULL)) == (int64_t)(uint64_t)0x0123456789ABCDEFULL) &&
+
+ /* To int128_t. */
+ ((int128_t)(((uint128_t)0xB1234567C8901234ULL << 64) + 0xD5678901E2345678ULL) == (int128_t)(-(int128_t)0x8E1648B3F50E80DCULL * 0x8E1648B3F50E80DDULL + 0x5EA688D5482F9464ULL)) &&
+#endif
+
+ /* Right shift on negative signed values is implementation defined. Verify that it
+ acts as a right shift in two's complement with sign extension (i.e duplicating
+ the top bit into newly added bits). */
+ ((((int8_t)0xE8) >> 2) == (int8_t)(uint8_t)0xFA) &&
+ ((((int16_t)0xE9AC) >> 4) == (int16_t)(uint16_t)0xFE9A) &&
+ ((((int32_t)0x937C918A) >> 9) == (int32_t)(uint32_t)0xFFC9BE48) &&
+ ((((int64_t)0xA8B72231DF9CF4B9ULL) >> 19) == (int64_t)(uint64_t)0xFFFFF516E4463BF3ULL) &&
+#if defined(SECP256K1_WIDEMUL_INT128)
+ ((((int128_t)(((uint128_t)0xCD833A65684A0DBCULL << 64) + 0xB349312F71EA7637ULL)) >> 39) == (int128_t)(((uint128_t)0xFFFFFFFFFF9B0674ULL << 64) + 0xCAD0941B79669262ULL)) &&
+#endif
+ 1) * 2 - 1];
+};
+
+#endif /* SECP256K1_ASSUMPTIONS_H */
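[Editor's note] The dummy_array declaration above is the usual C89 substitute for a static assertion: the conjunction of all checks evaluates to 1 or 0, and the trailing "* 2 - 1" turns that into an array size of either 1 or -1, so any failed assumption becomes a compile error. A stand-alone illustration with a hypothetical macro name (not used by the library):

    /* Hypothetical macro showing the same trick: a false condition yields an
       array of size -1, which no conforming compiler accepts. */
    #define DEMO_STATIC_ASSERT(cond) \
        typedef char demo_static_assert_failed[(cond) * 2 - 1]

    DEMO_STATIC_ASSERT(sizeof(unsigned char) == 1);  /* size 1: compiles */
    /* DEMO_STATIC_ASSERT(0);                           size -1: rejected at compile time */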
diff --git a/src/secp256k1/src/basic-config.h b/src/secp256k1/src/basic-config.h
index e9be39d4ca..b0d82e89b4 100644
--- a/src/secp256k1/src/basic-config.h
+++ b/src/secp256k1/src/basic-config.h
@@ -11,26 +11,22 @@
#undef USE_ASM_X86_64
#undef USE_ECMULT_STATIC_PRECOMPUTATION
-#undef USE_ENDOMORPHISM
#undef USE_EXTERNAL_ASM
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
-#undef USE_FIELD_10X26
-#undef USE_FIELD_5X52
#undef USE_FIELD_INV_BUILTIN
#undef USE_FIELD_INV_NUM
#undef USE_NUM_GMP
#undef USE_NUM_NONE
-#undef USE_SCALAR_4X64
-#undef USE_SCALAR_8X32
#undef USE_SCALAR_INV_BUILTIN
#undef USE_SCALAR_INV_NUM
+#undef USE_FORCE_WIDEMUL_INT64
+#undef USE_FORCE_WIDEMUL_INT128
#undef ECMULT_WINDOW_SIZE
#define USE_NUM_NONE 1
#define USE_FIELD_INV_BUILTIN 1
#define USE_SCALAR_INV_BUILTIN 1
-#define USE_FIELD_10X26 1
-#define USE_SCALAR_8X32 1
+#define USE_WIDEMUL_64 1
#define ECMULT_WINDOW_SIZE 15
#endif /* USE_BASIC_CONFIG */
diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c
index 20759127d3..5f2b7a9759 100644
--- a/src/secp256k1/src/bench_internal.c
+++ b/src/secp256k1/src/bench_internal.c
@@ -7,6 +7,7 @@
#include "include/secp256k1.h"
+#include "assumptions.h"
#include "util.h"
#include "hash_impl.h"
#include "num_impl.h"
@@ -19,10 +20,10 @@
#include "secp256k1.c"
typedef struct {
- secp256k1_scalar scalar_x, scalar_y;
- secp256k1_fe fe_x, fe_y;
- secp256k1_ge ge_x, ge_y;
- secp256k1_gej gej_x, gej_y;
+ secp256k1_scalar scalar[2];
+ secp256k1_fe fe[4];
+ secp256k1_ge ge[2];
+ secp256k1_gej gej[2];
unsigned char data[64];
int wnaf[256];
} bench_inv;
@@ -30,30 +31,53 @@ typedef struct {
void bench_setup(void* arg) {
bench_inv *data = (bench_inv*)arg;
- static const unsigned char init_x[32] = {
- 0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
- 0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
- 0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
- 0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
+ static const unsigned char init[4][32] = {
+ /* Initializer for scalar[0], fe[0], first half of data, the X coordinate of ge[0],
+ and the (implied affine) X coordinate of gej[0]. */
+ {
+ 0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
+ 0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
+ 0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
+ 0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
+ },
+        /* Initializer for scalar[1], fe[1], second half of data, the X coordinate of ge[1],
+ and the (implied affine) X coordinate of gej[1]. */
+ {
+ 0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
+ 0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
+ 0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
+ 0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
+ },
+ /* Initializer for fe[2] and the Z coordinate of gej[0]. */
+ {
+ 0x3d, 0x2d, 0xef, 0xf4, 0x25, 0x98, 0x4f, 0x5d,
+ 0xe2, 0xca, 0x5f, 0x41, 0x3f, 0x3f, 0xce, 0x44,
+ 0xaa, 0x2c, 0x53, 0x8a, 0xc6, 0x59, 0x1f, 0x38,
+ 0x38, 0x23, 0xe4, 0x11, 0x27, 0xc6, 0xa0, 0xe7
+ },
+ /* Initializer for fe[3] and the Z coordinate of gej[1]. */
+ {
+ 0xbd, 0x21, 0xa5, 0xe1, 0x13, 0x50, 0x73, 0x2e,
+ 0x52, 0x98, 0xc8, 0x9e, 0xab, 0x00, 0xa2, 0x68,
+ 0x43, 0xf5, 0xd7, 0x49, 0x80, 0x72, 0xa7, 0xf3,
+ 0xd7, 0x60, 0xe6, 0xab, 0x90, 0x92, 0xdf, 0xc5
+ }
};
- static const unsigned char init_y[32] = {
- 0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
- 0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
- 0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
- 0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
- };
-
- secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL);
- secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL);
- secp256k1_fe_set_b32(&data->fe_x, init_x);
- secp256k1_fe_set_b32(&data->fe_y, init_y);
- CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0));
- CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1));
- secp256k1_gej_set_ge(&data->gej_x, &data->ge_x);
- secp256k1_gej_set_ge(&data->gej_y, &data->ge_y);
- memcpy(data->data, init_x, 32);
- memcpy(data->data + 32, init_y, 32);
+ secp256k1_scalar_set_b32(&data->scalar[0], init[0], NULL);
+ secp256k1_scalar_set_b32(&data->scalar[1], init[1], NULL);
+ secp256k1_fe_set_b32(&data->fe[0], init[0]);
+ secp256k1_fe_set_b32(&data->fe[1], init[1]);
+ secp256k1_fe_set_b32(&data->fe[2], init[2]);
+ secp256k1_fe_set_b32(&data->fe[3], init[3]);
+ CHECK(secp256k1_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
+ CHECK(secp256k1_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
+ secp256k1_gej_set_ge(&data->gej[0], &data->ge[0]);
+ secp256k1_gej_rescale(&data->gej[0], &data->fe[2]);
+ secp256k1_gej_set_ge(&data->gej[1], &data->ge[1]);
+ secp256k1_gej_rescale(&data->gej[1], &data->fe[3]);
+ memcpy(data->data, init[0], 32);
+ memcpy(data->data + 32, init[1], 32);
}
void bench_scalar_add(void* arg, int iters) {
@@ -61,7 +85,7 @@ void bench_scalar_add(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@@ -71,7 +95,7 @@ void bench_scalar_negate(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x);
+ secp256k1_scalar_negate(&data->scalar[0], &data->scalar[0]);
}
}
@@ -80,7 +104,7 @@ void bench_scalar_sqr(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x);
+ secp256k1_scalar_sqr(&data->scalar[0], &data->scalar[0]);
}
}
@@ -89,30 +113,28 @@ void bench_scalar_mul(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ secp256k1_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
}
-#ifdef USE_ENDOMORPHISM
void bench_scalar_split(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_scalar_split_lambda(&data->scalar_x, &data->scalar_y, &data->scalar_x);
- j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ secp256k1_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]);
+ j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
-#endif
void bench_scalar_inverse(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x);
- j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ secp256k1_scalar_inverse(&data->scalar[0], &data->scalar[0]);
+ j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@@ -122,8 +144,8 @@ void bench_scalar_inverse_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x);
- j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ secp256k1_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
+ j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(j <= iters);
}
@@ -133,7 +155,7 @@ void bench_field_normalize(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_fe_normalize(&data->fe_x);
+ secp256k1_fe_normalize(&data->fe[0]);
}
}
@@ -142,7 +164,7 @@ void bench_field_normalize_weak(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_fe_normalize_weak(&data->fe_x);
+ secp256k1_fe_normalize_weak(&data->fe[0]);
}
}
@@ -151,7 +173,7 @@ void bench_field_mul(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y);
+ secp256k1_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
}
}
@@ -160,7 +182,7 @@ void bench_field_sqr(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_fe_sqr(&data->fe_x, &data->fe_x);
+ secp256k1_fe_sqr(&data->fe[0], &data->fe[0]);
}
}
@@ -169,8 +191,8 @@ void bench_field_inverse(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_fe_inv(&data->fe_x, &data->fe_x);
- secp256k1_fe_add(&data->fe_x, &data->fe_y);
+ secp256k1_fe_inv(&data->fe[0], &data->fe[0]);
+ secp256k1_fe_add(&data->fe[0], &data->fe[1]);
}
}
@@ -179,8 +201,8 @@ void bench_field_inverse_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_fe_inv_var(&data->fe_x, &data->fe_x);
- secp256k1_fe_add(&data->fe_x, &data->fe_y);
+ secp256k1_fe_inv_var(&data->fe[0], &data->fe[0]);
+ secp256k1_fe_add(&data->fe[0], &data->fe[1]);
}
}
@@ -190,9 +212,9 @@ void bench_field_sqrt(void* arg, int iters) {
secp256k1_fe t;
for (i = 0; i < iters; i++) {
- t = data->fe_x;
- j += secp256k1_fe_sqrt(&data->fe_x, &t);
- secp256k1_fe_add(&data->fe_x, &data->fe_y);
+ t = data->fe[0];
+ j += secp256k1_fe_sqrt(&data->fe[0], &t);
+ secp256k1_fe_add(&data->fe[0], &data->fe[1]);
}
CHECK(j <= iters);
}
@@ -202,7 +224,7 @@ void bench_group_double_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL);
+ secp256k1_gej_double_var(&data->gej[0], &data->gej[0], NULL);
}
}
@@ -211,7 +233,7 @@ void bench_group_add_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL);
+ secp256k1_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL);
}
}
@@ -220,7 +242,7 @@ void bench_group_add_affine(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y);
+ secp256k1_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]);
}
}
@@ -229,7 +251,7 @@ void bench_group_add_affine_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL);
+ secp256k1_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL);
}
}
@@ -238,9 +260,37 @@ void bench_group_jacobi_var(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- j += secp256k1_gej_has_quad_y_var(&data->gej_x);
+ j += secp256k1_gej_has_quad_y_var(&data->gej[0]);
+ /* Vary the Y and Z coordinates of the input (the X coordinate doesn't matter to
+ secp256k1_gej_has_quad_y_var). Note that the resulting coordinates will
+ generally not correspond to a point on the curve, but this is not a problem
+ for the code being benchmarked here. Adding and normalizing have less
+ overhead than EC operations (which could guarantee the point remains on the
+ curve). */
+ secp256k1_fe_add(&data->gej[0].y, &data->fe[1]);
+ secp256k1_fe_add(&data->gej[0].z, &data->fe[2]);
+ secp256k1_fe_normalize_var(&data->gej[0].y);
+ secp256k1_fe_normalize_var(&data->gej[0].z);
+ }
+ CHECK(j <= iters);
+}
+
+void bench_group_to_affine_var(void* arg, int iters) {
+ int i;
+ bench_inv *data = (bench_inv*)arg;
+
+ for (i = 0; i < iters; ++i) {
+ secp256k1_ge_set_gej_var(&data->ge[1], &data->gej[0]);
+ /* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates.
+ Similar to bench_group_jacobi_var, this approach does not result in
+ coordinates of points on the curve. */
+ secp256k1_fe_add(&data->gej[0].x, &data->ge[1].y);
+ secp256k1_fe_add(&data->gej[0].y, &data->fe[2]);
+ secp256k1_fe_add(&data->gej[0].z, &data->ge[1].x);
+ secp256k1_fe_normalize_var(&data->gej[0].x);
+ secp256k1_fe_normalize_var(&data->gej[0].y);
+ secp256k1_fe_normalize_var(&data->gej[0].z);
}
- CHECK(j == iters);
}
void bench_ecmult_wnaf(void* arg, int iters) {
@@ -248,8 +298,8 @@ void bench_ecmult_wnaf(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- bits += secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A);
- overflow += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ bits += secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A);
+ overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(overflow >= 0);
CHECK(bits <= 256*iters);
@@ -260,8 +310,8 @@ void bench_wnaf_const(void* arg, int iters) {
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
- bits += secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
- overflow += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ bits += secp256k1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
+ overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(overflow >= 0);
CHECK(bits <= 256*iters);
@@ -323,14 +373,15 @@ void bench_context_sign(void* arg, int iters) {
void bench_num_jacobi(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
- secp256k1_num nx, norder;
+ secp256k1_num nx, na, norder;
- secp256k1_scalar_get_num(&nx, &data->scalar_x);
+ secp256k1_scalar_get_num(&nx, &data->scalar[0]);
secp256k1_scalar_order_get_num(&norder);
- secp256k1_scalar_get_num(&norder, &data->scalar_y);
+ secp256k1_scalar_get_num(&na, &data->scalar[1]);
for (i = 0; i < iters; i++) {
j += secp256k1_num_jacobi(&nx, &norder);
+ secp256k1_num_add(&nx, &nx, &na);
}
CHECK(j <= iters);
}
@@ -344,9 +395,7 @@ int main(int argc, char **argv) {
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10);
-#ifdef USE_ENDOMORPHISM
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters);
-#endif
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);
@@ -363,6 +412,7 @@ int main(int argc, char **argv) {
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, iters);
+ if (have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters);
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, iters);
diff --git a/src/secp256k1/src/bench_schnorrsig.c b/src/secp256k1/src/bench_schnorrsig.c
new file mode 100644
index 0000000000..315f5af28e
--- /dev/null
+++ b/src/secp256k1/src/bench_schnorrsig.c
@@ -0,0 +1,102 @@
+/**********************************************************************
+ * Copyright (c) 2018-2020 Andrew Poelstra, Jonas Nick *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include <string.h>
+#include <stdlib.h>
+
+
+#include "include/secp256k1.h"
+#include "include/secp256k1_schnorrsig.h"
+#include "util.h"
+#include "bench.h"
+
+typedef struct {
+ secp256k1_context *ctx;
+ int n;
+
+ const secp256k1_keypair **keypairs;
+ const unsigned char **pk;
+ const unsigned char **sigs;
+ const unsigned char **msgs;
+} bench_schnorrsig_data;
+
+void bench_schnorrsig_sign(void* arg, int iters) {
+ bench_schnorrsig_data *data = (bench_schnorrsig_data *)arg;
+ int i;
+ unsigned char msg[32] = "benchmarkexamplemessagetemplate";
+ unsigned char sig[64];
+
+ for (i = 0; i < iters; i++) {
+ msg[0] = i;
+ msg[1] = i >> 8;
+ CHECK(secp256k1_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL));
+ }
+}
+
+void bench_schnorrsig_verify(void* arg, int iters) {
+ bench_schnorrsig_data *data = (bench_schnorrsig_data *)arg;
+ int i;
+
+ for (i = 0; i < iters; i++) {
+ secp256k1_xonly_pubkey pk;
+ CHECK(secp256k1_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1);
+ CHECK(secp256k1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk));
+ }
+}
+
+int main(void) {
+ int i;
+ bench_schnorrsig_data data;
+ int iters = get_iters(10000);
+
+ data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_SIGN);
+ data.keypairs = (const secp256k1_keypair **)malloc(iters * sizeof(secp256k1_keypair *));
+ data.pk = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
+ data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
+ data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
+
+ for (i = 0; i < iters; i++) {
+ unsigned char sk[32];
+ unsigned char *msg = (unsigned char *)malloc(32);
+ unsigned char *sig = (unsigned char *)malloc(64);
+ secp256k1_keypair *keypair = (secp256k1_keypair *)malloc(sizeof(*keypair));
+ unsigned char *pk_char = (unsigned char *)malloc(32);
+ secp256k1_xonly_pubkey pk;
+ msg[0] = sk[0] = i;
+ msg[1] = sk[1] = i >> 8;
+ msg[2] = sk[2] = i >> 16;
+ msg[3] = sk[3] = i >> 24;
+ memset(&msg[4], 'm', 28);
+ memset(&sk[4], 's', 28);
+
+ data.keypairs[i] = keypair;
+ data.pk[i] = pk_char;
+ data.msgs[i] = msg;
+ data.sigs[i] = sig;
+
+ CHECK(secp256k1_keypair_create(data.ctx, keypair, sk));
+ CHECK(secp256k1_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL));
+ CHECK(secp256k1_keypair_xonly_pub(data.ctx, &pk, NULL, keypair));
+ CHECK(secp256k1_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1);
+ }
+
+ run_benchmark("schnorrsig_sign", bench_schnorrsig_sign, NULL, NULL, (void *) &data, 10, iters);
+ run_benchmark("schnorrsig_verify", bench_schnorrsig_verify, NULL, NULL, (void *) &data, 10, iters);
+
+ for (i = 0; i < iters; i++) {
+ free((void *)data.keypairs[i]);
+ free((void *)data.pk[i]);
+ free((void *)data.msgs[i]);
+ free((void *)data.sigs[i]);
+ }
+ free(data.keypairs);
+ free(data.pk);
+ free(data.msgs);
+ free(data.sigs);
+
+ secp256k1_context_destroy(data.ctx);
+ return 0;
+}
diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h
index c9b198239d..09e8146414 100644
--- a/src/secp256k1/src/ecmult.h
+++ b/src/secp256k1/src/ecmult.h
@@ -15,9 +15,7 @@
typedef struct {
/* For accelerating the computation of a*P + b*G: */
secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
-#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
-#endif
} secp256k1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h
index 6d6d354aa4..bb9511108b 100644
--- a/src/secp256k1/src/ecmult_const_impl.h
+++ b/src/secp256k1/src/ecmult_const_impl.h
@@ -105,16 +105,22 @@ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w
/* 4 */
u_last = secp256k1_scalar_shr_int(&s, w);
do {
- int sign;
int even;
/* 4.1 4.4 */
u = secp256k1_scalar_shr_int(&s, w);
/* 4.2 */
even = ((u & 1) == 0);
- sign = 2 * (u_last > 0) - 1;
- u += sign * even;
- u_last -= sign * even * (1 << w);
+ /* In contrast to the original algorithm, u_last is always > 0 and
+ * therefore we do not need to check its sign. In particular, it's easy
+ * to see that u_last is never < 0 because u is never < 0. Moreover,
+ * u_last is never = 0 because u is never even after a loop
+ * iteration. The same holds analogously for the initial value of
+ * u_last (in the first loop iteration). */
+ VERIFY_CHECK(u_last > 0);
+ VERIFY_CHECK((u_last & 1) == 1);
+ u += even;
+ u_last -= even * (1 << w);
/* 4.3, adapted for global sign change */
wnaf[word++] = u_last * global_sign;
@@ -134,19 +140,16 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
secp256k1_fe Z;
int skew_1;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
secp256k1_scalar q_1, q_lam;
-#endif
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
/* build wnaf representation for q. */
int rsize = size;
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
@@ -154,12 +157,9 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
-#endif
{
skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
-#ifdef USE_ENDOMORPHISM
skew_lam = 0;
-#endif
}
/* Calculate odd multiples of a.
@@ -173,14 +173,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_fe_normalize_weak(&pre_a[i].y);
}
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
}
-#endif
/* first loop iteration (separated out so we can directly set r, rather
* than having it start at infinity, get doubled several times, then have
@@ -189,34 +187,30 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
secp256k1_gej_set_ge(r, &tmpa);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
secp256k1_gej_add_ge(r, r, &tmpa);
}
-#endif
/* remaining loop iterations */
for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
int n;
int j;
for (j = 0; j < WINDOW_A - 1; ++j) {
- secp256k1_gej_double_nonzero(r, r);
+ secp256k1_gej_double(r, r);
}
n = wnaf_1[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
}
-#endif
}
secp256k1_fe_mul(&r->z, &r->z, &Z);
@@ -225,43 +219,35 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
/* Correct for wNAF skew */
secp256k1_ge correction = *a;
secp256k1_ge_storage correction_1_stor;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage correction_lam_stor;
-#endif
secp256k1_ge_storage a2_stor;
secp256k1_gej tmpj;
secp256k1_gej_set_ge(&tmpj, &correction);
secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
secp256k1_ge_set_gej(&correction, &tmpj);
secp256k1_ge_to_storage(&correction_1_stor, a);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_to_storage(&correction_lam_stor, a);
}
-#endif
secp256k1_ge_to_storage(&a2_stor, &correction);
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
}
-#endif
/* Apply the correction */
secp256k1_ge_from_storage(&correction, &correction_1_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_from_storage(&correction, &correction_lam_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_ge_mul_lambda(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
}
-#endif
}
}
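For background on the hunks above and the ecmult changes that follow: the patch makes the GLV endomorphism decomposition unconditional rather than optional. The identities involved are standard secp256k1 facts, restated here for readability rather than taken from the patch:

    \lambda \cdot (x, y) = (\beta x, y), \qquad k \equiv k_1 + k_2 \lambda \pmod{n}, \qquad k P = k_1 P + k_2 (\lambda P)

where \beta is a cube root of unity modulo the field prime and \lambda a cube root of unity modulo the group order n. secp256k1_scalar_split_lambda computes the roughly 128-bit halves k_1 and k_2, and secp256k1_ge_mul_lambda applies the map (x, y) -> (\beta x, y) with a single field multiplication, so one full-width multiplication becomes two half-width ones that share their doublings. This is why WNAF_BITS and the related table sizes below shrink to the 128-bit case only.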
diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h
index f03fa9469d..057a69cf73 100644
--- a/src/secp256k1/src/ecmult_impl.h
+++ b/src/secp256k1/src/ecmult_impl.h
@@ -38,8 +38,8 @@
* (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
* where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
* be larger due to platform-specific padding and alignment.
- * If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM)
- * two tables of this size are used instead of only one.
+ * Two tables of this size are used (due to the endomorphism
+ * optimization).
*/
# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
@@ -59,11 +59,7 @@
# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif
-#ifdef USE_ENDOMORPHISM
- #define WNAF_BITS 128
-#else
- #define WNAF_BITS 256
-#endif
+#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
@@ -77,17 +73,9 @@
#define PIPPENGER_MAX_BUCKET_WINDOW 12
/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
-#ifdef USE_ENDOMORPHISM
- #define ECMULT_PIPPENGER_THRESHOLD 88
-#else
- #define ECMULT_PIPPENGER_THRESHOLD 160
-#endif
+#define ECMULT_PIPPENGER_THRESHOLD 88
-#ifdef USE_ENDOMORPHISM
- #define ECMULT_MAX_POINTS_PER_BATCH 5000000
-#else
- #define ECMULT_MAX_POINTS_PER_BATCH 10000000
-#endif
+#define ECMULT_MAX_POINTS_PER_BATCH 5000000
/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
 * the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will
@@ -313,16 +301,12 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
-#ifdef USE_ENDOMORPHISM
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
-#endif
;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
-#ifdef USE_ENDOMORPHISM
ctx->pre_g_128 = NULL;
-#endif
}
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
@@ -347,7 +331,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void *
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
-#ifdef USE_ENDOMORPHISM
{
secp256k1_gej g_128j;
int i;
@@ -364,7 +347,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void *
}
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
}
-#endif
}
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
@@ -372,11 +354,9 @@ static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *d
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
-#ifdef USE_ENDOMORPHISM
if (src->pre_g_128 != NULL) {
dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
-#endif
}
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
@@ -447,16 +427,11 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a,
}
struct secp256k1_strauss_point_state {
-#ifdef USE_ENDOMORPHISM
secp256k1_scalar na_1, na_lam;
- int wnaf_na_1[130];
- int wnaf_na_lam[130];
+ int wnaf_na_1[129];
+ int wnaf_na_lam[129];
int bits_na_1;
int bits_na_lam;
-#else
- int wnaf_na[256];
- int bits_na;
-#endif
size_t input_pos;
};
@@ -464,26 +439,19 @@ struct secp256k1_strauss_state {
secp256k1_gej* prej;
secp256k1_fe* zr;
secp256k1_ge* pre_a;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge* pre_a_lam;
-#endif
struct secp256k1_strauss_point_state* ps;
};
static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
secp256k1_ge tmpa;
secp256k1_fe Z;
-#ifdef USE_ENDOMORPHISM
 /* Split G factors. */
secp256k1_scalar ng_1, ng_128;
int wnaf_ng_1[129];
int bits_ng_1 = 0;
int wnaf_ng_128[129];
int bits_ng_128 = 0;
-#else
- int wnaf_ng[256];
- int bits_ng = 0;
-#endif
int i;
int bits = 0;
int np;
@@ -494,28 +462,20 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
continue;
}
state->ps[no].input_pos = np;
-#ifdef USE_ENDOMORPHISM
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);
/* build wnaf representation for na_1 and na_lam. */
- state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A);
- state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A);
- VERIFY_CHECK(state->ps[no].bits_na_1 <= 130);
- VERIFY_CHECK(state->ps[no].bits_na_lam <= 130);
+ state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A);
+ state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A);
+ VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
+ VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
if (state->ps[no].bits_na_1 > bits) {
bits = state->ps[no].bits_na_1;
}
if (state->ps[no].bits_na_lam > bits) {
bits = state->ps[no].bits_na_lam;
}
-#else
- /* build wnaf representation for na. */
- state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A);
- if (state->ps[no].bits_na > bits) {
- bits = state->ps[no].bits_na;
- }
-#endif
++no;
}
@@ -547,7 +507,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
secp256k1_fe_set_int(&Z, 1);
}
-#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
@@ -568,21 +527,12 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
bits = bits_ng_128;
}
}
-#else
- if (ng) {
- bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
- if (bits_ng > bits) {
- bits = bits_ng;
- }
- }
-#endif
secp256k1_gej_set_infinity(r);
for (i = bits - 1; i >= 0; i--) {
int n;
secp256k1_gej_double_var(r, r, NULL);
-#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
@@ -601,18 +551,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
}
-#else
- for (np = 0; np < no; ++np) {
- if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) {
- ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
- secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
- }
- }
- if (i < bits_ng && (n = wnaf_ng[i])) {
- ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
- secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
- }
-#endif
}
if (!r->infinity) {
@@ -625,27 +563,19 @@ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej
secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
struct secp256k1_strauss_point_state ps[1];
-#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
-#endif
struct secp256k1_strauss_state state;
state.prej = prej;
state.zr = zr;
state.pre_a = pre_a;
-#ifdef USE_ENDOMORPHISM
state.pre_a_lam = pre_a_lam;
-#endif
state.ps = ps;
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
}
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
-#ifdef USE_ENDOMORPHISM
static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
-#else
- static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
-#endif
return n_points*point_size;
}
@@ -665,12 +595,8 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callba
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
-#ifdef USE_ENDOMORPHISM
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
-#else
- state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
-#endif
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
@@ -868,7 +794,6 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi
* set of buckets) for a given number of points.
*/
static int secp256k1_pippenger_bucket_window(size_t n) {
-#ifdef USE_ENDOMORPHISM
if (n <= 1) {
return 1;
} else if (n <= 4) {
@@ -892,33 +817,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
} else {
return PIPPENGER_MAX_BUCKET_WINDOW;
}
-#else
- if (n <= 1) {
- return 1;
- } else if (n <= 11) {
- return 2;
- } else if (n <= 45) {
- return 3;
- } else if (n <= 100) {
- return 4;
- } else if (n <= 275) {
- return 5;
- } else if (n <= 625) {
- return 6;
- } else if (n <= 1850) {
- return 7;
- } else if (n <= 3400) {
- return 8;
- } else if (n <= 9630) {
- return 9;
- } else if (n <= 17900) {
- return 10;
- } else if (n <= 32800) {
- return 11;
- } else {
- return PIPPENGER_MAX_BUCKET_WINDOW;
- }
-#endif
}
/**
@@ -926,7 +824,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
*/
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
switch(bucket_window) {
-#ifdef USE_ENDOMORPHISM
case 1: return 1;
case 2: return 4;
case 3: return 20;
@@ -939,26 +836,11 @@ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
case 10: return 7880;
case 11: return 16050;
case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
-#else
- case 1: return 1;
- case 2: return 11;
- case 3: return 45;
- case 4: return 100;
- case 5: return 275;
- case 6: return 625;
- case 7: return 1850;
- case 8: return 3400;
- case 9: return 9630;
- case 10: return 17900;
- case 11: return 32800;
- case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
-#endif
}
return 0;
}
-#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
secp256k1_scalar tmp = *s1;
secp256k1_scalar_split_lambda(s1, s2, &tmp);
@@ -973,32 +855,23 @@ SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, s
secp256k1_ge_neg(p2, p2);
}
}
-#endif
/**
* Returns the scratch size required for a given number of points (excluding
* base point G) without considering alignment.
*/
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
-#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
-#else
- size_t entries = n_points + 1;
-#endif
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
- /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
+ /* Use 2(n+1) with the endomorphism when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
-#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
-#else
- size_t entries = n_points + 1;
-#endif
secp256k1_ge *points;
secp256k1_scalar *scalars;
secp256k1_gej *buckets;
@@ -1035,10 +908,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
scalars[0] = *inp_g_sc;
points[0] = secp256k1_ge_const_g;
idx++;
-#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
idx++;
-#endif
}
while (point_idx < n_points) {
@@ -1047,10 +918,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
return 0;
}
idx++;
-#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
idx++;
-#endif
point_idx++;
}
@@ -1093,9 +962,7 @@ static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_cal
size_t space_overhead;
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
-#ifdef USE_ENDOMORPHISM
entry_size = 2*entry_size;
-#endif
space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
if (space_overhead > max_alloc) {
break;
diff --git a/src/secp256k1/src/field.h b/src/secp256k1/src/field.h
index 7993a1f11e..aca1fb72c5 100644
--- a/src/secp256k1/src/field.h
+++ b/src/secp256k1/src/field.h
@@ -22,16 +22,16 @@
#include "libsecp256k1-config.h"
#endif
-#if defined(USE_FIELD_10X26)
-#include "field_10x26.h"
-#elif defined(USE_FIELD_5X52)
+#include "util.h"
+
+#if defined(SECP256K1_WIDEMUL_INT128)
#include "field_5x52.h"
+#elif defined(SECP256K1_WIDEMUL_INT64)
+#include "field_10x26.h"
#else
-#error "Please select field implementation"
+#error "Please select wide multiplication implementation"
#endif
-#include "util.h"
-
/** Normalize a field element. This brings the field element to a canonical representation, reduces
* its magnitude to 1, and reduces it modulo field size `p`.
*/
diff --git a/src/secp256k1/src/field_5x52.h b/src/secp256k1/src/field_5x52.h
index fc5bfe357e..6a068484c2 100644
--- a/src/secp256k1/src/field_5x52.h
+++ b/src/secp256k1/src/field_5x52.h
@@ -46,4 +46,10 @@ typedef struct {
(d6) | (((uint64_t)(d7)) << 32) \
}}
+#define SECP256K1_FE_STORAGE_CONST_GET(d) \
+ (uint32_t)(d.n[3] >> 32), (uint32_t)d.n[3], \
+ (uint32_t)(d.n[2] >> 32), (uint32_t)d.n[2], \
+ (uint32_t)(d.n[1] >> 32), (uint32_t)d.n[1], \
+ (uint32_t)(d.n[0] >> 32), (uint32_t)d.n[0]
+
#endif /* SECP256K1_FIELD_REPR_H */
diff --git a/src/secp256k1/src/field_impl.h b/src/secp256k1/src/field_impl.h
index 485921a60e..18e4d2f30e 100644
--- a/src/secp256k1/src/field_impl.h
+++ b/src/secp256k1/src/field_impl.h
@@ -14,12 +14,12 @@
#include "util.h"
#include "num.h"
-#if defined(USE_FIELD_10X26)
-#include "field_10x26_impl.h"
-#elif defined(USE_FIELD_5X52)
+#if defined(SECP256K1_WIDEMUL_INT128)
#include "field_5x52_impl.h"
+#elif defined(SECP256K1_WIDEMUL_INT64)
+#include "field_10x26_impl.h"
#else
-#error "Please select field implementation"
+#error "Please select wide multiplication implementation"
#endif
SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
diff --git a/src/secp256k1/src/gen_context.c b/src/secp256k1/src/gen_context.c
index 539f574bfd..8b7729aee4 100644
--- a/src/secp256k1/src/gen_context.c
+++ b/src/secp256k1/src/gen_context.c
@@ -13,6 +13,7 @@
#include "basic-config.h"
#include "include/secp256k1.h"
+#include "assumptions.h"
#include "util.h"
#include "field_impl.h"
#include "scalar_impl.h"
diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h
index 863644f0f0..36e39ecf0f 100644
--- a/src/secp256k1/src/group.h
+++ b/src/secp256k1/src/group.h
@@ -59,6 +59,7 @@ static int secp256k1_ge_is_infinity(const secp256k1_ge *a);
/** Check whether a group element is valid (i.e., on the curve). */
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a);
+/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
/** Set a group element equal to another which is given in jacobian coordinates */
@@ -95,8 +96,8 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej *a);
/** Check whether a group element's y coordinate is a quadratic residue. */
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a);
-/** Set r equal to the double of a, a cannot be infinity. Constant time. */
-static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a);
+/** Set r equal to the double of a. Constant time. */
+static void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a);
/** Set r equal to the double of a. If rzr is not-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
@@ -115,10 +116,8 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
-#ifdef USE_ENDOMORPHISM
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
-#endif
/** Clear a secp256k1_gej to prevent leaking sensitive information. */
static void secp256k1_gej_clear(secp256k1_gej *r);
@@ -138,4 +137,15 @@ static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_g
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b);
+/** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve.
+ *
+ * In normal mode, the used group is secp256k1, which has cofactor=1 meaning that every point on the curve is in the
+ * group, and this function always returns true.
+ *
+ * When compiling in exhaustive test mode, a slightly different curve equation is used, leading to a group with a
+ * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this
+ * function checks whether a point that is on the curve is in fact also in that subgroup.
+ */
+static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge);
+
#endif /* SECP256K1_GROUP_H */
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index 43b039becf..a5fbc91a0f 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -11,49 +11,38 @@
#include "field.h"
#include "group.h"
-/* These points can be generated in sage as follows:
+/* These exhaustive group test orders and generators are chosen such that:
+ * - The field size is equal to that of secp256k1, so field code is the same.
+ * - The curve equation is of the form y^2=x^3+B for some constant B.
+ * - The subgroup has a generator 2*P, where P.x=1.
+ * - The subgroup has size less than 1000 to permit exhaustive testing.
+ * - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y).
*
- * 0. Setup a worksheet with the following parameters.
- * b = 4 # whatever CURVE_B will be set to
- * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
- * C = EllipticCurve ([F (0), F (b)])
- *
- * 1. Determine all the small orders available to you. (If there are
- * no satisfactory ones, go back and change b.)
- * print C.order().factor(limit=1000)
- *
- * 2. Choose an order as one of the prime factors listed in the above step.
- * (You can also multiply some to get a composite order, though the
- * tests will crash trying to invert scalars during signing.) We take a
- * random point and scale it to drop its order to the desired value.
- * There is some probability this won't work; just try again.
- * order = 199
- * P = C.random_point()
- * P = (int(P.order()) / int(order)) * P
- * assert(P.order() == order)
- *
- * 3. Print the values. You'll need to use a vim macro or something to
- * split the hex output into 4-byte chunks.
- * print "%x %x" % P.xy()
+ * These parameters are generated using sage/gen_exhaustive_groups.sage.
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
-# if EXHAUSTIVE_TEST_ORDER == 199
+# if EXHAUSTIVE_TEST_ORDER == 13
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
- 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
- 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
- 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
- 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
+ 0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f,
+ 0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb,
+ 0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2,
+ 0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24
);
-
-static const int CURVE_B = 4;
-# elif EXHAUSTIVE_TEST_ORDER == 13
+static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(
+ 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc,
+ 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417
+);
+# elif EXHAUSTIVE_TEST_ORDER == 199
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
- 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
- 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
- 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
- 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
+ 0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9,
+ 0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18,
+ 0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c,
+ 0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae
+);
+static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(
+ 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1,
+ 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb
);
-static const int CURVE_B = 2;
# else
# error No known generator for the specified exhaustive test group order.
# endif
@@ -68,7 +57,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
-static const int CURVE_B = 7;
+static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
#endif
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
@@ -219,14 +208,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) {
}
static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
- secp256k1_fe x2, x3, c;
+ secp256k1_fe x2, x3;
r->x = *x;
secp256k1_fe_sqr(&x2, x);
secp256k1_fe_mul(&x3, x, &x2);
r->infinity = 0;
- secp256k1_fe_set_int(&c, CURVE_B);
- secp256k1_fe_add(&c, &x3);
- return secp256k1_fe_sqrt(&r->y, &c);
+ secp256k1_fe_add(&x3, &secp256k1_fe_const_b);
+ return secp256k1_fe_sqrt(&r->y, &x3);
}
static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
@@ -269,41 +257,20 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
return a->infinity;
}
-static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
- secp256k1_fe y2, x3, z2, z6;
- if (a->infinity) {
- return 0;
- }
- /** y^2 = x^3 + 7
- * (Y/Z^3)^2 = (X/Z^2)^3 + 7
- * Y^2 / Z^6 = X^3 / Z^6 + 7
- * Y^2 = X^3 + 7*Z^6
- */
- secp256k1_fe_sqr(&y2, &a->y);
- secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
- secp256k1_fe_sqr(&z2, &a->z);
- secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
- secp256k1_fe_mul_int(&z6, CURVE_B);
- secp256k1_fe_add(&x3, &z6);
- secp256k1_fe_normalize_weak(&x3);
- return secp256k1_fe_equal_var(&y2, &x3);
-}
-
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
- secp256k1_fe y2, x3, c;
+ secp256k1_fe y2, x3;
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
- secp256k1_fe_set_int(&c, CURVE_B);
- secp256k1_fe_add(&x3, &c);
+ secp256k1_fe_add(&x3, &secp256k1_fe_const_b);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
}
-static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a) {
+static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a) {
/* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
*
* Note that there is an implementation described at
@@ -313,8 +280,7 @@ static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, cons
*/
secp256k1_fe t1,t2,t3,t4;
- VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
- r->infinity = 0;
+ r->infinity = a->infinity;
secp256k1_fe_mul(&r->z, &a->z, &a->y);
secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */
@@ -363,7 +329,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
secp256k1_fe_mul_int(rzr, 2);
}
- secp256k1_gej_double_nonzero(r, a);
+ secp256k1_gej_double(r, a);
}
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
@@ -400,7 +366,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 0);
}
- r->infinity = 1;
+ secp256k1_gej_set_infinity(r);
}
return;
}
@@ -450,7 +416,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 0);
}
- r->infinity = 1;
+ secp256k1_gej_set_infinity(r);
}
return;
}
@@ -509,7 +475,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, NULL);
} else {
- r->infinity = 1;
+ secp256k1_gej_set_infinity(r);
}
return;
}
@@ -680,7 +646,6 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r,
secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}
-#ifdef USE_ENDOMORPHISM
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
static const secp256k1_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
@@ -689,7 +654,6 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
*r = *a;
secp256k1_fe_mul(&r->x, &r->x, &beta);
}
-#endif
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
secp256k1_fe yz;
@@ -705,4 +669,25 @@ static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
return secp256k1_fe_is_quad_var(&yz);
}
+static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
+#ifdef EXHAUSTIVE_TEST_ORDER
+ secp256k1_gej out;
+ int i;
+
+ /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
+ secp256k1_gej_set_infinity(&out);
+ for (i = 0; i < 32; ++i) {
+ secp256k1_gej_double_var(&out, &out, NULL);
+ if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) {
+ secp256k1_gej_add_ge_var(&out, &out, ge, NULL);
+ }
+ }
+ return secp256k1_gej_is_infinity(&out);
+#else
+ (void)ge;
+ /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
+ return 1;
+#endif
+}
+
#endif /* SECP256K1_GROUP_IMPL_H */
diff --git a/src/secp256k1/src/hash_impl.h b/src/secp256k1/src/hash_impl.h
index 782f97216c..409772587b 100644
--- a/src/secp256k1/src/hash_impl.h
+++ b/src/secp256k1/src/hash_impl.h
@@ -8,6 +8,7 @@
#define SECP256K1_HASH_IMPL_H
#include "hash.h"
+#include "util.h"
#include <stdlib.h>
#include <stdint.h>
@@ -27,9 +28,9 @@
(h) = t1 + t2; \
} while(0)
-#ifdef WORDS_BIGENDIAN
+#if defined(SECP256K1_BIG_ENDIAN)
#define BE32(x) (x)
-#else
+#elif defined(SECP256K1_LITTLE_ENDIAN)
#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
#endif
@@ -163,6 +164,19 @@ static void secp256k1_sha256_finalize(secp256k1_sha256 *hash, unsigned char *out
memcpy(out32, (const unsigned char*)out, 32);
}
+/* Initializes a sha256 struct and writes the 64 byte string
+ * SHA256(tag)||SHA256(tag) into it. */
+static void secp256k1_sha256_initialize_tagged(secp256k1_sha256 *hash, const unsigned char *tag, size_t taglen) {
+ unsigned char buf[32];
+ secp256k1_sha256_initialize(hash);
+ secp256k1_sha256_write(hash, tag, taglen);
+ secp256k1_sha256_finalize(hash, buf);
+
+ secp256k1_sha256_initialize(hash);
+ secp256k1_sha256_write(hash, buf, 32);
+ secp256k1_sha256_write(hash, buf, 32);
+}
+
static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256 *hash, const unsigned char *key, size_t keylen) {
size_t n;
unsigned char rkey[64];
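The tagged midstate added above matches the tagged-hash construction used by BIP 340: hashing through it is equivalent to computing SHA256(SHA256(tag) || SHA256(tag) || msg). A minimal test-style sketch of that equivalence, assuming the CHECK macro and secp256k1_memcmp_var from the test harness; the helper name check_tagged_equivalence is hypothetical and not part of the patch:

static void check_tagged_equivalence(const unsigned char *tag, size_t taglen,
                                     const unsigned char *msg, size_t msglen) {
    secp256k1_sha256 sha;
    unsigned char tag_hash[32], out_tagged[32], out_plain[32];

    /* Tagged path: the midstate already contains SHA256(tag) || SHA256(tag). */
    secp256k1_sha256_initialize_tagged(&sha, tag, taglen);
    secp256k1_sha256_write(&sha, msg, msglen);
    secp256k1_sha256_finalize(&sha, out_tagged);

    /* Plain path: hash the tag once, then feed the 64-byte prefix explicitly. */
    secp256k1_sha256_initialize(&sha);
    secp256k1_sha256_write(&sha, tag, taglen);
    secp256k1_sha256_finalize(&sha, tag_hash);
    secp256k1_sha256_initialize(&sha);
    secp256k1_sha256_write(&sha, tag_hash, 32);
    secp256k1_sha256_write(&sha, tag_hash, 32);
    secp256k1_sha256_write(&sha, msg, msglen);
    secp256k1_sha256_finalize(&sha, out_plain);

    CHECK(secp256k1_memcmp_var(out_tagged, out_plain, 32) == 0);
}

Because SHA256(tag) || SHA256(tag) is exactly 64 bytes (one SHA-256 block), a caller that hashes many messages under the same tag can compute this midstate once and reuse it instead of rehashing the prefix each time.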
diff --git a/src/secp256k1/src/modules/ecdh/tests_impl.h b/src/secp256k1/src/modules/ecdh/tests_impl.h
index fe26e8fb69..e8d2aeab9a 100644
--- a/src/secp256k1/src/modules/ecdh/tests_impl.h
+++ b/src/secp256k1/src/modules/ecdh/tests_impl.h
@@ -80,7 +80,7 @@ void test_ecdh_generator_basepoint(void) {
/* compute "explicitly" */
CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
/* compare */
- CHECK(memcmp(output_ecdh, point_ser, 65) == 0);
+ CHECK(secp256k1_memcmp_var(output_ecdh, point_ser, 65) == 0);
/* compute using ECDH function with default hash function */
CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
@@ -90,7 +90,7 @@ void test_ecdh_generator_basepoint(void) {
secp256k1_sha256_write(&sha, point_ser, point_ser_len);
secp256k1_sha256_finalize(&sha, output_ser);
/* compare */
- CHECK(memcmp(output_ecdh, output_ser, 32) == 0);
+ CHECK(secp256k1_memcmp_var(output_ecdh, output_ser, 32) == 0);
}
}
diff --git a/src/secp256k1/src/modules/extrakeys/Makefile.am.include b/src/secp256k1/src/modules/extrakeys/Makefile.am.include
new file mode 100644
index 0000000000..0d901ec1f4
--- /dev/null
+++ b/src/secp256k1/src/modules/extrakeys/Makefile.am.include
@@ -0,0 +1,4 @@
+include_HEADERS += include/secp256k1_extrakeys.h
+noinst_HEADERS += src/modules/extrakeys/tests_impl.h
+noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h
+noinst_HEADERS += src/modules/extrakeys/main_impl.h
diff --git a/src/secp256k1/src/modules/extrakeys/main_impl.h b/src/secp256k1/src/modules/extrakeys/main_impl.h
new file mode 100644
index 0000000000..5378d2f301
--- /dev/null
+++ b/src/secp256k1/src/modules/extrakeys/main_impl.h
@@ -0,0 +1,251 @@
+/**********************************************************************
+ * Copyright (c) 2020 Jonas Nick *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_EXTRAKEYS_MAIN_
+#define _SECP256K1_MODULE_EXTRAKEYS_MAIN_
+
+#include "include/secp256k1.h"
+#include "include/secp256k1_extrakeys.h"
+
+static SECP256K1_INLINE int secp256k1_xonly_pubkey_load(const secp256k1_context* ctx, secp256k1_ge *ge, const secp256k1_xonly_pubkey *pubkey) {
+ return secp256k1_pubkey_load(ctx, ge, (const secp256k1_pubkey *) pubkey);
+}
+
+static SECP256K1_INLINE void secp256k1_xonly_pubkey_save(secp256k1_xonly_pubkey *pubkey, secp256k1_ge *ge) {
+ secp256k1_pubkey_save((secp256k1_pubkey *) pubkey, ge);
+}
+
+int secp256k1_xonly_pubkey_parse(const secp256k1_context* ctx, secp256k1_xonly_pubkey *pubkey, const unsigned char *input32) {
+ secp256k1_ge pk;
+ secp256k1_fe x;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+ memset(pubkey, 0, sizeof(*pubkey));
+ ARG_CHECK(input32 != NULL);
+
+ if (!secp256k1_fe_set_b32(&x, input32)) {
+ return 0;
+ }
+ if (!secp256k1_ge_set_xo_var(&pk, &x, 0)) {
+ return 0;
+ }
+ if (!secp256k1_ge_is_in_correct_subgroup(&pk)) {
+ return 0;
+ }
+ secp256k1_xonly_pubkey_save(pubkey, &pk);
+ return 1;
+}
+
+int secp256k1_xonly_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output32, const secp256k1_xonly_pubkey *pubkey) {
+ secp256k1_ge pk;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output32 != NULL);
+ memset(output32, 0, 32);
+ ARG_CHECK(pubkey != NULL);
+
+ if (!secp256k1_xonly_pubkey_load(ctx, &pk, pubkey)) {
+ return 0;
+ }
+ secp256k1_fe_get_b32(output32, &pk.x);
+ return 1;
+}
+
+/** Keeps a group element as is if it has an even Y and otherwise negates it.
+ * y_parity is set to 0 in the former case and to 1 in the latter case.
+ * Requires that the coordinates of r are normalized. */
+static int secp256k1_extrakeys_ge_even_y(secp256k1_ge *r) {
+ int y_parity = 0;
+ VERIFY_CHECK(!secp256k1_ge_is_infinity(r));
+
+ if (secp256k1_fe_is_odd(&r->y)) {
+ secp256k1_fe_negate(&r->y, &r->y, 1);
+ y_parity = 1;
+ }
+ return y_parity;
+}
+
+int secp256k1_xonly_pubkey_from_pubkey(const secp256k1_context* ctx, secp256k1_xonly_pubkey *xonly_pubkey, int *pk_parity, const secp256k1_pubkey *pubkey) {
+ secp256k1_ge pk;
+ int tmp;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(xonly_pubkey != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ if (!secp256k1_pubkey_load(ctx, &pk, pubkey)) {
+ return 0;
+ }
+ tmp = secp256k1_extrakeys_ge_even_y(&pk);
+ if (pk_parity != NULL) {
+ *pk_parity = tmp;
+ }
+ secp256k1_xonly_pubkey_save(xonly_pubkey, &pk);
+ return 1;
+}
+
+int secp256k1_xonly_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *output_pubkey, const secp256k1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) {
+ secp256k1_ge pk;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output_pubkey != NULL);
+ memset(output_pubkey, 0, sizeof(*output_pubkey));
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(internal_pubkey != NULL);
+ ARG_CHECK(tweak32 != NULL);
+
+ if (!secp256k1_xonly_pubkey_load(ctx, &pk, internal_pubkey)
+ || !secp256k1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) {
+ return 0;
+ }
+ secp256k1_pubkey_save(output_pubkey, &pk);
+ return 1;
+}
+
+int secp256k1_xonly_pubkey_tweak_add_check(const secp256k1_context* ctx, const unsigned char *tweaked_pubkey32, int tweaked_pk_parity, const secp256k1_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) {
+ secp256k1_ge pk;
+ unsigned char pk_expected32[32];
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(internal_pubkey != NULL);
+ ARG_CHECK(tweaked_pubkey32 != NULL);
+ ARG_CHECK(tweak32 != NULL);
+
+ if (!secp256k1_xonly_pubkey_load(ctx, &pk, internal_pubkey)
+ || !secp256k1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32)) {
+ return 0;
+ }
+ secp256k1_fe_normalize_var(&pk.x);
+ secp256k1_fe_normalize_var(&pk.y);
+ secp256k1_fe_get_b32(pk_expected32, &pk.x);
+
+ return secp256k1_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0
+ && secp256k1_fe_is_odd(&pk.y) == tweaked_pk_parity;
+}
+
+static void secp256k1_keypair_save(secp256k1_keypair *keypair, const secp256k1_scalar *sk, secp256k1_ge *pk) {
+ secp256k1_scalar_get_b32(&keypair->data[0], sk);
+ secp256k1_pubkey_save((secp256k1_pubkey *)&keypair->data[32], pk);
+}
+
+
+static int secp256k1_keypair_seckey_load(const secp256k1_context* ctx, secp256k1_scalar *sk, const secp256k1_keypair *keypair) {
+ int ret;
+
+ ret = secp256k1_scalar_set_b32_seckey(sk, &keypair->data[0]);
+ /* We can declassify ret here because sk is only zero if a keypair function
+ * failed (which zeroes the keypair) and its return value is ignored. */
+ secp256k1_declassify(ctx, &ret, sizeof(ret));
+ ARG_CHECK(ret);
+ return ret;
+}
+
+/* Load a keypair into pk and sk (if non-NULL). This function declassifies pk
+ * and ARG_CHECKs that the keypair is not invalid. It always initializes sk and
+ * pk with dummy values. */
+static int secp256k1_keypair_load(const secp256k1_context* ctx, secp256k1_scalar *sk, secp256k1_ge *pk, const secp256k1_keypair *keypair) {
+ int ret;
+ const secp256k1_pubkey *pubkey = (const secp256k1_pubkey *)&keypair->data[32];
+
+ /* Need to declassify the pubkey because pubkey_load ARG_CHECKs if it's
+ * invalid. */
+ secp256k1_declassify(ctx, pubkey, sizeof(*pubkey));
+ ret = secp256k1_pubkey_load(ctx, pk, pubkey);
+ if (sk != NULL) {
+ ret = ret && secp256k1_keypair_seckey_load(ctx, sk, keypair);
+ }
+ if (!ret) {
+ *pk = secp256k1_ge_const_g;
+ if (sk != NULL) {
+ *sk = secp256k1_scalar_one;
+ }
+ }
+ return ret;
+}
+
+int secp256k1_keypair_create(const secp256k1_context* ctx, secp256k1_keypair *keypair, const unsigned char *seckey32) {
+ secp256k1_scalar sk;
+ secp256k1_ge pk;
+ int ret = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(keypair != NULL);
+ memset(keypair, 0, sizeof(*keypair));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(seckey32 != NULL);
+
+ ret = secp256k1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &sk, &pk, seckey32);
+ secp256k1_keypair_save(keypair, &sk, &pk);
+ memczero(keypair, sizeof(*keypair), !ret);
+
+ secp256k1_scalar_clear(&sk);
+ return ret;
+}
+
+int secp256k1_keypair_pub(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_keypair *keypair) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+ memset(pubkey, 0, sizeof(*pubkey));
+ ARG_CHECK(keypair != NULL);
+
+ memcpy(pubkey->data, &keypair->data[32], sizeof(*pubkey));
+ return 1;
+}
+
+int secp256k1_keypair_xonly_pub(const secp256k1_context* ctx, secp256k1_xonly_pubkey *pubkey, int *pk_parity, const secp256k1_keypair *keypair) {
+ secp256k1_ge pk;
+ int tmp;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+ memset(pubkey, 0, sizeof(*pubkey));
+ ARG_CHECK(keypair != NULL);
+
+ if (!secp256k1_keypair_load(ctx, NULL, &pk, keypair)) {
+ return 0;
+ }
+ tmp = secp256k1_extrakeys_ge_even_y(&pk);
+ if (pk_parity != NULL) {
+ *pk_parity = tmp;
+ }
+ secp256k1_xonly_pubkey_save(pubkey, &pk);
+
+ return 1;
+}
+
+int secp256k1_keypair_xonly_tweak_add(const secp256k1_context* ctx, secp256k1_keypair *keypair, const unsigned char *tweak32) {
+ secp256k1_ge pk;
+ secp256k1_scalar sk;
+ int y_parity;
+ int ret;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(keypair != NULL);
+ ARG_CHECK(tweak32 != NULL);
+
+ ret = secp256k1_keypair_load(ctx, &sk, &pk, keypair);
+ memset(keypair, 0, sizeof(*keypair));
+
+ y_parity = secp256k1_extrakeys_ge_even_y(&pk);
+ if (y_parity == 1) {
+ secp256k1_scalar_negate(&sk, &sk);
+ }
+
+ ret &= secp256k1_ec_seckey_tweak_add_helper(&sk, tweak32);
+ ret &= secp256k1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &pk, tweak32);
+
+ secp256k1_declassify(ctx, &ret, sizeof(ret));
+ if (ret) {
+ secp256k1_keypair_save(keypair, &sk, &pk);
+ }
+
+ secp256k1_scalar_clear(&sk);
+ return ret;
+}
+
+#endif
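Taken together, the functions above compose into a taproot-style tweak flow. A minimal sketch, not part of the patch: taproot_tweak_example and its arguments are hypothetical, and ctx is assumed to have been created with SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY so that both the ecmult_gen and ecmult contexts are built.

static int taproot_tweak_example(const secp256k1_context *ctx,
                                 const unsigned char *sk32,
                                 const unsigned char *tweak32) {
    secp256k1_keypair keypair;
    secp256k1_xonly_pubkey internal_pk, output_xonly;
    secp256k1_pubkey output_pk;
    unsigned char output_ser[32];
    int pk_parity;

    if (!secp256k1_keypair_create(ctx, &keypair, sk32)) return 0;
    /* Internal (untweaked) x-only public key; the parity output is optional. */
    if (!secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair)) return 0;
    /* Tweak the public key; the result is a full pubkey whose parity matters. */
    if (!secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_pk, tweak32)) return 0;
    if (!secp256k1_xonly_pubkey_from_pubkey(ctx, &output_xonly, &pk_parity, &output_pk)) return 0;
    if (!secp256k1_xonly_pubkey_serialize(ctx, output_ser, &output_xonly)) return 0;
    /* Anyone holding the serialized output key, its parity, the internal key and
     * the tweak can re-verify the relation without the secret key. */
    return secp256k1_xonly_pubkey_tweak_add_check(ctx, output_ser, pk_parity,
                                                  &internal_pk, tweak32);
}

When the holder of the keypair also needs to sign for the tweaked key, secp256k1_keypair_xonly_tweak_add (above) applies the same tweak to the secret and public halves in one step.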
diff --git a/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
new file mode 100644
index 0000000000..0e29bc6b09
--- /dev/null
+++ b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
@@ -0,0 +1,68 @@
+/**********************************************************************
+ * Copyright (c) 2020 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
+#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
+
+#include "src/modules/extrakeys/main_impl.h"
+#include "include/secp256k1_extrakeys.h"
+
+static void test_exhaustive_extrakeys(const secp256k1_context *ctx, const secp256k1_ge* group) {
+ secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
+ secp256k1_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1];
+ secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
+ int parities[EXHAUSTIVE_TEST_ORDER - 1];
+ unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
+ int i;
+
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ secp256k1_fe fe;
+ secp256k1_scalar scalar_i;
+ unsigned char buf[33];
+ int parity;
+
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_scalar_get_b32(buf, &scalar_i);
+
+ /* Construct pubkey and keypair. */
+ CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey[i - 1], buf));
+
+ /* Construct serialized xonly_pubkey from keypair. */
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1]));
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
+
+ /* Parse the xonly_pubkey back and verify it matches the previously serialized value. */
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1]));
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
+ CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
+
+ /* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1]));
+ CHECK(parity == parities[i - 1]);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
+ CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
+
+ /* Compare the xonly_pubkey bytes against the precomputed group. */
+ secp256k1_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]);
+ CHECK(secp256k1_fe_equal_var(&fe, &group[i].x));
+
+ /* Check the parity against the precomputed group. */
+ fe = group[i].y;
+ secp256k1_fe_normalize_var(&fe);
+ CHECK(secp256k1_fe_is_odd(&fe) == parities[i - 1]);
+
+ /* Verify that the x-only keys in the higher half of the group mirror those in the lower half (same x, opposite parity). */
+ if (i > EXHAUSTIVE_TEST_ORDER / 2) {
+ CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0);
+ CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]);
+ }
+ }
+
+ /* TODO: keypair/xonly_pubkey tweak tests */
+}
+
+#endif
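The mirror check above rests on the fact that d*G and (n-d)*G share the same x coordinate and differ only in the parity of y. A standalone sketch of that property using the public API, under the assumptions that a combined sign/verify context is acceptable, the input is a valid secret key, and the helper name xonly_negation_demo is hypothetical:

    #include <string.h>
    #include <secp256k1.h>
    #include <secp256k1_extrakeys.h>

    /* Show that sk and -sk map to the same x-only public key with opposite parity. */
    int xonly_negation_demo(const unsigned char seckey[32]) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char seckey_neg[32];
        unsigned char ser[32], ser_neg[32];
        secp256k1_keypair kp, kp_neg;
        secp256k1_xonly_pubkey pk, pk_neg;
        int parity, parity_neg;
        int ok;

        memcpy(seckey_neg, seckey, 32);
        ok = secp256k1_ec_seckey_negate(ctx, seckey_neg);
        ok &= secp256k1_keypair_create(ctx, &kp, seckey);
        ok &= secp256k1_keypair_create(ctx, &kp_neg, seckey_neg);
        ok &= secp256k1_keypair_xonly_pub(ctx, &pk, &parity, &kp);
        ok &= secp256k1_keypair_xonly_pub(ctx, &pk_neg, &parity_neg, &kp_neg);
        ok &= secp256k1_xonly_pubkey_serialize(ctx, ser, &pk);
        ok &= secp256k1_xonly_pubkey_serialize(ctx, ser_neg, &pk_neg);
        /* Same x coordinate, opposite y parity. */
        ok &= (memcmp(ser, ser_neg, 32) == 0) && (parity == 1 - parity_neg);
        secp256k1_context_destroy(ctx);
        return ok;
    }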
diff --git a/src/secp256k1/src/modules/extrakeys/tests_impl.h b/src/secp256k1/src/modules/extrakeys/tests_impl.h
new file mode 100644
index 0000000000..5ee135849e
--- /dev/null
+++ b/src/secp256k1/src/modules/extrakeys/tests_impl.h
@@ -0,0 +1,524 @@
+/**********************************************************************
+ * Copyright (c) 2020 Jonas Nick *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_
+#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_
+
+#include "secp256k1_extrakeys.h"
+
+static secp256k1_context* api_test_context(int flags, int *ecount) {
+ secp256k1_context *ctx0 = secp256k1_context_create(flags);
+ secp256k1_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount);
+ secp256k1_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount);
+ return ctx0;
+}
+
+void test_xonly_pubkey(void) {
+ secp256k1_pubkey pk;
+ secp256k1_xonly_pubkey xonly_pk, xonly_pk_tmp;
+ secp256k1_ge pk1;
+ secp256k1_ge pk2;
+ secp256k1_fe y;
+ unsigned char sk[32];
+ unsigned char xy_sk[32];
+ unsigned char buf32[32];
+ unsigned char ones32[32];
+ unsigned char zeros64[64] = { 0 };
+ int pk_parity;
+ int i;
+
+ int ecount;
+ secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
+ secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
+ secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
+
+ secp256k1_testrand256(sk);
+ memset(ones32, 0xFF, 32);
+ secp256k1_testrand256(xy_sk);
+ CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
+
+ /* Test xonly_pubkey_from_pubkey */
+ ecount = 0;
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(sign, &xonly_pk, &pk_parity, &pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(verify, &xonly_pk, &pk_parity, &pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, NULL, &pk_parity, &pk) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, NULL) == 0);
+ CHECK(ecount == 2);
+ memset(&pk, 0, sizeof(pk));
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 0);
+ CHECK(ecount == 3);
+
+ /* Choose a secret key such that the resulting pubkey and xonly_pubkey match. */
+ memset(sk, 0, sizeof(sk));
+ sk[0] = 1;
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
+ CHECK(secp256k1_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0);
+ CHECK(pk_parity == 0);
+
+ /* Choose a secret key such that pubkey and xonly_pubkey are each other's
+ * negation. */
+ sk[0] = 2;
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
+ CHECK(pk_parity == 1);
+ secp256k1_pubkey_load(ctx, &pk1, &pk);
+ secp256k1_pubkey_load(ctx, &pk2, (secp256k1_pubkey *) &xonly_pk);
+ CHECK(secp256k1_fe_equal(&pk1.x, &pk2.x) == 1);
+ secp256k1_fe_negate(&y, &pk2.y, 1);
+ CHECK(secp256k1_fe_equal(&pk1.y, &y) == 1);
+
+ /* Test xonly_pubkey_serialize and xonly_pubkey_parse */
+ ecount = 0;
+ CHECK(secp256k1_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_xonly_pubkey_serialize(none, buf32, NULL) == 0);
+ CHECK(secp256k1_memcmp_var(buf32, zeros64, 32) == 0);
+ CHECK(ecount == 2);
+ {
+ /* A pubkey filled with 0s will fail to serialize due to pubkey_load
+ * special casing. */
+ secp256k1_xonly_pubkey pk_tmp;
+ memset(&pk_tmp, 0, sizeof(pk_tmp));
+ CHECK(secp256k1_xonly_pubkey_serialize(none, buf32, &pk_tmp) == 0);
+ }
+ /* pubkey_load called illegal callback */
+ CHECK(ecount == 3);
+
+ CHECK(secp256k1_xonly_pubkey_serialize(none, buf32, &xonly_pk) == 1);
+ ecount = 0;
+ CHECK(secp256k1_xonly_pubkey_parse(none, NULL, buf32) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, NULL) == 0);
+ CHECK(ecount == 2);
+
+ /* Serialization and parse roundtrip */
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
+
+ /* Test parsing invalid field elements */
+ memset(&xonly_pk, 1, sizeof(xonly_pk));
+ /* Overflowing field element */
+ CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
+ memset(&xonly_pk, 1, sizeof(xonly_pk));
+ /* There's no point with x-coordinate 0 on secp256k1 */
+ CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
+ /* If a random 32-byte string cannot be parsed with ec_pubkey_parse
+ * (because, interpreted as an X coordinate, it does not correspond to a
+ * point on the curve), then xonly_pubkey_parse should fail as well. */
+ for (i = 0; i < count; i++) {
+ unsigned char rand33[33];
+ secp256k1_testrand256(&rand33[1]);
+ rand33[0] = SECP256K1_TAG_PUBKEY_EVEN;
+ if (!secp256k1_ec_pubkey_parse(ctx, &pk, rand33, 33)) {
+ memset(&xonly_pk, 1, sizeof(xonly_pk));
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
+ } else {
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1);
+ }
+ }
+ CHECK(ecount == 2);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(verify);
+}
+
+void test_xonly_pubkey_tweak(void) {
+ unsigned char zeros64[64] = { 0 };
+ unsigned char overflows[32];
+ unsigned char sk[32];
+ secp256k1_pubkey internal_pk;
+ secp256k1_xonly_pubkey internal_xonly_pk;
+ secp256k1_pubkey output_pk;
+ int pk_parity;
+ unsigned char tweak[32];
+ int i;
+
+ int ecount;
+ secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
+ secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
+ secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
+
+ memset(overflows, 0xff, sizeof(overflows));
+ secp256k1_testrand256(tweak);
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
+
+ ecount = 0;
+ CHECK(secp256k1_xonly_pubkey_tweak_add(none, &output_pk, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(sign, &output_pk, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, NULL, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0);
+ CHECK(ecount == 4);
+ /* NULL internal_xonly_pk zeroes the output_pk */
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0);
+ CHECK(ecount == 5);
+ /* NULL tweak zeroes the output_pk */
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
+
+ /* Invalid tweak zeroes the output_pk */
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
+
+ /* A zero tweak is fine */
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1);
+
+ /* Fails if the resulting key was infinity */
+ for (i = 0; i < count; i++) {
+ secp256k1_scalar scalar_tweak;
+ /* Because sk may be negated before adding, we need to try with tweak =
+ * sk as well as tweak = -sk. */
+ secp256k1_scalar_set_b32(&scalar_tweak, sk, NULL);
+ secp256k1_scalar_negate(&scalar_tweak, &scalar_tweak);
+ secp256k1_scalar_get_b32(tweak, &scalar_tweak);
+ CHECK((secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0)
+ || (secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0));
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ }
+
+ /* Invalid pk with a valid tweak */
+ memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk));
+ secp256k1_testrand256(tweak);
+ ecount = 0;
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(verify);
+}
+
+void test_xonly_pubkey_tweak_check(void) {
+ unsigned char zeros64[64] = { 0 };
+ unsigned char overflows[32];
+ unsigned char sk[32];
+ secp256k1_pubkey internal_pk;
+ secp256k1_xonly_pubkey internal_xonly_pk;
+ secp256k1_pubkey output_pk;
+ secp256k1_xonly_pubkey output_xonly_pk;
+ unsigned char output_pk32[32];
+ unsigned char buf32[32];
+ int pk_parity;
+ unsigned char tweak[32];
+
+ int ecount;
+ secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
+ secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
+ secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
+
+ memset(overflows, 0xff, sizeof(overflows));
+ secp256k1_testrand256(tweak);
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
+
+ ecount = 0;
+ CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(verify, &output_xonly_pk, &pk_parity, &output_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf32, &output_xonly_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(none, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(sign, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, tweak) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(verify, NULL, pk_parity, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 3);
+ /* invalid pk_parity value */
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(verify, buf32, 2, &internal_xonly_pk, tweak) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, NULL, tweak) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(verify, buf32, pk_parity, &internal_xonly_pk, NULL) == 0);
+ CHECK(ecount == 5);
+
+ memset(tweak, 1, sizeof(tweak));
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &internal_xonly_pk, NULL, &internal_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, tweak) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &output_xonly_pk, &pk_parity, &output_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, output_pk32, &output_xonly_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, tweak) == 1);
+
+ /* Wrong pk_parity */
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk32, !pk_parity, &internal_xonly_pk, tweak) == 0);
+ /* Wrong public key */
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf32, &internal_xonly_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, buf32, pk_parity, &internal_xonly_pk, tweak) == 0);
+
+ /* Overflowing tweak not allowed */
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(ecount == 5);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(verify);
+}
+
+/* Starts with an initial pubkey and recursively creates N_PUBKEYS - 1
+ * additional pubkeys by calling tweak_add. Then verifies every tweak starting
+ * from the last pubkey. */
+#define N_PUBKEYS 32
+void test_xonly_pubkey_tweak_recursive(void) {
+ unsigned char sk[32];
+ secp256k1_pubkey pk[N_PUBKEYS];
+ unsigned char pk_serialized[32];
+ unsigned char tweak[N_PUBKEYS - 1][32];
+ int i;
+
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pk[0], sk) == 1);
+ /* Add tweaks */
+ for (i = 0; i < N_PUBKEYS - 1; i++) {
+ secp256k1_xonly_pubkey xonly_pk;
+ memset(tweak[i], i + 1, sizeof(tweak[i]));
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i]) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &pk[i + 1], &xonly_pk, tweak[i]) == 1);
+ }
+
+ /* Verify tweaks */
+ for (i = N_PUBKEYS - 1; i > 0; i--) {
+ secp256k1_xonly_pubkey xonly_pk;
+ int pk_parity;
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk[i]) == 1);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, pk_serialized, &xonly_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, NULL, &pk[i - 1]) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, pk_serialized, pk_parity, &xonly_pk, tweak[i - 1]) == 1);
+ }
+}
+#undef N_PUBKEYS
+
+void test_keypair(void) {
+ unsigned char sk[32];
+ unsigned char zeros96[96] = { 0 };
+ unsigned char overflows[32];
+ secp256k1_keypair keypair;
+ secp256k1_pubkey pk, pk_tmp;
+ secp256k1_xonly_pubkey xonly_pk, xonly_pk_tmp;
+ int pk_parity, pk_parity_tmp;
+ int ecount;
+ secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
+ secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
+ secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
+
+ CHECK(sizeof(zeros96) == sizeof(keypair));
+ memset(overflows, 0xFF, sizeof(overflows));
+
+ /* Test keypair_create */
+ ecount = 0;
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(none, &keypair, sk) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_keypair_create(verify, &keypair, sk) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_create(sign, NULL, sk) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_keypair_create(sign, &keypair, NULL) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(ecount == 4);
+
+ /* Invalid secret key */
+ CHECK(secp256k1_keypair_create(sign, &keypair, zeros96) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(secp256k1_keypair_create(sign, &keypair, overflows) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
+
+ /* Test keypair_pub */
+ ecount = 0;
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1);
+ CHECK(secp256k1_keypair_pub(none, NULL, &keypair) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_keypair_pub(none, &pk, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
+
+ /* Using an invalid keypair is fine for keypair_pub */
+ memset(&keypair, 0, sizeof(keypair));
+ CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1);
+ CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
+
+ /* keypair holds the same pubkey as pubkey_create */
+ CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
+ CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_pub(none, &pk_tmp, &keypair) == 1);
+ CHECK(secp256k1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
+
+ /** Test keypair_xonly_pub **/
+ ecount = 0;
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
+ /* Using an invalid keypair will set the xonly_pk to 0 (first reset
+ * xonly_pk). */
+ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
+ memset(&keypair, 0, sizeof(keypair));
+ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
+ CHECK(ecount == 3);
+
+ /** keypair holds the same xonly pubkey as pubkey_create **/
+ CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
+ CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
+ CHECK(pk_parity == pk_parity_tmp);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(verify);
+}
+
+void test_keypair_add(void) {
+ unsigned char sk[32];
+ secp256k1_keypair keypair;
+ unsigned char overflows[32];
+ unsigned char zeros96[96] = { 0 };
+ unsigned char tweak[32];
+ int i;
+ int ecount = 0;
+ secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
+ secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
+ secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
+
+ CHECK(sizeof(zeros96) == sizeof(keypair));
+ secp256k1_testrand256(sk);
+ secp256k1_testrand256(tweak);
+ memset(overflows, 0xFF, 32);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+
+ CHECK(secp256k1_keypair_xonly_tweak_add(none, &keypair, tweak) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_keypair_xonly_tweak_add(sign, &keypair, tweak) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 1);
+ CHECK(secp256k1_keypair_xonly_tweak_add(verify, NULL, tweak) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0);
+ CHECK(ecount == 4);
+ /* This does not set the keypair to zeroes */
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0);
+
+ /* Invalid tweak zeroes the keypair */
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0);
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
+
+ /* A zero tweak is fine */
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, zeros96) == 1);
+
+ /* Fails if the resulting keypair was (sk=0, pk=infinity) */
+ for (i = 0; i < count; i++) {
+ secp256k1_scalar scalar_tweak;
+ secp256k1_keypair keypair_tmp;
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ memcpy(&keypair_tmp, &keypair, sizeof(keypair));
+ /* Because sk may be negated before adding, we need to try with tweak =
+ * sk as well as tweak = -sk. */
+ secp256k1_scalar_set_b32(&scalar_tweak, sk, NULL);
+ secp256k1_scalar_negate(&scalar_tweak, &scalar_tweak);
+ secp256k1_scalar_get_b32(tweak, &scalar_tweak);
+ CHECK((secp256k1_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0)
+ || (secp256k1_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0));
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0
+ || secp256k1_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
+ }
+
+ /* Invalid keypair with a valid tweak */
+ memset(&keypair, 0, sizeof(keypair));
+ secp256k1_testrand256(tweak);
+ ecount = 0;
+ CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
+ /* Only seckey part of keypair invalid */
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ memset(&keypair, 0, 32);
+ CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
+ CHECK(ecount == 2);
+ /* Only pubkey part of keypair invalid */
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ memset(&keypair.data[32], 0, 64);
+ CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
+ CHECK(ecount == 3);
+
+ /* Check that the keypair_tweak_add implementation is correct */
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ for (i = 0; i < count; i++) {
+ secp256k1_xonly_pubkey internal_pk;
+ secp256k1_xonly_pubkey output_pk;
+ secp256k1_pubkey output_pk_xy;
+ secp256k1_pubkey output_pk_expected;
+ unsigned char pk32[32];
+ int pk_parity;
+
+ secp256k1_testrand256(tweak);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
+ CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
+
+ /* Check that it passes xonly_pubkey_tweak_add_check */
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, pk32, &output_pk) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, pk32, pk_parity, &internal_pk, tweak) == 1);
+
+ /* Check that the resulting pubkey matches xonly_pubkey_tweak_add */
+ CHECK(secp256k1_keypair_pub(ctx, &output_pk_xy, &keypair) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1);
+ CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
+
+ /* Check that the secret key in the keypair is tweaked correctly */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &output_pk_expected, &keypair.data[0]) == 1);
+ CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
+ }
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(verify);
+}
+
+void run_extrakeys_tests(void) {
+ /* xonly key test cases */
+ test_xonly_pubkey();
+ test_xonly_pubkey_tweak();
+ test_xonly_pubkey_tweak_check();
+ test_xonly_pubkey_tweak_recursive();
+
+ /* keypair tests */
+ test_keypair();
+ test_keypair_add();
+}
+
+#endif
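As a companion to the recursive tweak test above, a minimal sketch of the chaining pattern it exercises: each hop converts the current full pubkey to its x-only form and then applies the next tweak via secp256k1_xonly_pubkey_tweak_add. Assumptions: ctx is a verify-capable context supplied by the caller, the tweaks are valid 32-byte values, and chained_tweak_demo is a hypothetical helper name.

    #include <secp256k1.h>
    #include <secp256k1_extrakeys.h>

    /* Apply two x-only tweaks in sequence, keeping the full pubkey between hops. */
    int chained_tweak_demo(const secp256k1_context *ctx, const secp256k1_pubkey *pk0,
                           const unsigned char tweak1[32], const unsigned char tweak2[32],
                           secp256k1_pubkey *pk_out) {
        secp256k1_xonly_pubkey xonly;
        secp256k1_pubkey pk1;
        int ok = 1;

        /* Hop 1: drop to x-only form, then tweak to obtain the full intermediate key. */
        ok &= secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly, NULL, pk0);
        ok &= secp256k1_xonly_pubkey_tweak_add(ctx, &pk1, &xonly, tweak1);
        /* Hop 2: repeat from the intermediate key. */
        ok &= secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly, NULL, &pk1);
        ok &= secp256k1_xonly_pubkey_tweak_add(ctx, pk_out, &xonly, tweak2);
        return ok;
    }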
diff --git a/src/secp256k1/src/modules/recovery/Makefile.am.include b/src/secp256k1/src/modules/recovery/Makefile.am.include
index bf23c26e71..e2d3f1248d 100644
--- a/src/secp256k1/src/modules/recovery/Makefile.am.include
+++ b/src/secp256k1/src/modules/recovery/Makefile.am.include
@@ -1,6 +1,7 @@
include_HEADERS += include/secp256k1_recovery.h
noinst_HEADERS += src/modules/recovery/main_impl.h
noinst_HEADERS += src/modules/recovery/tests_impl.h
+noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h
if USE_BENCHMARK
noinst_PROGRAMS += bench_recover
bench_recover_SOURCES = src/bench_recover.c
diff --git a/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
new file mode 100644
index 0000000000..a2f381d77a
--- /dev/null
+++ b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
@@ -0,0 +1,149 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
+#define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
+
+#include "src/modules/recovery/main_impl.h"
+#include "include/secp256k1_recovery.h"
+
+void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
+ int i, j, k;
+ uint64_t iter = 0;
+
+ /* Loop */
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
+ for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
+ if (skip_section(&iter)) continue;
+ for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
+ const int starting_k = k;
+ secp256k1_fe r_dot_y_normalized;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ int expected_recid;
+ int recid;
+ int overflow;
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ /* Check directly */
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
+ r_from_k(&expected_r, group, k, &overflow);
+ CHECK(r == expected_r);
+ CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
+ /* The recid's second bit is for conveying overflow (R.x value >= group order).
+ * In the actual secp256k1 this is an astronomically unlikely event, but in the
+ * small group used here, it will be the case for all points except the ones where
+ * R.x=1 (which the group is specifically selected to have).
+ * Note that this isn't actually useful; full recovery would need to convey
+ * floor(R.x / group_order), but only one bit is used as that is sufficient
+ * in the real group. */
+ expected_recid = overflow ? 2 : 0;
+ r_dot_y_normalized = group[k].y;
+ secp256k1_fe_normalize(&r_dot_y_normalized);
+ /* Also the recovery id is flipped depending if we hit the low-s branch */
+ if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
+ expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized);
+ } else {
+ expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized);
+ }
+ CHECK(recid == expected_recid);
+
+ /* Convert to a standard sig then check */
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k, NULL);
+ CHECK(r == expected_r);
+ CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
+
+ /* Overflow means we've tried every possible nonce */
+ if (k < starting_k) {
+ break;
+ }
+ }
+ }
+ }
+}
+
+void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
+ /* This is essentially a copy of test_exhaustive_verify, with recovery added */
+ int s, r, msg, key;
+ uint64_t iter = 0;
+ for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
+ for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
+ for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
+ for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int recid = 0;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ if (skip_section(&iter)) continue;
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
+ should_verify = 0;
+ for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k, NULL);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* We would like to try recovering the pubkey and checking that it matches,
+ * but pubkey recovery is impossible in the exhaustive tests (the reason
+ * being that there are 12 nonzero r values, 12 nonzero points, and no
+ * overlap between the sets, so there are no valid signatures). */
+
+ /* Verify by converting to a standard signature and calling verify */
+ secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+
+static void test_exhaustive_recovery(const secp256k1_context *ctx, const secp256k1_ge *group) {
+ test_exhaustive_recovery_sign(ctx, group);
+ test_exhaustive_recovery_verify(ctx, group);
+}
+
+#endif /* SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H */
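As the comment in test_exhaustive_recovery_sign explains, the recovery id packs the parity of R.y into bit 0 and the R.x-overflow flag into bit 1, which is exactly what secp256k1_ecdsa_recover consumes. A hedged public-API roundtrip sketch (assumptions: ctx is a sign+verify context, seckey and msg32 are valid, and recovery_roundtrip_demo is a hypothetical helper name):

    #include <string.h>
    #include <secp256k1.h>
    #include <secp256k1_recovery.h>

    /* Sign recoverably, serialize to 64 bytes plus recid, then recover the
     * public key from signature and message alone and compare it. */
    int recovery_roundtrip_demo(const secp256k1_context *ctx, const unsigned char seckey[32], const unsigned char msg32[32]) {
        secp256k1_ecdsa_recoverable_signature rsig;
        secp256k1_pubkey pk, pk_recovered;
        unsigned char sig64[64], ser_a[33], ser_b[33];
        size_t len_a = 33, len_b = 33;
        int recid;
        int ok = 1;

        ok &= secp256k1_ec_pubkey_create(ctx, &pk, seckey);
        ok &= secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey, NULL, NULL);
        ok &= secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &rsig);
        /* recid is in 0..3; without it, several candidate keys could match. */
        ok &= secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, recid);
        ok &= secp256k1_ecdsa_recover(ctx, &pk_recovered, &rsig, msg32);
        ok &= secp256k1_ec_pubkey_serialize(ctx, ser_a, &len_a, &pk, SECP256K1_EC_COMPRESSED);
        ok &= secp256k1_ec_pubkey_serialize(ctx, ser_b, &len_b, &pk_recovered, SECP256K1_EC_COMPRESSED);
        ok &= (len_a == len_b) && memcmp(ser_a, ser_b, len_a) == 0;
        return ok;
    }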
diff --git a/src/secp256k1/src/modules/recovery/tests_impl.h b/src/secp256k1/src/modules/recovery/tests_impl.h
index 38a533a755..09cae38403 100644
--- a/src/secp256k1/src/modules/recovery/tests_impl.h
+++ b/src/secp256k1/src/modules/recovery/tests_impl.h
@@ -25,7 +25,7 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
- return secp256k1_rand_bits(1);
+ return secp256k1_testrand_bits(1);
}
void test_ecdsa_recovery_api(void) {
@@ -184,7 +184,7 @@ void test_ecdsa_recovery_end_to_end(void) {
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
- CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
+ CHECK(secp256k1_memcmp_var(&signature[4], &signature[0], 64) == 0);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
memset(&rsignature[4], 0, sizeof(rsignature[4]));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
@@ -193,16 +193,16 @@ void test_ecdsa_recovery_end_to_end(void) {
/* Parse compact (with recovery id) and recover. */
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
- CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
- sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
+ sig[secp256k1_testrand_bits(6)] += 1 + secp256k1_testrand_int(255);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
/* Recover again */
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
- memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+ secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
}
/* Tests several edge cases. */
diff --git a/src/secp256k1/src/modules/schnorrsig/Makefile.am.include b/src/secp256k1/src/modules/schnorrsig/Makefile.am.include
new file mode 100644
index 0000000000..568bcc3523
--- /dev/null
+++ b/src/secp256k1/src/modules/schnorrsig/Makefile.am.include
@@ -0,0 +1,9 @@
+include_HEADERS += include/secp256k1_schnorrsig.h
+noinst_HEADERS += src/modules/schnorrsig/main_impl.h
+noinst_HEADERS += src/modules/schnorrsig/tests_impl.h
+noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_schnorrsig
+bench_schnorrsig_SOURCES = src/bench_schnorrsig.c
+bench_schnorrsig_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
+endif
diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h
new file mode 100644
index 0000000000..b0d8481f9b
--- /dev/null
+++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h
@@ -0,0 +1,239 @@
+/**********************************************************************
+ * Copyright (c) 2018-2020 Andrew Poelstra, Jonas Nick *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_SCHNORRSIG_MAIN_
+#define _SECP256K1_MODULE_SCHNORRSIG_MAIN_
+
+#include "include/secp256k1.h"
+#include "include/secp256k1_schnorrsig.h"
+#include "hash.h"
+
+/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
+ * SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */
+static void secp256k1_nonce_function_bip340_sha256_tagged(secp256k1_sha256 *sha) {
+ secp256k1_sha256_initialize(sha);
+ sha->s[0] = 0x46615b35ul;
+ sha->s[1] = 0xf4bfbff7ul;
+ sha->s[2] = 0x9f8dc671ul;
+ sha->s[3] = 0x83627ab3ul;
+ sha->s[4] = 0x60217180ul;
+ sha->s[5] = 0x57358661ul;
+ sha->s[6] = 0x21a29e54ul;
+ sha->s[7] = 0x68b07b4cul;
+
+ sha->bytes = 64;
+}
+
+/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
+ * SHA256 to SHA256("BIP0340/aux")||SHA256("BIP0340/aux"). */
+static void secp256k1_nonce_function_bip340_sha256_tagged_aux(secp256k1_sha256 *sha) {
+ secp256k1_sha256_initialize(sha);
+ sha->s[0] = 0x24dd3219ul;
+ sha->s[1] = 0x4eba7e70ul;
+ sha->s[2] = 0xca0fabb9ul;
+ sha->s[3] = 0x0fa3166dul;
+ sha->s[4] = 0x3afbe4b1ul;
+ sha->s[5] = 0x4c44df97ul;
+ sha->s[6] = 0x4aac2739ul;
+ sha->s[7] = 0x249e850aul;
+
+ sha->bytes = 64;
+}
+
+/* algo16 argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340
+ * by using the correct tagged hash function. */
+static const unsigned char bip340_algo16[16] = "BIP0340/nonce\0\0\0";
+
+static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
+ secp256k1_sha256 sha;
+ unsigned char masked_key[32];
+ int i;
+
+ if (algo16 == NULL) {
+ return 0;
+ }
+
+ if (data != NULL) {
+ secp256k1_nonce_function_bip340_sha256_tagged_aux(&sha);
+ secp256k1_sha256_write(&sha, data, 32);
+ secp256k1_sha256_finalize(&sha, masked_key);
+ for (i = 0; i < 32; i++) {
+ masked_key[i] ^= key32[i];
+ }
+ }
+
+ /* Tag the hash with algo16 which is important to avoid nonce reuse across
+ * algorithms. If this nonce function is used in BIP-340 signing as defined
+ * in the spec, an optimized tagging implementation is used. */
+ if (secp256k1_memcmp_var(algo16, bip340_algo16, 16) == 0) {
+ secp256k1_nonce_function_bip340_sha256_tagged(&sha);
+ } else {
+ int algo16_len = 16;
+ /* Remove terminating null bytes */
+ while (algo16_len > 0 && !algo16[algo16_len - 1]) {
+ algo16_len--;
+ }
+ secp256k1_sha256_initialize_tagged(&sha, algo16, algo16_len);
+ }
+
+ /* Hash (masked-)key||pk||msg using the tagged hash as per the spec */
+ if (data != NULL) {
+ secp256k1_sha256_write(&sha, masked_key, 32);
+ } else {
+ secp256k1_sha256_write(&sha, key32, 32);
+ }
+ secp256k1_sha256_write(&sha, xonly_pk32, 32);
+ secp256k1_sha256_write(&sha, msg32, 32);
+ secp256k1_sha256_finalize(&sha, nonce32);
+ return 1;
+}
+
+const secp256k1_nonce_function_hardened secp256k1_nonce_function_bip340 = nonce_function_bip340;
+
+/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
+ * SHA256 to SHA256("BIP0340/challenge")||SHA256("BIP0340/challenge"). */
+static void secp256k1_schnorrsig_sha256_tagged(secp256k1_sha256 *sha) {
+ secp256k1_sha256_initialize(sha);
+ sha->s[0] = 0x9cecba11ul;
+ sha->s[1] = 0x23925381ul;
+ sha->s[2] = 0x11679112ul;
+ sha->s[3] = 0xd1627e0ful;
+ sha->s[4] = 0x97c87550ul;
+ sha->s[5] = 0x003cc765ul;
+ sha->s[6] = 0x90f61164ul;
+ sha->s[7] = 0x33e9b66aul;
+ sha->bytes = 64;
+}
+
+static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32)
+{
+ unsigned char buf[32];
+ secp256k1_sha256 sha;
+
+ /* tagged hash(r.x, pk.x, msg32) */
+ secp256k1_schnorrsig_sha256_tagged(&sha);
+ secp256k1_sha256_write(&sha, r32, 32);
+ secp256k1_sha256_write(&sha, pubkey32, 32);
+ secp256k1_sha256_write(&sha, msg32, 32);
+ secp256k1_sha256_finalize(&sha, buf);
+ /* Set scalar e to the challenge hash modulo the curve order as per
+ * BIP340. */
+ secp256k1_scalar_set_b32(e, buf, NULL);
+}
+
+int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) {
+ secp256k1_scalar sk;
+ secp256k1_scalar e;
+ secp256k1_scalar k;
+ secp256k1_gej rj;
+ secp256k1_ge pk;
+ secp256k1_ge r;
+ unsigned char buf[32] = { 0 };
+ unsigned char pk_buf[32];
+ unsigned char seckey[32];
+ int ret = 1;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(keypair != NULL);
+
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_bip340;
+ }
+
+ ret &= secp256k1_keypair_load(ctx, &sk, &pk, keypair);
+ /* Because we are signing for an x-only pubkey, the secret key is negated
+ * before signing if the point corresponding to the secret key does not
+ * have an even Y. */
+ if (secp256k1_fe_is_odd(&pk.y)) {
+ secp256k1_scalar_negate(&sk, &sk);
+ }
+
+ secp256k1_scalar_get_b32(seckey, &sk);
+ secp256k1_fe_get_b32(pk_buf, &pk.x);
+ ret &= !!noncefp(buf, msg32, seckey, pk_buf, bip340_algo16, ndata);
+ secp256k1_scalar_set_b32(&k, buf, NULL);
+ ret &= !secp256k1_scalar_is_zero(&k);
+ secp256k1_scalar_cmov(&k, &secp256k1_scalar_one, !ret);
+
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &rj, &k);
+ secp256k1_ge_set_gej(&r, &rj);
+
+ /* We declassify r to allow using it as a branch point. This is fine
+ * because r is not a secret. */
+ secp256k1_declassify(ctx, &r, sizeof(r));
+ secp256k1_fe_normalize_var(&r.y);
+ if (secp256k1_fe_is_odd(&r.y)) {
+ secp256k1_scalar_negate(&k, &k);
+ }
+ secp256k1_fe_normalize_var(&r.x);
+ secp256k1_fe_get_b32(&sig64[0], &r.x);
+
+ secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf);
+ secp256k1_scalar_mul(&e, &e, &sk);
+ secp256k1_scalar_add(&e, &e, &k);
+ secp256k1_scalar_get_b32(&sig64[32], &e);
+
+ memczero(sig64, 64, !ret);
+ secp256k1_scalar_clear(&k);
+ secp256k1_scalar_clear(&sk);
+ memset(seckey, 0, sizeof(seckey));
+
+ return ret;
+}
+
+int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_xonly_pubkey *pubkey) {
+ secp256k1_scalar s;
+ secp256k1_scalar e;
+ secp256k1_gej rj;
+ secp256k1_ge pk;
+ secp256k1_gej pkj;
+ secp256k1_fe rx;
+ secp256k1_ge r;
+ unsigned char buf[32];
+ int overflow;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ if (!secp256k1_fe_set_b32(&rx, &sig64[0])) {
+ return 0;
+ }
+
+ secp256k1_scalar_set_b32(&s, &sig64[32], &overflow);
+ if (overflow) {
+ return 0;
+ }
+
+ if (!secp256k1_xonly_pubkey_load(ctx, &pk, pubkey)) {
+ return 0;
+ }
+
+ /* Compute e. */
+ secp256k1_fe_get_b32(buf, &pk.x);
+ secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, buf);
+
+ /* Compute rj = s*G + (-e)*pkj */
+ secp256k1_scalar_negate(&e, &e);
+ secp256k1_gej_set_ge(&pkj, &pk);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &rj, &pkj, &e, &s);
+
+ secp256k1_ge_set_gej_var(&r, &rj);
+ if (secp256k1_ge_is_infinity(&r)) {
+ return 0;
+ }
+
+ secp256k1_fe_normalize_var(&r.y);
+ return !secp256k1_fe_is_odd(&r.y) &&
+ secp256k1_fe_equal_var(&rx, &r.x);
+}
+
+#endif
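For orientation, the verification above computes R = s*G - e*P, requires R to be non-infinite with an even Y coordinate, and compares R.x against the first 32 signature bytes. A minimal end-to-end sketch with the public API; assumptions: a fresh combined sign/verify context, a valid secret key and 32-byte message hash, optional 32-byte auxiliary randomness passed as ndata to the default BIP-340 nonce function, and the hypothetical helper name schnorrsig_demo.

    #include <secp256k1.h>
    #include <secp256k1_extrakeys.h>
    #include <secp256k1_schnorrsig.h>

    /* Sign msg32 with a BIP-340 Schnorr signature and verify it. Returns 1 on success. */
    int schnorrsig_demo(const unsigned char seckey[32], const unsigned char msg32[32], unsigned char *aux_rand32) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        secp256k1_keypair keypair;
        secp256k1_xonly_pubkey pk;
        unsigned char sig64[64];
        int ok;

        ok = secp256k1_keypair_create(ctx, &keypair, seckey);
        /* A NULL noncefp selects secp256k1_nonce_function_bip340; aux_rand32 is
         * the optional auxiliary randomness defined by BIP-340 (may be NULL). */
        ok = ok && secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypair, NULL, aux_rand32);
        ok = ok && secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair);
        ok = ok && secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pk);
        secp256k1_context_destroy(ctx);
        return ok;
    }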
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
new file mode 100644
index 0000000000..4bf0bc1680
--- /dev/null
+++ b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
@@ -0,0 +1,206 @@
+/**********************************************************************
+ * Copyright (c) 2020 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
+#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
+
+#include "include/secp256k1_schnorrsig.h"
+#include "src/modules/schnorrsig/main_impl.h"
+
+static const unsigned char invalid_pubkey_bytes[][32] = {
+ /* 0 */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ /* 2 */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+ },
+ /* order */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 24) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 16) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 8) & 0xFF,
+ (EXHAUSTIVE_TEST_ORDER + 0UL) & 0xFF
+ },
+ /* order + 1 */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 24) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 16) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 8) & 0xFF,
+ (EXHAUSTIVE_TEST_ORDER + 1UL) & 0xFF
+ },
+ /* field size */
+ {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F
+ },
+ /* field size + 1 (note that 1 is legal) */
+ {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30
+ },
+ /* 2^256 - 1 */
+ {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ }
+};
+
+#define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0]))
+
+static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+ const unsigned char *key32, const unsigned char *xonly_pk32,
+ const unsigned char *algo16, void* data) {
+ secp256k1_scalar s;
+ int *idata = data;
+ (void)msg32;
+ (void)key32;
+ (void)xonly_pk32;
+ (void)algo16;
+ secp256k1_scalar_set_int(&s, *idata);
+ secp256k1_scalar_get_b32(nonce32, &s);
+ return 1;
+}
+
+static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, const secp256k1_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) {
+ int d;
+ uint64_t iter = 0;
+ /* Iterate over the possible public keys to verify against (through their corresponding DL d). */
+ for (d = 1; d <= EXHAUSTIVE_TEST_ORDER / 2; ++d) {
+ int actual_d;
+ unsigned k;
+ unsigned char pk32[32];
+ memcpy(pk32, xonly_pubkey_bytes[d - 1], 32);
+ actual_d = parities[d - 1] ? EXHAUSTIVE_TEST_ORDER - d : d;
+ /* Iterate over the possible valid first 32 bytes in the signature, through their corresponding DL k.
+ Values above EXHAUSTIVE_TEST_ORDER/2 refer to the entries in invalid_pubkey_bytes. */
+ for (k = 1; k <= EXHAUSTIVE_TEST_ORDER / 2 + NUM_INVALID_KEYS; ++k) {
+ unsigned char sig64[64];
+ int actual_k = -1;
+ int e_done[EXHAUSTIVE_TEST_ORDER] = {0};
+ int e_count_done = 0;
+ if (skip_section(&iter)) continue;
+ if (k <= EXHAUSTIVE_TEST_ORDER / 2) {
+ memcpy(sig64, xonly_pubkey_bytes[k - 1], 32);
+ actual_k = parities[k - 1] ? EXHAUSTIVE_TEST_ORDER - k : k;
+ } else {
+ memcpy(sig64, invalid_pubkey_bytes[k - 1 - EXHAUSTIVE_TEST_ORDER / 2], 32);
+ }
+ /* Randomly generate messages until all challenges have been hit. */
+ while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
+ secp256k1_scalar e;
+ unsigned char msg32[32];
+ secp256k1_testrand256(msg32);
+ secp256k1_schnorrsig_challenge(&e, sig64, msg32, pk32);
+ /* Only do work if we hit a challenge we haven't tried before. */
+ if (!e_done[e]) {
+ /* Iterate over the possible valid last 32 bytes in the signature.
+ 0..order=that s value; order+1=random bytes */
+ int count_valid = 0, s;
+ for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) {
+ int expect_valid, valid;
+ if (s <= EXHAUSTIVE_TEST_ORDER) {
+ secp256k1_scalar s_s;
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_get_b32(sig64 + 32, &s_s);
+ expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER &&
+ (s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
+ } else {
+ secp256k1_testrand256(sig64 + 32);
+ expect_valid = 0;
+ }
+ valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]);
+ CHECK(valid == expect_valid);
+ count_valid += valid;
+ }
+ /* Exactly one s value must verify, unless R is illegal. */
+ CHECK(count_valid == (actual_k != -1));
+ /* Don't retry other messages that result in the same challenge. */
+ e_done[e] = 1;
+ ++e_count_done;
+ }
+ }
+ }
+ }
+}
+
+static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const secp256k1_keypair* keypairs, const int* parities) {
+ int d, k;
+ uint64_t iter = 0;
+ /* Loop over keys. */
+ for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) {
+ int actual_d = d;
+ if (parities[d - 1]) actual_d = EXHAUSTIVE_TEST_ORDER - d;
+ /* Loop over nonces. */
+ for (k = 1; k < EXHAUSTIVE_TEST_ORDER; ++k) {
+ int e_done[EXHAUSTIVE_TEST_ORDER] = {0};
+ int e_count_done = 0;
+ unsigned char msg32[32];
+ unsigned char sig64[64];
+ int actual_k = k;
+ if (skip_section(&iter)) continue;
+ if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k;
+ /* Generate random messages until all challenges have been tried. */
+ while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
+ secp256k1_scalar e;
+ secp256k1_testrand256(msg32);
+ secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]);
+ /* Only do work if we hit a challenge we haven't tried before. */
+ if (!e_done[e]) {
+ secp256k1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER;
+ unsigned char expected_s_bytes[32];
+ secp256k1_scalar_get_b32(expected_s_bytes, &expected_s);
+ /* Invoke the real function to construct a signature. */
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], secp256k1_hardened_nonce_function_smallint, &k));
+ /* The first 32 bytes must match the xonly pubkey for the specified k. */
+ CHECK(secp256k1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0);
+ /* The last 32 bytes must match the expected s value. */
+ CHECK(secp256k1_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0);
+ /* Don't retry other messages that result in the same challenge. */
+ e_done[e] = 1;
+ ++e_count_done;
+ }
+ }
+ }
+ }
+}
+
+static void test_exhaustive_schnorrsig(const secp256k1_context *ctx) {
+ secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
+ secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
+ int parity[EXHAUSTIVE_TEST_ORDER - 1];
+ unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
+ unsigned i;
+
+ /* Verify that all invalid_pubkey_bytes are actually invalid. */
+ for (i = 0; i < NUM_INVALID_KEYS; ++i) {
+ secp256k1_xonly_pubkey pk;
+ CHECK(!secp256k1_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i]));
+ }
+
+ /* Construct keypairs and xonly-pubkeys for the entire group. */
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) {
+ secp256k1_scalar scalar_i;
+ unsigned char buf[32];
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_scalar_get_b32(buf, &scalar_i);
+ CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf));
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1]));
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
+ }
+
+ test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity);
+ test_exhaustive_schnorrsig_verify(ctx, xonly_pubkey, xonly_pubkey_bytes, parity);
+}
+
+#endif
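The exhaustive tests drive secp256k1_schnorrsig_sign through a custom callback matching the secp256k1_nonce_function_hardened interface. A hedged sketch of such a callback for test fixtures only (a fixed or counter nonce must never be used for real signing); fixed_nonce_function is a hypothetical name, and the signature mirrors the smallint helper above:

    #include <string.h>
    #include <secp256k1.h>
    #include <secp256k1_schnorrsig.h>

    /* Test-only nonce callback: copies a caller-provided 32-byte value into
     * nonce32 and ignores all other inputs. */
    static int fixed_nonce_function(unsigned char *nonce32, const unsigned char *msg32,
                                    const unsigned char *key32, const unsigned char *xonly_pk32,
                                    const unsigned char *algo16, void *data) {
        (void)msg32; (void)key32; (void)xonly_pk32; (void)algo16;
        if (data == NULL) {
            return 0;
        }
        memcpy(nonce32, data, 32);
        return 1;
    }

    /* Usage: secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypair, fixed_nonce_function, fixed32); */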
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
new file mode 100644
index 0000000000..f522fcb320
--- /dev/null
+++ b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
@@ -0,0 +1,806 @@
+/**********************************************************************
+ * Copyright (c) 2018-2020 Andrew Poelstra, Jonas Nick *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_
+#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_
+
+#include "secp256k1_schnorrsig.h"
+
+/* Checks that a bit flip in the n_flip-th argument (which is n_bytes long)
+ * changes the resulting nonce. */
+void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) {
+ unsigned char nonces[2][32];
+ CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1);
+ secp256k1_testrand_flip(args[n_flip], n_bytes);
+ CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1);
+ CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0);
+}
+
+/* Tests two sha256 structs for equality. This function only produces a
+ * correct result if an integer multiple of 64 bytes has been written
+ * into the hashes. */
+void test_sha256_eq(const secp256k1_sha256 *sha1, const secp256k1_sha256 *sha2) {
+ /* Is buffer fully consumed? */
+ CHECK((sha1->bytes & 0x3F) == 0);
+
+ CHECK(sha1->bytes == sha2->bytes);
+ CHECK(secp256k1_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
+}
+
+void run_nonce_function_bip340_tests(void) {
+ unsigned char tag[13] = "BIP0340/nonce";
+ unsigned char aux_tag[11] = "BIP0340/aux";
+ unsigned char algo16[16] = "BIP0340/nonce\0\0\0";
+ secp256k1_sha256 sha;
+ secp256k1_sha256 sha_optimized;
+ unsigned char nonce[32];
+ unsigned char msg[32];
+ unsigned char key[32];
+ unsigned char pk[32];
+ unsigned char aux_rand[32];
+ unsigned char *args[5];
+ int i;
+
+ /* Check that hash initialized by
+ * secp256k1_nonce_function_bip340_sha256_tagged has the expected
+ * state. */
+ secp256k1_sha256_initialize_tagged(&sha, tag, sizeof(tag));
+ secp256k1_nonce_function_bip340_sha256_tagged(&sha_optimized);
+ test_sha256_eq(&sha, &sha_optimized);
+
+ /* Check that hash initialized by
+ * secp256k1_nonce_function_bip340_sha256_tagged_aux has the expected
+ * state. */
+ secp256k1_sha256_initialize_tagged(&sha, aux_tag, sizeof(aux_tag));
+ secp256k1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized);
+ test_sha256_eq(&sha, &sha_optimized);
+
+ secp256k1_testrand256(msg);
+ secp256k1_testrand256(key);
+ secp256k1_testrand256(pk);
+ secp256k1_testrand256(aux_rand);
+
+ /* Check that a bitflip in an argument results in different nonces. */
+ args[0] = msg;
+ args[1] = key;
+ args[2] = pk;
+ args[3] = algo16;
+ args[4] = aux_rand;
+ for (i = 0; i < count; i++) {
+ nonce_function_bip340_bitflip(args, 0, 32);
+ nonce_function_bip340_bitflip(args, 1, 32);
+ nonce_function_bip340_bitflip(args, 2, 32);
+ /* Flip algo16 special case "BIP0340/nonce" */
+ nonce_function_bip340_bitflip(args, 3, 16);
+ /* Flip algo16 again */
+ nonce_function_bip340_bitflip(args, 3, 16);
+ nonce_function_bip340_bitflip(args, 4, 32);
+ }
+
+ /* NULL algo16 is disallowed */
+ CHECK(nonce_function_bip340(nonce, msg, key, pk, NULL, NULL) == 0);
+ /* Empty algo16 is fine */
+ memset(algo16, 0x00, 16);
+ CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
+ /* algo16 with terminating null bytes is fine */
+ algo16[1] = 65;
+ CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
+ /* Other algo16 is fine */
+ memset(algo16, 0xFF, 16);
+ CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
+
+ /* NULL aux_rand argument is allowed. */
+ CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
+}
+
+void test_schnorrsig_api(void) {
+ unsigned char sk1[32];
+ unsigned char sk2[32];
+ unsigned char sk3[32];
+ unsigned char msg[32];
+ secp256k1_keypair keypairs[3];
+ secp256k1_keypair invalid_keypair = { 0 };
+ secp256k1_xonly_pubkey pk[3];
+ secp256k1_xonly_pubkey zero_pk;
+ unsigned char sig[64];
+
+ /** setup **/
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ int ecount;
+
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ secp256k1_testrand256(sk1);
+ secp256k1_testrand256(sk2);
+ secp256k1_testrand256(sk3);
+ secp256k1_testrand256(msg);
+ CHECK(secp256k1_keypair_create(ctx, &keypairs[0], sk1) == 1);
+ CHECK(secp256k1_keypair_create(ctx, &keypairs[1], sk2) == 1);
+ CHECK(secp256k1_keypair_create(ctx, &keypairs[2], sk3) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &pk[0], NULL, &keypairs[0]) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &pk[1], NULL, &keypairs[1]) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &pk[2], NULL, &keypairs[2]) == 1);
+ memset(&zero_pk, 0, sizeof(zero_pk));
+
+ /** main test body **/
+ ecount = 0;
+ CHECK(secp256k1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0);
+ CHECK(ecount == 6);
+
+ ecount = 0;
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
+ CHECK(secp256k1_schnorrsig_verify(none, sig, msg, &pk[0]) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, NULL) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0);
+ CHECK(ecount == 6);
+
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+/* Checks that hash initialized by secp256k1_schnorrsig_sha256_tagged has the
+ * expected state. */
+void test_schnorrsig_sha256_tagged(void) {
+ char tag[17] = "BIP0340/challenge";
+ secp256k1_sha256 sha;
+ secp256k1_sha256 sha_optimized;
+
+ secp256k1_sha256_initialize_tagged(&sha, (unsigned char *) tag, sizeof(tag));
+ secp256k1_schnorrsig_sha256_tagged(&sha_optimized);
+ test_sha256_eq(&sha, &sha_optimized);
+}
+
+/* Helper function for schnorrsig_bip_vectors
+ * Signs the message and checks that it's the same as expected_sig. */
+void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg, const unsigned char *expected_sig) {
+ unsigned char sig[64];
+ secp256k1_keypair keypair;
+ secp256k1_xonly_pubkey pk, pk_expected;
+
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand));
+ CHECK(secp256k1_memcmp_var(sig, expected_sig, 64) == 0);
+
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized));
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
+ CHECK(secp256k1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk));
+}
+
+/* Helper function for schnorrsig_bip_vectors
+ * Checks that both verify and verify_batch (TODO) return the same value as expected. */
+void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized, const unsigned char *msg32, const unsigned char *sig, int expected) {
+ secp256k1_xonly_pubkey pk;
+
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk, pk_serialized));
+ CHECK(expected == secp256k1_schnorrsig_verify(ctx, sig, msg32, &pk));
+}
+
+/* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). See
+ * https://github.com/bitcoin/bips/blob/master/bip-0340/test-vectors.csv. */
+void test_schnorrsig_bip_vectors(void) {
+ {
+ /* Test vector 0 */
+ const unsigned char sk[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
+ };
+ const unsigned char pk[32] = {
+ 0xF9, 0x30, 0x8A, 0x01, 0x92, 0x58, 0xC3, 0x10,
+ 0x49, 0x34, 0x4F, 0x85, 0xF8, 0x9D, 0x52, 0x29,
+ 0xB5, 0x31, 0xC8, 0x45, 0x83, 0x6F, 0x99, 0xB0,
+ 0x86, 0x01, 0xF1, 0x13, 0xBC, 0xE0, 0x36, 0xF9
+ };
+ unsigned char aux_rand[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ const unsigned char msg[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ const unsigned char sig[64] = {
+ 0xE9, 0x07, 0x83, 0x1F, 0x80, 0x84, 0x8D, 0x10,
+ 0x69, 0xA5, 0x37, 0x1B, 0x40, 0x24, 0x10, 0x36,
+ 0x4B, 0xDF, 0x1C, 0x5F, 0x83, 0x07, 0xB0, 0x08,
+ 0x4C, 0x55, 0xF1, 0xCE, 0x2D, 0xCA, 0x82, 0x15,
+ 0x25, 0xF6, 0x6A, 0x4A, 0x85, 0xEA, 0x8B, 0x71,
+ 0xE4, 0x82, 0xA7, 0x4F, 0x38, 0x2D, 0x2C, 0xE5,
+ 0xEB, 0xEE, 0xE8, 0xFD, 0xB2, 0x17, 0x2F, 0x47,
+ 0x7D, 0xF4, 0x90, 0x0D, 0x31, 0x05, 0x36, 0xC0
+ };
+ test_schnorrsig_bip_vectors_check_signing(sk, pk, aux_rand, msg, sig);
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 1);
+ }
+ {
+ /* Test vector 1 */
+ const unsigned char sk[32] = {
+ 0xB7, 0xE1, 0x51, 0x62, 0x8A, 0xED, 0x2A, 0x6A,
+ 0xBF, 0x71, 0x58, 0x80, 0x9C, 0xF4, 0xF3, 0xC7,
+ 0x62, 0xE7, 0x16, 0x0F, 0x38, 0xB4, 0xDA, 0x56,
+ 0xA7, 0x84, 0xD9, 0x04, 0x51, 0x90, 0xCF, 0xEF
+ };
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ unsigned char aux_rand[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x68, 0x96, 0xBD, 0x60, 0xEE, 0xAE, 0x29, 0x6D,
+ 0xB4, 0x8A, 0x22, 0x9F, 0xF7, 0x1D, 0xFE, 0x07,
+ 0x1B, 0xDE, 0x41, 0x3E, 0x6D, 0x43, 0xF9, 0x17,
+ 0xDC, 0x8D, 0xCF, 0x8C, 0x78, 0xDE, 0x33, 0x41,
+ 0x89, 0x06, 0xD1, 0x1A, 0xC9, 0x76, 0xAB, 0xCC,
+ 0xB2, 0x0B, 0x09, 0x12, 0x92, 0xBF, 0xF4, 0xEA,
+ 0x89, 0x7E, 0xFC, 0xB6, 0x39, 0xEA, 0x87, 0x1C,
+ 0xFA, 0x95, 0xF6, 0xDE, 0x33, 0x9E, 0x4B, 0x0A
+ };
+ test_schnorrsig_bip_vectors_check_signing(sk, pk, aux_rand, msg, sig);
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 1);
+ }
+ {
+ /* Test vector 2 */
+ const unsigned char sk[32] = {
+ 0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34,
+ 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1,
+ 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74,
+ 0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x14, 0xE5, 0xC9
+ };
+ const unsigned char pk[32] = {
+ 0xDD, 0x30, 0x8A, 0xFE, 0xC5, 0x77, 0x7E, 0x13,
+ 0x12, 0x1F, 0xA7, 0x2B, 0x9C, 0xC1, 0xB7, 0xCC,
+ 0x01, 0x39, 0x71, 0x53, 0x09, 0xB0, 0x86, 0xC9,
+ 0x60, 0xE1, 0x8F, 0xD9, 0x69, 0x77, 0x4E, 0xB8
+ };
+ unsigned char aux_rand[32] = {
+ 0xC8, 0x7A, 0xA5, 0x38, 0x24, 0xB4, 0xD7, 0xAE,
+ 0x2E, 0xB0, 0x35, 0xA2, 0xB5, 0xBB, 0xBC, 0xCC,
+ 0x08, 0x0E, 0x76, 0xCD, 0xC6, 0xD1, 0x69, 0x2C,
+ 0x4B, 0x0B, 0x62, 0xD7, 0x98, 0xE6, 0xD9, 0x06
+ };
+ const unsigned char msg[32] = {
+ 0x7E, 0x2D, 0x58, 0xD8, 0xB3, 0xBC, 0xDF, 0x1A,
+ 0xBA, 0xDE, 0xC7, 0x82, 0x90, 0x54, 0xF9, 0x0D,
+ 0xDA, 0x98, 0x05, 0xAA, 0xB5, 0x6C, 0x77, 0x33,
+ 0x30, 0x24, 0xB9, 0xD0, 0xA5, 0x08, 0xB7, 0x5C
+ };
+ const unsigned char sig[64] = {
+ 0x58, 0x31, 0xAA, 0xEE, 0xD7, 0xB4, 0x4B, 0xB7,
+ 0x4E, 0x5E, 0xAB, 0x94, 0xBA, 0x9D, 0x42, 0x94,
+ 0xC4, 0x9B, 0xCF, 0x2A, 0x60, 0x72, 0x8D, 0x8B,
+ 0x4C, 0x20, 0x0F, 0x50, 0xDD, 0x31, 0x3C, 0x1B,
+ 0xAB, 0x74, 0x58, 0x79, 0xA5, 0xAD, 0x95, 0x4A,
+ 0x72, 0xC4, 0x5A, 0x91, 0xC3, 0xA5, 0x1D, 0x3C,
+ 0x7A, 0xDE, 0xA9, 0x8D, 0x82, 0xF8, 0x48, 0x1E,
+ 0x0E, 0x1E, 0x03, 0x67, 0x4A, 0x6F, 0x3F, 0xB7
+ };
+ test_schnorrsig_bip_vectors_check_signing(sk, pk, aux_rand, msg, sig);
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 1);
+ }
+ {
+ /* Test vector 3 */
+ const unsigned char sk[32] = {
+ 0x0B, 0x43, 0x2B, 0x26, 0x77, 0x93, 0x73, 0x81,
+ 0xAE, 0xF0, 0x5B, 0xB0, 0x2A, 0x66, 0xEC, 0xD0,
+ 0x12, 0x77, 0x30, 0x62, 0xCF, 0x3F, 0xA2, 0x54,
+ 0x9E, 0x44, 0xF5, 0x8E, 0xD2, 0x40, 0x17, 0x10
+ };
+ const unsigned char pk[32] = {
+ 0x25, 0xD1, 0xDF, 0xF9, 0x51, 0x05, 0xF5, 0x25,
+ 0x3C, 0x40, 0x22, 0xF6, 0x28, 0xA9, 0x96, 0xAD,
+ 0x3A, 0x0D, 0x95, 0xFB, 0xF2, 0x1D, 0x46, 0x8A,
+ 0x1B, 0x33, 0xF8, 0xC1, 0x60, 0xD8, 0xF5, 0x17
+ };
+ unsigned char aux_rand[32] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+ const unsigned char msg[32] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+ const unsigned char sig[64] = {
+ 0x7E, 0xB0, 0x50, 0x97, 0x57, 0xE2, 0x46, 0xF1,
+ 0x94, 0x49, 0x88, 0x56, 0x51, 0x61, 0x1C, 0xB9,
+ 0x65, 0xEC, 0xC1, 0xA1, 0x87, 0xDD, 0x51, 0xB6,
+ 0x4F, 0xDA, 0x1E, 0xDC, 0x96, 0x37, 0xD5, 0xEC,
+ 0x97, 0x58, 0x2B, 0x9C, 0xB1, 0x3D, 0xB3, 0x93,
+ 0x37, 0x05, 0xB3, 0x2B, 0xA9, 0x82, 0xAF, 0x5A,
+ 0xF2, 0x5F, 0xD7, 0x88, 0x81, 0xEB, 0xB3, 0x27,
+ 0x71, 0xFC, 0x59, 0x22, 0xEF, 0xC6, 0x6E, 0xA3
+ };
+ test_schnorrsig_bip_vectors_check_signing(sk, pk, aux_rand, msg, sig);
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 1);
+ }
+ {
+ /* Test vector 4 */
+ const unsigned char pk[32] = {
+ 0xD6, 0x9C, 0x35, 0x09, 0xBB, 0x99, 0xE4, 0x12,
+ 0xE6, 0x8B, 0x0F, 0xE8, 0x54, 0x4E, 0x72, 0x83,
+ 0x7D, 0xFA, 0x30, 0x74, 0x6D, 0x8B, 0xE2, 0xAA,
+ 0x65, 0x97, 0x5F, 0x29, 0xD2, 0x2D, 0xC7, 0xB9
+ };
+ const unsigned char msg[32] = {
+ 0x4D, 0xF3, 0xC3, 0xF6, 0x8F, 0xCC, 0x83, 0xB2,
+ 0x7E, 0x9D, 0x42, 0xC9, 0x04, 0x31, 0xA7, 0x24,
+ 0x99, 0xF1, 0x78, 0x75, 0xC8, 0x1A, 0x59, 0x9B,
+ 0x56, 0x6C, 0x98, 0x89, 0xB9, 0x69, 0x67, 0x03
+ };
+ const unsigned char sig[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3B, 0x78, 0xCE, 0x56, 0x3F,
+ 0x89, 0xA0, 0xED, 0x94, 0x14, 0xF5, 0xAA, 0x28,
+ 0xAD, 0x0D, 0x96, 0xD6, 0x79, 0x5F, 0x9C, 0x63,
+ 0x76, 0xAF, 0xB1, 0x54, 0x8A, 0xF6, 0x03, 0xB3,
+ 0xEB, 0x45, 0xC9, 0xF8, 0x20, 0x7D, 0xEE, 0x10,
+ 0x60, 0xCB, 0x71, 0xC0, 0x4E, 0x80, 0xF5, 0x93,
+ 0x06, 0x0B, 0x07, 0xD2, 0x83, 0x08, 0xD7, 0xF4
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 1);
+ }
+ {
+ /* Test vector 5 */
+ const unsigned char pk[32] = {
+ 0xEE, 0xFD, 0xEA, 0x4C, 0xDB, 0x67, 0x77, 0x50,
+ 0xA4, 0x20, 0xFE, 0xE8, 0x07, 0xEA, 0xCF, 0x21,
+ 0xEB, 0x98, 0x98, 0xAE, 0x79, 0xB9, 0x76, 0x87,
+ 0x66, 0xE4, 0xFA, 0xA0, 0x4A, 0x2D, 0x4A, 0x34
+ };
+ secp256k1_xonly_pubkey pk_parsed;
+ /* No need to check the signature of the test vector as parsing the pubkey already fails */
+ CHECK(!secp256k1_xonly_pubkey_parse(ctx, &pk_parsed, pk));
+ }
+ {
+ /* Test vector 6 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0xFF, 0xF9, 0x7B, 0xD5, 0x75, 0x5E, 0xEE, 0xA4,
+ 0x20, 0x45, 0x3A, 0x14, 0x35, 0x52, 0x35, 0xD3,
+ 0x82, 0xF6, 0x47, 0x2F, 0x85, 0x68, 0xA1, 0x8B,
+ 0x2F, 0x05, 0x7A, 0x14, 0x60, 0x29, 0x75, 0x56,
+ 0x3C, 0xC2, 0x79, 0x44, 0x64, 0x0A, 0xC6, 0x07,
+ 0xCD, 0x10, 0x7A, 0xE1, 0x09, 0x23, 0xD9, 0xEF,
+ 0x7A, 0x73, 0xC6, 0x43, 0xE1, 0x66, 0xBE, 0x5E,
+ 0xBE, 0xAF, 0xA3, 0x4B, 0x1A, 0xC5, 0x53, 0xE2
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 7 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x1F, 0xA6, 0x2E, 0x33, 0x1E, 0xDB, 0xC2, 0x1C,
+ 0x39, 0x47, 0x92, 0xD2, 0xAB, 0x11, 0x00, 0xA7,
+ 0xB4, 0x32, 0xB0, 0x13, 0xDF, 0x3F, 0x6F, 0xF4,
+ 0xF9, 0x9F, 0xCB, 0x33, 0xE0, 0xE1, 0x51, 0x5F,
+ 0x28, 0x89, 0x0B, 0x3E, 0xDB, 0x6E, 0x71, 0x89,
+ 0xB6, 0x30, 0x44, 0x8B, 0x51, 0x5C, 0xE4, 0xF8,
+ 0x62, 0x2A, 0x95, 0x4C, 0xFE, 0x54, 0x57, 0x35,
+ 0xAA, 0xEA, 0x51, 0x34, 0xFC, 0xCD, 0xB2, 0xBD
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 8 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x6C, 0xFF, 0x5C, 0x3B, 0xA8, 0x6C, 0x69, 0xEA,
+ 0x4B, 0x73, 0x76, 0xF3, 0x1A, 0x9B, 0xCB, 0x4F,
+ 0x74, 0xC1, 0x97, 0x60, 0x89, 0xB2, 0xD9, 0x96,
+ 0x3D, 0xA2, 0xE5, 0x54, 0x3E, 0x17, 0x77, 0x69,
+ 0x96, 0x17, 0x64, 0xB3, 0xAA, 0x9B, 0x2F, 0xFC,
+ 0xB6, 0xEF, 0x94, 0x7B, 0x68, 0x87, 0xA2, 0x26,
+ 0xE8, 0xD7, 0xC9, 0x3E, 0x00, 0xC5, 0xED, 0x0C,
+ 0x18, 0x34, 0xFF, 0x0D, 0x0C, 0x2E, 0x6D, 0xA6
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 9 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0x3D, 0xDA, 0x83, 0x28, 0xAF, 0x9C, 0x23,
+ 0xA9, 0x4C, 0x1F, 0xEE, 0xCF, 0xD1, 0x23, 0xBA,
+ 0x4F, 0xB7, 0x34, 0x76, 0xF0, 0xD5, 0x94, 0xDC,
+ 0xB6, 0x5C, 0x64, 0x25, 0xBD, 0x18, 0x60, 0x51
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 10 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x76, 0x15, 0xFB, 0xAF, 0x5A, 0xE2, 0x88, 0x64,
+ 0x01, 0x3C, 0x09, 0x97, 0x42, 0xDE, 0xAD, 0xB4,
+ 0xDB, 0xA8, 0x7F, 0x11, 0xAC, 0x67, 0x54, 0xF9,
+ 0x37, 0x80, 0xD5, 0xA1, 0x83, 0x7C, 0xF1, 0x97
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 11 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x4A, 0x29, 0x8D, 0xAC, 0xAE, 0x57, 0x39, 0x5A,
+ 0x15, 0xD0, 0x79, 0x5D, 0xDB, 0xFD, 0x1D, 0xCB,
+ 0x56, 0x4D, 0xA8, 0x2B, 0x0F, 0x26, 0x9B, 0xC7,
+ 0x0A, 0x74, 0xF8, 0x22, 0x04, 0x29, 0xBA, 0x1D,
+ 0x69, 0xE8, 0x9B, 0x4C, 0x55, 0x64, 0xD0, 0x03,
+ 0x49, 0x10, 0x6B, 0x84, 0x97, 0x78, 0x5D, 0xD7,
+ 0xD1, 0xD7, 0x13, 0xA8, 0xAE, 0x82, 0xB3, 0x2F,
+ 0xA7, 0x9D, 0x5F, 0x7F, 0xC4, 0x07, 0xD3, 0x9B
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 12 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F,
+ 0x69, 0xE8, 0x9B, 0x4C, 0x55, 0x64, 0xD0, 0x03,
+ 0x49, 0x10, 0x6B, 0x84, 0x97, 0x78, 0x5D, 0xD7,
+ 0xD1, 0xD7, 0x13, 0xA8, 0xAE, 0x82, 0xB3, 0x2F,
+ 0xA7, 0x9D, 0x5F, 0x7F, 0xC4, 0x07, 0xD3, 0x9B
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 13 */
+ const unsigned char pk[32] = {
+ 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F,
+ 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE,
+ 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8,
+ 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59
+ };
+ const unsigned char msg[32] = {
+ 0x24, 0x3F, 0x6A, 0x88, 0x85, 0xA3, 0x08, 0xD3,
+ 0x13, 0x19, 0x8A, 0x2E, 0x03, 0x70, 0x73, 0x44,
+ 0xA4, 0x09, 0x38, 0x22, 0x29, 0x9F, 0x31, 0xD0,
+ 0x08, 0x2E, 0xFA, 0x98, 0xEC, 0x4E, 0x6C, 0x89
+ };
+ const unsigned char sig[64] = {
+ 0x6C, 0xFF, 0x5C, 0x3B, 0xA8, 0x6C, 0x69, 0xEA,
+ 0x4B, 0x73, 0x76, 0xF3, 0x1A, 0x9B, 0xCB, 0x4F,
+ 0x74, 0xC1, 0x97, 0x60, 0x89, 0xB2, 0xD9, 0x96,
+ 0x3D, 0xA2, 0xE5, 0x54, 0x3E, 0x17, 0x77, 0x69,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
+ 0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48, 0xA0, 0x3B,
+ 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x41
+ };
+ test_schnorrsig_bip_vectors_check_verify(pk, msg, sig, 0);
+ }
+ {
+ /* Test vector 14 */
+ const unsigned char pk[32] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30
+ };
+ secp256k1_xonly_pubkey pk_parsed;
+ /* No need to check the signature of the test vector as parsing the pubkey already fails */
+ CHECK(!secp256k1_xonly_pubkey_parse(ctx, &pk_parsed, pk));
+ }
+}
+
+/* Nonce function that always fails (returns 0) */
+static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
+ (void) msg32;
+ (void) key32;
+ (void) xonly_pk32;
+ (void) algo16;
+ (void) data;
+ (void) nonce32;
+ return 0;
+}
+
+/* Nonce function that sets nonce to 0 */
+static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
+ (void) msg32;
+ (void) key32;
+ (void) xonly_pk32;
+ (void) algo16;
+ (void) data;
+
+ memset(nonce32, 0, 32);
+ return 1;
+}
+
+/* Nonce function that sets nonce to 0xFF...0xFF */
+static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
+ (void) msg32;
+ (void) key32;
+ (void) xonly_pk32;
+ (void) algo16;
+ (void) data;
+
+ memset(nonce32, 0xFF, 32);
+ return 1;
+}
+
+void test_schnorrsig_sign(void) {
+ unsigned char sk[32];
+ secp256k1_keypair keypair;
+ const unsigned char msg[32] = "this is a msg for a schnorrsig..";
+ unsigned char sig[64];
+ unsigned char zeros64[64] = { 0 };
+
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
+
+ /* Test different nonce functions */
+ memset(sig, 1, sizeof(sig));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0);
+ CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
+ memset(&sig, 1, sizeof(sig));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0);
+ CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1);
+ CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) != 0);
+}
+
+#define N_SIGS 3
+/* Creates N_SIGS valid signatures and verifies them with verify and
+ * verify_batch (TODO). Then flips some bits and checks that verification now
+ * fails. */
+void test_schnorrsig_sign_verify(void) {
+ unsigned char sk[32];
+ unsigned char msg[N_SIGS][32];
+ unsigned char sig[N_SIGS][64];
+ size_t i;
+ secp256k1_keypair keypair;
+ secp256k1_xonly_pubkey pk;
+ secp256k1_scalar s;
+
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
+
+ for (i = 0; i < N_SIGS; i++) {
+ secp256k1_testrand256(msg[i]);
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], &pk));
+ }
+
+ {
+ /* Flip a few bits in the signature and in the message and check that
+ * verify and verify_batch (TODO) fail */
+ size_t sig_idx = secp256k1_testrand_int(N_SIGS);
+ size_t byte_idx = secp256k1_testrand_int(32);
+ unsigned char xorbyte = secp256k1_testrand_int(254)+1;
+ sig[sig_idx][byte_idx] ^= xorbyte;
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ sig[sig_idx][byte_idx] ^= xorbyte;
+
+ byte_idx = secp256k1_testrand_int(32);
+ sig[sig_idx][32+byte_idx] ^= xorbyte;
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ sig[sig_idx][32+byte_idx] ^= xorbyte;
+
+ byte_idx = secp256k1_testrand_int(32);
+ msg[sig_idx][byte_idx] ^= xorbyte;
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ msg[sig_idx][byte_idx] ^= xorbyte;
+
+ /* Check that above bitflips have been reversed correctly */
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ }
+
+ /* Test overflowing s */
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+ memset(&sig[0][32], 0xFF, 32);
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+
+ /* Test negative s */
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+ secp256k1_scalar_set_b32(&s, &sig[0][32], NULL);
+ secp256k1_scalar_negate(&s, &s);
+ secp256k1_scalar_get_b32(&sig[0][32], &s);
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+}
+#undef N_SIGS
+
+void test_schnorrsig_taproot(void) {
+ unsigned char sk[32];
+ secp256k1_keypair keypair;
+ secp256k1_xonly_pubkey internal_pk;
+ unsigned char internal_pk_bytes[32];
+ secp256k1_xonly_pubkey output_pk;
+ unsigned char output_pk_bytes[32];
+ unsigned char tweak[32];
+ int pk_parity;
+ unsigned char msg[32];
+ unsigned char sig[64];
+
+ /* Create output key */
+ secp256k1_testrand256(sk);
+ CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
+    /* In actual taproot the tweak would be the hash of internal_pk */
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, tweak, &internal_pk) == 1);
+ CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1);
+
+ /* Key spend */
+ secp256k1_testrand256(msg);
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
+ /* Verify key spend */
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1);
+
+ /* Script spend */
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1);
+ /* Verify script spend */
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &internal_pk, internal_pk_bytes) == 1);
+ CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk_bytes, pk_parity, &internal_pk, tweak) == 1);
+}
+
+void run_schnorrsig_tests(void) {
+ int i;
+ run_nonce_function_bip340_tests();
+
+ test_schnorrsig_api();
+ test_schnorrsig_sha256_tagged();
+ test_schnorrsig_bip_vectors();
+ for (i = 0; i < count; i++) {
+ test_schnorrsig_sign();
+ test_schnorrsig_sign_verify();
+ }
+ test_schnorrsig_taproot();
+}
+
+#endif
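For orientation, the application-level flow these tests exercise (create a keypair, derive its x-only public key, sign with the default BIP-340 nonce function, verify) looks roughly as follows. This sketch is not part of the patch; it assumes a build with the extrakeys and schnorrsig modules enabled, and the hard-coded secret key (the key from test vector 0 above) is for illustration only.

    #include <assert.h>
    #include <secp256k1.h>
    #include <secp256k1_extrakeys.h>
    #include <secp256k1_schnorrsig.h>

    int main(void) {
        unsigned char sk[32] = {0};   /* illustrative key only; use a CSPRNG in practice */
        unsigned char msg[32] = {0};  /* the 32-byte message to sign */
        unsigned char sig[64];
        unsigned char pk_ser[32];
        secp256k1_keypair keypair;
        secp256k1_xonly_pubkey pk;
        secp256k1_context *ctx =
            secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);

        sk[31] = 3;  /* secret key 0x...03, as in BIP-340 test vector 0 */
        assert(secp256k1_keypair_create(ctx, &keypair, sk));
        assert(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
        /* 32-byte x-only public key to publish alongside the signature. */
        assert(secp256k1_xonly_pubkey_serialize(ctx, pk_ser, &pk));

        /* A NULL nonce function selects the default BIP-340 nonce derivation;
         * a NULL aux_rand is allowed, as the API tests above check. */
        assert(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL));
        assert(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk));

        secp256k1_context_destroy(ctx);
        return 0;
    }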
diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h
index 2a74703523..fb3fb187ce 100644
--- a/src/secp256k1/src/scalar.h
+++ b/src/secp256k1/src/scalar.h
@@ -8,6 +8,7 @@
#define SECP256K1_SCALAR_H
#include "num.h"
+#include "util.h"
#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
@@ -15,12 +16,12 @@
#if defined(EXHAUSTIVE_TEST_ORDER)
#include "scalar_low.h"
-#elif defined(USE_SCALAR_4X64)
+#elif defined(SECP256K1_WIDEMUL_INT128)
#include "scalar_4x64.h"
-#elif defined(USE_SCALAR_8X32)
+#elif defined(SECP256K1_WIDEMUL_INT64)
#include "scalar_8x32.h"
#else
-#error "Please select scalar implementation"
+#error "Please select wide multiplication implementation"
#endif
/** Clear a scalar to prevent the leak of sensitive data. */
@@ -101,12 +102,11 @@ static void secp256k1_scalar_order_get_num(secp256k1_num *r);
/** Compare two scalars. */
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
-#ifdef USE_ENDOMORPHISM
-/** Find r1 and r2 such that r1+r2*2^128 = a. */
-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
-/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
-#endif
+/** Find r1 and r2 such that r1+r2*2^128 = k. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
+/** Find r1 and r2 such that r1+r2*lambda = k,
+ * where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h
index 8f539c4bc6..73cbd5e18a 100644
--- a/src/secp256k1/src/scalar_4x64_impl.h
+++ b/src/secp256k1/src/scalar_4x64_impl.h
@@ -192,9 +192,9 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
- th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c1 += th; /* overflow is handled on the next line */ \
- c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}
@@ -207,7 +207,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
- th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c1 += th; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK(c1 >= th); \
}
@@ -221,16 +221,16 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
tl = t; \
} \
th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
- c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
- th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c0 += tl2; /* overflow is handled on the next line */ \
- th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ th2 += (c0 < tl2); /* second overflow is handled on the next line */ \
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
c1 += th2; /* overflow is handled on the next line */ \
- c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
@@ -238,15 +238,15 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
#define sumadd(a) { \
unsigned int over; \
c0 += (a); /* overflow is handled on the next line */ \
- over = (c0 < (a)) ? 1 : 0; \
+ over = (c0 < (a)); \
c1 += over; /* overflow is handled on the next line */ \
- c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+ c2 += (c1 < over); /* never overflows by contract */ \
}
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
c0 += (a); /* overflow is handled on the next line */ \
- c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c1 += (c0 < (a)); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
VERIFY_CHECK(c2 == 0); \
}
@@ -912,18 +912,16 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
secp256k1_scalar_reduce_512(r, l);
}
-#ifdef USE_ENDOMORPHISM
-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
- r1->d[0] = a->d[0];
- r1->d[1] = a->d[1];
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ r1->d[0] = k->d[0];
+ r1->d[1] = k->d[1];
r1->d[2] = 0;
r1->d[3] = 0;
- r2->d[0] = a->d[2];
- r2->d[1] = a->d[3];
+ r2->d[0] = k->d[2];
+ r2->d[1] = k->d[3];
r2->d[2] = 0;
r2->d[3] = 0;
}
-#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
diff --git a/src/secp256k1/src/scalar_8x32_impl.h b/src/secp256k1/src/scalar_8x32_impl.h
index 3c372f34fe..6853f79ecc 100644
--- a/src/secp256k1/src/scalar_8x32_impl.h
+++ b/src/secp256k1/src/scalar_8x32_impl.h
@@ -271,9 +271,9 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
- th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ th += (c0 < tl); /* at most 0xFFFFFFFF */ \
c1 += th; /* overflow is handled on the next line */ \
- c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}
@@ -286,7 +286,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
- th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ th += (c0 < tl); /* at most 0xFFFFFFFF */ \
c1 += th; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK(c1 >= th); \
}
@@ -300,16 +300,16 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
tl = t; \
} \
th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
- c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \
- th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ th2 += (tl2 < tl); /* at most 0xFFFFFFFF */ \
c0 += tl2; /* overflow is handled on the next line */ \
- th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ th2 += (c0 < tl2); /* second overflow is handled on the next line */ \
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
c1 += th2; /* overflow is handled on the next line */ \
- c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
@@ -317,15 +317,15 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
#define sumadd(a) { \
unsigned int over; \
c0 += (a); /* overflow is handled on the next line */ \
- over = (c0 < (a)) ? 1 : 0; \
+ over = (c0 < (a)); \
c1 += over; /* overflow is handled on the next line */ \
- c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+ c2 += (c1 < over); /* never overflows by contract */ \
}
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
c0 += (a); /* overflow is handled on the next line */ \
- c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c1 += (c0 < (a)); /* never overflows by contract (verified the next line) */ \
VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
VERIFY_CHECK(c2 == 0); \
}
@@ -672,26 +672,24 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
secp256k1_scalar_reduce_512(r, l);
}
-#ifdef USE_ENDOMORPHISM
-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
- r1->d[0] = a->d[0];
- r1->d[1] = a->d[1];
- r1->d[2] = a->d[2];
- r1->d[3] = a->d[3];
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ r1->d[0] = k->d[0];
+ r1->d[1] = k->d[1];
+ r1->d[2] = k->d[2];
+ r1->d[3] = k->d[3];
r1->d[4] = 0;
r1->d[5] = 0;
r1->d[6] = 0;
r1->d[7] = 0;
- r2->d[0] = a->d[4];
- r2->d[1] = a->d[5];
- r2->d[2] = a->d[6];
- r2->d[3] = a->d[7];
+ r2->d[0] = k->d[4];
+ r2->d[1] = k->d[5];
+ r2->d[2] = k->d[6];
+ r2->d[3] = k->d[7];
r2->d[4] = 0;
r2->d[5] = 0;
r2->d[6] = 0;
r2->d[7] = 0;
}
-#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h
index 70cd73db06..fc75891818 100644
--- a/src/secp256k1/src/scalar_impl.h
+++ b/src/secp256k1/src/scalar_impl.h
@@ -7,6 +7,10 @@
#ifndef SECP256K1_SCALAR_IMPL_H
#define SECP256K1_SCALAR_IMPL_H
+#ifdef VERIFY
+#include <string.h>
+#endif
+
#include "scalar.h"
#include "util.h"
@@ -16,12 +20,12 @@
#if defined(EXHAUSTIVE_TEST_ORDER)
#include "scalar_low_impl.h"
-#elif defined(USE_SCALAR_4X64)
+#elif defined(SECP256K1_WIDEMUL_INT128)
#include "scalar_4x64_impl.h"
-#elif defined(USE_SCALAR_8X32)
+#elif defined(SECP256K1_WIDEMUL_INT64)
#include "scalar_8x32_impl.h"
#else
-#error "Please select scalar implementation"
+#error "Please select wide multiplication implementation"
#endif
static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
@@ -252,37 +256,65 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#endif
}
-#ifdef USE_ENDOMORPHISM
+/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
+# if EXHAUSTIVE_TEST_ORDER == 13
+# define EXHAUSTIVE_TEST_LAMBDA 9
+# elif EXHAUSTIVE_TEST_ORDER == 199
+# define EXHAUSTIVE_TEST_LAMBDA 92
+# else
+# error No known lambda for the specified exhaustive test group order.
+# endif
+
/**
- * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
- * full case we don't bother making k1 and k2 be small, we just want them to be
+ * Find r1 and r2 given k, such that r1 + r2 * lambda == k mod n; unlike in the
+ * full case we don't bother making r1 and r2 be small, we just want them to be
* nontrivial to get full test coverage for the exhaustive tests. We therefore
- * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
+ * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
*/
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
- *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
- *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
+ *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
#else
/**
* The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
- * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
- * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
+ * lambda is: */
+static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST(
+ 0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
+ 0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
+);
+
+#ifdef VERIFY
+static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k);
+#endif
+
+/*
+ * Both lambda and beta are primitive cube roots of unity. That is, lambda^3 == 1 mod n and
+ * beta^3 == 1 mod p, where n is the curve order and p is the field order.
*
- * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
- * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
- * and k2 have a small size.
- * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+ * Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
+ * roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
+ * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
+ *
+ * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
+ * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
+ * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
+ * reduced basis {a1 + b1*l, a2 + b2*l} where
*
* - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
* - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
* - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
* - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
*
- * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+ * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+ * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+ * and k2 are small in absolute value.
+ *
+ * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
* k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
- * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+ * compute r2 = k2 mod n, and r1 = k1 mod n = (k - r2 * lambda) mod n, avoiding the need for
+ * the constants a1 and a2.
*
* g1, g2 are precomputed constants used to replace division with a rounded multiplication
* when decomposing the scalar for an endomorphism-based point multiplication.
@@ -294,21 +326,21 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
* Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
* Section 4.3 (here we use a somewhat higher-precision estimate):
* d = a1*b2 - b1*a2
- * g1 = round((2^272)*b2/d)
- * g2 = round((2^272)*b1/d)
+ * g1 = round(2^384 * b2/d)
+ * g2 = round(2^384 * (-b1)/d)
*
- * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
- * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
+ * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
+ * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda).
*
- * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
+ * The function below splits k into r1 and r2, such that
+ * - r1 + lambda * r2 == k (mod n)
+ * - either r1 < 2^128 or -r1 mod n < 2^128
+ * - either r2 < 2^128 or -r2 mod n < 2^128
+ *
+ * See proof below.
*/
-
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar c1, c2;
- static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
- 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
- 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
- );
static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
@@ -318,25 +350,167 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
);
static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
- 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
- 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
+ 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
+ 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
);
static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
- 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
- 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
+ 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
+ 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
);
- VERIFY_CHECK(r1 != a);
- VERIFY_CHECK(r2 != a);
+ VERIFY_CHECK(r1 != k);
+ VERIFY_CHECK(r2 != k);
/* these _var calls are constant time since the shift amount is constant */
- secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
- secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
+ secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
+ secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
secp256k1_scalar_mul(&c1, &c1, &minus_b1);
secp256k1_scalar_mul(&c2, &c2, &minus_b2);
secp256k1_scalar_add(r2, &c1, &c2);
- secp256k1_scalar_mul(r1, r2, &minus_lambda);
- secp256k1_scalar_add(r1, r1, a);
-}
-#endif
+ secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
+ secp256k1_scalar_negate(r1, r1);
+ secp256k1_scalar_add(r1, r1, k);
+
+#ifdef VERIFY
+ secp256k1_scalar_split_lambda_verify(r1, r2, k);
#endif
+}
+
+#ifdef VERIFY
+/*
+ * Proof for secp256k1_scalar_split_lambda's bounds.
+ *
+ * Let
+ * - epsilon1 = 2^256 * |g1/2^384 - b2/d|
+ * - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
+ * - c1 = round(k*g1/2^384)
+ * - c2 = round(k*g2/2^384)
+ *
+ * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
+ *
+ * |c1 - k*b2/d|
+ * =
+ * |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
+ * <= {triangle inequality}
+ * |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
+ * =
+ * |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
+ * < {rounding in c1 and 0 <= k < 2^256}
+ * 2^-1 + 2^256 * |g1/2^384 - b2/d|
+ * = {definition of epsilon1}
+ * 2^-1 + epsilon1
+ *
+ * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
+ *
+ * |c2 - k*(-b1)/d|
+ * =
+ * |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
+ * <= {triangle inequality}
+ * |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
+ * =
+ * |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
+ * < {rounding in c2 and 0 <= k < 2^256}
+ * 2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
+ * = {definition of epsilon2}
+ * 2^-1 + epsilon2
+ *
+ * Let
+ * - k1 = k - c1*a1 - c2*a2
+ * - k2 = - c1*b1 - c2*b2
+ *
+ * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
+ *
+ * |k1|
+ * = {definition of k1}
+ * |k - c1*a1 - c2*a2|
+ * = {(a1*b2 - b1*a2)/n = 1}
+ * |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
+ * =
+ * |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
+ * <= {triangle inequality}
+ * a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
+ * < {Lemma 1 and Lemma 2}
+ * a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
+ * < {rounding up to an integer}
+ * (a1 + a2 + 1)/2
+ * < {rounding up to a power of 2}
+ * 2^128
+ *
+ * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
+ *
+ * |k2|
+ * = {definition of k2}
+ * |- c1*b1 - c2*b2|
+ * = {(b1*b2 - b1*b2)/n = 0}
+ * |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
+ * =
+ * |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
+ * <= {triangle inequality}
+ * (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
+ * < {Lemma 1 and Lemma 2}
+ * (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
+ * < {rounding up to an integer}
+ * (-b1 + b2)/2 + 1
+ * < {rounding up to a power of 2}
+ * 2^128
+ *
+ * Let
+ * - r2 = k2 mod n
+ * - r1 = k - r2*lambda mod n.
+ *
+ * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
+ *
+ * Lemma 5: r1 == k1 mod n.
+ *
+ * r1
+ * == {definition of r1 and r2}
+ * k - k2*lambda
+ * == {definition of k2}
+ * k - (- c1*b1 - c2*b2)*lambda
+ * ==
+ * k + c1*b1*lambda + c2*b2*lambda
+ * == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
+ * k - c1*a1 - c2*a2
+ * == {definition of k1}
+ * k1
+ *
+ * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
+ *
+ * - either r1 < 2^128 or -r1 mod n < 2^128
+ * - either r2 < 2^128 or -r2 mod n < 2^128.
+ *
+ * Q.E.D.
+ */
+static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ secp256k1_scalar s;
+ unsigned char buf1[32];
+ unsigned char buf2[32];
+
+ /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */
+ static const unsigned char k1_bound[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77
+ };
+
+ /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */
+ static const unsigned char k2_bound[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
+ };
+
+ secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2);
+ secp256k1_scalar_add(&s, &s, r1);
+ VERIFY_CHECK(secp256k1_scalar_eq(&s, k));
+
+ secp256k1_scalar_negate(&s, r1);
+ secp256k1_scalar_get_b32(buf1, r1);
+ secp256k1_scalar_get_b32(buf2, &s);
+ VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0);
+
+ secp256k1_scalar_negate(&s, r2);
+ secp256k1_scalar_get_b32(buf1, r2);
+ secp256k1_scalar_get_b32(buf2, &s);
+ VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0);
+}
+#endif /* VERIFY */
+#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */
#endif /* SECP256K1_SCALAR_IMPL_H */
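A quick sanity check of the exhaustive-test split defined earlier in this file (not part of the patch): with EXHAUSTIVE_TEST_ORDER = 13 and EXHAUSTIVE_TEST_LAMBDA = 9, lambda really is a primitive cube root of unity, since 9^3 = 729 = 56*13 + 1, i.e. 9^3 == 1 mod 13. For k = 4 the code computes r2 = (4 + 5) mod 13 = 9 and r1 = (4 + (13 - 9)*9) mod 13 = 40 mod 13 = 1, and the defining relation holds: r1 + r2*lambda = 1 + 9*9 = 82 == 4 == k mod 13.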
diff --git a/src/secp256k1/src/scalar_low_impl.h b/src/secp256k1/src/scalar_low_impl.h
index b79cf1ff6c..a615ec074b 100644
--- a/src/secp256k1/src/scalar_low_impl.h
+++ b/src/secp256k1/src/scalar_low_impl.h
@@ -48,14 +48,17 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
- const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
int i;
+ int over = 0;
*r = 0;
for (i = 0; i < 32; i++) {
- *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+ *r = (*r * 0x100) + b32[i];
+ if (*r >= EXHAUSTIVE_TEST_ORDER) {
+ over = 1;
+ *r %= EXHAUSTIVE_TEST_ORDER;
+ }
}
- /* just deny overflow, it basically always happens */
- if (overflow) *overflow = 0;
+ if (overflow) *overflow = over;
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
diff --git a/src/secp256k1/src/scratch_impl.h b/src/secp256k1/src/scratch_impl.h
index 4cee700001..f381e2e322 100644
--- a/src/secp256k1/src/scratch_impl.h
+++ b/src/secp256k1/src/scratch_impl.h
@@ -11,7 +11,7 @@
#include "scratch.h"
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) {
- const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
+ const size_t base_alloc = ROUND_TO_ALIGN(sizeof(secp256k1_scratch));
void *alloc = checked_malloc(error_callback, base_alloc + size);
secp256k1_scratch* ret = (secp256k1_scratch *)alloc;
if (ret != NULL) {
@@ -26,7 +26,7 @@ static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* err
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
if (scratch != NULL) {
VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
@@ -36,7 +36,7 @@ static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback,
}
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) {
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
@@ -44,7 +44,7 @@ static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callb
}
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) {
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
@@ -56,10 +56,14 @@ static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_c
}
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) {
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
+ /* Ensure that multiplication will not wrap around */
+ if (ALIGNMENT > 1 && objects > SIZE_MAX/(ALIGNMENT - 1)) {
+ return 0;
+ }
if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) {
return 0;
}
@@ -68,9 +72,16 @@ static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_c
static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t size) {
void *ret;
- size = ROUND_TO_ALIGN(size);
+ size_t rounded_size;
+
+ rounded_size = ROUND_TO_ALIGN(size);
+ /* Check that rounding did not wrap around */
+ if (rounded_size < size) {
+ return NULL;
+ }
+ size = rounded_size;
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return NULL;
}
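Both new guards in scratch_impl.h above are size_t wrap-around checks, and a small worked example may help (assuming ROUND_TO_ALIGN rounds up by adding ALIGNMENT - 1 before truncating to a multiple of ALIGNMENT): with ALIGNMENT = 16 and size = SIZE_MAX - 7, the intermediate sum wraps past zero and the rounded result becomes 0, which is smaller than size, so the new rounded_size < size test in secp256k1_scratch_alloc rejects the request with NULL. Similarly, objects * (ALIGNMENT - 1) in secp256k1_scratch_max_allocation could wrap for huge object counts, so the function now returns 0 whenever objects > SIZE_MAX/(ALIGNMENT - 1) before multiplying.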
diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c
index b03a6e6345..dae506d08c 100644
--- a/src/secp256k1/src/secp256k1.c
+++ b/src/secp256k1/src/secp256k1.c
@@ -7,6 +7,7 @@
#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"
+#include "assumptions.h"
#include "util.h"
#include "num_impl.h"
#include "field_impl.h"
@@ -19,6 +20,7 @@
#include "eckey_impl.h"
#include "hash_impl.h"
#include "scratch_impl.h"
+#include "selftest.h"
#if defined(VALGRIND)
# include <valgrind/memcheck.h>
@@ -117,6 +119,9 @@ secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigne
size_t prealloc_size;
secp256k1_context* ret;
+ if (!secp256k1_selftest()) {
+ secp256k1_callback_call(&default_error_callback, "self test failed");
+ }
VERIFY_CHECK(prealloc != NULL);
prealloc_size = secp256k1_context_preallocated_size(flags);
ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size);
@@ -226,7 +231,7 @@ void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scr
* of the software. This is setup for use with valgrind but could be substituted with
* the appropriate instrumentation for other analysis tools.
*/
-static SECP256K1_INLINE void secp256k1_declassify(const secp256k1_context* ctx, void *p, size_t len) {
+static SECP256K1_INLINE void secp256k1_declassify(const secp256k1_context* ctx, const void *p, size_t len) {
#if defined(VALGRIND)
if (EXPECT(ctx->declassify,0)) VALGRIND_MAKE_MEM_DEFINED(p, len);
#else
@@ -279,6 +284,9 @@ int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pu
if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
return 0;
}
+ if (!secp256k1_ge_is_in_correct_subgroup(&Q)) {
+ return 0;
+ }
secp256k1_pubkey_save(pubkey, &Q);
secp256k1_ge_clear(&Q);
return 1;
@@ -291,7 +299,7 @@ int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *o
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(outputlen != NULL);
- ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33 : 65));
+ ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33u : 65u));
len = *outputlen;
*outputlen = 0;
ARG_CHECK(output != NULL);
@@ -548,10 +556,21 @@ int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char
return ret;
}
-int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
+static int secp256k1_ec_pubkey_create_helper(const secp256k1_ecmult_gen_context *ecmult_gen_ctx, secp256k1_scalar *seckey_scalar, secp256k1_ge *p, const unsigned char *seckey) {
secp256k1_gej pj;
+ int ret;
+
+ ret = secp256k1_scalar_set_b32_seckey(seckey_scalar, seckey);
+ secp256k1_scalar_cmov(seckey_scalar, &secp256k1_scalar_one, !ret);
+
+ secp256k1_ecmult_gen(ecmult_gen_ctx, &pj, seckey_scalar);
+ secp256k1_ge_set_gej(p, &pj);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
secp256k1_ge p;
- secp256k1_scalar sec;
+ secp256k1_scalar seckey_scalar;
int ret = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(pubkey != NULL);
@@ -559,15 +578,11 @@ int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *p
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(seckey != NULL);
- ret = secp256k1_scalar_set_b32_seckey(&sec, seckey);
- secp256k1_scalar_cmov(&sec, &secp256k1_scalar_one, !ret);
-
- secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
- secp256k1_ge_set_gej(&p, &pj);
+ ret = secp256k1_ec_pubkey_create_helper(&ctx->ecmult_gen_ctx, &seckey_scalar, &p, seckey);
secp256k1_pubkey_save(pubkey, &p);
memczero(pubkey, sizeof(*pubkey), !ret);
- secp256k1_scalar_clear(&sec);
+ secp256k1_scalar_clear(&seckey_scalar);
return ret;
}
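This refactor only moves the scalar setup and generator multiplication into a helper so other call sites can reuse it; the public behaviour is unchanged: on an out-of-range secret key the function still returns 0 and zeroizes the output without branching on secret data. A minimal caller-side sketch, using a fixed all-0x01 key purely for illustration:

#include <secp256k1.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    unsigned char seckey[32];
    secp256k1_pubkey pubkey;
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
    memset(seckey, 0x01, sizeof(seckey)); /* placeholder key; never use a constant key in practice */
    if (!secp256k1_ec_pubkey_create(ctx, &pubkey, seckey)) {
        fprintf(stderr, "secret key out of range\n"); /* pubkey has been zeroized */
    }
    secp256k1_context_destroy(ctx);
    return 0;
}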
@@ -605,24 +620,31 @@ int secp256k1_ec_pubkey_negate(const secp256k1_context* ctx, secp256k1_pubkey *p
return ret;
}
-int secp256k1_ec_seckey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
+
+static int secp256k1_ec_seckey_tweak_add_helper(secp256k1_scalar *sec, const unsigned char *tweak) {
secp256k1_scalar term;
+ int overflow = 0;
+ int ret = 0;
+
+ secp256k1_scalar_set_b32(&term, tweak, &overflow);
+ ret = (!overflow) & secp256k1_eckey_privkey_tweak_add(sec, &term);
+ secp256k1_scalar_clear(&term);
+ return ret;
+}
+
+int secp256k1_ec_seckey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
secp256k1_scalar sec;
int ret = 0;
- int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(seckey != NULL);
ARG_CHECK(tweak != NULL);
- secp256k1_scalar_set_b32(&term, tweak, &overflow);
ret = secp256k1_scalar_set_b32_seckey(&sec, seckey);
-
- ret &= (!overflow) & secp256k1_eckey_privkey_tweak_add(&sec, &term);
+ ret &= secp256k1_ec_seckey_tweak_add_helper(&sec, tweak);
secp256k1_scalar_cmov(&sec, &secp256k1_scalar_zero, !ret);
secp256k1_scalar_get_b32(seckey, &sec);
secp256k1_scalar_clear(&sec);
- secp256k1_scalar_clear(&term);
return ret;
}
@@ -630,25 +652,26 @@ int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *
return secp256k1_ec_seckey_tweak_add(ctx, seckey, tweak);
}
+static int secp256k1_ec_pubkey_tweak_add_helper(const secp256k1_ecmult_context* ecmult_ctx, secp256k1_ge *p, const unsigned char *tweak) {
+ secp256k1_scalar term;
+ int overflow = 0;
+ secp256k1_scalar_set_b32(&term, tweak, &overflow);
+ return !overflow && secp256k1_eckey_pubkey_tweak_add(ecmult_ctx, p, &term);
+}
+
int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
secp256k1_ge p;
- secp256k1_scalar term;
int ret = 0;
- int overflow = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(pubkey != NULL);
ARG_CHECK(tweak != NULL);
- secp256k1_scalar_set_b32(&term, tweak, &overflow);
- ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
+ ret = secp256k1_pubkey_load(ctx, &p, pubkey);
memset(pubkey, 0, sizeof(*pubkey));
+ ret = ret && secp256k1_ec_pubkey_tweak_add_helper(&ctx->ecmult_ctx, &p, tweak);
if (ret) {
- if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) {
- secp256k1_pubkey_save(pubkey, &p);
- } else {
- ret = 0;
- }
+ secp256k1_pubkey_save(pubkey, &p);
}
return ret;
@@ -741,3 +764,11 @@ int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *
#ifdef ENABLE_MODULE_RECOVERY
# include "modules/recovery/main_impl.h"
#endif
+
+#ifdef ENABLE_MODULE_EXTRAKEYS
+# include "modules/extrakeys/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORRSIG
+# include "modules/schnorrsig/main_impl.h"
+#endif
diff --git a/src/secp256k1/src/selftest.h b/src/secp256k1/src/selftest.h
new file mode 100644
index 0000000000..0e37510c1e
--- /dev/null
+++ b/src/secp256k1/src/selftest.h
@@ -0,0 +1,32 @@
+/**********************************************************************
+ * Copyright (c) 2020 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_SELFTEST_H
+#define SECP256K1_SELFTEST_H
+
+#include "hash.h"
+
+#include <string.h>
+
+static int secp256k1_selftest_sha256(void) {
+ static const char *input63 = "For this sample, this 63-byte string will be used as input data";
+ static const unsigned char output32[32] = {
+ 0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e,
+ 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42,
+ };
+ unsigned char out[32];
+ secp256k1_sha256 hasher;
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)input63, 63);
+ secp256k1_sha256_finalize(&hasher, out);
+ return secp256k1_memcmp_var(out, output32, 32) == 0;
+}
+
+static int secp256k1_selftest(void) {
+ return secp256k1_selftest_sha256();
+}
+
+#endif /* SECP256K1_SELFTEST_H */
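The self test is wired into context creation (see the secp256k1_context_preallocated_create hunk above), so any successfully created context implies the SHA-256 test vector matched. A minimal caller-side sketch using only the public API:

#include <secp256k1.h>
#include <stdio.h>

int main(void) {
    /* If the internal self test had failed, the default error callback
     * would have aborted inside secp256k1_context_create. */
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
    if (ctx != NULL) {
        printf("context created; self test passed\n");
        secp256k1_context_destroy(ctx);
    }
    return 0;
}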
diff --git a/src/secp256k1/src/testrand.h b/src/secp256k1/src/testrand.h
index f1f9be077e..a76003d5b8 100644
--- a/src/secp256k1/src/testrand.h
+++ b/src/secp256k1/src/testrand.h
@@ -14,25 +14,34 @@
/* A non-cryptographic RNG used only for test infrastructure. */
/** Seed the pseudorandom number generator for testing. */
-SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16);
+SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16);
/** Generate a pseudorandom number in the range [0..2**32-1]. */
-static uint32_t secp256k1_rand32(void);
+static uint32_t secp256k1_testrand32(void);
/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
* more. */
-static uint32_t secp256k1_rand_bits(int bits);
+static uint32_t secp256k1_testrand_bits(int bits);
/** Generate a pseudorandom number in the range [0..range-1]. */
-static uint32_t secp256k1_rand_int(uint32_t range);
+static uint32_t secp256k1_testrand_int(uint32_t range);
/** Generate a pseudorandom 32-byte array. */
-static void secp256k1_rand256(unsigned char *b32);
+static void secp256k1_testrand256(unsigned char *b32);
/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
-static void secp256k1_rand256_test(unsigned char *b32);
+static void secp256k1_testrand256_test(unsigned char *b32);
/** Generate pseudorandom bytes with long sequences of zero and one bits. */
-static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len);
+static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len);
+
+/** Flip a single random bit in a byte array */
+static void secp256k1_testrand_flip(unsigned char *b, size_t len);
+
+/** Initialize the test RNG using a (hex-encoded) seed of up to 16 bytes, or randomly if hexseed is NULL. */
+static void secp256k1_testrand_init(const char* hexseed);
+
+/** Print final test information. */
+static void secp256k1_testrand_finish(void);
#endif /* SECP256K1_TESTRAND_H */
diff --git a/src/secp256k1/src/testrand_impl.h b/src/secp256k1/src/testrand_impl.h
index 30a91e5296..3392566329 100644
--- a/src/secp256k1/src/testrand_impl.h
+++ b/src/secp256k1/src/testrand_impl.h
@@ -8,6 +8,7 @@
#define SECP256K1_TESTRAND_IMPL_H
#include <stdint.h>
+#include <stdio.h>
#include <string.h>
#include "testrand.h"
@@ -19,11 +20,11 @@ static int secp256k1_test_rng_precomputed_used = 8;
static uint64_t secp256k1_test_rng_integer;
static int secp256k1_test_rng_integer_bits_left = 0;
-SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) {
+SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16) {
secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16);
}
-SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
+SECP256K1_INLINE static uint32_t secp256k1_testrand32(void) {
if (secp256k1_test_rng_precomputed_used == 8) {
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed));
secp256k1_test_rng_precomputed_used = 0;
@@ -31,10 +32,10 @@ SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++];
}
-static uint32_t secp256k1_rand_bits(int bits) {
+static uint32_t secp256k1_testrand_bits(int bits) {
uint32_t ret;
if (secp256k1_test_rng_integer_bits_left < bits) {
- secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left);
+ secp256k1_test_rng_integer |= (((uint64_t)secp256k1_testrand32()) << secp256k1_test_rng_integer_bits_left);
secp256k1_test_rng_integer_bits_left += 32;
}
ret = secp256k1_test_rng_integer;
@@ -44,7 +45,7 @@ static uint32_t secp256k1_rand_bits(int bits) {
return ret;
}
-static uint32_t secp256k1_rand_int(uint32_t range) {
+static uint32_t secp256k1_testrand_int(uint32_t range) {
/* We want a uniform integer between 0 and range-1, inclusive.
* B is the smallest number such that range <= 2**B.
* two mechanisms implemented here:
@@ -76,25 +77,25 @@ static uint32_t secp256k1_rand_int(uint32_t range) {
mult = 1;
}
while(1) {
- uint32_t x = secp256k1_rand_bits(bits);
+ uint32_t x = secp256k1_testrand_bits(bits);
if (x < trange) {
return (mult == 1) ? x : (x % range);
}
}
}
-static void secp256k1_rand256(unsigned char *b32) {
+static void secp256k1_testrand256(unsigned char *b32) {
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32);
}
-static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
+static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len) {
size_t bits = 0;
memset(bytes, 0, len);
while (bits < len * 8) {
int now;
uint32_t val;
- now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31;
- val = secp256k1_rand_bits(1);
+ now = 1 + (secp256k1_testrand_bits(6) * secp256k1_testrand_bits(5) + 16) / 31;
+ val = secp256k1_testrand_bits(1);
while (now > 0 && bits < len * 8) {
bytes[bits / 8] |= val << (bits % 8);
now--;
@@ -103,8 +104,55 @@ static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
}
}
-static void secp256k1_rand256_test(unsigned char *b32) {
- secp256k1_rand_bytes_test(b32, 32);
+static void secp256k1_testrand256_test(unsigned char *b32) {
+ secp256k1_testrand_bytes_test(b32, 32);
+}
+
+static void secp256k1_testrand_flip(unsigned char *b, size_t len) {
+ b[secp256k1_testrand_int(len)] ^= (1 << secp256k1_testrand_int(8));
+}
+
+static void secp256k1_testrand_init(const char* hexseed) {
+ unsigned char seed16[16] = {0};
+ if (hexseed && strlen(hexseed) != 0) {
+ int pos = 0;
+ while (pos < 16 && hexseed[0] != 0 && hexseed[1] != 0) {
+ unsigned short sh;
+ if ((sscanf(hexseed, "%2hx", &sh)) == 1) {
+ seed16[pos] = sh;
+ } else {
+ break;
+ }
+ hexseed += 2;
+ pos++;
+ }
+ } else {
+ FILE *frand = fopen("/dev/urandom", "r");
+ if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
+ uint64_t t = time(NULL) * (uint64_t)1337;
+ fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
+ seed16[0] ^= t;
+ seed16[1] ^= t >> 8;
+ seed16[2] ^= t >> 16;
+ seed16[3] ^= t >> 24;
+ seed16[4] ^= t >> 32;
+ seed16[5] ^= t >> 40;
+ seed16[6] ^= t >> 48;
+ seed16[7] ^= t >> 56;
+ }
+ if (frand) {
+ fclose(frand);
+ }
+ }
+
+ printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+ secp256k1_testrand_seed(seed16);
+}
+
+static void secp256k1_testrand_finish(void) {
+ unsigned char run32[32];
+ secp256k1_testrand256(run32);
+ printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
}
#endif /* SECP256K1_TESTRAND_IMPL_H */
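The printed "random seed = ..." line together with the hex parsing in secp256k1_testrand_init makes failing runs reproducible: the seed can be fed back in on a later run. A standalone sketch of decoding such a 16-byte hex seed with sscanf, mirroring (but not reusing) the parsing above; the seed value shown is hypothetical:

#include <stdio.h>
#include <string.h>

/* Decodes up to 16 bytes from a hex string into seed16; returns bytes decoded. */
static int decode_seed(const char *hexseed, unsigned char seed16[16]) {
    int pos = 0;
    memset(seed16, 0, 16);
    while (pos < 16 && hexseed[0] != 0 && hexseed[1] != 0) {
        unsigned short sh;
        if (sscanf(hexseed, "%2hx", &sh) != 1) {
            break;
        }
        seed16[pos++] = (unsigned char)sh;
        hexseed += 2;
    }
    return pos;
}

int main(void) {
    unsigned char seed16[16];
    int n = decode_seed("02cd3f234a1b5c67", seed16); /* hypothetical seed */
    printf("decoded %d bytes, first byte 0x%02x\n", n, seed16[0]);
    return 0;
}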
diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c
index 374ed7dc12..bb4b5b4c07 100644
--- a/src/secp256k1/src/tests.c
+++ b/src/secp256k1/src/tests.c
@@ -54,7 +54,7 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) {
void random_field_element_test(secp256k1_fe *fe) {
do {
unsigned char b32[32];
- secp256k1_rand256_test(b32);
+ secp256k1_testrand256_test(b32);
if (secp256k1_fe_set_b32(fe, b32)) {
break;
}
@@ -63,7 +63,7 @@ void random_field_element_test(secp256k1_fe *fe) {
void random_field_element_magnitude(secp256k1_fe *fe) {
secp256k1_fe zero;
- int n = secp256k1_rand_int(9);
+ int n = secp256k1_testrand_int(9);
secp256k1_fe_normalize(fe);
if (n == 0) {
return;
@@ -81,11 +81,12 @@ void random_group_element_test(secp256k1_ge *ge) {
secp256k1_fe fe;
do {
random_field_element_test(&fe);
- if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) {
+ if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_testrand_bits(1))) {
secp256k1_fe_normalize(&ge->y);
break;
}
} while(1);
+ ge->infinity = 0;
}
void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
@@ -107,7 +108,7 @@ void random_scalar_order_test(secp256k1_scalar *num) {
do {
unsigned char b32[32];
int overflow = 0;
- secp256k1_rand256_test(b32);
+ secp256k1_testrand256_test(b32);
secp256k1_scalar_set_b32(num, b32, &overflow);
if (overflow || secp256k1_scalar_is_zero(num)) {
continue;
@@ -120,7 +121,7 @@ void random_scalar_order(secp256k1_scalar *num) {
do {
unsigned char b32[32];
int overflow = 0;
- secp256k1_rand256(b32);
+ secp256k1_testrand256(b32);
secp256k1_scalar_set_b32(num, b32, &overflow);
if (overflow || secp256k1_scalar_is_zero(num)) {
continue;
@@ -182,8 +183,10 @@ void run_context_tests(int use_prealloc) {
ecount2 = 10;
secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2);
- secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL);
- CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+ /* set error callback (to a function that still aborts in case malloc() fails in secp256k1_context_clone() below) */
+ secp256k1_context_set_error_callback(sign, secp256k1_default_illegal_callback_fn, NULL);
+ CHECK(sign->error_callback.fn != vrfy->error_callback.fn);
+ CHECK(sign->error_callback.fn == secp256k1_default_illegal_callback_fn);
/* check if sizes for cloning are consistent */
CHECK(secp256k1_context_preallocated_clone_size(none) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE));
@@ -239,7 +242,8 @@ void run_context_tests(int use_prealloc) {
}
/* Verify that the error callback makes it across the clone. */
- CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+ CHECK(sign->error_callback.fn != vrfy->error_callback.fn);
+ CHECK(sign->error_callback.fn == secp256k1_default_illegal_callback_fn);
/* And that it resets back to default. */
secp256k1_context_set_error_callback(sign, NULL, NULL);
CHECK(vrfy->error_callback.fn == sign->error_callback.fn);
@@ -361,8 +365,8 @@ void run_scratch_tests(void) {
CHECK(scratch->alloc_size != 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
- /* Allocating another 500 bytes fails */
- CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
+ /* Allocating another 501 bytes fails */
+ CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 501) == NULL);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
CHECK(scratch->alloc_size != 0);
@@ -395,6 +399,18 @@ void run_scratch_tests(void) {
secp256k1_scratch_space_destroy(none, scratch);
CHECK(ecount == 5);
+ /* Test that large integers do not wrap around in a bad way */
+ scratch = secp256k1_scratch_space_create(none, 1000);
+ /* Try max allocation with a large number of objects. Only makes sense if
+ * ALIGNMENT is greater than 1 because otherwise the objects take no extra
+ * space. */
+ CHECK(ALIGNMENT <= 1 || !secp256k1_scratch_max_allocation(&none->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1));
+ /* Try allocating SIZE_MAX to test wrap around which only happens if
+ * ALIGNMENT > 1, otherwise it returns NULL anyway because the scratch
+ * space is too small. */
+ CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, SIZE_MAX) == NULL);
+ secp256k1_scratch_space_destroy(none, scratch);
+
/* cleanup */
secp256k1_scratch_space_destroy(none, NULL); /* no-op */
secp256k1_context_destroy(none);
@@ -426,14 +442,14 @@ void run_sha256_tests(void) {
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
secp256k1_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
if (strlen(inputs[i]) > 0) {
- int split = secp256k1_rand_int(strlen(inputs[i]));
+ int split = secp256k1_testrand_int(strlen(inputs[i]));
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
secp256k1_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
}
}
}
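The memcmp to secp256k1_memcmp_var replacements running through these tests swap the libc call for the project's own variable-time, byte-wise comparison helper (its definition is not shown in this section). A standalone sketch with equivalent semantics, under the assumption that the real helper follows memcmp's sign convention:

#include <stddef.h>

/* Byte-wise comparison; returns <0, 0 or >0 like memcmp. Variable time,
 * so only suitable for non-secret data such as test outputs. */
static int memcmp_var_sketch(const void *s1, const void *s2, size_t n) {
    const unsigned char *p1 = (const unsigned char *)s1;
    const unsigned char *p2 = (const unsigned char *)s2;
    size_t i;
    for (i = 0; i < n; i++) {
        int diff = (int)p1[i] - (int)p2[i];
        if (diff != 0) {
            return diff;
        }
    }
    return 0;
}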
@@ -470,14 +486,14 @@ void run_hmac_sha256_tests(void) {
secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
secp256k1_hmac_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
if (strlen(inputs[i]) > 0) {
- int split = secp256k1_rand_int(strlen(inputs[i]));
+ int split = secp256k1_testrand_int(strlen(inputs[i]));
secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
secp256k1_hmac_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
}
}
}
@@ -504,21 +520,21 @@ void run_rfc6979_hmac_sha256_tests(void) {
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64);
for (i = 0; i < 3; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
- CHECK(memcmp(out, out1[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, out1[i], 32) == 0);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65);
for (i = 0; i < 3; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
- CHECK(memcmp(out, out1[i], 32) != 0);
+ CHECK(secp256k1_memcmp_var(out, out1[i], 32) != 0);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64);
for (i = 0; i < 3; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
- CHECK(memcmp(out, out2[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, out2[i], 32) == 0);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
}
@@ -542,7 +558,7 @@ void test_rand_bits(int rand32, int bits) {
/* Multiply the output of all rand calls with the odd number m, which
should not change the uniformity of its distribution. */
for (i = 0; i < rounds[usebits]; i++) {
- uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits));
+ uint32_t r = (rand32 ? secp256k1_testrand32() : secp256k1_testrand_bits(bits));
CHECK((((uint64_t)r) >> bits) == 0);
for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) {
uint32_t rm = r * mults[m];
@@ -567,7 +583,7 @@ void test_rand_int(uint32_t range, uint32_t subrange) {
uint64_t x = 0;
CHECK((range % subrange) == 0);
for (i = 0; i < rounds; i++) {
- uint32_t r = secp256k1_rand_int(range);
+ uint32_t r = secp256k1_testrand_int(range);
CHECK(r < range);
r = r % subrange;
x |= (((uint64_t)1) << r);
@@ -599,7 +615,7 @@ void run_rand_int(void) {
#ifndef USE_NUM_NONE
void random_num_negate(secp256k1_num *num) {
- if (secp256k1_rand_bits(1)) {
+ if (secp256k1_testrand_bits(1)) {
secp256k1_num_negate(num);
}
}
@@ -643,11 +659,11 @@ void test_num_add_sub(void) {
secp256k1_num n2;
secp256k1_num n1p2, n2p1, n1m2, n2m1;
random_num_order_test(&n1); /* n1 = R1 */
- if (secp256k1_rand_bits(1)) {
+ if (secp256k1_testrand_bits(1)) {
random_num_negate(&n1);
}
random_num_order_test(&n2); /* n2 = R2 */
- if (secp256k1_rand_bits(1)) {
+ if (secp256k1_testrand_bits(1)) {
random_num_negate(&n2);
}
secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */
@@ -838,7 +854,7 @@ void scalar_test(void) {
while (i < 256) {
secp256k1_scalar t;
int j;
- int now = secp256k1_rand_int(15) + 1;
+ int now = secp256k1_testrand_int(15) + 1;
if (now + i > 256) {
now = 256 - i;
}
@@ -915,7 +931,7 @@ void scalar_test(void) {
secp256k1_num rnum;
secp256k1_num rnum2;
unsigned char cone[1] = {0x01};
- unsigned int shift = 256 + secp256k1_rand_int(257);
+ unsigned int shift = 256 + secp256k1_testrand_int(257);
secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
secp256k1_num_mul(&rnum, &s1num, &s2num);
secp256k1_num_shift(&rnum, shift - 1);
@@ -933,7 +949,7 @@ void scalar_test(void) {
random_scalar_order_test(&r);
for (i = 0; i < 100; ++i) {
int low;
- int shift = 1 + secp256k1_rand_int(15);
+ int shift = 1 + secp256k1_testrand_int(15);
int expected = r.d[0] % (1 << shift);
low = secp256k1_scalar_shr_int(&r, shift);
CHECK(expected == low);
@@ -981,7 +997,7 @@ void scalar_test(void) {
secp256k1_scalar b;
int i;
/* Test add_bit. */
- int bit = secp256k1_rand_bits(8);
+ int bit = secp256k1_testrand_bits(8);
secp256k1_scalar_set_int(&b, 1);
CHECK(secp256k1_scalar_is_one(&b));
for (i = 0; i < bit; i++) {
@@ -1142,7 +1158,7 @@ void run_scalar_tests(void) {
secp256k1_scalar_set_b32(&scalar, bin, &overflow);
CHECK(overflow == 0);
secp256k1_scalar_get_b32(bin_tmp, &scalar);
- CHECK(memcmp(bin, bin_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(bin, bin_tmp, 32) == 0);
/* A scalar set to all 1s should overflow. */
memset(bin, 0xFF, 32);
@@ -1752,7 +1768,7 @@ void run_scalar_tests(void) {
void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_rand256(bin);
+ secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -1762,7 +1778,7 @@ void random_fe(secp256k1_fe *x) {
void random_fe_test(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_rand256_test(bin);
+ secp256k1_testrand256_test(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -1830,18 +1846,18 @@ void run_field_convert(void) {
CHECK(secp256k1_fe_equal_var(&fe, &fe2));
/* Check conversion from fe. */
secp256k1_fe_get_b32(b322, &fe);
- CHECK(memcmp(b322, b32, 32) == 0);
+ CHECK(secp256k1_memcmp_var(b322, b32, 32) == 0);
secp256k1_fe_to_storage(&fes2, &fe);
- CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0);
+ CHECK(secp256k1_memcmp_var(&fes2, &fes, sizeof(fes)) == 0);
}
-int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) {
+int fe_secp256k1_memcmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
secp256k1_fe t = *b;
#ifdef VERIFY
t.magnitude = a->magnitude;
t.normalized = a->normalized;
#endif
- return memcmp(a, &t, sizeof(secp256k1_fe));
+ return secp256k1_memcmp_var(a, &t, sizeof(secp256k1_fe));
}
void run_field_misc(void) {
@@ -1867,13 +1883,13 @@ void run_field_misc(void) {
CHECK(x.normalized && x.magnitude == 1);
#endif
secp256k1_fe_cmov(&x, &x, 1);
- CHECK(fe_memcmp(&x, &z) != 0);
- CHECK(fe_memcmp(&x, &q) == 0);
+ CHECK(fe_secp256k1_memcmp_var(&x, &z) != 0);
+ CHECK(fe_secp256k1_memcmp_var(&x, &q) == 0);
secp256k1_fe_cmov(&q, &z, 1);
#ifdef VERIFY
CHECK(!q.normalized && q.magnitude == z.magnitude);
#endif
- CHECK(fe_memcmp(&q, &z) == 0);
+ CHECK(fe_secp256k1_memcmp_var(&q, &z) == 0);
secp256k1_fe_normalize_var(&x);
secp256k1_fe_normalize_var(&z);
CHECK(!secp256k1_fe_equal_var(&x, &z));
@@ -1897,9 +1913,9 @@ void run_field_misc(void) {
secp256k1_fe_to_storage(&zs, &z);
secp256k1_fe_storage_cmov(&zs, &xs, 0);
secp256k1_fe_storage_cmov(&zs, &zs, 1);
- CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0);
+ CHECK(secp256k1_memcmp_var(&xs, &zs, sizeof(xs)) != 0);
secp256k1_fe_storage_cmov(&ys, &xs, 1);
- CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0);
+ CHECK(secp256k1_memcmp_var(&xs, &ys, sizeof(xs)) == 0);
secp256k1_fe_from_storage(&x, &xs);
secp256k1_fe_from_storage(&y, &ys);
secp256k1_fe_from_storage(&z, &zs);
@@ -1955,7 +1971,7 @@ void run_field_inv_all_var(void) {
secp256k1_fe_inv_all_var(xi, x, 0);
for (i = 0; i < count; i++) {
size_t j;
- size_t len = secp256k1_rand_int(15) + 1;
+ size_t len = secp256k1_testrand_int(15) + 1;
for (j = 0; j < len; j++) {
random_fe_non_zero(&x[j]);
}
@@ -2086,17 +2102,12 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
void test_ge(void) {
int i, i1;
-#ifdef USE_ENDOMORPHISM
int runs = 6;
-#else
- int runs = 4;
-#endif
- /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4).
- * The second in each pair of identical points uses a random Z coordinate in the Jacobian form.
- * All magnitudes are randomized.
- * All 17*17 combinations of points are added to each other, using all applicable methods.
- *
- * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well.
+ /* 25 points are used:
+ * - infinity
+ * - for each of four random points p1 p2 p3 p4, we add the point, its
+ * negation, and then those two again but with randomized Z coordinate.
+ * - The same is then done for lambda*p1 and lambda^2*p1.
*/
secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs));
secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs));
@@ -2111,14 +2122,12 @@ void test_ge(void) {
int j;
secp256k1_ge g;
random_group_element_test(&g);
-#ifdef USE_ENDOMORPHISM
if (i >= runs - 2) {
secp256k1_ge_mul_lambda(&g, &ge[1]);
}
if (i >= runs - 1) {
secp256k1_ge_mul_lambda(&g, &g);
}
-#endif
ge[1 + 4 * i] = g;
ge[2 + 4 * i] = g;
secp256k1_ge_neg(&ge[3 + 4 * i], &g);
@@ -2215,6 +2224,9 @@ void test_ge(void) {
/* Normal doubling. */
secp256k1_gej_double_var(&resj, &gej[i2], NULL);
ge_equals_gej(&ref, &resj);
+ /* Constant-time doubling. */
+ secp256k1_gej_double(&resj, &gej[i2]);
+ ge_equals_gej(&ref, &resj);
}
/* Test adding opposites. */
@@ -2244,7 +2256,7 @@ void test_ge(void) {
gej_shuffled[i] = gej[i];
}
for (i = 0; i < 4 * runs + 1; i++) {
- int swap = i + secp256k1_rand_int(4 * runs + 1 - i);
+ int swap = i + secp256k1_testrand_int(4 * runs + 1 - i);
if (swap != i) {
secp256k1_gej t = gej_shuffled[i];
gej_shuffled[i] = gej_shuffled[swap];
@@ -2300,6 +2312,39 @@ void test_ge(void) {
free(zinv);
}
+
+void test_intialized_inf(void) {
+ secp256k1_ge p;
+ secp256k1_gej pj, npj, infj1, infj2, infj3;
+ secp256k1_fe zinv;
+
+ /* Test that adding P+(-P) results in a fully initialized infinity */
+ random_group_element_test(&p);
+ secp256k1_gej_set_ge(&pj, &p);
+ secp256k1_gej_neg(&npj, &pj);
+
+ secp256k1_gej_add_var(&infj1, &pj, &npj, NULL);
+ CHECK(secp256k1_gej_is_infinity(&infj1));
+ CHECK(secp256k1_fe_is_zero(&infj1.x));
+ CHECK(secp256k1_fe_is_zero(&infj1.y));
+ CHECK(secp256k1_fe_is_zero(&infj1.z));
+
+ secp256k1_gej_add_ge_var(&infj2, &npj, &p, NULL);
+ CHECK(secp256k1_gej_is_infinity(&infj2));
+ CHECK(secp256k1_fe_is_zero(&infj2.x));
+ CHECK(secp256k1_fe_is_zero(&infj2.y));
+ CHECK(secp256k1_fe_is_zero(&infj2.z));
+
+ secp256k1_fe_set_int(&zinv, 1);
+ secp256k1_gej_add_zinv_var(&infj3, &npj, &p, &zinv);
+ CHECK(secp256k1_gej_is_infinity(&infj3));
+ CHECK(secp256k1_fe_is_zero(&infj3.x));
+ CHECK(secp256k1_fe_is_zero(&infj3.y));
+ CHECK(secp256k1_fe_is_zero(&infj3.z));
+
+
+}
+
void test_add_neg_y_diff_x(void) {
/* The point of this test is to check that we can add two points
* whose y-coordinates are negatives of each other but whose x
@@ -2373,6 +2418,7 @@ void run_ge(void) {
test_ge();
}
test_add_neg_y_diff_x();
+ test_intialized_inf();
}
void test_ec_combine(void) {
@@ -2396,7 +2442,7 @@ void test_ec_combine(void) {
secp256k1_ge_set_gej(&Q, &Qj);
secp256k1_pubkey_save(&sd, &Q);
CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1);
- CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0);
+ CHECK(secp256k1_memcmp_var(&sd, &sd2, sizeof(sd)) == 0);
}
}
@@ -2562,7 +2608,6 @@ void test_point_times_order(const secp256k1_gej *point) {
secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
secp256k1_gej_add_var(&res1, &res1, &res2, NULL);
CHECK(secp256k1_gej_is_infinity(&res1));
- CHECK(secp256k1_gej_is_valid_var(&res1) == 0);
secp256k1_ge_set_gej(&res3, &res1);
CHECK(secp256k1_ge_is_infinity(&res3));
CHECK(secp256k1_ge_is_valid_var(&res3) == 0);
@@ -2581,6 +2626,87 @@ void test_point_times_order(const secp256k1_gej *point) {
ge_equals_ge(&res3, &secp256k1_ge_const_g);
}
+/* These scalars reach large (in absolute value) outputs when fed to secp256k1_scalar_split_lambda.
+ *
+ * They are computed as:
+ * - For a in [-2, -1, 0, 1, 2]:
+ * - For b in [-3, -1, 1, 3]:
+ * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER
+ */
+static const secp256k1_scalar scalars_near_split_bounds[20] = {
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fc),
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fd),
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fe),
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6ff),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632d),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632e),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632f),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf76330),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b209f),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a0),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a1),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a2),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede11),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede12),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede13),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede14),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a42),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a43),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a44),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45)
+};
+
+void test_ecmult_target(const secp256k1_scalar* target, int mode) {
+ /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */
+ secp256k1_scalar n1, n2;
+ secp256k1_ge p;
+ secp256k1_gej pj, p1j, p2j, ptj;
+ static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+
+ /* Generate random n1,n2 such that n1+n2 = -target. */
+ random_scalar_order_test(&n1);
+ secp256k1_scalar_add(&n2, &n1, target);
+ secp256k1_scalar_negate(&n2, &n2);
+
+ /* Generate a random input point. */
+ if (mode != 0) {
+ random_group_element_test(&p);
+ secp256k1_gej_set_ge(&pj, &p);
+ }
+
+ /* EC multiplications */
+ if (mode == 0) {
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target);
+ } else if (mode == 1) {
+ secp256k1_ecmult(&ctx->ecmult_ctx, &p1j, &pj, &n1, &zero);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &p2j, &pj, &n2, &zero);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &ptj, &pj, target, &zero);
+ } else {
+ secp256k1_ecmult_const(&p1j, &p, &n1, 256);
+ secp256k1_ecmult_const(&p2j, &p, &n2, 256);
+ secp256k1_ecmult_const(&ptj, &p, target, 256);
+ }
+
+ /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n2-n1-n2)*P = 0. */
+ secp256k1_gej_add_var(&ptj, &ptj, &p1j, NULL);
+ secp256k1_gej_add_var(&ptj, &ptj, &p2j, NULL);
+ CHECK(secp256k1_gej_is_infinity(&ptj));
+}
+
+void run_ecmult_near_split_bound(void) {
+ int i;
+ unsigned j;
+ for (i = 0; i < 4*count; ++i) {
+ for (j = 0; j < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++j) {
+ test_ecmult_target(&scalars_near_split_bounds[j], 0);
+ test_ecmult_target(&scalars_near_split_bounds[j], 1);
+ test_ecmult_target(&scalars_near_split_bounds[j], 2);
+ }
+ }
+}
+
void run_point_times_order(void) {
int i;
secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2);
@@ -2594,7 +2720,6 @@ void run_point_times_order(void) {
secp256k1_gej j;
CHECK(secp256k1_ge_is_valid_var(&p));
secp256k1_gej_set_ge(&j, &p);
- CHECK(secp256k1_gej_is_valid_var(&j));
test_point_times_order(&j);
}
secp256k1_fe_sqr(&x, &x);
@@ -2967,14 +3092,16 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_multi) {
secp256k1_scalar szero;
- secp256k1_scalar sc[32];
- secp256k1_ge pt[32];
+ secp256k1_scalar sc;
+ secp256k1_ge pt;
secp256k1_gej r;
ecmult_multi_data data;
secp256k1_scratch *scratch_empty;
- data.sc = sc;
- data.pt = pt;
+ random_group_element_test(&pt);
+ random_scalar_order(&sc);
+ data.sc = &sc;
+ data.pt = &pt;
secp256k1_scalar_set_int(&szero, 0);
/* Try to multiply 1 point, but scratch space is empty.*/
@@ -2988,12 +3115,10 @@ void test_secp256k1_pippenger_bucket_window_inv(void) {
CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0);
for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) {
-#ifdef USE_ENDOMORPHISM
/* Bucket_window of 8 is not used with endo */
if (i == 8) {
continue;
}
-#endif
CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i);
if (i != PIPPENGER_MAX_BUCKET_WINDOW) {
CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i);
@@ -3006,7 +3131,7 @@ void test_secp256k1_pippenger_bucket_window_inv(void) {
* for a given scratch space.
*/
void test_ecmult_multi_pippenger_max_points(void) {
- size_t scratch_size = secp256k1_rand_int(256);
+ size_t scratch_size = secp256k1_testrand_int(256);
size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12);
secp256k1_scratch *scratch;
size_t n_points_supported;
@@ -3232,16 +3357,14 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) {
int skew;
int bits = 256;
secp256k1_scalar num = *number;
+ secp256k1_scalar scalar_skew;
secp256k1_scalar_set_int(&x, 0);
secp256k1_scalar_set_int(&shift, 1 << w);
- /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
-#ifdef USE_ENDOMORPHISM
for (i = 0; i < 16; ++i) {
secp256k1_scalar_shr_int(&num, 8);
}
bits = 128;
-#endif
skew = secp256k1_wnaf_const(wnaf, &num, w, bits);
for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
@@ -3262,7 +3385,8 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) {
secp256k1_scalar_add(&x, &x, &t);
}
/* Skew num because when encoding numbers as odd we use an offset */
- secp256k1_scalar_cadd_bit(&num, skew == 2, 1);
+ secp256k1_scalar_set_int(&scalar_skew, 1 << (skew == 2));
+ secp256k1_scalar_add(&num, &num, &scalar_skew);
CHECK(secp256k1_scalar_eq(&x, &num));
}
@@ -3275,12 +3399,9 @@ void test_fixed_wnaf(const secp256k1_scalar *number, int w) {
secp256k1_scalar_set_int(&x, 0);
secp256k1_scalar_set_int(&shift, 1 << w);
- /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
-#ifdef USE_ENDOMORPHISM
for (i = 0; i < 16; ++i) {
secp256k1_scalar_shr_int(&num, 8);
}
-#endif
skew = secp256k1_wnaf_fixed(wnaf, &num, w);
for (i = WNAF_SIZE(w)-1; i >= 0; --i) {
@@ -3374,13 +3495,32 @@ void run_wnaf(void) {
int i;
secp256k1_scalar n = {{0}};
+ test_constant_wnaf(&n, 4);
/* Sanity check: 1 and 2 are the smallest odd and even numbers and should
* have easier-to-diagnose failure modes */
n.d[0] = 1;
test_constant_wnaf(&n, 4);
n.d[0] = 2;
test_constant_wnaf(&n, 4);
- /* Test 0 */
+ /* Test -1, because it's a special case in wnaf_const */
+ n = secp256k1_scalar_one;
+ secp256k1_scalar_negate(&n, &n);
+ test_constant_wnaf(&n, 4);
+
+ /* Test -2, which must not lead to overflows in wnaf_const */
+ secp256k1_scalar_add(&n, &secp256k1_scalar_one, &secp256k1_scalar_one);
+ secp256k1_scalar_negate(&n, &n);
+ test_constant_wnaf(&n, 4);
+
+ /* Test (1/2) - 1 = 1/-2 and 1/2 = (1/-2) + 1
+ as corner cases of negation handling in wnaf_const */
+ secp256k1_scalar_inverse(&n, &n);
+ test_constant_wnaf(&n, 4);
+
+ secp256k1_scalar_add(&n, &n, &secp256k1_scalar_one);
+ test_constant_wnaf(&n, 4);
+
+ /* Test 0 for fixed wnaf */
test_fixed_wnaf_small();
/* Random tests */
for (i = 0; i < count; i++) {
@@ -3445,7 +3585,7 @@ void test_ecmult_gen_blind(void) {
secp256k1_ge pge;
random_scalar_order_test(&key);
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key);
- secp256k1_rand256(seed32);
+ secp256k1_testrand256(seed32);
b = ctx->ecmult_gen_ctx.blind;
i = ctx->ecmult_gen_ctx.initial;
secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
@@ -3477,16 +3617,18 @@ void run_ecmult_gen_blind(void) {
}
}
-#ifdef USE_ENDOMORPHISM
/***** ENDOMORPHISM TESTS *****/
-void test_scalar_split(void) {
- secp256k1_scalar full;
- secp256k1_scalar s1, slam;
+void test_scalar_split(const secp256k1_scalar* full) {
+ secp256k1_scalar s, s1, slam;
const unsigned char zero[32] = {0};
unsigned char tmp[32];
- random_scalar_order_test(&full);
- secp256k1_scalar_split_lambda(&s1, &slam, &full);
+ secp256k1_scalar_split_lambda(&s1, &slam, full);
+
+ /* check slam*lambda + s1 == full */
+ secp256k1_scalar_mul(&s, &secp256k1_const_lambda, &slam);
+ secp256k1_scalar_add(&s, &s, &s1);
+ CHECK(secp256k1_scalar_eq(&s, full));
/* check that both are <= 128 bits in size */
if (secp256k1_scalar_is_high(&s1)) {
@@ -3497,15 +3639,32 @@ void test_scalar_split(void) {
}
secp256k1_scalar_get_b32(tmp, &s1);
- CHECK(memcmp(zero, tmp, 16) == 0);
+ CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0);
secp256k1_scalar_get_b32(tmp, &slam);
- CHECK(memcmp(zero, tmp, 16) == 0);
+ CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0);
}
+
void run_endomorphism_tests(void) {
- test_scalar_split();
+ unsigned i;
+ static secp256k1_scalar s;
+ test_scalar_split(&secp256k1_scalar_zero);
+ test_scalar_split(&secp256k1_scalar_one);
+ secp256k1_scalar_negate(&s,&secp256k1_scalar_one);
+ test_scalar_split(&s);
+ test_scalar_split(&secp256k1_const_lambda);
+ secp256k1_scalar_add(&s, &secp256k1_const_lambda, &secp256k1_scalar_one);
+ test_scalar_split(&s);
+
+ for (i = 0; i < 100U * count; ++i) {
+ secp256k1_scalar full;
+ random_scalar_order_test(&full);
+ test_scalar_split(&full);
+ }
+ for (i = 0; i < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++i) {
+ test_scalar_split(&scalars_near_split_bounds[i]);
+ }
}
-#endif
void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) {
unsigned char pubkeyc[65];
@@ -3547,7 +3706,7 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali
CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
VG_CHECK(pubkeyo, outl);
CHECK(outl == 33);
- CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0);
CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0]));
if (ypass) {
/* This test isn't always done because we decode with alternative signs, so the y won't match. */
@@ -3563,7 +3722,7 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali
VG_CHECK(pubkeyo, outl);
CHECK(outl == 65);
CHECK(pubkeyo[0] == 4);
- CHECK(memcmp(&pubkeyo[1], input, 64) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkeyo[1], input, 64) == 0);
}
CHECK(ecount == 0);
} else {
@@ -3932,7 +4091,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* Maximum value is too large, reject. */
memset(ctmp, 255, 32);
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
@@ -3940,7 +4099,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* Zero is too small, reject. */
memset(ctmp, 0, 32);
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
@@ -3948,7 +4107,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* One must be accepted. */
ctmp[31] = 0x01;
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
@@ -3956,7 +4115,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
pubkey_one = pubkey;
/* Group order + 1 is too large, reject. */
memcpy(ctmp, orderc, 32);
@@ -3966,7 +4125,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* -1 must be accepted. */
ctmp[31] = 0x40;
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
@@ -3974,20 +4133,20 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
pubkey_negone = pubkey;
/* Tweak of zero leaves the value unchanged. */
memset(ctmp2, 0, 32);
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1);
- CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40);
+ CHECK(secp256k1_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40);
memcpy(&pubkey2, &pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
/* Multiply tweak of zero zeroizes the output. */
CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
/* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing
seckey, the seckey is zeroized. */
@@ -3997,29 +4156,29 @@ void run_eckey_edge_case_test(void) {
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp2) == 1);
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
memcpy(ctmp, orderc, 32);
CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
/* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing
tweak, the seckey is zeroized. */
memcpy(ctmp, orderc, 32);
ctmp[31] = 0x40;
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
memcpy(ctmp, orderc, 32);
ctmp[31] = 0x40;
CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
memcpy(ctmp, orderc, 32);
ctmp[31] = 0x40;
/* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing
tweak, the pubkey is zeroized. */
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
/* If the resulting key in secp256k1_ec_seckey_tweak_add and
* secp256k1_ec_pubkey_tweak_add is 0 the functions fail and in the latter
@@ -4029,25 +4188,25 @@ void run_eckey_edge_case_test(void) {
memset(ctmp2, 0, 32);
ctmp2[31] = 1;
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0);
- CHECK(memcmp(zeros, ctmp2, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp2, 32) == 0);
ctmp2[31] = 1;
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
/* Tweak computation wraps and results in a key of 1. */
ctmp2[31] = 2;
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1);
- CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1);
+ CHECK(secp256k1_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1);
ctmp2[31] = 2;
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
ctmp2[31] = 1;
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
/* Tweak mul * 2 = 1+1. */
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
ctmp2[31] = 2;
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
/* Test argument errors. */
ecount = 0;
secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
@@ -4056,12 +4215,12 @@ void run_eckey_edge_case_test(void) {
memset(&pubkey, 0, 32);
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
CHECK(ecount == 1);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
memset(&pubkey2, 0, 32);
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0);
CHECK(ecount == 2);
- CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0);
/* Plain argument errors. */
ecount = 0;
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
@@ -4101,7 +4260,7 @@ void run_eckey_edge_case_test(void) {
memset(&pubkey, 1, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0);
CHECK(ecount == 2);
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* secp256k1_ec_pubkey_combine tests. */
ecount = 0;
pubkeys[0] = &pubkey_one;
@@ -4112,28 +4271,28 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 2);
memset(&pubkey, 255, sizeof(secp256k1_pubkey));
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 3);
pubkeys[0] = &pubkey_negone;
memset(&pubkey, 255, sizeof(secp256k1_pubkey));
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
CHECK(ecount == 3);
len = 33;
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1);
- CHECK(memcmp(ctmp, ctmp2, 33) == 0);
+ CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0);
/* Result is infinity. */
pubkeys[0] = &pubkey_one;
pubkeys[1] = &pubkey_negone;
@@ -4141,7 +4300,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 3);
/* Passes through infinity but comes out one. */
pubkeys[2] = &pubkey_one;
@@ -4149,19 +4308,19 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
CHECK(ecount == 3);
len = 33;
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1);
- CHECK(memcmp(ctmp, ctmp2, 33) == 0);
+ CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0);
/* Adds to two. */
pubkeys[1] = &pubkey_one;
memset(&pubkey, 255, sizeof(secp256k1_pubkey));
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
CHECK(ecount == 3);
secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
}
@@ -4175,21 +4334,21 @@ void run_eckey_negate_test(void) {
/* Verify negation changes the key and changes it back */
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1);
- CHECK(memcmp(seckey, seckey_tmp, 32) != 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) != 0);
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1);
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
/* Check that privkey alias gives same result */
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1);
CHECK(secp256k1_ec_privkey_negate(ctx, seckey_tmp) == 1);
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
/* Negating all 0s fails */
memset(seckey, 0, 32);
memset(seckey_tmp, 0, 32);
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0);
/* Check that seckey is not modified */
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
/* Negating an overflowing seckey fails and the seckey is zeroed. In this
* test, the seckey has 16 random bytes to ensure that ec_seckey_negate
@@ -4198,7 +4357,7 @@ void run_eckey_negate_test(void) {
memset(seckey, 0xFF, 16);
memset(seckey_tmp, 0, 32);
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0);
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
}
void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) {
@@ -4220,7 +4379,7 @@ void test_ecdsa_sign_verify(void) {
random_scalar_order_test(&key);
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key);
secp256k1_ge_set_gej(&pub, &pubj);
- getrec = secp256k1_rand_bits(1);
+ getrec = secp256k1_testrand_bits(1);
random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL);
if (getrec) {
CHECK(recid >= 0 && recid < 4);
@@ -4287,7 +4446,7 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char
int is_empty_signature(const secp256k1_ecdsa_signature *sig) {
static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0};
- return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
+ return secp256k1_memcmp_var(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
}
void test_ecdsa_end_to_end(void) {
@@ -4320,31 +4479,31 @@ void test_ecdsa_end_to_end(void) {
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Verify exporting and importing public key. */
- CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
memset(&pubkey, 0, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);
/* Verify negation changes the key and changes it back */
memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1);
- CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0);
+ CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0);
CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1);
- CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0);
/* Verify private key import and export. */
- CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1));
+ CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_testrand_bits(1) == 1));
CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1);
- CHECK(memcmp(privkey, privkey2, 32) == 0);
+ CHECK(secp256k1_memcmp_var(privkey, privkey2, 32) == 0);
/* Optionally tweak the keys using addition. */
- if (secp256k1_rand_int(3) == 0) {
+ if (secp256k1_testrand_int(3) == 0) {
int ret1;
int ret2;
int ret3;
unsigned char rnd[32];
unsigned char privkey_tmp[32];
secp256k1_pubkey pubkey2;
- secp256k1_rand256_test(rnd);
+ secp256k1_testrand256_test(rnd);
memcpy(privkey_tmp, privkey, 32);
ret1 = secp256k1_ec_seckey_tweak_add(ctx, privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd);
@@ -4355,20 +4514,20 @@ void test_ecdsa_end_to_end(void) {
if (ret1 == 0) {
return;
}
- CHECK(memcmp(privkey, privkey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0);
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
}
/* Optionally tweak the keys using multiplication. */
- if (secp256k1_rand_int(3) == 0) {
+ if (secp256k1_testrand_int(3) == 0) {
int ret1;
int ret2;
int ret3;
unsigned char rnd[32];
unsigned char privkey_tmp[32];
secp256k1_pubkey pubkey2;
- secp256k1_rand256_test(rnd);
+ secp256k1_testrand256_test(rnd);
memcpy(privkey_tmp, privkey, 32);
ret1 = secp256k1_ec_seckey_tweak_mul(ctx, privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd);
@@ -4379,9 +4538,9 @@ void test_ecdsa_end_to_end(void) {
if (ret1 == 0) {
return;
}
- CHECK(memcmp(privkey, privkey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0);
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
}
/* Sign. */
@@ -4393,13 +4552,13 @@ void test_ecdsa_end_to_end(void) {
extra[31] = 0;
extra[0] = 1;
CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1);
- CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0);
- CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0);
/* Verify. */
CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1);
@@ -4420,7 +4579,7 @@ void test_ecdsa_end_to_end(void) {
secp256k1_ecdsa_signature_save(&signature[5], &r, &s);
CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1);
- CHECK(memcmp(&signature[5], &signature[0], 64) == 0);
+ CHECK(secp256k1_memcmp_var(&signature[5], &signature[0], 64) == 0);
/* Serialize/parse DER and verify again */
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
@@ -4430,7 +4589,7 @@ void test_ecdsa_end_to_end(void) {
/* Serialize/destroy/parse DER and verify again. */
siglen = 74;
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
- sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255);
+ sig[secp256k1_testrand_int(siglen)] += 1 + secp256k1_testrand_int(255);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 ||
secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0);
}
@@ -4440,23 +4599,23 @@ void test_random_pubkeys(void) {
secp256k1_ge elem2;
unsigned char in[65];
/* Generate some randomly sized pubkeys. */
- size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33;
- if (secp256k1_rand_bits(2) == 0) {
- len = secp256k1_rand_bits(6);
+ size_t len = secp256k1_testrand_bits(2) == 0 ? 65 : 33;
+ if (secp256k1_testrand_bits(2) == 0) {
+ len = secp256k1_testrand_bits(6);
}
if (len == 65) {
- in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7);
+ in[0] = secp256k1_testrand_bits(1) ? 4 : (secp256k1_testrand_bits(1) ? 6 : 7);
} else {
- in[0] = secp256k1_rand_bits(1) ? 2 : 3;
+ in[0] = secp256k1_testrand_bits(1) ? 2 : 3;
}
- if (secp256k1_rand_bits(3) == 0) {
- in[0] = secp256k1_rand_bits(8);
+ if (secp256k1_testrand_bits(3) == 0) {
+ in[0] = secp256k1_testrand_bits(8);
}
if (len > 1) {
- secp256k1_rand256(&in[1]);
+ secp256k1_testrand256(&in[1]);
}
if (len > 33) {
- secp256k1_rand256(&in[33]);
+ secp256k1_testrand256(&in[33]);
}
if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
unsigned char out[65];
@@ -4467,7 +4626,7 @@ void test_random_pubkeys(void) {
/* If the pubkey can be parsed, it should round-trip... */
CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33));
CHECK(size == len);
- CHECK(memcmp(&in[1], &out[1], len-1) == 0);
+ CHECK(secp256k1_memcmp_var(&in[1], &out[1], len-1) == 0);
/* ... except for the type of hybrid inputs. */
if ((in[0] != 6) && (in[0] != 7)) {
CHECK(in[0] == out[0]);
@@ -4478,7 +4637,7 @@ void test_random_pubkeys(void) {
CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size));
ge_equals_ge(&elem,&elem2);
/* Check that the X9.62 hybrid type is checked. */
- in[0] = secp256k1_rand_bits(1) ? 6 : 7;
+ in[0] = secp256k1_testrand_bits(1) ? 6 : 7;
res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
if (firstb == 2 || firstb == 3) {
if (in[0] == firstb + 4) {
@@ -4490,7 +4649,7 @@ void test_random_pubkeys(void) {
if (res) {
ge_equals_ge(&elem,&elem2);
CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0));
- CHECK(memcmp(&in[1], &out[1], 64) == 0);
+ CHECK(secp256k1_memcmp_var(&in[1], &out[1], 64) == 0);
}
}
}
@@ -4546,21 +4705,21 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen);
if (parsed_der) {
ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0;
- valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0);
+ valid_der = (secp256k1_memcmp_var(compact_der, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der + 32, zeroes, 32) != 0);
}
if (valid_der) {
ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1;
- roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0;
+ roundtrips_der = (len_der == siglen) && secp256k1_memcmp_var(roundtrip_der, sig, siglen) == 0;
}
parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen);
if (parsed_der_lax) {
ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10;
- valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0);
+ valid_der_lax = (secp256k1_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0);
}
if (valid_der_lax) {
ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11;
- roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0;
+ roundtrips_der_lax = (len_der_lax == siglen) && secp256k1_memcmp_var(roundtrip_der_lax, sig, siglen) == 0;
}
if (certainly_der) {
@@ -4576,7 +4735,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
if (valid_der) {
ret |= (!roundtrips_der_lax) << 12;
ret |= (len_der != len_der_lax) << 13;
- ret |= ((len_der != len_der_lax) || (memcmp(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14;
+ ret |= ((len_der != len_der_lax) || (secp256k1_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14;
}
ret |= (roundtrips_der != roundtrips_der_lax) << 15;
if (parsed_der) {
@@ -4593,19 +4752,19 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
if (valid_openssl) {
unsigned char tmp[32] = {0};
BN_bn2bin(r, tmp + 32 - BN_num_bytes(r));
- valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
+ valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0;
}
if (valid_openssl) {
unsigned char tmp[32] = {0};
BN_bn2bin(s, tmp + 32 - BN_num_bytes(s));
- valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
+ valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0;
}
}
len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL);
if (len_openssl <= 2048) {
unsigned char *ptr = roundtrip_openssl;
CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl);
- roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (memcmp(roundtrip_openssl, sig, siglen) == 0);
+ roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (secp256k1_memcmp_var(roundtrip_openssl, sig, siglen) == 0);
} else {
len_openssl = 0;
}
@@ -4617,7 +4776,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
ret |= (roundtrips_der != roundtrips_openssl) << 7;
if (roundtrips_openssl) {
ret |= (len_der != (size_t)len_openssl) << 8;
- ret |= ((len_der != (size_t)len_openssl) || (memcmp(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9;
+ ret |= ((len_der != (size_t)len_openssl) || (secp256k1_memcmp_var(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9;
}
#endif
return ret;
@@ -4637,27 +4796,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) {
static void damage_array(unsigned char *sig, size_t *len) {
int pos;
- int action = secp256k1_rand_bits(3);
+ int action = secp256k1_testrand_bits(3);
if (action < 1 && *len > 3) {
/* Delete a byte. */
- pos = secp256k1_rand_int(*len);
+ pos = secp256k1_testrand_int(*len);
memmove(sig + pos, sig + pos + 1, *len - pos - 1);
(*len)--;
return;
} else if (action < 2 && *len < 2048) {
/* Insert a byte. */
- pos = secp256k1_rand_int(1 + *len);
+ pos = secp256k1_testrand_int(1 + *len);
memmove(sig + pos + 1, sig + pos, *len - pos);
- sig[pos] = secp256k1_rand_bits(8);
+ sig[pos] = secp256k1_testrand_bits(8);
(*len)++;
return;
} else if (action < 4) {
/* Modify a byte. */
- sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255);
+ sig[secp256k1_testrand_int(*len)] += 1 + secp256k1_testrand_int(255);
return;
} else { /* action < 8 */
/* Modify a bit. */
- sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3);
+ sig[secp256k1_testrand_int(*len)] ^= 1 << secp256k1_testrand_bits(3);
return;
}
}
@@ -4670,23 +4829,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
int n;
*len = 0;
- der = secp256k1_rand_bits(2) == 0;
+ der = secp256k1_testrand_bits(2) == 0;
*certainly_der = der;
*certainly_not_der = 0;
- indet = der ? 0 : secp256k1_rand_int(10) == 0;
+ indet = der ? 0 : secp256k1_testrand_int(10) == 0;
for (n = 0; n < 2; n++) {
/* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */
- nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0);
+ nlow[n] = der ? 1 : (secp256k1_testrand_bits(3) != 0);
/* The length of the number in bytes (the first byte of which will always be nonzero) */
- nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8;
+ nlen[n] = nlow[n] ? secp256k1_testrand_int(33) : 32 + secp256k1_testrand_int(200) * secp256k1_testrand_int(8) / 8;
CHECK(nlen[n] <= 232);
/* The top bit of the number. */
- nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1));
+ nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_testrand_bits(1));
/* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */
- nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127));
+ nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_testrand_bits(7) : 1 + secp256k1_testrand_int(127));
/* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */
- nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8);
+ nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_testrand_int(3) : secp256k1_testrand_int(300 - nlen[n]) * secp256k1_testrand_int(8) / 8);
if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) {
*certainly_not_der = 1;
}
@@ -4695,7 +4854,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2);
if (!der) {
/* nlenlen[n] max 127 bytes */
- int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
+ int add = secp256k1_testrand_int(127 - nlenlen[n]) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256;
nlenlen[n] += add;
if (add != 0) {
*certainly_not_der = 1;
@@ -4709,7 +4868,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen <= 856);
/* The length of the garbage inside the tuple. */
- elen = (der || indet) ? 0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8;
+ elen = (der || indet) ? 0 : secp256k1_testrand_int(980 - tlen) * secp256k1_testrand_int(8) / 8;
if (elen != 0) {
*certainly_not_der = 1;
}
@@ -4717,7 +4876,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen <= 980);
/* The length of the garbage after the end of the tuple. */
- glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8;
+ glen = der ? 0 : secp256k1_testrand_int(990 - tlen) * secp256k1_testrand_int(8) / 8;
if (glen != 0) {
*certainly_not_der = 1;
}
@@ -4732,7 +4891,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
} else {
int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2);
if (!der) {
- int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
+ int add = secp256k1_testrand_int(127 - tlenlen) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256;
tlenlen += add;
if (add != 0) {
*certainly_not_der = 1;
@@ -4783,13 +4942,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
nlen[n]--;
}
/* Generate remaining random bytes of number */
- secp256k1_rand_bytes_test(sig + *len, nlen[n]);
+ secp256k1_testrand_bytes_test(sig + *len, nlen[n]);
*len += nlen[n];
nlen[n] = 0;
}
/* Generate random garbage inside tuple. */
- secp256k1_rand_bytes_test(sig + *len, elen);
+ secp256k1_testrand_bytes_test(sig + *len, elen);
*len += elen;
/* Generate end-of-contents bytes. */
@@ -4801,7 +4960,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen + glen <= 1121);
/* Generate random garbage outside tuple. */
- secp256k1_rand_bytes_test(sig + *len, glen);
+ secp256k1_testrand_bytes_test(sig + *len, glen);
*len += glen;
tlen += glen;
CHECK(tlen <= 1121);
@@ -5133,11 +5292,11 @@ void test_ecdsa_edge_cases(void) {
CHECK(!is_empty_signature(&sig));
CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1);
CHECK(!is_empty_signature(&sig2));
- CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0);
/* The default nonce function is deterministic. */
CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
CHECK(!is_empty_signature(&sig2));
- CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0);
/* The default nonce function changes output with different messages. */
for(i = 0; i < 256; i++) {
int j;
@@ -5184,12 +5343,12 @@ void test_ecdsa_edge_cases(void) {
VG_CHECK(nonce3,32);
CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1);
VG_CHECK(nonce4,32);
- CHECK(memcmp(nonce, nonce2, 32) != 0);
- CHECK(memcmp(nonce, nonce3, 32) != 0);
- CHECK(memcmp(nonce, nonce4, 32) != 0);
- CHECK(memcmp(nonce2, nonce3, 32) != 0);
- CHECK(memcmp(nonce2, nonce4, 32) != 0);
- CHECK(memcmp(nonce3, nonce4, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce, nonce3, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce, nonce4, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce2, nonce3, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce2, nonce4, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce3, nonce4, 32) != 0);
}
@@ -5218,7 +5377,7 @@ EC_KEY *get_openssl_key(const unsigned char *key32) {
unsigned char privkey[300];
size_t privkeylen;
const unsigned char* pbegin = privkey;
- int compr = secp256k1_rand_bits(1);
+ int compr = secp256k1_testrand_bits(1);
EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1);
CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr));
CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen));
@@ -5239,7 +5398,7 @@ void test_ecdsa_openssl(void) {
unsigned char message[32];
unsigned char signature[80];
unsigned char key32[32];
- secp256k1_rand256_test(message);
+ secp256k1_testrand256_test(message);
secp256k1_scalar_set_b32(&msg, message, NULL);
random_scalar_order_test(&key);
secp256k1_scalar_get_b32(key32, &key);
@@ -5277,6 +5436,14 @@ void run_ecdsa_openssl(void) {
# include "modules/recovery/tests_impl.h"
#endif
+#ifdef ENABLE_MODULE_EXTRAKEYS
+# include "modules/extrakeys/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORRSIG
+# include "modules/schnorrsig/tests_impl.h"
+#endif
+
void run_memczero_test(void) {
unsigned char buf1[6] = {1, 2, 3, 4, 5, 6};
unsigned char buf2[sizeof(buf1)];
@@ -5284,12 +5451,12 @@ void run_memczero_test(void) {
/* memczero(..., ..., 0) is a noop. */
memcpy(buf2, buf1, sizeof(buf1));
memczero(buf1, sizeof(buf1), 0);
- CHECK(memcmp(buf1, buf2, sizeof(buf1)) == 0);
+ CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0);
/* memczero(..., ..., 1) zeros the buffer. */
memset(buf2, 0, sizeof(buf2));
memczero(buf1, sizeof(buf1) , 1);
- CHECK(memcmp(buf1, buf2, sizeof(buf1)) == 0);
+ CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0);
}
void int_cmov_test(void) {
@@ -5328,23 +5495,23 @@ void fe_cmov_test(void) {
secp256k1_fe a = zero;
secp256k1_fe_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_fe_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_fe_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_fe_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_fe_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void fe_storage_cmov_test(void) {
@@ -5358,23 +5525,23 @@ void fe_storage_cmov_test(void) {
secp256k1_fe_storage a = zero;
secp256k1_fe_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_fe_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_fe_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_fe_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_fe_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void scalar_cmov_test(void) {
@@ -5388,23 +5555,23 @@ void scalar_cmov_test(void) {
secp256k1_scalar a = zero;
secp256k1_scalar_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_scalar_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_scalar_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_scalar_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_scalar_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void ge_storage_cmov_test(void) {
@@ -5420,23 +5587,23 @@ void ge_storage_cmov_test(void) {
secp256k1_ge_storage a = zero;
secp256k1_ge_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_ge_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_ge_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_ge_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_ge_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void run_cmov_tests(void) {
@@ -5448,9 +5615,6 @@ void run_cmov_tests(void) {
}
int main(int argc, char **argv) {
- unsigned char seed16[16] = {0};
- unsigned char run32[32] = {0};
-
/* Disable buffering for stdout to improve reliability of getting
* diagnostic information. Happens right at the start of main because
* setbuf must be used before any other operation on the stream. */
@@ -5463,52 +5627,20 @@ int main(int argc, char **argv) {
if (argc > 1) {
count = strtol(argv[1], NULL, 0);
}
+ printf("test count = %i\n", count);
/* find random seed */
- if (argc > 2) {
- int pos = 0;
- const char* ch = argv[2];
- while (pos < 16 && ch[0] != 0 && ch[1] != 0) {
- unsigned short sh;
- if ((sscanf(ch, "%2hx", &sh)) == 1) {
- seed16[pos] = sh;
- } else {
- break;
- }
- ch += 2;
- pos++;
- }
- } else {
- FILE *frand = fopen("/dev/urandom", "r");
- if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
- uint64_t t = time(NULL) * (uint64_t)1337;
- fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
- seed16[0] ^= t;
- seed16[1] ^= t >> 8;
- seed16[2] ^= t >> 16;
- seed16[3] ^= t >> 24;
- seed16[4] ^= t >> 32;
- seed16[5] ^= t >> 40;
- seed16[6] ^= t >> 48;
- seed16[7] ^= t >> 56;
- }
- if (frand) {
- fclose(frand);
- }
- }
- secp256k1_rand_seed(seed16);
-
- printf("test count = %i\n", count);
- printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+ secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
/* initialize */
run_context_tests(0);
run_context_tests(1);
run_scratch_tests();
ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
- if (secp256k1_rand_bits(1)) {
- secp256k1_rand256(run32);
- CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL));
+ if (secp256k1_testrand_bits(1)) {
+ unsigned char rand32[32];
+ secp256k1_testrand256(rand32);
+ CHECK(secp256k1_context_randomize(ctx, secp256k1_testrand_bits(1) ? rand32 : NULL));
}
run_rand_bits();
@@ -5542,6 +5674,7 @@ int main(int argc, char **argv) {
/* ecmult tests */
run_wnaf();
run_point_times_order();
+ run_ecmult_near_split_bound();
run_ecmult_chain();
run_ecmult_constants();
run_ecmult_gen_blind();
@@ -5550,9 +5683,7 @@ int main(int argc, char **argv) {
run_ec_combine();
/* endomorphism tests */
-#ifdef USE_ENDOMORPHISM
run_endomorphism_tests();
-#endif
/* EC point parser test */
run_ec_pubkey_parse_test();
@@ -5583,13 +5714,20 @@ int main(int argc, char **argv) {
run_recovery_tests();
#endif
+#ifdef ENABLE_MODULE_EXTRAKEYS
+ run_extrakeys_tests();
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORRSIG
+ run_schnorrsig_tests();
+#endif
+
/* util tests */
run_memczero_test();
run_cmov_tests();
- secp256k1_rand256(run32);
- printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
+ secp256k1_testrand_finish();
/* shutdown */
secp256k1_context_destroy(ctx);
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
index 8cca1cef21..f4d5b8e176 100644
--- a/src/secp256k1/src/tests_exhaustive.c
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -18,18 +18,15 @@
#ifndef EXHAUSTIVE_TEST_ORDER
/* see group_impl.h for allowable values */
#define EXHAUSTIVE_TEST_ORDER 13
-#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
#endif
#include "include/secp256k1.h"
+#include "assumptions.h"
#include "group.h"
#include "secp256k1.c"
#include "testrand_impl.h"
-#ifdef ENABLE_MODULE_RECOVERY
-#include "src/modules/recovery/main_impl.h"
-#include "include/secp256k1_recovery.h"
-#endif
+static int count = 2;
/** stolen from tests.c */
void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
@@ -61,7 +58,7 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_rand256(bin);
+ secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -69,6 +66,15 @@ void random_fe(secp256k1_fe *x) {
}
/** END stolen from tests.c */
+static uint32_t num_cores = 1;
+static uint32_t this_core = 0;
+
+SECP256K1_INLINE static int skip_section(uint64_t* iter) {
+ if (num_cores == 1) return 0;
+ *iter += 0xe7037ed1a0b428dbULL;
+ return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core;
+}
+
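The skip_section() helper added above is what lets tests_exhaustive.c be split across processes via the new [numcores] [thiscore] arguments parsed later in main(): each call advances iter by a large odd constant, folds it to 32 bits, and multiply-shifts it into a bucket in [0, num_cores); a process only runs the iterations whose bucket equals this_core. The following standalone sketch (not part of the diff; demo_bucket is an illustrative name) replays that mapping and shows that the buckets come out roughly even:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the skip_section() mapping: fold the 64-bit counter to 32 bits,
 * then scale into [0, num_cores) with a multiply-shift instead of a modulo. */
static uint32_t demo_bucket(uint64_t iter, uint32_t num_cores) {
    uint32_t folded = (uint32_t)iter ^ (uint32_t)(iter >> 32);
    return (uint32_t)(((uint64_t)folded * num_cores) >> 32);
}

int main(void) {
    uint64_t iter = 0;
    unsigned counts[4] = {0};
    int i;
    for (i = 0; i < 1000; i++) {
        iter += 0xe7037ed1a0b428dbULL; /* same additive constant as skip_section() */
        counts[demo_bucket(iter, 4)]++;
    }
    for (i = 0; i < 4; i++) {
        printf("core %d: %u of 1000 iterations\n", i, counts[i]);
    }
    return 0;
}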
int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
const unsigned char *key32, const unsigned char *algo16,
void *data, unsigned int attempt) {
@@ -89,93 +95,93 @@ int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned cha
return 1;
}
-#ifdef USE_ENDOMORPHISM
-void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
+void test_exhaustive_endomorphism(const secp256k1_ge *group) {
int i;
- for (i = 0; i < order; i++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge res;
secp256k1_ge_mul_lambda(&res, &group[i]);
ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
}
}
-#endif
-void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j;
+ uint64_t iter = 0;
/* Sanity-check (and check infinity functions) */
CHECK(secp256k1_ge_is_infinity(&group[0]));
CHECK(secp256k1_gej_is_infinity(&groupj[0]));
- for (i = 1; i < order; i++) {
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
CHECK(!secp256k1_ge_is_infinity(&group[i]));
CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
}
/* Check all addition formulae */
- for (j = 0; j < order; j++) {
+ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
secp256k1_fe fe_inv;
+ if (skip_section(&iter)) continue;
secp256k1_fe_inv(&fe_inv, &groupj[j].z);
- for (i = 0; i < order; i++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge zless_gej;
secp256k1_gej tmp;
/* add_var */
secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_ge */
if (j > 0) {
secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* add_ge_var */
secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_zinv_var */
zless_gej.infinity = groupj[j].infinity;
zless_gej.x = groupj[j].x;
zless_gej.y = groupj[j].y;
secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
/* Check doubling */
- for (i = 0; i < order; i++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
- if (i > 0) {
- secp256k1_gej_double_nonzero(&tmp, &groupj[i]);
- ge_equals_gej(&group[(2 * i) % order], &tmp);
- }
+ secp256k1_gej_double(&tmp, &groupj[i]);
+ ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
- ge_equals_gej(&group[(2 * i) % order], &tmp);
+ ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* Check negation */
- for (i = 1; i < order; i++) {
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge tmp;
secp256k1_gej tmpj;
secp256k1_ge_neg(&tmp, &group[i]);
- ge_equals_ge(&group[order - i], &tmp);
+ ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp);
secp256k1_gej_neg(&tmpj, &groupj[i]);
- ge_equals_gej(&group[order - i], &tmpj);
+ ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj);
}
}
-void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j, r_log;
- for (r_log = 1; r_log < order; r_log++) {
- for (j = 0; j < order; j++) {
- for (i = 0; i < order; i++) {
+ uint64_t iter = 0;
+ for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) {
+ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
+ if (skip_section(&iter)) continue;
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
secp256k1_scalar na, ng;
secp256k1_scalar_set_int(&na, i);
secp256k1_scalar_set_int(&ng, j);
secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
- ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
+ ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
if (i > 0) {
secp256k1_ecmult_const(&tmp, &group[i], &ng, 256);
- ge_equals_gej(&group[(i * j) % order], &tmp);
+ ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
@@ -194,14 +200,16 @@ static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t
return 1;
}
-void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k, x, y;
+ uint64_t iter = 0;
secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
- for (i = 0; i < order; i++) {
- for (j = 0; j < order; j++) {
- for (k = 0; k < order; k++) {
- for (x = 0; x < order; x++) {
- for (y = 0; y < order; y++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
+ for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
+ for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) {
+ if (skip_section(&iter)) continue;
+ for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) {
secp256k1_gej tmp;
secp256k1_scalar g_sc;
ecmult_multi_data data;
@@ -213,7 +221,7 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
data.pt[1] = group[y];
secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
- ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
+ ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
@@ -222,22 +230,23 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
-void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
+void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k, int* overflow) {
secp256k1_fe x;
unsigned char x_bin[32];
k %= EXHAUSTIVE_TEST_ORDER;
x = group[k].x;
secp256k1_fe_normalize(&x);
secp256k1_fe_get_b32(x_bin, &x);
- secp256k1_scalar_set_b32(r, x_bin, NULL);
+ secp256k1_scalar_set_b32(r, x_bin, overflow);
}
-void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
int s, r, msg, key;
- for (s = 1; s < order; s++) {
- for (r = 1; r < order; r++) {
- for (msg = 1; msg < order; msg++) {
- for (key = 1; key < order; key++) {
+ uint64_t iter = 0;
+ for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
+ for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
+ for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
+ for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
secp256k1_ge nonconst_ge;
secp256k1_ecdsa_signature sig;
secp256k1_pubkey pk;
@@ -246,6 +255,8 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
int k, should_verify;
unsigned char msg32[32];
+ if (skip_section(&iter)) continue;
+
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_set_int(&r_s, r);
secp256k1_scalar_set_int(&msg_s, msg);
@@ -255,9 +266,9 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
/* Run through every k value that gives us this r and check that *one* works.
* Note there could be none, there could be multiple, ECDSA is weird. */
should_verify = 0;
- for (k = 0; k < order; k++) {
+ for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
secp256k1_scalar check_x_s;
- r_from_k(&check_x_s, group, k);
+ r_from_k(&check_x_s, group, k, NULL);
if (r_s == check_x_s) {
secp256k1_scalar_set_int(&s_times_k_s, k);
secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
@@ -282,13 +293,15 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
}
}
-void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k;
+ uint64_t iter = 0;
/* Loop */
- for (i = 1; i < order; i++) { /* message */
- for (j = 1; j < order; j++) { /* key */
- for (k = 1; k < order; k++) { /* nonce */
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
+ for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
+ if (skip_section(&iter)) continue;
+ for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
secp256k1_ecdsa_signature sig;
secp256k1_scalar sk, msg, r, s, expected_r;
@@ -304,10 +317,10 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
/* Note that we compute expected_r *after* signing -- this is important
* because our nonce-computing function might change k during
* signing. */
- r_from_k(&expected_r, group, k);
+ r_from_k(&expected_r, group, k, NULL);
CHECK(r == expected_r);
- CHECK((k * s) % order == (i + r * j) % order ||
- (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+ CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
/* Overflow means we've tried every possible nonce */
if (k < starting_k) {
@@ -328,184 +341,114 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
}
#ifdef ENABLE_MODULE_RECOVERY
-void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
- int i, j, k;
-
- /* Loop */
- for (i = 1; i < order; i++) { /* message */
- for (j = 1; j < order; j++) { /* key */
- for (k = 1; k < order; k++) { /* nonce */
- const int starting_k = k;
- secp256k1_fe r_dot_y_normalized;
- secp256k1_ecdsa_recoverable_signature rsig;
- secp256k1_ecdsa_signature sig;
- secp256k1_scalar sk, msg, r, s, expected_r;
- unsigned char sk32[32], msg32[32];
- int expected_recid;
- int recid;
- secp256k1_scalar_set_int(&msg, i);
- secp256k1_scalar_set_int(&sk, j);
- secp256k1_scalar_get_b32(sk32, &sk);
- secp256k1_scalar_get_b32(msg32, &msg);
-
- secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+#include "src/modules/recovery/tests_exhaustive_impl.h"
+#endif
- /* Check directly */
- secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
- r_from_k(&expected_r, group, k);
- CHECK(r == expected_r);
- CHECK((k * s) % order == (i + r * j) % order ||
- (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
- /* In computing the recid, there is an overflow condition that is disabled in
- * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
- * will exceed the group order, and our signing code always holds out for r
- * values that don't overflow, so with a proper overflow check the tests would
- * loop indefinitely. */
- r_dot_y_normalized = group[k].y;
- secp256k1_fe_normalize(&r_dot_y_normalized);
- /* Also the recovery id is flipped depending if we hit the low-s branch */
- if ((k * s) % order == (i + r * j) % order) {
- expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
- } else {
- expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
- }
- CHECK(recid == expected_recid);
+#ifdef ENABLE_MODULE_EXTRAKEYS
+#include "src/modules/extrakeys/tests_exhaustive_impl.h"
+#endif
- /* Convert to a standard sig then check */
- secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
- secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
- /* Note that we compute expected_r *after* signing -- this is important
- * because our nonce-computing function function might change k during
- * signing. */
- r_from_k(&expected_r, group, k);
- CHECK(r == expected_r);
- CHECK((k * s) % order == (i + r * j) % order ||
- (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+#ifdef ENABLE_MODULE_SCHNORRSIG
+#include "src/modules/schnorrsig/tests_exhaustive_impl.h"
+#endif
- /* Overflow means we've tried every possible nonce */
- if (k < starting_k) {
- break;
- }
- }
+int main(int argc, char** argv) {
+ int i;
+ secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
+ secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
+ unsigned char rand32[32];
+ secp256k1_context *ctx;
+
+ /* Disable buffering for stdout to improve reliability of getting
+ * diagnostic information. Happens right at the start of main because
+ * setbuf must be used before any other operation on the stream. */
+ setbuf(stdout, NULL);
+ /* Also disable buffering for stderr because it's not guaranteed that it's
+ * unbuffered on all systems. */
+ setbuf(stderr, NULL);
+
+ printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER);
+
+ /* find iteration count */
+ if (argc > 1) {
+ count = strtol(argv[1], NULL, 0);
+ }
+ printf("test count = %i\n", count);
+
+ /* find random seed */
+ secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
+
+ /* set up split processing */
+ if (argc > 4) {
+ num_cores = strtol(argv[3], NULL, 0);
+ this_core = strtol(argv[4], NULL, 0);
+ if (num_cores < 1 || this_core >= num_cores) {
+ fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]);
+ return 1;
}
+ printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1);
}
-}
-void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
- /* This is essentially a copy of test_exhaustive_verify, with recovery added */
- int s, r, msg, key;
- for (s = 1; s < order; s++) {
- for (r = 1; r < order; r++) {
- for (msg = 1; msg < order; msg++) {
- for (key = 1; key < order; key++) {
- secp256k1_ge nonconst_ge;
- secp256k1_ecdsa_recoverable_signature rsig;
- secp256k1_ecdsa_signature sig;
- secp256k1_pubkey pk;
- secp256k1_scalar sk_s, msg_s, r_s, s_s;
- secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
- int recid = 0;
- int k, should_verify;
- unsigned char msg32[32];
-
- secp256k1_scalar_set_int(&s_s, s);
- secp256k1_scalar_set_int(&r_s, r);
- secp256k1_scalar_set_int(&msg_s, msg);
- secp256k1_scalar_set_int(&sk_s, key);
- secp256k1_scalar_get_b32(msg32, &msg_s);
+ while (count--) {
+ /* Build context */
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_testrand256(rand32);
+ CHECK(secp256k1_context_randomize(ctx, rand32));
+
+ /* Generate the entire group */
+ secp256k1_gej_set_infinity(&groupj[0]);
+ secp256k1_ge_set_gej(&group[0], &groupj[0]);
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
+ secp256k1_ge_set_gej(&group[i], &groupj[i]);
+ if (count != 0) {
+ /* Set a different random z-value for each Jacobian point, except that
+ z=1 is used in the last iteration. */
+ secp256k1_fe z;
+ random_fe(&z);
+ secp256k1_gej_rescale(&groupj[i], &z);
+ }
- /* Verify by hand */
- /* Run through every k value that gives us this r and check that *one* works.
- * Note there could be none, there could be multiple, ECDSA is weird. */
- should_verify = 0;
- for (k = 0; k < order; k++) {
- secp256k1_scalar check_x_s;
- r_from_k(&check_x_s, group, k);
- if (r_s == check_x_s) {
- secp256k1_scalar_set_int(&s_times_k_s, k);
- secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
- secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
- secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
- should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
- }
- }
- /* nb we have a "high s" rule */
- should_verify &= !secp256k1_scalar_is_high(&s_s);
+ /* Verify against ecmult_gen */
+ {
+ secp256k1_scalar scalar_i;
+ secp256k1_gej generatedj;
+ secp256k1_ge generated;
- /* We would like to try recovering the pubkey and checking that it matches,
- * but pubkey recovery is impossible in the exhaustive tests (the reason
- * being that there are 12 nonzero r values, 12 nonzero points, and no
- * overlap between the sets, so there are no valid signatures). */
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+ secp256k1_ge_set_gej(&generated, &generatedj);
- /* Verify by converting to a standard signature and calling verify */
- secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
- secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
- memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
- secp256k1_pubkey_save(&pk, &nonconst_ge);
- CHECK(should_verify ==
- secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
- }
+ CHECK(group[i].infinity == 0);
+ CHECK(generated.infinity == 0);
+ CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+ CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
}
}
- }
-}
-#endif
-
-int main(void) {
- int i;
- secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
- secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
- /* Build context */
- secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ /* Run the tests */
+ test_exhaustive_endomorphism(group);
+ test_exhaustive_addition(group, groupj);
+ test_exhaustive_ecmult(ctx, group, groupj);
+ test_exhaustive_ecmult_multi(ctx, group);
+ test_exhaustive_sign(ctx, group);
+ test_exhaustive_verify(ctx, group);
- /* TODO set z = 1, then do num_tests runs with random z values */
+#ifdef ENABLE_MODULE_RECOVERY
+ test_exhaustive_recovery(ctx, group);
+#endif
+#ifdef ENABLE_MODULE_EXTRAKEYS
+ test_exhaustive_extrakeys(ctx, group);
+#endif
+#ifdef ENABLE_MODULE_SCHNORRSIG
+ test_exhaustive_schnorrsig(ctx);
+#endif
- /* Generate the entire group */
- secp256k1_gej_set_infinity(&groupj[0]);
- secp256k1_ge_set_gej(&group[0], &groupj[0]);
- for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
- /* Set a different random z-value for each Jacobian point */
- secp256k1_fe z;
- random_fe(&z);
-
- secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
- secp256k1_ge_set_gej(&group[i], &groupj[i]);
- secp256k1_gej_rescale(&groupj[i], &z);
-
- /* Verify against ecmult_gen */
- {
- secp256k1_scalar scalar_i;
- secp256k1_gej generatedj;
- secp256k1_ge generated;
-
- secp256k1_scalar_set_int(&scalar_i, i);
- secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
- secp256k1_ge_set_gej(&generated, &generatedj);
-
- CHECK(group[i].infinity == 0);
- CHECK(generated.infinity == 0);
- CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
- CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
- }
+ secp256k1_context_destroy(ctx);
}
- /* Run the tests */
-#ifdef USE_ENDOMORPHISM
- test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
-#endif
- test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ secp256k1_testrand_finish();
-#ifdef ENABLE_MODULE_RECOVERY
- test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
-#endif
-
- secp256k1_context_destroy(ctx);
+ printf("no problems found\n");
return 0;
}
-
diff --git a/src/secp256k1/src/util.h b/src/secp256k1/src/util.h
index 8289e23e0c..3a88a41bc6 100644
--- a/src/secp256k1/src/util.h
+++ b/src/secp256k1/src/util.h
@@ -170,13 +170,35 @@ static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_siz
# define I64uFORMAT "llu"
#endif
-#if defined(HAVE___INT128)
-# if defined(__GNUC__)
-# define SECP256K1_GNUC_EXT __extension__
-# else
-# define SECP256K1_GNUC_EXT
+#if defined(__GNUC__)
+# define SECP256K1_GNUC_EXT __extension__
+#else
+# define SECP256K1_GNUC_EXT
+#endif
+
+/* If SECP256K1_{LITTLE,BIG}_ENDIAN is not explicitly provided, infer from various other system macros. */
+#if !defined(SECP256K1_LITTLE_ENDIAN) && !defined(SECP256K1_BIG_ENDIAN)
+/* Inspired by https://github.com/rofl0r/endianness.h/blob/9853923246b065a3b52d2c43835f3819a62c7199/endianness.h#L52L73 */
+# if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ defined(_X86_) || defined(__x86_64__) || defined(__i386__) || \
+ defined(__i486__) || defined(__i586__) || defined(__i686__) || \
+ defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) || \
+ defined(__ARMEL__) || defined(__AARCH64EL__) || \
+ (defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1) || \
+ (defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1) || \
+ defined(_M_IX86) || defined(_M_AMD64) || defined(_M_ARM) /* MSVC */
+# define SECP256K1_LITTLE_ENDIAN
+# endif
+# if (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
+ defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) || \
+ defined(__MICROBLAZEEB__) || defined(__ARMEB__) || defined(__AARCH64EB__) || \
+ (defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1) || \
+ (defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1)
+# define SECP256K1_BIG_ENDIAN
# endif
-SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
+#endif
+#if defined(SECP256K1_LITTLE_ENDIAN) == defined(SECP256K1_BIG_ENDIAN)
+# error Please make sure that either SECP256K1_LITTLE_ENDIAN or SECP256K1_BIG_ENDIAN is set, see src/util.h.
#endif
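The block above infers the byte order from compiler and platform macros and turns a missing or contradictory inference into a compile-time error. As a quick sanity check, a small program along the following lines (illustrative only, assuming util.h has been included so the macros are visible) can confirm the detected order at runtime:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t probe = 0x01020304u;
    unsigned char first = *(const unsigned char *)&probe;
#if defined(SECP256K1_LITTLE_ENDIAN)
    printf("macro says little-endian, first byte is 0x%02x (expect 0x04)\n", first);
#elif defined(SECP256K1_BIG_ENDIAN)
    printf("macro says big-endian, first byte is 0x%02x (expect 0x01)\n", first);
#else
    printf("neither endianness macro is defined (was util.h included?), first byte is 0x%02x\n", first);
#endif
    return 0;
}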
/* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */
@@ -194,13 +216,36 @@ static SECP256K1_INLINE void memczero(void *s, size_t len, int flag) {
}
}
+/** Semantics like memcmp. Variable-time.
+ *
+ * We use this to avoid possible compiler bugs with memcmp, e.g.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189
+ */
+static SECP256K1_INLINE int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n) {
+ const unsigned char *p1 = s1, *p2 = s2;
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ int diff = p1[i] - p2[i];
+ if (diff != 0) {
+ return diff;
+ }
+ }
+ return 0;
+}
+
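secp256k1_memcmp_var() is a drop-in replacement for the memcmp calls rewritten throughout tests.c above: as with memcmp, only the sign of the return value is meaningful, which is all the call sites rely on (== 0, != 0, < 0). A minimal behavioural sketch (not part of the diff; demo_memcmp_var is an illustrative copy of the helper):

#include <stddef.h>
#include <stdio.h>

/* Same loop as the helper above, without the SECP256K1_INLINE attribute. */
static int demo_memcmp_var(const void *s1, const void *s2, size_t n) {
    const unsigned char *p1 = s1, *p2 = s2;
    size_t i;
    for (i = 0; i < n; i++) {
        int diff = p1[i] - p2[i];
        if (diff != 0) {
            return diff; /* raw byte difference, not clamped to -1/0/1 */
        }
    }
    return 0;
}

int main(void) {
    const unsigned char a[4] = {1, 2, 3, 4};
    const unsigned char b[4] = {1, 2, 9, 4};
    printf("a vs b: %d (negative because a[2] < b[2])\n", demo_memcmp_var(a, b, 4));
    printf("a vs a: %d\n", demo_memcmp_var(a, a, 4));
    return 0;
}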
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/
static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag) {
unsigned int mask0, mask1, r_masked, a_masked;
+ /* Access flag with a volatile-qualified lvalue.
+ This prevents clang from figuring out (after inlining) that flag can
+ only take the values 0 or 1, which would lead to variable-time code. */
+ volatile int vflag = flag;
+
/* Casting a negative int to unsigned and back to int is implementation defined behavior */
VERIFY_CHECK(*r >= 0 && *a >= 0);
- mask0 = (unsigned int)flag + ~0u;
+ mask0 = (unsigned int)vflag + ~0u;
mask1 = ~mask0;
r_masked = ((unsigned int)*r & mask0);
a_masked = ((unsigned int)*a & mask1);
@@ -208,4 +253,21 @@ static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag)
*r = (int)(r_masked | a_masked);
}
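The masks in secp256k1_int_cmov() are derived from the volatile copy of flag: with flag == 1, mask0 = 1 + ~0u wraps to 0 and mask1 becomes all ones, so *r is replaced by *a; with flag == 0, mask0 is all ones and *r is kept. The small sketch below (illustrative only, not the library's code) replays that arithmetic:

#include <assert.h>
#include <stdio.h>

static int demo_cmov(int r, int a, int flag) {
    volatile int vflag = flag; /* same volatile trick as in the function above */
    unsigned int mask0 = (unsigned int)vflag + ~0u; /* flag==1 -> 0, flag==0 -> all ones */
    unsigned int mask1 = ~mask0;
    return (int)(((unsigned int)r & mask0) | ((unsigned int)a & mask1));
}

int main(void) {
    assert(demo_cmov(5, 7, 0) == 5); /* flag == 0: keep r */
    assert(demo_cmov(5, 7, 1) == 7); /* flag == 1: take a */
    printf("cmov sketch ok\n");
    return 0;
}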
+/* If USE_FORCE_WIDEMUL_{INT128,INT64} is set, use that wide multiplication implementation.
+ * Otherwise use the presence of __SIZEOF_INT128__ to decide.
+ */
+#if defined(USE_FORCE_WIDEMUL_INT128)
+# define SECP256K1_WIDEMUL_INT128 1
+#elif defined(USE_FORCE_WIDEMUL_INT64)
+# define SECP256K1_WIDEMUL_INT64 1
+#elif defined(__SIZEOF_INT128__)
+# define SECP256K1_WIDEMUL_INT128 1
+#else
+# define SECP256K1_WIDEMUL_INT64 1
+#endif
+#if defined(SECP256K1_WIDEMUL_INT128)
+SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
+SECP256K1_GNUC_EXT typedef __int128 int128_t;
+#endif
+
#endif /* SECP256K1_UTIL_H */
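The new SECP256K1_WIDEMUL_INT128/INT64 selection replaces the old HAVE___INT128-only typedef and lets a build force either wide-multiplication path. The sketch below (illustrative only, assuming util.h has been included; mul_hi_64 is a made-up name, not the library's code) shows the kind of operation the uint128_t typedef enables, a 64x64->128-bit multiply in one expression, next to a generic 32-bit-halves fallback:

#include <stdint.h>
#include <stdio.h>

/* Return the high 64 bits of a * b. */
static uint64_t mul_hi_64(uint64_t a, uint64_t b) {
#if defined(SECP256K1_WIDEMUL_INT128)
    /* Single expression on compilers with a 128-bit integer type. */
    return (uint64_t)(((uint128_t)a * b) >> 64);
#else
    /* Generic fallback using 32-bit halves; not the library's actual INT64 code. */
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
    uint64_t mid  = a_hi * b_lo + ((a_lo * b_lo) >> 32);
    uint64_t mid2 = a_lo * b_hi + (uint32_t)mid;
    return a_hi * b_hi + (mid >> 32) + (mid2 >> 32);
#endif
}

int main(void) {
    /* 2^32 * 2^32 = 2^64, whose high 64 bits are exactly 1. */
    printf("high word: %llu (expect 1)\n",
           (unsigned long long)mul_hi_64(1ULL << 32, 1ULL << 32));
    return 0;
}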
diff --git a/src/secp256k1/src/valgrind_ctime_test.c b/src/secp256k1/src/valgrind_ctime_test.c
index 60a82d599e..3169e3651c 100644
--- a/src/secp256k1/src/valgrind_ctime_test.c
+++ b/src/secp256k1/src/valgrind_ctime_test.c
@@ -6,16 +6,25 @@
#include <valgrind/memcheck.h>
#include "include/secp256k1.h"
+#include "assumptions.h"
#include "util.h"
-#if ENABLE_MODULE_ECDH
+#ifdef ENABLE_MODULE_ECDH
# include "include/secp256k1_ecdh.h"
#endif
-#if ENABLE_MODULE_RECOVERY
+#ifdef ENABLE_MODULE_RECOVERY
# include "include/secp256k1_recovery.h"
#endif
+#ifdef ENABLE_MODULE_EXTRAKEYS
+# include "include/secp256k1_extrakeys.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORRSIG
+#include "include/secp256k1_schnorrsig.h"
+#endif
+
int main(void) {
secp256k1_context* ctx;
secp256k1_ecdsa_signature signature;
@@ -28,10 +37,13 @@ int main(void) {
unsigned char key[32];
unsigned char sig[74];
unsigned char spubkey[33];
-#if ENABLE_MODULE_RECOVERY
+#ifdef ENABLE_MODULE_RECOVERY
secp256k1_ecdsa_recoverable_signature recoverable_signature;
int recid;
#endif
+#ifdef ENABLE_MODULE_EXTRAKEYS
+ secp256k1_keypair keypair;
+#endif
if (!RUNNING_ON_VALGRIND) {
fprintf(stderr, "This test can only usefully be run inside valgrind.\n");
@@ -49,7 +61,9 @@ int main(void) {
msg[i] = i + 1;
}
- ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_DECLASSIFY);
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN
+ | SECP256K1_CONTEXT_VERIFY
+ | SECP256K1_CONTEXT_DECLASSIFY);
/* Test keygen. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
@@ -67,7 +81,7 @@ int main(void) {
CHECK(ret);
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature));
-#if ENABLE_MODULE_ECDH
+#ifdef ENABLE_MODULE_ECDH
/* Test ECDH. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_ecdh(ctx, msg, &pubkey, key, NULL, NULL);
@@ -75,7 +89,7 @@ int main(void) {
CHECK(ret == 1);
#endif
-#if ENABLE_MODULE_RECOVERY
+#ifdef ENABLE_MODULE_RECOVERY
/* Test signing a recoverable signature. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL);
@@ -114,6 +128,30 @@ int main(void) {
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
CHECK(ret);
+ /* Test keypair_create and keypair_xonly_tweak_add. */
+#ifdef ENABLE_MODULE_EXTRAKEYS
+ VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
+ ret = secp256k1_keypair_create(ctx, &keypair, key);
+ VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
+ CHECK(ret == 1);
+
+ /* The tweak is not treated as a secret in keypair_tweak_add */
+ VALGRIND_MAKE_MEM_DEFINED(msg, 32);
+ ret = secp256k1_keypair_xonly_tweak_add(ctx, &keypair, msg);
+ VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
+ CHECK(ret == 1);
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORRSIG
+ VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
+ ret = secp256k1_keypair_create(ctx, &keypair, key);
+ VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
+ CHECK(ret == 1);
+ ret = secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL);
+ VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
+ CHECK(ret == 1);
+#endif
+
secp256k1_context_destroy(ctx);
return 0;
}