Diffstat (limited to 'src/scalar_impl.h')
-rw-r--r--  src/scalar_impl.h | 229
1 file changed, 5 insertions(+), 224 deletions(-)
diff --git a/src/scalar_impl.h b/src/scalar_impl.h
index fc75891818..e124474773 100644
--- a/src/scalar_impl.h
+++ b/src/scalar_impl.h
@@ -1,8 +1,8 @@
-/**********************************************************************
- * Copyright (c) 2014 Pieter Wuille *
- * Distributed under the MIT software license, see the accompanying *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
+/***********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
+ ***********************************************************************/
#ifndef SECP256K1_SCALAR_IMPL_H
#define SECP256K1_SCALAR_IMPL_H
@@ -31,231 +31,12 @@
static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
static const secp256k1_scalar secp256k1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
-#ifndef USE_NUM_NONE
-static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) {
- unsigned char c[32];
- secp256k1_scalar_get_b32(c, a);
- secp256k1_num_set_bin(r, c, 32);
-}
-
-/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
-static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
-#if defined(EXHAUSTIVE_TEST_ORDER)
- static const unsigned char order[32] = {
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
- };
-#else
- static const unsigned char order[32] = {
- 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
- 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
- 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
- 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
- };
-#endif
- secp256k1_num_set_bin(r, order, 32);
-}
-#endif
-
static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) {
int overflow;
secp256k1_scalar_set_b32(r, bin, &overflow);
return (!overflow) & (!secp256k1_scalar_is_zero(r));
}
-static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
-#if defined(EXHAUSTIVE_TEST_ORDER)
- int i;
- *r = 0;
- for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
- if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
- *r = i;
- /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
- * have a composite group order; fix it in exhaustive_tests.c). */
- VERIFY_CHECK(*r != 0);
-}
-#else
- secp256k1_scalar *t;
- int i;
- /* First compute xN as x ^ (2^N - 1) for some values of N,
- * and uM as x ^ M for some values of M. */
- secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
- secp256k1_scalar u2, u5, u9, u11, u13;
-
- secp256k1_scalar_sqr(&u2, x);
- secp256k1_scalar_mul(&x2, &u2, x);
- secp256k1_scalar_mul(&u5, &u2, &x2);
- secp256k1_scalar_mul(&x3, &u5, &u2);
- secp256k1_scalar_mul(&u9, &x3, &u2);
- secp256k1_scalar_mul(&u11, &u9, &u2);
- secp256k1_scalar_mul(&u13, &u11, &u2);
-
- secp256k1_scalar_sqr(&x6, &u13);
- secp256k1_scalar_sqr(&x6, &x6);
- secp256k1_scalar_mul(&x6, &x6, &u11);
-
- secp256k1_scalar_sqr(&x8, &x6);
- secp256k1_scalar_sqr(&x8, &x8);
- secp256k1_scalar_mul(&x8, &x8, &x2);
-
- secp256k1_scalar_sqr(&x14, &x8);
- for (i = 0; i < 5; i++) {
- secp256k1_scalar_sqr(&x14, &x14);
- }
- secp256k1_scalar_mul(&x14, &x14, &x6);
-
- secp256k1_scalar_sqr(&x28, &x14);
- for (i = 0; i < 13; i++) {
- secp256k1_scalar_sqr(&x28, &x28);
- }
- secp256k1_scalar_mul(&x28, &x28, &x14);
-
- secp256k1_scalar_sqr(&x56, &x28);
- for (i = 0; i < 27; i++) {
- secp256k1_scalar_sqr(&x56, &x56);
- }
- secp256k1_scalar_mul(&x56, &x56, &x28);
-
- secp256k1_scalar_sqr(&x112, &x56);
- for (i = 0; i < 55; i++) {
- secp256k1_scalar_sqr(&x112, &x112);
- }
- secp256k1_scalar_mul(&x112, &x112, &x56);
-
- secp256k1_scalar_sqr(&x126, &x112);
- for (i = 0; i < 13; i++) {
- secp256k1_scalar_sqr(&x126, &x126);
- }
- secp256k1_scalar_mul(&x126, &x126, &x14);
-
- /* Then accumulate the final result (t starts at x126). */
- t = &x126;
- for (i = 0; i < 3; i++) {
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u5); /* 101 */
- for (i = 0; i < 4; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x3); /* 111 */
- for (i = 0; i < 4; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u5); /* 101 */
- for (i = 0; i < 5; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u11); /* 1011 */
- for (i = 0; i < 4; i++) {
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u11); /* 1011 */
- for (i = 0; i < 4; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x3); /* 111 */
- for (i = 0; i < 5; i++) { /* 00 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x3); /* 111 */
- for (i = 0; i < 6; i++) { /* 00 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u13); /* 1101 */
- for (i = 0; i < 4; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u5); /* 101 */
- for (i = 0; i < 3; i++) {
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x3); /* 111 */
- for (i = 0; i < 5; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u9); /* 1001 */
- for (i = 0; i < 6; i++) { /* 000 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u5); /* 101 */
- for (i = 0; i < 10; i++) { /* 0000000 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x3); /* 111 */
- for (i = 0; i < 4; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x3); /* 111 */
- for (i = 0; i < 9; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
- for (i = 0; i < 5; i++) { /* 0 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u9); /* 1001 */
- for (i = 0; i < 6; i++) { /* 00 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u11); /* 1011 */
- for (i = 0; i < 4; i++) {
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u13); /* 1101 */
- for (i = 0; i < 5; i++) {
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &x2); /* 11 */
- for (i = 0; i < 6; i++) { /* 00 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u13); /* 1101 */
- for (i = 0; i < 10; i++) { /* 000000 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u13); /* 1101 */
- for (i = 0; i < 4; i++) {
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, &u9); /* 1001 */
- for (i = 0; i < 6; i++) { /* 00000 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(t, t, x); /* 1 */
- for (i = 0; i < 8; i++) { /* 00 */
- secp256k1_scalar_sqr(t, t);
- }
- secp256k1_scalar_mul(r, t, &x6); /* 111111 */
-}
-
-SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
- return !(a->d[0] & 1);
-}
-#endif
-
-static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
-#if defined(USE_SCALAR_INV_BUILTIN)
- secp256k1_scalar_inverse(r, x);
-#elif defined(USE_SCALAR_INV_NUM)
- unsigned char b[32];
- secp256k1_num n, m;
- secp256k1_scalar t = *x;
- secp256k1_scalar_get_b32(b, &t);
- secp256k1_num_set_bin(&n, b, 32);
- secp256k1_scalar_order_get_num(&m);
- secp256k1_num_mod_inverse(&n, &n, &m);
- secp256k1_num_get_bin(b, 32, &n);
- secp256k1_scalar_set_b32(r, b, NULL);
- /* Verify that the inverse was computed correctly, without GMP code. */
- secp256k1_scalar_mul(&t, &t, r);
- CHECK(secp256k1_scalar_is_one(&t));
-#else
-#error "Please select scalar inverse implementation"
-#endif
-}
-
/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13