Diffstat (limited to 'src/ecmult_gen_impl.h')
-rw-r--r-- | src/ecmult_gen_impl.h | 134
1 file changed, 96 insertions, 38 deletions
diff --git a/src/ecmult_gen_impl.h b/src/ecmult_gen_impl.h
index 849452c7a1..4697753ac8 100644
--- a/src/ecmult_gen_impl.h
+++ b/src/ecmult_gen_impl.h
@@ -1,5 +1,5 @@
 /**********************************************************************
- * Copyright (c) 2013, 2014 Pieter Wuille                              *
+ * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell       *
  * Distributed under the MIT software license, see the accompanying   *
  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
  **********************************************************************/
@@ -10,36 +10,23 @@
 #include "scalar.h"
 #include "group.h"
 #include "ecmult_gen.h"
+#include "hash_impl.h"

-typedef struct {
-    /* For accelerating the computation of a*G:
-     * To harden against timing attacks, use the following mechanism:
-     * * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63.
-     * * Compute sum(n_i * 16^i * G + U_i, i=0..63), where:
-     *   * U_i = U * 2^i (for i=0..62)
-     *   * U_i = U * (1-2^63) (for i=63)
-     *   where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0.
-     * For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is
-     * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63).
-     * None of the resulting prec group elements have a known scalar, and neither do any of
-     * the intermediate sums while computing a*G.
-     */
-    secp256k1_ge_storage_t prec[64][16]; /* prec[j][i] = 16^j * i * G + U_i */
-} secp256k1_ecmult_gen_consts_t;
-
-static const secp256k1_ecmult_gen_consts_t *secp256k1_ecmult_gen_consts = NULL;
+static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context_t *ctx) {
+    ctx->prec = NULL;
+}

-static void secp256k1_ecmult_gen_start(void) {
+static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context_t *ctx) {
     secp256k1_ge_t prec[1024];
     secp256k1_gej_t gj;
     secp256k1_gej_t nums_gej;
-    secp256k1_ecmult_gen_consts_t *ret;
     int i, j;
-    if (secp256k1_ecmult_gen_consts != NULL)
+
+    if (ctx->prec != NULL) {
         return;
+    }

-    /* Allocate the precomputation table. */
-    ret = (secp256k1_ecmult_gen_consts_t*)checked_malloc(sizeof(secp256k1_ecmult_gen_consts_t));
+    ctx->prec = (secp256k1_ge_storage_t (*)[64][16])checked_malloc(sizeof(*ctx->prec));

     /* get the generator */
     secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
@@ -85,42 +72,113 @@ static void secp256k1_ecmult_gen_start(void) {
     }
     for (j = 0; j < 64; j++) {
         for (i = 0; i < 16; i++) {
-            secp256k1_ge_to_storage(&ret->prec[j][i], &prec[j*16 + i]);
+            secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
         }
     }
+    secp256k1_ecmult_gen_blind(ctx, NULL);
+}

-    /* Set the global pointer to the precomputation table. */
-    secp256k1_ecmult_gen_consts = ret;
+static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context_t* ctx) {
+    return ctx->prec != NULL;
 }

-static void secp256k1_ecmult_gen_stop(void) {
-    secp256k1_ecmult_gen_consts_t *c;
-    if (secp256k1_ecmult_gen_consts == NULL)
-        return;
+static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context_t *dst,
+                                               const secp256k1_ecmult_gen_context_t *src) {
+    if (src->prec == NULL) {
+        dst->prec = NULL;
+    } else {
+        dst->prec = (secp256k1_ge_storage_t (*)[64][16])checked_malloc(sizeof(*dst->prec));
+        memcpy(dst->prec, src->prec, sizeof(*dst->prec));
+        dst->initial = src->initial;
+        dst->blind = src->blind;
+    }
+}

-    c = (secp256k1_ecmult_gen_consts_t*)secp256k1_ecmult_gen_consts;
-    secp256k1_ecmult_gen_consts = NULL;
-    free(c);
+static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context_t *ctx) {
+    free(ctx->prec);
+    secp256k1_scalar_clear(&ctx->blind);
+    secp256k1_gej_clear(&ctx->initial);
+    ctx->prec = NULL;
 }

-static void secp256k1_ecmult_gen(secp256k1_gej_t *r, const secp256k1_scalar_t *gn) {
-    const secp256k1_ecmult_gen_consts_t *c = secp256k1_ecmult_gen_consts;
+static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_gej_t *r, const secp256k1_scalar_t *gn) {
     secp256k1_ge_t add;
     secp256k1_ge_storage_t adds;
+    secp256k1_scalar_t gnb;
     int bits;
     int i, j;
-    secp256k1_gej_set_infinity(r);
+    memset(&adds, 0, sizeof(adds));
+    *r = ctx->initial;
+    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
+    secp256k1_scalar_add(&gnb, gn, &ctx->blind);
     add.infinity = 0;
     for (j = 0; j < 64; j++) {
-        bits = secp256k1_scalar_get_bits(gn, j * 4, 4);
+        bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
         for (i = 0; i < 16; i++) {
-            secp256k1_ge_storage_cmov(&adds, &c->prec[j][i], i == bits);
+            /** This uses a conditional move to avoid any secret data in array indexes.
+             *   _Any_ use of secret indexes has been demonstrated to result in timing
+             *   sidechannels, even when the cache-line access patterns are uniform.
+             *  See also:
+             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
+             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
+             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
+             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
+             *    (http://www.tau.ac.il/~tromer/papers/cache.pdf)
+             */
+            secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
         }
         secp256k1_ge_from_storage(&add, &adds);
         secp256k1_gej_add_ge(r, r, &add);
     }
     bits = 0;
     secp256k1_ge_clear(&add);
+    secp256k1_scalar_clear(&gnb);
+}
+
+/* Setup blinding values for secp256k1_ecmult_gen. */
+static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context_t *ctx, const unsigned char *seed32) {
+    secp256k1_scalar_t b;
+    secp256k1_gej_t gb;
+    secp256k1_fe_t s;
+    unsigned char nonce32[32];
+    secp256k1_rfc6979_hmac_sha256_t rng;
+    int retry;
+    if (!seed32) {
+        /* When seed is NULL, reset the initial point and blinding value. */
+        secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
+        secp256k1_gej_neg(&ctx->initial, &ctx->initial);
+        secp256k1_scalar_set_int(&ctx->blind, 1);
+    }
+    /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
+    secp256k1_scalar_get_b32(nonce32, &ctx->blind);
+    /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
+     *   and guards against weak or adversarial seeds.  This is a simpler and safer interface than
+     *   asking the caller for blinding values directly and expecting them to retry on failure.
+     */
+    secp256k1_rfc6979_hmac_sha256_initialize(&rng, seed32 ? seed32 : nonce32, 32, nonce32, 32, NULL, 0);
+    /* Retry for out of range results to achieve uniformity. */
+    do {
+        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+        retry = !secp256k1_fe_set_b32(&s, nonce32);
+        retry |= secp256k1_fe_is_zero(&s);
+    } while (retry);
+    /* Randomize the projection to defend against multiplier sidechannels. */
+    secp256k1_gej_rescale(&ctx->initial, &s);
+    secp256k1_fe_clear(&s);
+    do {
+        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+        secp256k1_scalar_set_b32(&b, nonce32, &retry);
+        /* A blinding value of 0 works, but would undermine the projection hardening. */
+        retry |= secp256k1_scalar_is_zero(&b);
+    } while (retry);
+    secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+    memset(nonce32, 0, 32);
+    secp256k1_ecmult_gen(ctx, &gb, &b);
+    secp256k1_scalar_negate(&b, &b);
+    ctx->blind = b;
+    ctx->initial = gb;
+    secp256k1_scalar_clear(&b);
+    secp256k1_gej_clear(&gb);
 }

 #endif
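
The comment deleted at the top of the patch describes the table layout that both the old and new code share: the scalar is consumed in 64 windows of 4 bits, n = sum(n_j * 16^j), and each window selects one of the 16 precomputed entries prec[j][n_j]. The following small, self-contained check of that decomposition is only an illustration; it uses a 64-bit value (16 nibbles) for brevity, whereas the real code pulls 64 nibbles out of a 256-bit scalar with secp256k1_scalar_get_bits.

    #include <assert.h>
    #include <stdint.h>

    /* The table-driven loop relies on the base-16 decomposition
     *     n = sum(n_j * 16^j),  0 <= n_j < 16,
     * so that n*G can be assembled from precomputed multiples n_j * 16^j * G.
     * Here we only verify the decomposition itself on a 64-bit word. */
    int main(void) {
        uint64_t n = 0x243f6a8885a308d3ull;   /* arbitrary test scalar */
        uint64_t rebuilt = 0;
        int j;
        for (j = 0; j < 16; j++) {
            unsigned bits = (unsigned)((n >> (4 * j)) & 0xf);   /* window n_j */
            rebuilt += (uint64_t)bits << (4 * j);               /* n_j * 16^j */
        }
        assert(rebuilt == n);
        return 0;
    }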
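The new comment about secret array indexes is why the inner loop touches all 16 table entries and selects one with secp256k1_ge_storage_cmov instead of reading prec[j][bits] directly. Below is a minimal sketch of that idiom using plain 32-bit words rather than secp256k1_ge_storage_t; the helper names are illustrative and not part of the library.

    #include <stdint.h>

    /* Branch-free conditional move: copy src over dst only when flag is 1.
     * The mask is all-ones or all-zeros, so no branch and no secret-dependent
     * memory address is involved. */
    void word_cmov(uint32_t *dst, uint32_t src, int flag) {
        uint32_t mask = (uint32_t)0 - (uint32_t)flag;   /* 0x00000000 or 0xffffffff */
        *dst = (*dst & ~mask) | (src & mask);
    }

    /* Constant-time table lookup: scan every entry and cmov the wanted one,
     * mirroring the prec[j][i] loop in secp256k1_ecmult_gen. */
    uint32_t table_lookup(const uint32_t table[16], int secret_index) {
        uint32_t result = 0;
        int i;
        for (i = 0; i < 16; i++) {
            word_cmov(&result, table[i], i == secret_index);
        }
        return result;
    }

The library's secp256k1_ge_storage_cmov is built essentially the same way, applying a limb-by-limb mask to the stored x and y field elements.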
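The ctx->initial / ctx->blind pair implements the blinding noted in the comment "computing (n-b)G + bG instead of nG": secp256k1_ecmult_gen_blind stores blind = -b and initial = b*G, secp256k1_ecmult_gen adds blind to the requested scalar and starts its accumulator at initial, and the two offsets cancel. The toy program below checks only that bookkeeping, modelling "points" as integers modulo a small prime; names such as toy_ctx are purely illustrative and none of this is real field or group arithmetic.

    #include <assert.h>
    #include <stdio.h>

    /* Toy model: points are integers mod ORDER, the generator is GEN, and
     * scalar multiplication n*G is just (n * GEN) % ORDER. */
    #define ORDER 101u
    #define GEN     7u

    unsigned point_mul(unsigned n) {
        return (n * GEN) % ORDER;
    }

    /* Mirrors secp256k1_ecmult_gen_context_t: initial = b*G, blind = -b. */
    typedef struct { unsigned initial; unsigned blind; } toy_ctx;

    /* Mirrors secp256k1_ecmult_gen_blind: pick b, store -b and b*G. */
    void toy_blind(toy_ctx *ctx, unsigned b) {
        ctx->initial = point_mul(b);
        ctx->blind = (ORDER - b) % ORDER;              /* -b mod the group order */
    }

    /* Mirrors secp256k1_ecmult_gen: start at b*G, then add (n - b)*G. */
    unsigned toy_ecmult_gen(const toy_ctx *ctx, unsigned n) {
        unsigned blinded = (n + ctx->blind) % ORDER;   /* n - b */
        return (ctx->initial + point_mul(blinded)) % ORDER;
    }

    int main(void) {
        toy_ctx ctx;
        unsigned n, b;
        for (n = 0; n < ORDER; n++) {
            for (b = 1; b < ORDER; b++) {
                toy_blind(&ctx, b);
                assert(toy_ecmult_gen(&ctx, n) == point_mul(n));
            }
        }
        printf("blinding identity holds for all scalars\n");
        return 0;
    }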
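Both do/while loops in secp256k1_ecmult_gen_blind are rejection sampling: a fixed-width candidate from the HMAC-SHA256 stream is discarded if it does not decode to a valid, nonzero field element or scalar, so the accepted values stay uniform instead of being biased by a modular reduction. Here is a stripped-down sketch of that control flow under a toy 16-bit modulus; rand() stands in for the CSPRNG purely to show the loop structure and is not suitable for real use.

    #include <stdint.h>
    #include <stdlib.h>

    /* Rejection sampling: keep drawing 16-bit candidates until one lands in
     * [1, TOY_MODULUS-1].  Because the modulus is close to the full candidate
     * range (just as the secp256k1 field and group orders are close to 2^256),
     * rejections are rare and accepted values are uniform. */
    #define TOY_MODULUS 65521u                       /* largest prime below 2^16 */

    uint32_t toy_sample_nonzero(void) {
        uint32_t candidate;
        int retry;
        do {
            candidate = (uint32_t)rand() & 0xffffu;  /* fresh fixed-width candidate */
            retry = candidate >= TOY_MODULUS;        /* out of range: reject, don't reduce */
            retry |= candidate == 0;                 /* zero would undermine the blinding */
        } while (retry);
        return candidate;
    }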