Diffstat (limited to 'src/secp256k1/src/ecmult_impl.h')
-rw-r--r--  src/secp256k1/src/ecmult_impl.h  183
1 file changed, 109 insertions(+), 74 deletions(-)
diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h
index 1986914a4f..f03fa9469d 100644
--- a/src/secp256k1/src/ecmult_impl.h
+++ b/src/secp256k1/src/ecmult_impl.h
@@ -10,6 +10,7 @@
#include <string.h>
#include <stdint.h>
+#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
@@ -30,16 +31,32 @@
# endif
#else
/* optimal for 128-bit and 256-bit exponents. */
-#define WINDOW_A 5
-/** larger numbers may result in slightly better performance, at the cost of
- exponentially larger precomputed tables. */
-#ifdef USE_ENDOMORPHISM
-/** Two tables for window size 15: 1.375 MiB. */
-#define WINDOW_G 15
-#else
-/** One table for window size 16: 1.375 MiB. */
-#define WINDOW_G 16
+# define WINDOW_A 5
+/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
+ * performance at the cost of an exponentially larger precomputed
+ * table. The exact table size is
+ * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
+ * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
+ * be larger due to platform-specific padding and alignment.
+ * If the endomorphism optimization is enabled (USE_ENDOMORPHISM)
+ * two tables of this size are used instead of only one.
+ */
+# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
+
+/* No one will ever need more than a window size of 24. The code might
+ * be correct for larger values of ECMULT_WINDOW_SIZE but this is not
+ * tested.
+ *
+ * The following limitations are known, and there are probably more:
+ * If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect
+ * because the size of the memory object that we allocate (in bytes)
+ * will not fit in a size_t.
+ * If WINDOW_G > 31 and int has 32 bits, then the code is incorrect
+ * because certain expressions will overflow.
+ */
+#if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24
+# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif
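/* A standalone arithmetic check of the table-size formula above,
 * assuming the typical 64-byte sizeof(secp256k1_ge_storage); real
 * sizes can be larger due to padding and alignment. */
#include <stdio.h>

int main(void) {
    int window_g = 24;       /* the upper bound enforced by the #error above */
    size_t entry_size = 64;  /* assumed sizeof(secp256k1_ge_storage) */
    size_t table_bytes = ((size_t)1 << (window_g - 2)) * entry_size;
    printf("one table: %zu bytes\n", table_bytes); /* 268435456 = 256 MiB */
    return 0;
}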
#ifdef USE_ENDOMORPHISM
@@ -121,7 +138,7 @@ static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, sec
* It only operates on tables sized for WINDOW_A wnaf multiples.
* - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its
* resulting point set to actually affine points, and stores those in pre.
- * It operates on tables of any size, but uses heap-allocated temporaries.
+ * It operates on tables of any size.
*
* To compute a*P + b*G, we compute a table for P using the first function,
* and for G using the second (which requires an inverse, but it only needs to
@@ -294,6 +311,13 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25
} \
} while(0)
+static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
+#ifdef USE_ENDOMORPHISM
+ + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
+#endif
+ ;
+
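/* A minimal sketch of the alignment rounding assumed of ROUND_TO_ALIGN
 * (the real macro lives in util.h and may differ in detail): round a
 * size up to the next multiple of a power-of-two alignment. */
#include <stddef.h>

#define EXAMPLE_ALIGNMENT 16
static size_t example_round_to_align(size_t size) {
    /* Add ALIGNMENT-1, then clear the low bits; this rounds up and
     * requires EXAMPLE_ALIGNMENT to be a power of two. */
    return (size + EXAMPLE_ALIGNMENT - 1) & ~(size_t)(EXAMPLE_ALIGNMENT - 1);
}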
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
#ifdef USE_ENDOMORPHISM
@@ -301,8 +325,10 @@ static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
#endif
}
-static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) {
+static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
secp256k1_gej gj;
+ void* const base = *prealloc;
+ size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
if (ctx->pre_g != NULL) {
return;
@@ -311,7 +337,12 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
- ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
+ {
+ size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G));
+ /* check for overflow */
+ VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
+ ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
+ }
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
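/* The division-based overflow check above generalizes to any size_t
 * multiplication; a minimal sketch of the idiom: */
#include <stddef.h>

static int example_mul_overflows(size_t a, size_t b) {
    /* a * b wrapped around iff dividing the (wrapped) product by one
     * nonzero factor does not give back the other factor. */
    return a != 0 && (a * b) / a != b;
}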
@@ -321,7 +352,10 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const
secp256k1_gej g_128j;
int i;
- ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
+ size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G));
+ /* check for overflow */
+ VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
+ ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
/* calculate 2^128*generator */
g_128j = gj;
@@ -333,22 +367,14 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const
#endif
}
-static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
- const secp256k1_ecmult_context *src, const secp256k1_callback *cb) {
- if (src->pre_g == NULL) {
- dst->pre_g = NULL;
- } else {
- size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
- dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
- memcpy(dst->pre_g, src->pre_g, size);
+static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
+ if (src->pre_g != NULL) {
+ /* We cast to void* first to suppress a -Wcast-align warning. */
+ dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
#ifdef USE_ENDOMORPHISM
- if (src->pre_g_128 == NULL) {
- dst->pre_g_128 = NULL;
- } else {
- size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
- dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
- memcpy(dst->pre_g_128, src->pre_g_128, size);
+ if (src->pre_g_128 != NULL) {
+ dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
#endif
}
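/* A minimal sketch (hypothetical type and field names) of the
 * pointer-rebasing idiom in _finalize_memcpy above: after an object is
 * copied byte for byte, interior pointers must be recomputed relative
 * to the destination rather than reused verbatim. */
#include <string.h>

typedef struct { int *data; int storage[4]; } example_obj;

static void example_finalize_memcpy(example_obj *dst, const example_obj *src) {
    memcpy(dst, src, sizeof(*dst));
    if (src->data != NULL) {
        /* Keep the offset of data within the object; casting via void*
         * mirrors the -Wcast-align suppression above. */
        dst->data = (int *)(void *)((unsigned char *)dst +
            ((unsigned char *)src->data - (unsigned char *)src));
    }
}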
@@ -358,10 +384,6 @@ static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx
}
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
- free(ctx->pre_g);
-#ifdef USE_ENDOMORPHISM
- free(ctx->pre_g_128);
-#endif
secp256k1_ecmult_context_init(ctx);
}
@@ -373,7 +395,7 @@ static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
* than the number of bits in the (absolute value) of the input.
*/
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
- secp256k1_scalar s = *a;
+ secp256k1_scalar s;
int last_set_bit = -1;
int bit = 0;
int sign = 1;
@@ -386,6 +408,7 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a,
memset(wnaf, 0, len * sizeof(wnaf[0]));
+ s = *a;
if (secp256k1_scalar_get_bits(&s, 255, 1)) {
secp256k1_scalar_negate(&s, &s);
sign = -1;
@@ -418,7 +441,7 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a,
CHECK(carry == 0);
while (bit < 256) {
CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);
- }
+ }
#endif
return last_set_bit + 1;
}
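/* A toy width-w NAF for a small unsigned integer, illustrating the
 * invariant the function above maintains for 256-bit scalars: every
 * nonzero digit is odd and lies in (-2^(w-1), 2^(w-1)), and is
 * followed by at least w-1 zero digits. Not constant time. */
static int example_wnaf(int *wnaf, int len, unsigned int val, int w) {
    int bit = 0;
    while (val != 0 && bit < len) {
        if (val & 1) {
            int digit = (int)(val & ((1u << w) - 1));        /* low w bits */
            if (digit >= (1 << (w - 1))) digit -= 1 << w;    /* center on 0 */
            wnaf[bit] = digit;
            val -= (unsigned int)digit;  /* clears the low w bits (mod 2^N) */
        } else {
            wnaf[bit] = 0;
        }
        val >>= 1;
        bit++;
    }
    return bit; /* number of digits written */
}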
@@ -626,52 +649,55 @@ static size_t secp256k1_strauss_scratch_size(size_t n_points) {
return n_points*point_size;
}
-static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
secp256k1_gej* points;
secp256k1_scalar* scalars;
struct secp256k1_strauss_state state;
size_t i;
+ const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
secp256k1_gej_set_infinity(r);
if (inp_g_sc == NULL && n_points == 0) {
return 1;
}
- if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) {
- return 0;
- }
- points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej));
- scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar));
- state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
- state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
+ points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
+ scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
+ state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
+ state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
#ifdef USE_ENDOMORPHISM
- state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
+ state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
#else
- state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
+ state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
#endif
- state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
+ state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
+
+ if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+ return 0;
+ }
for (i = 0; i < n_points; i++) {
secp256k1_ge point;
if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
- secp256k1_scratch_deallocate_frame(scratch);
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
secp256k1_gej_set_ge(&points[i], &point);
}
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
- secp256k1_scratch_deallocate_frame(scratch);
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 1;
}
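/* A minimal sketch (hypothetical scratch type) of the checkpoint /
 * rollback pattern used above: record the allocation cursor on entry
 * and restore it on every exit path, so scratch temporaries are
 * released whether or not the computation succeeds. */
#include <stddef.h>

typedef struct { unsigned char buf[1024]; size_t alloc; } example_scratch;

static size_t example_checkpoint(const example_scratch *s) { return s->alloc; }
static void example_apply_checkpoint(example_scratch *s, size_t cp) { s->alloc = cp; }
static void *example_alloc(example_scratch *s, size_t size) {
    if (size > sizeof(s->buf) - s->alloc) return NULL; /* out of space */
    s->alloc += size;
    return &s->buf[s->alloc - size];
}

static int example_compute(example_scratch *s) {
    const size_t cp = example_checkpoint(s);
    void *tmp = example_alloc(s, 512);
    if (tmp == NULL) {
        example_apply_checkpoint(s, cp); /* roll back on failure */
        return 0;
    }
    /* ... use tmp ... */
    example_apply_checkpoint(s, cp);     /* and on success */
    return 1;
}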
/* Wrapper for secp256k1_ecmult_multi_func interface */
-static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
- return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
+static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+ return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
-static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) {
- return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
+static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
+ return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}
/** Convert a number to WNAF notation.
@@ -963,7 +989,8 @@ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_windo
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
-static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
+ const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
/* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
@@ -988,15 +1015,21 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
}
bucket_window = secp256k1_pippenger_bucket_window(n_points);
- if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
+ points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
+ scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
+ state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
+ if (points == NULL || scalars == NULL || state_space == NULL) {
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+ return 0;
+ }
+
+ state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
+ state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
+ buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
+ if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
- points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points));
- scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars));
- state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
- state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
- state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
- buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, sizeof(*buckets) << bucket_window);
if (inp_g_sc != NULL) {
scalars[0] = *inp_g_sc;
@@ -1010,7 +1043,7 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
while (point_idx < n_points) {
if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
- secp256k1_scratch_deallocate_frame(scratch);
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
idx++;
@@ -1034,13 +1067,13 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
for(i = 0; i < 1<<bucket_window; i++) {
secp256k1_gej_clear(&buckets[i]);
}
- secp256k1_scratch_deallocate_frame(scratch);
+ secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 1;
}
/* Wrapper for secp256k1_ecmult_multi_func interface */
-static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
- return secp256k1_ecmult_pippenger_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
+static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+ return secp256k1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
/**
@@ -1048,8 +1081,8 @@ static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_contex
* a given scratch space. The function ensures that fewer points may also be
* used.
*/
-static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
- size_t max_alloc = secp256k1_scratch_max_allocation(scratch, PIPPENGER_SCRATCH_OBJECTS);
+static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
+ size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
int bucket_window;
size_t res = 0;
@@ -1131,11 +1164,11 @@ static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n
return 1;
}
-typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
-static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
+typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
+static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
size_t i;
- int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
+ int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
size_t n_batches;
size_t n_batch_points;
@@ -1152,16 +1185,18 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
- /* Compute the batch sizes for pippenger given a scratch space. If it's greater than a threshold
- * use pippenger. Otherwise use strauss */
- if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(scratch), n)) {
- return 0;
+ /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than
+ * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
+ * As a first step check if there's enough space for Pippenger's algo (which requires less space
+ * than Strauss' algo) and if not, use the simple algorithm. */
+ if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
+ return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
f = secp256k1_ecmult_pippenger_batch;
} else {
- if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(scratch), n)) {
- return 0;
+ if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
+ return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
f = secp256k1_ecmult_strauss_batch;
}
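/* A sketch of the ceil-division batch split assumed of
 * secp256k1_ecmult_multi_batch_size_helper (hypothetical simplified
 * version; the real helper also caps the batch size): */
#include <stddef.h>

static int example_batch_split(size_t *n_batches, size_t *n_batch_points,
                               size_t max_points, size_t n) {
    if (max_points == 0) return 0; /* nothing fits: the caller above falls
                                    * back to the simple algorithm */
    if (n == 0) { *n_batches = 0; *n_batch_points = 0; return 1; }
    *n_batches = (n + max_points - 1) / max_points;      /* ceil(n/max)     */
    *n_batch_points = (n + *n_batches - 1) / *n_batches; /* balance batches */
    return 1;
}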
@@ -1169,7 +1204,7 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
size_t nbp = n < n_batch_points ? n : n_batch_points;
size_t offset = n_batch_points*i;
secp256k1_gej tmp;
- if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
+ if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
return 0;
}
secp256k1_gej_add_var(r, r, &tmp, NULL);