Diffstat (limited to 'src/scalar_4x64_impl.h')
-rw-r--r--  src/scalar_4x64_impl.h  102
1 file changed, 87 insertions, 15 deletions
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index f78718234f..d144775220 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -33,9 +33,27 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar_t *r) {
r->d[3] = 0;
}
-SECP256K1_INLINE static int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, int offset, int count) {
- VERIFY_CHECK((offset + count - 1) / 64 == offset / 64);
- return (a->d[offset / 64] >> (offset % 64)) & ((((uint64_t)1) << count) - 1);
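+/* Set the scalar to the small integer v; the three upper limbs are cleared. */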
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
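+/* Extract count bits from a, starting at position offset; the requested bits must all lie within one 64-bit limb. */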
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
+ return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
+}
+
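+/* As secp256k1_scalar_get_bits, but the bits may span two adjacent limbs; count must be below 32. */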
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 6 == offset >> 6) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ VERIFY_CHECK((offset >> 6) + 1 < 4);
+ return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
+ }
}
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) {
@@ -63,7 +81,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsigned int overflow) {
return overflow;
}
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
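+/* Returns 1 if the addition had to be reduced modulo the group order, 0 otherwise. */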
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
uint128_t t = (uint128_t)a->d[0] + b->d[0];
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)a->d[1] + b->d[1];
@@ -72,7 +90,26 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)a->d[3] + b->d[3];
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
- secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r));
+ int overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
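+/* Add 2^bit to r; under VERIFY, check that the result does not overflow the group order. */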
+static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
+ VERIFY_CHECK(bit < 256);
+ uint128_t t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 64) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) {
@@ -280,13 +317,11 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
-static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
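+/* Compute the full 512-bit product of a and b into l, without modular reduction. */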
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
/* 160 bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
- uint64_t l[8];
-
/* l[0..7] = a[0..3] * b[0..3]. */
muladd_fast(a->d[0], b->d[0]);
extract_fast(l[0]);
@@ -313,17 +348,13 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
-
- secp256k1_scalar_reduce_512(r, l);
}
-static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
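+/* Compute the full 512-bit square of a into l, without modular reduction. */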
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) {
/* 160 bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
- uint64_t l[8];
-
/* l[0..7] = a[0..3] * a[0..3]. */
muladd_fast(a->d[0], a->d[0]);
extract_fast(l[0]);
@@ -344,8 +375,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
-
- secp256k1_scalar_reduce_512(r, l);
}
#undef sumadd
@@ -356,4 +385,47 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
#undef extract
#undef extract_fast
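+/* Reduced multiplication and squaring: form the 512-bit result, then reduce it modulo the group order. */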
+static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+ uint64_t l[8];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
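+/* Split a into its low 128 bits (r1) and high 128 bits (r2). */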
+static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
+ r1->d[0] = a->d[0];
+ r1->d[1] = a->d[1];
+ r1->d[2] = 0;
+ r1->d[3] = 0;
+ r2->d[0] = a->d[2];
+ r2->d[1] = a->d[3];
+ r2->d[2] = 0;
+ r2->d[3] = 0;
+}
+
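+/* Branch-free equality test: XOR the limbs pairwise and OR the differences together. */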
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+ return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
+}
+
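+/* Compute r = round((a * b) / 2^shift) on the full 512-bit product; shift must be at least 256. */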
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
+ VERIFY_CHECK(shift >= 256);
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ unsigned int shiftlimbs = shift >> 6;
+ unsigned int shiftlow = shift & 0x3F;
+ unsigned int shifthigh = 64 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
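+ /* Round to nearest: add 1 if the highest bit shifted out is set. */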
+ if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) {
+ secp256k1_scalar_add_bit(r, 0);
+ }
+}
+
#endif