path: root/src/field_5x52_int128_impl.h
author     Pieter Wuille <pieter.wuille@gmail.com>    2015-03-27 14:03:36 -0700
committer  Pieter Wuille <pieter.wuille@gmail.com>    2015-03-27 14:03:36 -0700
commit     9d09322b41776a0d6ecde182f731eff77d0f052b (patch)
tree       1c8df9fa9ddfb2b035ac0327fe074b634e458f56 /src/field_5x52_int128_impl.h
parent     7873633b5752621548b8d38fc175f5a5f2f1e5d6 (diff)
Squashed 'src/secp256k1/' changes from 50cc6ab..1897b8e
1897b8e Merge pull request #229
efc571c Add simple testcases for signing with rfc6979 extra entropy.
1573a10 Add ability to pass extra entropy to rfc6979
3087bc4 Merge pull request #228
d9b9f11 Merge pull request #218
0065a8f Eliminate multiple-returns from secp256k1.c.
354ffa3 Make secp256k1_ec_pubkey_create reject oversized secrets.
27bc131 Silence some warnings from pedantic static analysis tools, improve compatibility with C++.
3b7ea63 Merge pull request #221
f789c5b Merge pull request #215
4bc273b Merge pull request #222
137a8ec Merge pull request #216
7c3771d Disable overlength-strings warnings.
8956111 use 128-bit hex seed
02efd06 Use RFC6979 for test PRNGs
ae55e85 Use faster byteswapping and avoid alignment-increasing casts.
443cd4b Get rid of hex format and some binary conversions
0bada0e Merge #214: Improve signing API documentation & specification
8030d7c Improve signing API documentation & specification
7b2fc1c Merge #213: Removed gotos, which are hard to trace and maintain.
11690d3 Removed gotos, which are hard to trace and maintain.
122a1ec Merge pull request #205
035406d Merge pull request #206
2d4cd53 Merge pull request #161
34b898d Additional comments for the testing PRNG and a seeding fix.
6efd6e7 Some comments explaining some of the constants in the code.
ffccfd2 x86_64 assembly optimization for scalar_4x64
67cbdf0 Merge pull request #207
039723d Benchmarks for all internal operations
6cc8425 Include a comment on secp256k1_ecdsa_sign explaining low-s.
f88343f Merge pull request #203
d61e899 Add group operation counts
2473f17 Merge pull request #202
b5bbce6 Some readme updates, e.g. removal of the GMP field.
f0d851e Merge pull request #201
a0ea884 Merge pull request #200
f735446 Convert the rest of the codebase to C89.
bf2e1ac Convert tests to C89. (also fixes a use of bare "inline" in field)
fc8285f Merge pull request #199
fff412e Merge pull request #197
4be8d6f Centralize the definition of uint128_t and use it uniformly.
d9543c9 Switch scalar code to C89.
fcc48c4 Remove the non-storage cmov
55422b6 Switch ecmult_gen to use storage types
41f8455 Use group element storage type in EC multiplications
e68d720 Add group element storage type
ff889f7 Field storage type
7137be8 Merge pull request #196
0768bd5 Get rid of variable-length hex string conversions
e84e761 Merge pull request #195
792bcdb Covert several more files to C89.
45cdf44 Merge pull request #193
17db09e Merge pull request #194
402878a fix ifdef/ifndef
25b35c7 Convert field code to strict C89 (+ long long, +__int128)
3627437 C89 nits and dead code removal.
a9f350d Merge pull request #191
4732d26 Convert the field/group/ecdsa constant initialization to static consts
19f3e76 Remove unused secp256k1_fe_inner_{start, stop} functions
f1ebfe3 Convert the scalar constant initialization to static consts

git-subtree-dir: src/secp256k1
git-subtree-split: 1897b8e90bbbdcd919427c9a8ae35b420e919d8f
Diffstat (limited to 'src/field_5x52_int128_impl.h')
-rw-r--r--    src/field_5x52_int128_impl.h    120
1 file changed, 60 insertions(+), 60 deletions(-)
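
Of the squashed commits listed above, the churn in this file comes mainly from "4be8d6f Centralize the definition of uint128_t and use it uniformly" and the strict-C89 field conversion ("25b35c7"): every (__int128) cast becomes the shared (uint128_t) alias, and all locals are declared at the top of the function body. As a rough sketch only (the exact guards and location of the alias in util.h at this revision may differ), the centralized type boils down to:

    #include <stdint.h>

    /* Illustrative sketch, not the verbatim library header: expose the
     * GCC/Clang 128-bit extension under a single shared name. */
    #if defined(__SIZEOF_INT128__) || defined(HAVE___INT128)
    typedef unsigned __int128 uint128_t;
    #endif

    /* Hypothetical helper showing why the field code wants this type: the
     * casts in the diff, e.g. (uint128_t)a0 * b[3], widen one operand so the
     * multiplication yields the full 128-bit product. */
    static uint64_t mul_hi_64(uint64_t x, uint64_t y) {
        return (uint64_t)(((uint128_t)x * y) >> 64); /* top 64 bits */
    }
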
diff --git a/src/field_5x52_int128_impl.h b/src/field_5x52_int128_impl.h
index ec631833cf..9280bb5ea2 100644
--- a/src/field_5x52_int128_impl.h
+++ b/src/field_5x52_int128_impl.h
@@ -16,6 +16,11 @@
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
+ uint128_t c, d;
+ uint64_t t3, t4, tx, u0;
+ uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
+ const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
+
VERIFY_BITS(a[0], 56);
VERIFY_BITS(a[1], 56);
VERIFY_BITS(a[2], 56);
@@ -28,63 +33,58 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(b[4], 52);
VERIFY_CHECK(r != b);
- const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
/* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
* px is a shorthand for sum(a[i]*b[x-i], i=0..x).
* Note that [x 0 0 0 0 0] = [x*R].
*/
- uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
-
- __int128 c, d;
-
- d = (__int128)a0 * b[3]
- + (__int128)a1 * b[2]
- + (__int128)a2 * b[1]
- + (__int128)a3 * b[0];
+ d = (uint128_t)a0 * b[3]
+ + (uint128_t)a1 * b[2]
+ + (uint128_t)a2 * b[1]
+ + (uint128_t)a3 * b[0];
VERIFY_BITS(d, 114);
/* [d 0 0 0] = [p3 0 0 0] */
- c = (__int128)a4 * b[4];
+ c = (uint128_t)a4 * b[4];
VERIFY_BITS(c, 112);
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
d += (c & M) * R; c >>= 52;
VERIFY_BITS(d, 115);
VERIFY_BITS(c, 60);
/* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
- uint64_t t3 = d & M; d >>= 52;
+ t3 = d & M; d >>= 52;
VERIFY_BITS(t3, 52);
VERIFY_BITS(d, 63);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
- d += (__int128)a0 * b[4]
- + (__int128)a1 * b[3]
- + (__int128)a2 * b[2]
- + (__int128)a3 * b[1]
- + (__int128)a4 * b[0];
+ d += (uint128_t)a0 * b[4]
+ + (uint128_t)a1 * b[3]
+ + (uint128_t)a2 * b[2]
+ + (uint128_t)a3 * b[1]
+ + (uint128_t)a4 * b[0];
VERIFY_BITS(d, 115);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
d += c * R;
VERIFY_BITS(d, 116);
/* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- uint64_t t4 = d & M; d >>= 52;
+ t4 = d & M; d >>= 52;
VERIFY_BITS(t4, 52);
VERIFY_BITS(d, 64);
/* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- uint64_t tx = (t4 >> 48); t4 &= (M >> 4);
+ tx = (t4 >> 48); t4 &= (M >> 4);
VERIFY_BITS(tx, 4);
VERIFY_BITS(t4, 48);
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- c = (__int128)a0 * b[0];
+ c = (uint128_t)a0 * b[0];
VERIFY_BITS(c, 112);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
- d += (__int128)a1 * b[4]
- + (__int128)a2 * b[3]
- + (__int128)a3 * b[2]
- + (__int128)a4 * b[1];
+ d += (uint128_t)a1 * b[4]
+ + (uint128_t)a2 * b[3]
+ + (uint128_t)a3 * b[2]
+ + (uint128_t)a4 * b[1];
VERIFY_BITS(d, 115);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
- uint64_t u0 = d & M; d >>= 52;
+ u0 = d & M; d >>= 52;
VERIFY_BITS(u0, 52);
VERIFY_BITS(d, 63);
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
@@ -92,7 +92,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
u0 = (u0 << 4) | tx;
VERIFY_BITS(u0, 56);
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
- c += (__int128)u0 * (R >> 4);
+ c += (uint128_t)u0 * (R >> 4);
VERIFY_BITS(c, 115);
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
r[0] = c & M; c >>= 52;
@@ -100,13 +100,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(c, 61);
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
- c += (__int128)a0 * b[1]
- + (__int128)a1 * b[0];
+ c += (uint128_t)a0 * b[1]
+ + (uint128_t)a1 * b[0];
VERIFY_BITS(c, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
- d += (__int128)a2 * b[4]
- + (__int128)a3 * b[3]
- + (__int128)a4 * b[2];
+ d += (uint128_t)a2 * b[4]
+ + (uint128_t)a3 * b[3]
+ + (uint128_t)a4 * b[2];
VERIFY_BITS(d, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
c += (d & M) * R; d >>= 52;
@@ -118,13 +118,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(c, 63);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
- c += (__int128)a0 * b[2]
- + (__int128)a1 * b[1]
- + (__int128)a2 * b[0];
+ c += (uint128_t)a0 * b[2]
+ + (uint128_t)a1 * b[1]
+ + (uint128_t)a2 * b[0];
VERIFY_BITS(c, 114);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
- d += (__int128)a3 * b[4]
- + (__int128)a4 * b[3];
+ d += (uint128_t)a3 * b[4]
+ + (uint128_t)a4 * b[3];
VERIFY_BITS(d, 114);
/* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
c += (d & M) * R; d >>= 52;
@@ -153,64 +153,64 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
}
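
A note on the constants before the squaring routine: the shorthand comments track everything modulo the secp256k1 field prime (written n in those comments), p = 2^256 - 2^32 - 977 = 2^256 - 0x1000003D1, with five 52-bit limbs so that limb i carries weight 2^(52*i). The identity behind "[x 0 0 0 0 0] = [x*R]" is

    2^{260} = 2^{4} \cdot 2^{256} \equiv 2^{4} \cdot \mathtt{0x1000003D1}
            = \mathtt{0x1000003D10} = R \pmod{p}

so anything carried past the fifth limb (weight 2^260) folds back into the low limbs as a multiplication by R = 0x1000003D10, while M = 2^52 - 1 = 0xFFFFFFFFFFFFF is simply the limb mask used by the "& M" / ">>= 52" steps.
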
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
+ uint128_t c, d;
+ uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
+ int64_t t3, t4, tx, u0;
+ const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
+
VERIFY_BITS(a[0], 56);
VERIFY_BITS(a[1], 56);
VERIFY_BITS(a[2], 56);
VERIFY_BITS(a[3], 56);
VERIFY_BITS(a[4], 52);
- const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
/** [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
* px is a shorthand for sum(a[i]*a[x-i], i=0..x).
* Note that [x 0 0 0 0 0] = [x*R].
*/
- __int128 c, d;
-
- uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
-
- d = (__int128)(a0*2) * a3
- + (__int128)(a1*2) * a2;
+ d = (uint128_t)(a0*2) * a3
+ + (uint128_t)(a1*2) * a2;
VERIFY_BITS(d, 114);
/* [d 0 0 0] = [p3 0 0 0] */
- c = (__int128)a4 * a4;
+ c = (uint128_t)a4 * a4;
VERIFY_BITS(c, 112);
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
d += (c & M) * R; c >>= 52;
VERIFY_BITS(d, 115);
VERIFY_BITS(c, 60);
/* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
- uint64_t t3 = d & M; d >>= 52;
+ t3 = d & M; d >>= 52;
VERIFY_BITS(t3, 52);
VERIFY_BITS(d, 63);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
a4 *= 2;
- d += (__int128)a0 * a4
- + (__int128)(a1*2) * a3
- + (__int128)a2 * a2;
+ d += (uint128_t)a0 * a4
+ + (uint128_t)(a1*2) * a3
+ + (uint128_t)a2 * a2;
VERIFY_BITS(d, 115);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
d += c * R;
VERIFY_BITS(d, 116);
/* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- uint64_t t4 = d & M; d >>= 52;
+ t4 = d & M; d >>= 52;
VERIFY_BITS(t4, 52);
VERIFY_BITS(d, 64);
/* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- uint64_t tx = (t4 >> 48); t4 &= (M >> 4);
+ tx = (t4 >> 48); t4 &= (M >> 4);
VERIFY_BITS(tx, 4);
VERIFY_BITS(t4, 48);
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- c = (__int128)a0 * a0;
+ c = (uint128_t)a0 * a0;
VERIFY_BITS(c, 112);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
- d += (__int128)a1 * a4
- + (__int128)(a2*2) * a3;
+ d += (uint128_t)a1 * a4
+ + (uint128_t)(a2*2) * a3;
VERIFY_BITS(d, 114);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
- uint64_t u0 = d & M; d >>= 52;
+ u0 = d & M; d >>= 52;
VERIFY_BITS(u0, 52);
VERIFY_BITS(d, 62);
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
@@ -218,7 +218,7 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
u0 = (u0 << 4) | tx;
VERIFY_BITS(u0, 56);
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
- c += (__int128)u0 * (R >> 4);
+ c += (uint128_t)u0 * (R >> 4);
VERIFY_BITS(c, 113);
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
r[0] = c & M; c >>= 52;
@@ -227,11 +227,11 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
a0 *= 2;
- c += (__int128)a0 * a1;
+ c += (uint128_t)a0 * a1;
VERIFY_BITS(c, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
- d += (__int128)a2 * a4
- + (__int128)a3 * a3;
+ d += (uint128_t)a2 * a4
+ + (uint128_t)a3 * a3;
VERIFY_BITS(d, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
c += (d & M) * R; d >>= 52;
@@ -243,11 +243,11 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
VERIFY_BITS(c, 63);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
- c += (__int128)a0 * a2
- + (__int128)a1 * a1;
+ c += (uint128_t)a0 * a2
+ + (uint128_t)a1 * a1;
VERIFY_BITS(c, 114);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
- d += (__int128)a3 * a4;
+ d += (uint128_t)a3 * a4;
VERIFY_BITS(d, 114);
/* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
c += (d & M) * R; d >>= 52;
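
The VERIFY_BITS(x, n) calls that punctuate both functions document the invariant that x fits in n bits at that point; they are checked only in VERIFY builds and compile to nothing otherwise. A simplified, self-contained stand-in is sketched below (the library's own macro is defined near the top of this header, behind the #endif shown as the first hunk's opening context, and forwards to VERIFY_CHECK rather than assert):

    #include <assert.h>
    #include <stdint.h>

    /* Simplified sketch of the bit-width check: x must occupy at most n bits,
     * i.e. (x >> n) == 0. The real macro uses VERIFY_CHECK instead of assert
     * and is compiled out unless VERIFY is defined. */
    #ifdef VERIFY
    #define VERIFY_BITS(x, n) assert(((x) >> (n)) == 0)
    #else
    #define VERIFY_BITS(x, n) do { } while(0)
    #endif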