From 64dfdde0aa7f7ef24e6cbf3c57e6d24efc55367e Mon Sep 17 00:00:00 2001
From: MarcoFalke
Date: Tue, 13 Dec 2016 12:37:40 +0100
Subject: Squashed 'src/secp256k1/' changes from 6c527ec..8225239

8225239 Merge #433: Make the libcrypto detection fail the newer API.
12de863 Make the libcrypto detection fail the newer API.
2928420 Merge #427: Remove Schnorr from travis as well
8eecc4a Remove Schnorr from travis as well
a8abae7 Merge #310: Add exhaustive test for group functions on a low-order subgroup
b4ceedf Add exhaustive test for verification
83836a9 Add exhaustive tests for group arithmetic, signing, and ecmult on a small group
20b8877 Add exhaustive test for group functions on a low-order subgroup
80773a6 Merge #425: Remove Schnorr experiment
e06e878 Remove Schnorr experiment
04c8ef3 Merge #407: Modify parameter order of internal functions to match API parameter order
6e06696 Merge #411: Remove guarantees about memcmp-ability
40c8d7e Merge #421: Update scalar_4x64_impl.h
a922365 Merge #422: Restructure nonce clearing
3769783 Restructure nonce clearing
0f9e69d Restructure nonce clearing
9d67afa Update scalar_4x64_impl.h
7d15cd7 Merge #413: fix auto-enabled static precompuatation
00c5d2e fix auto-enabled static precompuatation
91219a1 Remove guarantees about memcmp-ability
7a49cac Merge #410: Add string.h include to ecmult_impl
0bbd5d4 Add string.h include to ecmult_impl
353c1bf Fix secp256k1_ge_set_table_gej_var parameter order
541b783 Fix secp256k1_ge_set_all_gej_var parameter order
7d893f4 Fix secp256k1_fe_inv_all_var parameter order
c5b32e1 Merge #405: Make secp256k1_fe_sqrt constant time
926836a Make secp256k1_fe_sqrt constant time
e2a8e92 Merge #404: Replace 3M + 4S doubling formula with 2M + 5S one
8ec49d8 Add note about 2M + 5S doubling formula
5a91bd7 Merge #400: A couple minor cleanups
ac01378 build: add -DSECP256K1_BUILD to benchmark_internal build flags
a6c6f99 Remove a bunch of unused stdlib #includes
65285a6 Merge #403: configure: add flag to disable OpenSSL tests
a9b2a5d configure: add flag to disable OpenSSL tests
b340123 Merge #402: Add support for testing quadratic residues
e6e9805 Add function for testing quadratic residue field/group elements.
efd953a Add Jacobi symbol test via GMP
fa36a0d Merge #401: ecmult_const: unify endomorphism and non-endomorphism skew cases
c6191fd ecmult_const: unify endomorphism and non-endomorphism skew cases
0b3e618 Merge #378: .gitignore build-aux cleanup
6042217 Merge #384: JNI: align shared files copyright/comments to bitcoinj's
24ad20f Merge #399: build: verify that the native compiler works for static precomp
b3be852 Merge #398: Test whether ECDH and Schnorr are enabled for JNI
aa0b1fd build: verify that the native compiler works for static precomp
eee808d Test whether ECDH and Schnorr are enabled for JNI
7b0fb18 Merge #366: ARM assembly implementation of field_10x26 inner (rebase of #173)
001f176 ARM assembly implementation of field_10x26 inner
0172be9 Merge #397: Small fixes for sha256
3f8b78e Fix undefs in hash_impl.h
2ab4695 Fix state size in sha256 struct
6875b01 Merge #386: Add some missing `VERIFY_CHECK(ctx != NULL)`
2c52b5d Merge #389: Cast pointers through uintptr_t under JNI
43097a4 Merge #390: Update bitcoin-core GitHub links
31c9c12 Merge #391: JNI: Only call ecdsa_verify if its inputs parsed correctly
1cb2302 Merge #392: Add testcase which hits additional branch in secp256k1_scalar_sqr
d2ee340 Merge #388: bench_ecdh: fix call to secp256k1_context_create
093a497 Add testcase which hits additional branch in secp256k1_scalar_sqr
a40c701 JNI: Only call ecdsa_verify if its inputs parsed correctly
faa2a11 Update bitcoin-core GitHub links
47b9e78 Cast pointers through uintptr_t under JNI
f36f9c6 bench_ecdh: fix call to secp256k1_context_create
bcc4881 Add some missing `VERIFY_CHECK(ctx != NULL)` for functions that use `ARG_CHECK`
6ceea2c align shared files copyright/comments to bitcoinj's
70141a8 Update .gitignore
7b549b1 Merge #373: build: fix x86_64 asm detection for some compilers
bc7c93c Merge #374: Add note about y=0 being possible on one of the sextic twists
e457018 Merge #364: JNI rebased
86e2d07 JNI library: cleanup, removed unimplemented code
3093576a JNI library
bd2895f Merge pull request #371
e72e93a Add note about y=0 being possible on one of the sextic twists
3f8fdfb build: fix x86_64 asm detection for some compilers
e5a9047 [Trivial] Remove double semicolons
c18b869 Merge pull request #360
3026daa Merge pull request #302
03d4611 Add sage verification script for the group laws
a965937 Merge pull request #361
83221ec Add experimental features to configure
5d4c5a3 Prevent damage_array in the signature test from going out of bounds.
419bf7f Merge pull request #356
03d84a4 Benchmark against OpenSSL verification

git-subtree-dir: src/secp256k1
git-subtree-split: 8225239f490f79842a5a3b82ad6cc8aa11d5208e
---
 src/asm/field_10x26_arm.s                     | 919 ++++++++++++++++++++++++++
 src/bench_ecdh.c                              |   3 +-
 src/bench_internal.c                          |  34 +-
 src/bench_verify.c                            |  44 ++
 src/ecdsa_impl.h                              |  18 +
 src/ecmult_const_impl.h                       |  69 +-
 src/ecmult_gen_impl.h                         |   2 +-
 src/ecmult_impl.h                             |  21 +-
 src/field.h                                   |  15 +-
 src/field_10x26_impl.h                        |  12 +-
 src/field_5x52_impl.h                         |   1 -
 src/field_5x52_int128_impl.h                  |   4 +-
 src/field_impl.h                              |  38 +-
 src/group.h                                   |   9 +-
 src/group_impl.h                              | 112 +++-
 src/hash.h                                    |   2 +-
 src/hash_impl.h                               |  10 +-
 src/java/org/bitcoin/NativeSecp256k1.java     | 440 +++++++++++-
 src/java/org/bitcoin/NativeSecp256k1Test.java | 226 +++++++
 src/java/org/bitcoin/NativeSecp256k1Util.java |  45 ++
 src/java/org/bitcoin/Secp256k1Context.java    |  51 ++
 src/java/org_bitcoin_NativeSecp256k1.c        | 378 ++++++++++-
 src/java/org_bitcoin_NativeSecp256k1.h        | 104 ++-
 src/java/org_bitcoin_Secp256k1Context.c       |  15 +
 src/java/org_bitcoin_Secp256k1Context.h       |  22 +
 src/modules/ecdh/Makefile.am.include          |   2 +-
 src/modules/recovery/Makefile.am.include      |   2 +-
 src/modules/recovery/main_impl.h              |   4 +-
 src/modules/schnorr/Makefile.am.include       |  10 -
 src/modules/schnorr/main_impl.h               | 164 -----
 src/modules/schnorr/schnorr.h                 |  20 -
 src/modules/schnorr/schnorr_impl.h            | 207 ------
 src/modules/schnorr/tests_impl.h              | 175 -----
 src/num.h                                     |   6 +
 src/num_gmp_impl.h                            |  26 +
 src/scalar.h                                  |   4 +-
 src/scalar_4x64_impl.h                        |  26 +-
 src/scalar_impl.h                             |  41 +-
 src/scalar_low.h                              |  15 +
 src/scalar_low_impl.h                         | 114 ++++
 src/secp256k1.c                               |  19 +-
 src/tests.c                                   | 202 +++++-
 src/tests_exhaustive.c                        | 329 +++++++++
 43 files changed, 3188 insertions(+), 772 deletions(-)
 create mode 100644 src/asm/field_10x26_arm.s
 create mode 100644 src/java/org/bitcoin/NativeSecp256k1Test.java
 create mode 100644 src/java/org/bitcoin/NativeSecp256k1Util.java
 create mode 100644 src/java/org/bitcoin/Secp256k1Context.java
 create mode 100644 src/java/org_bitcoin_Secp256k1Context.c
 create mode 100644 src/java/org_bitcoin_Secp256k1Context.h
 mode change 100644 => 100755 src/modules/recovery/main_impl.h
 delete mode 100644 src/modules/schnorr/Makefile.am.include
 delete mode 100644 src/modules/schnorr/main_impl.h
 delete mode 100644 src/modules/schnorr/schnorr.h
 delete mode 100644 src/modules/schnorr/schnorr_impl.h
 delete mode 100644 src/modules/schnorr/tests_impl.h
 create mode 100644 src/scalar_low.h
 create mode 100644 src/scalar_low_impl.h
 mode change 100644 => 100755 src/secp256k1.c
 create mode 100644 src/tests_exhaustive.c

diff --git a/src/asm/field_10x26_arm.s b/src/asm/field_10x26_arm.s
new file mode 100644
index 0000000000..5df561f2fc
--- /dev/null
+++ b/src/asm/field_10x26_arm.s
@@ -0,0 +1,919 @@
+@ vim: set tabstop=8 softtabstop=8 shiftwidth=8 noexpandtab syntax=armasm:
+/**********************************************************************
+ * Copyright (c) 2014 Wladimir J. van der Laan                        *
+ * Distributed under the MIT software license, see the accompanying  *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+/*
+ARM implementation of field_10x26 inner loops.
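For reference, the field_10x26 representation these loops operate on holds a field element in 10 limbs of 26 bits each, and the constants field_R0 = 0x3d10 and field_R1 = 0x400 defined just below come from the secp256k1 field prime p = 2^256 - 2^32 - 977: since 2^256 ≡ 0x1000003D1 (mod p), a carry at bit 260 (10 limbs × 26 bits) folds back in via 2^260 ≡ (0x400 << 26) + 0x3d10 (mod p). A minimal standalone check of that arithmetic (illustrative sketch, not taken from this patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        /* 2^256 mod p for p = 2^256 - 2^32 - 977 (2^32 + 977 == 0x1000003D1). */
        const uint64_t c = 0x1000003D1ULL;
        const uint64_t r0 = 0x3d10, r1 = 0x400;
        /* 2^260 mod p is that value shifted left by 4 bits, and it splits into
         * the two constants used by the reduction steps in the assembly below. */
        assert((c << 4) == ((r1 << 26) | r0));
        return 0;
    }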
+ +Note: + +- To avoid unnecessary loads and make use of available registers, two + 'passes' have every time been interleaved, with the odd passes accumulating c' and d' + which will be added to c and d respectively in the the even passes + +*/ + + .syntax unified + .arch armv7-a + @ eabi attributes - see readelf -A + .eabi_attribute 8, 1 @ Tag_ARM_ISA_use = yes + .eabi_attribute 9, 0 @ Tag_Thumb_ISA_use = no + .eabi_attribute 10, 0 @ Tag_FP_arch = none + .eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte + .eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP + .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals = Agressive Speed + .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access = v6 + .text + + @ Field constants + .set field_R0, 0x3d10 + .set field_R1, 0x400 + .set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff + + .align 2 + .global secp256k1_fe_mul_inner + .type secp256k1_fe_mul_inner, %function + @ Arguments: + @ r0 r Restrict: can overlap with a, not with b + @ r1 a + @ r2 b + @ Stack (total 4+10*4 = 44) + @ sp + #0 saved 'r' pointer + @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 +secp256k1_fe_mul_inner: + stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} + sub sp, sp, #48 @ frame=44 + alignment + str r0, [sp, #0] @ save result address, we need it only at the end + + /****************************************** + * Main computation code. + ****************************************** + + Allocation: + r0,r14,r7,r8 scratch + r1 a (pointer) + r2 b (pointer) + r3:r4 c + r5:r6 d + r11:r12 c' + r9:r10 d' + + Note: do not write to r[] here, it may overlap with a[] + */ + + /* A - interleaved with B */ + ldr r7, [r1, #0*4] @ a[0] + ldr r8, [r2, #9*4] @ b[9] + ldr r0, [r1, #1*4] @ a[1] + umull r5, r6, r7, r8 @ d = a[0] * b[9] + ldr r14, [r2, #8*4] @ b[8] + umull r9, r10, r0, r8 @ d' = a[1] * b[9] + ldr r7, [r1, #2*4] @ a[2] + umlal r5, r6, r0, r14 @ d += a[1] * b[8] + ldr r8, [r2, #7*4] @ b[7] + umlal r9, r10, r7, r14 @ d' += a[2] * b[8] + ldr r0, [r1, #3*4] @ a[3] + umlal r5, r6, r7, r8 @ d += a[2] * b[7] + ldr r14, [r2, #6*4] @ b[6] + umlal r9, r10, r0, r8 @ d' += a[3] * b[7] + ldr r7, [r1, #4*4] @ a[4] + umlal r5, r6, r0, r14 @ d += a[3] * b[6] + ldr r8, [r2, #5*4] @ b[5] + umlal r9, r10, r7, r14 @ d' += a[4] * b[6] + ldr r0, [r1, #5*4] @ a[5] + umlal r5, r6, r7, r8 @ d += a[4] * b[5] + ldr r14, [r2, #4*4] @ b[4] + umlal r9, r10, r0, r8 @ d' += a[5] * b[5] + ldr r7, [r1, #6*4] @ a[6] + umlal r5, r6, r0, r14 @ d += a[5] * b[4] + ldr r8, [r2, #3*4] @ b[3] + umlal r9, r10, r7, r14 @ d' += a[6] * b[4] + ldr r0, [r1, #7*4] @ a[7] + umlal r5, r6, r7, r8 @ d += a[6] * b[3] + ldr r14, [r2, #2*4] @ b[2] + umlal r9, r10, r0, r8 @ d' += a[7] * b[3] + ldr r7, [r1, #8*4] @ a[8] + umlal r5, r6, r0, r14 @ d += a[7] * b[2] + ldr r8, [r2, #1*4] @ b[1] + umlal r9, r10, r7, r14 @ d' += a[8] * b[2] + ldr r0, [r1, #9*4] @ a[9] + umlal r5, r6, r7, r8 @ d += a[8] * b[1] + ldr r14, [r2, #0*4] @ b[0] + umlal r9, r10, r0, r8 @ d' += a[9] * b[1] + ldr r7, [r1, #0*4] @ a[0] + umlal r5, r6, r0, r14 @ d += a[9] * b[0] + @ r7,r14 used in B + + bic r0, r5, field_not_M @ t9 = d & M + str r0, [sp, #4 + 4*9] + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + + /* B */ + umull r3, r4, r7, r14 @ c = a[0] * b[0] + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u0 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u0 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t0 = c & M + 
str r14, [sp, #4 + 0*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u0 * R1 + umlal r3, r4, r0, r14 + + /* C - interleaved with D */ + ldr r7, [r1, #0*4] @ a[0] + ldr r8, [r2, #2*4] @ b[2] + ldr r14, [r2, #1*4] @ b[1] + umull r11, r12, r7, r8 @ c' = a[0] * b[2] + ldr r0, [r1, #1*4] @ a[1] + umlal r3, r4, r7, r14 @ c += a[0] * b[1] + ldr r8, [r2, #0*4] @ b[0] + umlal r11, r12, r0, r14 @ c' += a[1] * b[1] + ldr r7, [r1, #2*4] @ a[2] + umlal r3, r4, r0, r8 @ c += a[1] * b[0] + ldr r14, [r2, #9*4] @ b[9] + umlal r11, r12, r7, r8 @ c' += a[2] * b[0] + ldr r0, [r1, #3*4] @ a[3] + umlal r5, r6, r7, r14 @ d += a[2] * b[9] + ldr r8, [r2, #8*4] @ b[8] + umull r9, r10, r0, r14 @ d' = a[3] * b[9] + ldr r7, [r1, #4*4] @ a[4] + umlal r5, r6, r0, r8 @ d += a[3] * b[8] + ldr r14, [r2, #7*4] @ b[7] + umlal r9, r10, r7, r8 @ d' += a[4] * b[8] + ldr r0, [r1, #5*4] @ a[5] + umlal r5, r6, r7, r14 @ d += a[4] * b[7] + ldr r8, [r2, #6*4] @ b[6] + umlal r9, r10, r0, r14 @ d' += a[5] * b[7] + ldr r7, [r1, #6*4] @ a[6] + umlal r5, r6, r0, r8 @ d += a[5] * b[6] + ldr r14, [r2, #5*4] @ b[5] + umlal r9, r10, r7, r8 @ d' += a[6] * b[6] + ldr r0, [r1, #7*4] @ a[7] + umlal r5, r6, r7, r14 @ d += a[6] * b[5] + ldr r8, [r2, #4*4] @ b[4] + umlal r9, r10, r0, r14 @ d' += a[7] * b[5] + ldr r7, [r1, #8*4] @ a[8] + umlal r5, r6, r0, r8 @ d += a[7] * b[4] + ldr r14, [r2, #3*4] @ b[3] + umlal r9, r10, r7, r8 @ d' += a[8] * b[4] + ldr r0, [r1, #9*4] @ a[9] + umlal r5, r6, r7, r14 @ d += a[8] * b[3] + ldr r8, [r2, #2*4] @ b[2] + umlal r9, r10, r0, r14 @ d' += a[9] * b[3] + umlal r5, r6, r0, r8 @ d += a[9] * b[2] + + bic r0, r5, field_not_M @ u1 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u1 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t1 = c & M + str r14, [sp, #4 + 1*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u1 * R1 + umlal r3, r4, r0, r14 + + /* D */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u2 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u2 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t2 = c & M + str r14, [sp, #4 + 2*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u2 * R1 + umlal r3, r4, r0, r14 + + /* E - interleaved with F */ + ldr r7, [r1, #0*4] @ a[0] + ldr r8, [r2, #4*4] @ b[4] + umull r11, r12, r7, r8 @ c' = a[0] * b[4] + ldr r8, [r2, #3*4] @ b[3] + umlal r3, r4, r7, r8 @ c += a[0] * b[3] + ldr r7, [r1, #1*4] @ a[1] + umlal r11, r12, r7, r8 @ c' += a[1] * b[3] + ldr r8, [r2, #2*4] @ b[2] + umlal r3, r4, r7, r8 @ c += a[1] * b[2] + ldr r7, [r1, #2*4] @ a[2] + umlal r11, r12, r7, r8 @ c' += a[2] * b[2] + ldr r8, [r2, #1*4] @ b[1] + umlal r3, r4, r7, r8 @ c += a[2] * b[1] + ldr r7, [r1, #3*4] @ a[3] + umlal r11, r12, r7, r8 @ c' += a[3] * b[1] + ldr r8, [r2, #0*4] @ b[0] + umlal r3, r4, r7, r8 @ c += a[3] * b[0] + ldr r7, [r1, #4*4] @ a[4] + umlal r11, r12, r7, r8 @ c' += a[4] * b[0] + ldr r8, [r2, #9*4] @ b[9] + umlal r5, r6, r7, r8 @ d += a[4] * b[9] + ldr r7, [r1, #5*4] @ a[5] + umull r9, r10, r7, r8 @ d' = a[5] * b[9] + ldr r8, [r2, #8*4] @ b[8] + umlal r5, r6, r7, r8 @ d += a[5] * b[8] + ldr r7, [r1, #6*4] @ a[6] + umlal r9, r10, r7, r8 @ d' += a[6] * b[8] + ldr r8, [r2, #7*4] @ 
b[7] + umlal r5, r6, r7, r8 @ d += a[6] * b[7] + ldr r7, [r1, #7*4] @ a[7] + umlal r9, r10, r7, r8 @ d' += a[7] * b[7] + ldr r8, [r2, #6*4] @ b[6] + umlal r5, r6, r7, r8 @ d += a[7] * b[6] + ldr r7, [r1, #8*4] @ a[8] + umlal r9, r10, r7, r8 @ d' += a[8] * b[6] + ldr r8, [r2, #5*4] @ b[5] + umlal r5, r6, r7, r8 @ d += a[8] * b[5] + ldr r7, [r1, #9*4] @ a[9] + umlal r9, r10, r7, r8 @ d' += a[9] * b[5] + ldr r8, [r2, #4*4] @ b[4] + umlal r5, r6, r7, r8 @ d += a[9] * b[4] + + bic r0, r5, field_not_M @ u3 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u3 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t3 = c & M + str r14, [sp, #4 + 3*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u3 * R1 + umlal r3, r4, r0, r14 + + /* F */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u4 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u4 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t4 = c & M + str r14, [sp, #4 + 4*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u4 * R1 + umlal r3, r4, r0, r14 + + /* G - interleaved with H */ + ldr r7, [r1, #0*4] @ a[0] + ldr r8, [r2, #6*4] @ b[6] + ldr r14, [r2, #5*4] @ b[5] + umull r11, r12, r7, r8 @ c' = a[0] * b[6] + ldr r0, [r1, #1*4] @ a[1] + umlal r3, r4, r7, r14 @ c += a[0] * b[5] + ldr r8, [r2, #4*4] @ b[4] + umlal r11, r12, r0, r14 @ c' += a[1] * b[5] + ldr r7, [r1, #2*4] @ a[2] + umlal r3, r4, r0, r8 @ c += a[1] * b[4] + ldr r14, [r2, #3*4] @ b[3] + umlal r11, r12, r7, r8 @ c' += a[2] * b[4] + ldr r0, [r1, #3*4] @ a[3] + umlal r3, r4, r7, r14 @ c += a[2] * b[3] + ldr r8, [r2, #2*4] @ b[2] + umlal r11, r12, r0, r14 @ c' += a[3] * b[3] + ldr r7, [r1, #4*4] @ a[4] + umlal r3, r4, r0, r8 @ c += a[3] * b[2] + ldr r14, [r2, #1*4] @ b[1] + umlal r11, r12, r7, r8 @ c' += a[4] * b[2] + ldr r0, [r1, #5*4] @ a[5] + umlal r3, r4, r7, r14 @ c += a[4] * b[1] + ldr r8, [r2, #0*4] @ b[0] + umlal r11, r12, r0, r14 @ c' += a[5] * b[1] + ldr r7, [r1, #6*4] @ a[6] + umlal r3, r4, r0, r8 @ c += a[5] * b[0] + ldr r14, [r2, #9*4] @ b[9] + umlal r11, r12, r7, r8 @ c' += a[6] * b[0] + ldr r0, [r1, #7*4] @ a[7] + umlal r5, r6, r7, r14 @ d += a[6] * b[9] + ldr r8, [r2, #8*4] @ b[8] + umull r9, r10, r0, r14 @ d' = a[7] * b[9] + ldr r7, [r1, #8*4] @ a[8] + umlal r5, r6, r0, r8 @ d += a[7] * b[8] + ldr r14, [r2, #7*4] @ b[7] + umlal r9, r10, r7, r8 @ d' += a[8] * b[8] + ldr r0, [r1, #9*4] @ a[9] + umlal r5, r6, r7, r14 @ d += a[8] * b[7] + ldr r8, [r2, #6*4] @ b[6] + umlal r9, r10, r0, r14 @ d' += a[9] * b[7] + umlal r5, r6, r0, r8 @ d += a[9] * b[6] + + bic r0, r5, field_not_M @ u5 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u5 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t5 = c & M + str r14, [sp, #4 + 5*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u5 * R1 + umlal r3, r4, r0, r14 + + /* H */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u6 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u6 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, 
field_not_M @ t6 = c & M + str r14, [sp, #4 + 6*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u6 * R1 + umlal r3, r4, r0, r14 + + /* I - interleaved with J */ + ldr r8, [r2, #8*4] @ b[8] + ldr r7, [r1, #0*4] @ a[0] + ldr r14, [r2, #7*4] @ b[7] + umull r11, r12, r7, r8 @ c' = a[0] * b[8] + ldr r0, [r1, #1*4] @ a[1] + umlal r3, r4, r7, r14 @ c += a[0] * b[7] + ldr r8, [r2, #6*4] @ b[6] + umlal r11, r12, r0, r14 @ c' += a[1] * b[7] + ldr r7, [r1, #2*4] @ a[2] + umlal r3, r4, r0, r8 @ c += a[1] * b[6] + ldr r14, [r2, #5*4] @ b[5] + umlal r11, r12, r7, r8 @ c' += a[2] * b[6] + ldr r0, [r1, #3*4] @ a[3] + umlal r3, r4, r7, r14 @ c += a[2] * b[5] + ldr r8, [r2, #4*4] @ b[4] + umlal r11, r12, r0, r14 @ c' += a[3] * b[5] + ldr r7, [r1, #4*4] @ a[4] + umlal r3, r4, r0, r8 @ c += a[3] * b[4] + ldr r14, [r2, #3*4] @ b[3] + umlal r11, r12, r7, r8 @ c' += a[4] * b[4] + ldr r0, [r1, #5*4] @ a[5] + umlal r3, r4, r7, r14 @ c += a[4] * b[3] + ldr r8, [r2, #2*4] @ b[2] + umlal r11, r12, r0, r14 @ c' += a[5] * b[3] + ldr r7, [r1, #6*4] @ a[6] + umlal r3, r4, r0, r8 @ c += a[5] * b[2] + ldr r14, [r2, #1*4] @ b[1] + umlal r11, r12, r7, r8 @ c' += a[6] * b[2] + ldr r0, [r1, #7*4] @ a[7] + umlal r3, r4, r7, r14 @ c += a[6] * b[1] + ldr r8, [r2, #0*4] @ b[0] + umlal r11, r12, r0, r14 @ c' += a[7] * b[1] + ldr r7, [r1, #8*4] @ a[8] + umlal r3, r4, r0, r8 @ c += a[7] * b[0] + ldr r14, [r2, #9*4] @ b[9] + umlal r11, r12, r7, r8 @ c' += a[8] * b[0] + ldr r0, [r1, #9*4] @ a[9] + umlal r5, r6, r7, r14 @ d += a[8] * b[9] + ldr r8, [r2, #8*4] @ b[8] + umull r9, r10, r0, r14 @ d' = a[9] * b[9] + umlal r5, r6, r0, r8 @ d += a[9] * b[8] + + bic r0, r5, field_not_M @ u7 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u7 * R0 + umlal r3, r4, r0, r14 + + bic r14, r3, field_not_M @ t7 = c & M + str r14, [sp, #4 + 7*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u7 * R1 + umlal r3, r4, r0, r14 + + /* J */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u8 = d & M + str r0, [sp, #4 + 8*4] + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u8 * R0 + umlal r3, r4, r0, r14 + + /****************************************** + * compute and write back result + ****************************************** + Allocation: + r0 r + r3:r4 c + r5:r6 d + r7 t0 + r8 t1 + r9 t2 + r11 u8 + r12 t9 + r1,r2,r10,r14 scratch + + Note: do not read from a[] after here, it may overlap with r[] + */ + ldr r0, [sp, #0] + add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9 + ldmia r1, {r2,r7,r8,r9,r10,r11,r12} + add r1, r0, #3*4 + stmia r1, {r2,r7,r8,r9,r10} + + bic r2, r3, field_not_M @ r[8] = c & M + str r2, [r0, #8*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u8 * R1 + umlal r3, r4, r11, r14 + movw r14, field_R0 @ c += d * R0 + umlal r3, r4, r5, r14 + adds r3, r3, r12 @ c += t9 + adc r4, r4, #0 + + add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2 + ldmia r1, {r7,r8,r9} + + ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4) + str r2, [r0, #9*4] + mov r3, r3, lsr #22 @ c >>= 22 + orr r3, r3, r4, asl #10 + mov r4, r4, lsr #22 + movw r14, field_R1 << 4 @ c += d * (R1 << 4) + umlal r3, r4, r5, r14 + + movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add) + umull r5, r6, r3, 
r14 @ d = c.lo * (R0 >> 4) + adds r5, r5, r7 @ d.lo += t0 + mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4) + adc r6, r6, 0 @ d.hi += carry + + bic r2, r5, field_not_M @ r[0] = d & M + str r2, [r0, #0*4] + + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + + movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add) + umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4) + adds r5, r5, r8 @ d.lo += t1 + adc r6, r6, #0 @ d.hi += carry + adds r5, r5, r1 @ d.lo += tmp.lo + mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4) + adc r6, r6, r2 @ d.hi += carry + tmp.hi + + bic r2, r5, field_not_M @ r[1] = d & M + str r2, [r0, #1*4] + mov r5, r5, lsr #26 @ d >>= 26 (ignore hi) + orr r5, r5, r6, asl #6 + + add r5, r5, r9 @ d += t2 + str r5, [r0, #2*4] @ r[2] = d + + add sp, sp, #48 + ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} + .size secp256k1_fe_mul_inner, .-secp256k1_fe_mul_inner + + .align 2 + .global secp256k1_fe_sqr_inner + .type secp256k1_fe_sqr_inner, %function + @ Arguments: + @ r0 r Can overlap with a + @ r1 a + @ Stack (total 4+10*4 = 44) + @ sp + #0 saved 'r' pointer + @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 +secp256k1_fe_sqr_inner: + stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} + sub sp, sp, #48 @ frame=44 + alignment + str r0, [sp, #0] @ save result address, we need it only at the end + /****************************************** + * Main computation code. + ****************************************** + + Allocation: + r0,r14,r2,r7,r8 scratch + r1 a (pointer) + r3:r4 c + r5:r6 d + r11:r12 c' + r9:r10 d' + + Note: do not write to r[] here, it may overlap with a[] + */ + /* A interleaved with B */ + ldr r0, [r1, #1*4] @ a[1]*2 + ldr r7, [r1, #0*4] @ a[0] + mov r0, r0, asl #1 + ldr r14, [r1, #9*4] @ a[9] + umull r3, r4, r7, r7 @ c = a[0] * a[0] + ldr r8, [r1, #8*4] @ a[8] + mov r7, r7, asl #1 + umull r5, r6, r7, r14 @ d = a[0]*2 * a[9] + ldr r7, [r1, #2*4] @ a[2]*2 + umull r9, r10, r0, r14 @ d' = a[1]*2 * a[9] + ldr r14, [r1, #7*4] @ a[7] + umlal r5, r6, r0, r8 @ d += a[1]*2 * a[8] + mov r7, r7, asl #1 + ldr r0, [r1, #3*4] @ a[3]*2 + umlal r9, r10, r7, r8 @ d' += a[2]*2 * a[8] + ldr r8, [r1, #6*4] @ a[6] + umlal r5, r6, r7, r14 @ d += a[2]*2 * a[7] + mov r0, r0, asl #1 + ldr r7, [r1, #4*4] @ a[4]*2 + umlal r9, r10, r0, r14 @ d' += a[3]*2 * a[7] + ldr r14, [r1, #5*4] @ a[5] + mov r7, r7, asl #1 + umlal r5, r6, r0, r8 @ d += a[3]*2 * a[6] + umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[6] + umlal r5, r6, r7, r14 @ d += a[4]*2 * a[5] + umlal r9, r10, r14, r14 @ d' += a[5] * a[5] + + bic r0, r5, field_not_M @ t9 = d & M + str r0, [sp, #4 + 9*4] + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + + /* B */ + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u0 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u0 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t0 = c & M + str r14, [sp, #4 + 0*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u0 * R1 + umlal r3, r4, r0, r14 + + /* C interleaved with D */ + ldr r0, [r1, #0*4] @ a[0]*2 + ldr r14, [r1, #1*4] @ a[1] + mov r0, r0, asl #1 + ldr r8, [r1, #2*4] @ a[2] + umlal r3, r4, r0, r14 @ c += a[0]*2 * a[1] + mov r7, r8, asl #1 @ a[2]*2 + umull r11, r12, r14, r14 @ c' = a[1] * a[1] + ldr r14, [r1, #9*4] @ a[9] + umlal r11, r12, r0, r8 @ c' += a[0]*2 * a[2] + ldr r0, [r1, #3*4] @ a[3]*2 + ldr r8, [r1, #8*4] @ a[8] + 
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[9] + mov r0, r0, asl #1 + ldr r7, [r1, #4*4] @ a[4]*2 + umull r9, r10, r0, r14 @ d' = a[3]*2 * a[9] + ldr r14, [r1, #7*4] @ a[7] + umlal r5, r6, r0, r8 @ d += a[3]*2 * a[8] + mov r7, r7, asl #1 + ldr r0, [r1, #5*4] @ a[5]*2 + umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[8] + ldr r8, [r1, #6*4] @ a[6] + mov r0, r0, asl #1 + umlal r5, r6, r7, r14 @ d += a[4]*2 * a[7] + umlal r9, r10, r0, r14 @ d' += a[5]*2 * a[7] + umlal r5, r6, r0, r8 @ d += a[5]*2 * a[6] + umlal r9, r10, r8, r8 @ d' += a[6] * a[6] + + bic r0, r5, field_not_M @ u1 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u1 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t1 = c & M + str r14, [sp, #4 + 1*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u1 * R1 + umlal r3, r4, r0, r14 + + /* D */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u2 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u2 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t2 = c & M + str r14, [sp, #4 + 2*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u2 * R1 + umlal r3, r4, r0, r14 + + /* E interleaved with F */ + ldr r7, [r1, #0*4] @ a[0]*2 + ldr r0, [r1, #1*4] @ a[1]*2 + ldr r14, [r1, #2*4] @ a[2] + mov r7, r7, asl #1 + ldr r8, [r1, #3*4] @ a[3] + ldr r2, [r1, #4*4] + umlal r3, r4, r7, r8 @ c += a[0]*2 * a[3] + mov r0, r0, asl #1 + umull r11, r12, r7, r2 @ c' = a[0]*2 * a[4] + mov r2, r2, asl #1 @ a[4]*2 + umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[3] + ldr r8, [r1, #9*4] @ a[9] + umlal r3, r4, r0, r14 @ c += a[1]*2 * a[2] + ldr r0, [r1, #5*4] @ a[5]*2 + umlal r11, r12, r14, r14 @ c' += a[2] * a[2] + ldr r14, [r1, #8*4] @ a[8] + mov r0, r0, asl #1 + umlal r5, r6, r2, r8 @ d += a[4]*2 * a[9] + ldr r7, [r1, #6*4] @ a[6]*2 + umull r9, r10, r0, r8 @ d' = a[5]*2 * a[9] + mov r7, r7, asl #1 + ldr r8, [r1, #7*4] @ a[7] + umlal r5, r6, r0, r14 @ d += a[5]*2 * a[8] + umlal r9, r10, r7, r14 @ d' += a[6]*2 * a[8] + umlal r5, r6, r7, r8 @ d += a[6]*2 * a[7] + umlal r9, r10, r8, r8 @ d' += a[7] * a[7] + + bic r0, r5, field_not_M @ u3 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u3 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t3 = c & M + str r14, [sp, #4 + 3*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u3 * R1 + umlal r3, r4, r0, r14 + + /* F */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u4 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u4 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t4 = c & M + str r14, [sp, #4 + 4*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u4 * R1 + umlal r3, r4, r0, r14 + + /* G interleaved with H */ + ldr r7, [r1, #0*4] @ a[0]*2 + ldr r0, [r1, #1*4] @ a[1]*2 + mov r7, r7, asl #1 + ldr r8, [r1, #5*4] @ a[5] + ldr r2, [r1, #6*4] @ a[6] + umlal r3, r4, r7, r8 @ c += a[0]*2 * a[5] + ldr r14, [r1, #4*4] @ a[4] + mov r0, r0, asl #1 + umull r11, r12, r7, r2 @ c' = a[0]*2 * a[6] + ldr r7, [r1, #2*4] @ 
a[2]*2 + umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[5] + mov r7, r7, asl #1 + ldr r8, [r1, #3*4] @ a[3] + umlal r3, r4, r0, r14 @ c += a[1]*2 * a[4] + mov r0, r2, asl #1 @ a[6]*2 + umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[4] + ldr r14, [r1, #9*4] @ a[9] + umlal r3, r4, r7, r8 @ c += a[2]*2 * a[3] + ldr r7, [r1, #7*4] @ a[7]*2 + umlal r11, r12, r8, r8 @ c' += a[3] * a[3] + mov r7, r7, asl #1 + ldr r8, [r1, #8*4] @ a[8] + umlal r5, r6, r0, r14 @ d += a[6]*2 * a[9] + umull r9, r10, r7, r14 @ d' = a[7]*2 * a[9] + umlal r5, r6, r7, r8 @ d += a[7]*2 * a[8] + umlal r9, r10, r8, r8 @ d' += a[8] * a[8] + + bic r0, r5, field_not_M @ u5 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u5 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t5 = c & M + str r14, [sp, #4 + 5*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u5 * R1 + umlal r3, r4, r0, r14 + + /* H */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + adds r5, r5, r9 @ d += d' + adc r6, r6, r10 + + bic r0, r5, field_not_M @ u6 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u6 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t6 = c & M + str r14, [sp, #4 + 6*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u6 * R1 + umlal r3, r4, r0, r14 + + /* I interleaved with J */ + ldr r7, [r1, #0*4] @ a[0]*2 + ldr r0, [r1, #1*4] @ a[1]*2 + mov r7, r7, asl #1 + ldr r8, [r1, #7*4] @ a[7] + ldr r2, [r1, #8*4] @ a[8] + umlal r3, r4, r7, r8 @ c += a[0]*2 * a[7] + ldr r14, [r1, #6*4] @ a[6] + mov r0, r0, asl #1 + umull r11, r12, r7, r2 @ c' = a[0]*2 * a[8] + ldr r7, [r1, #2*4] @ a[2]*2 + umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[7] + ldr r8, [r1, #5*4] @ a[5] + umlal r3, r4, r0, r14 @ c += a[1]*2 * a[6] + ldr r0, [r1, #3*4] @ a[3]*2 + mov r7, r7, asl #1 + umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[6] + ldr r14, [r1, #4*4] @ a[4] + mov r0, r0, asl #1 + umlal r3, r4, r7, r8 @ c += a[2]*2 * a[5] + mov r2, r2, asl #1 @ a[8]*2 + umlal r11, r12, r0, r8 @ c' += a[3]*2 * a[5] + umlal r3, r4, r0, r14 @ c += a[3]*2 * a[4] + umlal r11, r12, r14, r14 @ c' += a[4] * a[4] + ldr r8, [r1, #9*4] @ a[9] + umlal r5, r6, r2, r8 @ d += a[8]*2 * a[9] + @ r8 will be used in J + + bic r0, r5, field_not_M @ u7 = d & M + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u7 * R0 + umlal r3, r4, r0, r14 + bic r14, r3, field_not_M @ t7 = c & M + str r14, [sp, #4 + 7*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u7 * R1 + umlal r3, r4, r0, r14 + + /* J */ + adds r3, r3, r11 @ c += c' + adc r4, r4, r12 + umlal r5, r6, r8, r8 @ d += a[9] * a[9] + + bic r0, r5, field_not_M @ u8 = d & M + str r0, [sp, #4 + 8*4] + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + movw r14, field_R0 @ c += u8 * R0 + umlal r3, r4, r0, r14 + + /****************************************** + * compute and write back result + ****************************************** + Allocation: + r0 r + r3:r4 c + r5:r6 d + r7 t0 + r8 t1 + r9 t2 + r11 u8 + r12 t9 + r1,r2,r10,r14 scratch + + Note: do not read from a[] after here, it may overlap with r[] + */ + ldr r0, [sp, #0] + add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9 + ldmia r1, {r2,r7,r8,r9,r10,r11,r12} + add r1, r0, #3*4 + stmia r1, 
{r2,r7,r8,r9,r10} + + bic r2, r3, field_not_M @ r[8] = c & M + str r2, [r0, #8*4] + mov r3, r3, lsr #26 @ c >>= 26 + orr r3, r3, r4, asl #6 + mov r4, r4, lsr #26 + mov r14, field_R1 @ c += u8 * R1 + umlal r3, r4, r11, r14 + movw r14, field_R0 @ c += d * R0 + umlal r3, r4, r5, r14 + adds r3, r3, r12 @ c += t9 + adc r4, r4, #0 + + add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2 + ldmia r1, {r7,r8,r9} + + ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4) + str r2, [r0, #9*4] + mov r3, r3, lsr #22 @ c >>= 22 + orr r3, r3, r4, asl #10 + mov r4, r4, lsr #22 + movw r14, field_R1 << 4 @ c += d * (R1 << 4) + umlal r3, r4, r5, r14 + + movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add) + umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4) + adds r5, r5, r7 @ d.lo += t0 + mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4) + adc r6, r6, 0 @ d.hi += carry + + bic r2, r5, field_not_M @ r[0] = d & M + str r2, [r0, #0*4] + + mov r5, r5, lsr #26 @ d >>= 26 + orr r5, r5, r6, asl #6 + mov r6, r6, lsr #26 + + movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add) + umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4) + adds r5, r5, r8 @ d.lo += t1 + adc r6, r6, #0 @ d.hi += carry + adds r5, r5, r1 @ d.lo += tmp.lo + mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4) + adc r6, r6, r2 @ d.hi += carry + tmp.hi + + bic r2, r5, field_not_M @ r[1] = d & M + str r2, [r0, #1*4] + mov r5, r5, lsr #26 @ d >>= 26 (ignore hi) + orr r5, r5, r6, asl #6 + + add r5, r5, r9 @ d += t2 + str r5, [r0, #2*4] @ r[2] = d + + add sp, sp, #48 + ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} + .size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner + diff --git a/src/bench_ecdh.c b/src/bench_ecdh.c index 5a7c6376e0..cde5e2dbb4 100644 --- a/src/bench_ecdh.c +++ b/src/bench_ecdh.c @@ -28,7 +28,8 @@ static void bench_ecdh_setup(void* arg) { 0xa2, 0xba, 0xd1, 0x84, 0xf8, 0x83, 0xc6, 0x9f }; - data->ctx = secp256k1_context_create(0); + /* create a context with no capabilities */ + data->ctx = secp256k1_context_create(SECP256K1_FLAGS_TYPE_CONTEXT); for (i = 0; i < 32; i++) { data->scalar[i] = i + 1; } diff --git a/src/bench_internal.c b/src/bench_internal.c index 7809f5f8cf..0809f77bda 100644 --- a/src/bench_internal.c +++ b/src/bench_internal.c @@ -181,12 +181,12 @@ void bench_field_inverse_var(void* arg) { } } -void bench_field_sqrt_var(void* arg) { +void bench_field_sqrt(void* arg) { int i; bench_inv_t *data = (bench_inv_t*)arg; for (i = 0; i < 20000; i++) { - secp256k1_fe_sqrt_var(&data->fe_x, &data->fe_x); + secp256k1_fe_sqrt(&data->fe_x, &data->fe_x); secp256k1_fe_add(&data->fe_x, &data->fe_y); } } @@ -227,6 +227,15 @@ void bench_group_add_affine_var(void* arg) { } } +void bench_group_jacobi_var(void* arg) { + int i; + bench_inv_t *data = (bench_inv_t*)arg; + + for (i = 0; i < 20000; i++) { + secp256k1_gej_has_quad_y_var(&data->gej_x); + } +} + void bench_ecmult_wnaf(void* arg) { int i; bench_inv_t *data = (bench_inv_t*)arg; @@ -299,6 +308,21 @@ void bench_context_sign(void* arg) { } } +#ifndef USE_NUM_NONE +void bench_num_jacobi(void* arg) { + int i; + bench_inv_t *data = (bench_inv_t*)arg; + secp256k1_num nx, norder; + + secp256k1_scalar_get_num(&nx, &data->scalar_x); + secp256k1_scalar_order_get_num(&norder); + secp256k1_scalar_get_num(&norder, &data->scalar_y); + + for (i = 0; i < 200000; i++) { + secp256k1_num_jacobi(&nx, &norder); + } +} +#endif int have_flag(int argc, char** argv, char *flag) { char** argm = argv + argc; @@ -333,12 +357,13 @@ int main(int argc, char **argv) { if (have_flag(argc, argv, "field") || 
have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, 200000); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, 20000); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, 20000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt_var", bench_field_sqrt_var, bench_setup, NULL, &data, 10, 20000); + if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt", bench_field_sqrt, bench_setup, NULL, &data, 10, 20000); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, 200000); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000); + if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, 20000); if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000); if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000); @@ -350,5 +375,8 @@ int main(int argc, char **argv) { if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 20); if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 200); +#ifndef USE_NUM_NONE + if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, 200000); +#endif return 0; } diff --git a/src/bench_verify.c b/src/bench_verify.c index 5718320cda..418defa0aa 100644 --- a/src/bench_verify.c +++ b/src/bench_verify.c @@ -11,6 +11,12 @@ #include "util.h" #include "bench.h" +#ifdef ENABLE_OPENSSL_TESTS +#include +#include +#include +#endif + typedef struct { secp256k1_context *ctx; unsigned char msg[32]; @@ -19,6 +25,9 @@ typedef struct { size_t siglen; unsigned char pubkey[33]; size_t pubkeylen; +#ifdef ENABLE_OPENSSL_TESTS + EC_GROUP* ec_group; +#endif } benchmark_verify_t; static void benchmark_verify(void* arg) { @@ -40,6 +49,36 @@ static void benchmark_verify(void* arg) { } } +#ifdef ENABLE_OPENSSL_TESTS +static void benchmark_verify_openssl(void* arg) { + int i; + benchmark_verify_t* data = (benchmark_verify_t*)arg; + + for (i = 0; i < 20000; i++) { + data->sig[data->siglen - 1] ^= (i & 0xFF); + data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); + data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); + { + EC_KEY *pkey = EC_KEY_new(); + const unsigned char *pubkey = &data->pubkey[0]; + int result; + + CHECK(pkey != NULL); + result = 
EC_KEY_set_group(pkey, data->ec_group); + CHECK(result); + result = (o2i_ECPublicKey(&pkey, &pubkey, data->pubkeylen)) != NULL; + CHECK(result); + result = ECDSA_verify(0, &data->msg[0], sizeof(data->msg), &data->sig[0], data->siglen, pkey) == (i == 0); + CHECK(result); + EC_KEY_free(pkey); + } + data->sig[data->siglen - 1] ^= (i & 0xFF); + data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); + data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); + } +} +#endif + int main(void) { int i; secp256k1_pubkey pubkey; @@ -62,6 +101,11 @@ int main(void) { CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000); +#ifdef ENABLE_OPENSSL_TESTS + data.ec_group = EC_GROUP_new_by_curve_name(NID_secp256k1); + run_benchmark("ecdsa_verify_openssl", benchmark_verify_openssl, NULL, NULL, &data, 10, 20000); + EC_GROUP_free(data.ec_group); +#endif secp256k1_context_destroy(data.ctx); return 0; diff --git a/src/ecdsa_impl.h b/src/ecdsa_impl.h index d110b4bb1d..9a42e519bd 100644 --- a/src/ecdsa_impl.h +++ b/src/ecdsa_impl.h @@ -203,7 +203,9 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) { unsigned char c[32]; secp256k1_scalar sn, u1, u2; +#if !defined(EXHAUSTIVE_TEST_ORDER) secp256k1_fe xr; +#endif secp256k1_gej pubkeyj; secp256k1_gej pr; @@ -219,6 +221,21 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const if (secp256k1_gej_is_infinity(&pr)) { return 0; } + +#if defined(EXHAUSTIVE_TEST_ORDER) +{ + secp256k1_scalar computed_r; + int overflow = 0; + secp256k1_ge pr_ge; + secp256k1_ge_set_gej(&pr_ge, &pr); + secp256k1_fe_normalize(&pr_ge.x); + + secp256k1_fe_get_b32(c, &pr_ge.x); + secp256k1_scalar_set_b32(&computed_r, c, &overflow); + /* we fully expect overflow */ + return secp256k1_scalar_eq(sigr, &computed_r); +} +#else secp256k1_scalar_get_b32(c, sigr); secp256k1_fe_set_b32(&xr, c); @@ -252,6 +269,7 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const return 1; } return 0; +#endif } static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) { diff --git a/src/ecmult_const_impl.h b/src/ecmult_const_impl.h index 90ac94770e..0db314c48e 100644 --- a/src/ecmult_const_impl.h +++ b/src/ecmult_const_impl.h @@ -58,25 +58,27 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) { int global_sign; int skew = 0; int word = 0; + /* 1 2 3 */ int u_last; int u; -#ifdef USE_ENDOMORPHISM int flip; int bit; secp256k1_scalar neg_s; int not_neg_one; - /* If we are using the endomorphism, we cannot handle even numbers by negating - * them, since we are working with 128-bit numbers whose negations would be 256 - * bits, eliminating the performance advantage. Instead we use a technique from + /* Note that we cannot handle even numbers by negating them to be odd, as is + * done in other implementations, since if our scalars were specified to have + * width < 256 for performance reasons, their negations would have width 256 + * and we'd lose any performance benefit. 
Instead, we use a technique from * Section 4.2 of the Okeya/Tagaki paper, which is to add either 1 (for even) - * or 2 (for odd) to the number we are encoding, then compensating after the - * multiplication. */ - /* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */ + * or 2 (for odd) to the number we are encoding, returning a skew value indicating + * this, and having the caller compensate after doing the multiplication. */ + + /* Negative numbers will be negated to keep their bit representation below the maximum width */ flip = secp256k1_scalar_is_high(&s); /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ - bit = flip ^ (s.d[0] & 1); + bit = flip ^ !secp256k1_scalar_is_even(&s); /* We check for negative one, since adding 2 to it will cause an overflow */ secp256k1_scalar_negate(&neg_s, &s); not_neg_one = !secp256k1_scalar_is_one(&neg_s); @@ -89,11 +91,6 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) { global_sign = secp256k1_scalar_cond_negate(&s, flip); global_sign *= not_neg_one * 2 - 1; skew = 1 << bit; -#else - /* Otherwise, we just negate to force oddness */ - int is_even = secp256k1_scalar_is_even(&s); - global_sign = secp256k1_scalar_cond_negate(&s, is_even); -#endif /* 4 */ u_last = secp256k1_scalar_shr_int(&s, w); @@ -127,15 +124,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons secp256k1_ge tmpa; secp256k1_fe Z; + int skew_1; + int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; #ifdef USE_ENDOMORPHISM secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; - int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; - int skew_1; int skew_lam; secp256k1_scalar q_1, q_lam; -#else - int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)]; #endif int i; @@ -145,18 +140,10 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons #ifdef USE_ENDOMORPHISM /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc); - /* no need for zero correction when using endomorphism since even - * numbers have one added to them anyway */ skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1); skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1); #else - int is_zero = secp256k1_scalar_is_zero(scalar); - /* the wNAF ladder cannot handle zero, so bump this to one .. we will - * correct the result after the fact */ - sc.d[0] += is_zero; - VERIFY_CHECK(!secp256k1_scalar_is_zero(&sc)); - - secp256k1_wnaf_const(wnaf, sc, WINDOW_A - 1); + skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1); #endif /* Calculate odd multiples of a. 
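The skew handling described above encodes s + skew, with skew taken as 1 for even s and 2 for odd s, so the wNAF ladder only ever processes odd values; the "Correct for wNAF skew" block later in this file then subtracts skew*A again. A minimal sketch of the identity being relied on, with plain integers standing in for point operations (names are illustrative only, not library code):

    #include <assert.h>

    int main(void) {
        int a = 7;                                   /* stands in for the point A */
        int s;
        for (s = 0; s < 100; s++) {
            int skew = (s % 2 == 0) ? 1 : 2;         /* 1 for even, 2 for odd */
            int encoded = s + skew;                  /* always odd; what the ladder multiplies by */
            int corrected = encoded * a - skew * a;  /* the caller's correction step */
            assert(corrected == s * a);
        }
        return 0;
    }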
@@ -179,21 +166,15 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons /* first loop iteration (separated out so we can directly set r, rather * than having it start at infinity, get doubled several times, then have * its new value added to it) */ -#ifdef USE_ENDOMORPHISM i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); secp256k1_gej_set_ge(r, &tmpa); - +#ifdef USE_ENDOMORPHISM i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); secp256k1_gej_add_ge(r, r, &tmpa); -#else - i = wnaf[WNAF_SIZE(WINDOW_A - 1)]; - VERIFY_CHECK(i != 0); - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); - secp256k1_gej_set_ge(r, &tmpa); #endif /* remaining loop iterations */ for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) { @@ -202,59 +183,57 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons for (j = 0; j < WINDOW_A - 1; ++j) { secp256k1_gej_double_nonzero(r, r, NULL); } -#ifdef USE_ENDOMORPHISM + n = wnaf_1[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); VERIFY_CHECK(n != 0); secp256k1_gej_add_ge(r, r, &tmpa); - +#ifdef USE_ENDOMORPHISM n = wnaf_lam[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); VERIFY_CHECK(n != 0); secp256k1_gej_add_ge(r, r, &tmpa); -#else - n = wnaf[i]; - VERIFY_CHECK(n != 0); - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); - secp256k1_gej_add_ge(r, r, &tmpa); #endif } secp256k1_fe_mul(&r->z, &r->z, &Z); -#ifdef USE_ENDOMORPHISM { /* Correct for wNAF skew */ secp256k1_ge correction = *a; secp256k1_ge_storage correction_1_stor; +#ifdef USE_ENDOMORPHISM secp256k1_ge_storage correction_lam_stor; +#endif secp256k1_ge_storage a2_stor; secp256k1_gej tmpj; secp256k1_gej_set_ge(&tmpj, &correction); secp256k1_gej_double_var(&tmpj, &tmpj, NULL); secp256k1_ge_set_gej(&correction, &tmpj); secp256k1_ge_to_storage(&correction_1_stor, a); +#ifdef USE_ENDOMORPHISM secp256k1_ge_to_storage(&correction_lam_stor, a); +#endif secp256k1_ge_to_storage(&a2_stor, &correction); /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); +#ifdef USE_ENDOMORPHISM secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); +#endif /* Apply the correction */ secp256k1_ge_from_storage(&correction, &correction_1_stor); secp256k1_ge_neg(&correction, &correction); secp256k1_gej_add_ge(r, r, &correction); +#ifdef USE_ENDOMORPHISM secp256k1_ge_from_storage(&correction, &correction_lam_stor); secp256k1_ge_neg(&correction, &correction); secp256k1_ge_mul_lambda(&correction, &correction); secp256k1_gej_add_ge(r, r, &correction); - } -#else - /* correct for zero */ - r->infinity |= is_zero; #endif + } } #endif diff --git a/src/ecmult_gen_impl.h b/src/ecmult_gen_impl.h index b63c4d8662..35f2546077 100644 --- a/src/ecmult_gen_impl.h +++ b/src/ecmult_gen_impl.h @@ -77,7 +77,7 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - secp256k1_ge_set_all_gej_var(1024, prec, precj, cb); + secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb); } for (j = 0; j < 64; j++) { for (i = 0; i < 16; i++) { diff --git a/src/ecmult_impl.h b/src/ecmult_impl.h index e6e5f47188..4e40104ad4 100644 --- a/src/ecmult_impl.h +++ b/src/ecmult_impl.h @@ -7,13 +7,29 @@ #ifndef _SECP256K1_ECMULT_IMPL_H_ #define _SECP256K1_ECMULT_IMPL_H_ 
+#include + #include "group.h" #include "scalar.h" #include "ecmult.h" +#if defined(EXHAUSTIVE_TEST_ORDER) +/* We need to lower these values for exhaustive tests because + * the tables cannot have infinities in them (this breaks the + * affine-isomorphism stuff which tracks z-ratios) */ +# if EXHAUSTIVE_TEST_ORDER > 128 +# define WINDOW_A 5 +# define WINDOW_G 8 +# elif EXHAUSTIVE_TEST_ORDER > 8 +# define WINDOW_A 4 +# define WINDOW_G 4 +# else +# define WINDOW_A 2 +# define WINDOW_G 2 +# endif +#else /* optimal for 128-bit and 256-bit exponents. */ #define WINDOW_A 5 - /** larger numbers may result in slightly better performance, at the cost of exponentially larger precomputed tables. */ #ifdef USE_ENDOMORPHISM @@ -23,6 +39,7 @@ /** One table for window size 16: 1.375 MiB. */ #define WINDOW_G 16 #endif +#endif /** The number of entries a table with precomputed multiples needs to have. */ #define ECMULT_TABLE_SIZE(w) (1 << ((w)-2)) @@ -101,7 +118,7 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge /* Compute the odd multiples in Jacobian form. */ secp256k1_ecmult_odd_multiples_table(n, prej, zr, a); /* Convert them in batch to affine coordinates. */ - secp256k1_ge_set_table_gej_var(n, prea, prej, zr); + secp256k1_ge_set_table_gej_var(prea, prej, zr, n); /* Convert them to compact storage form. */ for (i = 0; i < n; i++) { secp256k1_ge_to_storage(&pre[i], &prea[i]); diff --git a/src/field.h b/src/field.h index 2d52af5e36..bbb1ee866c 100644 --- a/src/field.h +++ b/src/field.h @@ -30,6 +30,8 @@ #error "Please select field implementation" #endif +#include "util.h" + /** Normalize a field element. */ static void secp256k1_fe_normalize(secp256k1_fe *r); @@ -50,6 +52,9 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r); /** Set a field element equal to a small integer. Resulting field element is normalized. */ static void secp256k1_fe_set_int(secp256k1_fe *r, int a); +/** Sets a field element equal to zero, initializing all fields. */ +static void secp256k1_fe_clear(secp256k1_fe *a); + /** Verify whether a field element is zero. Requires the input to be normalized. */ static int secp256k1_fe_is_zero(const secp256k1_fe *a); @@ -57,6 +62,9 @@ static int secp256k1_fe_is_zero(const secp256k1_fe *a); static int secp256k1_fe_is_odd(const secp256k1_fe *a); /** Compare two field elements. Requires magnitude-1 inputs. */ +static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b); + +/** Same as secp256k1_fe_equal, but may be variable time. */ static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b); /** Compare two field elements. Requires both inputs to be normalized */ @@ -92,7 +100,10 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a); * The input's magnitude can be at most 8. The output magnitude is 1 (but not * guaranteed to be normalized). The result in r will always be a square * itself. */ -static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a); +static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a); + +/** Checks whether a field element is a quadratic residue. */ +static int secp256k1_fe_is_quad_var(const secp256k1_fe *a); /** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be * at most 8. The output magnitude is 1 (but not guaranteed to be normalized). 
*/ @@ -104,7 +115,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a); /** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be * at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and * outputs must not overlap in memory. */ -static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a); +static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len); /** Convert a field element to the storage type. */ static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a); diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h index 212cc5396a..7b8c079608 100644 --- a/src/field_10x26_impl.h +++ b/src/field_10x26_impl.h @@ -7,8 +7,6 @@ #ifndef _SECP256K1_FIELD_REPR_IMPL_H_ #define _SECP256K1_FIELD_REPR_IMPL_H_ -#include -#include #include "util.h" #include "num.h" #include "field.h" @@ -429,6 +427,14 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_f #endif } +#if defined(USE_EXTERNAL_ASM) + +/* External assembler implementation */ +void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); +void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a); + +#else + #ifdef VERIFY #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) #else @@ -1037,7 +1043,7 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t VERIFY_BITS(r[2], 27); /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } - +#endif static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) { #ifdef VERIFY diff --git a/src/field_5x52_impl.h b/src/field_5x52_impl.h index b31e24ab81..7a99eb21ec 100644 --- a/src/field_5x52_impl.h +++ b/src/field_5x52_impl.h @@ -11,7 +11,6 @@ #include "libsecp256k1-config.h" #endif -#include #include "util.h" #include "num.h" #include "field.h" diff --git a/src/field_5x52_int128_impl.h b/src/field_5x52_int128_impl.h index 9280bb5ea2..0bf22bdd3e 100644 --- a/src/field_5x52_int128_impl.h +++ b/src/field_5x52_int128_impl.h @@ -137,7 +137,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t VERIFY_BITS(r[2], 52); VERIFY_BITS(c, 63); /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += d * R + t3;; + c += d * R + t3; VERIFY_BITS(c, 100); /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[3] = c & M; c >>= 52; @@ -259,7 +259,7 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t VERIFY_BITS(c, 63); /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += d * R + t3;; + c += d * R + t3; VERIFY_BITS(c, 100); /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[3] = c & M; c >>= 52; diff --git a/src/field_impl.h b/src/field_impl.h index 77f4aae2f9..5127b279bc 100644 --- a/src/field_impl.h +++ b/src/field_impl.h @@ -21,6 +21,13 @@ #error "Please select field implementation" #endif +SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) { + secp256k1_fe na; + secp256k1_fe_negate(&na, a, 1); + secp256k1_fe_add(&na, b); + return secp256k1_fe_normalizes_to_zero(&na); +} + SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b) { secp256k1_fe na; secp256k1_fe_negate(&na, a, 1); @@ -28,7 +35,7 @@ SECP256K1_INLINE static int 
secp256k1_fe_equal_var(const secp256k1_fe *a, const return secp256k1_fe_normalizes_to_zero_var(&na); } -static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a) { +static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { /** Given that p is congruent to 3 mod 4, we can compute the square root of * a mod p as the (p+1)/4'th power of a. * @@ -123,7 +130,7 @@ static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a) { /* Check that a square root was actually calculated */ secp256k1_fe_sqr(&t1, r); - return secp256k1_fe_equal_var(&t1, a); + return secp256k1_fe_equal(&t1, a); } static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { @@ -253,7 +260,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { #endif } -static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a) { +static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) { secp256k1_fe u; size_t i; if (len < 1) { @@ -280,4 +287,29 @@ static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k r[0] = u; } +static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) { +#ifndef USE_NUM_NONE + unsigned char b[32]; + secp256k1_num n; + secp256k1_num m; + /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ + static const unsigned char prime[32] = { + 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, + 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, + 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, + 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F + }; + + secp256k1_fe c = *a; + secp256k1_fe_normalize_var(&c); + secp256k1_fe_get_b32(b, &c); + secp256k1_num_set_bin(&n, b, 32); + secp256k1_num_set_bin(&m, prime, 32); + return secp256k1_num_jacobi(&n, &m) >= 0; +#else + secp256k1_fe r; + return secp256k1_fe_sqrt(&r, a); +#endif +} + #endif diff --git a/src/group.h b/src/group.h index ebfe1ca70c..4957b248fe 100644 --- a/src/group.h +++ b/src/group.h @@ -47,7 +47,7 @@ static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const se * and a Y coordinate that is a quadratic residue modulo p. The return value * is true iff a coordinate with the given X coordinate exists. */ -static int secp256k1_ge_set_xquad_var(secp256k1_ge *r, const secp256k1_fe *x); +static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x); /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ @@ -65,12 +65,12 @@ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb); +static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb); /** Set a batch of group elements equal to the inputs given in jacobian * coordinates (with known z-ratios). zr must contain the known z-ratios such * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. 
*/ -static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr); +static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len); /** Bring a batch inputs given in jacobian coordinates (with known z-ratios) to * the same global z "denominator". zr must contain the known z-ratios such @@ -94,6 +94,9 @@ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a); /** Check whether a group element is the point at infinity. */ static int secp256k1_gej_is_infinity(const secp256k1_gej *a); +/** Check whether a group element's y coordinate is a quadratic residue. */ +static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a); + /** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). * a may not be zero. Constant time. */ static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr); diff --git a/src/group_impl.h b/src/group_impl.h index 42e2f6e6eb..2e192b62fd 100644 --- a/src/group_impl.h +++ b/src/group_impl.h @@ -7,12 +7,57 @@ #ifndef _SECP256K1_GROUP_IMPL_H_ #define _SECP256K1_GROUP_IMPL_H_ -#include - #include "num.h" #include "field.h" #include "group.h" +/* These points can be generated in sage as follows: + * + * 0. Setup a worksheet with the following parameters. + * b = 4 # whatever CURVE_B will be set to + * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) + * C = EllipticCurve ([F (0), F (b)]) + * + * 1. Determine all the small orders available to you. (If there are + * no satisfactory ones, go back and change b.) + * print C.order().factor(limit=1000) + * + * 2. Choose an order as one of the prime factors listed in the above step. + * (You can also multiply some to get a composite order, though the + * tests will crash trying to invert scalars during signing.) We take a + * random point and scale it to drop its order to the desired value. + * There is some probability this won't work; just try again. + * order = 199 + * P = C.random_point() + * P = (int(P.order()) / int(order)) * P + * assert(P.order() == order) + * + * 3. Print the values. You'll need to use a vim macro or something to + * split the hex output into 4-byte chunks. + * print "%x %x" % P.xy() + */ +#if defined(EXHAUSTIVE_TEST_ORDER) +# if EXHAUSTIVE_TEST_ORDER == 199 +const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( + 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, + 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, + 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, + 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED +); + +const int CURVE_B = 4; +# elif EXHAUSTIVE_TEST_ORDER == 13 +const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( + 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, + 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, + 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, + 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac +); +const int CURVE_B = 2; +# else +# error No known generator for the specified exhaustive test group order. +# endif +#else /** Generator for secp256k1, value 'g' defined in * "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
*/ @@ -23,8 +68,11 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL ); +const int CURVE_B = 7; +#endif + static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { - secp256k1_fe zi2; + secp256k1_fe zi2; secp256k1_fe zi3; secp256k1_fe_sqr(&zi2, zi); secp256k1_fe_mul(&zi3, &zi2, zi); @@ -78,7 +126,7 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { r->y = a->y; } -static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb) { +static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) { secp256k1_fe *az; secp256k1_fe *azi; size_t i; @@ -91,7 +139,7 @@ static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp } azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count); - secp256k1_fe_inv_all_var(count, azi, az); + secp256k1_fe_inv_all_var(azi, az, count); free(az); count = 0; @@ -104,7 +152,7 @@ static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp free(azi); } -static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr) { +static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) { size_t i = len - 1; secp256k1_fe zi; @@ -147,9 +195,15 @@ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp static void secp256k1_gej_set_infinity(secp256k1_gej *r) { r->infinity = 1; - secp256k1_fe_set_int(&r->x, 0); - secp256k1_fe_set_int(&r->y, 0); - secp256k1_fe_set_int(&r->z, 0); + secp256k1_fe_clear(&r->x); + secp256k1_fe_clear(&r->y); + secp256k1_fe_clear(&r->z); +} + +static void secp256k1_ge_set_infinity(secp256k1_ge *r) { + r->infinity = 1; + secp256k1_fe_clear(&r->x); + secp256k1_fe_clear(&r->y); } static void secp256k1_gej_clear(secp256k1_gej *r) { @@ -165,19 +219,19 @@ static void secp256k1_ge_clear(secp256k1_ge *r) { secp256k1_fe_clear(&r->y); } -static int secp256k1_ge_set_xquad_var(secp256k1_ge *r, const secp256k1_fe *x) { +static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { secp256k1_fe x2, x3, c; r->x = *x; secp256k1_fe_sqr(&x2, x); secp256k1_fe_mul(&x3, x, &x2); r->infinity = 0; - secp256k1_fe_set_int(&c, 7); + secp256k1_fe_set_int(&c, CURVE_B); secp256k1_fe_add(&c, &x3); - return secp256k1_fe_sqrt_var(&r->y, &c); + return secp256k1_fe_sqrt(&r->y, &c); } static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) { - if (!secp256k1_ge_set_xquad_var(r, x)) { + if (!secp256k1_ge_set_xquad(r, x)) { return 0; } secp256k1_fe_normalize_var(&r->y); @@ -230,7 +284,7 @@ static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) { secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); secp256k1_fe_sqr(&z2, &a->z); secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2); - secp256k1_fe_mul_int(&z6, 7); + secp256k1_fe_mul_int(&z6, CURVE_B); secp256k1_fe_add(&x3, &z6); secp256k1_fe_normalize_weak(&x3); return secp256k1_fe_equal_var(&y2, &x3); @@ -244,18 +298,30 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) { /* y^2 = x^3 + 7 */ secp256k1_fe_sqr(&y2, &a->y); secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_set_int(&c, 7); + secp256k1_fe_set_int(&c, CURVE_B); secp256k1_fe_add(&x3, &c); 
secp256k1_fe_normalize_weak(&x3); return secp256k1_fe_equal_var(&y2, &x3); } static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) { - /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate */ + /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate. + * + * Note that there is an implementation described at + * https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + * which trades a multiply for a square, but in practice this is actually slower, + * mainly because it requires more normalizations. + */ secp256k1_fe t1,t2,t3,t4; /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. + * + * Having said this, if this function receives a point on a sextic twist, e.g. by + * a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6, + * since -6 does have a cube root mod p. For this point, this function will not set + * the infinity flag even though the point doubles to infinity, and the result + * point will be gibberish (z = 0 but infinity = 0). */ r->infinity = a->infinity; if (r->infinity) { @@ -623,4 +689,18 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { } #endif +static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { + secp256k1_fe yz; + + if (a->infinity) { + return 0; + } + + /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as + * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z + is */ + secp256k1_fe_mul(&yz, &a->y, &a->z); + return secp256k1_fe_is_quad_var(&yz); +} + #endif diff --git a/src/hash.h b/src/hash.h index 0ff01e63fa..fca98cab9f 100644 --- a/src/hash.h +++ b/src/hash.h @@ -11,7 +11,7 @@ #include typedef struct { - uint32_t s[32]; + uint32_t s[8]; uint32_t buf[16]; /* In big endian */ size_t bytes; } secp256k1_sha256_t; diff --git a/src/hash_impl.h b/src/hash_impl.h index ae55df6d8a..b47e65f830 100644 --- a/src/hash_impl.h +++ b/src/hash_impl.h @@ -269,15 +269,13 @@ static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256 rng->retry = 0; } - +#undef BE32 #undef Round -#undef sigma0 #undef sigma1 -#undef Sigma0 +#undef sigma0 #undef Sigma1 -#undef Ch +#undef Sigma0 #undef Maj -#undef ReadBE32 -#undef WriteBE32 +#undef Ch #endif diff --git a/src/java/org/bitcoin/NativeSecp256k1.java b/src/java/org/bitcoin/NativeSecp256k1.java index 90a498eaa2..1c67802fba 100644 --- a/src/java/org/bitcoin/NativeSecp256k1.java +++ b/src/java/org/bitcoin/NativeSecp256k1.java @@ -1,60 +1,446 @@ +/* + * Copyright 2013 Google Inc. + * Copyright 2014-2016 the libsecp256k1 contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.bitcoin; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.math.BigInteger; import com.google.common.base.Preconditions; - +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.bitcoin.NativeSecp256k1Util.*; /** - * This class holds native methods to handle ECDSA verification. - * You can find an example library that can be used for this at - * https://github.com/sipa/secp256k1 + *
<p>This class holds native methods to handle ECDSA verification.</p>
+ *
+ * <p>You can find an example library that can be used for this at https://github.com/bitcoin/secp256k1</p>
+ *
+ * <p>To build secp256k1 for use with bitcoinj, run
+ * `./configure --enable-jni --enable-experimental --enable-module-ecdh`
+ * and `make` then copy `.libs/libsecp256k1.so` to your system library path
+ * or point the JVM to the folder containing it with -Djava.library.path
+ * </p>
+
*/ public class NativeSecp256k1 { - public static final boolean enabled; - static { - boolean isEnabled = true; - try { - System.loadLibrary("javasecp256k1"); - } catch (UnsatisfiedLinkError e) { - isEnabled = false; - } - enabled = isEnabled; - } - + + private static final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(); + private static final Lock r = rwl.readLock(); + private static final Lock w = rwl.writeLock(); private static ThreadLocal nativeECDSABuffer = new ThreadLocal(); /** * Verifies the given secp256k1 signature in native code. * Calling when enabled == false is undefined (probably library not loaded) - * + * * @param data The data which was signed, must be exactly 32 bytes * @param signature The signature * @param pub The public key which did the signing */ - public static boolean verify(byte[] data, byte[] signature, byte[] pub) { + public static boolean verify(byte[] data, byte[] signature, byte[] pub) throws AssertFailException{ Preconditions.checkArgument(data.length == 32 && signature.length <= 520 && pub.length <= 520); ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null) { - byteBuff = ByteBuffer.allocateDirect(32 + 8 + 520 + 520); + if (byteBuff == null || byteBuff.capacity() < 520) { + byteBuff = ByteBuffer.allocateDirect(520); byteBuff.order(ByteOrder.nativeOrder()); nativeECDSABuffer.set(byteBuff); } byteBuff.rewind(); byteBuff.put(data); - byteBuff.putInt(signature.length); - byteBuff.putInt(pub.length); byteBuff.put(signature); byteBuff.put(pub); - return secp256k1_ecdsa_verify(byteBuff) == 1; + + byte[][] retByteArray; + + r.lock(); + try { + return secp256k1_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1; + } finally { + r.unlock(); + } + } + + /** + * libsecp256k1 Create an ECDSA signature. + * + * @param data Message hash, 32 bytes + * @param key Secret key, 32 bytes + * + * Return values + * @param sig byte array of signature + */ + public static byte[] sign(byte[] data, byte[] sec) throws AssertFailException{ + Preconditions.checkArgument(data.length == 32 && sec.length <= 32); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < 32 + 32) { + byteBuff = ByteBuffer.allocateDirect(32 + 32); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(data); + byteBuff.put(sec); + + byte[][] retByteArray; + + r.lock(); + try { + retByteArray = secp256k1_ecdsa_sign(byteBuff, Secp256k1Context.getContext()); + } finally { + r.unlock(); + } + + byte[] sigArr = retByteArray[0]; + int sigLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); + int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); + + assertEquals(sigArr.length, sigLen, "Got bad signature length."); + + return retVal == 0 ? 
new byte[0] : sigArr; + } + + /** + * libsecp256k1 Seckey Verify - returns 1 if valid, 0 if invalid + * + * @param seckey ECDSA Secret key, 32 bytes + */ + public static boolean secKeyVerify(byte[] seckey) { + Preconditions.checkArgument(seckey.length == 32); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < seckey.length) { + byteBuff = ByteBuffer.allocateDirect(seckey.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(seckey); + + r.lock(); + try { + return secp256k1_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1; + } finally { + r.unlock(); + } + } + + + /** + * libsecp256k1 Compute Pubkey - computes public key from secret key + * + * @param seckey ECDSA Secret key, 32 bytes + * + * Return values + * @param pubkey ECDSA Public key, 33 or 65 bytes + */ + //TODO add a 'compressed' arg + public static byte[] computePubkey(byte[] seckey) throws AssertFailException{ + Preconditions.checkArgument(seckey.length == 32); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < seckey.length) { + byteBuff = ByteBuffer.allocateDirect(seckey.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(seckey); + + byte[][] retByteArray; + + r.lock(); + try { + retByteArray = secp256k1_ec_pubkey_create(byteBuff, Secp256k1Context.getContext()); + } finally { + r.unlock(); + } + + byte[] pubArr = retByteArray[0]; + int pubLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); + int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); + + assertEquals(pubArr.length, pubLen, "Got bad pubkey length."); + + return retVal == 0 ? new byte[0]: pubArr; + } + + /** + * libsecp256k1 Cleanup - This destroys the secp256k1 context object + * This should be called at the end of the program for proper cleanup of the context. 
+ */ + public static synchronized void cleanup() { + w.lock(); + try { + secp256k1_destroy_context(Secp256k1Context.getContext()); + } finally { + w.unlock(); + } + } + + public static long cloneContext() { + r.lock(); + try { + return secp256k1_ctx_clone(Secp256k1Context.getContext()); + } finally { r.unlock(); } + } + + /** + * libsecp256k1 PrivKey Tweak-Mul - Tweak privkey by multiplying to it + * + * @param tweak some bytes to tweak with + * @param seckey 32-byte seckey + */ + public static byte[] privKeyTweakMul(byte[] privkey, byte[] tweak) throws AssertFailException{ + Preconditions.checkArgument(privkey.length == 32); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) { + byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(privkey); + byteBuff.put(tweak); + + byte[][] retByteArray; + r.lock(); + try { + retByteArray = secp256k1_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext()); + } finally { + r.unlock(); + } + + byte[] privArr = retByteArray[0]; + + int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; + int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); + + assertEquals(privArr.length, privLen, "Got bad pubkey length."); + + assertEquals(retVal, 1, "Failed return value check."); + + return privArr; + } + + /** + * libsecp256k1 PrivKey Tweak-Add - Tweak privkey by adding to it + * + * @param tweak some bytes to tweak with + * @param seckey 32-byte seckey + */ + public static byte[] privKeyTweakAdd(byte[] privkey, byte[] tweak) throws AssertFailException{ + Preconditions.checkArgument(privkey.length == 32); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) { + byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(privkey); + byteBuff.put(tweak); + + byte[][] retByteArray; + r.lock(); + try { + retByteArray = secp256k1_privkey_tweak_add(byteBuff,Secp256k1Context.getContext()); + } finally { + r.unlock(); + } + + byte[] privArr = retByteArray[0]; + + int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; + int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); + + assertEquals(privArr.length, privLen, "Got bad pubkey length."); + + assertEquals(retVal, 1, "Failed return value check."); + + return privArr; + } + + /** + * libsecp256k1 PubKey Tweak-Add - Tweak pubkey by adding to it + * + * @param tweak some bytes to tweak with + * @param pubkey 32-byte seckey + */ + public static byte[] pubKeyTweakAdd(byte[] pubkey, byte[] tweak) throws AssertFailException{ + Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) { + byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(pubkey); + byteBuff.put(tweak); + + byte[][] retByteArray; + r.lock(); + try { + retByteArray = secp256k1_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length); + } finally { + 
r.unlock(); + } + + byte[] pubArr = retByteArray[0]; + + int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; + int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); + + assertEquals(pubArr.length, pubLen, "Got bad pubkey length."); + + assertEquals(retVal, 1, "Failed return value check."); + + return pubArr; + } + + /** + * libsecp256k1 PubKey Tweak-Mul - Tweak pubkey by multiplying to it + * + * @param tweak some bytes to tweak with + * @param pubkey 32-byte seckey + */ + public static byte[] pubKeyTweakMul(byte[] pubkey, byte[] tweak) throws AssertFailException{ + Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) { + byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(pubkey); + byteBuff.put(tweak); + + byte[][] retByteArray; + r.lock(); + try { + retByteArray = secp256k1_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length); + } finally { + r.unlock(); + } + + byte[] pubArr = retByteArray[0]; + + int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; + int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); + + assertEquals(pubArr.length, pubLen, "Got bad pubkey length."); + + assertEquals(retVal, 1, "Failed return value check."); + + return pubArr; } /** - * @param byteBuff signature format is byte[32] data, - * native-endian int signatureLength, native-endian int pubkeyLength, - * byte[signatureLength] signature, byte[pubkeyLength] pub - * @returns 1 for valid signature, anything else for invalid + * libsecp256k1 create ECDH secret - constant time ECDH calculation + * + * @param seckey byte array of secret key used in exponentiaion + * @param pubkey byte array of public key used in exponentiaion */ - private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff); + public static byte[] createECDHSecret(byte[] seckey, byte[] pubkey) throws AssertFailException{ + Preconditions.checkArgument(seckey.length <= 32 && pubkey.length <= 65); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < 32 + pubkey.length) { + byteBuff = ByteBuffer.allocateDirect(32 + pubkey.length); + byteBuff.order(ByteOrder.nativeOrder()); + nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(seckey); + byteBuff.put(pubkey); + + byte[][] retByteArray; + r.lock(); + try { + retByteArray = secp256k1_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length); + } finally { + r.unlock(); + } + + byte[] resArr = retByteArray[0]; + int retVal = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); + + assertEquals(resArr.length, 32, "Got bad result length."); + assertEquals(retVal, 1, "Failed return value check."); + + return resArr; + } + + /** + * libsecp256k1 randomize - updates the context randomization + * + * @param seed 32-byte random seed + */ + public static synchronized boolean randomize(byte[] seed) throws AssertFailException{ + Preconditions.checkArgument(seed.length == 32 || seed == null); + + ByteBuffer byteBuff = nativeECDSABuffer.get(); + if (byteBuff == null || byteBuff.capacity() < seed.length) { + byteBuff = ByteBuffer.allocateDirect(seed.length); + byteBuff.order(ByteOrder.nativeOrder()); + 
nativeECDSABuffer.set(byteBuff); + } + byteBuff.rewind(); + byteBuff.put(seed); + + w.lock(); + try { + return secp256k1_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1; + } finally { + w.unlock(); + } + } + + private static native long secp256k1_ctx_clone(long context); + + private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context); + + private static native byte[][] secp256k1_privkey_tweak_add(ByteBuffer byteBuff, long context); + + private static native byte[][] secp256k1_privkey_tweak_mul(ByteBuffer byteBuff, long context); + + private static native byte[][] secp256k1_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen); + + private static native byte[][] secp256k1_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen); + + private static native void secp256k1_destroy_context(long context); + + private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen); + + private static native byte[][] secp256k1_ecdsa_sign(ByteBuffer byteBuff, long context); + + private static native int secp256k1_ec_seckey_verify(ByteBuffer byteBuff, long context); + + private static native byte[][] secp256k1_ec_pubkey_create(ByteBuffer byteBuff, long context); + + private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen); + + private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen); + } diff --git a/src/java/org/bitcoin/NativeSecp256k1Test.java b/src/java/org/bitcoin/NativeSecp256k1Test.java new file mode 100644 index 0000000000..c00d08899b --- /dev/null +++ b/src/java/org/bitcoin/NativeSecp256k1Test.java @@ -0,0 +1,226 @@ +package org.bitcoin; + +import com.google.common.io.BaseEncoding; +import java.util.Arrays; +import java.math.BigInteger; +import javax.xml.bind.DatatypeConverter; +import static org.bitcoin.NativeSecp256k1Util.*; + +/** + * This class holds test cases defined for testing this library. 
+ */ +public class NativeSecp256k1Test { + + //TODO improve comments/add more tests + /** + * This tests verify() for a valid signature + */ + public static void testVerifyPos() throws AssertFailException{ + boolean result = false; + byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" + byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase()); + byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); + + result = NativeSecp256k1.verify( data, sig, pub); + assertEquals( result, true , "testVerifyPos"); + } + + /** + * This tests verify() for a non-valid signature + */ + public static void testVerifyNeg() throws AssertFailException{ + boolean result = false; + byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A91".toLowerCase()); //sha256hash of "testing" + byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase()); + byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); + + result = NativeSecp256k1.verify( data, sig, pub); + //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16)); + assertEquals( result, false , "testVerifyNeg"); + } + + /** + * This tests secret key verify() for a valid secretkey + */ + public static void testSecKeyVerifyPos() throws AssertFailException{ + boolean result = false; + byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); + + result = NativeSecp256k1.secKeyVerify( sec ); + //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16)); + assertEquals( result, true , "testSecKeyVerifyPos"); + } + + /** + * This tests secret key verify() for a invalid secretkey + */ + public static void testSecKeyVerifyNeg() throws AssertFailException{ + boolean result = false; + byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase()); + + result = NativeSecp256k1.secKeyVerify( sec ); + //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16)); + assertEquals( result, false , "testSecKeyVerifyNeg"); + } + + /** + * This tests public key create() for a valid secretkey + */ + public static void testPubKeyCreatePos() throws AssertFailException{ + byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); + + byte[] resultArr = NativeSecp256k1.computePubkey( sec); + String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( pubkeyString , "04C591A8FF19AC9C4E4E5793673B83123437E975285E7B442F4EE2654DFFCA5E2D2103ED494718C697AC9AEBCFD19612E224DB46661011863ED2FC54E71861E2A6" , "testPubKeyCreatePos"); + } + + /** + * This tests public key create() for a invalid secretkey + */ + public static 
void testPubKeyCreateNeg() throws AssertFailException{ + byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase()); + + byte[] resultArr = NativeSecp256k1.computePubkey( sec); + String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( pubkeyString, "" , "testPubKeyCreateNeg"); + } + + /** + * This tests sign() for a valid secretkey + */ + public static void testSignPos() throws AssertFailException{ + + byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" + byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); + + byte[] resultArr = NativeSecp256k1.sign(data, sec); + String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( sigString, "30440220182A108E1448DC8F1FB467D06A0F3BB8EA0533584CB954EF8DA112F1D60E39A202201C66F36DA211C087F3AF88B50EDF4F9BDAA6CF5FD6817E74DCA34DB12390C6E9" , "testSignPos"); + } + + /** + * This tests sign() for a invalid secretkey + */ + public static void testSignNeg() throws AssertFailException{ + byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" + byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase()); + + byte[] resultArr = NativeSecp256k1.sign(data, sec); + String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( sigString, "" , "testSignNeg"); + } + + /** + * This tests private key tweak-add + */ + public static void testPrivKeyTweakAdd_1() throws AssertFailException { + byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); + byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" + + byte[] resultArr = NativeSecp256k1.privKeyTweakAdd( sec , data ); + String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( sigString , "A168571E189E6F9A7E2D657A4B53AE99B909F7E712D1C23CED28093CD57C88F3" , "testPrivKeyAdd_1"); + } + + /** + * This tests private key tweak-mul + */ + public static void testPrivKeyTweakMul_1() throws AssertFailException { + byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); + byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" + + byte[] resultArr = NativeSecp256k1.privKeyTweakMul( sec , data ); + String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( sigString , "97F8184235F101550F3C71C927507651BD3F1CDB4A5A33B8986ACF0DEE20FFFC" , "testPrivKeyMul_1"); + } + + /** + * This tests private key tweak-add uncompressed + */ + public static void testPrivKeyTweakAdd_2() throws AssertFailException { + byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); + byte[] data = 
BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" + + byte[] resultArr = NativeSecp256k1.pubKeyTweakAdd( pub , data ); + String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( sigString , "0411C6790F4B663CCE607BAAE08C43557EDC1A4D11D88DFCB3D841D0C6A941AF525A268E2A863C148555C48FB5FBA368E88718A46E205FABC3DBA2CCFFAB0796EF" , "testPrivKeyAdd_2"); + } + + /** + * This tests private key tweak-mul uncompressed + */ + public static void testPrivKeyTweakMul_2() throws AssertFailException { + byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); + byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" + + byte[] resultArr = NativeSecp256k1.pubKeyTweakMul( pub , data ); + String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( sigString , "04E0FE6FE55EBCA626B98A807F6CAF654139E14E5E3698F01A9A658E21DC1D2791EC060D4F412A794D5370F672BC94B722640B5F76914151CFCA6E712CA48CC589" , "testPrivKeyMul_2"); + } + + /** + * This tests seed randomization + */ + public static void testRandomize() throws AssertFailException { + byte[] seed = BaseEncoding.base16().lowerCase().decode("A441B15FE9A3CF56661190A0B93B9DEC7D04127288CC87250967CF3B52894D11".toLowerCase()); //sha256hash of "random" + boolean result = NativeSecp256k1.randomize(seed); + assertEquals( result, true, "testRandomize"); + } + + public static void testCreateECDHSecret() throws AssertFailException{ + + byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); + byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); + + byte[] resultArr = NativeSecp256k1.createECDHSecret(sec, pub); + String ecdhString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); + assertEquals( ecdhString, "2A2A67007A926E6594AF3EB564FC74005B37A9C8AEF2033C4552051B5C87F043" , "testCreateECDHSecret"); + } + + public static void main(String[] args) throws AssertFailException{ + + + System.out.println("\n libsecp256k1 enabled: " + Secp256k1Context.isEnabled() + "\n"); + + assertEquals( Secp256k1Context.isEnabled(), true, "isEnabled" ); + + //Test verify() success/fail + testVerifyPos(); + testVerifyNeg(); + + //Test secKeyVerify() success/fail + testSecKeyVerifyPos(); + testSecKeyVerifyNeg(); + + //Test computePubkey() success/fail + testPubKeyCreatePos(); + testPubKeyCreateNeg(); + + //Test sign() success/fail + testSignPos(); + testSignNeg(); + + //Test privKeyTweakAdd() 1 + testPrivKeyTweakAdd_1(); + + //Test privKeyTweakMul() 2 + testPrivKeyTweakMul_1(); + + //Test privKeyTweakAdd() 3 + testPrivKeyTweakAdd_2(); + + //Test privKeyTweakMul() 4 + testPrivKeyTweakMul_2(); + + //Test randomize() + testRandomize(); + + //Test ECDH + testCreateECDHSecret(); + + NativeSecp256k1.cleanup(); + + System.out.println(" All tests passed." 
); + + } +} diff --git a/src/java/org/bitcoin/NativeSecp256k1Util.java b/src/java/org/bitcoin/NativeSecp256k1Util.java new file mode 100644 index 0000000000..04732ba044 --- /dev/null +++ b/src/java/org/bitcoin/NativeSecp256k1Util.java @@ -0,0 +1,45 @@ +/* + * Copyright 2014-2016 the libsecp256k1 contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bitcoin; + +public class NativeSecp256k1Util{ + + public static void assertEquals( int val, int val2, String message ) throws AssertFailException{ + if( val != val2 ) + throw new AssertFailException("FAIL: " + message); + } + + public static void assertEquals( boolean val, boolean val2, String message ) throws AssertFailException{ + if( val != val2 ) + throw new AssertFailException("FAIL: " + message); + else + System.out.println("PASS: " + message); + } + + public static void assertEquals( String val, String val2, String message ) throws AssertFailException{ + if( !val.equals(val2) ) + throw new AssertFailException("FAIL: " + message); + else + System.out.println("PASS: " + message); + } + + public static class AssertFailException extends Exception { + public AssertFailException(String message) { + super( message ); + } + } +} diff --git a/src/java/org/bitcoin/Secp256k1Context.java b/src/java/org/bitcoin/Secp256k1Context.java new file mode 100644 index 0000000000..216c986a8b --- /dev/null +++ b/src/java/org/bitcoin/Secp256k1Context.java @@ -0,0 +1,51 @@ +/* + * Copyright 2014-2016 the libsecp256k1 contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bitcoin; + +/** + * This class holds the context reference used in native methods + * to handle ECDSA operations. 
+ */ +public class Secp256k1Context { + private static final boolean enabled; //true if the library is loaded + private static final long context; //ref to pointer to context obj + + static { //static initializer + boolean isEnabled = true; + long contextRef = -1; + try { + System.loadLibrary("secp256k1"); + contextRef = secp256k1_init_context(); + } catch (UnsatisfiedLinkError e) { + System.out.println("UnsatisfiedLinkError: " + e.toString()); + isEnabled = false; + } + enabled = isEnabled; + context = contextRef; + } + + public static boolean isEnabled() { + return enabled; + } + + public static long getContext() { + if(!enabled) return -1; //sanity check + return context; + } + + private static native long secp256k1_init_context(); +} diff --git a/src/java/org_bitcoin_NativeSecp256k1.c b/src/java/org_bitcoin_NativeSecp256k1.c index bb4cd70728..bcef7b32ce 100644 --- a/src/java/org_bitcoin_NativeSecp256k1.c +++ b/src/java/org_bitcoin_NativeSecp256k1.c @@ -1,23 +1,377 @@ +#include +#include +#include #include "org_bitcoin_NativeSecp256k1.h" #include "include/secp256k1.h" +#include "include/secp256k1_ecdh.h" +#include "include/secp256k1_recovery.h" -JNIEXPORT jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify - (JNIEnv* env, jclass classObject, jobject byteBufferObject) + +SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone + (JNIEnv* env, jclass classObject, jlong ctx_l) +{ + const secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + + jlong ctx_clone_l = (uintptr_t) secp256k1_context_clone(ctx); + + (void)classObject;(void)env; + + return ctx_clone_l; + +} + +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) { - unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - int sigLen = *((int*)(data + 32)); - int pubLen = *((int*)(data + 32 + 4)); + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + + const unsigned char* seed = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + + (void)classObject; + + return secp256k1_context_randomize(ctx, seed); - return secp256k1_ecdsa_verify(data, 32, data+32+8, sigLen, data+32+8+sigLen, pubLen); } -static void __javasecp256k1_attach(void) __attribute__((constructor)); -static void __javasecp256k1_detach(void) __attribute__((destructor)); +SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context + (JNIEnv* env, jclass classObject, jlong ctx_l) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + + secp256k1_context_destroy(ctx); -static void __javasecp256k1_attach(void) { - secp256k1_start(SECP256K1_START_VERIFY); + (void)classObject;(void)env; } -static void __javasecp256k1_detach(void) { - secp256k1_stop(); +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint siglen, jint publen) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + + unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + const unsigned char* sigdata = { (unsigned char*) (data + 32) }; + const unsigned char* pubdata = { (unsigned char*) (data + siglen + 32) }; + + secp256k1_ecdsa_signature sig; + secp256k1_pubkey pubkey; + + int ret = secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen); + + if( ret ) { + ret = 
secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); + + if( ret ) { + ret = secp256k1_ecdsa_verify(ctx, &sig, data, &pubkey); + } + } + + (void)classObject; + + return ret; +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + unsigned char* secKey = (unsigned char*) (data + 32); + + jobjectArray retArray; + jbyteArray sigArray, intsByteArray; + unsigned char intsarray[2]; + + secp256k1_ecdsa_signature sig[72]; + + int ret = secp256k1_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL ); + + unsigned char outputSer[72]; + size_t outputLen = 72; + + if( ret ) { + int ret2 = secp256k1_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2; + } + + intsarray[0] = outputLen; + intsarray[1] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + sigArray = (*env)->NewByteArray(env, outputLen); + (*env)->SetByteArrayRegion(env, sigArray, 0, outputLen, (jbyte*)outputSer); + (*env)->SetObjectArrayElement(env, retArray, 0, sigArray); + + intsByteArray = (*env)->NewByteArray(env, 2); + (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; +} + +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + + (void)classObject; + + return secp256k1_ec_seckey_verify(ctx, secKey); +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + const unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + + secp256k1_pubkey pubkey; + + jobjectArray retArray; + jbyteArray pubkeyArray, intsByteArray; + unsigned char intsarray[2]; + + int ret = secp256k1_ec_pubkey_create(ctx, &pubkey, secKey); + + unsigned char outputSer[65]; + size_t outputLen = 65; + + if( ret ) { + int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; + } + + intsarray[0] = outputLen; + intsarray[1] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + pubkeyArray = (*env)->NewByteArray(env, outputLen); + (*env)->SetByteArrayRegion(env, pubkeyArray, 0, outputLen, (jbyte*)outputSer); + (*env)->SetObjectArrayElement(env, retArray, 0, pubkeyArray); + + intsByteArray = (*env)->NewByteArray(env, 2); + (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; + +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + unsigned 
char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + const unsigned char* tweak = (unsigned char*) (privkey + 32); + + jobjectArray retArray; + jbyteArray privArray, intsByteArray; + unsigned char intsarray[2]; + + int privkeylen = 32; + + int ret = secp256k1_ec_privkey_tweak_add(ctx, privkey, tweak); + + intsarray[0] = privkeylen; + intsarray[1] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + privArray = (*env)->NewByteArray(env, privkeylen); + (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey); + (*env)->SetObjectArrayElement(env, retArray, 0, privArray); + + intsByteArray = (*env)->NewByteArray(env, 2); + (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); + const unsigned char* tweak = (unsigned char*) (privkey + 32); + + jobjectArray retArray; + jbyteArray privArray, intsByteArray; + unsigned char intsarray[2]; + + int privkeylen = 32; + + int ret = secp256k1_ec_privkey_tweak_mul(ctx, privkey, tweak); + + intsarray[0] = privkeylen; + intsarray[1] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + privArray = (*env)->NewByteArray(env, privkeylen); + (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey); + (*env)->SetObjectArrayElement(env, retArray, 0, privArray); + + intsByteArray = (*env)->NewByteArray(env, 2); + (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; +/* secp256k1_pubkey* pubkey = (secp256k1_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/ + unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject); + const unsigned char* tweak = (unsigned char*) (pkey + publen); + + jobjectArray retArray; + jbyteArray pubArray, intsByteArray; + unsigned char intsarray[2]; + unsigned char outputSer[65]; + size_t outputLen = 65; + + secp256k1_pubkey pubkey; + int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen); + + if( ret ) { + ret = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak); + } + + if( ret ) { + int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; + } + + intsarray[0] = outputLen; + intsarray[1] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + pubArray = (*env)->NewByteArray(env, outputLen); + (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer); + (*env)->SetObjectArrayElement(env, retArray, 0, pubArray); + + intsByteArray = (*env)->NewByteArray(env, 2); + (*env)->SetByteArrayRegion(env, 
intsByteArray, 0, 2, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject); + const unsigned char* tweak = (unsigned char*) (pkey + publen); + + jobjectArray retArray; + jbyteArray pubArray, intsByteArray; + unsigned char intsarray[2]; + unsigned char outputSer[65]; + size_t outputLen = 65; + + secp256k1_pubkey pubkey; + int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen); + + if ( ret ) { + ret = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, tweak); + } + + if( ret ) { + int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; + } + + intsarray[0] = outputLen; + intsarray[1] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + pubArray = (*env)->NewByteArray(env, outputLen); + (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer); + (*env)->SetObjectArrayElement(env, retArray, 0, pubArray); + + intsByteArray = (*env)->NewByteArray(env, 2); + (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; +} + +SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1pubkey_1combine + (JNIEnv * env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint numkeys) +{ + (void)classObject;(void)env;(void)byteBufferObject;(void)ctx_l;(void)numkeys; + + return 0; +} + +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) +{ + secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; + const unsigned char* secdata = (*env)->GetDirectBufferAddress(env, byteBufferObject); + const unsigned char* pubdata = (const unsigned char*) (secdata + 32); + + jobjectArray retArray; + jbyteArray outArray, intsByteArray; + unsigned char intsarray[1]; + secp256k1_pubkey pubkey; + unsigned char nonce_res[32]; + size_t outputLen = 32; + + int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); + + if (ret) { + ret = secp256k1_ecdh( + ctx, + nonce_res, + &pubkey, + secdata + ); + } + + intsarray[0] = ret; + + retArray = (*env)->NewObjectArray(env, 2, + (*env)->FindClass(env, "[B"), + (*env)->NewByteArray(env, 1)); + + outArray = (*env)->NewByteArray(env, outputLen); + (*env)->SetByteArrayRegion(env, outArray, 0, 32, (jbyte*)nonce_res); + (*env)->SetObjectArrayElement(env, retArray, 0, outArray); + + intsByteArray = (*env)->NewByteArray(env, 1); + (*env)->SetByteArrayRegion(env, intsByteArray, 0, 1, (jbyte*)intsarray); + (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); + + (void)classObject; + + return retArray; } diff --git a/src/java/org_bitcoin_NativeSecp256k1.h b/src/java/org_bitcoin_NativeSecp256k1.h index d7fb004fa8..fe613c9e9e 100644 --- a/src/java/org_bitcoin_NativeSecp256k1.h +++ b/src/java/org_bitcoin_NativeSecp256k1.h @@ -1,5 +1,6 @@ /* DO NOT EDIT THIS FILE - it is machine generated */ #include +#include "include/secp256k1.h" /* Header for class 
org_bitcoin_NativeSecp256k1 */ #ifndef _Included_org_bitcoin_NativeSecp256k1 @@ -7,13 +8,110 @@ #ifdef __cplusplus extern "C" { #endif +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_ctx_clone + * Signature: (J)J + */ +SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone + (JNIEnv *, jclass, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_context_randomize + * Signature: (Ljava/nio/ByteBuffer;J)I + */ +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_privkey_tweak_add + * Signature: (Ljava/nio/ByteBuffer;J)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_privkey_tweak_mul + * Signature: (Ljava/nio/ByteBuffer;J)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_pubkey_tweak_add + * Signature: (Ljava/nio/ByteBuffer;JI)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add + (JNIEnv *, jclass, jobject, jlong, jint); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_pubkey_tweak_mul + * Signature: (Ljava/nio/ByteBuffer;JI)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul + (JNIEnv *, jclass, jobject, jlong, jint); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_destroy_context + * Signature: (J)V + */ +SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context + (JNIEnv *, jclass, jlong); + /* * Class: org_bitcoin_NativeSecp256k1 * Method: secp256k1_ecdsa_verify - * Signature: (Ljava/nio/ByteBuffer;)I + * Signature: (Ljava/nio/ByteBuffer;JII)I + */ +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify + (JNIEnv *, jclass, jobject, jlong, jint, jint); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_ecdsa_sign + * Signature: (Ljava/nio/ByteBuffer;J)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_ec_seckey_verify + * Signature: (Ljava/nio/ByteBuffer;J)I + */ +SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_ec_pubkey_create + * Signature: (Ljava/nio/ByteBuffer;J)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_ec_pubkey_parse + * Signature: (Ljava/nio/ByteBuffer;JI)[[B + */ +SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1parse + (JNIEnv *, jclass, jobject, jlong, jint); + +/* + * Class: org_bitcoin_NativeSecp256k1 + * Method: secp256k1_ecdh + * Signature: (Ljava/nio/ByteBuffer;JI)[[B */ -JNIEXPORT jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify - (JNIEnv *, jclass, jobject); +SECP256K1_API jobjectArray JNICALL 
Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh + (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen); + #ifdef __cplusplus } diff --git a/src/java/org_bitcoin_Secp256k1Context.c b/src/java/org_bitcoin_Secp256k1Context.c new file mode 100644 index 0000000000..a52939e7e7 --- /dev/null +++ b/src/java/org_bitcoin_Secp256k1Context.c @@ -0,0 +1,15 @@ +#include +#include +#include "org_bitcoin_Secp256k1Context.h" +#include "include/secp256k1.h" + +SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context + (JNIEnv* env, jclass classObject) +{ + secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + + (void)classObject;(void)env; + + return (uintptr_t)ctx; +} + diff --git a/src/java/org_bitcoin_Secp256k1Context.h b/src/java/org_bitcoin_Secp256k1Context.h new file mode 100644 index 0000000000..0d2bc84b7f --- /dev/null +++ b/src/java/org_bitcoin_Secp256k1Context.h @@ -0,0 +1,22 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +#include "include/secp256k1.h" +/* Header for class org_bitcoin_Secp256k1Context */ + +#ifndef _Included_org_bitcoin_Secp256k1Context +#define _Included_org_bitcoin_Secp256k1Context +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: org_bitcoin_Secp256k1Context + * Method: secp256k1_init_context + * Signature: ()J + */ +SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context + (JNIEnv *, jclass); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/src/modules/ecdh/Makefile.am.include b/src/modules/ecdh/Makefile.am.include index 670b9c1152..e3088b4697 100644 --- a/src/modules/ecdh/Makefile.am.include +++ b/src/modules/ecdh/Makefile.am.include @@ -4,5 +4,5 @@ noinst_HEADERS += src/modules/ecdh/tests_impl.h if USE_BENCHMARK noinst_PROGRAMS += bench_ecdh bench_ecdh_SOURCES = src/bench_ecdh.c -bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS) +bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) endif diff --git a/src/modules/recovery/Makefile.am.include b/src/modules/recovery/Makefile.am.include index 5de3ea33ea..bf23c26e71 100644 --- a/src/modules/recovery/Makefile.am.include +++ b/src/modules/recovery/Makefile.am.include @@ -4,5 +4,5 @@ noinst_HEADERS += src/modules/recovery/tests_impl.h if USE_BENCHMARK noinst_PROGRAMS += bench_recover bench_recover_SOURCES = src/bench_recover.c -bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS) +bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) endif diff --git a/src/modules/recovery/main_impl.h b/src/modules/recovery/main_impl.h old mode 100644 new mode 100755 index ec42f4bb6c..86f2f0cb2b --- a/src/modules/recovery/main_impl.h +++ b/src/modules/recovery/main_impl.h @@ -138,16 +138,15 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd secp256k1_scalar_set_b32(&sec, seckey, &overflow); /* Fail if the secret key is invalid. 
*/ if (!overflow && !secp256k1_scalar_is_zero(&sec)) { + unsigned char nonce32[32]; unsigned int count = 0; secp256k1_scalar_set_b32(&msg, msg32, NULL); while (1) { - unsigned char nonce32[32]; ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } secp256k1_scalar_set_b32(&non, nonce32, &overflow); - memset(nonce32, 0, 32); if (!secp256k1_scalar_is_zero(&non) && !overflow) { if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) { break; @@ -155,6 +154,7 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd } count++; } + memset(nonce32, 0, 32); secp256k1_scalar_clear(&msg); secp256k1_scalar_clear(&non); secp256k1_scalar_clear(&sec); diff --git a/src/modules/schnorr/Makefile.am.include b/src/modules/schnorr/Makefile.am.include deleted file mode 100644 index b3bfa7d5cc..0000000000 --- a/src/modules/schnorr/Makefile.am.include +++ /dev/null @@ -1,10 +0,0 @@ -include_HEADERS += include/secp256k1_schnorr.h -noinst_HEADERS += src/modules/schnorr/main_impl.h -noinst_HEADERS += src/modules/schnorr/schnorr.h -noinst_HEADERS += src/modules/schnorr/schnorr_impl.h -noinst_HEADERS += src/modules/schnorr/tests_impl.h -if USE_BENCHMARK -noinst_PROGRAMS += bench_schnorr_verify -bench_schnorr_verify_SOURCES = src/bench_schnorr_verify.c -bench_schnorr_verify_LDADD = libsecp256k1.la $(SECP_LIBS) -endif diff --git a/src/modules/schnorr/main_impl.h b/src/modules/schnorr/main_impl.h deleted file mode 100644 index fa176a1767..0000000000 --- a/src/modules/schnorr/main_impl.h +++ /dev/null @@ -1,164 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef SECP256K1_MODULE_SCHNORR_MAIN -#define SECP256K1_MODULE_SCHNORR_MAIN - -#include "include/secp256k1_schnorr.h" -#include "modules/schnorr/schnorr_impl.h" - -static void secp256k1_schnorr_msghash_sha256(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) { - secp256k1_sha256_t sha; - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, r32, 32); - secp256k1_sha256_write(&sha, msg32, 32); - secp256k1_sha256_finalize(&sha, h32); -} - -static const unsigned char secp256k1_schnorr_algo16[17] = "Schnorr+SHA256 "; - -int secp256k1_schnorr_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) { - secp256k1_scalar sec, non; - int ret = 0; - int overflow = 0; - unsigned int count = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(seckey != NULL); - if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; - } - - secp256k1_scalar_set_b32(&sec, seckey, NULL); - while (1) { - unsigned char nonce32[32]; - ret = noncefp(nonce32, msg32, seckey, secp256k1_schnorr_algo16, (void*)noncedata, count); - if (!ret) { - break; - } - secp256k1_scalar_set_b32(&non, nonce32, &overflow); - memset(nonce32, 0, 32); - if (!secp256k1_scalar_is_zero(&non) && !overflow) { - if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, NULL, secp256k1_schnorr_msghash_sha256, msg32)) { - break; - } - } - count++; - } 
- if (!ret) { - memset(sig64, 0, 64); - } - secp256k1_scalar_clear(&non); - secp256k1_scalar_clear(&sec); - return ret; -} - -int secp256k1_schnorr_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_pubkey *pubkey) { - secp256k1_ge q; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(pubkey != NULL); - - secp256k1_pubkey_load(ctx, &q, pubkey); - return secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32); -} - -int secp256k1_schnorr_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *sig64, const unsigned char *msg32) { - secp256k1_ge q; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(pubkey != NULL); - - if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32)) { - secp256k1_pubkey_save(pubkey, &q); - return 1; - } else { - memset(pubkey, 0, sizeof(*pubkey)); - return 0; - } -} - -int secp256k1_schnorr_generate_nonce_pair(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, unsigned char *privnonce32, const unsigned char *sec32, const unsigned char *msg32, secp256k1_nonce_function noncefp, const void* noncedata) { - int count = 0; - int ret = 1; - secp256k1_gej Qj; - secp256k1_ge Q; - secp256k1_scalar sec; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sec32 != NULL); - ARG_CHECK(pubnonce != NULL); - ARG_CHECK(privnonce32 != NULL); - - if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; - } - - do { - int overflow; - ret = noncefp(privnonce32, sec32, msg32, secp256k1_schnorr_algo16, (void*)noncedata, count++); - if (!ret) { - break; - } - secp256k1_scalar_set_b32(&sec, privnonce32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&sec)) { - continue; - } - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sec); - secp256k1_ge_set_gej(&Q, &Qj); - - secp256k1_pubkey_save(pubnonce, &Q); - break; - } while(1); - - secp256k1_scalar_clear(&sec); - if (!ret) { - memset(pubnonce, 0, sizeof(*pubnonce)); - } - return ret; -} - -int secp256k1_schnorr_partial_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *sec32, const secp256k1_pubkey *pubnonce_others, const unsigned char *secnonce32) { - int overflow = 0; - secp256k1_scalar sec, non; - secp256k1_ge pubnon; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(sec32 != NULL); - ARG_CHECK(secnonce32 != NULL); - ARG_CHECK(pubnonce_others != NULL); - - secp256k1_scalar_set_b32(&sec, sec32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&sec)) { - return -1; - } - secp256k1_scalar_set_b32(&non, secnonce32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&non)) { - return -1; - } - secp256k1_pubkey_load(ctx, &pubnon, pubnonce_others); - return secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, &pubnon, secp256k1_schnorr_msghash_sha256, msg32); -} - -int secp256k1_schnorr_partial_combine(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char * const *sig64sin, size_t n) { - ARG_CHECK(sig64 != NULL); - 
ARG_CHECK(n >= 1); - ARG_CHECK(sig64sin != NULL); - return secp256k1_schnorr_sig_combine(sig64, n, sig64sin); -} - -#endif diff --git a/src/modules/schnorr/schnorr.h b/src/modules/schnorr/schnorr.h deleted file mode 100644 index de18147bd5..0000000000 --- a/src/modules/schnorr/schnorr.h +++ /dev/null @@ -1,20 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php. * - ***********************************************************************/ - -#ifndef _SECP256K1_MODULE_SCHNORR_H_ -#define _SECP256K1_MODULE_SCHNORR_H_ - -#include "scalar.h" -#include "group.h" - -typedef void (*secp256k1_schnorr_msghash)(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32); - -static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32); -static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32); -static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32); -static int secp256k1_schnorr_sig_combine(unsigned char *sig64, size_t n, const unsigned char * const *sig64ins); - -#endif diff --git a/src/modules/schnorr/schnorr_impl.h b/src/modules/schnorr/schnorr_impl.h deleted file mode 100644 index e13ab6db7c..0000000000 --- a/src/modules/schnorr/schnorr_impl.h +++ /dev/null @@ -1,207 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php. * - ***********************************************************************/ - -#ifndef _SECP256K1_SCHNORR_IMPL_H_ -#define _SECP256K1_SCHNORR_IMPL_H_ - -#include - -#include "schnorr.h" -#include "num.h" -#include "field.h" -#include "group.h" -#include "ecmult.h" -#include "ecmult_gen.h" - -/** - * Custom Schnorr-based signature scheme. They support multiparty signing, public key - * recovery and batch validation. - * - * Rationale for verifying R's y coordinate: - * In order to support batch validation and public key recovery, the full R point must - * be known to verifiers, rather than just its x coordinate. In order to not risk - * being more strict in batch validation than normal validation, validators must be - * required to reject signatures with incorrect y coordinate. This is only possible - * by including a (relatively slow) field inverse, or a field square root. However, - * batch validation offers potentially much higher benefits than this cost. - * - * Rationale for having an implicit y coordinate oddness: - * If we commit to having the full R point known to verifiers, there are two mechanism. - * Either include its oddness in the signature, or give it an implicit fixed value. 
- * As the R y coordinate can be flipped by a simple negation of the nonce, we choose the - * latter, as it comes with nearly zero impact on signing or validation performance, and - * saves a byte in the signature. - * - * Signing: - * Inputs: 32-byte message m, 32-byte scalar key x (!=0), 32-byte scalar nonce k (!=0) - * - * Compute point R = k * G. Reject nonce if R's y coordinate is odd (or negate nonce). - * Compute 32-byte r, the serialization of R's x coordinate. - * Compute scalar h = Hash(r || m). Reject nonce if h == 0 or h >= order. - * Compute scalar s = k - h * x. - * The signature is (r, s). - * - * - * Verification: - * Inputs: 32-byte message m, public key point Q, signature: (32-byte r, scalar s) - * - * Signature is invalid if s >= order. - * Signature is invalid if r >= p. - * Compute scalar h = Hash(r || m). Signature is invalid if h == 0 or h >= order. - * Option 1 (faster for single verification): - * Compute point R = h * Q + s * G. Signature is invalid if R is infinity or R's y coordinate is odd. - * Signature is valid if the serialization of R's x coordinate equals r. - * Option 2 (allows batch validation and pubkey recovery): - * Decompress x coordinate r into point R, with odd y coordinate. Fail if R is not on the curve. - * Signature is valid if R + h * Q + s * G == 0. - */ - -static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32) { - secp256k1_gej Rj; - secp256k1_ge Ra; - unsigned char h32[32]; - secp256k1_scalar h, s; - int overflow; - secp256k1_scalar n; - - if (secp256k1_scalar_is_zero(key) || secp256k1_scalar_is_zero(nonce)) { - return 0; - } - n = *nonce; - - secp256k1_ecmult_gen(ctx, &Rj, &n); - if (pubnonce != NULL) { - secp256k1_gej_add_ge(&Rj, &Rj, pubnonce); - } - secp256k1_ge_set_gej(&Ra, &Rj); - secp256k1_fe_normalize(&Ra.y); - if (secp256k1_fe_is_odd(&Ra.y)) { - /* R's y coordinate is odd, which is not allowed (see rationale above). - Force it to be even by negating the nonce. Note that this even works - for multiparty signing, as the R point is known to all participants, - which can all decide to flip the sign in unison, resulting in the - overall R point to be negated too. 
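The correctness of the two verification options described in the comment above follows from a one-line calculation using only the definitions given there (a sanity check, not part of the patch). With public key Q = x * G, nonce point R = k * G, challenge h = Hash(r || m) and s = k - h * x:

    h * Q + s * G = h * (x * G) + (k - h * x) * G = k * G = R

so Option 1 simply recomputes R and compares x coordinates. For Option 2 the signer has forced R to have an even y coordinate, so the point decompressed from r with an odd y coordinate is -R, and

    (-R) + h * Q + s * G = (-R) + R = infinity

which is the batch-friendly form stated above.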
*/ - secp256k1_scalar_negate(&n, &n); - } - secp256k1_fe_normalize(&Ra.x); - secp256k1_fe_get_b32(sig64, &Ra.x); - hash(h32, sig64, msg32); - overflow = 0; - secp256k1_scalar_set_b32(&h, h32, &overflow); - if (overflow || secp256k1_scalar_is_zero(&h)) { - secp256k1_scalar_clear(&n); - return 0; - } - secp256k1_scalar_mul(&s, &h, key); - secp256k1_scalar_negate(&s, &s); - secp256k1_scalar_add(&s, &s, &n); - secp256k1_scalar_clear(&n); - secp256k1_scalar_get_b32(sig64 + 32, &s); - return 1; -} - -static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) { - secp256k1_gej Qj, Rj; - secp256k1_ge Ra; - secp256k1_fe Rx; - secp256k1_scalar h, s; - unsigned char hh[32]; - int overflow; - - if (secp256k1_ge_is_infinity(pubkey)) { - return 0; - } - hash(hh, sig64, msg32); - overflow = 0; - secp256k1_scalar_set_b32(&h, hh, &overflow); - if (overflow || secp256k1_scalar_is_zero(&h)) { - return 0; - } - overflow = 0; - secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow); - if (overflow) { - return 0; - } - if (!secp256k1_fe_set_b32(&Rx, sig64)) { - return 0; - } - secp256k1_gej_set_ge(&Qj, pubkey); - secp256k1_ecmult(ctx, &Rj, &Qj, &h, &s); - if (secp256k1_gej_is_infinity(&Rj)) { - return 0; - } - secp256k1_ge_set_gej_var(&Ra, &Rj); - secp256k1_fe_normalize_var(&Ra.y); - if (secp256k1_fe_is_odd(&Ra.y)) { - return 0; - } - return secp256k1_fe_equal_var(&Rx, &Ra.x); -} - -static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) { - secp256k1_gej Qj, Rj; - secp256k1_ge Ra; - secp256k1_fe Rx; - secp256k1_scalar h, s; - unsigned char hh[32]; - int overflow; - - hash(hh, sig64, msg32); - overflow = 0; - secp256k1_scalar_set_b32(&h, hh, &overflow); - if (overflow || secp256k1_scalar_is_zero(&h)) { - return 0; - } - overflow = 0; - secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow); - if (overflow) { - return 0; - } - if (!secp256k1_fe_set_b32(&Rx, sig64)) { - return 0; - } - if (!secp256k1_ge_set_xo_var(&Ra, &Rx, 0)) { - return 0; - } - secp256k1_gej_set_ge(&Rj, &Ra); - secp256k1_scalar_inverse_var(&h, &h); - secp256k1_scalar_negate(&s, &s); - secp256k1_scalar_mul(&s, &s, &h); - secp256k1_ecmult(ctx, &Qj, &Rj, &h, &s); - if (secp256k1_gej_is_infinity(&Qj)) { - return 0; - } - secp256k1_ge_set_gej(pubkey, &Qj); - return 1; -} - -static int secp256k1_schnorr_sig_combine(unsigned char *sig64, size_t n, const unsigned char * const *sig64ins) { - secp256k1_scalar s = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - size_t i; - for (i = 0; i < n; i++) { - secp256k1_scalar si; - int overflow; - secp256k1_scalar_set_b32(&si, sig64ins[i] + 32, &overflow); - if (overflow) { - return -1; - } - if (i) { - if (memcmp(sig64ins[i - 1], sig64ins[i], 32) != 0) { - return -1; - } - } - secp256k1_scalar_add(&s, &s, &si); - } - if (secp256k1_scalar_is_zero(&s)) { - return 0; - } - memcpy(sig64, sig64ins[0], 32); - secp256k1_scalar_get_b32(sig64 + 32, &s); - secp256k1_scalar_clear(&s); - return 1; -} - -#endif diff --git a/src/modules/schnorr/tests_impl.h b/src/modules/schnorr/tests_impl.h deleted file mode 100644 index 5bd14a03e3..0000000000 --- a/src/modules/schnorr/tests_impl.h +++ /dev/null @@ -1,175 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software 
license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef SECP256K1_MODULE_SCHNORR_TESTS -#define SECP256K1_MODULE_SCHNORR_TESTS - -#include "include/secp256k1_schnorr.h" - -void test_schnorr_end_to_end(void) { - unsigned char privkey[32]; - unsigned char message[32]; - unsigned char schnorr_signature[64]; - secp256k1_pubkey pubkey, recpubkey; - - /* Generate a random key and message. */ - { - secp256k1_scalar key; - random_scalar_order_test(&key); - secp256k1_scalar_get_b32(privkey, &key); - secp256k1_rand256_test(message); - } - - /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); - - /* Schnorr sign. */ - CHECK(secp256k1_schnorr_sign(ctx, schnorr_signature, message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 1); - CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) == 1); - CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0); - /* Destroy signature and verify again. */ - schnorr_signature[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255); - CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 0); - CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) != 1 || - memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0); -} - -/** Horribly broken hash function. Do not use for anything but tests. */ -void test_schnorr_hash(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) { - int i; - for (i = 0; i < 32; i++) { - h32[i] = r32[i] ^ msg32[i]; - } -} - -void test_schnorr_sign_verify(void) { - unsigned char msg32[32]; - unsigned char sig64[3][64]; - secp256k1_gej pubkeyj[3]; - secp256k1_ge pubkey[3]; - secp256k1_scalar nonce[3], key[3]; - int i = 0; - int k; - - secp256k1_rand256_test(msg32); - - for (k = 0; k < 3; k++) { - random_scalar_order_test(&key[k]); - - do { - random_scalar_order_test(&nonce[k]); - if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64[k], &key[k], &nonce[k], NULL, &test_schnorr_hash, msg32)) { - break; - } - } while(1); - - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubkeyj[k], &key[k]); - secp256k1_ge_set_gej_var(&pubkey[k], &pubkeyj[k]); - CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32)); - - for (i = 0; i < 4; i++) { - int pos = secp256k1_rand_bits(6); - int mod = 1 + secp256k1_rand_int(255); - sig64[k][pos] ^= mod; - CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32) == 0); - sig64[k][pos] ^= mod; - } - } -} - -void test_schnorr_threshold(void) { - unsigned char msg[32]; - unsigned char sec[5][32]; - secp256k1_pubkey pub[5]; - unsigned char nonce[5][32]; - secp256k1_pubkey pubnonce[5]; - unsigned char sig[5][64]; - const unsigned char* sigs[5]; - unsigned char allsig[64]; - const secp256k1_pubkey* pubs[5]; - secp256k1_pubkey allpub; - int n, i; - int damage; - int ret = 0; - - damage = secp256k1_rand_bits(1) ? 
(1 + secp256k1_rand_int(4)) : 0; - secp256k1_rand256_test(msg); - n = 2 + secp256k1_rand_int(4); - for (i = 0; i < n; i++) { - do { - secp256k1_rand256_test(sec[i]); - } while (!secp256k1_ec_seckey_verify(ctx, sec[i])); - CHECK(secp256k1_ec_pubkey_create(ctx, &pub[i], sec[i])); - CHECK(secp256k1_schnorr_generate_nonce_pair(ctx, &pubnonce[i], nonce[i], msg, sec[i], NULL, NULL)); - pubs[i] = &pub[i]; - } - if (damage == 1) { - nonce[secp256k1_rand_int(n)][secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255); - } else if (damage == 2) { - sec[secp256k1_rand_int(n)][secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255); - } - for (i = 0; i < n; i++) { - secp256k1_pubkey allpubnonce; - const secp256k1_pubkey *pubnonces[4]; - int j; - for (j = 0; j < i; j++) { - pubnonces[j] = &pubnonce[j]; - } - for (j = i + 1; j < n; j++) { - pubnonces[j - 1] = &pubnonce[j]; - } - CHECK(secp256k1_ec_pubkey_combine(ctx, &allpubnonce, pubnonces, n - 1)); - ret |= (secp256k1_schnorr_partial_sign(ctx, sig[i], msg, sec[i], &allpubnonce, nonce[i]) != 1) * 1; - sigs[i] = sig[i]; - } - if (damage == 3) { - sig[secp256k1_rand_int(n)][secp256k1_rand_bits(6)] ^= 1 + secp256k1_rand_int(255); - } - ret |= (secp256k1_ec_pubkey_combine(ctx, &allpub, pubs, n) != 1) * 2; - if ((ret & 1) == 0) { - ret |= (secp256k1_schnorr_partial_combine(ctx, allsig, sigs, n) != 1) * 4; - } - if (damage == 4) { - allsig[secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255); - } - if ((ret & 7) == 0) { - ret |= (secp256k1_schnorr_verify(ctx, allsig, msg, &allpub) != 1) * 8; - } - CHECK((ret == 0) == (damage == 0)); -} - -void test_schnorr_recovery(void) { - unsigned char msg32[32]; - unsigned char sig64[64]; - secp256k1_ge Q; - - secp256k1_rand256_test(msg32); - secp256k1_rand256_test(sig64); - secp256k1_rand256_test(sig64 + 32); - if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1) { - CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1); - } -} - -void run_schnorr_tests(void) { - int i; - for (i = 0; i < 32*count; i++) { - test_schnorr_end_to_end(); - } - for (i = 0; i < 32 * count; i++) { - test_schnorr_sign_verify(); - } - for (i = 0; i < 16 * count; i++) { - test_schnorr_recovery(); - } - for (i = 0; i < 10 * count; i++) { - test_schnorr_threshold(); - } -} - -#endif diff --git a/src/num.h b/src/num.h index ebfa71eb44..7bb9c5be8c 100644 --- a/src/num.h +++ b/src/num.h @@ -32,6 +32,9 @@ static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsi /** Compute a modular inverse. The input must be less than the modulus. */ static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m); +/** Compute the jacobi symbol (a|b). b must be positive and odd. */ +static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b); + /** Compare the absolute value of two numbers. */ static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b); @@ -57,6 +60,9 @@ static void secp256k1_num_shift(secp256k1_num *r, int bits); /** Check whether a number is zero. */ static int secp256k1_num_is_zero(const secp256k1_num *a); +/** Check whether a number is one. */ +static int secp256k1_num_is_one(const secp256k1_num *a); + /** Check whether a number is strictly negative. 
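Background on the secp256k1_num_jacobi declaration above (standard number theory, included only for context): for an odd prime p the Jacobi symbol (a|p) is the Legendre symbol, and Euler's criterion gives

    (a|p) == a ^ ((p - 1) / 2)  (mod p)

so the result is 1 when a is a nonzero square mod p, -1 when it is a non-square, and 0 when p divides a. For an odd composite b the symbol is the product over b's prime factors, and a value of -1 still guarantees that a is a non-square mod b. This is what makes the symbol usable as the square test behind the quadratic-residue helpers and tests added elsewhere in this patch.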
*/ static int secp256k1_num_is_neg(const secp256k1_num *a); diff --git a/src/num_gmp_impl.h b/src/num_gmp_impl.h index 7b6a89719a..3a46495eea 100644 --- a/src/num_gmp_impl.h +++ b/src/num_gmp_impl.h @@ -144,6 +144,32 @@ static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, memset(v, 0, sizeof(v)); } +static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) { + int ret; + mpz_t ga, gb; + secp256k1_num_sanity(a); + secp256k1_num_sanity(b); + VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1)); + + mpz_inits(ga, gb, NULL); + + mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data); + mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data); + if (a->neg) { + mpz_neg(ga, ga); + } + + ret = mpz_jacobi(ga, gb); + + mpz_clears(ga, gb, NULL); + + return ret; +} + +static int secp256k1_num_is_one(const secp256k1_num *a) { + return (a->limbs == 1 && a->data[0] == 1); +} + static int secp256k1_num_is_zero(const secp256k1_num *a) { return (a->limbs == 1 && a->data[0] == 0); } diff --git a/src/scalar.h b/src/scalar.h index b590ccd6dd..27e9d8375e 100644 --- a/src/scalar.h +++ b/src/scalar.h @@ -13,7 +13,9 @@ #include "libsecp256k1-config.h" #endif -#if defined(USE_SCALAR_4X64) +#if defined(EXHAUSTIVE_TEST_ORDER) +#include "scalar_low.h" +#elif defined(USE_SCALAR_4X64) #include "scalar_4x64.h" #elif defined(USE_SCALAR_8X32) #include "scalar_8x32.h" diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h index aa2703dd23..56e7bd82af 100644 --- a/src/scalar_4x64_impl.h +++ b/src/scalar_4x64_impl.h @@ -282,8 +282,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "movq 56(%%rsi), %%r14\n" /* Initialize r8,r9,r10 */ "movq 0(%%rsi), %%r8\n" - "movq $0, %%r9\n" - "movq $0, %%r10\n" + "xorq %%r9, %%r9\n" + "xorq %%r10, %%r10\n" /* (r8,r9) += n0 * c0 */ "movq %8, %%rax\n" "mulq %%r11\n" @@ -291,7 +291,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq %%rdx, %%r9\n" /* extract m0 */ "movq %%r8, %q0\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r10) += l1 */ "addq 8(%%rsi), %%r9\n" "adcq $0, %%r10\n" @@ -309,7 +309,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r8\n" /* extract m1 */ "movq %%r9, %q1\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r10,r8,r9) += l2 */ "addq 16(%%rsi), %%r10\n" "adcq $0, %%r8\n" @@ -332,7 +332,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r9\n" /* extract m2 */ "movq %%r10, %q2\n" - "movq $0, %%r10\n" + "xorq %%r10, %%r10\n" /* (r8,r9,r10) += l3 */ "addq 24(%%rsi), %%r8\n" "adcq $0, %%r9\n" @@ -355,7 +355,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r10\n" /* extract m3 */ "movq %%r8, %q3\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r10,r8) += n3 * c1 */ "movq %9, %%rax\n" "mulq %%r14\n" @@ -387,8 +387,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "movq %q11, %%r13\n" /* Initialize (r8,r9,r10) */ "movq %q5, %%r8\n" - "movq $0, %%r9\n" - "movq $0, %%r10\n" + "xorq %%r9, %%r9\n" + "xorq %%r10, %%r10\n" /* (r8,r9) += m4 * c0 */ "movq %12, %%rax\n" "mulq %%r11\n" @@ -396,7 +396,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq %%rdx, %%r9\n" /* extract p0 */ "movq %%r8, %q0\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r10) += m1 */ "addq %q6, %%r9\n" "adcq $0, %%r10\n" @@ -414,7 +414,7 @@ 
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r8\n" /* extract p1 */ "movq %%r9, %q1\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r10,r8,r9) += m2 */ "addq %q7, %%r10\n" "adcq $0, %%r8\n" @@ -472,7 +472,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "movq %%rax, 0(%q6)\n" /* Move to (r8,r9) */ "movq %%rdx, %%r8\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r8,r9) += p1 */ "addq %q2, %%r8\n" "adcq $0, %%r9\n" @@ -483,7 +483,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq %%rdx, %%r9\n" /* Extract r1 */ "movq %%r8, 8(%q6)\n" - "movq $0, %%r8\n" + "xorq %%r8, %%r8\n" /* (r9,r8) += p4 */ "addq %%r10, %%r9\n" "adcq $0, %%r8\n" @@ -492,7 +492,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) "adcq $0, %%r8\n" /* Extract r2 */ "movq %%r9, 16(%q6)\n" - "movq $0, %%r9\n" + "xorq %%r9, %%r9\n" /* (r8,r9) += p3 */ "addq %q4, %%r8\n" "adcq $0, %%r9\n" diff --git a/src/scalar_impl.h b/src/scalar_impl.h index 88ea97de86..f5b2376407 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -7,8 +7,6 @@ #ifndef _SECP256K1_SCALAR_IMPL_H_ #define _SECP256K1_SCALAR_IMPL_H_ -#include - #include "group.h" #include "scalar.h" @@ -16,7 +14,9 @@ #include "libsecp256k1-config.h" #endif -#if defined(USE_SCALAR_4X64) +#if defined(EXHAUSTIVE_TEST_ORDER) +#include "scalar_low_impl.h" +#elif defined(USE_SCALAR_4X64) #include "scalar_4x64_impl.h" #elif defined(USE_SCALAR_8X32) #include "scalar_8x32_impl.h" @@ -33,17 +33,37 @@ static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a /** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */ static void secp256k1_scalar_order_get_num(secp256k1_num *r) { +#if defined(EXHAUSTIVE_TEST_ORDER) + static const unsigned char order[32] = { + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER + }; +#else static const unsigned char order[32] = { 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 }; +#endif secp256k1_num_set_bin(r, order, 32); } #endif static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { +#if defined(EXHAUSTIVE_TEST_ORDER) + int i; + *r = 0; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) + if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) + *r = i; + /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus + * have a composite group order; fix it in exhaustive_tests.c). */ + VERIFY_CHECK(*r != 0); +} +#else secp256k1_scalar *t; int i; /* First compute x ^ (2^N - 1) for some values of N. 
*/ @@ -235,9 +255,9 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar } SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { - /* d[0] is present and is the lowest word for all representations */ return !(a->d[0] & 1); } +#endif static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { #if defined(USE_SCALAR_INV_BUILTIN) @@ -261,6 +281,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc } #ifdef USE_ENDOMORPHISM +#if defined(EXHAUSTIVE_TEST_ORDER) +/** + * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the + * full case we don't bother making k1 and k2 be small, we just want them to be + * nontrivial to get full test coverage for the exhaustive tests. We therefore + * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda. + */ +static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { + *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER; + *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; +} +#else /** * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, @@ -333,5 +365,6 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar secp256k1_scalar_add(r1, r1, a); } #endif +#endif #endif diff --git a/src/scalar_low.h b/src/scalar_low.h new file mode 100644 index 0000000000..5574c44c7a --- /dev/null +++ b/src/scalar_low.h @@ -0,0 +1,15 @@ +/********************************************************************** + * Copyright (c) 2015 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_SCALAR_REPR_ +#define _SECP256K1_SCALAR_REPR_ + +#include + +/** A scalar modulo the group order of the secp256k1 curve. 
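To make the EXHAUSTIVE_TEST_ORDER scalar code above concrete, here is a small self-contained C sketch (illustrative only, not library code; it uses the same toy parameters the exhaustive tests default to, order 13 with lambda = 9, plus an arbitrarily chosen k = 4) that reproduces the brute-force inverse loop and the k1/k2 lambda split by hand:

    #include <stdio.h>

    int main(void) {
        const int order = 13, lambda = 9;
        int i, inv = 0, k = 4, k1, k2;

        /* Brute-force modular inverse of 5, mirroring the loop in the
         * exhaustive-test version of secp256k1_scalar_inverse above. */
        for (i = 1; i < order; i++) {
            if ((i * 5) % order == 1) {
                inv = i;
            }
        }
        printf("5^-1 mod 13 = %d\n", inv);   /* prints 8, since 5 * 8 = 40 = 3*13 + 1 */

        /* Lambda split as in the exhaustive secp256k1_scalar_split_lambda:
         * k2 = k + 5, k1 = k + (order - k2) * lambda (mod order),
         * so that k1 + k2 * lambda == k (mod order). */
        k2 = (k + 5) % order;
        k1 = (k + (order - k2) * lambda) % order;
        printf("k=%d -> k1=%d k2=%d, k1 + k2*lambda mod 13 = %d\n",
               k, k1, k2, (k1 + k2 * lambda) % order);
        return 0;
    }

Running it gives 5^-1 mod 13 = 8 and k1 = 1, k2 = 9, which recombine to 1 + 9 * 9 = 82 == 4 (mod 13), matching both the VERIFY_CHECK in the inverse and the comment on the split above.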
*/ +typedef uint32_t secp256k1_scalar; + +#endif diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h new file mode 100644 index 0000000000..4f94441f49 --- /dev/null +++ b/src/scalar_low_impl.h @@ -0,0 +1,114 @@ +/********************************************************************** + * Copyright (c) 2015 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ +#define _SECP256K1_SCALAR_REPR_IMPL_H_ + +#include "scalar.h" + +#include + +SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { + return !(*a & 1); +} + +SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; } +SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; } + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { + if (offset < 32) + return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); + else + return 0; +} + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { + return secp256k1_scalar_get_bits(a, offset, count); +} + +SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } + +static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { + *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; + return *r < *b; +} + +static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { + if (flag && bit < 32) + *r += (1 << bit); +#ifdef VERIFY + VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); +#endif +} + +static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { + const int base = 0x100 % EXHAUSTIVE_TEST_ORDER; + int i; + *r = 0; + for (i = 0; i < 32; i++) { + *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER; + } + /* just deny overflow, it basically always happens */ + if (overflow) *overflow = 0; +} + +static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { + memset(bin, 0, 32); + bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; +} + +SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { + return *a == 0; +} + +static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { + if (*a == 0) { + *r = 0; + } else { + *r = EXHAUSTIVE_TEST_ORDER - *a; + } +} + +SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { + return *a == 1; +} + +static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { + return *a > EXHAUSTIVE_TEST_ORDER / 2; +} + +static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { + if (flag) secp256k1_scalar_negate(r, r); + return flag ? 
-1 : 1; +} + +static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { + *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; +} + +static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { + int ret; + VERIFY_CHECK(n > 0); + VERIFY_CHECK(n < 16); + ret = *r & ((1 << n) - 1); + *r >>= n; + return ret; +} + +static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { + *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; +} + +static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { + *r1 = *a; + *r2 = 0; +} + +SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { + return *a == *b; +} + +#endif diff --git a/src/secp256k1.c b/src/secp256k1.c old mode 100644 new mode 100755 index 62d192baeb..fb8b882faa --- a/src/secp256k1.c +++ b/src/secp256k1.c @@ -4,8 +4,6 @@ * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ -#define SECP256K1_BUILD (1) - #include "include/secp256k1.h" #include "util.h" @@ -152,7 +150,6 @@ static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) { int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) { secp256k1_ge Q; - (void)ctx; VERIFY_CHECK(ctx != NULL); ARG_CHECK(pubkey != NULL); memset(pubkey, 0, sizeof(*pubkey)); @@ -170,7 +167,6 @@ int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *o size_t len; int ret = 0; - (void)ctx; VERIFY_CHECK(ctx != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33 : 65)); @@ -216,7 +212,7 @@ static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { secp256k1_scalar r, s; - (void)ctx; + VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input != NULL); @@ -234,7 +230,7 @@ int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp25 int ret = 1; int overflow = 0; - (void)ctx; + VERIFY_CHECK(ctx != NULL); ARG_CHECK(sig != NULL); ARG_CHECK(input64 != NULL); @@ -253,7 +249,7 @@ int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp25 int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) { secp256k1_scalar r, s; - (void)ctx; + VERIFY_CHECK(ctx != NULL); ARG_CHECK(output != NULL); ARG_CHECK(outputlen != NULL); ARG_CHECK(sig != NULL); @@ -265,7 +261,7 @@ int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsign int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) { secp256k1_scalar r, s; - (void)ctx; + VERIFY_CHECK(ctx != NULL); ARG_CHECK(output64 != NULL); ARG_CHECK(sig != NULL); @@ -363,16 +359,15 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature secp256k1_scalar_set_b32(&sec, seckey, &overflow); /* Fail if the secret key is invalid. 
*/ if (!overflow && !secp256k1_scalar_is_zero(&sec)) { + unsigned char nonce32[32]; unsigned int count = 0; secp256k1_scalar_set_b32(&msg, msg32, NULL); while (1) { - unsigned char nonce32[32]; ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); if (!ret) { break; } secp256k1_scalar_set_b32(&non, nonce32, &overflow); - memset(nonce32, 0, 32); if (!overflow && !secp256k1_scalar_is_zero(&non)) { if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) { break; @@ -380,6 +375,7 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature } count++; } + memset(nonce32, 0, 32); secp256k1_scalar_clear(&msg); secp256k1_scalar_clear(&non); secp256k1_scalar_clear(&sec); @@ -398,7 +394,6 @@ int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char int overflow; VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); - (void)ctx; secp256k1_scalar_set_b32(&sec, seckey, &overflow); ret = !overflow && !secp256k1_scalar_is_zero(&sec); @@ -437,7 +432,6 @@ int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char * VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak != NULL); - (void)ctx; secp256k1_scalar_set_b32(&term, tweak, &overflow); secp256k1_scalar_set_b32(&sec, seckey, NULL); @@ -485,7 +479,6 @@ int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char * VERIFY_CHECK(ctx != NULL); ARG_CHECK(seckey != NULL); ARG_CHECK(tweak != NULL); - (void)ctx; secp256k1_scalar_set_b32(&factor, tweak, &overflow); secp256k1_scalar_set_b32(&sec, seckey, NULL); diff --git a/src/tests.c b/src/tests.c index 687a5f2fdd..9ae7d30281 100644 --- a/src/tests.c +++ b/src/tests.c @@ -473,6 +473,8 @@ void test_num_negate(void) { } void test_num_add_sub(void) { + int i; + secp256k1_scalar s; secp256k1_num n1; secp256k1_num n2; secp256k1_num n1p2, n2p1, n1m2, n2m1; @@ -498,6 +500,110 @@ void test_num_add_sub(void) { CHECK(!secp256k1_num_eq(&n2p1, &n1)); secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ CHECK(secp256k1_num_eq(&n2p1, &n1)); + + /* check is_one */ + secp256k1_scalar_set_int(&s, 1); + secp256k1_scalar_get_num(&n1, &s); + CHECK(secp256k1_num_is_one(&n1)); + /* check that 2^n + 1 is never 1 */ + secp256k1_scalar_get_num(&n2, &s); + for (i = 0; i < 250; ++i) { + secp256k1_num_add(&n1, &n1, &n1); /* n1 *= 2 */ + secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ + CHECK(!secp256k1_num_is_one(&n1p2)); + } +} + +void test_num_mod(void) { + int i; + secp256k1_scalar s; + secp256k1_num order, n; + + /* check that 0 mod anything is 0 */ + random_scalar_order_test(&s); + secp256k1_scalar_get_num(&order, &s); + secp256k1_scalar_set_int(&s, 0); + secp256k1_scalar_get_num(&n, &s); + secp256k1_num_mod(&n, &order); + CHECK(secp256k1_num_is_zero(&n)); + + /* check that anything mod 1 is 0 */ + secp256k1_scalar_set_int(&s, 1); + secp256k1_scalar_get_num(&order, &s); + secp256k1_scalar_get_num(&n, &s); + secp256k1_num_mod(&n, &order); + CHECK(secp256k1_num_is_zero(&n)); + + /* check that increasing the number past 2^256 does not break this */ + random_scalar_order_test(&s); + secp256k1_scalar_get_num(&n, &s); + /* multiply by 2^8, which'll test this case with high probability */ + for (i = 0; i < 8; ++i) { + secp256k1_num_add(&n, &n, &n); + } + secp256k1_num_mod(&n, &order); + CHECK(secp256k1_num_is_zero(&n)); +} + +void test_num_jacobi(void) { + secp256k1_scalar sqr; + secp256k1_scalar small; + secp256k1_scalar five; /* five is not a quadratic residue */ + 
secp256k1_num order, n; + int i; + /* squares mod 5 are 1, 4 */ + const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; + + /* check some small values with 5 as the order */ + secp256k1_scalar_set_int(&five, 5); + secp256k1_scalar_get_num(&order, &five); + for (i = 0; i < 10; ++i) { + secp256k1_scalar_set_int(&small, i); + secp256k1_scalar_get_num(&n, &small); + CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]); + } + + /** test large values with 5 as group order */ + secp256k1_scalar_get_num(&order, &five); + /* we first need a scalar which is not a multiple of 5 */ + do { + secp256k1_num fiven; + random_scalar_order_test(&sqr); + secp256k1_scalar_get_num(&fiven, &five); + secp256k1_scalar_get_num(&n, &sqr); + secp256k1_num_mod(&n, &fiven); + } while (secp256k1_num_is_zero(&n)); + /* next force it to be a residue. 2 is a nonresidue mod 5 so we can + * just multiply by two, i.e. add the number to itself */ + if (secp256k1_num_jacobi(&n, &order) == -1) { + secp256k1_num_add(&n, &n, &n); + } + + /* test residue */ + CHECK(secp256k1_num_jacobi(&n, &order) == 1); + /* test nonresidue */ + secp256k1_num_add(&n, &n, &n); + CHECK(secp256k1_num_jacobi(&n, &order) == -1); + + /** test with secp group order as order */ + secp256k1_scalar_order_get_num(&order); + random_scalar_order_test(&sqr); + secp256k1_scalar_sqr(&sqr, &sqr); + /* test residue */ + secp256k1_scalar_get_num(&n, &sqr); + CHECK(secp256k1_num_jacobi(&n, &order) == 1); + /* test nonresidue */ + secp256k1_scalar_mul(&sqr, &sqr, &five); + secp256k1_scalar_get_num(&n, &sqr); + CHECK(secp256k1_num_jacobi(&n, &order) == -1); + /* test multiple of the order*/ + CHECK(secp256k1_num_jacobi(&order, &order) == 0); + + /* check one less than the order */ + secp256k1_scalar_set_int(&small, 1); + secp256k1_scalar_get_num(&n, &small); + secp256k1_num_sub(&n, &order, &n); + CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ } void run_num_smalltests(void) { @@ -505,6 +611,8 @@ void run_num_smalltests(void) { for (i = 0; i < 100*count; i++) { test_num_negate(); test_num_add_sub(); + test_num_mod(); + test_num_jacobi(); } } #endif @@ -689,6 +797,10 @@ void scalar_test(void) { secp256k1_scalar_inverse(&inv, &inv); /* Inverting one must result in one. 
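Why the final check in test_num_jacobi above expects +1 (background only, using Euler's criterion as stated earlier): the secp256k1 group order n is prime and its last byte is 0x41, so n == 1 (mod 4) and (n - 1) / 2 is even; hence

    (n - 1 | n) = (-1 | n) = (-1) ^ ((n - 1) / 2) = 1

which is the value the sage comment records.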
*/ CHECK(secp256k1_scalar_is_one(&inv)); +#ifndef USE_NUM_NONE + secp256k1_scalar_get_num(&invnum, &inv); + CHECK(secp256k1_num_is_one(&invnum)); +#endif } } @@ -855,7 +967,7 @@ void run_scalar_tests(void) { secp256k1_scalar zzv; #endif int overflow; - unsigned char chal[32][2][32] = { + unsigned char chal[33][2][32] = { {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, @@ -1111,9 +1223,17 @@ void run_scalar_tests(void) { {0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0xf8, 0x07, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xc7, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff}} + 0xff, 0xc7, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff}}, + {{0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, + 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb, + 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}, + {0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, + 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb, + 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}} }; - unsigned char res[32][2][32] = { + unsigned char res[33][2][32] = { {{0x0c, 0x3b, 0x0a, 0xca, 0x8d, 0x1a, 0x2f, 0xb9, 0x8a, 0x7b, 0x53, 0x5a, 0x1f, 0xc5, 0x22, 0xa1, 0x07, 0x2a, 0x48, 0xea, 0x02, 0xeb, 0xb3, 0xd6, @@ -1369,10 +1489,18 @@ void run_scalar_tests(void) { {0xe4, 0xf1, 0x23, 0x84, 0xe1, 0xb5, 0x9d, 0xf2, 0xb8, 0x73, 0x8b, 0x45, 0x2b, 0x35, 0x46, 0x38, 0x10, 0x2b, 0x50, 0xf8, 0x8b, 0x35, 0xcd, 0x34, - 0xc8, 0x0e, 0xf6, 0xdb, 0x09, 0x35, 0xf0, 0xda}} + 0xc8, 0x0e, 0xf6, 0xdb, 0x09, 0x35, 0xf0, 0xda}}, + {{0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34, + 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13, + 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, + 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}, + {0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34, + 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13, + 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, + 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} }; secp256k1_scalar_set_int(&one, 1); - for (i = 0; i < 32; i++) { + for (i = 0; i < 33; i++) { secp256k1_scalar_set_b32(&x, chal[i][0], &overflow); CHECK(!overflow); secp256k1_scalar_set_b32(&y, chal[i][1], &overflow); @@ -1446,7 +1574,7 @@ void random_fe_non_zero(secp256k1_fe *nz) { void random_fe_non_square(secp256k1_fe *ns) { secp256k1_fe r; random_fe_non_zero(ns); - if (secp256k1_fe_sqrt_var(&r, ns)) { + if (secp256k1_fe_sqrt(&r, ns)) { secp256k1_fe_negate(ns, ns, 1); } } @@ -1605,18 +1733,18 @@ void run_field_inv_all_var(void) { secp256k1_fe x[16], xi[16], xii[16]; int i; /* Check it's safe to call for 0 elements */ - secp256k1_fe_inv_all_var(0, xi, x); + secp256k1_fe_inv_all_var(xi, x, 0); for (i = 0; i < count; i++) { size_t j; size_t len = secp256k1_rand_int(15) + 1; for (j = 0; j < len; j++) { random_fe_non_zero(&x[j]); } - secp256k1_fe_inv_all_var(len, xi, x); + secp256k1_fe_inv_all_var(xi, x, len); for (j = 0; j < len; j++) { CHECK(check_fe_inverse(&x[j], &xi[j])); } - secp256k1_fe_inv_all_var(len, xii, xi); + secp256k1_fe_inv_all_var(xii, xi, len); for (j = 0; j < len; j++) { CHECK(check_fe_equal(&x[j], &xii[j])); } @@ -1641,7 +1769,7 @@ void run_sqr(void) { void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) { secp256k1_fe r1, r2; - int v = secp256k1_fe_sqrt_var(&r1, a); + int v = secp256k1_fe_sqrt(&r1, a); CHECK((v == 0) == (k == NULL)); if (k != NULL) { @@ -1802,7 +1930,7 @@ void test_ge(void) { zs[i] = 
gej[i].z; } } - secp256k1_fe_inv_all_var(4 * runs + 1, zinv, zs); + secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1); free(zs); } @@ -1922,8 +2050,8 @@ void test_ge(void) { secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); } } - secp256k1_ge_set_table_gej_var(4 * runs + 1, ge_set_table, gej, zr); - secp256k1_ge_set_all_gej_var(4 * runs + 1, ge_set_all, gej, &ctx->error_callback); + secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1); + secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback); for (i = 0; i < 4 * runs + 1; i++) { secp256k1_fe s; random_fe_non_zero(&s); @@ -1951,8 +2079,8 @@ void test_add_neg_y_diff_x(void) { * of the sum to be wrong (since infinity has no xy coordinates). * HOWEVER, if the x-coordinates are different, infinity is the * wrong answer, and such degeneracies are exposed. This is the - * root of https://github.com/bitcoin/secp256k1/issues/257 which - * this test is a regression test for. + * root of https://github.com/bitcoin-core/secp256k1/issues/257 + * which this test is a regression test for. * * These points were generated in sage as * # secp256k1 params @@ -2051,15 +2179,16 @@ void run_ec_combine(void) { void test_group_decompress(const secp256k1_fe* x) { /* The input itself, normalized. */ secp256k1_fe fex = *x; - secp256k1_fe tmp; + secp256k1_fe fez; /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */ secp256k1_ge ge_quad, ge_even, ge_odd; + secp256k1_gej gej_quad; /* Return values of the above calls. */ int res_quad, res_even, res_odd; secp256k1_fe_normalize_var(&fex); - res_quad = secp256k1_ge_set_xquad_var(&ge_quad, &fex); + res_quad = secp256k1_ge_set_xquad(&ge_quad, &fex); res_even = secp256k1_ge_set_xo_var(&ge_even, &fex, 0); res_odd = secp256k1_ge_set_xo_var(&ge_odd, &fex, 1); @@ -2085,13 +2214,29 @@ void test_group_decompress(const secp256k1_fe* x) { CHECK(secp256k1_fe_equal_var(&ge_odd.x, x)); /* Check that the Y coordinate result in ge_quad is a square. */ - CHECK(secp256k1_fe_sqrt_var(&tmp, &ge_quad.y)); - secp256k1_fe_sqr(&tmp, &tmp); - CHECK(secp256k1_fe_equal_var(&tmp, &ge_quad.y)); + CHECK(secp256k1_fe_is_quad_var(&ge_quad.y)); /* Check odd/even Y in ge_odd, ge_even. */ CHECK(secp256k1_fe_is_odd(&ge_odd.y)); CHECK(!secp256k1_fe_is_odd(&ge_even.y)); + + /* Check secp256k1_gej_has_quad_y_var. 
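The secp256k1_gej_has_quad_y_var checks that follow rest on two facts worth spelling out (background, not new code): rescaling a Jacobian point changes (X, Y, Z) but not the affine point it represents, so whether the affine y is a quadratic residue cannot change; and because the field prime p == 3 (mod 4), -1 is a non-residue, i.e. (-1 | p) = (-1) ^ ((p - 1) / 2) = -1, so exactly one of y and -y is a square. That is why the test expects the result to be stable under secp256k1_gej_rescale and to flip under secp256k1_gej_neg.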
*/ + secp256k1_gej_set_ge(&gej_quad, &ge_quad); + CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); + do { + random_fe_test(&fez); + } while (secp256k1_fe_is_zero(&fez)); + secp256k1_gej_rescale(&gej_quad, &fez); + CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); + secp256k1_gej_neg(&gej_quad, &gej_quad); + CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); + do { + random_fe_test(&fez); + } while (secp256k1_fe_is_zero(&fez)); + secp256k1_gej_rescale(&gej_quad, &fez); + CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); + secp256k1_gej_neg(&gej_quad, &gej_quad); + CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); } } @@ -2383,9 +2528,7 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) { secp256k1_scalar x, shift; int wnaf[256] = {0}; int i; -#ifdef USE_ENDOMORPHISM int skew; -#endif secp256k1_scalar num = *number; secp256k1_scalar_set_int(&x, 0); @@ -2395,10 +2538,8 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) { for (i = 0; i < 16; ++i) { secp256k1_scalar_shr_int(&num, 8); } - skew = secp256k1_wnaf_const(wnaf, num, w); -#else - secp256k1_wnaf_const(wnaf, num, w); #endif + skew = secp256k1_wnaf_const(wnaf, num, w); for (i = WNAF_SIZE(w); i >= 0; --i) { secp256k1_scalar t; @@ -2417,10 +2558,8 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) { } secp256k1_scalar_add(&x, &x, &t); } -#ifdef USE_ENDOMORPHISM - /* Skew num because when encoding 128-bit numbers as odd we use an offset */ + /* Skew num because when encoding numbers as odd we use an offset */ secp256k1_scalar_cadd_bit(&num, skew == 2, 1); -#endif CHECK(secp256k1_scalar_eq(&x, &num)); } @@ -3484,12 +3623,14 @@ void run_ecdsa_end_to_end(void) { int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_der, int certainly_not_der) { static const unsigned char zeroes[32] = {0}; +#ifdef ENABLE_OPENSSL_TESTS static const unsigned char max_scalar[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40 }; +#endif int ret = 0; @@ -3607,13 +3748,13 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { static void damage_array(unsigned char *sig, size_t *len) { int pos; int action = secp256k1_rand_bits(3); - if (action < 1) { + if (action < 1 && *len > 3) { /* Delete a byte. */ pos = secp256k1_rand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; - } else if (action < 2) { + } else if (action < 2 && *len < 2048) { /* Insert a byte. 
*/ pos = secp256k1_rand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); @@ -3785,6 +3926,7 @@ void run_ecdsa_der_parse(void) { int certainly_der = 0; int certainly_not_der = 0; random_ber_signature(buffer, &buflen, &certainly_der, &certainly_not_der); + CHECK(buflen <= 2048); for (j = 0; j < 16; j++) { int ret = 0; if (j > 0) { diff --git a/src/tests_exhaustive.c b/src/tests_exhaustive.c new file mode 100644 index 0000000000..bda6ee475c --- /dev/null +++ b/src/tests_exhaustive.c @@ -0,0 +1,329 @@ +/*********************************************************************** + * Copyright (c) 2016 Andrew Poelstra * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#if defined HAVE_CONFIG_H +#include "libsecp256k1-config.h" +#endif + +#include +#include + +#include + +#undef USE_ECMULT_STATIC_PRECOMPUTATION + +#ifndef EXHAUSTIVE_TEST_ORDER +/* see group_impl.h for allowable values */ +#define EXHAUSTIVE_TEST_ORDER 13 +#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */ +#endif + +#include "include/secp256k1.h" +#include "group.h" +#include "secp256k1.c" +#include "testrand_impl.h" + +/** stolen from tests.c */ +void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { + CHECK(a->infinity == b->infinity); + if (a->infinity) { + return; + } + CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); + CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); +} + +void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { + secp256k1_fe z2s; + secp256k1_fe u1, u2, s1, s2; + CHECK(a->infinity == b->infinity); + if (a->infinity) { + return; + } + /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ + secp256k1_fe_sqr(&z2s, &b->z); + secp256k1_fe_mul(&u1, &a->x, &z2s); + u2 = b->x; secp256k1_fe_normalize_weak(&u2); + secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); + s2 = b->y; secp256k1_fe_normalize_weak(&s2); + CHECK(secp256k1_fe_equal_var(&u1, &u2)); + CHECK(secp256k1_fe_equal_var(&s1, &s2)); +} + +void random_fe(secp256k1_fe *x) { + unsigned char bin[32]; + do { + secp256k1_rand256(bin); + if (secp256k1_fe_set_b32(x, bin)) { + return; + } + } while(1); +} +/** END stolen from tests.c */ + +int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, + const unsigned char *key32, const unsigned char *algo16, + void *data, unsigned int attempt) { + secp256k1_scalar s; + int *idata = data; + (void)msg32; + (void)key32; + (void)algo16; + /* Some nonces cannot be used because they'd cause s and/or r to be zero. + * The signing function has retry logic here that just re-calls the nonce + * function with an increased `attempt`. So if attempt > 0 this means we + * need to change the nonce to avoid an infinite loop. 
*/ + if (attempt > 0) { + (*idata)++; + } + secp256k1_scalar_set_int(&s, *idata); + secp256k1_scalar_get_b32(nonce32, &s); + return 1; +} + +#ifdef USE_ENDOMORPHISM +void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) { + int i; + for (i = 0; i < order; i++) { + secp256k1_ge res; + secp256k1_ge_mul_lambda(&res, &group[i]); + ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); + } +} +#endif + +void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { + int i, j; + + /* Sanity-check (and check infinity functions) */ + CHECK(secp256k1_ge_is_infinity(&group[0])); + CHECK(secp256k1_gej_is_infinity(&groupj[0])); + for (i = 1; i < order; i++) { + CHECK(!secp256k1_ge_is_infinity(&group[i])); + CHECK(!secp256k1_gej_is_infinity(&groupj[i])); + } + + /* Check all addition formulae */ + for (j = 0; j < order; j++) { + secp256k1_fe fe_inv; + secp256k1_fe_inv(&fe_inv, &groupj[j].z); + for (i = 0; i < order; i++) { + secp256k1_ge zless_gej; + secp256k1_gej tmp; + /* add_var */ + secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); + ge_equals_gej(&group[(i + j) % order], &tmp); + /* add_ge */ + if (j > 0) { + secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]); + ge_equals_gej(&group[(i + j) % order], &tmp); + } + /* add_ge_var */ + secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); + ge_equals_gej(&group[(i + j) % order], &tmp); + /* add_zinv_var */ + zless_gej.infinity = groupj[j].infinity; + zless_gej.x = groupj[j].x; + zless_gej.y = groupj[j].y; + secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); + ge_equals_gej(&group[(i + j) % order], &tmp); + } + } + + /* Check doubling */ + for (i = 0; i < order; i++) { + secp256k1_gej tmp; + if (i > 0) { + secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL); + ge_equals_gej(&group[(2 * i) % order], &tmp); + } + secp256k1_gej_double_var(&tmp, &groupj[i], NULL); + ge_equals_gej(&group[(2 * i) % order], &tmp); + } + + /* Check negation */ + for (i = 1; i < order; i++) { + secp256k1_ge tmp; + secp256k1_gej tmpj; + secp256k1_ge_neg(&tmp, &group[i]); + ge_equals_ge(&group[order - i], &tmp); + secp256k1_gej_neg(&tmpj, &groupj[i]); + ge_equals_gej(&group[order - i], &tmpj); + } +} + +void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { + int i, j, r_log; + for (r_log = 1; r_log < order; r_log++) { + for (j = 0; j < order; j++) { + for (i = 0; i < order; i++) { + secp256k1_gej tmp; + secp256k1_scalar na, ng; + secp256k1_scalar_set_int(&na, i); + secp256k1_scalar_set_int(&ng, j); + + secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); + ge_equals_gej(&group[(i * r_log + j) % order], &tmp); + + if (i > 0) { + secp256k1_ecmult_const(&tmp, &group[i], &ng); + ge_equals_gej(&group[(i * j) % order], &tmp); + } + } + } + } +} + +void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) { + secp256k1_fe x; + unsigned char x_bin[32]; + k %= EXHAUSTIVE_TEST_ORDER; + x = group[k].x; + secp256k1_fe_normalize(&x); + secp256k1_fe_get_b32(x_bin, &x); + secp256k1_scalar_set_b32(r, x_bin, NULL); +} + +void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { + int s, r, msg, key; + for (s = 1; s < order; s++) { + for (r = 1; r < order; r++) { + for (msg = 1; msg < order; msg++) { + for (key = 1; key < order; key++) { + secp256k1_ge nonconst_ge; + secp256k1_ecdsa_signature sig; + secp256k1_pubkey pk; + secp256k1_scalar sk_s, msg_s, 
+                    secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+                    int k, should_verify;
+                    unsigned char msg32[32];
+
+                    secp256k1_scalar_set_int(&s_s, s);
+                    secp256k1_scalar_set_int(&r_s, r);
+                    secp256k1_scalar_set_int(&msg_s, msg);
+                    secp256k1_scalar_set_int(&sk_s, key);
+
+                    /* Verify by hand */
+                    /* Run through every k value that gives us this r and check that *one* works.
+                     * Note there could be none, there could be multiple, ECDSA is weird. */
+                    should_verify = 0;
+                    for (k = 0; k < order; k++) {
+                        secp256k1_scalar check_x_s;
+                        r_from_k(&check_x_s, group, k);
+                        if (r_s == check_x_s) {
+                            secp256k1_scalar_set_int(&s_times_k_s, k);
+                            secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+                            secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+                            secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+                            should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+                        }
+                    }
+                    /* nb we have a "high s" rule */
+                    should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+                    /* Verify by calling verify */
+                    secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
+                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+                    secp256k1_pubkey_save(&pk, &nonconst_ge);
+                    secp256k1_scalar_get_b32(msg32, &msg_s);
+                    CHECK(should_verify ==
+                          secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+                }
+            }
+        }
+    }
+}
+
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+    int i, j, k;
+
+    /* Loop */
+    for (i = 1; i < order; i++) {  /* message */
+        for (j = 1; j < order; j++) {  /* key */
+            for (k = 1; k < order; k++) {  /* nonce */
+                secp256k1_ecdsa_signature sig;
+                secp256k1_scalar sk, msg, r, s, expected_r;
+                unsigned char sk32[32], msg32[32];
+                secp256k1_scalar_set_int(&msg, i);
+                secp256k1_scalar_set_int(&sk, j);
+                secp256k1_scalar_get_b32(sk32, &sk);
+                secp256k1_scalar_get_b32(msg32, &msg);
+
+                secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+                secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+                /* Note that we compute expected_r *after* signing -- this is important
+                 * because our nonce-computing function might change k during signing. */
+                r_from_k(&expected_r, group, k);
+                CHECK(r == expected_r);
+                CHECK((k * s) % order == (i + r * j) % order ||
+                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+            }
+        }
+    }
+
+    /* We would like to verify zero-knowledge here by counting how often every
+     * possible (s, r) tuple appears, but because the group order is larger
+     * than the field order, when coercing the x-values to scalar values, some
+     * appear more often than others, so we are actually not zero-knowledge.
+     * (This effect also appears in the real code, but the difference is on the
+     * order of 1/2^128th the field order, so the deviation is not useful to a
+     * computationally bounded attacker.)
+     */
+}
+
+int main(void) {
+    int i;
+    secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
+    secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
+
+    /* Build context */
+    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+    /* TODO set z = 1, then do num_tests runs with random z values */
+
+    /* Generate the entire group */
+    secp256k1_gej_set_infinity(&groupj[0]);
+    secp256k1_ge_set_gej(&group[0], &groupj[0]);
+    for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+        /* Set a different random z-value for each Jacobian point */
+        secp256k1_fe z;
+        random_fe(&z);
+
+        secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
+        secp256k1_ge_set_gej(&group[i], &groupj[i]);
+        secp256k1_gej_rescale(&groupj[i], &z);
+
+        /* Verify against ecmult_gen */
+        {
+            secp256k1_scalar scalar_i;
+            secp256k1_gej generatedj;
+            secp256k1_ge generated;
+
+            secp256k1_scalar_set_int(&scalar_i, i);
+            secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+            secp256k1_ge_set_gej(&generated, &generatedj);
+
+            CHECK(group[i].infinity == 0);
+            CHECK(generated.infinity == 0);
+            CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+            CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
+        }
+    }
+
+    /* Run the tests */
+#ifdef USE_ENDOMORPHISM
+    test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
+#endif
+    test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
+    test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
+    test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+    test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+
+    return 0;
+}
+
-- 
cgit v1.2.3
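
Editor's note on the "verify by hand" logic in test_exhaustive_verify above: a tuple (r, s) is accepted for message m and secret key x exactly when some nonce k whose R-point x-coordinate coerces to r satisfies s*k = m + r*x (mod n), and s is not "high". The standalone sketch below restates that arithmetic with plain integers over a hypothetical toy order N = 13 and a made-up r_of_k table; it is only an illustration of the check, not part of the patch and not the library API.

#include <stdio.h>

#define N 13  /* hypothetical toy group order, for illustration only */

/* Made-up mapping from nonce k to the scalar-coerced x-coordinate of k*G.
 * In the exhaustive test this role is played by r_from_k() over the small
 * group; the values below are arbitrary placeholders. */
static const int r_of_k[N] = {0, 3, 7, 1, 9, 4, 11, 2, 6, 12, 5, 8, 10};

/* Returns 1 iff (r, s) should verify for message m and secret key x:
 * some k with r_of_k[k] == r must satisfy s*k == m + r*x (mod N), and s
 * must not be "high" (here: s <= N/2), mirroring the test's low-s rule. */
static int verify_by_hand(int r, int s, int m, int x) {
    int k, ok = 0;
    for (k = 1; k < N; k++) {
        if (r_of_k[k] == r && (s * k) % N == (m + r * x) % N) {
            ok = 1;
        }
    }
    return ok && s <= N / 2;
}

int main(void) {
    /* Arbitrary example values; in the real test the result is compared
     * against secp256k1_ecdsa_verify over every (r, s, m, x) combination. */
    printf("%d\n", verify_by_hand(3, 2, 5, 4));
    return 0;
}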