From c2b79342506e24e9b7100fb7a6025dc870375ef6 Mon Sep 17 00:00:00 2001
From: Pavol Rusnak
Date: Thu, 20 Jan 2022 17:26:00 +0100
Subject: Rename SHANI to X86_SHANI to allow future implementation of ARM_SHANI

---
 configure.ac                    |  14 +-
 src/Makefile.am                 |  16 +-
 src/crypto/sha256.cpp           |  22 +--
 src/crypto/sha256_shani.cpp     | 356 ----------------------------------------
 src/crypto/sha256_x86_shani.cpp | 356 ++++++++++++++++++++++++++++++++++++++++
 5 files changed, 382 insertions(+), 382 deletions(-)
 delete mode 100644 src/crypto/sha256_shani.cpp
 create mode 100644 src/crypto/sha256_x86_shani.cpp

diff --git a/configure.ac b/configure.ac
index bef3973996..4cf2446c6d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -466,7 +466,7 @@ AX_CHECK_COMPILE_FLAG([-fno-extended-identifiers], [CXXFLAGS="$CXXFLAGS -fno-ext
 enable_sse42=no
 enable_sse41=no
 enable_avx2=no
-enable_shani=no
+enable_x86_shani=no
 
 if test "$use_asm" = "yes"; then
 
@@ -478,7 +478,7 @@ dnl x86
 AX_CHECK_COMPILE_FLAG([-msse4.2], [SSE42_CXXFLAGS="-msse4.2"], [], [$CXXFLAG_WERROR])
 AX_CHECK_COMPILE_FLAG([-msse4.1], [SSE41_CXXFLAGS="-msse4.1"], [], [$CXXFLAG_WERROR])
 AX_CHECK_COMPILE_FLAG([-mavx -mavx2], [AVX2_CXXFLAGS="-mavx -mavx2"], [], [$CXXFLAG_WERROR])
-AX_CHECK_COMPILE_FLAG([-msse4 -msha], [SHANI_CXXFLAGS="-msse4 -msha"], [], [$CXXFLAG_WERROR])
+AX_CHECK_COMPILE_FLAG([-msse4 -msha], [X86_SHANI_CXXFLAGS="-msse4 -msha"], [], [$CXXFLAG_WERROR])
 
 enable_clmul=
 AX_CHECK_COMPILE_FLAG([-mpclmul], [enable_clmul=yes], [], [$CXXFLAG_WERROR], [AC_LANG_PROGRAM([
@@ -551,8 +551,8 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
 CXXFLAGS="$TEMP_CXXFLAGS"
 
 TEMP_CXXFLAGS="$CXXFLAGS"
-CXXFLAGS="$CXXFLAGS $SHANI_CXXFLAGS"
-AC_MSG_CHECKING([for SHA-NI intrinsics])
+CXXFLAGS="$CXXFLAGS $X86_SHANI_CXXFLAGS"
+AC_MSG_CHECKING([for x86 SHA-NI intrinsics])
 AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
   #include <stdint.h>
   #include <immintrin.h>
@@ -562,7 +562,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
   __m128i k = _mm_set1_epi32(2);
   return _mm_extract_epi32(_mm_sha256rnds2_epu32(i, i, k), 0);
 ]])],
- [ AC_MSG_RESULT([yes]); enable_shani=yes; AC_DEFINE([ENABLE_SHANI], [1], [Define this symbol to build code that uses SHA-NI intrinsics]) ],
+ [ AC_MSG_RESULT([yes]); enable_x86_shani=yes; AC_DEFINE([ENABLE_X86_SHANI], [1], [Define this symbol to build code that uses x86 SHA-NI intrinsics]) ],
 [ AC_MSG_RESULT([no])]
 )
 CXXFLAGS="$TEMP_CXXFLAGS"
@@ -1806,7 +1806,7 @@ AM_CONDITIONAL([HARDEN], [test "$use_hardening" = "yes"])
 AM_CONDITIONAL([ENABLE_SSE42], [test "$enable_sse42" = "yes"])
 AM_CONDITIONAL([ENABLE_SSE41], [test "$enable_sse41" = "yes"])
 AM_CONDITIONAL([ENABLE_AVX2], [test "$enable_avx2" = "yes"])
-AM_CONDITIONAL([ENABLE_SHANI], [test "$enable_shani" = "yes"])
+AM_CONDITIONAL([ENABLE_X86_SHANI], [test "$enable_x86_shani" = "yes"])
 AM_CONDITIONAL([ENABLE_ARM_CRC], [test "$enable_arm_crc" = "yes"])
 AM_CONDITIONAL([USE_ASM], [test "$use_asm" = "yes"])
 AM_CONDITIONAL([WORDS_BIGENDIAN], [test "$ac_cv_c_bigendian" = "yes"])
@@ -1864,7 +1864,7 @@ AC_SUBST(SSE42_CXXFLAGS)
 AC_SUBST(SSE41_CXXFLAGS)
 AC_SUBST(CLMUL_CXXFLAGS)
 AC_SUBST(AVX2_CXXFLAGS)
-AC_SUBST(SHANI_CXXFLAGS)
+AC_SUBST(X86_SHANI_CXXFLAGS)
 AC_SUBST(ARM_CRC_CXXFLAGS)
 AC_SUBST(LIBTOOL_APP_LDFLAGS)
 AC_SUBST(USE_SQLITE)

diff --git a/src/Makefile.am b/src/Makefile.am
index 0b177480c8..41208993e8 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -46,9 +46,9 @@ if ENABLE_AVX2
 LIBBITCOIN_CRYPTO_AVX2 = crypto/libbitcoin_crypto_avx2.a
 LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_AVX2)
 endif
-if ENABLE_SHANI
-LIBBITCOIN_CRYPTO_SHANI =
crypto/libbitcoin_crypto_shani.a -LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_SHANI) +if ENABLE_X86_SHANI +LIBBITCOIN_CRYPTO_X86_SHANI = crypto/libbitcoin_crypto_x86_shani.a +LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_X86_SHANI) endif $(LIBSECP256K1): $(wildcard secp256k1/src/*.h) $(wildcard secp256k1/src/*.c) $(wildcard secp256k1/include/*) @@ -498,11 +498,11 @@ crypto_libbitcoin_crypto_avx2_a_CXXFLAGS += $(AVX2_CXXFLAGS) crypto_libbitcoin_crypto_avx2_a_CPPFLAGS += -DENABLE_AVX2 crypto_libbitcoin_crypto_avx2_a_SOURCES = crypto/sha256_avx2.cpp -crypto_libbitcoin_crypto_shani_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) -crypto_libbitcoin_crypto_shani_a_CPPFLAGS = $(AM_CPPFLAGS) -crypto_libbitcoin_crypto_shani_a_CXXFLAGS += $(SHANI_CXXFLAGS) -crypto_libbitcoin_crypto_shani_a_CPPFLAGS += -DENABLE_SHANI -crypto_libbitcoin_crypto_shani_a_SOURCES = crypto/sha256_shani.cpp +crypto_libbitcoin_crypto_x86_shani_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +crypto_libbitcoin_crypto_x86_shani_a_CPPFLAGS = $(AM_CPPFLAGS) +crypto_libbitcoin_crypto_x86_shani_a_CXXFLAGS += $(X86_SHANI_CXXFLAGS) +crypto_libbitcoin_crypto_x86_shani_a_CPPFLAGS += -DENABLE_X86_SHANI +crypto_libbitcoin_crypto_x86_shani_a_SOURCES = crypto/sha256_x86_shani.cpp # consensus: shared between all executables that validate any consensus rules. libbitcoin_consensus_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index e35d526d35..48ee121e2d 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -29,12 +29,12 @@ namespace sha256d64_avx2 void Transform_8way(unsigned char* out, const unsigned char* in); } -namespace sha256d64_shani +namespace sha256d64_x86_shani { void Transform_2way(unsigned char* out, const unsigned char* in); } -namespace sha256_shani +namespace sha256_x86_shani { void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks); } @@ -567,7 +567,7 @@ std::string SHA256AutoDetect() bool have_xsave = false; bool have_avx = false; bool have_avx2 = false; - bool have_shani = false; + bool have_x86_shani = false; bool enabled_avx = false; (void)AVXEnabled; @@ -575,7 +575,7 @@ std::string SHA256AutoDetect() (void)have_avx; (void)have_xsave; (void)have_avx2; - (void)have_shani; + (void)have_x86_shani; (void)enabled_avx; uint32_t eax, ebx, ecx, edx; @@ -589,15 +589,15 @@ std::string SHA256AutoDetect() if (have_sse4) { GetCPUID(7, 0, eax, ebx, ecx, edx); have_avx2 = (ebx >> 5) & 1; - have_shani = (ebx >> 29) & 1; + have_x86_shani = (ebx >> 29) & 1; } -#if defined(ENABLE_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) - if (have_shani) { - Transform = sha256_shani::Transform; - TransformD64 = TransformD64Wrapper; - TransformD64_2way = sha256d64_shani::Transform_2way; - ret = "shani(1way,2way)"; +#if defined(ENABLE_X86_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) + if (have_x86_shani) { + Transform = sha256_x86_shani::Transform; + TransformD64 = TransformD64Wrapper; + TransformD64_2way = sha256d64_x86_shani::Transform_2way; + ret = "x86_shani(1way,2way)"; have_sse4 = false; // Disable SSE4/AVX2; have_avx2 = false; } diff --git a/src/crypto/sha256_shani.cpp b/src/crypto/sha256_shani.cpp deleted file mode 100644 index 4f4d5b5837..0000000000 --- a/src/crypto/sha256_shani.cpp +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright (c) 2018-2020 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-// -// Based on https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-x86.c, -// Written and placed in public domain by Jeffrey Walton. -// Based on code from Intel, and by Sean Gulley for the miTLS project. - -#ifdef ENABLE_SHANI - -#include -#include - -namespace { - -alignas(__m128i) const uint8_t MASK[16] = {0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c}; -alignas(__m128i) const uint8_t INIT0[16] = {0x8c, 0x68, 0x05, 0x9b, 0x7f, 0x52, 0x0e, 0x51, 0x85, 0xae, 0x67, 0xbb, 0x67, 0xe6, 0x09, 0x6a}; -alignas(__m128i) const uint8_t INIT1[16] = {0x19, 0xcd, 0xe0, 0x5b, 0xab, 0xd9, 0x83, 0x1f, 0x3a, 0xf5, 0x4f, 0xa5, 0x72, 0xf3, 0x6e, 0x3c}; - -void inline __attribute__((always_inline)) QuadRound(__m128i& state0, __m128i& state1, uint64_t k1, uint64_t k0) -{ - const __m128i msg = _mm_set_epi64x(k1, k0); - state1 = _mm_sha256rnds2_epu32(state1, state0, msg); - state0 = _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e)); -} - -void inline __attribute__((always_inline)) QuadRound(__m128i& state0, __m128i& state1, __m128i m, uint64_t k1, uint64_t k0) -{ - const __m128i msg = _mm_add_epi32(m, _mm_set_epi64x(k1, k0)); - state1 = _mm_sha256rnds2_epu32(state1, state0, msg); - state0 = _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e)); -} - -void inline __attribute__((always_inline)) ShiftMessageA(__m128i& m0, __m128i m1) -{ - m0 = _mm_sha256msg1_epu32(m0, m1); -} - -void inline __attribute__((always_inline)) ShiftMessageC(__m128i& m0, __m128i m1, __m128i& m2) -{ - m2 = _mm_sha256msg2_epu32(_mm_add_epi32(m2, _mm_alignr_epi8(m1, m0, 4)), m1); -} - -void inline __attribute__((always_inline)) ShiftMessageB(__m128i& m0, __m128i m1, __m128i& m2) -{ - ShiftMessageC(m0, m1, m2); - ShiftMessageA(m0, m1); -} - -void inline __attribute__((always_inline)) Shuffle(__m128i& s0, __m128i& s1) -{ - const __m128i t1 = _mm_shuffle_epi32(s0, 0xB1); - const __m128i t2 = _mm_shuffle_epi32(s1, 0x1B); - s0 = _mm_alignr_epi8(t1, t2, 0x08); - s1 = _mm_blend_epi16(t2, t1, 0xF0); -} - -void inline __attribute__((always_inline)) Unshuffle(__m128i& s0, __m128i& s1) -{ - const __m128i t1 = _mm_shuffle_epi32(s0, 0x1B); - const __m128i t2 = _mm_shuffle_epi32(s1, 0xB1); - s0 = _mm_blend_epi16(t1, t2, 0xF0); - s1 = _mm_alignr_epi8(t2, t1, 0x08); -} - -__m128i inline __attribute__((always_inline)) Load(const unsigned char* in) -{ - return _mm_shuffle_epi8(_mm_loadu_si128((const __m128i*)in), _mm_load_si128((const __m128i*)MASK)); -} - -void inline __attribute__((always_inline)) Save(unsigned char* out, __m128i s) -{ - _mm_storeu_si128((__m128i*)out, _mm_shuffle_epi8(s, _mm_load_si128((const __m128i*)MASK))); -} -} - -namespace sha256_shani { -void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks) -{ - __m128i m0, m1, m2, m3, s0, s1, so0, so1; - - /* Load state */ - s0 = _mm_loadu_si128((const __m128i*)s); - s1 = _mm_loadu_si128((const __m128i*)(s + 4)); - Shuffle(s0, s1); - - while (blocks--) { - /* Remember old state */ - so0 = s0; - so1 = s1; - - /* Load data and transform */ - m0 = Load(chunk); - QuadRound(s0, s1, m0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull); - m1 = Load(chunk + 16); - QuadRound(s0, s1, m1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - ShiftMessageA(m0, m1); - m2 = Load(chunk + 32); - QuadRound(s0, s1, m2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); - ShiftMessageA(m1, m2); - m3 = Load(chunk + 48); - QuadRound(s0, s1, m3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull); - ShiftMessageB(m2, m3, m0); - 
QuadRound(s0, s1, m0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull); - ShiftMessageB(m3, m0, m1); - QuadRound(s0, s1, m1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); - ShiftMessageB(m0, m1, m2); - QuadRound(s0, s1, m2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); - ShiftMessageB(m1, m2, m3); - QuadRound(s0, s1, m3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); - ShiftMessageB(m2, m3, m0); - QuadRound(s0, s1, m0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); - ShiftMessageB(m3, m0, m1); - QuadRound(s0, s1, m1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); - ShiftMessageB(m0, m1, m2); - QuadRound(s0, s1, m2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull); - ShiftMessageB(m1, m2, m3); - QuadRound(s0, s1, m3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); - ShiftMessageB(m2, m3, m0); - QuadRound(s0, s1, m0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); - ShiftMessageB(m3, m0, m1); - QuadRound(s0, s1, m1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); - ShiftMessageC(m0, m1, m2); - QuadRound(s0, s1, m2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); - ShiftMessageC(m1, m2, m3); - QuadRound(s0, s1, m3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull); - - /* Combine with old state */ - s0 = _mm_add_epi32(s0, so0); - s1 = _mm_add_epi32(s1, so1); - - /* Advance */ - chunk += 64; - } - - Unshuffle(s0, s1); - _mm_storeu_si128((__m128i*)s, s0); - _mm_storeu_si128((__m128i*)(s + 4), s1); -} -} - -namespace sha256d64_shani { - -void Transform_2way(unsigned char* out, const unsigned char* in) -{ - __m128i am0, am1, am2, am3, as0, as1, aso0, aso1; - __m128i bm0, bm1, bm2, bm3, bs0, bs1, bso0, bso1; - - /* Transform 1 */ - bs0 = as0 = _mm_load_si128((const __m128i*)INIT0); - bs1 = as1 = _mm_load_si128((const __m128i*)INIT1); - am0 = Load(in); - bm0 = Load(in + 64); - QuadRound(as0, as1, am0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull); - QuadRound(bs0, bs1, bm0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull); - am1 = Load(in + 16); - bm1 = Load(in + 80); - QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - ShiftMessageA(am0, am1); - ShiftMessageA(bm0, bm1); - am2 = Load(in + 32); - bm2 = Load(in + 96); - QuadRound(as0, as1, am2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); - QuadRound(bs0, bs1, bm2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); - ShiftMessageA(am1, am2); - ShiftMessageA(bm1, bm2); - am3 = Load(in + 48); - bm3 = Load(in + 112); - QuadRound(as0, as1, am3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull); - QuadRound(bs0, bs1, bm3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull); - ShiftMessageB(am2, am3, am0); - ShiftMessageB(bm2, bm3, bm0); - QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull); - QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull); - ShiftMessageB(am3, am0, am1); - ShiftMessageB(bm3, bm0, bm1); - QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); - QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); - ShiftMessageB(am0, am1, am2); - ShiftMessageB(bm0, bm1, bm2); - QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); - QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); - ShiftMessageB(am1, am2, am3); - ShiftMessageB(bm1, bm2, bm3); - QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); - QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); - ShiftMessageB(am2, am3, am0); - ShiftMessageB(bm2, bm3, bm0); - QuadRound(as0, 
as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); - QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); - ShiftMessageB(am3, am0, am1); - ShiftMessageB(bm3, bm0, bm1); - QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); - QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); - ShiftMessageB(am0, am1, am2); - ShiftMessageB(bm0, bm1, bm2); - QuadRound(as0, as1, am2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull); - QuadRound(bs0, bs1, bm2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull); - ShiftMessageB(am1, am2, am3); - ShiftMessageB(bm1, bm2, bm3); - QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); - QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); - ShiftMessageB(am2, am3, am0); - ShiftMessageB(bm2, bm3, bm0); - QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); - QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); - ShiftMessageB(am3, am0, am1); - ShiftMessageB(bm3, bm0, bm1); - QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); - QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); - ShiftMessageC(am0, am1, am2); - ShiftMessageC(bm0, bm1, bm2); - QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); - QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); - ShiftMessageC(am1, am2, am3); - ShiftMessageC(bm1, bm2, bm3); - QuadRound(as0, as1, am3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull); - QuadRound(bs0, bs1, bm3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull); - as0 = _mm_add_epi32(as0, _mm_load_si128((const __m128i*)INIT0)); - bs0 = _mm_add_epi32(bs0, _mm_load_si128((const __m128i*)INIT0)); - as1 = _mm_add_epi32(as1, _mm_load_si128((const __m128i*)INIT1)); - bs1 = _mm_add_epi32(bs1, _mm_load_si128((const __m128i*)INIT1)); - - /* Transform 2 */ - aso0 = as0; - bso0 = bs0; - aso1 = as1; - bso1 = bs1; - QuadRound(as0, as1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull); - QuadRound(bs0, bs1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull); - QuadRound(as0, as1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - QuadRound(bs0, bs1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); - QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); - QuadRound(as0, as1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull); - QuadRound(bs0, bs1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull); - QuadRound(as0, as1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull); - QuadRound(bs0, bs1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull); - QuadRound(as0, as1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full); - QuadRound(bs0, bs1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full); - QuadRound(as0, as1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull); - QuadRound(bs0, bs1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull); - QuadRound(as0, as1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull); - QuadRound(bs0, bs1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull); - QuadRound(as0, as1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull); - QuadRound(bs0, bs1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull); - QuadRound(as0, as1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull); - QuadRound(bs0, bs1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull); - QuadRound(as0, as1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull); - QuadRound(bs0, bs1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull); - QuadRound(as0, as1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull); - 
QuadRound(bs0, bs1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull); - QuadRound(as0, as1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull); - QuadRound(bs0, bs1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull); - QuadRound(as0, as1, 0x6d4378906ed41a95ull, 0x31338431521afacaull); - QuadRound(bs0, bs1, 0x6d4378906ed41a95ull, 0x31338431521afacaull); - QuadRound(as0, as1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull); - QuadRound(bs0, bs1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull); - QuadRound(as0, as1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull); - QuadRound(bs0, bs1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull); - as0 = _mm_add_epi32(as0, aso0); - bs0 = _mm_add_epi32(bs0, bso0); - as1 = _mm_add_epi32(as1, aso1); - bs1 = _mm_add_epi32(bs1, bso1); - - /* Extract hash */ - Unshuffle(as0, as1); - Unshuffle(bs0, bs1); - am0 = as0; - bm0 = bs0; - am1 = as1; - bm1 = bs1; - - /* Transform 3 */ - bs0 = as0 = _mm_load_si128((const __m128i*)INIT0); - bs1 = as1 = _mm_load_si128((const __m128i*)INIT1); - QuadRound(as0, as1, am0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull); - QuadRound(bs0, bs1, bm0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull); - QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); - ShiftMessageA(am0, am1); - ShiftMessageA(bm0, bm1); - bm2 = am2 = _mm_set_epi64x(0x0ull, 0x80000000ull); - QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b015807aa98ull); - QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b015807aa98ull); - ShiftMessageA(am1, am2); - ShiftMessageA(bm1, bm2); - bm3 = am3 = _mm_set_epi64x(0x10000000000ull, 0x0ull); - QuadRound(as0, as1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull); - QuadRound(bs0, bs1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull); - ShiftMessageB(am2, am3, am0); - ShiftMessageB(bm2, bm3, bm0); - QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull); - QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull); - ShiftMessageB(am3, am0, am1); - ShiftMessageB(bm3, bm0, bm1); - QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); - QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); - ShiftMessageB(am0, am1, am2); - ShiftMessageB(bm0, bm1, bm2); - QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); - QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); - ShiftMessageB(am1, am2, am3); - ShiftMessageB(bm1, bm2, bm3); - QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); - QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); - ShiftMessageB(am2, am3, am0); - ShiftMessageB(bm2, bm3, bm0); - QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); - QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); - ShiftMessageB(am3, am0, am1); - ShiftMessageB(bm3, bm0, bm1); - QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); - QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); - ShiftMessageB(am0, am1, am2); - ShiftMessageB(bm0, bm1, bm2); - QuadRound(as0, as1, am2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull); - QuadRound(bs0, bs1, bm2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull); - ShiftMessageB(am1, am2, am3); - ShiftMessageB(bm1, bm2, bm3); - QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); - QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); - ShiftMessageB(am2, am3, am0); - ShiftMessageB(bm2, bm3, bm0); - 
QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); - QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); - ShiftMessageB(am3, am0, am1); - ShiftMessageB(bm3, bm0, bm1); - QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); - QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); - ShiftMessageC(am0, am1, am2); - ShiftMessageC(bm0, bm1, bm2); - QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); - QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); - ShiftMessageC(am1, am2, am3); - ShiftMessageC(bm1, bm2, bm3); - QuadRound(as0, as1, am3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull); - QuadRound(bs0, bs1, bm3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull); - as0 = _mm_add_epi32(as0, _mm_load_si128((const __m128i*)INIT0)); - bs0 = _mm_add_epi32(bs0, _mm_load_si128((const __m128i*)INIT0)); - as1 = _mm_add_epi32(as1, _mm_load_si128((const __m128i*)INIT1)); - bs1 = _mm_add_epi32(bs1, _mm_load_si128((const __m128i*)INIT1)); - - /* Extract hash into out */ - Unshuffle(as0, as1); - Unshuffle(bs0, bs1); - Save(out, as0); - Save(out + 16, as1); - Save(out + 32, bs0); - Save(out + 48, bs1); -} - -} - -#endif diff --git a/src/crypto/sha256_x86_shani.cpp b/src/crypto/sha256_x86_shani.cpp new file mode 100644 index 0000000000..a82802199f --- /dev/null +++ b/src/crypto/sha256_x86_shani.cpp @@ -0,0 +1,356 @@ +// Copyright (c) 2018-2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +// +// Based on https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-x86.c, +// Written and placed in public domain by Jeffrey Walton. +// Based on code from Intel, and by Sean Gulley for the miTLS project. 
+ +#ifdef ENABLE_X86_SHANI + +#include +#include + +namespace { + +alignas(__m128i) const uint8_t MASK[16] = {0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c}; +alignas(__m128i) const uint8_t INIT0[16] = {0x8c, 0x68, 0x05, 0x9b, 0x7f, 0x52, 0x0e, 0x51, 0x85, 0xae, 0x67, 0xbb, 0x67, 0xe6, 0x09, 0x6a}; +alignas(__m128i) const uint8_t INIT1[16] = {0x19, 0xcd, 0xe0, 0x5b, 0xab, 0xd9, 0x83, 0x1f, 0x3a, 0xf5, 0x4f, 0xa5, 0x72, 0xf3, 0x6e, 0x3c}; + +void inline __attribute__((always_inline)) QuadRound(__m128i& state0, __m128i& state1, uint64_t k1, uint64_t k0) +{ + const __m128i msg = _mm_set_epi64x(k1, k0); + state1 = _mm_sha256rnds2_epu32(state1, state0, msg); + state0 = _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e)); +} + +void inline __attribute__((always_inline)) QuadRound(__m128i& state0, __m128i& state1, __m128i m, uint64_t k1, uint64_t k0) +{ + const __m128i msg = _mm_add_epi32(m, _mm_set_epi64x(k1, k0)); + state1 = _mm_sha256rnds2_epu32(state1, state0, msg); + state0 = _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e)); +} + +void inline __attribute__((always_inline)) ShiftMessageA(__m128i& m0, __m128i m1) +{ + m0 = _mm_sha256msg1_epu32(m0, m1); +} + +void inline __attribute__((always_inline)) ShiftMessageC(__m128i& m0, __m128i m1, __m128i& m2) +{ + m2 = _mm_sha256msg2_epu32(_mm_add_epi32(m2, _mm_alignr_epi8(m1, m0, 4)), m1); +} + +void inline __attribute__((always_inline)) ShiftMessageB(__m128i& m0, __m128i m1, __m128i& m2) +{ + ShiftMessageC(m0, m1, m2); + ShiftMessageA(m0, m1); +} + +void inline __attribute__((always_inline)) Shuffle(__m128i& s0, __m128i& s1) +{ + const __m128i t1 = _mm_shuffle_epi32(s0, 0xB1); + const __m128i t2 = _mm_shuffle_epi32(s1, 0x1B); + s0 = _mm_alignr_epi8(t1, t2, 0x08); + s1 = _mm_blend_epi16(t2, t1, 0xF0); +} + +void inline __attribute__((always_inline)) Unshuffle(__m128i& s0, __m128i& s1) +{ + const __m128i t1 = _mm_shuffle_epi32(s0, 0x1B); + const __m128i t2 = _mm_shuffle_epi32(s1, 0xB1); + s0 = _mm_blend_epi16(t1, t2, 0xF0); + s1 = _mm_alignr_epi8(t2, t1, 0x08); +} + +__m128i inline __attribute__((always_inline)) Load(const unsigned char* in) +{ + return _mm_shuffle_epi8(_mm_loadu_si128((const __m128i*)in), _mm_load_si128((const __m128i*)MASK)); +} + +void inline __attribute__((always_inline)) Save(unsigned char* out, __m128i s) +{ + _mm_storeu_si128((__m128i*)out, _mm_shuffle_epi8(s, _mm_load_si128((const __m128i*)MASK))); +} +} + +namespace sha256_x86_shani { +void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks) +{ + __m128i m0, m1, m2, m3, s0, s1, so0, so1; + + /* Load state */ + s0 = _mm_loadu_si128((const __m128i*)s); + s1 = _mm_loadu_si128((const __m128i*)(s + 4)); + Shuffle(s0, s1); + + while (blocks--) { + /* Remember old state */ + so0 = s0; + so1 = s1; + + /* Load data and transform */ + m0 = Load(chunk); + QuadRound(s0, s1, m0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull); + m1 = Load(chunk + 16); + QuadRound(s0, s1, m1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + ShiftMessageA(m0, m1); + m2 = Load(chunk + 32); + QuadRound(s0, s1, m2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); + ShiftMessageA(m1, m2); + m3 = Load(chunk + 48); + QuadRound(s0, s1, m3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull); + ShiftMessageB(m2, m3, m0); + QuadRound(s0, s1, m0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull); + ShiftMessageB(m3, m0, m1); + QuadRound(s0, s1, m1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); + ShiftMessageB(m0, m1, m2); + 
QuadRound(s0, s1, m2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); + ShiftMessageB(m1, m2, m3); + QuadRound(s0, s1, m3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); + ShiftMessageB(m2, m3, m0); + QuadRound(s0, s1, m0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); + ShiftMessageB(m3, m0, m1); + QuadRound(s0, s1, m1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); + ShiftMessageB(m0, m1, m2); + QuadRound(s0, s1, m2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull); + ShiftMessageB(m1, m2, m3); + QuadRound(s0, s1, m3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); + ShiftMessageB(m2, m3, m0); + QuadRound(s0, s1, m0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); + ShiftMessageB(m3, m0, m1); + QuadRound(s0, s1, m1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); + ShiftMessageC(m0, m1, m2); + QuadRound(s0, s1, m2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); + ShiftMessageC(m1, m2, m3); + QuadRound(s0, s1, m3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull); + + /* Combine with old state */ + s0 = _mm_add_epi32(s0, so0); + s1 = _mm_add_epi32(s1, so1); + + /* Advance */ + chunk += 64; + } + + Unshuffle(s0, s1); + _mm_storeu_si128((__m128i*)s, s0); + _mm_storeu_si128((__m128i*)(s + 4), s1); +} +} + +namespace sha256d64_x86_shani { + +void Transform_2way(unsigned char* out, const unsigned char* in) +{ + __m128i am0, am1, am2, am3, as0, as1, aso0, aso1; + __m128i bm0, bm1, bm2, bm3, bs0, bs1, bso0, bso1; + + /* Transform 1 */ + bs0 = as0 = _mm_load_si128((const __m128i*)INIT0); + bs1 = as1 = _mm_load_si128((const __m128i*)INIT1); + am0 = Load(in); + bm0 = Load(in + 64); + QuadRound(as0, as1, am0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull); + QuadRound(bs0, bs1, bm0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull); + am1 = Load(in + 16); + bm1 = Load(in + 80); + QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + ShiftMessageA(am0, am1); + ShiftMessageA(bm0, bm1); + am2 = Load(in + 32); + bm2 = Load(in + 96); + QuadRound(as0, as1, am2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); + QuadRound(bs0, bs1, bm2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); + ShiftMessageA(am1, am2); + ShiftMessageA(bm1, bm2); + am3 = Load(in + 48); + bm3 = Load(in + 112); + QuadRound(as0, as1, am3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull); + QuadRound(bs0, bs1, bm3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull); + ShiftMessageB(am2, am3, am0); + ShiftMessageB(bm2, bm3, bm0); + QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull); + QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull); + ShiftMessageB(am3, am0, am1); + ShiftMessageB(bm3, bm0, bm1); + QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); + QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); + ShiftMessageB(am0, am1, am2); + ShiftMessageB(bm0, bm1, bm2); + QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); + QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); + ShiftMessageB(am1, am2, am3); + ShiftMessageB(bm1, bm2, bm3); + QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); + QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); + ShiftMessageB(am2, am3, am0); + ShiftMessageB(bm2, bm3, bm0); + QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); + QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); + ShiftMessageB(am3, am0, am1); + ShiftMessageB(bm3, bm0, bm1); + 
QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); + QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); + ShiftMessageB(am0, am1, am2); + ShiftMessageB(bm0, bm1, bm2); + QuadRound(as0, as1, am2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull); + QuadRound(bs0, bs1, bm2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull); + ShiftMessageB(am1, am2, am3); + ShiftMessageB(bm1, bm2, bm3); + QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); + QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); + ShiftMessageB(am2, am3, am0); + ShiftMessageB(bm2, bm3, bm0); + QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); + QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); + ShiftMessageB(am3, am0, am1); + ShiftMessageB(bm3, bm0, bm1); + QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); + QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); + ShiftMessageC(am0, am1, am2); + ShiftMessageC(bm0, bm1, bm2); + QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); + QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); + ShiftMessageC(am1, am2, am3); + ShiftMessageC(bm1, bm2, bm3); + QuadRound(as0, as1, am3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull); + QuadRound(bs0, bs1, bm3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull); + as0 = _mm_add_epi32(as0, _mm_load_si128((const __m128i*)INIT0)); + bs0 = _mm_add_epi32(bs0, _mm_load_si128((const __m128i*)INIT0)); + as1 = _mm_add_epi32(as1, _mm_load_si128((const __m128i*)INIT1)); + bs1 = _mm_add_epi32(bs1, _mm_load_si128((const __m128i*)INIT1)); + + /* Transform 2 */ + aso0 = as0; + bso0 = bs0; + aso1 = as1; + bso1 = bs1; + QuadRound(as0, as1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull); + QuadRound(bs0, bs1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull); + QuadRound(as0, as1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + QuadRound(bs0, bs1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); + QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull); + QuadRound(as0, as1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull); + QuadRound(bs0, bs1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull); + QuadRound(as0, as1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull); + QuadRound(bs0, bs1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull); + QuadRound(as0, as1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full); + QuadRound(bs0, bs1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full); + QuadRound(as0, as1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull); + QuadRound(bs0, bs1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull); + QuadRound(as0, as1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull); + QuadRound(bs0, bs1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull); + QuadRound(as0, as1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull); + QuadRound(bs0, bs1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull); + QuadRound(as0, as1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull); + QuadRound(bs0, bs1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull); + QuadRound(as0, as1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull); + QuadRound(bs0, bs1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull); + QuadRound(as0, as1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull); + QuadRound(bs0, bs1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull); + QuadRound(as0, as1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull); + QuadRound(bs0, bs1, 0x6fd15ca70b02e931ull, 
0xdb48a36383613bdaull); + QuadRound(as0, as1, 0x6d4378906ed41a95ull, 0x31338431521afacaull); + QuadRound(bs0, bs1, 0x6d4378906ed41a95ull, 0x31338431521afacaull); + QuadRound(as0, as1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull); + QuadRound(bs0, bs1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull); + QuadRound(as0, as1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull); + QuadRound(bs0, bs1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull); + as0 = _mm_add_epi32(as0, aso0); + bs0 = _mm_add_epi32(bs0, bso0); + as1 = _mm_add_epi32(as1, aso1); + bs1 = _mm_add_epi32(bs1, bso1); + + /* Extract hash */ + Unshuffle(as0, as1); + Unshuffle(bs0, bs1); + am0 = as0; + bm0 = bs0; + am1 = as1; + bm1 = bs1; + + /* Transform 3 */ + bs0 = as0 = _mm_load_si128((const __m128i*)INIT0); + bs1 = as1 = _mm_load_si128((const __m128i*)INIT1); + QuadRound(as0, as1, am0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull); + QuadRound(bs0, bs1, bm0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull); + QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull); + ShiftMessageA(am0, am1); + ShiftMessageA(bm0, bm1); + bm2 = am2 = _mm_set_epi64x(0x0ull, 0x80000000ull); + QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b015807aa98ull); + QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b015807aa98ull); + ShiftMessageA(am1, am2); + ShiftMessageA(bm1, bm2); + bm3 = am3 = _mm_set_epi64x(0x10000000000ull, 0x0ull); + QuadRound(as0, as1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull); + QuadRound(bs0, bs1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull); + ShiftMessageB(am2, am3, am0); + ShiftMessageB(bm2, bm3, bm0); + QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull); + QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull); + ShiftMessageB(am3, am0, am1); + ShiftMessageB(bm3, bm0, bm1); + QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); + QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full); + ShiftMessageB(am0, am1, am2); + ShiftMessageB(bm0, bm1, bm2); + QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); + QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull); + ShiftMessageB(am1, am2, am3); + ShiftMessageB(bm1, bm2, bm3); + QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); + QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull); + ShiftMessageB(am2, am3, am0); + ShiftMessageB(bm2, bm3, bm0); + QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); + QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull); + ShiftMessageB(am3, am0, am1); + ShiftMessageB(bm3, bm0, bm1); + QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); + QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull); + ShiftMessageB(am0, am1, am2); + ShiftMessageB(bm0, bm1, bm2); + QuadRound(as0, as1, am2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull); + QuadRound(bs0, bs1, bm2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull); + ShiftMessageB(am1, am2, am3); + ShiftMessageB(bm1, bm2, bm3); + QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); + QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull); + ShiftMessageB(am2, am3, am0); + ShiftMessageB(bm2, bm3, bm0); + QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); + QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull); + ShiftMessageB(am3, am0, am1); + 
ShiftMessageB(bm3, bm0, bm1); + QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); + QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull); + ShiftMessageC(am0, am1, am2); + ShiftMessageC(bm0, bm1, bm2); + QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); + QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull); + ShiftMessageC(am1, am2, am3); + ShiftMessageC(bm1, bm2, bm3); + QuadRound(as0, as1, am3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull); + QuadRound(bs0, bs1, bm3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull); + as0 = _mm_add_epi32(as0, _mm_load_si128((const __m128i*)INIT0)); + bs0 = _mm_add_epi32(bs0, _mm_load_si128((const __m128i*)INIT0)); + as1 = _mm_add_epi32(as1, _mm_load_si128((const __m128i*)INIT1)); + bs1 = _mm_add_epi32(bs1, _mm_load_si128((const __m128i*)INIT1)); + + /* Extract hash into out */ + Unshuffle(as0, as1); + Unshuffle(bs0, bs1); + Save(out, as0); + Save(out + 16, as1); + Save(out + 32, bs0); + Save(out + 48, bs1); +} + +} + +#endif -- cgit v1.2.3 From 48a72fa81f80c8a3c7c6de8339b5feb361dece1c Mon Sep 17 00:00:00 2001 From: Pavol Rusnak Date: Thu, 20 Jan 2022 18:57:27 +0100 Subject: Add sha256_arm_shani to build system Also rename AArch64 intrinsics to ARMv8 intrinsics as these are not necessarily limited to 64-bit --- configure.ac | 23 ++++++++++++++++++++- src/Makefile.am | 10 +++++++++ src/crypto/sha256.cpp | 46 +++++++++++++++++++++++++++++++++++++++++ src/crypto/sha256_arm_shani.cpp | 22 ++++++++++++++++++++ 4 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 src/crypto/sha256_arm_shani.cpp diff --git a/configure.ac b/configure.ac index 4cf2446c6d..3d389135f3 100644 --- a/configure.ac +++ b/configure.ac @@ -569,10 +569,11 @@ CXXFLAGS="$TEMP_CXXFLAGS" # ARM AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto], [ARM_CRC_CXXFLAGS="-march=armv8-a+crc+crypto"], [], [$CXXFLAG_WERROR]) +AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto], [ARM_SHANI_CXXFLAGS="-march=armv8-a+crc+crypto"], [], [$CXXFLAG_WERROR]) TEMP_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$CXXFLAGS $ARM_CRC_CXXFLAGS" -AC_MSG_CHECKING([for AArch64 CRC32 intrinsics]) +AC_MSG_CHECKING([for ARMv8 CRC32 intrinsics]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include #include @@ -589,6 +590,24 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ) CXXFLAGS="$TEMP_CXXFLAGS" +TEMP_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS $ARM_SHANI_CXXFLAGS" +AC_MSG_CHECKING([for ARMv8 SHA-NI intrinsics]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]],[[ + uint32x4_t a, b, c; + vsha256h2q_u32(a, b, c); + vsha256hq_u32(a, b, c); + vsha256su0q_u32(a, b); + vsha256su1q_u32(a, b, c); + ]])], + [ AC_MSG_RESULT([yes]); enable_arm_shani=yes; AC_DEFINE([ENABLE_ARM_SHANI], [1], [Define this symbol to build code that uses ARMv8 SHA-NI intrinsics]) ], + [ AC_MSG_RESULT([no])] +) +CXXFLAGS="$TEMP_CXXFLAGS" + fi CPPFLAGS="$CPPFLAGS -DHAVE_BUILD_INFO" @@ -1808,6 +1827,7 @@ AM_CONDITIONAL([ENABLE_SSE41], [test "$enable_sse41" = "yes"]) AM_CONDITIONAL([ENABLE_AVX2], [test "$enable_avx2" = "yes"]) AM_CONDITIONAL([ENABLE_X86_SHANI], [test "$enable_x86_shani" = "yes"]) AM_CONDITIONAL([ENABLE_ARM_CRC], [test "$enable_arm_crc" = "yes"]) +AM_CONDITIONAL([ENABLE_ARM_SHANI], [test "$enable_arm_shani" = "yes"]) AM_CONDITIONAL([USE_ASM], [test "$use_asm" = "yes"]) AM_CONDITIONAL([WORDS_BIGENDIAN], [test "$ac_cv_c_bigendian" = "yes"]) AM_CONDITIONAL([USE_NATPMP], [test "$use_natpmp" = "yes"]) @@ -1866,6 +1886,7 @@ AC_SUBST(CLMUL_CXXFLAGS) AC_SUBST(AVX2_CXXFLAGS) 
 AC_SUBST(X86_SHANI_CXXFLAGS)
 AC_SUBST(ARM_CRC_CXXFLAGS)
+AC_SUBST(ARM_SHANI_CXXFLAGS)
 AC_SUBST(LIBTOOL_APP_LDFLAGS)
 AC_SUBST(USE_SQLITE)
 AC_SUBST(USE_BDB)

diff --git a/src/Makefile.am b/src/Makefile.am
index 41208993e8..b306944f87 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -50,6 +50,10 @@ if ENABLE_X86_SHANI
 LIBBITCOIN_CRYPTO_X86_SHANI = crypto/libbitcoin_crypto_x86_shani.a
 LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_X86_SHANI)
 endif
+if ENABLE_ARM_SHANI
+LIBBITCOIN_CRYPTO_ARM_SHANI = crypto/libbitcoin_crypto_arm_shani.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_ARM_SHANI)
+endif
 
 $(LIBSECP256K1): $(wildcard secp256k1/src/*.h) $(wildcard secp256k1/src/*.c) $(wildcard secp256k1/include/*)
 	$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
@@ -504,6 +508,12 @@ crypto_libbitcoin_crypto_x86_shani_a_CXXFLAGS += $(X86_SHANI_CXXFLAGS)
 crypto_libbitcoin_crypto_x86_shani_a_CPPFLAGS += -DENABLE_X86_SHANI
 crypto_libbitcoin_crypto_x86_shani_a_SOURCES = crypto/sha256_x86_shani.cpp
 
+crypto_libbitcoin_crypto_arm_shani_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+crypto_libbitcoin_crypto_arm_shani_a_CPPFLAGS = $(AM_CPPFLAGS)
+crypto_libbitcoin_crypto_arm_shani_a_CXXFLAGS += $(ARM_SHANI_CXXFLAGS)
+crypto_libbitcoin_crypto_arm_shani_a_CPPFLAGS += -DENABLE_ARM_SHANI
+crypto_libbitcoin_crypto_arm_shani_a_SOURCES = crypto/sha256_arm_shani.cpp
+
 # consensus: shared between all executables that validate any consensus rules.
 libbitcoin_consensus_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
 libbitcoin_consensus_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)

diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp
index 48ee121e2d..7ba86c7ffa 100644
--- a/src/crypto/sha256.cpp
+++ b/src/crypto/sha256.cpp
@@ -10,6 +10,16 @@
 #include
 
+#if defined(__linux__) && defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL)
+#include <sys/auxv.h>
+#include <asm/hwcap.h>
+#endif
+
+#if defined(MAC_OSX) && defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
 #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
 #if defined(USE_ASM)
 namespace sha256_sse4
@@ -39,6 +49,11 @@ namespace sha256_x86_shani
 {
 void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks);
 }
 
+namespace sha256_arm_shani
+{
+void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks);
+}
+
 // Internal implementation code.
 namespace
 {
@@ -623,6 +638,37 @@ std::string SHA256AutoDetect()
 #endif
 #endif
 
+#if defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL)
+    bool have_arm_shani = false;
+
+#if defined(__linux__)
+#if defined(__arm__) // 32-bit
+    if (getauxval(AT_HWCAP2) & HWCAP2_SHA2) {
+        have_arm_shani = true;
+    }
+#endif
+#if defined(__aarch64__) // 64-bit
+    if (getauxval(AT_HWCAP) & HWCAP_SHA2) {
+        have_arm_shani = true;
+    }
+#endif
+#endif
+
+#if defined(MAC_OSX)
+    int val = 0;
+    size_t len = sizeof(val);
+    if (sysctlbyname("hw.optional.arm.FEAT_SHA256", &val, &len, nullptr, 0) == 0) {
+        have_arm_shani = val != 0;
+    }
+#endif
+
+    if (have_arm_shani) {
+        Transform = sha256_arm_shani::Transform;
+        TransformD64 = TransformD64Wrapper;
+        ret = "arm_shani(1way)";
+    }
+#endif
+
     assert(SelfTest());
     return ret;
 }

diff --git a/src/crypto/sha256_arm_shani.cpp b/src/crypto/sha256_arm_shani.cpp
new file mode 100644
index 0000000000..92b1d0e299
--- /dev/null
+++ b/src/crypto/sha256_arm_shani.cpp
@@ -0,0 +1,22 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+// Based on https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-arm.c,
+// Written and placed in public domain by Jeffrey Walton.
+// Based on code from ARM, and by Johannes Schneiders, Skip Hovsmith and
+// Barry O'Rourke for the mbedTLS project.
+
+#ifdef ENABLE_ARM_SHANI
+
+#include <cstdint>
+#include <cstddef>
+
+namespace sha256_arm_shani {
+void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks)
+{
+
+}
+}
+
+#endif
-- 
cgit v1.2.3


From fe0629852aaf3a26f291bfa535e7e455fe7bea06 Mon Sep 17 00:00:00 2001
From: Pavol Rusnak
Date: Thu, 20 Jan 2022 19:00:33 +0100
Subject: Implement sha256_arm_shani::Transform

Co-Authored-By: Rauli Kumpulainen
Co-Authored-By: Pieter Wuille
---
 src/crypto/sha256_arm_shani.cpp | 177 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 177 insertions(+)

diff --git a/src/crypto/sha256_arm_shani.cpp b/src/crypto/sha256_arm_shani.cpp
index 92b1d0e299..2829daf710 100644
--- a/src/crypto/sha256_arm_shani.cpp
+++ b/src/crypto/sha256_arm_shani.cpp
@@ -9,13 +9,190 @@
 #ifdef ENABLE_ARM_SHANI
 
+#include <array>
 #include <cstdint>
 #include <cstddef>
+#include <arm_acle.h>
+#include <arm_neon.h>
+
+namespace {
+alignas(uint32x4_t) static constexpr std::array<uint32_t, 64> K =
+{
+    0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
+    0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+    0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
+    0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+    0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
+    0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+    0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
+    0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+    0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
+    0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+    0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
+    0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+    0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
+    0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+    0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
+    0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
+};
+}
 
 namespace sha256_arm_shani {
 void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks)
 {
+    uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
+    uint32x4_t MSG0, MSG1, MSG2, MSG3;
+    uint32x4_t TMP0, TMP2;
+
+    // Load state
+    STATE0 = vld1q_u32(&s[0]);
+    STATE1 = vld1q_u32(&s[4]);
+
+    while (blocks--)
+    {
+        // Save state
+        ABEF_SAVE = STATE0;
+        CDGH_SAVE = STATE1;
+
+        // Load and convert input chunk to Big Endian
+        MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 0)));
+        MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 16)));
+        MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 32)));
+        MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 48)));
+        chunk += 64;
+
+        // Original implementation preloaded message and constant addition which was 1-3% slower.
+ // Now included as first step in quad round code saving one Q Neon register + // "TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0]));" + + // Rounds 1-4 + TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); + TMP2 = STATE0; + MSG0 = vsha256su0q_u32(MSG0, MSG1); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); + + // Rounds 5-8 + TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); + TMP2 = STATE0; + MSG1 = vsha256su0q_u32(MSG1, MSG2); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); + + // Rounds 9-12 + TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); + TMP2 = STATE0; + MSG2 = vsha256su0q_u32(MSG2, MSG3); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); + + // Rounds 13-16 + TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); + TMP2 = STATE0; + MSG3 = vsha256su0q_u32(MSG3, MSG0); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); + + // Rounds 17-20 + TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); + TMP2 = STATE0; + MSG0 = vsha256su0q_u32(MSG0, MSG1); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); + + // Rounds 21-24 + TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); + TMP2 = STATE0; + MSG1 = vsha256su0q_u32(MSG1, MSG2); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); + + // Rounds 25-28 + TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); + TMP2 = STATE0; + MSG2 = vsha256su0q_u32(MSG2, MSG3); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); + + // Rounds 29-32 + TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); + TMP2 = STATE0; + MSG3 = vsha256su0q_u32(MSG3, MSG0); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); + + // Rounds 33-36 + TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); + TMP2 = STATE0; + MSG0 = vsha256su0q_u32(MSG0, MSG1); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); + + // Rounds 37-40 + TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); + TMP2 = STATE0; + MSG1 = vsha256su0q_u32(MSG1, MSG2); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); + + // Rounds 41-44 + TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); + TMP2 = STATE0; + MSG2 = vsha256su0q_u32(MSG2, MSG3); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); + + // Rounds 45-48 + TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); + TMP2 = STATE0; + MSG3 = vsha256su0q_u32(MSG3, MSG0); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); + + // Rounds 49-52 + TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48])); + TMP2 = STATE0; + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + + // Rounds 53-56 + TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); + TMP2 = STATE0; + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = 
vsha256h2q_u32(STATE1, TMP2, TMP0); + + // Rounds 57-60 + TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); + TMP2 = STATE0; + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + + // Rounds 61-64 + TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); + TMP2 = STATE0; + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + + // Update state + STATE0 = vaddq_u32(STATE0, ABEF_SAVE); + STATE1 = vaddq_u32(STATE1, CDGH_SAVE); + } + // Save final state + vst1q_u32(&s[0], STATE0); + vst1q_u32(&s[4], STATE1); } } -- cgit v1.2.3 From aaa1d03d3acebeb44fdd40a302f086aad3d329ce Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 21 Jan 2022 19:07:36 +0000 Subject: Add optimized sha256d64_arm_shani::Transform_2way --- src/crypto/sha256.cpp | 8 +- src/crypto/sha256_arm_shani.cpp | 700 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 707 insertions(+), 1 deletion(-) diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index 7ba86c7ffa..cde543e68c 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -54,6 +54,11 @@ namespace sha256_arm_shani void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks); } +namespace sha256d64_arm_shani +{ +void Transform_2way(unsigned char* out, const unsigned char* in); +} + // Internal implementation code. namespace { @@ -665,7 +670,8 @@ std::string SHA256AutoDetect() if (have_arm_shani) { Transform = sha256_arm_shani::Transform; TransformD64 = TransformD64Wrapper; - ret = "arm_shani(1way)"; + TransformD64_2way = sha256d64_arm_shani::Transform_2way; + ret = "arm_shani(1way,2way)"; } #endif diff --git a/src/crypto/sha256_arm_shani.cpp b/src/crypto/sha256_arm_shani.cpp index 2829daf710..7ffa56be70 100644 --- a/src/crypto/sha256_arm_shani.cpp +++ b/src/crypto/sha256_arm_shani.cpp @@ -6,6 +6,7 @@ // Written and placed in public domain by Jeffrey Walton. // Based on code from ARM, and by Johannes Schneiders, Skip Hovsmith and // Barry O'Rourke for the mbedTLS project. +// Variant specialized for 64-byte inputs added by Pieter Wuille. #ifdef ENABLE_ARM_SHANI @@ -196,4 +197,703 @@ void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks) } } +namespace sha256d64_arm_shani { +void Transform_2way(unsigned char* output, const unsigned char* input) +{ + /* Initial state. */ + alignas(uint32x4_t) static constexpr std::array INIT = { + 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 + }; + + /* Precomputed message schedule for the 2nd transform. */ + alignas(uint32x4_t) static constexpr std::array MIDS = { + 0xc28a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf374, + 0x649b69c1, 0xf0fe4786, 0x0fe1edc6, 0x240cf254, + 0x4fe9346f, 0x6cc984be, 0x61b9411e, 0x16f988fa, + 0xf2c65152, 0xa88e5a6d, 0xb019fc65, 0xb9d99ec7, + 0x9a1231c3, 0xe70eeaa0, 0xfdb1232b, 0xc7353eb0, + 0x3069bad5, 0xcb976d5f, 0x5a0f118f, 0xdc1eeefd, + 0x0a35b689, 0xde0b7a04, 0x58f4ca9d, 0xe15d5b16, + 0x007f3e86, 0x37088980, 0xa507ea32, 0x6fab9537, + 0x17406110, 0x0d8cd6f1, 0xcdaa3b6d, 0xc0bbbe37, + 0x83613bda, 0xdb48a363, 0x0b02e931, 0x6fd15ca7, + 0x521afaca, 0x31338431, 0x6ed41a95, 0x6d437890, + 0xc39c91f2, 0x9eccabbd, 0xb5c9a0e6, 0x532fb63c, + 0xd2c741c6, 0x07237ea3, 0xa4954b68, 0x4c191d76 + }; + + /* A few precomputed message schedule values for the 3rd transform. 
*/ + alignas(uint32x4_t) static constexpr std::array FINS = { + 0x5807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x80000000, 0x00000000, 0x00000000, 0x00000000, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf274 + }; + + /* Padding processed in the 3rd transform (byteswapped). */ + alignas(uint32x4_t) static constexpr std::array FINAL = {0x80000000, 0, 0, 0, 0, 0, 0, 0x100}; + + uint32x4_t STATE0A, STATE0B, STATE1A, STATE1B, ABEF_SAVEA, ABEF_SAVEB, CDGH_SAVEA, CDGH_SAVEB; + uint32x4_t MSG0A, MSG0B, MSG1A, MSG1B, MSG2A, MSG2B, MSG3A, MSG3B; + uint32x4_t TMP0A, TMP0B, TMP2A, TMP2B, TMP; + + // Transform 1: Load state + STATE0A = vld1q_u32(&INIT[0]); + STATE0B = STATE0A; + STATE1A = vld1q_u32(&INIT[4]); + STATE1B = STATE1A; + + // Transform 1: Load and convert input chunk to Big Endian + MSG0A = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 0))); + MSG1A = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 16))); + MSG2A = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 32))); + MSG3A = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 48))); + MSG0B = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 64))); + MSG1B = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 80))); + MSG2B = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 96))); + MSG3B = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(input + 112))); + + // Transform 1: Rounds 1-4 + TMP = vld1q_u32(&K[0]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG0A = vsha256su0q_u32(MSG0A, MSG1A); + MSG0B = vsha256su0q_u32(MSG0B, MSG1B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG0A = vsha256su1q_u32(MSG0A, MSG2A, MSG3A); + MSG0B = vsha256su1q_u32(MSG0B, MSG2B, MSG3B); + + // Transform 1: Rounds 5-8 + TMP = vld1q_u32(&K[4]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG1A = vsha256su0q_u32(MSG1A, MSG2A); + MSG1B = vsha256su0q_u32(MSG1B, MSG2B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG1A = vsha256su1q_u32(MSG1A, MSG3A, MSG0A); + MSG1B = vsha256su1q_u32(MSG1B, MSG3B, MSG0B); + + // Transform 1: Rounds 9-12 + TMP = vld1q_u32(&K[8]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG2A = vsha256su0q_u32(MSG2A, MSG3A); + MSG2B = vsha256su0q_u32(MSG2B, MSG3B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG2A = vsha256su1q_u32(MSG2A, MSG0A, MSG1A); + MSG2B = vsha256su1q_u32(MSG2B, MSG0B, MSG1B); + + // Transform 1: Rounds 13-16 + TMP = vld1q_u32(&K[12]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG3A = vsha256su0q_u32(MSG3A, MSG0A); + MSG3B = vsha256su0q_u32(MSG3B, MSG0B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG3A = vsha256su1q_u32(MSG3A, MSG1A, MSG2A); + MSG3B = vsha256su1q_u32(MSG3B, MSG1B, MSG2B); + + // Transform 1: Rounds 17-20 + TMP = 
vld1q_u32(&K[16]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG0A = vsha256su0q_u32(MSG0A, MSG1A); + MSG0B = vsha256su0q_u32(MSG0B, MSG1B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG0A = vsha256su1q_u32(MSG0A, MSG2A, MSG3A); + MSG0B = vsha256su1q_u32(MSG0B, MSG2B, MSG3B); + + // Transform 1: Rounds 21-24 + TMP = vld1q_u32(&K[20]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG1A = vsha256su0q_u32(MSG1A, MSG2A); + MSG1B = vsha256su0q_u32(MSG1B, MSG2B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG1A = vsha256su1q_u32(MSG1A, MSG3A, MSG0A); + MSG1B = vsha256su1q_u32(MSG1B, MSG3B, MSG0B); + + // Transform 1: Rounds 25-28 + TMP = vld1q_u32(&K[24]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG2A = vsha256su0q_u32(MSG2A, MSG3A); + MSG2B = vsha256su0q_u32(MSG2B, MSG3B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG2A = vsha256su1q_u32(MSG2A, MSG0A, MSG1A); + MSG2B = vsha256su1q_u32(MSG2B, MSG0B, MSG1B); + + // Transform 1: Rounds 29-32 + TMP = vld1q_u32(&K[28]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG3A = vsha256su0q_u32(MSG3A, MSG0A); + MSG3B = vsha256su0q_u32(MSG3B, MSG0B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG3A = vsha256su1q_u32(MSG3A, MSG1A, MSG2A); + MSG3B = vsha256su1q_u32(MSG3B, MSG1B, MSG2B); + + // Transform 1: Rounds 33-36 + TMP = vld1q_u32(&K[32]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG0A = vsha256su0q_u32(MSG0A, MSG1A); + MSG0B = vsha256su0q_u32(MSG0B, MSG1B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG0A = vsha256su1q_u32(MSG0A, MSG2A, MSG3A); + MSG0B = vsha256su1q_u32(MSG0B, MSG2B, MSG3B); + + // Transform 1: Rounds 37-40 + TMP = vld1q_u32(&K[36]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG1A = vsha256su0q_u32(MSG1A, MSG2A); + MSG1B = vsha256su0q_u32(MSG1B, MSG2B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG1A = vsha256su1q_u32(MSG1A, MSG3A, MSG0A); + MSG1B = vsha256su1q_u32(MSG1B, MSG3B, MSG0B); + + // Transform 1: Rounds 41-44 + TMP = vld1q_u32(&K[40]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG2A = vsha256su0q_u32(MSG2A, MSG3A); + MSG2B = vsha256su0q_u32(MSG2B, MSG3B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B 
= vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG2A = vsha256su1q_u32(MSG2A, MSG0A, MSG1A); + MSG2B = vsha256su1q_u32(MSG2B, MSG0B, MSG1B); + + // Transform 1: Rounds 45-48 + TMP = vld1q_u32(&K[44]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG3A = vsha256su0q_u32(MSG3A, MSG0A); + MSG3B = vsha256su0q_u32(MSG3B, MSG0B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG3A = vsha256su1q_u32(MSG3A, MSG1A, MSG2A); + MSG3B = vsha256su1q_u32(MSG3B, MSG1B, MSG2B); + + // Transform 1: Rounds 49-52 + TMP = vld1q_u32(&K[48]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 1: Rounds 53-56 + TMP = vld1q_u32(&K[52]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 1: Rounds 57-60 + TMP = vld1q_u32(&K[56]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 1: Rounds 61-64 + TMP = vld1q_u32(&K[60]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 1: Update state + TMP = vld1q_u32(&INIT[0]); + STATE0A = vaddq_u32(STATE0A, TMP); + STATE0B = vaddq_u32(STATE0B, TMP); + TMP = vld1q_u32(&INIT[4]); + STATE1A = vaddq_u32(STATE1A, TMP); + STATE1B = vaddq_u32(STATE1B, TMP); + + // Transform 2: Save state + ABEF_SAVEA = STATE0A; + ABEF_SAVEB = STATE0B; + CDGH_SAVEA = STATE1A; + CDGH_SAVEB = STATE1B; + + // Transform 2: Rounds 1-4 + TMP = vld1q_u32(&MIDS[0]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 5-8 + TMP = vld1q_u32(&MIDS[4]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 9-12 + TMP = vld1q_u32(&MIDS[8]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 
13-16 + TMP = vld1q_u32(&MIDS[12]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 17-20 + TMP = vld1q_u32(&MIDS[16]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 21-24 + TMP = vld1q_u32(&MIDS[20]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 25-28 + TMP = vld1q_u32(&MIDS[24]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 29-32 + TMP = vld1q_u32(&MIDS[28]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 33-36 + TMP = vld1q_u32(&MIDS[32]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 37-40 + TMP = vld1q_u32(&MIDS[36]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 41-44 + TMP = vld1q_u32(&MIDS[40]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 45-48 + TMP = vld1q_u32(&MIDS[44]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 49-52 + TMP = vld1q_u32(&MIDS[48]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 53-56 + TMP = vld1q_u32(&MIDS[52]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Rounds 57-60 + TMP = vld1q_u32(&MIDS[56]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, 
TMP); + + // Transform 2: Rounds 61-64 + TMP = vld1q_u32(&MIDS[60]); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + + // Transform 2: Update state + STATE0A = vaddq_u32(STATE0A, ABEF_SAVEA); + STATE0B = vaddq_u32(STATE0B, ABEF_SAVEB); + STATE1A = vaddq_u32(STATE1A, CDGH_SAVEA); + STATE1B = vaddq_u32(STATE1B, CDGH_SAVEB); + + // Transform 3: Pad previous output + MSG0A = STATE0A; + MSG0B = STATE0B; + MSG1A = STATE1A; + MSG1B = STATE1B; + MSG2A = vld1q_u32(&FINAL[0]); + MSG2B = MSG2A; + MSG3A = vld1q_u32(&FINAL[4]); + MSG3B = MSG3A; + + // Transform 3: Load state + STATE0A = vld1q_u32(&INIT[0]); + STATE0B = STATE0A; + STATE1A = vld1q_u32(&INIT[4]); + STATE1B = STATE1A; + + // Transform 3: Rounds 1-4 + TMP = vld1q_u32(&K[0]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG0A = vsha256su0q_u32(MSG0A, MSG1A); + MSG0B = vsha256su0q_u32(MSG0B, MSG1B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG0A = vsha256su1q_u32(MSG0A, MSG2A, MSG3A); + MSG0B = vsha256su1q_u32(MSG0B, MSG2B, MSG3B); + + // Transform 3: Rounds 5-8 + TMP = vld1q_u32(&K[4]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG1A = vsha256su0q_u32(MSG1A, MSG2A); + MSG1B = vsha256su0q_u32(MSG1B, MSG2B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG1A = vsha256su1q_u32(MSG1A, MSG3A, MSG0A); + MSG1B = vsha256su1q_u32(MSG1B, MSG3B, MSG0B); + + // Transform 3: Rounds 9-12 + TMP = vld1q_u32(&FINS[0]); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG2A = vld1q_u32(&FINS[4]); + MSG2B = MSG2A; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + MSG2A = vsha256su1q_u32(MSG2A, MSG0A, MSG1A); + MSG2B = vsha256su1q_u32(MSG2B, MSG0B, MSG1B); + + // Transform 3: Rounds 13-16 + TMP = vld1q_u32(&FINS[8]); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG3A = vsha256su0q_u32(MSG3A, MSG0A); + MSG3B = vsha256su0q_u32(MSG3B, MSG0B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP); + MSG3A = vsha256su1q_u32(MSG3A, MSG1A, MSG2A); + MSG3B = vsha256su1q_u32(MSG3B, MSG1B, MSG2B); + + // Transform 3: Rounds 17-20 + TMP = vld1q_u32(&K[16]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG0A = vsha256su0q_u32(MSG0A, MSG1A); + MSG0B = vsha256su0q_u32(MSG0B, MSG1B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG0A = vsha256su1q_u32(MSG0A, MSG2A, MSG3A); + MSG0B = vsha256su1q_u32(MSG0B, MSG2B, MSG3B); + + // Transform 3: Rounds 21-24 + TMP = vld1q_u32(&K[20]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + 
TMP2A = STATE0A; + TMP2B = STATE0B; + MSG1A = vsha256su0q_u32(MSG1A, MSG2A); + MSG1B = vsha256su0q_u32(MSG1B, MSG2B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG1A = vsha256su1q_u32(MSG1A, MSG3A, MSG0A); + MSG1B = vsha256su1q_u32(MSG1B, MSG3B, MSG0B); + + // Transform 3: Rounds 25-28 + TMP = vld1q_u32(&K[24]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG2A = vsha256su0q_u32(MSG2A, MSG3A); + MSG2B = vsha256su0q_u32(MSG2B, MSG3B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG2A = vsha256su1q_u32(MSG2A, MSG0A, MSG1A); + MSG2B = vsha256su1q_u32(MSG2B, MSG0B, MSG1B); + + // Transform 3: Rounds 29-32 + TMP = vld1q_u32(&K[28]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG3A = vsha256su0q_u32(MSG3A, MSG0A); + MSG3B = vsha256su0q_u32(MSG3B, MSG0B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG3A = vsha256su1q_u32(MSG3A, MSG1A, MSG2A); + MSG3B = vsha256su1q_u32(MSG3B, MSG1B, MSG2B); + + // Transform 3: Rounds 33-36 + TMP = vld1q_u32(&K[32]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG0A = vsha256su0q_u32(MSG0A, MSG1A); + MSG0B = vsha256su0q_u32(MSG0B, MSG1B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG0A = vsha256su1q_u32(MSG0A, MSG2A, MSG3A); + MSG0B = vsha256su1q_u32(MSG0B, MSG2B, MSG3B); + + // Transform 3: Rounds 37-40 + TMP = vld1q_u32(&K[36]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG1A = vsha256su0q_u32(MSG1A, MSG2A); + MSG1B = vsha256su0q_u32(MSG1B, MSG2B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG1A = vsha256su1q_u32(MSG1A, MSG3A, MSG0A); + MSG1B = vsha256su1q_u32(MSG1B, MSG3B, MSG0B); + + // Transform 3: Rounds 41-44 + TMP = vld1q_u32(&K[40]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG2A = vsha256su0q_u32(MSG2A, MSG3A); + MSG2B = vsha256su0q_u32(MSG2B, MSG3B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG2A = vsha256su1q_u32(MSG2A, MSG0A, MSG1A); + MSG2B = vsha256su1q_u32(MSG2B, MSG0B, MSG1B); + + // Transform 3: Rounds 45-48 + TMP = vld1q_u32(&K[44]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + MSG3A = vsha256su0q_u32(MSG3A, MSG0A); + MSG3B = vsha256su0q_u32(MSG3B, MSG0B); + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, 
TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + MSG3A = vsha256su1q_u32(MSG3A, MSG1A, MSG2A); + MSG3B = vsha256su1q_u32(MSG3B, MSG1B, MSG2B); + + // Transform 3: Rounds 49-52 + TMP = vld1q_u32(&K[48]); + TMP0A = vaddq_u32(MSG0A, TMP); + TMP0B = vaddq_u32(MSG0B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 3: Rounds 53-56 + TMP = vld1q_u32(&K[52]); + TMP0A = vaddq_u32(MSG1A, TMP); + TMP0B = vaddq_u32(MSG1B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 3: Rounds 57-60 + TMP = vld1q_u32(&K[56]); + TMP0A = vaddq_u32(MSG2A, TMP); + TMP0B = vaddq_u32(MSG2B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 3: Rounds 61-64 + TMP = vld1q_u32(&K[60]); + TMP0A = vaddq_u32(MSG3A, TMP); + TMP0B = vaddq_u32(MSG3B, TMP); + TMP2A = STATE0A; + TMP2B = STATE0B; + STATE0A = vsha256hq_u32(STATE0A, STATE1A, TMP0A); + STATE0B = vsha256hq_u32(STATE0B, STATE1B, TMP0B); + STATE1A = vsha256h2q_u32(STATE1A, TMP2A, TMP0A); + STATE1B = vsha256h2q_u32(STATE1B, TMP2B, TMP0B); + + // Transform 3: Update state + TMP = vld1q_u32(&INIT[0]); + STATE0A = vaddq_u32(STATE0A, TMP); + STATE0B = vaddq_u32(STATE0B, TMP); + TMP = vld1q_u32(&INIT[4]); + STATE1A = vaddq_u32(STATE1A, TMP); + STATE1B = vaddq_u32(STATE1B, TMP); + + // Store result + vst1q_u8(output, vrev32q_u8(vreinterpretq_u8_u32(STATE0A))); + vst1q_u8(output + 16, vrev32q_u8(vreinterpretq_u8_u32(STATE1A))); + vst1q_u8(output + 32, vrev32q_u8(vreinterpretq_u8_u32(STATE0B))); + vst1q_u8(output + 48, vrev32q_u8(vreinterpretq_u8_u32(STATE1B))); +} +} + #endif -- cgit v1.2.3