author    Pavol Rusnak <pavol@rusnak.io>  2022-01-20 19:00:33 +0100
committer Pavol Rusnak <pavol@rusnak.io>  2022-01-28 09:43:56 +0100
commit    fe0629852aaf3a26f291bfa535e7e455fe7bea06
tree      e5594ed52d56b4b7fafc8b97dafd59a57ca61ca2 /src/crypto
parent    48a72fa81f80c8a3c7c6de8339b5feb361dece1c
download  bitcoin-fe0629852aaf3a26f291bfa535e7e455fe7bea06.tar.xz
Implement sha256_arm_shani::Transform
Co-Authored-By: Rauli Kumpulainen <rauliweb@gmail.com>
Co-Authored-By: Pieter Wuille <pieter@wuille.net>
Diffstat (limited to 'src/crypto')
 src/crypto/sha256_arm_shani.cpp | 177 +++++++++++++++++++++++++++++++
 1 file changed, 177 insertions(+), 0 deletions(-)
diff --git a/src/crypto/sha256_arm_shani.cpp b/src/crypto/sha256_arm_shani.cpp
index 92b1d0e299..2829daf710 100644
--- a/src/crypto/sha256_arm_shani.cpp
+++ b/src/crypto/sha256_arm_shani.cpp
@@ -9,13 +9,190 @@
#ifdef ENABLE_ARM_SHANI
+#include <array>
#include <cstdint>
#include <cstddef>
+#include <arm_acle.h>
+#include <arm_neon.h>
+
+namespace {
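+// SHA-256 round constants K[0..63] (FIPS 180-4, Section 4.2.2).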
+alignas(uint32x4_t) static constexpr std::array<uint32_t, 64> K =
+{
+ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
+ 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+ 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
+ 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+ 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
+ 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+ 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
+ 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+ 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
+ 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+ 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
+ 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+ 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
+ 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+ 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
+ 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
+};
+}
namespace sha256_arm_shani {
void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks)
{
+ uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
+ uint32x4_t MSG0, MSG1, MSG2, MSG3;
+ uint32x4_t TMP0, TMP2;
+
+ // Load state
+ STATE0 = vld1q_u32(&s[0]);
+ STATE1 = vld1q_u32(&s[4]);
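+ // STATE0 now holds the working variables {a,b,c,d} from s[0..3];
+ // STATE1 holds {e,f,g,h} from s[4..7].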
+
+ while (blocks--)
+ {
+ // Save state
+ ABEF_SAVE = STATE0;
+ CDGH_SAVE = STATE1;
+
+ // Load and convert input chunk to Big Endian
+ MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 0)));
+ MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 16)));
+ MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 32)));
+ MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(chunk + 48)));
+ chunk += 64;
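+ // (SHA-256 reads each 64-byte block as sixteen big-endian 32-bit words;
+ // vrev32q_u8 byte-swaps each 32-bit lane on little-endian ARM.)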
+
+ // The original implementation preloaded the message/constant additions, which was
+ // 1-3% slower. The addition is now the first step of each quad-round, saving one
+ // Q NEON register: "TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0]));"
+
+ // Rounds 1-4
+ TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0]));
+ TMP2 = STATE0;
+ MSG0 = vsha256su0q_u32(MSG0, MSG1);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
+
+ // Rounds 5-8
+ TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4]));
+ TMP2 = STATE0;
+ MSG1 = vsha256su0q_u32(MSG1, MSG2);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
+
+ // Rounds 9-12
+ TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8]));
+ TMP2 = STATE0;
+ MSG2 = vsha256su0q_u32(MSG2, MSG3);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
+
+ // Rounds 13-16
+ TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12]));
+ TMP2 = STATE0;
+ MSG3 = vsha256su0q_u32(MSG3, MSG0);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
+
+ // Rounds 17-20
+ TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16]));
+ TMP2 = STATE0;
+ MSG0 = vsha256su0q_u32(MSG0, MSG1);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
+
+ // Rounds 21-24
+ TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20]));
+ TMP2 = STATE0;
+ MSG1 = vsha256su0q_u32(MSG1, MSG2);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
+
+ // Rounds 25-28
+ TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24]));
+ TMP2 = STATE0;
+ MSG2 = vsha256su0q_u32(MSG2, MSG3);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
+
+ // Rounds 29-32
+ TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28]));
+ TMP2 = STATE0;
+ MSG3 = vsha256su0q_u32(MSG3, MSG0);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
+
+ // Rounds 33-36
+ TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32]));
+ TMP2 = STATE0;
+ MSG0 = vsha256su0q_u32(MSG0, MSG1);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
+
+ // Rounds 37-40
+ TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36]));
+ TMP2 = STATE0;
+ MSG1 = vsha256su0q_u32(MSG1, MSG2);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
+
+ // Rounds 41-44
+ TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40]));
+ TMP2 = STATE0;
+ MSG2 = vsha256su0q_u32(MSG2, MSG3);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
+
+ // Rounds 45-48
+ TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44]));
+ TMP2 = STATE0;
+ MSG3 = vsha256su0q_u32(MSG3, MSG0);
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+ MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
+
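+ // All 16 message-schedule updates are done; the remaining rounds consume
+ // MSG0..MSG3 without further vsha256su0/vsha256su1 calls.
+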
+ // Rounds 49-52
+ TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48]));
+ TMP2 = STATE0;
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+
+ // Rounds 53-56
+ TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52]));
+ TMP2 = STATE0;
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+
+ // Rounds 57-60
+ TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56]));
+ TMP2 = STATE0;
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+
+ // Rounds 61-64
+ TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60]));
+ TMP2 = STATE0;
+ STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
+ STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
+
+ // Update state
+ STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
+ STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
+ }
+ // Save final state
+ vst1q_u32(&s[0], STATE0);
+ vst1q_u32(&s[4], STATE1);
}
}
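
Usage note (not part of the commit): a minimal standalone caller sketch, assuming
this file is compiled with ENABLE_ARM_SHANI on hardware with the SHA-2 extensions
and linked in. It runs the compression function once over the standard pre-padded
block for the empty message, so the printed digest should equal SHA-256 of the
empty string (e3b0c442...7852b855).

#include <cstddef>
#include <cstdint>
#include <cstdio>

namespace sha256_arm_shani {
void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks);
}

int main()
{
    // SHA-256 initial hash values H0..H7 (FIPS 180-4, Section 5.3.3).
    uint32_t s[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
                     0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
    // Pre-padded block for the empty message: 0x80 marker byte, then zeros
    // (the trailing 64-bit big-endian bit length is also zero).
    unsigned char block[64] = {0x80};
    sha256_arm_shani::Transform(s, block, 1);
    for (uint32_t w : s) std::printf("%08x", w); // e3b0c442...
    std::printf("\n");
}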