author    Richard Henderson <richard.henderson@linaro.org>    2023-06-05 21:57:55 +0300
committer Richard Henderson <richard.henderson@linaro.org>    2023-07-08 07:30:17 +0100
commit    57357322e4bd35c42816c769e36f39af11fc3ddc (patch)
tree      448c770285c43712978d071d179a95a5ef0826fc /host/include
parent    8d97f28e368be8b6248a363792a2cd0f9e9ddf6a (diff)
host/include/ppc: Implement aes-round.h
Detect CRYPTO in cpuinfo; implement the accel hooks.

Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
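The cpuinfo probe itself is not part of this diff (it lives in QEMU's per-host cpuinfo init code); as a rough, hypothetical sketch of how the new CPUINFO_CRYPTO bit could be populated on Linux, assuming getauxval(AT_HWCAP2) and the kernel's PPC_FEATURE2_VEC_CRYPTO hwcap bit (the macro name may vary between libc and kernel headers):

    #include <sys/auxv.h>
    #include <asm/cputable.h>      /* PPC_FEATURE2_VEC_CRYPTO (name may differ per header) */
    #include "host/cpuinfo.h"

    unsigned cpuinfo;

    /* Hypothetical constructor: set CPUINFO_CRYPTO when the kernel reports
       the Power ISA v2.07 vector crypto facility via AT_HWCAP2. */
    static void __attribute__((constructor)) init_cpuinfo(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        if (hwcap2 & PPC_FEATURE2_VEC_CRYPTO) {
            cpuinfo |= CPUINFO_CRYPTO;
        }
    }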
Diffstat (limited to 'host/include')
-rw-r--r--   host/include/ppc/host/cpuinfo.h              |   1
-rw-r--r--   host/include/ppc/host/crypto/aes-round.h     | 182
-rw-r--r--   host/include/ppc64/host/crypto/aes-round.h   |   1
3 files changed, 184 insertions, 0 deletions
diff --git a/host/include/ppc/host/cpuinfo.h b/host/include/ppc/host/cpuinfo.h
index df11e8d417..29ee7f9ef8 100644
--- a/host/include/ppc/host/cpuinfo.h
+++ b/host/include/ppc/host/cpuinfo.h
@@ -16,6 +16,7 @@
#define CPUINFO_ISEL (1u << 5)
#define CPUINFO_ALTIVEC (1u << 6)
#define CPUINFO_VSX (1u << 7)
+#define CPUINFO_CRYPTO (1u << 8)
/* Initialized with a constructor. */
extern unsigned cpuinfo;
diff --git a/host/include/ppc/host/crypto/aes-round.h b/host/include/ppc/host/crypto/aes-round.h
new file mode 100644
index 0000000000..8062d2a537
--- /dev/null
+++ b/host/include/ppc/host/crypto/aes-round.h
@@ -0,0 +1,182 @@
+/*
+ * Power v2.07 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_HOST_CRYPTO_AES_ROUND_H
+#define PPC_HOST_CRYPTO_AES_ROUND_H
+
+#ifdef __ALTIVEC__
+#include "host/cpuinfo.h"
+
+#ifdef __CRYPTO__
+# define HAVE_AES_ACCEL true
+#else
+# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_CRYPTO)
+#endif
+#define ATTR_AES_ACCEL
+
+/*
+ * While there is <altivec.h>, both gcc and clang "aid" with the
+ * endianness issues in different ways. Just use inline asm instead.
+ */
+
+/* Bytes in memory are host-endian; bytes in register are @be. */
+static inline AESStateVec aes_accel_ld(const AESState *p, bool be)
+{
+ AESStateVec r;
+
+ if (be) {
+ asm("lvx %0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
+ } else if (HOST_BIG_ENDIAN) {
+ AESStateVec rev = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ };
+ asm("lvx %0, 0, %1\n\t"
+ "vperm %0, %0, %0, %2"
+ : "=v"(r) : "r"(p), "v"(rev), "m"(*p));
+ } else {
+#ifdef __POWER9_VECTOR__
+ asm("lxvb16x %x0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
+#else
+ asm("lxvd2x %x0, 0, %1\n\t"
+ "xxpermdi %x0, %x0, %x0, 2"
+ : "=v"(r) : "r"(p), "m"(*p));
+#endif
+ }
+ return r;
+}
+
+static inline void aes_accel_st(AESState *p, AESStateVec r, bool be)
+{
+ if (be) {
+ asm("stvx %1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
+ } else if (HOST_BIG_ENDIAN) {
+ AESStateVec rev = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+ };
+ asm("vperm %1, %1, %1, %2\n\t"
+ "stvx %1, 0, %3"
+ : "=m"(*p), "+v"(r) : "v"(rev), "r"(p));
+ } else {
+#ifdef __POWER9_VECTOR__
+ asm("stxvb16x %x1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
+#else
+ asm("xxpermdi %x1, %x1, %x1, 2\n\t"
+ "stxvd2x %x1, 0, %2"
+ : "=m"(*p), "+v"(r) : "r"(p));
+#endif
+ }
+}
+
+static inline AESStateVec aes_accel_vcipher(AESStateVec d, AESStateVec k)
+{
+ asm("vcipher %0, %0, %1" : "+v"(d) : "v"(k));
+ return d;
+}
+
+static inline AESStateVec aes_accel_vncipher(AESStateVec d, AESStateVec k)
+{
+ asm("vncipher %0, %0, %1" : "+v"(d) : "v"(k));
+ return d;
+}
+
+static inline AESStateVec aes_accel_vcipherlast(AESStateVec d, AESStateVec k)
+{
+ asm("vcipherlast %0, %0, %1" : "+v"(d) : "v"(k));
+ return d;
+}
+
+static inline AESStateVec aes_accel_vncipherlast(AESStateVec d, AESStateVec k)
+{
+ asm("vncipherlast %0, %0, %1" : "+v"(d) : "v"(k));
+ return d;
+}
+
+static inline void
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+ AESStateVec t, z = { };
+
+ t = aes_accel_ld(st, be);
+ t = aes_accel_vncipherlast(t, z);
+ t = aes_accel_vcipher(t, z);
+ aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ AESStateVec t, k;
+
+ t = aes_accel_ld(st, be);
+ k = aes_accel_ld(rk, be);
+ t = aes_accel_vcipherlast(t, k);
+ aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ AESStateVec t, k;
+
+ t = aes_accel_ld(st, be);
+ k = aes_accel_ld(rk, be);
+ t = aes_accel_vcipher(t, k);
+ aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+ AESStateVec t, z = { };
+
+ t = aes_accel_ld(st, be);
+ t = aes_accel_vcipherlast(t, z);
+ t = aes_accel_vncipher(t, z);
+ aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ AESStateVec t, k;
+
+ t = aes_accel_ld(st, be);
+ k = aes_accel_ld(rk, be);
+ t = aes_accel_vncipherlast(t, k);
+ aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ AESStateVec t, k;
+
+ t = aes_accel_ld(st, be);
+ k = aes_accel_ld(rk, be);
+ t = aes_accel_vncipher(t, k);
+ aes_accel_st(ret, t, be);
+}
+
+static inline void
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ AESStateVec t, k, z = { };
+
+ t = aes_accel_ld(st, be);
+ k = aes_accel_ld(rk, be);
+ t = aes_accel_vncipher(t, z);
+ aes_accel_st(ret, t ^ k, be);
+}
+#else
+/* Without ALTIVEC, we can't even write inline assembly. */
+#include "host/include/generic/host/crypto/aes-round.h"
+#endif
+
+#endif /* PPC_HOST_CRYPTO_AES_ROUND_H */
diff --git a/host/include/ppc64/host/crypto/aes-round.h b/host/include/ppc64/host/crypto/aes-round.h
new file mode 100644
index 0000000000..5eeba6dcb7
--- /dev/null
+++ b/host/include/ppc64/host/crypto/aes-round.h
@@ -0,0 +1 @@
+#include "host/include/ppc/host/crypto/aes-round.h"
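For context, a minimal, hypothetical caller of the hooks added above might look as follows. Everything except HAVE_AES_ACCEL, ATTR_AES_ACCEL and aesenc_SB_SR_MC_AK_accel is illustrative; QEMU's real dispatch sits in the generic crypto/aes code, which is not part of this diff, and the AESState type comes from the generic aes-round.h interface:

    #include "crypto/aes-round.h"       /* AESState, generic interface (assumed) */
    #include "host/crypto/aes-round.h"  /* HAVE_AES_ACCEL, *_accel hooks */

    /* Hypothetical wrapper: one forward AES round (SubBytes + ShiftRows +
       MixColumns + AddRoundKey) on a 16-byte state, taking the accelerated
       path when the host advertises the v2.07 crypto facility. */
    static void ATTR_AES_ACCEL
    encrypt_round_example(AESState *ret, const AESState *st, const AESState *rk)
    {
        if (HAVE_AES_ACCEL) {
            /* The last argument selects big- vs little-endian state layout,
               as defined by the generic aes-round.h contract. */
            aesenc_SB_SR_MC_AK_accel(ret, st, rk, false);
        } else {
            /* fall back to the generic C implementation (not shown) */
        }
    }

On ppc, ATTR_AES_ACCEL expands to nothing and HAVE_AES_ACCEL is either a compile-time true (when built with -mcrypto) or a runtime check of cpuinfo & CPUINFO_CRYPTO, as defined at the top of the new header.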