author     Richard Henderson <richard.henderson@linaro.org>  2023-06-02 03:54:39 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2023-07-08 07:30:17 +0100
commit     7c58cb972e851c2e96ad5abd98b9c00b3f1c8a95 (patch)
tree       7c77efd594045a1c0e9af5470e0398a15dd4c16d
parent     5b41deb3108c48143d6697cd2dcde3f0b9a57c80 (diff)
crypto: Add aesenc_SB_SR_MC_AK
Add a primitive for SubBytes + ShiftRows + MixColumns + AddRoundKey.

Acked-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  crypto/aes.c                                  58
-rw-r--r--  host/include/generic/host/crypto/aes-round.h   3
-rw-r--r--  include/crypto/aes-round.h                    21
3 files changed, 82 insertions(+), 0 deletions(-)
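
For orientation, here is a usage sketch, not part of this patch, showing how the new primitive slots into a block encryption. The aes128_encrypt_block() wrapper and the pre-expanded round-key array rks[] are hypothetical; aesenc_SB_SR_MC_AK() and aesenc_SB_SR_AK() are the helpers from include/crypto/aes-round.h.

#include "crypto/aes-round.h"

/* Hypothetical sketch: encrypt one AES-128 block via the generic path. */
static void aes128_encrypt_block(AESState *out, const AESState *in,
                                 const AESState rks[11])
{
    AESState t = *in;

    /* Round 0 is AddRoundKey only. */
    for (int i = 0; i < 4; i++) {
        t.w[i] ^= rks[0].w[i];
    }

    /* Rounds 1-9 use the SB + SR + MC + AK primitive added here. */
    for (int r = 1; r < 10; r++) {
        aesenc_SB_SR_MC_AK(&t, &t, &rks[r], HOST_BIG_ENDIAN);
    }

    /* The final round omits MixColumns. */
    aesenc_SB_SR_AK(&t, &t, &rks[10], HOST_BIG_ENDIAN);
    *out = t;
}

In-place operation (r == st) is safe here because the generic implementation in this patch reads the whole input state before writing the result.
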
diff --git a/crypto/aes.c b/crypto/aes.c
index 6c05d731f4..a193d98d54 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -1323,6 +1323,64 @@ void aesenc_SB_SR_AK_genrev(AESState *r, const AESState *s, const AESState *k)
 }
 
 /*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+static inline void
+aesenc_SB_SR_MC_AK_swap(AESState *r, const AESState *st,
+                        const AESState *rk, bool swap)
+{
+    int swap_b = swap * 0xf;
+    int swap_w = swap * 0x3;
+    bool be = HOST_BIG_ENDIAN ^ swap;
+    uint32_t w0, w1, w2, w3;
+
+    w0 = (AES_Te0[st->b[swap_b ^ AES_SH(0x0)]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH(0x1)]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH(0x2)]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH(0x3)]]);
+
+    w1 = (AES_Te0[st->b[swap_b ^ AES_SH(0x4)]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH(0x5)]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH(0x6)]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH(0x7)]]);
+
+    w2 = (AES_Te0[st->b[swap_b ^ AES_SH(0x8)]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH(0x9)]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH(0xA)]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH(0xB)]]);
+
+    w3 = (AES_Te0[st->b[swap_b ^ AES_SH(0xC)]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH(0xD)]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH(0xE)]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH(0xF)]]);
+
+    /* Note that AES_TeX is encoded for big-endian. */
+    if (!be) {
+        w0 = bswap32(w0);
+        w1 = bswap32(w1);
+        w2 = bswap32(w2);
+        w3 = bswap32(w3);
+    }
+
+    r->w[swap_w ^ 0] = rk->w[swap_w ^ 0] ^ w0;
+    r->w[swap_w ^ 1] = rk->w[swap_w ^ 1] ^ w1;
+    r->w[swap_w ^ 2] = rk->w[swap_w ^ 2] ^ w2;
+    r->w[swap_w ^ 3] = rk->w[swap_w ^ 3] ^ w3;
+}
+
+void aesenc_SB_SR_MC_AK_gen(AESState *r, const AESState *st,
+                            const AESState *rk)
+{
+    aesenc_SB_SR_MC_AK_swap(r, st, rk, false);
+}
+
+void aesenc_SB_SR_MC_AK_genrev(AESState *r, const AESState *st,
+                               const AESState *rk)
+{
+    aesenc_SB_SR_MC_AK_swap(r, st, rk, true);
+}
+
+/*
  * Perform InvMixColumns.
  */
 static inline void
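
As background, not part of the patch: each AES_TeN table entry folds SubBytes together with one column of the MixColumns matrix, so the four lookups XORed above produce one fully transformed output column. A sketch of how such an entry could be derived; xtime() and te0_entry() are illustrative helpers, and AES_sbox stands for the usual AES S-box table.

/* GF(2^8) multiply by 2, reduced by the AES polynomial x^8+x^4+x^3+x+1. */
static uint8_t xtime(uint8_t x)
{
    return (x << 1) ^ ((x >> 7) * 0x1b);
}

/*
 * Illustrative derivation of one AES_Te0 entry: SubBytes first, then the
 * MixColumns coefficients {2, 1, 1, 3} packed most-significant-byte first,
 * matching the big-endian encoding noted in the code above.
 */
static uint32_t te0_entry(uint8_t x)
{
    uint8_t s  = AES_sbox[x];     /* SubBytes */
    uint8_t s2 = xtime(s);        /* 2 * S(x) */
    uint8_t s3 = s2 ^ s;          /* 3 * S(x) */

    return ((uint32_t)s2 << 24) | (s << 16) | (s << 8) | s3;
}

AES_Te1 through AES_Te3 are byte rotations of AES_Te0, which is why indexing them with the ShiftRows-permuted byte positions (AES_SH) covers all four row offsets of a column.
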
diff --git a/host/include/generic/host/crypto/aes-round.h b/host/include/generic/host/crypto/aes-round.h
index 335ec3f11e..9886e81e50 100644
--- a/host/include/generic/host/crypto/aes-round.h
+++ b/host/include/generic/host/crypto/aes-round.h
@@ -14,6 +14,9 @@ void aesenc_MC_accel(AESState *, const AESState *, bool)
     QEMU_ERROR("unsupported accel");
 void aesenc_SB_SR_AK_accel(AESState *, const AESState *,
                            const AESState *, bool)
     QEMU_ERROR("unsupported accel");
+void aesenc_SB_SR_MC_AK_accel(AESState *, const AESState *,
+                              const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
 void aesdec_IMC_accel(AESState *, const AESState *, bool)
     QEMU_ERROR("unsupported accel");
diff --git a/include/crypto/aes-round.h b/include/crypto/aes-round.h
index 7be2cc0d8e..03688c8640 100644
--- a/include/crypto/aes-round.h
+++ b/include/crypto/aes-round.h
@@ -60,6 +60,27 @@ static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
 }
 
 /*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+
+void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
+                            const AESState *rk);
+void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
+                               const AESState *rk);
+
+static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
+                                      const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_SB_SR_MC_AK_gen(r, st, rk);
+    } else {
+        aesenc_SB_SR_MC_AK_genrev(r, st, rk);
+    }
+}
+
+/*
  * Perform InvMixColumns.
  */
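
The generic _accel stub above exists so the inline dispatcher always links; hosts with hardware AES support provide a real definition in their own host/include/.../aes-round.h headers, added in separate patches. Purely as an illustration, not code from this series, an x86 AES-NI version could look like the sketch below. It ignores the be flag, which a real backend must honor, e.g. by byte-reversing the state.

#include <wmmintrin.h>    /* AES-NI intrinsics */

static inline void aesenc_SB_SR_MC_AK_accel(AESState *r, const AESState *st,
                                            const AESState *rk, bool be)
{
    /* Illustrative sketch: assumes be == false (little-endian state). */
    __m128i x = _mm_loadu_si128((const __m128i *)st->b);
    __m128i k = _mm_loadu_si128((const __m128i *)rk->b);

    /* One AESENC performs ShiftRows, SubBytes, MixColumns, AddRoundKey. */
    x = _mm_aesenc_si128(x, k);
    _mm_storeu_si128((__m128i *)r->b, x);
}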