 hw/misc/aspeed_hace.c         | 132 +++++++++++++++++++++++++++++++++++++---
 include/hw/misc/aspeed_hace.h |   4 ++++
 2 files changed, 131 insertions(+), 5 deletions(-)
diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
index 59fe5bfca2..0f4059e6df 100644
--- a/hw/misc/aspeed_hace.c
+++ b/hw/misc/aspeed_hace.c
@@ -65,7 +65,6 @@
#define SG_LIST_ADDR_SIZE 4
#define SG_LIST_ADDR_MASK 0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
-#define ASPEED_HACE_MAX_SG 256 /* max number of entries */
static const struct {
uint32_t mask;
@@ -95,11 +94,104 @@ static int hash_algo_lookup(uint32_t reg)
return -1;
}
-static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
+/**
+ * Check whether the current request contains the padding message.
+ *
+ * @param s aspeed hace state object
+ * @param iov iov of the current request
+ * @param req_len length of the current request
+ * @param total_msg_len length of all acc_mode requests (excluding the
+ *                      padding message)
+ * @param pad_offset start offset of the padding message
+ */
+static bool has_padding(AspeedHACEState *s, struct iovec *iov,
+ hwaddr req_len, uint32_t *total_msg_len,
+ uint32_t *pad_offset)
+{
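+ /*
+ * The last 8 bytes of a padded request hold the total message
+ * length in bits, big-endian; divide by 8 to convert to bytes.
+ */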
+ *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
+ /*
+ * SG_LIST_LEN_LAST asserted in the request length does not mean this
+ * is the last request; the last request is the one that contains the
+ * padding message. We detect the padding as follows:
+ * 1. Read the total message length. If the current request contains
+ * padding, its last 8 bytes hold the total message length.
+ * 2. Check that the total message length is valid: it must be less
+ * than or equal to total_req_len.
+ * 3. Compute the padding offset as the current request length minus
+ * the padding size; the first byte of the padding must be 0x80.
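+ *
+ * A minimal worked example (assuming a SHA-256 request padded per
+ * FIPS 180-4): hashing the 3-byte message "abc" yields one 64-byte
+ * padded block,
+ *   0x61 0x62 0x63 0x80 0x00 ... 0x00 <be64: 24>
+ * so ldq_be_p() on the last 8 bytes reads the bit length 24,
+ * 24 / 8 = 3 becomes *total_msg_len, and the byte at *pad_offset
+ * (offset 3) is 0x80.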
+ */
+ if (*total_msg_len <= s->total_req_len) {
+ uint32_t padding_size = s->total_req_len - *total_msg_len;
+ uint8_t *padding = iov->iov_base;
+ *pad_offset = req_len - padding_size;
+ if (padding[*pad_offset] == 0x80) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
+ uint32_t *pad_offset)
+{
+ int i, iov_count;
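+
+ /*
+ * Cache the unpadded head of the final request so that it is
+ * hashed together with the previously cached requests.
+ */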
+ if (*pad_offset != 0) {
+ s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
+ s->iov_cache[s->iov_count].iov_len = *pad_offset;
+ ++s->iov_count;
+ }
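+
+ /* Copy the accumulated list back into the caller's iov array. */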
+ for (i = 0; i < s->iov_count; i++) {
+ iov[i].iov_base = s->iov_cache[i].iov_base;
+ iov[i].iov_len = s->iov_cache[i].iov_len;
+ }
+ iov_count = s->iov_count;
+ s->iov_count = 0;
+ s->total_req_len = 0;
+ return iov_count;
+}
+
+/**
+ * Generate iov for accumulative mode.
+ *
+ * @param s aspeed hace state object
+ * @param iov iov of the current request
+ * @param id index of the current iov
+ * @param req_len length of the current request
+ *
+ * @return the number of iov entries
+ */
+static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
+ hwaddr *req_len)
+{
+ uint32_t pad_offset;
+ uint32_t total_msg_len;
+ s->total_req_len += *req_len;
+
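+ /*
+ * If this request carries the padding, either flush the cached
+ * iovs together with its unpadded head or, when nothing is
+ * cached, simply trim the padding off the current iov;
+ * qcrypto_hash_bytesv() below applies its own padding. Requests
+ * without padding are cached until the final one arrives.
+ */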
+ if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
+ if (s->iov_count) {
+ return reconstruct_iov(s, iov, id, &pad_offset);
+ }
+
+ *req_len -= s->total_req_len - total_msg_len;
+ s->total_req_len = 0;
+ iov[id].iov_len = *req_len;
+ } else {
+ s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
+ s->iov_cache[s->iov_count].iov_len = *req_len;
+ ++s->iov_count;
+ }
+
+ return id + 1;
+}
+
+static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
+ bool acc_mode)
{
struct iovec iov[ASPEED_HACE_MAX_SG];
g_autofree uint8_t *digest_buf;
size_t digest_len = 0;
+ int niov = 0;
int i;
if (sg_mode) {
@@ -124,10 +216,16 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
MEMTXATTRS_UNSPECIFIED, NULL);
addr &= SG_LIST_ADDR_MASK;
- iov[i].iov_len = len & SG_LIST_LEN_MASK;
- plen = iov[i].iov_len;
+ plen = len & SG_LIST_LEN_MASK;
iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
MEMTXATTRS_UNSPECIFIED);
+
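+ /*
+ * In accumulative mode the guest includes the hash padding in
+ * its final request; gen_acc_mode_iov() strips it and decides
+ * whether to cache this iov or emit the accumulated list.
+ */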
+ if (acc_mode) {
+ niov = gen_acc_mode_iov(s, iov, i, &plen);
+ } else {
+ iov[i].iov_len = plen;
+ }
}
} else {
hwaddr len = s->regs[R_HASH_SRC_LEN];
@@ -137,6 +235,25 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
&len, false,
MEMTXATTRS_UNSPECIFIED);
i = 1;
+
+ if (s->iov_count) {
+ /*
+ * In the Aspeed SDK kernel driver, sg_mode is disabled in
+ * hash_final(). Thus, when we receive a request with sg_mode
+ * disabled, we must check whether the iov cache is empty; if it
+ * is not, the cached iovs must be combined with the current iov.
+ */
+ uint32_t total_msg_len;
+ uint32_t pad_offset;
+ s->total_req_len += len;
+ if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
+ niov = reconstruct_iov(s, iov, 0, &pad_offset);
+ }
+ }
+ }
+
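+ /*
+ * gen_acc_mode_iov() or reconstruct_iov() may have rebuilt the
+ * iov list; if so, hash the niov rebuilt entries instead.
+ */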
+ if (niov) {
+ i = niov;
}
if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
@@ -238,7 +355,8 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
__func__, data & ahc->hash_mask);
break;
}
- do_hash_operation(s, algo, data & HASH_SG_EN);
+ do_hash_operation(s, algo, data & HASH_SG_EN,
+ ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
if (data & HASH_IRQ_EN) {
qemu_irq_raise(s->irq);
@@ -271,6 +389,8 @@ static void aspeed_hace_reset(DeviceState *dev)
struct AspeedHACEState *s = ASPEED_HACE(dev);
memset(s->regs, 0, sizeof(s->regs));
+ s->iov_count = 0;
+ s->total_req_len = 0;
}
static void aspeed_hace_realize(DeviceState *dev, Error **errp)
@@ -306,6 +426,8 @@ static const VMStateDescription vmstate_aspeed_hace = {
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
+ VMSTATE_UINT32(total_req_len, AspeedHACEState),
+ VMSTATE_UINT32(iov_count, AspeedHACEState),
VMSTATE_END_OF_LIST(),
}
};
diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
index 2242945eb4..40aebf1d6e 100644
--- a/include/hw/misc/aspeed_hace.h
+++ b/include/hw/misc/aspeed_hace.h
@@ -18,6 +18,7 @@
OBJECT_DECLARE_TYPE(AspeedHACEState, AspeedHACEClass, ASPEED_HACE)
#define ASPEED_HACE_NR_REGS (0x64 >> 2)
+#define ASPEED_HACE_MAX_SG 256 /* max number of entries */
struct AspeedHACEState {
SysBusDevice parent;
@@ -25,7 +26,10 @@ struct AspeedHACEState {
MemoryRegion iomem;
qemu_irq irq;
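+
+ /*
+ * Accumulative-mode state: iovs cached across requests until the
+ * padded final request arrives, plus the running request length.
+ */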
+ struct iovec iov_cache[ASPEED_HACE_MAX_SG];
uint32_t regs[ASPEED_HACE_NR_REGS];
+ uint32_t total_req_len;
+ uint32_t iov_count;
MemoryRegion *dram_mr;
AddressSpace dram_as;