author | Eric Auger <eric.auger@redhat.com> | 2018-05-04 18:05:51 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2018-05-04 18:49:21 +0100
commit | dadd1a0809b1aff8c4d5364f3714b3e0e039dcb0 (patch)
tree | 3943cd19d95610ef3e2bed806c76769f2e84c138 /hw/arm/smmuv3.c
parent | 6a736033d343e0e5774849fa0eef88f2582c364a (diff)
hw/arm/smmuv3: Queue helpers
We introduce helpers to read from and write to the command and event
circular queues.
smmuv3_write_eventq and smmuv3_cmdq_consume will become static
in subsequent patches.
Invalidation commands are not yet dealt with: we do not cache
data that needs to be invalidated. This will change with vhost
integration.
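
For context on the "circular queues" mentioned above: the SMMUv3 architecture
gives each queue's PROD and CONS registers an extra wrap bit just above the
index field, so a full queue can be told apart from an empty one. The
full/empty tests and index increments this patch relies on (smmuv3_q_full,
smmuv3_q_empty, queue_prod_incr, queue_cons_incr) live in smmuv3-internal.h
and are not part of this diff. The following is a minimal standalone sketch
of the wrap-bit convention; DemoQueue and its helpers are illustrative names,
not QEMU's actual definitions:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified queue state: prod/cons each hold an index plus a wrap bit. */
    typedef struct {
        uint32_t prod;      /* producer index, wrap bit at position log2size */
        uint32_t cons;      /* consumer index, wrap bit at position log2size */
        uint8_t  log2size;  /* queue holds 1 << log2size entries */
    } DemoQueue;

    #define WRAP_MASK(q)       (1u << (q)->log2size)
    #define WRAP_INDEX_MASK(q) ((1u << ((q)->log2size + 1)) - 1)

    /* Empty: index and wrap bit both match. */
    static bool q_empty(DemoQueue *q)
    {
        return ((q->prod ^ q->cons) & WRAP_INDEX_MASK(q)) == 0;
    }

    /* Full: index matches but wrap bits differ. */
    static bool q_full(DemoQueue *q)
    {
        return ((q->prod ^ q->cons) & WRAP_INDEX_MASK(q)) == WRAP_MASK(q);
    }

    /* The masked addition toggles the wrap bit when passing the last entry. */
    static void q_prod_incr(DemoQueue *q)
    {
        q->prod = (q->prod + 1) & WRAP_INDEX_MASK(q);
    }

    static void q_cons_incr(DemoQueue *q)
    {
        q->cons = (q->cons + 1) & WRAP_INDEX_MASK(q);
    }

    int main(void)
    {
        DemoQueue q = { .log2size = 2 };    /* 4 entries */

        assert(q_empty(&q) && !q_full(&q));
        for (int i = 0; i < 4; i++) {
            q_prod_incr(&q);                /* producer fills the queue */
        }
        assert(q_full(&q) && !q_empty(&q)); /* same index, wrap bits differ */
        q_cons_incr(&q);
        assert(!q_full(&q));                /* one slot free again */
        printf("wrap-bit full/empty checks behave as expected\n");
        return 0;
    }

Empty therefore means "same index, same wrap bit" and full means "same index,
opposite wrap bits", which is what the smmuv3_q_full()/smmuv3_q_empty() calls
in the patch below test.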
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Prem Mallappa <prem.mallappa@broadcom.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1524665762-31355-7-git-send-email-eric.auger@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw/arm/smmuv3.c')
-rw-r--r-- | hw/arm/smmuv3.c | 136
1 file changed, 136 insertions, 0 deletions
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index c0cedcaba3..8f50f1565b 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -95,6 +95,46 @@ void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
     trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
 }
 
+static inline MemTxResult queue_read(SMMUQueue *q, void *data)
+{
+    dma_addr_t addr = Q_CONS_ENTRY(q);
+
+    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
+}
+
+static MemTxResult queue_write(SMMUQueue *q, void *data)
+{
+    dma_addr_t addr = Q_PROD_ENTRY(q);
+    MemTxResult ret;
+
+    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
+    if (ret != MEMTX_OK) {
+        return ret;
+    }
+
+    queue_prod_incr(q);
+    return MEMTX_OK;
+}
+
+void smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
+{
+    SMMUQueue *q = &s->eventq;
+
+    if (!smmuv3_eventq_enabled(s)) {
+        return;
+    }
+
+    if (smmuv3_q_full(q)) {
+        return;
+    }
+
+    queue_write(q, evt);
+
+    if (smmuv3_q_empty(q)) {
+        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
+    }
+}
+
 static void smmuv3_init_regs(SMMUv3State *s)
 {
     /**
@@ -134,6 +174,102 @@ static void smmuv3_init_regs(SMMUv3State *s)
     s->sid_split = 0;
 }
 
+int smmuv3_cmdq_consume(SMMUv3State *s)
+{
+    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
+    SMMUQueue *q = &s->cmdq;
+    SMMUCommandType type = 0;
+
+    if (!smmuv3_cmdq_enabled(s)) {
+        return 0;
+    }
+    /*
+     * some commands depend on register values, typically CR0. In case those
+     * register values change while handling the command, spec says it
+     * is UNPREDICTABLE whether the command is interpreted under the new
+     * or old value.
+     */
+
+    while (!smmuv3_q_empty(q)) {
+        uint32_t pending = s->gerror ^ s->gerrorn;
+        Cmd cmd;
+
+        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
+                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
+
+        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
+            break;
+        }
+
+        if (queue_read(q, &cmd) != MEMTX_OK) {
+            cmd_error = SMMU_CERROR_ABT;
+            break;
+        }
+
+        type = CMD_TYPE(&cmd);
+
+        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
+
+        switch (type) {
+        case SMMU_CMD_SYNC:
+            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
+                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
+            }
+            break;
+        case SMMU_CMD_PREFETCH_CONFIG:
+        case SMMU_CMD_PREFETCH_ADDR:
+        case SMMU_CMD_CFGI_STE:
+        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
+        case SMMU_CMD_CFGI_CD:
+        case SMMU_CMD_CFGI_CD_ALL:
+        case SMMU_CMD_TLBI_NH_ALL:
+        case SMMU_CMD_TLBI_NH_ASID:
+        case SMMU_CMD_TLBI_NH_VA:
+        case SMMU_CMD_TLBI_NH_VAA:
+        case SMMU_CMD_TLBI_EL3_ALL:
+        case SMMU_CMD_TLBI_EL3_VA:
+        case SMMU_CMD_TLBI_EL2_ALL:
+        case SMMU_CMD_TLBI_EL2_ASID:
+        case SMMU_CMD_TLBI_EL2_VA:
+        case SMMU_CMD_TLBI_EL2_VAA:
+        case SMMU_CMD_TLBI_S12_VMALL:
+        case SMMU_CMD_TLBI_S2_IPA:
+        case SMMU_CMD_TLBI_NSNH_ALL:
+        case SMMU_CMD_ATC_INV:
+        case SMMU_CMD_PRI_RESP:
+        case SMMU_CMD_RESUME:
+        case SMMU_CMD_STALL_TERM:
+            trace_smmuv3_unhandled_cmd(type);
+            break;
+        default:
+            cmd_error = SMMU_CERROR_ILL;
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
+            break;
+        }
+        if (cmd_error) {
+            break;
+        }
+        /*
+         * We only increment the cons index after the completion of
+         * the command. We do that because the SYNC returns immediately
+         * and does not check the completion of previous commands
+         */
+        queue_cons_incr(q);
+    }
+
+    if (cmd_error) {
+        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
+        smmu_write_cmdq_err(s, cmd_error);
+        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
+    }
+
+    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
+                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
+
+    return 0;
+}
+
 static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                    unsigned size, MemTxAttrs attrs)
 {
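
queue_read() and queue_write() above depend on Q_CONS_ENTRY()/Q_PROD_ENTRY(),
which this diff does not define (they belong to smmuv3-internal.h in the same
series). Conceptually, each macro strips the wrap bit from the index and
scales by the entry size to obtain the guest-physical address passed to
dma_memory_read()/dma_memory_write(). A hedged sketch of that mapping, with
hypothetical names (SketchQueue, sketch_cons_entry) and without the
base-address masking the real macros also perform:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;    /* stand-in for QEMU's dma_addr_t */

    typedef struct {
        dma_addr_t base;            /* guest-physical base of the ring */
        uint32_t   cons;            /* consumer index plus wrap bit */
        uint8_t    log2size;        /* queue holds 1 << log2size entries */
        uint32_t   entry_size;      /* bytes per queue entry */
    } SketchQueue;

    /* Next entry to consume: base + (index without wrap bit) * entry_size. */
    static dma_addr_t sketch_cons_entry(const SketchQueue *q)
    {
        uint32_t index = q->cons & ((1u << q->log2size) - 1);
        return q->base + (dma_addr_t)index * q->entry_size;
    }

    int main(void)
    {
        SketchQueue q = { .base = 0x8000, .cons = 5, .log2size = 2,
                          .entry_size = 16 };
        /* cons = 5 = wrap bit (bit 2) + index 1 -> entry at 0x8010 */
        printf("next entry at 0x%llx\n",
               (unsigned long long)sketch_cons_entry(&q));
        return 0;
    }

With cons = 5 and log2size = 2, the wrap bit is set and the index is 1, so
the next entry sits one entry_size past the base; the wrap bit only
participates in the full/empty comparison, never in addressing.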