author    | Eric Auger <eric.auger@redhat.com>       | 2018-05-04 18:05:51 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2018-05-04 18:49:48 +0100
commit    | fae4be38b35dcfae48494c023454e8988c15b69a (patch)
tree      | 07e918d8867bb2ac51335c79ad563fc5a875f0c6 /hw/arm/smmuv3.c
parent    | dadd1a0809b1aff8c4d5364f3714b3e0e039dcb0 (diff)
hw/arm/smmuv3: Implement MMIO write operations
Now that we have relevant helpers for queue and irq
management, let's implement the MMIO write operations.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Prem Mallappa <prem.mallappa@broadcom.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1524665762-31355-8-git-send-email-eric.auger@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
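The new write path first folds the two 64KB register pages of the SMMUv3 programming interface onto each other before dispatching by access size (the `offset &= ~0x10000` in the diff below). As a rough illustration only, not part of the patch, here is a minimal, self-contained C sketch of that folding; `fold_to_page0()` and the example offset are hypothetical stand-ins, and `hwaddr` is simplified to a plain `uint64_t`.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;   /* simplified stand-in for QEMU's hwaddr */

/*
 * The model treats page 0 and page 1 of the register map as exact
 * aliases (a CONSTRAINED UNPREDICTABLE choice allowed by the spec),
 * so clearing bit 16 folds any page-1 offset back onto page 0.
 */
static hwaddr fold_to_page0(hwaddr offset)
{
    return offset & ~(hwaddr)0x10000;
}

int main(void)
{
    hwaddr page1_offset = 0x100a8;   /* example page-1 offset: 0xa8 + 64KB */

    printf("0x%05" PRIx64 " folds to 0x%05" PRIx64 "\n",
           (uint64_t)page1_offset, (uint64_t)fold_to_page0(page1_offset));
    return 0;
}
```

With this folding in place, an access to either page lands in the same `smmu_writel()`/`smmu_writell()` switch statements added by the patch.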
Diffstat (limited to 'hw/arm/smmuv3.c')
-rw-r--r-- | hw/arm/smmuv3.c | 170
1 file changed, 165 insertions, 5 deletions
```diff
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 8f50f1565b..d581ada3d7 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -38,7 +38,8 @@
  * @irq: irq type
  * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
  */
-void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
+static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
+                               uint32_t gerror_mask)
 {
     bool pulse = false;
 
@@ -75,7 +76,7 @@ void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
     }
 }
 
-void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
+static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
 {
     uint32_t pending = s->gerror ^ s->gerrorn;
     uint32_t toggled = s->gerrorn ^ new_gerrorn;
@@ -174,7 +175,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
     s->sid_split = 0;
 }
 
-int smmuv3_cmdq_consume(SMMUv3State *s)
+static int smmuv3_cmdq_consume(SMMUv3State *s)
 {
     SMMUCmdError cmd_error = SMMU_CERROR_NONE;
     SMMUQueue *q = &s->cmdq;
@@ -270,11 +271,170 @@ int smmuv3_cmdq_consume(SMMUv3State *s)
     return 0;
 }
 
+static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
+                                uint64_t data, MemTxAttrs attrs)
+{
+    switch (offset) {
+    case A_GERROR_IRQ_CFG0:
+        s->gerror_irq_cfg0 = data;
+        return MEMTX_OK;
+    case A_STRTAB_BASE:
+        s->strtab_base = data;
+        return MEMTX_OK;
+    case A_CMDQ_BASE:
+        s->cmdq.base = data;
+        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
+        if (s->cmdq.log2size > SMMU_CMDQS) {
+            s->cmdq.log2size = SMMU_CMDQS;
+        }
+        return MEMTX_OK;
+    case A_EVENTQ_BASE:
+        s->eventq.base = data;
+        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
+        if (s->eventq.log2size > SMMU_EVENTQS) {
+            s->eventq.log2size = SMMU_EVENTQS;
+        }
+        return MEMTX_OK;
+    case A_EVENTQ_IRQ_CFG0:
+        s->eventq_irq_cfg0 = data;
+        return MEMTX_OK;
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
+                      __func__, offset);
+        return MEMTX_OK;
+    }
+}
+
+static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
+                               uint64_t data, MemTxAttrs attrs)
+{
+    switch (offset) {
+    case A_CR0:
+        s->cr[0] = data;
+        s->cr0ack = data & ~SMMU_CR0_RESERVED;
+        /* in case the command queue has been enabled */
+        smmuv3_cmdq_consume(s);
+        return MEMTX_OK;
+    case A_CR1:
+        s->cr[1] = data;
+        return MEMTX_OK;
+    case A_CR2:
+        s->cr[2] = data;
+        return MEMTX_OK;
+    case A_IRQ_CTRL:
+        s->irq_ctrl = data;
+        return MEMTX_OK;
+    case A_GERRORN:
+        smmuv3_write_gerrorn(s, data);
+        /*
+         * By acknowledging the CMDQ_ERR, SW may notify cmds can
+         * be processed again
+         */
+        smmuv3_cmdq_consume(s);
+        return MEMTX_OK;
+    case A_GERROR_IRQ_CFG0: /* 64b */
+        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
+        return MEMTX_OK;
+    case A_GERROR_IRQ_CFG0 + 4:
+        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
+        return MEMTX_OK;
+    case A_GERROR_IRQ_CFG1:
+        s->gerror_irq_cfg1 = data;
+        return MEMTX_OK;
+    case A_GERROR_IRQ_CFG2:
+        s->gerror_irq_cfg2 = data;
+        return MEMTX_OK;
+    case A_STRTAB_BASE: /* 64b */
+        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
+        return MEMTX_OK;
+    case A_STRTAB_BASE + 4:
+        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
+        return MEMTX_OK;
+    case A_STRTAB_BASE_CFG:
+        s->strtab_base_cfg = data;
+        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
+            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
+            s->features |= SMMU_FEATURE_2LVL_STE;
+        }
+        return MEMTX_OK;
+    case A_CMDQ_BASE: /* 64b */
+        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
+        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
+        if (s->cmdq.log2size > SMMU_CMDQS) {
+            s->cmdq.log2size = SMMU_CMDQS;
+        }
+        return MEMTX_OK;
+    case A_CMDQ_BASE + 4: /* 64b */
+        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
+        return MEMTX_OK;
+    case A_CMDQ_PROD:
+        s->cmdq.prod = data;
+        smmuv3_cmdq_consume(s);
+        return MEMTX_OK;
+    case A_CMDQ_CONS:
+        s->cmdq.cons = data;
+        return MEMTX_OK;
+    case A_EVENTQ_BASE: /* 64b */
+        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
+        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
+        if (s->eventq.log2size > SMMU_EVENTQS) {
+            s->eventq.log2size = SMMU_EVENTQS;
+        }
+        return MEMTX_OK;
+    case A_EVENTQ_BASE + 4:
+        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
+        return MEMTX_OK;
+    case A_EVENTQ_PROD:
+        s->eventq.prod = data;
+        return MEMTX_OK;
+    case A_EVENTQ_CONS:
+        s->eventq.cons = data;
+        return MEMTX_OK;
+    case A_EVENTQ_IRQ_CFG0: /* 64b */
+        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
+        return MEMTX_OK;
+    case A_EVENTQ_IRQ_CFG0 + 4:
+        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
+        return MEMTX_OK;
+    case A_EVENTQ_IRQ_CFG1:
+        s->eventq_irq_cfg1 = data;
+        return MEMTX_OK;
+    case A_EVENTQ_IRQ_CFG2:
+        s->eventq_irq_cfg2 = data;
+        return MEMTX_OK;
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
+                      __func__, offset);
+        return MEMTX_OK;
+    }
+}
+
 static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                    unsigned size, MemTxAttrs attrs)
 {
-    /* not yet implemented */
-    return MEMTX_ERROR;
+    SMMUState *sys = opaque;
+    SMMUv3State *s = ARM_SMMUV3(sys);
+    MemTxResult r;
+
+    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
+    offset &= ~0x10000;
+
+    switch (size) {
+    case 8:
+        r = smmu_writell(s, offset, data, attrs);
+        break;
+    case 4:
+        r = smmu_writel(s, offset, data, attrs);
+        break;
+    default:
+        r = MEMTX_ERROR;
+        break;
+    }
+
+    trace_smmuv3_write_mmio(offset, data, size, r);
+    return r;
 }
 
 static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
```
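Several of the registers handled above are 64 bits wide but may be written as two 32-bit halves (the `A_xxx` and `A_xxx + 4` cases), which the patch recombines with QEMU's `deposit64()`; the queue base registers also carry a LOG2SIZE field in bits [4:0] that gets clamped to the model's maximum. The following self-contained sketch, not part of the patch, mimics that behaviour with local `deposit64_like()`/`extract64_like()` helpers that mirror the semantics of QEMU's bitops; the register value and the `MAX_LOG2SIZE` constant are made-up example inputs (the real bound comes from `SMMU_CMDQS` in the model's headers).

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local helpers mirroring the semantics of QEMU's deposit64()/extract64(). */
static uint64_t deposit64_like(uint64_t value, int start, int length,
                               uint64_t fieldval)
{
    uint64_t mask = (~0ULL >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

static uint64_t extract64_like(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

#define MAX_LOG2SIZE 19   /* made-up stand-in for the SMMU_CMDQS bound */

int main(void)
{
    /* A guest programs a 64-bit queue base register as two 32-bit writes. */
    uint64_t cmdq_base = 0;
    cmdq_base = deposit64_like(cmdq_base, 0, 32, 0xdead0005u);  /* A_CMDQ_BASE     */
    cmdq_base = deposit64_like(cmdq_base, 32, 32, 0x12u);       /* A_CMDQ_BASE + 4 */

    /* LOG2SIZE lives in bits [4:0] of the base register and is clamped. */
    unsigned log2size = (unsigned)extract64_like(cmdq_base, 0, 5);
    if (log2size > MAX_LOG2SIZE) {
        log2size = MAX_LOG2SIZE;
    }

    printf("cmdq.base = 0x%016" PRIx64 ", log2size = %u\n", cmdq_base, log2size);
    return 0;
}
```

Because each half is deposited into its own bit range, the final register value does not depend on whether the guest writes the low or the high word first.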