author     Peter Maydell <peter.maydell@linaro.org>    2018-06-26 18:23:49 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2018-06-26 18:23:49 +0100
commit     00928a421d47f49691cace1207481b7aad31b1f1 (patch)
tree       e28ed5e184928ae385737fbc03ddd9a59edef0f0
parent     7e2d08863b5cd4d35e6e0dc211b0cceaa350ade2 (diff)
parent     9b945a9ee36a34eaeca412ef9ef35fbfe33c2c85 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180626' into staging
target-arm queue:
* aspeed: set APB clocks correctly (fixes slowdown on palmetto)
* smmuv3: cache config data and TLB entries
* v7m/v8m: support read/write from MPU regions smaller than 1K
* various: clean up logging/debug messages
* xilinx_spips: Make dma transactions as per dma_burst_size
# gpg: Signature made Tue 26 Jun 2018 17:55:46 BST
# gpg: using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg: aka "Peter Maydell <pmaydell@gmail.com>"
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* remotes/pmaydell/tags/pull-target-arm-20180626: (32 commits)
aspeed/timer: use the APB frequency from the SCU
aspeed: initialize the SCU controller first
aspeed/scu: introduce clock frequencies
hw/arm/smmuv3: Add notifications on invalidation
hw/arm/smmuv3: IOTLB emulation
hw/arm/smmuv3: Cache/invalidate config data
hw/arm/smmuv3: Fix translate error handling
target/arm: Handle small regions in get_phys_addr_pmsav8()
target/arm: Set page (region) size in get_phys_addr_pmsav7()
tcg: Support MMU protection regions smaller than TARGET_PAGE_SIZE
hw/arm/stellaris: Use HWADDR_PRIx to display register address
hw/arm/stellaris: Fix gptm_write() error message
hw/net/smc91c111: Use qemu_log_mask(UNIMP) instead of fprintf
hw/net/smc91c111: Use qemu_log_mask(GUEST_ERROR) instead of hw_error
hw/net/stellaris_enet: Use qemu_log_mask(GUEST_ERROR) instead of hw_error
hw/net/stellaris_enet: Fix a typo
hw/arm/stellaris: Use qemu_log_mask(UNIMP) instead of fprintf
hw/arm/omap: Use qemu_log_mask(GUEST_ERROR) instead of fprintf
hw/arm/omap1: Use qemu_log_mask(GUEST_ERROR) instead of fprintf
hw/i2c/omap_i2c: Use qemu_log_mask(UNIMP) instead of fprintf
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
30 files changed, 1174 insertions, 253 deletions
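For readers following the accel/tcg/cputlb.c hunks below, here is a minimal, self-contained C sketch of the idea behind the new TLB_RECHECK flag: when an MPU/MMU protection region is smaller than the page a TLB entry covers, the entry is flagged so that every access takes the slow path and repeats the region check instead of trusting the cached page-granular permission. This is only an illustrative toy model under assumed names (Region, TlbEntry, mpu_check, etc.), not QEMU's actual API or data layout.

```c
/*
 * Toy model of the TLB_RECHECK idea; all names are illustrative,
 * this is not QEMU code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS   12
#define PAGE_SIZE   (1u << PAGE_BITS)
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define TLB_RECHECK (1u << 0)   /* a spare low bit of the cached address */

/* One protection region, possibly much smaller than a page (e.g. 256 bytes) */
typedef struct {
    uint32_t base;
    uint32_t size;
    bool writable;
} Region;

/* A TLB entry always describes a whole page */
typedef struct {
    uint32_t vaddr_page;  /* page base, low bits reused for flags */
    bool valid;
} TlbEntry;

static Region mpu_region = { .base = 0x1100, .size = 0x100, .writable = false };
static TlbEntry tlb;

/* "tlb_set_page": cache the page, but flag it if the region is sub-page */
static void tlb_fill(uint32_t addr, uint32_t region_size)
{
    tlb.vaddr_page = addr & PAGE_MASK;
    if (region_size < PAGE_SIZE) {
        tlb.vaddr_page |= TLB_RECHECK;   /* force the slow path every time */
    }
    tlb.valid = true;
}

/* Slow path: repeat the MPU check for this exact access */
static bool mpu_check(uint32_t addr, bool is_write)
{
    if (addr < mpu_region.base || addr >= mpu_region.base + mpu_region.size) {
        return true;                     /* outside the region: allow */
    }
    return is_write ? mpu_region.writable : true;
}

static bool access_ok(uint32_t addr, bool is_write)
{
    if (!tlb.valid || (tlb.vaddr_page & PAGE_MASK) != (addr & PAGE_MASK)) {
        tlb_fill(addr, mpu_region.size); /* miss: refill the entry */
    }
    if (tlb.vaddr_page & TLB_RECHECK) {
        return mpu_check(addr, is_write); /* sub-page region: never trust the hit */
    }
    return true;                          /* fast path: whole page is uniform */
}

int main(void)
{
    printf("read  0x1180 -> %s\n", access_ok(0x1180, false) ? "ok" : "fault");
    printf("write 0x1180 -> %s\n", access_ok(0x1180, true)  ? "ok" : "fault");
    printf("write 0x1400 -> %s\n", access_ok(0x1400, true)  ? "ok" : "fault");
    return 0;
}
```

In the actual patch the flag lives in the cached address of each CPUTLBEntry and the slow path goes through io_readx()/io_writex(), which gain a `recheck` argument and call tlb_fill() again before completing the access; the control flow has the same shape as the sketch above.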
diff --git a/MAINTAINERS b/MAINTAINERS index 2874ddce60..8c626f6a07 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -501,9 +501,10 @@ F: include/hw/arm/digic.h F: hw/*/digic* Gumstix +M: Philippe Mathieu-Daudé <f4bug@amsat.org> L: qemu-devel@nongnu.org L: qemu-arm@nongnu.org -S: Orphan +S: Odd Fixes F: hw/arm/gumstix.c i.MX31 @@ -644,6 +645,17 @@ M: Subbaraya Sundeep <sundeep.lkml@gmail.com> S: Maintained F: hw/arm/msf2-som.c +ASPEED BMCs +M: Cédric Le Goater <clg@kaod.org> +R: Andrew Jeffery <andrew@aj.id.au> +R: Joel Stanley <joel@jms.id.au> +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/*aspeed* +F: include/hw/*/*aspeed* +F: hw/net/ftgmac100.c +F: include/hw/net/ftgmac100.h + CRIS Machines ------------- Axis Dev88 diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 719cca2268..eebe97dabb 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -613,27 +613,42 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, target_ulong code_address; uintptr_t addend; CPUTLBEntry *te, *tv, tn; - hwaddr iotlb, xlat, sz; + hwaddr iotlb, xlat, sz, paddr_page; + target_ulong vaddr_page; unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; int asidx = cpu_asidx_from_attrs(cpu, attrs); assert_cpu_is_self(cpu); - assert(size >= TARGET_PAGE_SIZE); - if (size != TARGET_PAGE_SIZE) { - tlb_add_large_page(env, vaddr, size); + + if (size < TARGET_PAGE_SIZE) { + sz = TARGET_PAGE_SIZE; + } else { + if (size > TARGET_PAGE_SIZE) { + tlb_add_large_page(env, vaddr, size); + } + sz = size; } + vaddr_page = vaddr & TARGET_PAGE_MASK; + paddr_page = paddr & TARGET_PAGE_MASK; - sz = size; - section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz, - attrs, &prot); + section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, + &xlat, &sz, attrs, &prot); assert(sz >= TARGET_PAGE_SIZE); tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx " prot=%x idx=%d\n", vaddr, paddr, prot, mmu_idx); - address = vaddr; - if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { + address = vaddr_page; + if (size < TARGET_PAGE_SIZE) { + /* + * Slow-path the TLB entries; we will repeat the MMU check and TLB + * fill on every access. + */ + address |= TLB_RECHECK; + } + if (!memory_region_is_ram(section->mr) && + !memory_region_is_romd(section->mr)) { /* IO memory case */ address |= TLB_MMIO; addend = 0; @@ -643,10 +658,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, } code_address = address; - iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat, - prot, &address); + iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, + paddr_page, xlat, prot, &address); - index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); te = &env->tlb_table[mmu_idx][index]; /* do not discard the translation in te, evict it into a victim tlb */ tv = &env->tlb_v_table[mmu_idx][vidx]; @@ -662,18 +677,18 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, * TARGET_PAGE_BITS, and either * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM) * + the offset within section->mr of the page base (otherwise) - * We subtract the vaddr (which is page aligned and thus won't + * We subtract the vaddr_page (which is page aligned and thus won't * disturb the low bits) to give an offset which can be added to the * (non-page-aligned) vaddr of the eventual memory access to get * the MemoryRegion offset for the access. 
Note that the vaddr we * subtract here is that of the page base, and not the same as the * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). */ - env->iotlb[mmu_idx][index].addr = iotlb - vaddr; + env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page; env->iotlb[mmu_idx][index].attrs = attrs; /* Now calculate the new entry */ - tn.addend = addend - vaddr; + tn.addend = addend - vaddr_page; if (prot & PAGE_READ) { tn.addr_read = address; } else { @@ -694,7 +709,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, tn.addr_write = address | TLB_MMIO; } else if (memory_region_is_ram(section->mr) && cpu_physical_memory_is_clean( - memory_region_get_ram_addr(section->mr) + xlat)) { + memory_region_get_ram_addr(section->mr) + xlat)) { tn.addr_write = address | TLB_NOTDIRTY; } else { tn.addr_write = address; @@ -767,7 +782,8 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, - target_ulong addr, uintptr_t retaddr, int size) + target_ulong addr, uintptr_t retaddr, + bool recheck, int size) { CPUState *cpu = ENV_GET_CPU(env); hwaddr mr_offset; @@ -777,6 +793,29 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, bool locked = false; MemTxResult r; + if (recheck) { + /* + * This is a TLB_RECHECK access, where the MMU protection + * covers a smaller range than a target page, and we must + * repeat the MMU check here. This tlb_fill() call might + * longjump out if this access should cause a guest exception. + */ + int index; + target_ulong tlb_addr; + + tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); + + index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr = env->tlb_table[mmu_idx][index].addr_read; + if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { + /* RAM access */ + uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; + + return ldn_p((void *)haddr, size); + } + /* Fall through for handling IO accesses */ + } + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; @@ -811,7 +850,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, uint64_t val, target_ulong addr, - uintptr_t retaddr, int size) + uintptr_t retaddr, bool recheck, int size) { CPUState *cpu = ENV_GET_CPU(env); hwaddr mr_offset; @@ -820,6 +859,30 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, bool locked = false; MemTxResult r; + if (recheck) { + /* + * This is a TLB_RECHECK access, where the MMU protection + * covers a smaller range than a target page, and we must + * repeat the MMU check here. This tlb_fill() call might + * longjump out if this access should cause a guest exception. 
+ */ + int index; + target_ulong tlb_addr; + + tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); + + index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { + /* RAM access */ + uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; + + stn_p((void *)haddr, size, val); + return; + } + /* Fall through for handling IO accesses */ + } + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; @@ -903,6 +966,32 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); } } + + if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) { + /* + * This is a TLB_RECHECK access, where the MMU protection + * covers a smaller range than a target page, and we must + * repeat the MMU check here. This tlb_fill() call might + * longjump out if this access should cause a guest exception. + */ + int index; + target_ulong tlb_addr; + + tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0); + + index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + tlb_addr = env->tlb_table[mmu_idx][index].addr_code; + if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { + /* RAM access. We can't handle this, so for now just stop */ + cpu_abort(cpu, "Unable to handle guest executing from RAM within " + "a small MPU region at 0x" TARGET_FMT_lx, addr); + } + /* + * Fall through to handle IO accesses (which will almost certainly + * also result in failure) + */ + } + iotlbentry = &env->iotlb[mmu_idx][index]; section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; @@ -1011,8 +1100,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK; } - /* Notice an IO access */ - if (unlikely(tlb_addr & TLB_MMIO)) { + /* Notice an IO access or a needs-MMU-lookup access */ + if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) { /* There's really nothing that can be done to support this apart from stop-the-world. */ goto stop_the_world; diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h index 239ea6692b..c47591c970 100644 --- a/accel/tcg/softmmu_template.h +++ b/accel/tcg/softmmu_template.h @@ -98,10 +98,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, size_t mmu_idx, size_t index, target_ulong addr, - uintptr_t retaddr) + uintptr_t retaddr, + bool recheck) { CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; - return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE); + return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck, + DATA_SIZE); } #endif @@ -138,7 +140,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); + res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr, + tlb_addr & TLB_RECHECK); res = TGT_LE(res); return res; } @@ -205,7 +208,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. 
*/ - res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr); + res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr, + tlb_addr & TLB_RECHECK); res = TGT_BE(res); return res; } @@ -259,10 +263,12 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env, size_t mmu_idx, size_t index, DATA_TYPE val, target_ulong addr, - uintptr_t retaddr) + uintptr_t retaddr, + bool recheck) { CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index]; - return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE); + return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, + recheck, DATA_SIZE); } void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, @@ -298,7 +304,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ val = TGT_LE(val); - glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); + glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, + retaddr, tlb_addr & TLB_RECHECK); return; } @@ -375,7 +382,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ val = TGT_BE(val); - glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr); + glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr, + tlb_addr & TLB_RECHECK); return; } diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index 1955a892f4..e68911af0f 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -109,18 +109,6 @@ static void aspeed_soc_init(Object *obj) object_initialize(&s->cpu, sizeof(s->cpu), sc->info->cpu_type); object_property_add_child(obj, "cpu", OBJECT(&s->cpu), NULL); - object_initialize(&s->vic, sizeof(s->vic), TYPE_ASPEED_VIC); - object_property_add_child(obj, "vic", OBJECT(&s->vic), NULL); - qdev_set_parent_bus(DEVICE(&s->vic), sysbus_get_default()); - - object_initialize(&s->timerctrl, sizeof(s->timerctrl), TYPE_ASPEED_TIMER); - object_property_add_child(obj, "timerctrl", OBJECT(&s->timerctrl), NULL); - qdev_set_parent_bus(DEVICE(&s->timerctrl), sysbus_get_default()); - - object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C); - object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL); - qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default()); - object_initialize(&s->scu, sizeof(s->scu), TYPE_ASPEED_SCU); object_property_add_child(obj, "scu", OBJECT(&s->scu), NULL); qdev_set_parent_bus(DEVICE(&s->scu), sysbus_get_default()); @@ -133,6 +121,20 @@ static void aspeed_soc_init(Object *obj) object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu), "hw-prot-key", &error_abort); + object_initialize(&s->vic, sizeof(s->vic), TYPE_ASPEED_VIC); + object_property_add_child(obj, "vic", OBJECT(&s->vic), NULL); + qdev_set_parent_bus(DEVICE(&s->vic), sysbus_get_default()); + + object_initialize(&s->timerctrl, sizeof(s->timerctrl), TYPE_ASPEED_TIMER); + object_property_add_child(obj, "timerctrl", OBJECT(&s->timerctrl), NULL); + object_property_add_const_link(OBJECT(&s->timerctrl), "scu", + OBJECT(&s->scu), &error_abort); + qdev_set_parent_bus(DEVICE(&s->timerctrl), sysbus_get_default()); + + object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C); + object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL); + qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default()); + object_initialize(&s->fmc, sizeof(s->fmc), 
sc->info->fmc_typename); object_property_add_child(obj, "fmc", OBJECT(&s->fmc), NULL); qdev_set_parent_bus(DEVICE(&s->fmc), sysbus_get_default()); @@ -195,6 +197,14 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(get_system_memory(), ASPEED_SOC_SRAM_BASE, &s->sram); + /* SCU */ + object_property_set_bool(OBJECT(&s->scu), true, "realized", &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, ASPEED_SOC_SCU_BASE); + /* VIC */ object_property_set_bool(OBJECT(&s->vic), true, "realized", &err); if (err) { @@ -219,14 +229,6 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } - /* SCU */ - object_property_set_bool(OBJECT(&s->scu), true, "realized", &err); - if (err) { - error_propagate(errp, err); - return; - } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, ASPEED_SOC_SCU_BASE); - /* UART - attach an 8250 to the IO space as our UART5 */ if (serial_hd(0)) { qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]); diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c index 9af04728e3..539d29ef9c 100644 --- a/hw/arm/omap1.c +++ b/hw/arm/omap1.c @@ -34,12 +34,18 @@ #include "qemu/cutils.h" #include "qemu/bcd.h" +static inline void omap_log_badwidth(const char *funcname, hwaddr addr, int sz) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: %d-bit register %#08" HWADDR_PRIx "\n", + funcname, 8 * sz, addr); +} + /* Should signal the TCMI/GPMC */ uint32_t omap_badwidth_read8(void *opaque, hwaddr addr) { uint8_t ret; - OMAP_8B_REG(addr); + omap_log_badwidth(__func__, addr, 1); cpu_physical_memory_read(addr, &ret, 1); return ret; } @@ -49,7 +55,7 @@ void omap_badwidth_write8(void *opaque, hwaddr addr, { uint8_t val8 = value; - OMAP_8B_REG(addr); + omap_log_badwidth(__func__, addr, 1); cpu_physical_memory_write(addr, &val8, 1); } @@ -57,7 +63,7 @@ uint32_t omap_badwidth_read16(void *opaque, hwaddr addr) { uint16_t ret; - OMAP_16B_REG(addr); + omap_log_badwidth(__func__, addr, 2); cpu_physical_memory_read(addr, &ret, 2); return ret; } @@ -67,7 +73,7 @@ void omap_badwidth_write16(void *opaque, hwaddr addr, { uint16_t val16 = value; - OMAP_16B_REG(addr); + omap_log_badwidth(__func__, addr, 2); cpu_physical_memory_write(addr, &val16, 2); } @@ -75,7 +81,7 @@ uint32_t omap_badwidth_read32(void *opaque, hwaddr addr) { uint32_t ret; - OMAP_32B_REG(addr); + omap_log_badwidth(__func__, addr, 4); cpu_physical_memory_read(addr, &ret, 4); return ret; } @@ -83,7 +89,7 @@ uint32_t omap_badwidth_read32(void *opaque, hwaddr addr) void omap_badwidth_write32(void *opaque, hwaddr addr, uint32_t value) { - OMAP_32B_REG(addr); + omap_log_badwidth(__func__, addr, 4); cpu_physical_memory_write(addr, &value, 4); } diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 3c5f7245b5..3098915d07 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -24,11 +24,43 @@ #include "qom/cpu.h" #include "hw/qdev-properties.h" #include "qapi/error.h" +#include "qemu/jhash.h" #include "qemu/error-report.h" #include "hw/arm/smmu-common.h" #include "smmu-internal.h" +/* IOTLB Management */ + +inline void smmu_iotlb_inv_all(SMMUState *s) +{ + trace_smmu_iotlb_inv_all(); + g_hash_table_remove_all(s->iotlb); +} + +static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, + gpointer user_data) +{ + uint16_t asid = *(uint16_t *)user_data; + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; + + return iotlb_key->asid == asid; +} + +inline void 
smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova) +{ + SMMUIOTLBKey key = {.asid = asid, .iova = iova}; + + trace_smmu_iotlb_inv_iova(asid, iova); + g_hash_table_remove(s->iotlb, &key); +} + +inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) +{ + trace_smmu_iotlb_inv_asid(asid); + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); +} + /* VMSAv8-64 Translation */ /** @@ -310,6 +342,83 @@ static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn) return &sdev->as; } +IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid) +{ + uint8_t bus_n, devfn; + SMMUPciBus *smmu_bus; + SMMUDevice *smmu; + + bus_n = PCI_BUS_NUM(sid); + smmu_bus = smmu_find_smmu_pcibus(s, bus_n); + if (smmu_bus) { + devfn = sid & 0x7; + smmu = smmu_bus->pbdev[devfn]; + if (smmu) { + return &smmu->iommu; + } + } + return NULL; +} + +static guint smmu_iotlb_key_hash(gconstpointer v) +{ + SMMUIOTLBKey *key = (SMMUIOTLBKey *)v; + uint32_t a, b, c; + + /* Jenkins hash */ + a = b = c = JHASH_INITVAL + sizeof(*key); + a += key->asid; + b += extract64(key->iova, 0, 32); + c += extract64(key->iova, 32, 32); + + __jhash_mix(a, b, c); + __jhash_final(a, b, c); + + return c; +} + +static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2) +{ + const SMMUIOTLBKey *k1 = v1; + const SMMUIOTLBKey *k2 = v2; + + return (k1->asid == k2->asid) && (k1->iova == k2->iova); +} + +/* Unmap the whole notifier's range */ +static void smmu_unmap_notifier_range(IOMMUNotifier *n) +{ + IOMMUTLBEntry entry; + + entry.target_as = &address_space_memory; + entry.iova = n->start; + entry.perm = IOMMU_NONE; + entry.addr_mask = n->end - n->start; + + memory_region_notify_one(n, &entry); +} + +/* Unmap all notifiers attached to @mr */ +inline void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) +{ + IOMMUNotifier *n; + + trace_smmu_inv_notifiers_mr(mr->parent_obj.name); + IOMMU_NOTIFIER_FOREACH(n, mr) { + smmu_unmap_notifier_range(n); + } +} + +/* Unmap all notifiers of all mr's */ +void smmu_inv_notifiers_all(SMMUState *s) +{ + SMMUNotifierNode *node; + + QLIST_FOREACH(node, &s->notifiers_list, next) { + smmu_inv_notifiers_mr(&node->sdev->iommu); + } +} + static void smmu_base_realize(DeviceState *dev, Error **errp) { SMMUState *s = ARM_SMMU(dev); @@ -321,7 +430,9 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) error_propagate(errp, local_err); return; } - + s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free); + s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal, + g_free, g_free); s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL); if (s->primary_bus) { @@ -333,7 +444,10 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) static void smmu_base_reset(DeviceState *dev) { - /* will be filled later on */ + SMMUState *s = ARM_SMMU(dev); + + g_hash_table_remove_all(s->configs); + g_hash_table_remove_all(s->iotlb); } static Property smmu_dev_properties[] = { diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index a9d714b56e..bab25d640e 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -23,6 +23,14 @@ #include "hw/arm/smmu-common.h" +typedef enum SMMUTranslationStatus { + SMMU_TRANS_DISABLE, + SMMU_TRANS_ABORT, + SMMU_TRANS_BYPASS, + SMMU_TRANS_ERROR, + SMMU_TRANS_SUCCESS, +} SMMUTranslationStatus; + /* MMIO Registers */ REG32(IDR0, 0x0) @@ -315,7 +323,7 @@ enum { /* Command completion notification */ /* Events */ typedef enum SMMUEventType { - SMMU_EVT_OK = 0x00, + SMMU_EVT_NONE = 
0x00, SMMU_EVT_F_UUT , SMMU_EVT_C_BAD_STREAMID , SMMU_EVT_F_STE_FETCH , @@ -337,7 +345,7 @@ typedef enum SMMUEventType { } SMMUEventType; static const char *event_stringify[] = { - [SMMU_EVT_OK] = "SMMU_EVT_OK", + [SMMU_EVT_NONE] = "no recorded event", [SMMU_EVT_F_UUT] = "SMMU_EVT_F_UUT", [SMMU_EVT_C_BAD_STREAMID] = "SMMU_EVT_C_BAD_STREAMID", [SMMU_EVT_F_STE_FETCH] = "SMMU_EVT_F_STE_FETCH", diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 978330900d..39fbcbf577 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -23,6 +23,7 @@ #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "exec/address-spaces.h" +#include "cpu.h" #include "trace.h" #include "qemu/log.h" #include "qemu/error-report.h" @@ -154,7 +155,7 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info) EVT_SET_SID(&evt, info->sid); switch (info->type) { - case SMMU_EVT_OK: + case SMMU_EVT_NONE: return; case SMMU_EVT_F_UUT: EVT_SET_SSID(&evt, info->u.f_uut.ssid); @@ -312,12 +313,11 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, return 0; } -/* Returns <0 if the caller has no need to continue the translation */ +/* Returns < 0 in case of invalid STE, 0 otherwise */ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, STE *ste, SMMUEventInfo *event) { uint32_t config; - int ret = -EINVAL; if (!STE_VALID(ste)) { goto bad_ste; @@ -326,13 +326,13 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, config = STE_CONFIG(ste); if (STE_CFG_ABORT(config)) { - cfg->aborted = true; /* abort but don't record any event */ - return ret; + cfg->aborted = true; + return 0; } if (STE_CFG_BYPASS(config)) { cfg->bypassed = true; - return ret; + return 0; } if (STE_CFG_S2_ENABLED(config)) { @@ -509,7 +509,7 @@ bad_cd: * the different configuration decoding steps * @event: must be zero'ed by the caller * - * return < 0 if the translation needs to be aborted (@event is filled + * return < 0 in case of config decoding error (@event is filled * accordingly). Return 0 otherwise. */ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, @@ -518,34 +518,98 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); uint32_t sid = smmu_get_sid(sdev); SMMUv3State *s = sdev->smmu; - int ret = -EINVAL; + int ret; STE ste; CD cd; - if (smmu_find_ste(s, sid, &ste, event)) { + ret = smmu_find_ste(s, sid, &ste, event); + if (ret) { return ret; } - if (decode_ste(s, cfg, &ste, event)) { + ret = decode_ste(s, cfg, &ste, event); + if (ret) { return ret; } - if (smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event)) { + if (cfg->aborted || cfg->bypassed) { + return 0; + } + + ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event); + if (ret) { return ret; } return decode_cd(cfg, &cd, event); } +/** + * smmuv3_get_config - Look up for a cached copy of configuration data for + * @sdev and on cache miss performs a configuration structure decoding from + * guest RAM. + * + * @sdev: SMMUDevice handle + * @event: output event info + * + * The configuration cache contains data resulting from both STE and CD + * decoding under the form of an SMMUTransCfg struct. The hash table is indexed + * by the SMMUDevice handle. 
+ */ +static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event) +{ + SMMUv3State *s = sdev->smmu; + SMMUState *bc = &s->smmu_state; + SMMUTransCfg *cfg; + + cfg = g_hash_table_lookup(bc->configs, sdev); + if (cfg) { + sdev->cfg_cache_hits++; + trace_smmuv3_config_cache_hit(smmu_get_sid(sdev), + sdev->cfg_cache_hits, sdev->cfg_cache_misses, + 100 * sdev->cfg_cache_hits / + (sdev->cfg_cache_hits + sdev->cfg_cache_misses)); + } else { + sdev->cfg_cache_misses++; + trace_smmuv3_config_cache_miss(smmu_get_sid(sdev), + sdev->cfg_cache_hits, sdev->cfg_cache_misses, + 100 * sdev->cfg_cache_hits / + (sdev->cfg_cache_hits + sdev->cfg_cache_misses)); + cfg = g_new0(SMMUTransCfg, 1); + + if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) { + g_hash_table_insert(bc->configs, sdev, cfg); + } else { + g_free(cfg); + cfg = NULL; + } + } + return cfg; +} + +static void smmuv3_flush_config(SMMUDevice *sdev) +{ + SMMUv3State *s = sdev->smmu; + SMMUState *bc = &s->smmu_state; + + trace_smmuv3_config_cache_inv(smmu_get_sid(sdev)); + g_hash_table_remove(bc->configs, sdev); +} + static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, IOMMUAccessFlags flag, int iommu_idx) { SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); SMMUv3State *s = sdev->smmu; uint32_t sid = smmu_get_sid(sdev); - SMMUEventInfo event = {.type = SMMU_EVT_OK, .sid = sid}; + SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid}; SMMUPTWEventInfo ptw_info = {}; - SMMUTransCfg cfg = {}; + SMMUTranslationStatus status; + SMMUState *bs = ARM_SMMU(s); + uint64_t page_mask, aligned_addr; + IOMMUTLBEntry *cached_entry = NULL; + SMMUTransTableInfo *tt; + SMMUTransCfg *cfg = NULL; IOMMUTLBEntry entry = { .target_as = &address_space_memory, .iova = addr, @@ -553,23 +617,82 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, .addr_mask = ~(hwaddr)0, .perm = IOMMU_NONE, }; - int ret = 0; + SMMUIOTLBKey key, *new_key; + + qemu_mutex_lock(&s->mutex); if (!smmu_enabled(s)) { - goto out; + status = SMMU_TRANS_DISABLE; + goto epilogue; } - ret = smmuv3_decode_config(mr, &cfg, &event); - if (ret) { - goto out; + cfg = smmuv3_get_config(sdev, &event); + if (!cfg) { + status = SMMU_TRANS_ERROR; + goto epilogue; } - if (cfg.aborted) { - goto out; + if (cfg->aborted) { + status = SMMU_TRANS_ABORT; + goto epilogue; } - ret = smmu_ptw(&cfg, addr, flag, &entry, &ptw_info); - if (ret) { + if (cfg->bypassed) { + status = SMMU_TRANS_BYPASS; + goto epilogue; + } + + tt = select_tt(cfg, addr); + if (!tt) { + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_TRANSLATION; + event.u.f_translation.addr = addr; + event.u.f_translation.rnw = flag & 0x1; + } + status = SMMU_TRANS_ERROR; + goto epilogue; + } + + page_mask = (1ULL << (tt->granule_sz)) - 1; + aligned_addr = addr & ~page_mask; + + key.asid = cfg->asid; + key.iova = aligned_addr; + + cached_entry = g_hash_table_lookup(bs->iotlb, &key); + if (cached_entry) { + cfg->iotlb_hits++; + trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr, + cfg->iotlb_hits, cfg->iotlb_misses, + 100 * cfg->iotlb_hits / + (cfg->iotlb_hits + cfg->iotlb_misses)); + if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) { + status = SMMU_TRANS_ERROR; + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_PERMISSION; + event.u.f_permission.addr = addr; + event.u.f_permission.rnw = flag & 0x1; + } + } else { + status = SMMU_TRANS_SUCCESS; + } + goto epilogue; + } + + cfg->iotlb_misses++; + trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask, + 
cfg->iotlb_hits, cfg->iotlb_misses, + 100 * cfg->iotlb_hits / + (cfg->iotlb_hits + cfg->iotlb_misses)); + + if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) { + smmu_iotlb_inv_all(bs); + } + + cached_entry = g_new0(IOMMUTLBEntry, 1); + + if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) { + g_free(cached_entry); switch (ptw_info.type) { case SMMU_PTW_ERR_WALK_EABT: event.type = SMMU_EVT_F_WALK_EABT; @@ -609,25 +732,119 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, default: g_assert_not_reached(); } + status = SMMU_TRANS_ERROR; + } else { + new_key = g_new0(SMMUIOTLBKey, 1); + new_key->asid = cfg->asid; + new_key->iova = aligned_addr; + g_hash_table_insert(bs->iotlb, new_key, cached_entry); + status = SMMU_TRANS_SUCCESS; } -out: - if (ret) { + +epilogue: + qemu_mutex_unlock(&s->mutex); + switch (status) { + case SMMU_TRANS_SUCCESS: + entry.perm = flag; + entry.translated_addr = cached_entry->translated_addr + + (addr & page_mask); + entry.addr_mask = cached_entry->addr_mask; + trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr, + entry.translated_addr, entry.perm); + break; + case SMMU_TRANS_DISABLE: + entry.perm = flag; + entry.addr_mask = ~TARGET_PAGE_MASK; + trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr, + entry.perm); + break; + case SMMU_TRANS_BYPASS: + entry.perm = flag; + entry.addr_mask = ~TARGET_PAGE_MASK; + trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr, + entry.perm); + break; + case SMMU_TRANS_ABORT: + /* no event is recorded on abort */ + trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr, + entry.perm); + break; + case SMMU_TRANS_ERROR: qemu_log_mask(LOG_GUEST_ERROR, - "%s translation failed for iova=0x%"PRIx64"(%d)\n", - mr->parent_obj.name, addr, ret); - entry.perm = IOMMU_NONE; + "%s translation failed for iova=0x%"PRIx64"(%s)\n", + mr->parent_obj.name, addr, smmu_event_string(event.type)); smmuv3_record_event(s, &event); - } else if (!cfg.aborted) { - entry.perm = flag; - trace_smmuv3_translate(mr->parent_obj.name, sid, addr, - entry.translated_addr, entry.perm); + break; } return entry; } +/** + * smmuv3_notify_iova - call the notifier @n for a given + * @asid and @iova tuple. 
+ * + * @mr: IOMMU mr region handle + * @n: notifier to be called + * @asid: address space ID or negative value if we don't care + * @iova: iova + */ +static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, + IOMMUNotifier *n, + int asid, + dma_addr_t iova) +{ + SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); + SMMUEventInfo event = {}; + SMMUTransTableInfo *tt; + SMMUTransCfg *cfg; + IOMMUTLBEntry entry; + + cfg = smmuv3_get_config(sdev, &event); + if (!cfg) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s error decoding the configuration for iommu mr=%s\n", + __func__, mr->parent_obj.name); + return; + } + + if (asid >= 0 && cfg->asid != asid) { + return; + } + + tt = select_tt(cfg, iova); + if (!tt) { + return; + } + + entry.target_as = &address_space_memory; + entry.iova = iova; + entry.addr_mask = (1 << tt->granule_sz) - 1; + entry.perm = IOMMU_NONE; + + memory_region_notify_one(n, &entry); +} + +/* invalidate an asid/iova tuple in all mr's */ +static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova) +{ + SMMUNotifierNode *node; + + QLIST_FOREACH(node, &s->notifiers_list, next) { + IOMMUMemoryRegion *mr = &node->sdev->iommu; + IOMMUNotifier *n; + + trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova); + + IOMMU_NOTIFIER_FOREACH(n, mr) { + smmuv3_notify_iova(mr, n, asid, iova); + } + } +} + static int smmuv3_cmdq_consume(SMMUv3State *s) { + SMMUState *bs = ARM_SMMU(s); SMMUCmdError cmd_error = SMMU_CERROR_NONE; SMMUQueue *q = &s->cmdq; SMMUCommandType type = 0; @@ -662,6 +879,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) trace_smmuv3_cmdq_opcode(smmu_cmd_string(type)); + qemu_mutex_lock(&s->mutex); switch (type) { case SMMU_CMD_SYNC: if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) { @@ -670,14 +888,111 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) break; case SMMU_CMD_PREFETCH_CONFIG: case SMMU_CMD_PREFETCH_ADDR: + break; case SMMU_CMD_CFGI_STE: + { + uint32_t sid = CMD_SID(&cmd); + IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid); + SMMUDevice *sdev; + + if (CMD_SSEC(&cmd)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + if (!mr) { + break; + } + + trace_smmuv3_cmdq_cfgi_ste(sid); + sdev = container_of(mr, SMMUDevice, iommu); + smmuv3_flush_config(sdev); + + break; + } case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */ + { + uint32_t start = CMD_SID(&cmd), end, i; + uint8_t range = CMD_STE_RANGE(&cmd); + + if (CMD_SSEC(&cmd)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + end = start + (1 << (range + 1)) - 1; + trace_smmuv3_cmdq_cfgi_ste_range(start, end); + + for (i = start; i <= end; i++) { + IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i); + SMMUDevice *sdev; + + if (!mr) { + continue; + } + sdev = container_of(mr, SMMUDevice, iommu); + smmuv3_flush_config(sdev); + } + break; + } case SMMU_CMD_CFGI_CD: case SMMU_CMD_CFGI_CD_ALL: - case SMMU_CMD_TLBI_NH_ALL: + { + uint32_t sid = CMD_SID(&cmd); + IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid); + SMMUDevice *sdev; + + if (CMD_SSEC(&cmd)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + if (!mr) { + break; + } + + trace_smmuv3_cmdq_cfgi_cd(sid); + sdev = container_of(mr, SMMUDevice, iommu); + smmuv3_flush_config(sdev); + break; + } case SMMU_CMD_TLBI_NH_ASID: - case SMMU_CMD_TLBI_NH_VA: + { + uint16_t asid = CMD_ASID(&cmd); + + trace_smmuv3_cmdq_tlbi_nh_asid(asid); + smmu_inv_notifiers_all(&s->smmu_state); + smmu_iotlb_inv_asid(bs, asid); + break; + } + case SMMU_CMD_TLBI_NH_ALL: + case SMMU_CMD_TLBI_NSNH_ALL: + trace_smmuv3_cmdq_tlbi_nh(); + smmu_inv_notifiers_all(&s->smmu_state); + 
smmu_iotlb_inv_all(bs); + break; case SMMU_CMD_TLBI_NH_VAA: + { + dma_addr_t addr = CMD_ADDR(&cmd); + uint16_t vmid = CMD_VMID(&cmd); + + trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr); + smmuv3_inv_notifiers_iova(bs, -1, addr); + smmu_iotlb_inv_all(bs); + break; + } + case SMMU_CMD_TLBI_NH_VA: + { + uint16_t asid = CMD_ASID(&cmd); + uint16_t vmid = CMD_VMID(&cmd); + dma_addr_t addr = CMD_ADDR(&cmd); + bool leaf = CMD_LEAF(&cmd); + + trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf); + smmuv3_inv_notifiers_iova(bs, asid, addr); + smmu_iotlb_inv_iova(bs, asid, addr); + break; + } case SMMU_CMD_TLBI_EL3_ALL: case SMMU_CMD_TLBI_EL3_VA: case SMMU_CMD_TLBI_EL2_ALL: @@ -686,7 +1001,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) case SMMU_CMD_TLBI_EL2_VAA: case SMMU_CMD_TLBI_S12_VMALL: case SMMU_CMD_TLBI_S2_IPA: - case SMMU_CMD_TLBI_NSNH_ALL: case SMMU_CMD_ATC_INV: case SMMU_CMD_PRI_RESP: case SMMU_CMD_RESUME: @@ -699,6 +1013,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) "Illegal command type: %d\n", CMD_TYPE(&cmd)); break; } + qemu_mutex_unlock(&s->mutex); if (cmd_error) { break; } @@ -1078,6 +1393,8 @@ static void smmu_realize(DeviceState *d, Error **errp) return; } + qemu_mutex_init(&s->mutex); + memory_region_init_io(&sys->iomem, OBJECT(s), &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000); @@ -1151,9 +1468,38 @@ static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, IOMMUNotifierFlag old, IOMMUNotifierFlag new) { + SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu); + SMMUv3State *s3 = sdev->smmu; + SMMUState *s = &(s3->smmu_state); + SMMUNotifierNode *node = NULL; + SMMUNotifierNode *next_node = NULL; + + if (new & IOMMU_NOTIFIER_MAP) { + int bus_num = pci_bus_num(sdev->bus); + PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn); + + warn_report("SMMUv3 does not support notification on MAP: " + "device %s will not function properly", pcidev->name); + } + if (old == IOMMU_NOTIFIER_NONE) { - warn_report("SMMUV3 does not support vhost/vfio integration yet: " - "devices of those types will not function properly"); + trace_smmuv3_notify_flag_add(iommu->parent_obj.name); + node = g_malloc0(sizeof(*node)); + node->sdev = sdev; + QLIST_INSERT_HEAD(&s->notifiers_list, node, next); + return; + } + + /* update notifier node with new flags */ + QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) { + if (node->sdev == sdev) { + if (new == IOMMU_NOTIFIER_NONE) { + trace_smmuv3_notify_flag_del(iommu->parent_obj.name); + QLIST_REMOVE(node, next); + g_free(node); + } + return; + } } } diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index a8f1f6a912..dc521b4a5a 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -212,7 +212,8 @@ static uint64_t gptm_read(void *opaque, hwaddr offset, return 0; default: qemu_log_mask(LOG_GUEST_ERROR, - "GPTM: read at bad offset 0x%x\n", (int)offset); + "GPTM: read at bad offset 0x02%" HWADDR_PRIx "\n", + offset); return 0; } } @@ -294,7 +295,8 @@ static void gptm_write(void *opaque, hwaddr offset, break; default: qemu_log_mask(LOG_GUEST_ERROR, - "GPTM: read at bad offset 0x%x\n", (int)offset); + "GPTM: write at bad offset 0x02%" HWADDR_PRIx "\n", + offset); } gptm_update_irq(s); } @@ -560,7 +562,7 @@ static void ssys_write(void *opaque, hwaddr offset, case 0x040: /* SRCR0 */ case 0x044: /* SRCR1 */ case 0x048: /* SRCR2 */ - fprintf(stderr, "Peripheral reset not implemented\n"); + qemu_log_mask(LOG_UNIMP, "Peripheral reset not implemented\n"); break; case 0x054: /* IMC */ s->int_mask = value & 0x7f; diff --git 
a/hw/arm/trace-events b/hw/arm/trace-events index 2d92727602..27b11d655d 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -12,6 +12,12 @@ smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64 smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB" smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 +smmu_iotlb_cache_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_cache_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_inv_all(void) "IOTLB invalidate all" +smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" +smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 +smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" #hw/arm/smmuv3.c smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" @@ -33,9 +39,24 @@ smmuv3_record_event(const char *type, uint32_t sid) "%s sid=%d" smmuv3_find_ste(uint16_t sid, uint32_t features, uint16_t sid_split) "SID:0x%x features:0x%x, sid_split:0x%x" smmuv3_find_ste_2lvl(uint64_t strtab_base, uint64_t l1ptr, int l1_ste_offset, uint64_t l2ptr, int l2_ste_offset, int max_l2_ste) "strtab_base:0x%"PRIx64" l1ptr:0x%"PRIx64" l1_off:0x%x, l2ptr:0x%"PRIx64" l2_off:0x%x max_l2_ste:%d" smmuv3_get_ste(uint64_t addr) "STE addr: 0x%"PRIx64 -smmuv3_translate_bypass(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=%d bypass iova:0x%"PRIx64" is_write=%d" -smmuv3_translate_in(uint16_t sid, int pci_bus_num, uint64_t strtab_base) "SID:0x%x bus:%d strtab_base:0x%"PRIx64 +smmuv3_translate_disable(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=%d bypass (smmu disabled) iova:0x%"PRIx64" is_write=%d" +smmuv3_translate_bypass(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=%d STE bypass iova:0x%"PRIx64" is_write=%d" +smmuv3_translate_abort(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=%d abort on iova:0x%"PRIx64" is_write=%d" +smmuv3_translate_success(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm) "%s sid=%d iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x" smmuv3_get_cd(uint64_t addr) "CD addr: 0x%"PRIx64 -smmuv3_translate(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm) "%s sid=%d iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x" smmuv3_decode_cd(uint32_t oas) "oas=%d" smmuv3_decode_cd_tt(int i, uint32_t tsz, uint64_t ttb, uint32_t granule_sz) "TT[%d]:tsz:%d ttb:0x%"PRIx64" granule_sz:%d" +smmuv3_cmdq_cfgi_ste(int streamid) "streamid =%d" +smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%d - end=0x%d" +smmuv3_cmdq_cfgi_cd(uint32_t sid) "streamid = %d" +smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config 
cache HIT for sid %d (hits=%d, misses=%d, hit rate=%d)" +smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid %d (hits=%d, misses=%d, hit rate=%d)" +smmuv3_cmdq_tlbi_nh_va(int vmid, int asid, uint64_t addr, bool leaf) "vmid =%d asid =%d addr=0x%"PRIx64" leaf=%d" +smmuv3_cmdq_tlbi_nh_vaa(int vmid, uint64_t addr) "vmid =%d addr=0x%"PRIx64 +smmuv3_cmdq_tlbi_nh(void) "" +smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid %d" +smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" +smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" +smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova) "iommu mr=%s asid=%d iova=0x%"PRIx64 + diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c index abd18c67ea..cbb920f31d 100644 --- a/hw/dma/omap_dma.c +++ b/hw/dma/omap_dma.c @@ -18,6 +18,7 @@ * with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu-common.h" #include "qemu/timer.h" #include "hw/arm/omap.h" @@ -878,15 +879,18 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s, ch->burst[0] = (value & 0x0180) >> 7; ch->pack[0] = (value & 0x0040) >> 6; ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2); - if (ch->port[0] >= __omap_dma_port_last) - printf("%s: invalid DMA port %i\n", __func__, - ch->port[0]); - if (ch->port[1] >= __omap_dma_port_last) - printf("%s: invalid DMA port %i\n", __func__, - ch->port[1]); + if (ch->port[0] >= __omap_dma_port_last) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n", + __func__, ch->port[0]); + } + if (ch->port[1] >= __omap_dma_port_last) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n", + __func__, ch->port[1]); + } ch->data_type = 1 << (value & 3); if ((value & 3) == 3) { - printf("%s: bad data_type for DMA channel\n", __func__); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad data_type for DMA channel\n", __func__); ch->data_type >>= 1; } break; @@ -1439,8 +1443,9 @@ static int omap_dma_sys_read(struct omap_dma_s *s, int offset, case 0x480: /* DMA_PCh0_SR */ case 0x482: /* DMA_PCh1_SR */ case 0x4c0: /* DMA_PChD_SR_0 */ - printf("%s: Physical Channel Status Registers not implemented.\n", - __func__); + qemu_log_mask(LOG_UNIMP, + "%s: Physical Channel Status Registers not implemented\n", + __func__); *ret = 0xff; break; @@ -1897,14 +1902,18 @@ static void omap_dma4_write(void *opaque, hwaddr addr, if (value & 2) /* SOFTRESET */ omap_dma_reset(s->dma); s->ocp = value & 0x3321; - if (((s->ocp >> 12) & 3) == 3) /* MIDLEMODE */ - fprintf(stderr, "%s: invalid DMA power mode\n", __func__); + if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n", + __func__); + } return; case 0x78: /* DMA4_GCR */ s->gcr = value & 0x00ff00ff; - if ((value & 0xff) == 0x00) /* MAX_CHANNEL_FIFO_DEPTH */ - fprintf(stderr, "%s: wrong FIFO depth in GCR\n", __func__); + if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n", + __func__); + } return; case 0x80 ... 
0xfff: @@ -1933,9 +1942,11 @@ static void omap_dma4_write(void *opaque, hwaddr addr, case 0x00: /* DMA4_CCR */ ch->buf_disable = (value >> 25) & 1; ch->src_sync = (value >> 24) & 1; /* XXX For CamDMA must be 1 */ - if (ch->buf_disable && !ch->src_sync) - fprintf(stderr, "%s: Buffering disable is not allowed in " - "destination synchronised mode\n", __func__); + if (ch->buf_disable && !ch->src_sync) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Buffering disable is not allowed in " + "destination synchronised mode\n", __func__); + } ch->prefetch = (value >> 23) & 1; ch->bs = (value >> 18) & 1; ch->transparent_copy = (value >> 17) & 1; @@ -1945,9 +1956,11 @@ static void omap_dma4_write(void *opaque, hwaddr addr, ch->suspend = (value & 0x0100) >> 8; ch->priority = (value & 0x0040) >> 6; ch->fs = (value & 0x0020) >> 5; - if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) - fprintf(stderr, "%s: For a packet transfer at least one port " - "must be constant-addressed\n", __func__); + if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: For a packet transfer at least one port " + "must be constant-addressed\n", __func__); + } ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060); /* XXX must be 0x01 for CamDMA */ @@ -1976,9 +1989,11 @@ static void omap_dma4_write(void *opaque, hwaddr addr, ch->endian_lock[0] =(value >> 20) & 1; ch->endian[1] =(value >> 19) & 1; ch->endian_lock[1] =(value >> 18) & 1; - if (ch->endian[0] != ch->endian[1]) - fprintf(stderr, "%s: DMA endianness conversion enable attempt\n", - __func__); + if (ch->endian[0] != ch->endian[1]) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: DMA endianness conversion enable attempt\n", + __func__); + } ch->write_mode = (value >> 16) & 3; ch->burst[1] = (value & 0xc000) >> 14; ch->pack[1] = (value & 0x2000) >> 13; @@ -1986,12 +2001,15 @@ static void omap_dma4_write(void *opaque, hwaddr addr, ch->burst[0] = (value & 0x0180) >> 7; ch->pack[0] = (value & 0x0040) >> 6; ch->translate[0] = (value & 0x003c) >> 2; - if (ch->translate[0] | ch->translate[1]) - fprintf(stderr, "%s: bad MReqAddressTranslate sideband signal\n", - __func__); + if (ch->translate[0] | ch->translate[1]) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad MReqAddressTranslate sideband signal\n", + __func__); + } ch->data_type = 1 << (value & 3); if ((value & 3) == 3) { - printf("%s: bad data_type for DMA channel\n", __func__); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad data_type for DMA channel\n", __func__); ch->data_type >>= 1; } break; diff --git a/hw/i2c/omap_i2c.c b/hw/i2c/omap_i2c.c index 26e3e5ebf6..d02e734ea8 100644 --- a/hw/i2c/omap_i2c.c +++ b/hw/i2c/omap_i2c.c @@ -17,6 +17,7 @@ * with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "hw/i2c/i2c.h" #include "hw/arm/omap.h" @@ -339,14 +340,15 @@ static void omap_i2c_write(void *opaque, hwaddr addr, } break; } - if ((value & (1 << 15)) && !(value & (1 << 10))) { /* MST */ - fprintf(stderr, "%s: I^2C slave mode not supported\n", - __func__); + if ((value & (1 << 15)) && !(value & (1 << 10))) { /* MST */ + qemu_log_mask(LOG_UNIMP, "%s: I^2C slave mode not supported\n", + __func__); break; } - if ((value & (1 << 15)) && value & (1 << 8)) { /* XA */ - fprintf(stderr, "%s: 10-bit addressing mode not supported\n", - __func__); + if ((value & (1 << 15)) && value & (1 << 8)) { /* XA */ + qemu_log_mask(LOG_UNIMP, + "%s: 10-bit addressing mode not supported\n", + __func__); break; } if ((value & (1 << 15)) && value & (1 << 0)) { /* STT */ @@ -392,8 +394,10 @@ static void omap_i2c_write(void *opaque, hwaddr addr, s->stat |= 0x3f; omap_i2c_interrupts_update(s); } - if (value & (1 << 15)) /* ST_EN */ - fprintf(stderr, "%s: System Test not supported\n", __func__); + if (value & (1 << 15)) { /* ST_EN */ + qemu_log_mask(LOG_UNIMP, + "%s: System Test not supported\n", __func__); + } break; default: diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c index f33e3fc63d..07c8801387 100644 --- a/hw/input/pckbd.c +++ b/hw/input/pckbd.c @@ -22,6 +22,7 @@ * THE SOFTWARE. */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "hw/isa/isa.h" #include "hw/i386/pc.h" @@ -308,7 +309,8 @@ static void kbd_write_command(void *opaque, hwaddr addr, /* ignore that */ break; default: - fprintf(stderr, "qemu: unsupported keyboard cmd=0x%02x\n", (int)val); + qemu_log_mask(LOG_GUEST_ERROR, + "unsupported keyboard cmd=0x%02" PRIx64 "\n", val); break; } } diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c index 7990954b6c..4dd95596ab 100644 --- a/hw/input/tsc2005.c +++ b/hw/input/tsc2005.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "qemu/timer.h" #include "ui/console.h" @@ -208,9 +209,10 @@ static void tsc2005_write(TSC2005State *s, int reg, uint16_t data) } s->nextprecision = (data >> 13) & 1; s->timing[0] = data & 0x1fff; - if ((s->timing[0] >> 11) == 3) - fprintf(stderr, "%s: illegal conversion clock setting\n", - __func__); + if ((s->timing[0] >> 11) == 3) { + qemu_log_mask(LOG_GUEST_ERROR, + "tsc2005_write: illegal conversion clock setting\n"); + } break; case 0xd: /* CFR1 */ s->timing[1] = data & 0xf07; @@ -221,8 +223,9 @@ static void tsc2005_write(TSC2005State *s, int reg, uint16_t data) break; default: - fprintf(stderr, "%s: write into read-only register %x\n", - __func__, reg); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write into read-only register 0x%x\n", + __func__, reg); } } diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c index 59315010db..59333b50ab 100644 --- a/hw/misc/aspeed_scu.c +++ b/hw/misc/aspeed_scu.c @@ -168,6 +168,27 @@ static uint32_t aspeed_scu_get_random(void) return num; } +static void aspeed_scu_set_apb_freq(AspeedSCUState *s) +{ + uint32_t apb_divider; + + switch (s->silicon_rev) { + case AST2400_A0_SILICON_REV: + case AST2400_A1_SILICON_REV: + apb_divider = 2; + break; + case AST2500_A0_SILICON_REV: + case AST2500_A1_SILICON_REV: + apb_divider = 4; + break; + default: + g_assert_not_reached(); + } + + s->apb_freq = s->hpll / (SCU_CLK_GET_PCLK_DIV(s->regs[CLK_SEL]) + 1) + / apb_divider; +} + static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size) { AspeedSCUState *s = ASPEED_SCU(opaque); @@ -222,6 +243,10 
@@ static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data, case PROT_KEY: s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0; return; + case CLK_SEL: + s->regs[reg] = data; + aspeed_scu_set_apb_freq(s); + break; case FREQ_CNTR_EVAL: case VGA_SCRATCH1 ... VGA_SCRATCH8: @@ -247,19 +272,93 @@ static const MemoryRegionOps aspeed_scu_ops = { .valid.unaligned = false, }; +static uint32_t aspeed_scu_get_clkin(AspeedSCUState *s) +{ + if (s->hw_strap1 & SCU_HW_STRAP_CLK_25M_IN) { + return 25000000; + } else if (s->hw_strap1 & SCU_HW_STRAP_CLK_48M_IN) { + return 48000000; + } else { + return 24000000; + } +} + +/* + * Strapped frequencies for the AST2400 in MHz. They depend on the + * clkin frequency. + */ +static const uint32_t hpll_ast2400_freqs[][4] = { + { 384, 360, 336, 408 }, /* 24MHz or 48MHz */ + { 400, 375, 350, 425 }, /* 25MHz */ +}; + +static uint32_t aspeed_scu_calc_hpll_ast2400(AspeedSCUState *s) +{ + uint32_t hpll_reg = s->regs[HPLL_PARAM]; + uint8_t freq_select; + bool clk_25m_in; + + if (hpll_reg & SCU_AST2400_H_PLL_OFF) { + return 0; + } + + if (hpll_reg & SCU_AST2400_H_PLL_PROGRAMMED) { + uint32_t multiplier = 1; + + if (!(hpll_reg & SCU_AST2400_H_PLL_BYPASS_EN)) { + uint32_t n = (hpll_reg >> 5) & 0x3f; + uint32_t od = (hpll_reg >> 4) & 0x1; + uint32_t d = hpll_reg & 0xf; + + multiplier = (2 - od) * ((n + 2) / (d + 1)); + } + + return s->clkin * multiplier; + } + + /* HW strapping */ + clk_25m_in = !!(s->hw_strap1 & SCU_HW_STRAP_CLK_25M_IN); + freq_select = SCU_AST2400_HW_STRAP_GET_H_PLL_CLK(s->hw_strap1); + + return hpll_ast2400_freqs[clk_25m_in][freq_select] * 1000000; +} + +static uint32_t aspeed_scu_calc_hpll_ast2500(AspeedSCUState *s) +{ + uint32_t hpll_reg = s->regs[HPLL_PARAM]; + uint32_t multiplier = 1; + + if (hpll_reg & SCU_H_PLL_OFF) { + return 0; + } + + if (!(hpll_reg & SCU_H_PLL_BYPASS_EN)) { + uint32_t p = (hpll_reg >> 13) & 0x3f; + uint32_t m = (hpll_reg >> 5) & 0xff; + uint32_t n = hpll_reg & 0x1f; + + multiplier = ((m + 1) / (n + 1)) / (p + 1); + } + + return s->clkin * multiplier; +} + static void aspeed_scu_reset(DeviceState *dev) { AspeedSCUState *s = ASPEED_SCU(dev); const uint32_t *reset; + uint32_t (*calc_hpll)(AspeedSCUState *s); switch (s->silicon_rev) { case AST2400_A0_SILICON_REV: case AST2400_A1_SILICON_REV: reset = ast2400_a0_resets; + calc_hpll = aspeed_scu_calc_hpll_ast2400; break; case AST2500_A0_SILICON_REV: case AST2500_A1_SILICON_REV: reset = ast2500_a1_resets; + calc_hpll = aspeed_scu_calc_hpll_ast2500; break; default: g_assert_not_reached(); @@ -270,6 +369,13 @@ static void aspeed_scu_reset(DeviceState *dev) s->regs[HW_STRAP1] = s->hw_strap1; s->regs[HW_STRAP2] = s->hw_strap2; s->regs[PROT_KEY] = s->hw_prot_key; + + /* + * All registers are set. 
Now compute the frequencies of the main clocks + */ + s->clkin = aspeed_scu_get_clkin(s); + s->hpll = calc_hpll(s); + aspeed_scu_set_apb_freq(s); } static uint32_t aspeed_silicon_revs[] = { diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index c8cc5379b7..d2fd2040e8 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -11,6 +11,7 @@ #include "hw/sysbus.h" #include "net/net.h" #include "hw/devices.h" +#include "qemu/log.h" /* For crc32 */ #include <zlib.h> @@ -361,10 +362,14 @@ static void smc91c111_writeb(void *opaque, hwaddr offset, SET_HIGH(gpr, value); return; case 12: /* Control */ - if (value & 1) - fprintf(stderr, "smc91c111:EEPROM store not implemented\n"); - if (value & 2) - fprintf(stderr, "smc91c111:EEPROM reload not implemented\n"); + if (value & 1) { + qemu_log_mask(LOG_UNIMP, + "smc91c111: EEPROM store not implemented\n"); + } + if (value & 2) { + qemu_log_mask(LOG_UNIMP, + "smc91c111: EEPROM reload not implemented\n"); + } value &= ~3; SET_LOW(ctr, value); return; @@ -478,7 +483,9 @@ static void smc91c111_writeb(void *opaque, hwaddr offset, } break; } - hw_error("smc91c111_write: Bad reg %d:%x\n", s->bank, (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, "smc91c111_write(bank:%d) Illegal register" + " 0x%" HWADDR_PRIx " = 0x%x\n", + s->bank, offset, value); } static uint32_t smc91c111_readb(void *opaque, hwaddr offset) @@ -621,7 +628,9 @@ static uint32_t smc91c111_readb(void *opaque, hwaddr offset) } break; } - hw_error("smc91c111_read: Bad reg %d:%x\n", s->bank, (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, "smc91c111_read(bank:%d) Illegal register" + " 0x%" HWADDR_PRIx "\n", + s->bank, offset); return 0; } diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c index 04bd10ada3..165562d788 100644 --- a/hw/net/stellaris_enet.c +++ b/hw/net/stellaris_enet.c @@ -9,6 +9,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" #include "net/net.h" +#include "qemu/log.h" #include <zlib.h> //#define DEBUG_STELLARIS_ENET 1 @@ -340,10 +341,12 @@ static uint64_t stellaris_enet_read(void *opaque, hwaddr offset, return s->np; case 0x38: /* TR */ return 0; - case 0x3c: /* Undocuented: Timestamp? */ + case 0x3c: /* Undocumented: Timestamp? */ return 0; default: - hw_error("stellaris_enet_read: Bad offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, "stellaris_enet_rd%d: Illegal register" + " 0x02%" HWADDR_PRIx "\n", + size * 8, offset); return 0; } } @@ -442,7 +445,9 @@ static void stellaris_enet_write(void *opaque, hwaddr offset, /* Ignored. */ break; default: - hw_error("stellaris_enet_write: Bad offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, "stellaris_enet_wr%d: Illegal register " + "0x02%" HWADDR_PRIx " = 0x%" PRIx64 "\n", + size * 8, offset, value); } } diff --git a/hw/sd/omap_mmc.c b/hw/sd/omap_mmc.c index 5b47cadf11..aa2a816f76 100644 --- a/hw/sd/omap_mmc.c +++ b/hw/sd/omap_mmc.c @@ -17,6 +17,7 @@ * with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "hw/arm/omap.h" #include "hw/sd/sd.h" @@ -449,10 +450,14 @@ static void omap_mmc_write(void *opaque, hwaddr offset, s->enable = (value >> 11) & 1; s->be = (value >> 10) & 1; s->clkdiv = (value >> 0) & (s->rev >= 2 ? 
0x3ff : 0xff); - if (s->mode != 0) - printf("SD mode %i unimplemented!\n", s->mode); - if (s->be != 0) - printf("SD FIFO byte sex unimplemented!\n"); + if (s->mode != 0) { + qemu_log_mask(LOG_UNIMP, + "omap_mmc_wr: mode #%i unimplemented\n", s->mode); + } + if (s->be != 0) { + qemu_log_mask(LOG_UNIMP, + "omap_mmc_wr: Big Endian not implemented\n"); + } if (s->dw != 0 && s->lines < 4) printf("4-bit SD bus enabled\n"); if (!s->enable) diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 5059396bc6..b29bfd3124 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -66,6 +66,8 @@ /* CEx Control Register */ #define R_CTRL0 (0x10 / 4) +#define CTRL_IO_DUAL_DATA (1 << 29) +#define CTRL_IO_DUAL_ADDR_DATA (1 << 28) /* Includes dummies */ #define CTRL_CMD_SHIFT 16 #define CTRL_CMD_MASK 0xff #define CTRL_DUMMY_HIGH_SHIFT 14 @@ -492,14 +494,20 @@ static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl) uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id]; uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1; uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3; + uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8; - return ((dummy_high << 2) | dummy_low) * 8; + if (r_ctrl0 & CTRL_IO_DUAL_ADDR_DATA) { + dummies /= 2; + } + + return dummies; } -static void aspeed_smc_flash_send_addr(AspeedSMCFlash *fl, uint32_t addr) +static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr) { const AspeedSMCState *s = fl->controller; uint8_t cmd = aspeed_smc_flash_cmd(fl); + int i; /* Flash access can not exceed CS segment */ addr = aspeed_smc_check_segment_addr(fl, addr); @@ -512,6 +520,18 @@ static void aspeed_smc_flash_send_addr(AspeedSMCFlash *fl, uint32_t addr) ssi_transfer(s->spi, (addr >> 16) & 0xff); ssi_transfer(s->spi, (addr >> 8) & 0xff); ssi_transfer(s->spi, (addr & 0xff)); + + /* + * Use fake transfers to model dummy bytes. The value should + * be configured to some non-zero value in fast read mode and + * zero in read mode. But, as the HW allows inconsistent + * settings, let's check for fast read mode. + */ + if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) { + for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) { + ssi_transfer(fl->controller->spi, 0xFF); + } + } } static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) @@ -530,19 +550,7 @@ static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) case CTRL_READMODE: case CTRL_FREADMODE: aspeed_smc_flash_select(fl); - aspeed_smc_flash_send_addr(fl, addr); - - /* - * Use fake transfers to model dummy bytes. The value should - * be configured to some non-zero value in fast read mode and - * zero in read mode. But, as the HW allows inconsistent - * settings, let's check for fast read mode. 
- */ - if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) { - for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) { - ssi_transfer(fl->controller->spi, 0xFF); - } - } + aspeed_smc_flash_setup(fl, addr); for (i = 0; i < size; i++) { ret |= ssi_transfer(s->spi, 0x0) << (8 * i); @@ -579,7 +587,7 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, break; case CTRL_WRITEMODE: aspeed_smc_flash_select(fl); - aspeed_smc_flash_send_addr(fl, addr); + aspeed_smc_flash_setup(fl, addr); for (i = 0; i < size; i++) { ssi_transfer(s->spi, (data >> (8 * i)) & 0xff); @@ -632,23 +640,17 @@ static void aspeed_smc_reset(DeviceState *d) aspeed_smc_segment_to_reg(&s->ctrl->segments[i]); } - /* HW strapping for AST2500 FMC controllers */ + /* HW strapping flash type for FMC controllers */ if (s->ctrl->segments == aspeed_segments_ast2500_fmc) { /* flash type is fixed to SPI for CE0 and CE1 */ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1); - - /* 4BYTE mode is autodetected for CE0. Let's force it to 1 for - * now */ - s->regs[s->r_ce_ctrl] |= (1 << (CTRL_EXTENDED0)); } /* HW strapping for AST2400 FMC controllers (SCU70). Let's use the * configuration of the palmetto-bmc machine */ if (s->ctrl->segments == aspeed_segments_fmc) { s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); - - s->regs[s->r_ce_ctrl] |= (1 << (CTRL_EXTENDED0)); } } diff --git a/hw/ssi/omap_spi.c b/hw/ssi/omap_spi.c index 34163e5646..f278a55160 100644 --- a/hw/ssi/omap_spi.c +++ b/hw/ssi/omap_spi.c @@ -20,6 +20,7 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "hw/arm/omap.h" @@ -294,11 +295,15 @@ static void omap_mcspi_write(void *opaque, hwaddr addr, case 0x2c: /* MCSPI_CHCONF */ if ((value ^ s->ch[ch].config) & (3 << 14)) /* DMAR | DMAW */ omap_mcspi_dmarequest_update(s->ch + ch); - if (((value >> 12) & 3) == 3) /* TRM */ - fprintf(stderr, "%s: invalid TRM value (3)\n", __func__); - if (((value >> 7) & 0x1f) < 3) /* WL */ - fprintf(stderr, "%s: invalid WL value (%" PRIx64 ")\n", - __func__, (value >> 7) & 0x1f); + if (((value >> 12) & 3) == 3) { /* TRM */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid TRM value (3)\n", + __func__); + } + if (((value >> 7) & 0x1f) < 3) { /* WL */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid WL value (%" PRIx64 ")\n", + __func__, (value >> 7) & 0x1f); + } s->ch[ch].config = value & 0x7fffff; break; diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index f599025956..c052bfc4b3 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -851,12 +851,17 @@ static void xlnx_zynqmp_qspips_notify(void *opaque) { size_t ret; uint32_t num; - const void *rxd = pop_buf(recv_fifo, 4, &num); + const void *rxd; + int len; + + len = recv_fifo->num >= rq->dma_burst_size ? 
rq->dma_burst_size : + recv_fifo->num; + rxd = pop_buf(recv_fifo, len, &num); memcpy(rq->dma_buf, rxd, num); - ret = stream_push(rq->dma, rq->dma_buf, 4); - assert(ret == 4); + ret = stream_push(rq->dma, rq->dma_buf, num); + assert(ret == num); xlnx_zynqmp_qspips_check_flush(rq); } } @@ -1333,6 +1338,12 @@ static void xlnx_zynqmp_qspips_realize(DeviceState *dev, Error **errp) XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(dev); XilinxSPIPSClass *xsc = XILINX_SPIPS_GET_CLASS(s); + if (s->dma_burst_size > QSPI_DMA_MAX_BURST_SIZE) { + error_setg(errp, + "qspi dma burst size %u exceeds maximum limit %d", + s->dma_burst_size, QSPI_DMA_MAX_BURST_SIZE); + return; + } xilinx_qspips_realize(dev, errp); fifo8_create(&s->rx_fifo_g, xsc->rx_fifo_size); fifo8_create(&s->tx_fifo_g, xsc->tx_fifo_size); @@ -1411,6 +1422,11 @@ static const VMStateDescription vmstate_xlnx_zynqmp_qspips = { } }; +static Property xilinx_zynqmp_qspips_properties[] = { + DEFINE_PROP_UINT32("dma-burst-size", XlnxZynqMPQSPIPS, dma_burst_size, 64), + DEFINE_PROP_END_OF_LIST(), +}; + static Property xilinx_qspips_properties[] = { /* We had to turn this off for 2.10 as it is not compatible with migration. * It can be enabled but will prevent the device to be migrated. @@ -1463,6 +1479,7 @@ static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, void * data) dc->realize = xlnx_zynqmp_qspips_realize; dc->reset = xlnx_zynqmp_qspips_reset; dc->vmsd = &vmstate_xlnx_zynqmp_qspips; + dc->props = xilinx_zynqmp_qspips_properties; xsc->reg_ops = &xlnx_zynqmp_qspips_ops; xsc->rx_fifo_size = RXFF_A_Q; xsc->tx_fifo_size = TXFF_A_Q; diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index 1e31e22b6f..5e3f51b66b 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -10,8 +10,10 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "hw/timer/aspeed_timer.h" +#include "hw/misc/aspeed_scu.h" #include "qemu-common.h" #include "qemu/bitops.h" #include "qemu/timer.h" @@ -26,7 +28,6 @@ #define TIMER_CLOCK_USE_EXT true #define TIMER_CLOCK_EXT_HZ 1000000 #define TIMER_CLOCK_USE_APB false -#define TIMER_CLOCK_APB_HZ 24000000 #define TIMER_REG_STATUS 0 #define TIMER_REG_RELOAD 1 @@ -80,11 +81,11 @@ static inline bool timer_external_clock(AspeedTimer *t) return timer_ctrl_status(t, op_external_clock); } -static uint32_t clock_rates[] = { TIMER_CLOCK_APB_HZ, TIMER_CLOCK_EXT_HZ }; - static inline uint32_t calculate_rate(struct AspeedTimer *t) { - return clock_rates[timer_external_clock(t)]; + AspeedTimerCtrlState *s = timer_to_ctrl(t); + + return timer_external_clock(t) ? TIMER_CLOCK_EXT_HZ : s->scu->apb_freq; } static inline uint32_t calculate_ticks(struct AspeedTimer *t, uint64_t now_ns) @@ -449,6 +450,16 @@ static void aspeed_timer_realize(DeviceState *dev, Error **errp) int i; SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedTimerCtrlState *s = ASPEED_TIMER(dev); + Object *obj; + Error *err = NULL; + + obj = object_property_get_link(OBJECT(dev), "scu", &err); + if (!obj) { + error_propagate(errp, err); + error_prepend(errp, "required link 'scu' not found: "); + return; + } + s->scu = ASPEED_SCU(obj); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { aspeed_init_one_timer(s, i); diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 7fa726b8e3..7338f57062 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -330,11 +330,14 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) /* Set if TLB entry is an IO callback. 
*/ #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) +/* Set if TLB entry must have MMU lookup repeated for every access */ +#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4)) /* Use this mask to check interception with an alignment mask * in a TCG backend. */ -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) +#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ + | TLB_RECHECK) void dump_exec_info(FILE *f, fprintf_function cpu_fprintf); void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf); diff --git a/include/hw/arm/omap.h b/include/hw/arm/omap.h index b398607b06..e7fbd340f3 100644 --- a/include/hw/arm/omap.h +++ b/include/hw/arm/omap.h @@ -21,6 +21,7 @@ # define hw_omap_h "omap.h" #include "hw/irq.h" #include "target/arm/cpu-qom.h" +#include "qemu/log.h" # define OMAP_EMIFS_BASE 0x00000000 # define OMAP2_Q0_BASE 0x00000000 @@ -944,8 +945,6 @@ struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sysmem, unsigned long sdram_size, const char *core); -#define OMAP_FMT_plx "%#08" HWADDR_PRIx - uint32_t omap_badwidth_read8(void *opaque, hwaddr addr); void omap_badwidth_write8(void *opaque, hwaddr addr, uint32_t value); @@ -959,11 +958,12 @@ void omap_badwidth_write32(void *opaque, hwaddr addr, void omap_mpu_wakeup(void *opaque, int irq, int req); # define OMAP_BAD_REG(paddr) \ - fprintf(stderr, "%s: Bad register " OMAP_FMT_plx "\n", \ - __func__, paddr) + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad register %#08"HWADDR_PRIx"\n", \ + __func__, paddr) # define OMAP_RO_REG(paddr) \ - fprintf(stderr, "%s: Read-only register " OMAP_FMT_plx "\n", \ - __func__, paddr) + qemu_log_mask(LOG_GUEST_ERROR, "%s: Read-only register %#08" \ + HWADDR_PRIx "\n", \ + __func__, paddr) /* OMAP-specific Linux bootloader tags for the ATAG_BOARD area (Board-specifc tags are not here) */ @@ -993,24 +993,6 @@ enum { #define OMAP_GPIOSW_INVERTED 0x0001 #define OMAP_GPIOSW_OUTPUT 0x0002 -# define TCMI_VERBOSE 1 - -# ifdef TCMI_VERBOSE -# define OMAP_8B_REG(paddr) \ - fprintf(stderr, "%s: 8-bit register " OMAP_FMT_plx "\n", \ - __func__, paddr) -# define OMAP_16B_REG(paddr) \ - fprintf(stderr, "%s: 16-bit register " OMAP_FMT_plx "\n", \ - __func__, paddr) -# define OMAP_32B_REG(paddr) \ - fprintf(stderr, "%s: 32-bit register " OMAP_FMT_plx "\n", \ - __func__, paddr) -# else -# define OMAP_8B_REG(paddr) -# define OMAP_16B_REG(paddr) -# define OMAP_32B_REG(paddr) -# endif - # define OMAP_MPUI_REG_MASK 0x000007ff #endif /* hw_omap_h */ diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index c41eb5c3b0..50e2912a95 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -67,6 +67,8 @@ typedef struct SMMUTransCfg { uint8_t tbi; /* Top Byte Ignore */ uint16_t asid; SMMUTransTableInfo tt[2]; + uint32_t iotlb_hits; /* counts IOTLB hits for this asid */ + uint32_t iotlb_misses; /* counts IOTLB misses for this asid */ } SMMUTransCfg; typedef struct SMMUDevice { @@ -75,6 +77,8 @@ typedef struct SMMUDevice { int devfn; IOMMUMemoryRegion iommu; AddressSpace as; + uint32_t cfg_cache_hits; + uint32_t cfg_cache_misses; } SMMUDevice; typedef struct SMMUNotifierNode { @@ -87,6 +91,11 @@ typedef struct SMMUPciBus { SMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */ } SMMUPciBus; +typedef struct SMMUIOTLBKey { + uint64_t iova; + uint16_t asid; +} SMMUIOTLBKey; + typedef struct SMMUState { /* <private> */ SysBusDevice dev; @@ -142,4 +151,19 @@ int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm, */ SMMUTransTableInfo 
*select_tt(SMMUTransCfg *cfg, dma_addr_t iova); +/* Return the iommu mr associated to @sid, or NULL if none */ +IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid); + +#define SMMU_IOTLB_MAX_SIZE 256 + +void smmu_iotlb_inv_all(SMMUState *s); +void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid); +void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova); + +/* Unmap the range of all the notifiers registered to any IOMMU mr */ +void smmu_inv_notifiers_all(SMMUState *s); + +/* Unmap the range of all the notifiers registered to @mr */ +void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr); + #endif /* HW_ARM_SMMU_COMMON */ diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h index 23f70363e5..36b2f45253 100644 --- a/include/hw/arm/smmuv3.h +++ b/include/hw/arm/smmuv3.h @@ -59,6 +59,7 @@ typedef struct SMMUv3State { SMMUQueue eventq, cmdq; qemu_irq irq[4]; + QemuMutex mutex; } SMMUv3State; typedef enum { diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h index d70cc0aeca..f662c38188 100644 --- a/include/hw/misc/aspeed_scu.h +++ b/include/hw/misc/aspeed_scu.h @@ -30,6 +30,10 @@ typedef struct AspeedSCUState { uint32_t hw_strap1; uint32_t hw_strap2; uint32_t hw_prot_key; + + uint32_t clkin; + uint32_t hpll; + uint32_t apb_freq; } AspeedSCUState; #define AST2400_A0_SILICON_REV 0x02000303U @@ -58,7 +62,64 @@ extern bool is_supported_silicon_rev(uint32_t silicon_rev); * 1. 2012/12/29 Ryan Chen Create */ -/* Hardware Strapping Register definition (for Aspeed AST2400 SOC) +/* SCU08 Clock Selection Register + * + * 31 Enable Video Engine clock dynamic slow down + * 30:28 Video Engine clock slow down setting + * 27 2D Engine GCLK clock source selection + * 26 2D Engine GCLK clock throttling enable + * 25:23 APB PCLK divider selection + * 22:20 LPC Host LHCLK divider selection + * 19 LPC Host LHCLK clock generation/output enable control + * 18:16 MAC AHB bus clock divider selection + * 15 SD/SDIO clock running enable + * 14:12 SD/SDIO divider selection + * 11 Reserved + * 10:8 Video port output clock delay control bit + * 7 ARM CPU/AHB clock slow down enable + * 6:4 ARM CPU/AHB clock slow down setting + * 3:2 ECLK clock source selection + * 1 CPU/AHB clock slow down idle timer + * 0 CPU/AHB clock dynamic slow down enable (defined in bit[6:4]) + */ +#define SCU_CLK_GET_PCLK_DIV(x) (((x) >> 23) & 0x7) + +/* SCU24 H-PLL Parameter Register (for Aspeed AST2400 SOC) + * + * 18 H-PLL parameter selection + * 0: Select H-PLL by strapping resistors + * 1: Select H-PLL by the programmed registers (SCU24[17:0]) + * 17 Enable H-PLL bypass mode + * 16 Turn off H-PLL + * 10:5 H-PLL Numerator + * 4 H-PLL Output Divider + * 3:0 H-PLL Denumerator + * + * (Output frequency) = 24MHz * (2-OD) * [(Numerator+2) / (Denumerator+1)] + */ + +#define SCU_AST2400_H_PLL_PROGRAMMED (0x1 << 18) +#define SCU_AST2400_H_PLL_BYPASS_EN (0x1 << 17) +#define SCU_AST2400_H_PLL_OFF (0x1 << 16) + +/* SCU24 H-PLL Parameter Register (for Aspeed AST2500 SOC) + * + * 21 Enable H-PLL reset + * 20 Enable H-PLL bypass mode + * 19 Turn off H-PLL + * 18:13 H-PLL Post Divider + * 12:5 H-PLL Numerator (M) + * 4:0 H-PLL Denumerator (N) + * + * (Output frequency) = CLKIN(24MHz) * [(M+1) / (N+1)] / (P+1) + * + * The default frequency is 792Mhz when CLKIN = 24MHz + */ + +#define SCU_H_PLL_BYPASS_EN (0x1 << 20) +#define SCU_H_PLL_OFF (0x1 << 19) + +/* SCU70 Hardware Strapping Register definition (for Aspeed AST2400 SOC) * * 31:29 Software defined strapping registers * 28:27 DRAM size setting (for VGA driver 
use) @@ -107,12 +168,13 @@ extern bool is_supported_silicon_rev(uint32_t silicon_rev); #define SCU_AST2400_HW_STRAP_GET_CLK_SOURCE(x) (((((x) >> 23) & 0x1) << 1) \ | (((x) >> 18) & 0x1)) #define SCU_AST2400_HW_STRAP_CLK_SOURCE_MASK ((0x1 << 23) | (0x1 << 18)) -#define AST2400_CLK_25M_IN (0x1 << 23) +#define SCU_HW_STRAP_CLK_25M_IN (0x1 << 23) #define AST2400_CLK_24M_IN 0 #define AST2400_CLK_48M_IN 1 #define AST2400_CLK_25M_IN_24M_USB_CKI 2 #define AST2400_CLK_25M_IN_48M_USB_CKI 3 +#define SCU_HW_STRAP_CLK_48M_IN (0x1 << 18) #define SCU_HW_STRAP_2ND_BOOT_WDT (0x1 << 17) #define SCU_HW_STRAP_SUPER_IO_CONFIG (0x1 << 16) #define SCU_HW_STRAP_VGA_CLASS_CODE (0x1 << 15) @@ -160,8 +222,8 @@ extern bool is_supported_silicon_rev(uint32_t silicon_rev); #define AST2400_DIS_BOOT 3 /* - * Hardware strapping register definition (for Aspeed AST2500 SoC and - * higher) + * SCU70 Hardware strapping register definition (for Aspeed AST2500 + * SoC and higher) * * 31 Enable SPI Flash Strap Auto Fetch Mode * 30 Enable GPIO Strap Mode diff --git a/include/hw/ssi/xilinx_spips.h b/include/hw/ssi/xilinx_spips.h index d398a4e81c..a0a0ae7584 100644 --- a/include/hw/ssi/xilinx_spips.h +++ b/include/hw/ssi/xilinx_spips.h @@ -37,6 +37,8 @@ typedef struct XilinxSPIPS XilinxSPIPS; /* Bite off 4k chunks at a time */ #define LQSPI_CACHE_SIZE 1024 +#define QSPI_DMA_MAX_BURST_SIZE 2048 + typedef enum { READ = 0x3, READ_4 = 0x13, FAST_READ = 0xb, FAST_READ_4 = 0x0c, @@ -95,7 +97,6 @@ typedef struct { XilinxQSPIPS parent_obj; StreamSlave *dma; - uint8_t dma_buf[4]; int gqspi_irqline; uint32_t regs[XLNX_ZYNQMP_SPIPS_R_MAX]; @@ -113,6 +114,8 @@ typedef struct { uint8_t rx_fifo_g_align; uint8_t tx_fifo_g_align; bool man_start_com_g; + uint32_t dma_burst_size; + uint8_t dma_buf[QSPI_DMA_MAX_BURST_SIZE]; } XlnxZynqMPQSPIPS; typedef struct XilinxSPIPSClass { diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h index bd6c1a7f96..040a088734 100644 --- a/include/hw/timer/aspeed_timer.h +++ b/include/hw/timer/aspeed_timer.h @@ -24,6 +24,8 @@ #include "qemu/timer.h" +typedef struct AspeedSCUState AspeedSCUState; + #define ASPEED_TIMER(obj) \ OBJECT_CHECK(AspeedTimerCtrlState, (obj), TYPE_ASPEED_TIMER); #define TYPE_ASPEED_TIMER "aspeed.timer" @@ -55,6 +57,8 @@ typedef struct AspeedTimerCtrlState { uint32_t ctrl; uint32_t ctrl2; AspeedTimer timers[ASPEED_TIMER_NR_TIMERS]; + + AspeedSCUState *scu; } AspeedTimerCtrlState; #endif /* ASPEED_TIMER_H */ diff --git a/target/arm/helper.c b/target/arm/helper.c index 1248d84e6f..3c6a4c565b 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -41,6 +41,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, /* Security attributes for an address, as returned by v8m_security_lookup. 
*/ typedef struct V8M_SAttributes { + bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */ bool ns; bool nsc; uint8_t sregion; @@ -9596,6 +9597,7 @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address) static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, + target_ulong *page_size, ARMMMUFaultInfo *fi) { ARMCPU *cpu = arm_env_get_cpu(env); @@ -9603,6 +9605,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, bool is_user = regime_is_user(env, mmu_idx); *phys_ptr = address; + *page_size = TARGET_PAGE_SIZE; *prot = 0; if (regime_translation_disabled(env, mmu_idx) || @@ -9675,16 +9678,12 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, rsize++; } } - if (rsize < TARGET_PAGE_BITS) { - qemu_log_mask(LOG_UNIMP, - "DRSR[%d]: No support for MPU (sub)region size of" - " %" PRIu32 " bytes. Minimum is %d.\n", - n, (1 << rsize), TARGET_PAGE_SIZE); - continue; - } if (srdis) { continue; } + if (rsize < TARGET_PAGE_BITS) { + *page_size = 1 << rsize; + } break; } @@ -9765,6 +9764,17 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, fi->type = ARMFault_Permission; fi->level = 1; + /* + * Core QEMU code can't handle execution from small pages yet, so + * don't try it. This way we'll get an MPU exception, rather than + * eventually causing QEMU to exit in get_page_addr_code(). + */ + if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) { + qemu_log_mask(LOG_UNIMP, + "MPU: No support for execution from regions " + "smaller than 1K\n"); + *prot &= ~PAGE_EXEC; + } return !(*prot & (1 << access_type)); } @@ -9795,6 +9805,8 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, int r; bool idau_exempt = false, idau_ns = true, idau_nsc = true; int idau_region = IREGION_NOTVALID; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); if (cpu->idau) { IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); @@ -9832,6 +9844,9 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, uint32_t limit = env->sau.rlar[r] | 0x1f; if (base <= address && limit >= address) { + if (base > addr_page_base || limit < addr_page_limit) { + sattrs->subpage = true; + } if (sattrs->srvalid) { /* If we hit in more than one region then we must report * as Secure, not NS-Callable, with no valid region @@ -9871,13 +9886,16 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion) + int *prot, bool *is_subpage, + ARMMMUFaultInfo *fi, uint32_t *mregion) { /* Perform a PMSAv8 MPU lookup (without also doing the SAU check * that a full phys-to-virt translation does). * mregion is (if not NULL) set to the region number which matched, * or -1 if no region number is returned (MPU off, address did not * hit a region, address hit in multiple regions). + * We set is_subpage to true if the region hit doesn't cover the + * entire TARGET_PAGE the address is within. 
*/ ARMCPU *cpu = arm_env_get_cpu(env); bool is_user = regime_is_user(env, mmu_idx); @@ -9885,7 +9903,10 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, int n; int matchregion = -1; bool hit = false; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + *is_subpage = false; *phys_ptr = address; *prot = 0; if (mregion) { @@ -9923,6 +9944,10 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, continue; } + if (base > addr_page_base || limit < addr_page_limit) { + *is_subpage = true; + } + if (hit) { /* Multiple regions match -- always a failure (unlike * PMSAv7 where highest-numbered-region wins) @@ -9934,23 +9959,6 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, matchregion = n; hit = true; - - if (base & ~TARGET_PAGE_MASK) { - qemu_log_mask(LOG_UNIMP, - "MPU_RBAR[%d]: No support for MPU region base" - "address of 0x%" PRIx32 ". Minimum alignment is " - "%d\n", - n, base, TARGET_PAGE_BITS); - continue; - } - if ((limit + 1) & ~TARGET_PAGE_MASK) { - qemu_log_mask(LOG_UNIMP, - "MPU_RBAR[%d]: No support for MPU region limit" - "address of 0x%" PRIx32 ". Minimum alignment is " - "%d\n", - n, limit, TARGET_PAGE_BITS); - continue; - } } } @@ -9986,6 +9994,18 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, fi->type = ARMFault_Permission; fi->level = 1; + /* + * Core QEMU code can't handle execution from small pages yet, so + * don't try it. This means any attempted execution will generate + * an MPU exception, rather than eventually causing QEMU to exit in + * get_page_addr_code(). + */ + if (*is_subpage && (*prot & PAGE_EXEC)) { + qemu_log_mask(LOG_UNIMP, + "MPU: No support for execution from regions " + "smaller than 1K\n"); + *prot &= ~PAGE_EXEC; + } return !(*prot & (1 << access_type)); } @@ -9993,10 +10013,13 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, - int *prot, ARMMMUFaultInfo *fi) + int *prot, target_ulong *page_size, + ARMMMUFaultInfo *fi) { uint32_t secure = regime_is_secure(env, mmu_idx); V8M_SAttributes sattrs = {}; + bool ret; + bool mpu_is_subpage; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); @@ -10024,6 +10047,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, } else { fi->type = ARMFault_QEMU_SFault; } + *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; *phys_ptr = address; *prot = 0; return true; @@ -10046,6 +10070,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). */ fi->type = ARMFault_QEMU_SFault; + *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; *phys_ptr = address; *prot = 0; return true; @@ -10053,8 +10078,22 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, } } - return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, - txattrs, prot, fi, NULL); + ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, + txattrs, prot, &mpu_is_subpage, fi, NULL); + /* + * TODO: this is a temporary hack to ignore the fact that the SAU region + * is smaller than a page if this is an executable region. 
We never + * supported small MPU regions, but we did (accidentally) allow small + * SAU regions, and if we now made small SAU regions not be executable + * then this would break previously working guest code. We can't + * remove this until/unless we implement support for execution from + * small regions. + */ + if (*prot & PAGE_EXEC) { + sattrs.subpage = false; + } + *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; + return ret; } static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, @@ -10330,11 +10369,11 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, if (arm_feature(env, ARM_FEATURE_V8)) { /* PMSAv8 */ ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, - phys_ptr, attrs, prot, fi); + phys_ptr, attrs, prot, page_size, fi); } else if (arm_feature(env, ARM_FEATURE_V7)) { /* PMSAv7 */ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, - phys_ptr, prot, fi); + phys_ptr, prot, page_size, fi); } else { /* Pre-v7 MPU */ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, @@ -10396,9 +10435,15 @@ bool arm_tlb_fill(CPUState *cs, vaddr address, core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, &attrs, &prot, &page_size, fi, NULL); if (!ret) { - /* Map a single [sub]page. */ - phys_addr &= TARGET_PAGE_MASK; - address &= TARGET_PAGE_MASK; + /* + * Map a single [sub]page. Regions smaller than our declared + * target page size are handled specially, so for those we + * pass in the exact addresses. + */ + if (page_size >= TARGET_PAGE_SIZE) { + phys_addr &= TARGET_PAGE_MASK; + address &= TARGET_PAGE_MASK; + } tlb_set_page_with_attrs(cs, address, phys_addr, attrs, prot, mmu_idx, page_size); return 0; @@ -10742,6 +10787,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) uint32_t mregion; bool targetpriv; bool targetsec = env->v7m.secure; + bool is_subpage; /* Work out what the security state and privilege level we're * interested in is... @@ -10771,7 +10817,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) if (arm_current_el(env) != 0 || alt) { /* We can ignore the return value as prot is always set */ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, - &phys_addr, &attrs, &prot, &fi, &mregion); + &phys_addr, &attrs, &prot, &is_subpage, + &fi, &mregion); if (mregion == -1) { mrvalid = false; mregion = 0; |
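
Editorial note on the hw/misc/aspeed_scu.c changes above: the SCU24 comments give the H-PLL output formulas for both SoC generations, and aspeed_scu_calc_hpll_ast2500() implements the AST2500 one directly. A minimal standalone sketch of that formula, with hypothetical field values (the real values come from the HPLL_PARAM register and strapping at reset), shows how the 792MHz default quoted for a 24MHz CLKIN falls out of the arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* AST2500: (Output frequency) = CLKIN * [(M + 1) / (N + 1)] / (P + 1) */
static uint32_t ast2500_hpll_hz(uint32_t clkin_hz, uint32_t p, uint32_t m,
                                uint32_t n)
{
    return clkin_hz * ((m + 1) / (n + 1)) / (p + 1);
}

int main(void)
{
    /* Hypothetical encoding: M = 32, N = 0, P = 0 gives a x33 multiplier,
     * i.e. 24MHz * 33 = 792MHz. */
    printf("HPLL = %" PRIu32 " Hz\n", ast2500_hpll_hz(24000000, 0, 32, 0));
    return 0;
}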
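
Editorial note on the hw/ssi/aspeed_smc.c change above, as a worked example with hypothetical control register fields: with dummy_high = 0 and dummy_low = 2, aspeed_smc_flash_dummies() returns ((0 << 2) | 2) * 8 = 16 fake transfers, and if CTRL_IO_DUAL_ADDR_DATA is also set the count is halved to 8. aspeed_smc_flash_setup() only issues those transfers in fast read mode (CTRL_FREADMODE), as the comment moved from the read path explains.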
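
Editorial note on the hw/ssi/xilinx_spips.c change above: the new "dma-burst-size" property defaults to 64 and is rejected by xlnx_zynqmp_qspips_realize() when it exceeds QSPI_DMA_MAX_BURST_SIZE (2048). A hedged sketch of overriding it from board code before realize, assuming the usual qdev property helpers of this period rather than anything from this series:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"

/* 'qspi' is an already-created, not yet realized ZynqMP generic QSPI
 * controller instance (the surrounding wiring is assumed). */
static void example_set_qspi_burst(DeviceState *qspi)
{
    /* Values above 2048 make realize fail with
     * "qspi dma burst size ... exceeds maximum limit". */
    qdev_prop_set_uint32(qspi, "dma-burst-size", 256);
    qdev_init_nofail(qspi);
}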
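
Editorial note on the include/exec/cpu-all.h change above: TLB_RECHECK joins the other status flags packed into the low bits of a page-aligned TLB comparator, so any flagged entry drops off the fast path, and a TLB_RECHECK entry has its MMU lookup repeated on every access. A small self-contained sketch of the bit layout; TARGET_PAGE_BITS and the TLB_INVALID_MASK position are assumed here purely for illustration:

#include <stdbool.h>
#include <stdint.h>

#define TARGET_PAGE_BITS   12                            /* assumed value */
#define TLB_INVALID_MASK   (1 << (TARGET_PAGE_BITS - 1)) /* assumed bit   */
#define TLB_NOTDIRTY       (1 << (TARGET_PAGE_BITS - 2))
#define TLB_MMIO           (1 << (TARGET_PAGE_BITS - 3))
#define TLB_RECHECK        (1 << (TARGET_PAGE_BITS - 4))
#define TLB_FLAGS_MASK     (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
                            | TLB_RECHECK)

static bool tlb_access_is_fast(uint32_t tlb_addr)
{
    /* Any status flag forces the access off the fast path; TLB_RECHECK
     * additionally makes the slow path redo the full MMU translation. */
    return (tlb_addr & TLB_FLAGS_MASK) == 0;
}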