author     Peter Maydell <peter.maydell@linaro.org>   2017-06-02 13:05:06 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2017-06-02 13:05:06 +0100
commit     7693cd7cb6229ca034faa88497ddb3a5f27cfb41 (patch)
tree       608beeca076587496c1b9bb07a4cf1f3c8b5c001
parent     43771d5d92312504305c19abe29ec5bfabd55f01 (diff)
parent     c7637c04be257968e6df30de961a6a23a0ac3dd8 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170602' into staging
target-arm queue:
 * virt: numa: provide ACPI distance info when needed
 * aspeed: fix i2c controller bugs
 * M profile: support MPU
 * gicv3: fix mishandling of BPR1, VBPR1
 * load_uboot_image: don't assume a full header read
 * libvixl: Correct build failures on NetBSD

# gpg: Signature made Fri 02 Jun 2017 12:00:42 BST
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20170602: (25 commits)
  hw/arm/virt: fdt: generate distance-map when needed
  hw/arm/virt-acpi-build: build SLIT when needed
  aspeed: add some I2C devices to the Aspeed machines
  aspeed/i2c: introduce a state machine
  aspeed/i2c: handle LAST command under the RX command
  aspeed/i2c: improve command handling
  arm: Implement HFNMIENA support for M profile MPU
  arm: add MPU support to M profile CPUs
  armv7m: Classify faults as MemManage or BusFault
  arm: All M profile cores are PMSA
  armv7m: Implement M profile default memory map
  armv7m: Improve "-d mmu" tracing for PMSAv7 MPU
  arm: Remove unnecessary check on cpu->pmsav7_dregion
  arm: Don't let no-MPU PMSA cores write to SCTLR.M
  arm: Don't clear ARM_FEATURE_PMSA for no-mpu configs
  arm: Clean up handling of no-MPU PMSA CPUs
  arm: Use different ARMMMUIdx values for M profile
  arm: Add support for M profile CPUs having different MMU index semantics
  arm: Use the mmu_idx we're passed in arm_cpu_do_unaligned_access()
  target/arm: clear PMUVER field of AA64DFR0 when vPMU=off
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  disas/libvixl/Makefile.objs |   3
-rw-r--r--  hw/arm/aspeed.c             |  27
-rw-r--r--  hw/arm/virt-acpi-build.c    |   4
-rw-r--r--  hw/arm/virt.c               |  21
-rw-r--r--  hw/core/loader.c            |   3
-rw-r--r--  hw/i2c/aspeed_i2c.c         |  65
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c   |  50
-rw-r--r--  hw/intc/armv7m_nvic.c       | 104
-rw-r--r--  target/arm/cpu.c            |  28
-rw-r--r--  target/arm/cpu.h            | 118
-rw-r--r--  target/arm/helper.c         | 338
-rw-r--r--  target/arm/machine.c        |   7
-rw-r--r--  target/arm/op_helper.c      |   3
-rw-r--r--  target/arm/translate-a64.c  |  18
-rw-r--r--  target/arm/translate.c      |  14
-rw-r--r--  target/arm/translate.h      |   2
16 files changed, 648 insertions(+), 157 deletions(-)
diff --git a/disas/libvixl/Makefile.objs b/disas/libvixl/Makefile.objs
index bbe7695fdb..860fb7f384 100644
--- a/disas/libvixl/Makefile.objs
+++ b/disas/libvixl/Makefile.objs
@@ -7,5 +7,8 @@ libvixl_OBJS = vixl/utils.o \
# The -Wno-sign-compare is needed only for gcc 4.6, which complains about
# some signed-unsigned equality comparisons which later gcc versions do not.
$(addprefix $(obj)/,$(libvixl_OBJS)): QEMU_CFLAGS := -I$(SRC_PATH)/disas/libvixl $(QEMU_CFLAGS) -Wno-sign-compare
+# Ensure that C99 macros are defined regardless of the inclusion order of
+# headers in vixl. This is required at least on NetBSD.
+$(addprefix $(obj)/,$(libvixl_OBJS)): QEMU_CFLAGS += -D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS
common-obj-$(CONFIG_ARM_A64_DIS) += $(libvixl_OBJS)
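For reference, the failure these guards address: before C++11, <stdint.h> and <inttypes.h> expose the C99 constant/limit/format macros to C++ translation units only when __STDC_CONSTANT_MACROS, __STDC_LIMIT_MACROS and __STDC_FORMAT_MACROS are defined before the headers are first included, which NetBSD's headers enforce. A minimal sketch of code that needs them (it compiles as plain C regardless; the guards matter when built as C++, as vixl is):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* UINT64_C requires __STDC_CONSTANT_MACROS in pre-C++11 C++ */
        uint64_t v = UINT64_C(0x123456789abcdef0);
        /* PRIx64 requires __STDC_FORMAT_MACROS in pre-C++11 C++ */
        printf("v = 0x%" PRIx64 "\n", v);
        return 0;
    }

Passing the defines on the command line, as the Makefile rule above does, makes the build independent of header inclusion order inside vixl.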
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index 283c038814..e824ea87a9 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -39,6 +39,7 @@ typedef struct AspeedBoardConfig {
const char *fmc_model;
const char *spi_model;
uint32_t num_cs;
+ void (*i2c_init)(AspeedBoardState *bmc);
} AspeedBoardConfig;
enum {
@@ -82,6 +83,9 @@ enum {
SCU_AST2500_HW_STRAP_ACPI_ENABLE | \
SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER))
+static void palmetto_bmc_i2c_init(AspeedBoardState *bmc);
+static void ast2500_evb_i2c_init(AspeedBoardState *bmc);
+
static const AspeedBoardConfig aspeed_boards[] = {
[PALMETTO_BMC] = {
.soc_name = "ast2400-a1",
@@ -89,6 +93,7 @@ static const AspeedBoardConfig aspeed_boards[] = {
.fmc_model = "n25q256a",
.spi_model = "mx25l25635e",
.num_cs = 1,
+ .i2c_init = palmetto_bmc_i2c_init,
},
[AST2500_EVB] = {
.soc_name = "ast2500-a1",
@@ -96,6 +101,7 @@ static const AspeedBoardConfig aspeed_boards[] = {
.fmc_model = "n25q256a",
.spi_model = "mx25l25635e",
.num_cs = 1,
+ .i2c_init = ast2500_evb_i2c_init,
},
[ROMULUS_BMC] = {
.soc_name = "ast2500-a1",
@@ -223,9 +229,22 @@ static void aspeed_board_init(MachineState *machine,
aspeed_board_binfo.ram_size = ram_size;
aspeed_board_binfo.loader_start = sc->info->sdram_base;
+ if (cfg->i2c_init) {
+ cfg->i2c_init(bmc);
+ }
+
arm_load_kernel(ARM_CPU(first_cpu), &aspeed_board_binfo);
}
+static void palmetto_bmc_i2c_init(AspeedBoardState *bmc)
+{
+ AspeedSoCState *soc = &bmc->soc;
+
+ /* The palmetto platform expects a ds3231 RTC but a ds1338 is
+ * enough to provide basic RTC features. Alarms will be missing */
+ i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 0), "ds1338", 0x68);
+}
+
static void palmetto_bmc_init(MachineState *machine)
{
aspeed_board_init(machine, &aspeed_boards[PALMETTO_BMC]);
@@ -250,6 +269,14 @@ static const TypeInfo palmetto_bmc_type = {
.class_init = palmetto_bmc_class_init,
};
+static void ast2500_evb_i2c_init(AspeedBoardState *bmc)
+{
+ AspeedSoCState *soc = &bmc->soc;
+
+ /* The AST2500 EVB expects a LM75 but a TMP105 is compatible */
+ i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), "tmp105", 0x4d);
+}
+
static void ast2500_evb_init(MachineState *machine)
{
aspeed_board_init(machine, &aspeed_boards[AST2500_EVB]);
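The new i2c_init hook keeps per-board I2C topology out of the shared aspeed_board_init() path: a board entry only has to name its devices. A sketch of how a further board would use it; the bus number and address here are hypothetical, not from this series:

    static void my_board_i2c_init(AspeedBoardState *bmc)
    {
        AspeedSoCState *soc = &bmc->soc;

        /* hypothetical: a TMP105 model on bus 3 at 7-bit address 0x4c */
        i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 3),
                         "tmp105", 0x4c);
    }

wired up with .i2c_init = my_board_i2c_init in that board's AspeedBoardConfig entry.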
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index e5852067f5..2079828c22 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -776,6 +776,10 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
if (nb_numa_nodes > 0) {
acpi_add_table(table_offsets, tables_blob);
build_srat(tables_blob, tables->linker, vms);
+ if (have_numa_distance) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_slit(tables_blob, tables->linker);
+ }
}
if (its_class_name() && !vmc->no_its) {
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index c7c8159dfd..4db2d4207c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -219,6 +219,27 @@ static void create_fdt(VirtMachineState *vms)
"clk24mhz");
qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vms->clock_phandle);
+ if (have_numa_distance) {
+ int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
+ uint32_t *matrix = g_malloc0(size);
+ int idx, i, j;
+
+ for (i = 0; i < nb_numa_nodes; i++) {
+ for (j = 0; j < nb_numa_nodes; j++) {
+ idx = (i * nb_numa_nodes + j) * 3;
+ matrix[idx + 0] = cpu_to_be32(i);
+ matrix[idx + 1] = cpu_to_be32(j);
+ matrix[idx + 2] = cpu_to_be32(numa_info[i].distance[j]);
+ }
+ }
+
+ qemu_fdt_add_subnode(fdt, "/distance-map");
+ qemu_fdt_setprop_string(fdt, "/distance-map", "compatible",
+ "numa-distance-map-v1");
+ qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix",
+ matrix, size);
+ g_free(matrix);
+ }
}
static void fdt_add_psci_node(const VirtMachineState *vms)
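To make the triplet layout concrete, here is a standalone sketch with two nodes and assumed distances (10 local, 20 remote); the cpu_to_be32() conversion is left out:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        enum { NODES = 2 };
        uint32_t dist[NODES][NODES] = { { 10, 20 }, { 20, 10 } };
        uint32_t matrix[NODES * NODES * 3];
        int i, j;

        for (i = 0; i < NODES; i++) {
            for (j = 0; j < NODES; j++) {
                int idx = (i * NODES + j) * 3;
                matrix[idx + 0] = i;          /* from node */
                matrix[idx + 1] = j;          /* to node */
                matrix[idx + 2] = dist[i][j]; /* distance */
            }
        }
        /* third triplet encodes node 1 -> node 0 at distance 20 */
        assert(matrix[6] == 1 && matrix[7] == 0 && matrix[8] == 20);
        return 0;
    }

Each cell triple of the flattened "distance-matrix" property is thus (from, to, distance), matching the numa-distance-map-v1 binding.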
diff --git a/hw/core/loader.c b/hw/core/loader.c
index bf17b42cbe..f72930ca4a 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -611,8 +611,9 @@ static int load_uboot_image(const char *filename, hwaddr *ep, hwaddr *loadaddr,
return -1;
size = read(fd, hdr, sizeof(uboot_image_header_t));
- if (size < 0)
+ if (size < 0 || size < sizeof(uboot_image_header_t)) {
goto out;
+ }
bswap_uboot_header(hdr);
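The explicit size < 0 clause is not redundant: read() returns ssize_t, and comparing a negative ssize_t against the unsigned result of sizeof converts it to a huge unsigned value, silently passing the check. A self-contained demonstration of the pitfall:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        ssize_t size = -1; /* what read() returns on error */

        /* signed/unsigned comparison: -1 converts to SIZE_MAX, prints 0 */
        printf("%d\n", size < sizeof(int));
        /* the guarded form catches the error, prints 1 */
        printf("%d\n", size < 0 || (size_t)size < sizeof(int));
        return 0;
    }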
diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c
index ce5b1f0fa4..c762c7366a 100644
--- a/hw/i2c/aspeed_i2c.c
+++ b/hw/i2c/aspeed_i2c.c
@@ -169,12 +169,33 @@ static uint64_t aspeed_i2c_bus_read(void *opaque, hwaddr offset,
}
}
+static void aspeed_i2c_set_state(AspeedI2CBus *bus, uint8_t state)
+{
+ bus->cmd &= ~(I2CD_TX_STATE_MASK << I2CD_TX_STATE_SHIFT);
+ bus->cmd |= (state & I2CD_TX_STATE_MASK) << I2CD_TX_STATE_SHIFT;
+}
+
+static uint8_t aspeed_i2c_get_state(AspeedI2CBus *bus)
+{
+ return (bus->cmd >> I2CD_TX_STATE_SHIFT) & I2CD_TX_STATE_MASK;
+}
+
+/*
+ * The state machine needs some refinement. It is only used to track
+ * invalid STOP commands for the moment.
+ */
static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value)
{
+ bus->cmd &= ~0xFFFF;
bus->cmd |= value & 0xFFFF;
bus->intr_status = 0;
if (bus->cmd & I2CD_M_START_CMD) {
+ uint8_t state = aspeed_i2c_get_state(bus) & I2CD_MACTIVE ?
+ I2CD_MSTARTR : I2CD_MSTART;
+
+ aspeed_i2c_set_state(bus, state);
+
if (i2c_start_transfer(bus->bus, extract32(bus->buf, 1, 7),
extract32(bus->buf, 0, 1))) {
bus->intr_status |= I2CD_INTR_TX_NAK;
@@ -182,16 +203,34 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value)
bus->intr_status |= I2CD_INTR_TX_ACK;
}
- } else if (bus->cmd & I2CD_M_TX_CMD) {
+ /* START command is also a TX command, as the slave address is
+ * sent on the bus */
+ bus->cmd &= ~(I2CD_M_START_CMD | I2CD_M_TX_CMD);
+
+ /* No slave found */
+ if (!i2c_bus_busy(bus->bus)) {
+ return;
+ }
+ aspeed_i2c_set_state(bus, I2CD_MACTIVE);
+ }
+
+ if (bus->cmd & I2CD_M_TX_CMD) {
+ aspeed_i2c_set_state(bus, I2CD_MTXD);
if (i2c_send(bus->bus, bus->buf)) {
- bus->intr_status |= (I2CD_INTR_TX_NAK | I2CD_INTR_ABNORMAL);
+ bus->intr_status |= (I2CD_INTR_TX_NAK);
i2c_end_transfer(bus->bus);
} else {
bus->intr_status |= I2CD_INTR_TX_ACK;
}
+ bus->cmd &= ~I2CD_M_TX_CMD;
+ aspeed_i2c_set_state(bus, I2CD_MACTIVE);
+ }
- } else if (bus->cmd & I2CD_M_RX_CMD) {
- int ret = i2c_recv(bus->bus);
+ if (bus->cmd & (I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST)) {
+ int ret;
+
+ aspeed_i2c_set_state(bus, I2CD_MRXD);
+ ret = i2c_recv(bus->bus);
if (ret < 0) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: read failed\n", __func__);
ret = 0xff;
@@ -199,20 +238,25 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value)
bus->intr_status |= I2CD_INTR_RX_DONE;
}
bus->buf = (ret & I2CD_BYTE_BUF_RX_MASK) << I2CD_BYTE_BUF_RX_SHIFT;
+ if (bus->cmd & I2CD_M_S_RX_CMD_LAST) {
+ i2c_nack(bus->bus);
+ }
+ bus->cmd &= ~(I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST);
+ aspeed_i2c_set_state(bus, I2CD_MACTIVE);
}
- if (bus->cmd & (I2CD_M_STOP_CMD | I2CD_M_S_RX_CMD_LAST)) {
- if (!i2c_bus_busy(bus->bus)) {
+ if (bus->cmd & I2CD_M_STOP_CMD) {
+ if (!(aspeed_i2c_get_state(bus) & I2CD_MACTIVE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: abnormal stop\n", __func__);
bus->intr_status |= I2CD_INTR_ABNORMAL;
} else {
+ aspeed_i2c_set_state(bus, I2CD_MSTOP);
i2c_end_transfer(bus->bus);
bus->intr_status |= I2CD_INTR_NORMAL_STOP;
}
+ bus->cmd &= ~I2CD_M_STOP_CMD;
+ aspeed_i2c_set_state(bus, I2CD_IDLE);
}
-
- /* command is handled, reset it and check for interrupts */
- bus->cmd &= ~0xFFFF;
- aspeed_i2c_bus_raise_interrupt(bus);
}
static void aspeed_i2c_bus_write(void *opaque, hwaddr offset,
@@ -262,6 +306,7 @@ static void aspeed_i2c_bus_write(void *opaque, hwaddr offset,
}
aspeed_i2c_bus_handle_cmd(bus, value);
+ aspeed_i2c_bus_raise_interrupt(bus);
break;
default:
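A standalone sketch of the state-field helpers introduced above. The shift and mask values are assumptions for illustration; the real I2CD_TX_STATE_SHIFT/I2CD_TX_STATE_MASK definitions live in aspeed_i2c.c:

    #include <stdint.h>
    #include <assert.h>

    #define TX_STATE_SHIFT 19   /* assumed position of the state field */
    #define TX_STATE_MASK  0xf

    static void set_state(uint32_t *cmd, uint8_t state)
    {
        *cmd &= ~(TX_STATE_MASK << TX_STATE_SHIFT); /* clear old state */
        *cmd |= (state & TX_STATE_MASK) << TX_STATE_SHIFT;
    }

    static uint8_t get_state(uint32_t cmd)
    {
        return (cmd >> TX_STATE_SHIFT) & TX_STATE_MASK;
    }

    int main(void)
    {
        uint32_t cmd = 0;

        set_state(&cmd, 0x8);           /* e.g. enter an "active" state */
        assert(get_state(cmd) == 0x8);
        set_state(&cmd, 0x0);           /* back to idle, old bits cleared */
        assert(get_state(cmd) == 0 && cmd == 0);
        return 0;
    }

Read-modify-write of a dedicated field leaves the rest of the command register untouched, which is what lets the STOP-validity check above rely on it.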
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 0b208560bd..09d8ba0547 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -216,18 +216,35 @@ static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
{
/* Return a mask word which clears the subpriority bits from
* a priority value for a virtual interrupt in the specified group.
- * This depends on the VBPR value:
+ * This depends on the VBPR value.
+ * If using VBPR0 then:
* a BPR of 0 means the group priority bits are [7:1];
* a BPR of 1 means they are [7:2], and so on down to
* a BPR of 7 meaning no group priority bits at all.
+ * If using VBPR1 then:
+ * a BPR of 0 is impossible (the minimum value is 1)
+ * a BPR of 1 means the group priority bits are [7:1];
+ * a BPR of 2 means they are [7:2], and so on down to
+ * a BPR of 7 meaning the group priority is [7].
+ *
* Which BPR to use depends on the group of the interrupt and
* the current ICH_VMCR_EL2.VCBPR settings.
+ *
+ * This corresponds to the VGroupBits() pseudocode.
*/
+ int bpr;
+
if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
group = GICV3_G0;
}
- return ~0U << (read_vbpr(cs, group) + 1);
+ bpr = read_vbpr(cs, group);
+ if (group == GICV3_G1NS) {
+ assert(bpr > 0);
+ bpr--;
+ }
+
+ return ~0U << (bpr + 1);
}
static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
@@ -674,20 +691,37 @@ static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
{
/* Return a mask word which clears the subpriority bits from
* a priority value for an interrupt in the specified group.
- * This depends on the BPR value:
+ * This depends on the BPR value. For CBPR0 (S or NS):
* a BPR of 0 means the group priority bits are [7:1];
* a BPR of 1 means they are [7:2], and so on down to
* a BPR of 7 meaning no group priority bits at all.
+ * For CBPR1 NS:
+ * a BPR of 0 is impossible (the minimum value is 1)
+ * a BPR of 1 means the group priority bits are [7:1];
+ * a BPR of 2 means they are [7:2], and so on down to
+ * a BPR of 7 meaning the group priority is [7].
+ *
* Which BPR to use depends on the group of the interrupt and
* the current ICC_CTLR.CBPR settings.
+ *
+ * This corresponds to the GroupBits() pseudocode.
*/
+ int bpr;
+
if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
(group == GICV3_G1NS &&
cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
group = GICV3_G0;
}
- return ~0U << ((cs->icc_bpr[group] & 7) + 1);
+ bpr = cs->icc_bpr[group] & 7;
+
+ if (group == GICV3_G1NS) {
+ assert(bpr > 0);
+ bpr--;
+ }
+
+ return ~0U << (bpr + 1);
}
static bool icc_no_enabled_hppi(GICv3CPUState *cs)
@@ -1388,6 +1422,7 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
+ uint64_t minval;
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
icv_bpr_write(env, ri, value);
@@ -1415,6 +1450,11 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
+ minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
+ if (value < minval) {
+ value = minval;
+ }
+
cs->icc_bpr[grp] = value & 7;
gicv3_cpuif_update(cs);
}
@@ -2014,7 +2054,7 @@ static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
cs->ich_hcr_el2 = 0;
memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
- (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) |
+ ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
(icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
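A worked check of the BPR1 semantics being fixed here: for non-secure Group 1, a BPR value of N behaves like a BPR0 value of N-1, which is why the mask logic decrements first. A sketch mirroring icc_gprio_mask() for that case:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t g1ns_gprio_mask(int bpr)
    {
        bpr--;                  /* BPR1 value N == BPR0 value N-1 */
        return ~0U << (bpr + 1);
    }

    int main(void)
    {
        assert(g1ns_gprio_mask(1) == 0xfffffffeU); /* group bits [7:1] */
        assert(g1ns_gprio_mask(2) == 0xfffffffcU); /* group bits [7:2] */
        assert(g1ns_gprio_mask(7) == 0xffffff80U); /* group bit [7] only */
        return 0;
    }

The same off-by-one is why icc_reset() above now programs VBPR1 to icv_min_vbpr() + 1 while VBPR0 stays at icv_min_vbpr().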
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 32ffa0bf35..26a4b2dcb5 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -19,6 +19,7 @@
#include "hw/arm/arm.h"
#include "hw/arm/armv7m_nvic.h"
#include "target/arm/cpu.h"
+#include "exec/exec-all.h"
#include "qemu/log.h"
#include "trace.h"
@@ -528,6 +529,39 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset)
case 0xd70: /* ISAR4. */
return 0x01310102;
/* TODO: Implement debug registers. */
+ case 0xd90: /* MPU_TYPE */
+ /* Unified MPU; if the MPU is not present this value is zero */
+ return cpu->pmsav7_dregion << 8;
+ case 0xd94: /* MPU_CTRL */
+ return cpu->env.v7m.mpu_ctrl;
+ case 0xd98: /* MPU_RNR */
+ return cpu->env.cp15.c6_rgnr;
+ case 0xd9c: /* MPU_RBAR */
+ case 0xda4: /* MPU_RBAR_A1 */
+ case 0xdac: /* MPU_RBAR_A2 */
+ case 0xdb4: /* MPU_RBAR_A3 */
+ {
+ int region = cpu->env.cp15.c6_rgnr;
+
+ if (region >= cpu->pmsav7_dregion) {
+ return 0;
+ }
+ return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
+ }
+ case 0xda0: /* MPU_RASR */
+ case 0xda8: /* MPU_RASR_A1 */
+ case 0xdb0: /* MPU_RASR_A2 */
+ case 0xdb8: /* MPU_RASR_A3 */
+ {
+ int region = cpu->env.cp15.c6_rgnr;
+
+ if (region >= cpu->pmsav7_dregion) {
+ return 0;
+ }
+ return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
+ (cpu->env.pmsav7.drsr[region] & 0xffff);
+ }
default:
qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
return 0;
@@ -627,6 +661,76 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
qemu_log_mask(LOG_UNIMP,
"NVIC: Aux fault status registers unimplemented\n");
break;
+ case 0xd90: /* MPU_TYPE */
+ return; /* RO */
+ case 0xd94: /* MPU_CTRL */
+ if ((value &
+ (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
+ == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
+ "UNPREDICTABLE\n");
+ }
+ cpu->env.v7m.mpu_ctrl = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
+ R_V7M_MPU_CTRL_HFNMIENA_MASK |
+ R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
+ tlb_flush(CPU(cpu));
+ break;
+ case 0xd98: /* MPU_RNR */
+ if (value >= cpu->pmsav7_dregion) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
+ PRIu32 "/%" PRIu32 "\n",
+ value, cpu->pmsav7_dregion);
+ } else {
+ cpu->env.cp15.c6_rgnr = value;
+ }
+ break;
+ case 0xd9c: /* MPU_RBAR */
+ case 0xda4: /* MPU_RBAR_A1 */
+ case 0xdac: /* MPU_RBAR_A2 */
+ case 0xdb4: /* MPU_RBAR_A3 */
+ {
+ int region;
+
+ if (value & (1 << 4)) {
+ /* VALID bit means use the region number specified in this
+ * value and also update MPU_RNR.REGION with that value.
+ */
+ region = extract32(value, 0, 4);
+ if (region >= cpu->pmsav7_dregion) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "MPU region out of range %u/%" PRIu32 "\n",
+ region, cpu->pmsav7_dregion);
+ return;
+ }
+ cpu->env.cp15.c6_rgnr = region;
+ } else {
+ region = cpu->env.cp15.c6_rgnr;
+ }
+
+ if (region >= cpu->pmsav7_dregion) {
+ return;
+ }
+
+ cpu->env.pmsav7.drbar[region] = value & ~0x1f;
+ tlb_flush(CPU(cpu));
+ break;
+ }
+ case 0xda0: /* MPU_RASR */
+ case 0xda8: /* MPU_RASR_A1 */
+ case 0xdb0: /* MPU_RASR_A2 */
+ case 0xdb8: /* MPU_RASR_A3 */
+ {
+ int region = cpu->env.cp15.c6_rgnr;
+
+ if (region >= cpu->pmsav7_dregion) {
+ return;
+ }
+
+ cpu->env.pmsav7.drsr[region] = value & 0xff3f;
+ cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
+ tlb_flush(CPU(cpu));
+ break;
+ }
case 0xf00: /* Software Triggered Interrupt Register */
{
/* user mode can only write to STIR if CCR.USERSETMPEND permits it */
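The guest-visible effect of the registers modelled above, as a bare-metal sketch (not QEMU code). The addresses are the architectural ARMv7-M ones: SCS base 0xE000E000 plus the 0xd90.. offsets handled in this switch; the field values are illustrative only:

    #include <stdint.h>

    #define MPU_TYPE (*(volatile uint32_t *)0xE000ED90)
    #define MPU_CTRL (*(volatile uint32_t *)0xE000ED94)
    #define MPU_RBAR (*(volatile uint32_t *)0xE000ED9C)
    #define MPU_RASR (*(volatile uint32_t *)0xE000EDA0)

    void mpu_setup_region0(void)
    {
        if (((MPU_TYPE >> 8) & 0xff) == 0) {
            return; /* DREGION == 0: no MPU regions implemented */
        }
        /* VALID (bit 4) plus region number in [3:0] also updates MPU_RNR */
        MPU_RBAR = 0x20000000 | (1 << 4) | 0;
        /* AP=011 (full access), SIZE=17 -> 2^(17+1) = 256KB, ENABLE */
        MPU_RASR = (3u << 24) | (17 << 1) | 1;
        MPU_CTRL = (1 << 2) | 1; /* PRIVDEFENA | ENABLE */
    }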
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index c185eb19ac..e748097860 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -550,6 +550,14 @@ static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ /* M profile implies PMSA. We have to do this here rather than
+ * in realize with the other feature-implication checks because
+ * we look at the PMSA bit to see if we should add some properties.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
+ }
+
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
@@ -593,7 +601,7 @@ static void arm_cpu_post_init(Object *obj)
&error_abort);
}
- if (arm_feature(&cpu->env, ARM_FEATURE_MPU)) {
+ if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
&error_abort);
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -689,7 +697,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
if (arm_feature(env, ARM_FEATURE_V7) &&
!arm_feature(env, ARM_FEATURE_M) &&
- !arm_feature(env, ARM_FEATURE_MPU)) {
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
/* v7VMSA drops support for the old ARMv5 tiny pages, so we
* can use 4K pages.
*/
@@ -750,8 +758,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_pmu) {
- cpu->has_pmu = false;
unset_feature(env, ARM_FEATURE_PMU);
+ cpu->id_aa64dfr0 &= ~0xf00;
}
if (!arm_feature(env, ARM_FEATURE_EL2)) {
@@ -763,11 +771,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->id_pfr1 &= ~0xf000;
}
+ /* MPU can be configured out of a PMSA CPU either by setting has-mpu
+ * to false or by setting pmsav7-dregion to 0.
+ */
if (!cpu->has_mpu) {
- unset_feature(env, ARM_FEATURE_MPU);
+ cpu->pmsav7_dregion = 0;
+ }
+ if (cpu->pmsav7_dregion == 0) {
+ cpu->has_mpu = false;
}
- if (arm_feature(env, ARM_FEATURE_MPU) &&
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
arm_feature(env, ARM_FEATURE_V7)) {
uint32_t nr = cpu->pmsav7_dregion;
@@ -867,7 +881,7 @@ static void arm946_initfn(Object *obj)
cpu->dtb_compatible = "arm,arm946";
set_feature(&cpu->env, ARM_FEATURE_V5);
- set_feature(&cpu->env, ARM_FEATURE_MPU);
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
cpu->midr = 0x41059461;
cpu->ctr = 0x0f004006;
@@ -1079,7 +1093,7 @@ static void cortex_r5_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
- set_feature(&cpu->env, ARM_FEATURE_MPU);
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
cpu->midr = 0x411fc153; /* r1p3 */
cpu->id_pfr0 = 0x0131;
cpu->id_pfr1 = 0x001;
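The two realize checks above deliberately converge: disabling the MPU via has-mpu=false zeroes the region count, and a zero region count in turn clears has-mpu, so later code can test either one. A standalone sketch of that normalisation:

    #include <assert.h>
    #include <stdbool.h>

    struct cfg { bool has_mpu; unsigned dregion; };

    static void normalise(struct cfg *c)
    {
        if (!c->has_mpu) {
            c->dregion = 0;
        }
        if (c->dregion == 0) {
            c->has_mpu = false;
        }
    }

    int main(void)
    {
        struct cfg a = { .has_mpu = true, .dregion = 0 };
        struct cfg b = { .has_mpu = false, .dregion = 8 };

        normalise(&a);
        assert(!a.has_mpu);        /* zero regions implies no MPU */
        normalise(&b);
        assert(b.dregion == 0);    /* no MPU implies zero regions */
        return 0;
    }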
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 048faed9b9..13da5036bc 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -418,6 +418,7 @@ typedef struct CPUARMState {
uint32_t dfsr; /* Debug Fault Status Register */
uint32_t mmfar; /* MemManage Fault Address */
uint32_t bfar; /* BusFault Address */
+ uint32_t mpu_ctrl; /* MPU_CTRL */
int exception;
} v7m;
@@ -1168,6 +1169,11 @@ FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)
+/* v7M MPU_CTRL bits */
+FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
+FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
+FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
+
/* If adding a feature bit which corresponds to a Linux ELF
* HWCAP bit, remember to update the feature-bit-to-hwcap
* mapping in linux-user/elfload.c:get_elf_hwcap().
@@ -1181,7 +1187,7 @@ enum arm_features {
ARM_FEATURE_V6K,
ARM_FEATURE_V7,
ARM_FEATURE_THUMB2,
- ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
+ ARM_FEATURE_PMSA, /* no MMU; may have Memory Protection Unit */
ARM_FEATURE_VFP3,
ARM_FEATURE_VFP_FP16,
ARM_FEATURE_NEON,
@@ -2039,6 +2045,28 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* for the accesses done as part of a stage 1 page table walk, rather than
* having to walk the stage 2 page table over and over.)
*
+ * R profile CPUs have an MPU, but can use the same set of MMU indexes
+ * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
+ * NS EL2 if we ever model a Cortex-R52).
+ *
+ * M profile CPUs are rather different as they do not have a true MMU.
+ * They have the following different MMU indexes:
+ * User
+ * Privileged
+ * Execution priority negative (this is like privileged, but the
+ * MPU HFNMIENA bit means that it may have different access permission
+ * check results to normal privileged code, so can't share a TLB).
+ *
+ * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
+ * are not quite the same -- different CPU types (most notably M profile
+ * vs A/R profile) would like to use MMU indexes with different semantics,
+ * but since we don't ever need to use all of those in a single CPU we
+ * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
+ * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
+ * the same for any particular CPU.
+ * Variables of type ARMMMUIdx are always full values, and the core
+ * index values are in variables of type 'int'.
+ *
* Our enumeration includes at the end some entries which are not "true"
* mmu_idx values in that they don't have corresponding TLBs and are only
* valid for doing slow path page table walks.
@@ -2047,28 +2075,74 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* of the AT/ATS operations.
* The values used are carefully arranged to make mmu_idx => EL lookup easy.
*/
+#define ARM_MMU_IDX_A 0x10 /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
+#define ARM_MMU_IDX_M 0x40 /* M profile */
+
+#define ARM_MMU_IDX_TYPE_MASK (~0x7)
+#define ARM_MMU_IDX_COREIDX_MASK 0x7
+
typedef enum ARMMMUIdx {
- ARMMMUIdx_S12NSE0 = 0,
- ARMMMUIdx_S12NSE1 = 1,
- ARMMMUIdx_S1E2 = 2,
- ARMMMUIdx_S1E3 = 3,
- ARMMMUIdx_S1SE0 = 4,
- ARMMMUIdx_S1SE1 = 5,
- ARMMMUIdx_S2NS = 6,
+ ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M,
/* Indexes below here don't have TLBs and are used only for AT system
* instructions or for the first stage of an S12 page table walk.
*/
- ARMMMUIdx_S1NSE0 = 7,
- ARMMMUIdx_S1NSE1 = 8,
+ ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
} ARMMMUIdx;
+/* Bit macros for the core-mmu-index values for each index,
+ * for use when calling tlb_flush_by_mmuidx() and friends.
+ */
+typedef enum ARMMMUIdxBit {
+ ARMMMUIdxBit_S12NSE0 = 1 << 0,
+ ARMMMUIdxBit_S12NSE1 = 1 << 1,
+ ARMMMUIdxBit_S1E2 = 1 << 2,
+ ARMMMUIdxBit_S1E3 = 1 << 3,
+ ARMMMUIdxBit_S1SE0 = 1 << 4,
+ ARMMMUIdxBit_S1SE1 = 1 << 5,
+ ARMMMUIdxBit_S2NS = 1 << 6,
+ ARMMMUIdxBit_MUser = 1 << 0,
+ ARMMMUIdxBit_MPriv = 1 << 1,
+ ARMMMUIdxBit_MNegPri = 1 << 2,
+} ARMMMUIdxBit;
+
#define MMU_USER_IDX 0
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return mmu_idx | ARM_MMU_IDX_M;
+ } else {
+ return mmu_idx | ARM_MMU_IDX_A;
+ }
+}
+
/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
- assert(mmu_idx < ARMMMUIdx_S2NS);
- return mmu_idx & 3;
+ switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
+ case ARM_MMU_IDX_A:
+ return mmu_idx & 3;
+ case ARM_MMU_IDX_M:
+ return mmu_idx == ARMMMUIdx_MUser ? 0 : 1;
+ default:
+ g_assert_not_reached();
+ }
}
/* Determine the current mmu_idx to use for normal loads/stores */
@@ -2076,8 +2150,22 @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv;
+
+ /* Execution priority is negative if FAULTMASK is set or
+ * we're in a HardFault or NMI handler.
+ */
+ if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
+ || env->daif & PSTATE_F) {
+ return arm_to_core_mmu_idx(ARMMMUIdx_MNegPri);
+ }
+
+ return arm_to_core_mmu_idx(mmu_idx);
+ }
+
if (el < 2 && arm_is_secure_below_el3(env)) {
- return ARMMMUIdx_S1SE0 + el;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
}
return el;
}
@@ -2473,7 +2561,7 @@ static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
- ARMMMUIdx mmu_idx = cpu_mmu_index(env, false);
+ ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
if (is_a64(env)) {
*pc = env->pc;
*flags = ARM_TBFLAG_AARCH64_STATE_MASK;
@@ -2498,7 +2586,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (mmu_idx << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
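A standalone check of the index encoding described in the cpu.h comment above; the constants mirror ARM_MMU_IDX_M, ARM_MMU_IDX_A and ARM_MMU_IDX_COREIDX_MASK from this patch:

    #include <assert.h>
    #include <stdbool.h>

    #define IDX_A        0x10
    #define IDX_M        0x40
    #define COREIDX_MASK 0x7

    static int to_core(int arm_idx)
    {
        return arm_idx & COREIDX_MASK; /* what the QEMU TLB code sees */
    }

    static int to_arm(bool m_profile, int core_idx)
    {
        return core_idx | (m_profile ? IDX_M : IDX_A);
    }

    int main(void)
    {
        int mnegpri = 2 | IDX_M;            /* ARMMMUIdx_MNegPri == 0x42 */

        assert(to_core(mnegpri) == 2);      /* shares core slot 2 with S1E2 */
        assert(to_arm(true, 2) == mnegpri); /* recoverable given CPU type */
        return 0;
    }

The round trip needs the CPU type, which is exactly why core_to_arm_mmu_idx() takes env: a bare core index is ambiguous between the A profile and M profile meanings.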
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 8a3e4480aa..2594faa9b8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -485,7 +485,7 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
ARMCPU *cpu = arm_env_get_cpu(env);
- if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
+ if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
&& !extended_addresses_enabled(env)) {
/* For VMSA (when not using the LPAE long descriptor page table
* format) this register includes the ASID, so do a TLB flush.
@@ -571,9 +571,9 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
}
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -582,9 +582,9 @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -605,7 +605,7 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 40);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -621,7 +621,7 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 40);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -629,7 +629,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -637,7 +637,7 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -646,7 +646,7 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -656,7 +656,7 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E2));
+ ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
@@ -2596,9 +2596,9 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Accesses to VTTBR may change the VMID so we must flush the TLB. */
if (raw_read(env, ri) != value) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
raw_write(env, ri, value);
}
}
@@ -2957,12 +2957,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -2974,12 +2974,12 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -2995,18 +2995,18 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
if (arm_feature(env, ARM_FEATURE_EL2)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
} else {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
}
@@ -3017,7 +3017,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3026,7 +3026,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3042,17 +3042,17 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else if (has_el2) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
} else {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -3061,7 +3061,7 @@ static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3069,7 +3069,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3086,12 +3086,12 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_page_by_mmuidx(cs, pageaddr,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx(cs, pageaddr,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -3106,7 +3106,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3120,7 +3120,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3133,12 +3133,12 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -3149,7 +3149,7 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E2));
+ ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3159,7 +3159,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E3));
+ ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3181,7 +3181,7 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3197,7 +3197,7 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3258,6 +3258,11 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
+ if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
+ /* M bit is RAZ/WI for PMSA with no MPU implemented */
+ value &= ~SCTLR_M;
+ }
+
raw_write(env, ri, value);
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
@@ -4615,7 +4620,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V7MP) &&
- !arm_feature(env, ARM_FEATURE_MPU)) {
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
define_arm_cp_regs(cpu, v7mp_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V7)) {
@@ -4969,7 +4974,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
}
- if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
if (arm_feature(env, ARM_FEATURE_V6)) {
/* PMSAv6 not implemented */
assert(arm_feature(env, ARM_FEATURE_V7));
@@ -5131,7 +5136,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
}
define_arm_cp_regs(cpu, id_cp_reginfo);
- if (!arm_feature(env, ARM_FEATURE_MPU)) {
+ if (!arm_feature(env, ARM_FEATURE_PMSA)) {
define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
@@ -6337,10 +6342,49 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- /* TODO: if we implemented the MPU registers, this is where we
- * should set the MMFAR, etc from exception.fsr and exception.vaddress.
+ /* Note that for M profile we don't have a guest facing FSR, but
+ * the env->exception.fsr will be populated by the code that
+ * raises the fault, in the A profile short-descriptor format.
*/
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ switch (env->exception.fsr & 0xf) {
+ case 0x8: /* External Abort */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ env->v7m.cfsr |= R_V7M_CFSR_IBUSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
+ break;
+ case EXCP_DATA_ABORT:
+ env->v7m.cfsr |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
+ env->v7m.bfar);
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
+ break;
+ default:
+ /* All other FSR values are either MPU faults or "can't happen
+ * for M profile" cases.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ env->v7m.cfsr |= R_V7M_CFSR_IACCVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
+ break;
+ case EXCP_DATA_ABORT:
+ env->v7m.cfsr |=
+ (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
+ env->v7m.mmfar = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
+ env->v7m.mmfar);
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ break;
+ }
break;
case EXCP_BKPT:
if (semihosting_enabled()) {
@@ -6992,6 +7036,9 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1SE1:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_S1NSE1:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MNegPri:
+ case ARMMMUIdx_MUser:
return 1;
default:
g_assert_not_reached();
@@ -7008,6 +7055,9 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1NSE1:
case ARMMMUIdx_S1E2:
case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MNegPri:
+ case ARMMMUIdx_MUser:
return false;
case ARMMMUIdx_S1E3:
case ARMMMUIdx_S1SE0:
@@ -7028,6 +7078,24 @@ static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline bool regime_translation_disabled(CPUARMState *env,
ARMMMUIdx mmu_idx)
{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ switch (env->v7m.mpu_ctrl &
+ (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
+ case R_V7M_MPU_CTRL_ENABLE_MASK:
+ /* Enabled, but not for HardFault and NMI */
+ return mmu_idx == ARMMMUIdx_MNegPri;
+ case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
+ /* Enabled for all cases */
+ return false;
+ case 0:
+ default:
+ /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
+ * we warned about that in armv7m_nvic.c when the guest set it.
+ */
+ return true;
+ }
+ }
+
if (mmu_idx == ARMMMUIdx_S2NS) {
return (env->cp15.hcr_el2 & HCR_VM) == 0;
}
@@ -7049,6 +7117,17 @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
+/* Convert a possible stage1+2 MMU index into the appropriate
+ * stage 1 MMU index
+ */
+static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
+ }
+ return mmu_idx;
+}
+
/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
@@ -7056,11 +7135,9 @@ uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
uint32_t el;
/* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
- * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
- */
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
tcr = regime_tcr(env, mmu_idx);
el = regime_el(env, mmu_idx);
@@ -7079,11 +7156,9 @@ uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
uint32_t el;
/* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
- * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
- */
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
tcr = regime_tcr(env, mmu_idx);
el = regime_el(env, mmu_idx);
@@ -7129,9 +7204,7 @@ static inline bool regime_using_lpae_format(CPUARMState *env,
* on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
return regime_using_lpae_format(env, mmu_idx);
}
@@ -7141,6 +7214,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1NSE0:
+ case ARMMMUIdx_MUser:
return true;
default:
return false;
@@ -8114,18 +8188,60 @@ static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
ARMMMUIdx mmu_idx,
int32_t address, int *prot)
{
- *prot = PAGE_READ | PAGE_WRITE;
- switch (address) {
- case 0xF0000000 ... 0xFFFFFFFF:
- if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs execing is ok */
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ *prot = PAGE_READ | PAGE_WRITE;
+ switch (address) {
+ case 0xF0000000 ... 0xFFFFFFFF:
+ if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
+ /* hivecs execing is ok */
+ *prot |= PAGE_EXEC;
+ }
+ break;
+ case 0x00000000 ... 0x7FFFFFFF:
*prot |= PAGE_EXEC;
+ break;
+ }
+ } else {
+ /* Default system address map for M profile cores.
+ * The architecture specifies which regions are execute-never;
+ * at the MPU level no other checks are defined.
+ */
+ switch (address) {
+ case 0x00000000 ... 0x1fffffff: /* ROM */
+ case 0x20000000 ... 0x3fffffff: /* SRAM */
+ case 0x60000000 ... 0x7fffffff: /* RAM */
+ case 0x80000000 ... 0x9fffffff: /* RAM */
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ break;
+ case 0x40000000 ... 0x5fffffff: /* Peripheral */
+ case 0xa0000000 ... 0xbfffffff: /* Device */
+ case 0xc0000000 ... 0xdfffffff: /* Device */
+ case 0xe0000000 ... 0xffffffff: /* System */
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ default:
+ g_assert_not_reached();
}
- break;
- case 0x00000000 ... 0x7FFFFFFF:
- *prot |= PAGE_EXEC;
- break;
+ }
+}
+
+static bool pmsav7_use_background_region(ARMCPU *cpu,
+ ARMMMUIdx mmu_idx, bool is_user)
+{
+ /* Return true if we should use the default memory map as a
+ * "background" region if there are no hits against any MPU regions.
+ */
+ CPUARMState *env = &cpu->env;
+
+ if (is_user) {
+ return false;
}
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return env->v7m.mpu_ctrl & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+ } else {
+ return regime_sctlr(env, mmu_idx) & SCTLR_BR;
+ }
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
@@ -8154,16 +8270,18 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
if (!rsize) {
- qemu_log_mask(LOG_GUEST_ERROR, "DRSR.Rsize field can not be 0");
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRSR[%d]: Rsize field cannot be 0\n", n);
continue;
}
rsize++;
rmask = (1ull << rsize) - 1;
if (base & rmask) {
- qemu_log_mask(LOG_GUEST_ERROR, "DRBAR %" PRIx32 " misaligned "
- "to DRSR region size, mask = %" PRIx32,
- base, rmask);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRBAR[%d]: 0x%" PRIx32 " misaligned "
+ "to DRSR region size, mask = 0x%" PRIx32 "\n",
+ n, base, rmask);
continue;
}
@@ -8200,9 +8318,10 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
}
if (rsize < TARGET_PAGE_BITS) {
- qemu_log_mask(LOG_UNIMP, "No support for MPU (sub)region"
+ qemu_log_mask(LOG_UNIMP,
+ "DRSR[%d]: No support for MPU (sub)region "
"alignment of %" PRIu32 " bits. Minimum is %d\n",
- rsize, TARGET_PAGE_BITS);
+ n, rsize, TARGET_PAGE_BITS);
continue;
}
if (srdis) {
@@ -8212,8 +8331,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
if (n == -1) { /* no hits */
- if (cpu->pmsav7_dregion &&
- (is_user || !(regime_sctlr(env, mmu_idx) & SCTLR_BR))) {
+ if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
/* background fault */
*fsr = 0;
return true;
@@ -8237,8 +8355,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "Bad value for AP bits in DRACR %"
- PRIx32 "\n", ap);
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
}
} else { /* Priv. mode AP bits decoding */
switch (ap) {
@@ -8255,8 +8373,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "Bad value for AP bits in DRACR %"
- PRIx32 "\n", ap);
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
}
}
@@ -8385,7 +8503,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
int ret;
ret = get_phys_addr(env, address, access_type,
- mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
+ stage_1_mmu_idx(mmu_idx), &ipa, attrs,
prot, page_size, fsr, fi);
/* If S1 fails or S2 is disabled, return early. */
@@ -8406,7 +8524,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/*
* For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
*/
- mmu_idx += ARMMMUIdx_S1NSE0;
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
}
}
@@ -8432,11 +8550,23 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/* pmsav7 has special handling for when MPU is disabled so call it before
* the common MMU/MPU disabled check below.
*/
- if (arm_feature(env, ARM_FEATURE_MPU) &&
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
arm_feature(env, ARM_FEATURE_V7)) {
+ bool ret;
*page_size = TARGET_PAGE_SIZE;
- return get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
+ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fsr);
+ qemu_log_mask(CPU_LOG_MMU, "PMSAv7 MPU lookup for %s at 0x%08" PRIx32
+ " mmu_idx %u -> %s (prot %c%c%c)\n",
+ access_type == 1 ? "reading" :
+ (access_type == 2 ? "writing" : "execute"),
+ (uint32_t)address, mmu_idx,
+ ret ? "Miss" : "Hit",
+ *prot & PAGE_READ ? 'r' : '-',
+ *prot & PAGE_WRITE ? 'w' : '-',
+ *prot & PAGE_EXEC ? 'x' : '-');
+
+ return ret;
}
if (regime_translation_disabled(env, mmu_idx)) {
@@ -8447,7 +8577,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
return 0;
}
- if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
/* Pre-v7 MPU */
*page_size = TARGET_PAGE_SIZE;
return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
@@ -8482,7 +8612,8 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
int ret;
MemTxAttrs attrs = {};
- ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
+ ret = get_phys_addr(env, address, access_type,
+ core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
&attrs, &prot, &page_size, fsr, fi);
if (!ret) {
/* Map a single [sub]page. */
@@ -8507,10 +8638,11 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
bool ret;
uint32_t fsr;
ARMMMUFaultInfo fi = {};
+ ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
*attrs = (MemTxAttrs) {};
- ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
+ ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
attrs, &prot, &page_size, &fsr, &fi);
if (ret) {
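One detail worth spelling out from the PMSAv7 path above: DRSR.Rsize is an exponent, with a raw field value N encoding a 2^(N+1)-byte region, hence the rsize++ before the alignment mask is built. A standalone check:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t rsize = 11;                  /* raw DRSR.Rsize field */

        rsize++;                              /* as in get_phys_addr_pmsav7() */
        assert((1ull << rsize) == 4096);      /* 4KB region */
        assert(((1ull << rsize) - 1) == 0xfff);
        /* a region base must be aligned to the region size */
        assert((0x20001000 & ((1ull << rsize) - 1)) == 0);
        return 0;
    }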
diff --git a/target/arm/machine.c b/target/arm/machine.c
index d8094a840b..1a40469015 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -99,8 +99,8 @@ static bool m_needed(void *opaque)
static const VMStateDescription vmstate_m = {
.name = "cpu/m",
- .version_id = 3,
- .minimum_version_id = 3,
+ .version_id = 4,
+ .minimum_version_id = 4,
.needed = m_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT32(env.v7m.vecbase, ARMCPU),
@@ -112,6 +112,7 @@ static const VMStateDescription vmstate_m = {
VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
VMSTATE_UINT32(env.v7m.mmfar, ARMCPU),
VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
+ VMSTATE_UINT32(env.v7m.mpu_ctrl, ARMCPU),
VMSTATE_INT32(env.v7m.exception, ARMCPU),
VMSTATE_END_OF_LIST()
}
@@ -142,7 +143,7 @@ static bool pmsav7_needed(void *opaque)
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
- return arm_feature(env, ARM_FEATURE_MPU) &&
+ return arm_feature(env, ARM_FEATURE_PMSA) &&
arm_feature(env, ARM_FEATURE_V7);
}
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 156b825040..2a85666579 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -194,6 +194,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
int target_el;
bool same_el;
uint32_t syn;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
if (retaddr) {
/* now we have a real cpu fault */
@@ -208,7 +209,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
/* the DFSR for an alignment fault depends on whether we're using
* the LPAE long descriptor format, or the short descriptor format
*/
- if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
+ if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
env->exception.fsr = (1 << 9) | 0x21;
} else {
env->exception.fsr = 0x1;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 24de30d92c..a82ab49c94 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -101,21 +101,27 @@ void a64_translate_init(void)
offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
-static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+static inline int get_a64_user_mem_index(DisasContext *s)
{
- /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+ /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
* if EL1, access as if EL0; otherwise access at current EL
*/
+ ARMMMUIdx useridx;
+
switch (s->mmu_idx) {
case ARMMMUIdx_S12NSE1:
- return ARMMMUIdx_S12NSE0;
+ useridx = ARMMMUIdx_S12NSE0;
+ break;
case ARMMMUIdx_S1SE1:
- return ARMMMUIdx_S1SE0;
+ useridx = ARMMMUIdx_S1SE0;
+ break;
case ARMMMUIdx_S2NS:
g_assert_not_reached();
default:
- return s->mmu_idx;
+ useridx = s->mmu_idx;
+ break;
}
+ return arm_to_core_mmu_idx(useridx);
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
@@ -11212,7 +11218,7 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
- dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
dc->tbi0 = ARM_TBFLAG_TBI0(tb->flags);
dc->tbi1 = ARM_TBFLAG_TBI1(tb->flags);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 0b5a0bca06..ae6646c05b 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -145,9 +145,9 @@ static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
disas_set_insn_syndrome(s, syn);
}
-static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+static inline int get_a32_user_mem_index(DisasContext *s)
{
- /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
* insns:
* if PL2, UNPREDICTABLE (we choose to implement as if PL0)
* otherwise, access as if at PL0.
@@ -156,11 +156,15 @@ static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_S12NSE0:
case ARMMMUIdx_S12NSE1:
- return ARMMMUIdx_S12NSE0;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
case ARMMMUIdx_S1E3:
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1SE1:
- return ARMMMUIdx_S1SE0;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
case ARMMMUIdx_S2NS:
default:
g_assert_not_reached();
@@ -11816,7 +11820,7 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
- dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 629dab945e..6b2cc34c33 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -88,7 +88,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
static inline int get_mem_index(DisasContext *s)
{
- return s->mmu_idx;
+ return arm_to_core_mmu_idx(s->mmu_idx);
}
/* Function used to determine the target exception EL when otherwise not known