author     Peter Maydell <peter.maydell@linaro.org>   2015-08-25 16:24:06 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2015-08-25 16:24:06 +0100
commit     7df9671989c1cfa693764f9ae6349324b2ada02a (patch)
tree       10cfbe17f8fc7f347abac4299f4834ae045a029e
parent     34a4450434f1a5daee06fca223afcbb9c8f1ee24 (diff)
parent     cea66e91212164e02ad1d245c2371f7e8eb59e7f (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20150825-1' into staging
target-arm queue:
 * add missing EL2/EL3 TLBI operations
 * add missing EL2/EL3 ATS operations
 * add missing EL2/EL3 registers
 * update Xilinx MAINTAINERS info
 * Xilinx: connect the four OCM banks

# gpg: Signature made Tue 25 Aug 2015 16:22:43 BST using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"

* remotes/pmaydell/tags/pull-target-arm-20150825-1:
  target-arm: Implement AArch64 TLBI operations on IPAs
  target-arm: Implement missing EL3 TLB invalidate operations
  target-arm: Implement missing EL2 TLBI operations
  target-arm: Restrict AArch64 TLB flushes to the MMU indexes they must touch
  target-arm: Move TLBI ALLE1/ALLE1IS definitions into numeric order
  cputlb: Add functions for flushing TLB for a single MMU index
  target-arm: Implement AArch32 ATS1H* operations
  target-arm: Enable the AArch32 ATS12NSO ops
  target-arm: Add CP_ACCESS_TRAP_UNCATEGORIZED_EL2, 3
  target-arm: Wire up AArch64 EL2 and EL3 address translation ops
  target-arm: there is no TTBR1 for 32-bit EL2 stage 1 translations
  target-arm: Implement missing ACTLR registers
  target-arm: Implement missing AFSR registers
  target-arm: Implement missing AMAIR registers
  target-arm: Add missing MAIR_EL3 and TPIDR_EL3 registers
  MAINTAINERS: Add ZynqMP to MAINTAINERS file
  MAINTAINERS: Update Xilinx Maintainership
  xlnx-zynqmp: Connect the four OCM banks

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--   MAINTAINERS                     27
-rw-r--r--   cputlb.c                        97
-rw-r--r--   hw/arm/xlnx-zynqmp.c            15
-rw-r--r--   include/exec/exec-all.h         47
-rw-r--r--   include/hw/arm/xlnx-zynqmp.h     6
-rw-r--r--   target-arm/cpu.h                 3
-rw-r--r--   target-arm/helper.c            489
-rw-r--r--   target-arm/op_helper.c           8
8 files changed, 629 insertions(+), 63 deletions(-)
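
The generic-code core of this pull is the pair of new cputlb entry points, tlb_flush_by_mmuidx() and tlb_flush_page_by_mmuidx(). What follows is a minimal sketch, not part of the patch, of how target code might call them, based only on the declarations added to include/exec/exec-all.h below; the helper name and the MMU index values are illustrative placeholders, not real ARMMMUIdx constants.

    #include "cpu.h"            /* target CPU definitions (target_ulong) */
    #include "exec/exec-all.h"  /* tlb_flush_by_mmuidx() declarations */

    /* Hypothetical target helper: flush TLB entries for two of the
     * target's MMU indexes. The variadic index list must end with a
     * negative terminator, as documented in exec-all.h below.
     */
    static void flush_two_mmu_indexes(CPUState *cs, target_ulong addr)
    {
        /* Drop one page from MMU indexes 0 and 1 only. */
        tlb_flush_page_by_mmuidx(cs, addr, 0, 1, -1);

        /* Drop every entry cached for MMU index 0. */
        tlb_flush_by_mmuidx(cs, 0, -1);
    }
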
diff --git a/MAINTAINERS b/MAINTAINERS
index a059d5de3b..08f356a54c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -349,13 +349,22 @@ S: Maintained
F: hw/*/versatile*
Xilinx Zynq
-M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+M: Alistair Francis <alistair.francis@xilinx.com>
+M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
S: Maintained
F: hw/arm/xilinx_zynq.c
F: hw/misc/zynq_slcr.c
F: hw/*/cadence_*
F: hw/ssi/xilinx_spips.c
+Xilinx ZynqMP
+M: Alistair Francis <alistair.francis@xilinx.com>
+M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
+S: Maintained
+F: hw/arm/xlnx-zynqmp.c
+F: hw/arm/xlnx-ep108.c
+F: include/hw/arm/xlnx-zynqmp.h
+
ARM ACPI Subsystem
M: Shannon Zhao <zhaoshenglong@huawei.com>
M: Shannon Zhao <shannon.zhao@linaro.org>
@@ -405,7 +414,7 @@ S: Maintained
F: hw/microblaze/petalogix_s3adsp1800_mmu.c
petalogix_ml605
-M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: hw/microblaze/petalogix_ml605_mmu.c
@@ -685,10 +694,17 @@ S: Orphan
F: hw/scsi/lsi53c895a.c
SSI
-M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
S: Maintained
F: hw/ssi/*
F: hw/block/m25p80.c
+X: hw/ssi/xilinx_*
+
+Xilinx SPI
+M: Alistair Francis <alistair.francis@xilinx.com>
+M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
+S: Maintained
+F: hw/ssi/xilinx_*
USB
M: Gerd Hoffmann <kraxel@redhat.com>
@@ -777,8 +793,9 @@ F: hw/scsi/megasas.c
F: hw/scsi/mfi.h
Xilinx EDK
-M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
+M: Alistair Francis <alistair.francis@xilinx.com>
+M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
S: Maintained
F: hw/*/xilinx_*
F: include/hw/xilinx.h
@@ -880,7 +897,7 @@ F: include/hw/cpu/icc_bus.h
F: hw/cpu/icc_bus.c
Device Tree
-M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: device_tree.[ch]
diff --git a/cputlb.c b/cputlb.c
index a50608676c..4bc6c24e11 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -69,6 +69,47 @@ void tlb_flush(CPUState *cpu, int flush_global)
tlb_flush_count++;
}
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+#if defined(DEBUG_TLB)
+ printf("tlb_flush_by_mmuidx:");
+#endif
+ /* must reset current TB so that interrupts cannot modify the
+ links while we are modifying them */
+ cpu->current_tb = NULL;
+
+ for (;;) {
+ int mmu_idx = va_arg(argp, int);
+
+ if (mmu_idx < 0) {
+ break;
+ }
+
+#if defined(DEBUG_TLB)
+ printf(" %d", mmu_idx);
+#endif
+
+ memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
+ memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+ }
+
+#if defined(DEBUG_TLB)
+ printf("\n");
+#endif
+
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+}
+
+void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+ va_list argp;
+ va_start(argp, cpu);
+ v_tlb_flush_by_mmuidx(cpu, argp);
+ va_end(argp);
+}
+
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->addr_read &
@@ -121,6 +162,62 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tb_flush_jmp_cache(cpu, addr);
}
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
+{
+ CPUArchState *env = cpu->env_ptr;
+ int i, k;
+ va_list argp;
+
+ va_start(argp, addr);
+
+#if defined(DEBUG_TLB)
+ printf("tlb_flush_page_by_mmu_idx: " TARGET_FMT_lx, addr);
+#endif
+ /* Check if we need to flush due to large pages. */
+ if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+ printf(" forced full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+ v_tlb_flush_by_mmuidx(cpu, argp);
+ va_end(argp);
+ return;
+ }
+ /* must reset current TB so that interrupts cannot modify the
+ links while we are modifying them */
+ cpu->current_tb = NULL;
+
+ addr &= TARGET_PAGE_MASK;
+ i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+
+ for (;;) {
+ int mmu_idx = va_arg(argp, int);
+
+ if (mmu_idx < 0) {
+ break;
+ }
+
+#if defined(DEBUG_TLB)
+ printf(" %d", mmu_idx);
+#endif
+
+ tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+
+ /* check whether there are vltb entries that need to be flushed */
+ for (k = 0; k < CPU_VTLB_SIZE; k++) {
+ tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
+ }
+ }
+ va_end(argp);
+
+#if defined(DEBUG_TLB)
+ printf("\n");
+#endif
+
+ tb_flush_jmp_cache(cpu, addr);
+}
+
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 62ef4ceb32..388baef76e 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -101,6 +101,21 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
qemu_irq gic_spi[GIC_NUM_SPI_INTR];
Error *err = NULL;
+ /* Create the four OCM banks */
+ for (i = 0; i < XLNX_ZYNQMP_NUM_OCM_BANKS; i++) {
+ char *ocm_name = g_strdup_printf("zynqmp.ocm_ram_bank_%d", i);
+
+ memory_region_init_ram(&s->ocm_ram[i], NULL, ocm_name,
+ XLNX_ZYNQMP_OCM_RAM_SIZE, &error_abort);
+ vmstate_register_ram_global(&s->ocm_ram[i]);
+ memory_region_add_subregion(get_system_memory(),
+ XLNX_ZYNQMP_OCM_RAM_0_ADDRESS +
+ i * XLNX_ZYNQMP_OCM_RAM_SIZE,
+ &s->ocm_ram[i]);
+
+ g_free(ocm_name);
+ }
+
qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32);
qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2);
qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", XLNX_ZYNQMP_NUM_APU_CPUS);
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 29775c012c..fbc6dcb90c 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -96,8 +96,46 @@ bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ * @flush_global: ignored
+ *
+ * Flush the entire TLB for the specified CPU.
+ * The flush_global flag is in theory an indicator of whether the whole
+ * TLB should be flushed, or only those entries not marked global.
+ * In practice QEMU does not implement any global/not global flag for
+ * TLB entries, and the argument is ignored.
+ */
void tlb_flush(CPUState *cpu, int flush_global);
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, ...);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
@@ -115,6 +153,15 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
+
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+ target_ulong addr, ...)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index c379632f2a..6ccb57b187 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -32,6 +32,10 @@
#define XLNX_ZYNQMP_NUM_GEMS 4
#define XLNX_ZYNQMP_NUM_UARTS 2
+#define XLNX_ZYNQMP_NUM_OCM_BANKS 4
+#define XLNX_ZYNQMP_OCM_RAM_0_ADDRESS 0xFFFC0000
+#define XLNX_ZYNQMP_OCM_RAM_SIZE 0x10000
+
#define XLNX_ZYNQMP_GIC_REGIONS 2
/* ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
@@ -52,6 +56,8 @@ typedef struct XlnxZynqMPState {
ARMCPU rpu_cpu[XLNX_ZYNQMP_NUM_RPU_CPUS];
GICState gic;
MemoryRegion gic_mr[XLNX_ZYNQMP_GIC_REGIONS][XLNX_ZYNQMP_GIC_ALIASES];
+ MemoryRegion ocm_ram[XLNX_ZYNQMP_NUM_OCM_BANKS];
+
CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS];
CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS];
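
As a side note (not part of the patch): the constants added above place the four OCM banks back to back at the top of the 32-bit address space. A standalone sketch that prints the resulting ranges, with the values copied from the hunk above:

    #include <stdio.h>

    /* Values copied from include/hw/arm/xlnx-zynqmp.h as changed above. */
    #define XLNX_ZYNQMP_NUM_OCM_BANKS     4
    #define XLNX_ZYNQMP_OCM_RAM_0_ADDRESS 0xFFFC0000u
    #define XLNX_ZYNQMP_OCM_RAM_SIZE      0x10000u

    int main(void)
    {
        for (unsigned i = 0; i < XLNX_ZYNQMP_NUM_OCM_BANKS; i++) {
            unsigned base = XLNX_ZYNQMP_OCM_RAM_0_ADDRESS +
                            i * XLNX_ZYNQMP_OCM_RAM_SIZE;
            /* Bank bases: 0xFFFC0000, 0xFFFD0000, 0xFFFE0000, 0xFFFF0000 */
            printf("OCM bank %u: 0x%08X-0x%08X\n", i, base,
                   base + XLNX_ZYNQMP_OCM_RAM_SIZE - 1);
        }
        return 0;
    }
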
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 2e680da1fc..31825d34a1 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -1284,6 +1284,9 @@ typedef enum CPAccessResult {
/* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
CP_ACCESS_TRAP_EL2 = 3,
CP_ACCESS_TRAP_EL3 = 4,
+ /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
} CPAccessResult;
/* Access functions for coprocessor registers. These cannot fail and
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 1568aa6617..7df1f0684d 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -1022,6 +1022,10 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
.resetvalue = 0 },
+ { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
+ .resetvalue = 0 },
/* For non-long-descriptor page tables these are PRRR and NMRR;
* regardless they still act as reads-as-written for QEMU.
*/
@@ -1715,12 +1719,17 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
if (ri->opc2 & 4) {
- /* Other states are only available with TrustZone; in
- * a non-TZ implementation these registers don't exist
- * at all, which is an Uncategorized trap. This underdecoding
- * is safe because the reginfo is NO_RAW.
+ /* The ATS12NSO* operations must trap to EL3 if executed in
+ * Secure EL1 (which can only happen if EL3 is AArch64).
+ * They are simply UNDEF if executed from NS EL1.
+ * They function normally from EL2 or EL3.
*/
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ if (arm_current_el(env) == 1) {
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
}
return CP_ACCESS_OK;
}
@@ -1840,6 +1849,25 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ uint64_t par64;
+
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -1867,10 +1895,10 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
break;
case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = ARMMMUIdx_S12NSE1;
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
break;
case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = ARMMMUIdx_S12NSE0;
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
break;
default:
g_assert_not_reached();
@@ -1887,6 +1915,7 @@ static const ARMCPRegInfo vapa_cp_reginfo[] = {
offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write },
#ifndef CONFIG_USER_ONLY
+ /* This underdecoding is safe because the reginfo is NO_RAW. */
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_W, .accessfn = ats_access,
.writefn = ats_write, .type = ARM_CP_NO_RAW },
@@ -2478,65 +2507,244 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
* Page D4-1736 (DDI0487A.b)
*/
-static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+ }
+}
+
+static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
- /* Invalidate by VA (AArch64 version) */
+ bool sec = arm_is_secure_below_el3(env);
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+ ARMMMUIdx_S2NS, -1);
+ } else {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+}
+
+static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
+}
+
+static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ bool sec = arm_is_secure_below_el3(env);
+ bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else if (has_el2) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
+ } else {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
+ }
+}
+
+static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
+ }
+}
+
+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page(CPU(cpu), pageaddr);
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
+ ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
}
-static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
- /* Invalidate by VA, all ASIDs (AArch64 version) */
+ /* Invalidate by VA, EL2
+ * Currently handles both VAE2 and VALE2, since we don't support
+ * flush-last-level-only.
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page(CPU(cpu), pageaddr);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
}
-static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- /* Invalidate by ASID (AArch64 version) */
+ /* Invalidate by VA, EL3
+ * Currently handles both VAE3 and VALE3, since we don't support
+ * flush-last-level-only.
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
- int asid = extract64(value, 48, 16);
- tlb_flush(CPU(cpu), asid == 0);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
}
-static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
+ bool sec = arm_is_secure_below_el3(env);
CPUState *other_cs;
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush_page(other_cs, pageaddr);
+ if (sec) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
+ ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
}
}
-static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
CPUState *other_cs;
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush_page(other_cs, pageaddr);
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
}
}
-static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
CPUState *other_cs;
- int asid = extract64(value, 48, 16);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush(other_cs, asid == 0);
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
+ }
+}
+
+static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by IPA. This has to invalidate any structures that
+ * contain only stage 2 translation information, but does not need
+ * to apply to structures that contain combined stage 1 and stage 2
+ * translation information.
+ * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 48);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+}
+
+static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 48);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
}
}
@@ -2672,62 +2880,86 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NOP },
/* TLBI operations */
- { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_write },
- { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_is_write },
{ .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_is_write },
+ .writefn = tlbi_aa64_vmalle1is_write },
{ .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_asid_is_write },
+ .writefn = tlbi_aa64_vmalle1is_write },
{ .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_write },
+ .writefn = tlbi_aa64_vmalle1_write },
{ .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_asid_write },
+ .writefn = tlbi_aa64_vmalle1_write },
{ .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1_write },
+ { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
/* 64 bit address translation operations */
{ .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
@@ -2742,6 +2974,25 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
#endif
/* TLB invalidate last level of translation table walk */
{ .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
@@ -2836,6 +3087,22 @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
{ .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
.opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
@@ -2951,6 +3218,23 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
.access = PL2_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL2_RW, .writefn = vmsa_tcr_el1_write,
@@ -2974,16 +3258,51 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
.type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiall_write },
+ .writefn = tlbi_aa64_alle2_write },
{ .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
.type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
{ .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
.type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
+ /* Unlike the other EL2-related AT operations, these must
+ * UNDEF from EL3 if EL2 is not implemented, which is why we
+ * define them here rather than with the rest of the AT ops.
+ */
+ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+ * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+ * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+ * to behave as if SCR.NS was 1.
+ */
+ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
{ .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
/* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
@@ -3089,6 +3408,46 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
.access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
+ { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
+ { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3_write },
+ { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
+ { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
REGINFO_SENTINEL
};
@@ -3883,13 +4242,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_AUXCR)) {
- ARMCPRegInfo auxcr = {
- .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
- .access = PL1_RW, .type = ARM_CP_CONST,
- .resetvalue = cpu->reset_auxcr
+ ARMCPRegInfo auxcr_reginfo[] = {
+ { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST,
+ .resetvalue = cpu->reset_auxcr },
+ { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
};
- define_one_arm_cp_reg(cpu, &auxcr);
+ define_arm_cp_regs(cpu, auxcr_reginfo);
}
if (arm_feature(env, ARM_FEATURE_CBAR)) {
@@ -5937,6 +6305,11 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
if (el > 1) {
ttbr1_valid = false;
}
+ } else {
+ /* There is no TTBR1 for EL2 */
+ if (el == 2) {
+ ttbr1_valid = false;
+ }
}
/* Determine whether this address is in the region controlled by
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 663c05d1d2..1425a1d4bb 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -444,6 +444,14 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
target_el = exception_target_el(env);
syndrome = syn_uncategorized();
break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
+ target_el = 2;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
+ target_el = 3;
+ syndrome = syn_uncategorized();
+ break;
default:
g_assert_not_reached();
}