Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/arm-powerctl.c   |   3
-rw-r--r--  target/arm/cpu.c            |  41
-rw-r--r--  target/arm/cpu.h            |  12
-rw-r--r--  target/arm/cpu64.c          |  75
-rw-r--r--  target/arm/helper.c         | 139
-rw-r--r--  target/arm/translate-a64.c  |  59
6 files changed, 227 insertions(+), 102 deletions(-)
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index 2b856930fb..f9de5164e5 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -120,11 +120,8 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
if (info->target_aa64) {
target_cpu->env.xregs[0] = info->context_id;
- target_cpu->env.thumb = false;
} else {
target_cpu->env.regs[0] = info->context_id;
- target_cpu->env.thumb = info->entry & 1;
- info->entry &= 0xfffffffe;
}
/* Start the new CPU at the requested address */
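Note: the deleted lines stop powerctl from managing the Thumb bit itself;
arm_cpu_set_pc() in the cpu.c hunk below now derives it from bit 0 of the
target address. A minimal sketch of the AArch32 interworking rule this
relies on (standalone illustration, not the QEMU code):

    /* Bit 0 of a branch target selects Thumb state; the PC keeps only
     * the aligned part, e.g. 0x8001 -> Thumb entry at 0x8000. */
    static void set_pc_a32(uint32_t value, uint32_t *pc, int *thumb)
    {
        *thumb = value & 1;
        *pc = value & ~1u;
    }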
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index d6da3f4fed..3874dc9875 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -40,8 +40,31 @@
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
- cpu->env.regs[15] = value;
+ if (is_a64(env)) {
+ env->pc = value;
+ env->thumb = 0;
+ } else {
+ env->regs[15] = value & ~1;
+ env->thumb = value & 1;
+ }
+}
+
+static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * It's OK to look at env for the current mode here, because it's
+ * never possible for an AArch64 TB to chain to an AArch32 TB.
+ */
+ if (is_a64(env)) {
+ env->pc = tb->pc;
+ } else {
+ env->regs[15] = tb->pc;
+ }
}
static bool arm_cpu_has_work(CPUState *cs)
@@ -162,6 +185,9 @@ static void arm_cpu_reset(CPUState *s)
env->pstate = PSTATE_MODE_EL0t;
/* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
+ /* Enable all PAC keys. */
+ env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
+ SCTLR_EnDA | SCTLR_EnDB);
/* Enable all PAC instructions */
env->cp15.hcr_el2 |= HCR_API;
env->cp15.scr_el3 |= SCR_API;
@@ -836,6 +862,13 @@ static void arm_cpu_finalizefn(Object *obj)
QLIST_REMOVE(hook, node);
g_free(hook);
}
+#ifndef CONFIG_USER_ONLY
+ if (cpu->pmu_timer) {
+ timer_del(cpu->pmu_timer);
+ timer_deinit(cpu->pmu_timer);
+ timer_free(cpu->pmu_timer);
+ }
+#endif
}
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
@@ -1045,6 +1078,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
}
+
+#ifndef CONFIG_USER_ONLY
+ cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
+ cpu);
+#endif
} else {
cpu->id_aa64dfr0 &= ~0xf00;
cpu->pmceid0 = 0;
@@ -2087,6 +2125,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
+ cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
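Note: the pmu_timer added above follows QEMU's usual one-shot timer
lifecycle: created against QEMU_CLOCK_VIRTUAL in realize, torn down in
finalize, and re-armed from its own callback path (pmu_op_finish(), in the
helper.c hunks below). A rough sketch of that pattern, with hypothetical
names (MyState, my_timer_cb, period_ns) that are not part of this patch:

    static void my_timer_cb(void *opaque)
    {
        MyState *s = opaque;
        /* handle expiry, then re-arm for the next deadline */
        timer_mod_ns(s->timer,
                     qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->period_ns);
    }

    /* realize */
    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_timer_cb, s);
    /* finalize */
    timer_del(s->timer);
    timer_free(s->timer);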
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index b8161cb6d7..a68bcc9fed 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -746,6 +746,11 @@ struct ARMCPU {
/* Timers used by the generic (architected) timer */
QEMUTimer *gt_timer[NUM_GTIMERS];
+ /*
+ * Timer used by the PMU. Its state is restored after migration by
+ * pmu_op_finish() - it does not need other handling during migration
+ */
+ QEMUTimer *pmu_timer;
/* GPIO outputs for generic timer */
qemu_irq gt_timer_outputs[NUM_GTIMERS];
/* GPIO output for GICv3 maintenance interrupt signal */
@@ -1005,6 +1010,11 @@ void pmccntr_op_finish(CPUARMState *env);
void pmu_op_start(CPUARMState *env);
void pmu_op_finish(CPUARMState *env);
+/*
+ * Called when a PMU counter is due to overflow
+ */
+void arm_pmu_timer_cb(void *opaque);
+
/**
* Functions to register as EL change hooks for PMU mode filtering
*/
@@ -2502,7 +2512,7 @@ bool write_cpustate_to_list(ARMCPU *cpu);
#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
-# define TARGET_VIRT_ADDR_SPACE_BITS 64
+# define TARGET_VIRT_ADDR_SPACE_BITS 48
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
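Note: without the large-VA extension, an AArch64 translation regime maps at
most a 48-bit range, so advertising 64 bits here overstated the guest-visible
address space. Keeping user-space pointers within bits [47:0] also matters
for pointer authentication, which is presumably why this lands in the same
series as the PAC enablement: the signature occupies the unused upper bits.

    bits [63:48]  PAC / unused      (16 bits)
    bits [47:0]   virtual address   (2^48 bytes = 256 TiB)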
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index e9bc461c36..7107ec8d7e 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -281,38 +281,6 @@ static void cpu_max_set_sve_vq(Object *obj, Visitor *v, const char *name,
error_propagate(errp, err);
}
-#ifdef CONFIG_USER_ONLY
-static void cpu_max_get_packey(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
- const uint64_t *bit = opaque;
- bool enabled = (cpu->env.cp15.sctlr_el[1] & *bit) != 0;
-
- visit_type_bool(v, name, &enabled, errp);
-}
-
-static void cpu_max_set_packey(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
- Error *err = NULL;
- const uint64_t *bit = opaque;
- bool enabled;
-
- visit_type_bool(v, name, &enabled, errp);
-
- if (!err) {
- if (enabled) {
- cpu->env.cp15.sctlr_el[1] |= *bit;
- } else {
- cpu->env.cp15.sctlr_el[1] &= ~*bit;
- }
- }
- error_propagate(errp, err);
-}
-#endif
-
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
* otherwise, a CPU with as many features enabled as our emulation supports.
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
@@ -388,34 +356,6 @@ static void aarch64_max_initfn(Object *obj)
*/
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
cpu->dcz_blocksize = 7; /* 512 bytes */
-
- /*
- * Note that Linux will enable all of the keys at once.
- * But doing it this way will allow experimentation beyond that.
- */
- {
- static const uint64_t apia_bit = SCTLR_EnIA;
- static const uint64_t apib_bit = SCTLR_EnIB;
- static const uint64_t apda_bit = SCTLR_EnDA;
- static const uint64_t apdb_bit = SCTLR_EnDB;
-
- object_property_add(obj, "apia", "bool", cpu_max_get_packey,
- cpu_max_set_packey, NULL,
- (void *)&apia_bit, &error_fatal);
- object_property_add(obj, "apib", "bool", cpu_max_get_packey,
- cpu_max_set_packey, NULL,
- (void *)&apib_bit, &error_fatal);
- object_property_add(obj, "apda", "bool", cpu_max_get_packey,
- cpu_max_set_packey, NULL,
- (void *)&apda_bit, &error_fatal);
- object_property_add(obj, "apdb", "bool", cpu_max_get_packey,
- cpu_max_set_packey, NULL,
- (void *)&apdb_bit, &error_fatal);
-
- /* Enable all PAC keys by default. */
- cpu->env.cp15.sctlr_el[1] |= SCTLR_EnIA | SCTLR_EnIB;
- cpu->env.cp15.sctlr_el[1] |= SCTLR_EnDA | SCTLR_EnDB;
- }
#endif
cpu->sve_max_vq = ARM_MAX_VQ;
@@ -480,20 +420,6 @@ static void aarch64_cpu_finalizefn(Object *obj)
{
}
-static void aarch64_cpu_set_pc(CPUState *cs, vaddr value)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- /* It's OK to look at env for the current mode here, because it's
- * never possible for an AArch64 TB to chain to an AArch32 TB.
- * (Otherwise we would need to use synchronize_from_tb instead.)
- */
- if (is_a64(&cpu->env)) {
- cpu->env.pc = value;
- } else {
- cpu->env.regs[15] = value;
- }
-}
-
static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
return g_strdup("aarch64");
@@ -504,7 +430,6 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
CPUClass *cc = CPU_CLASS(oc);
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
- cc->set_pc = aarch64_cpu_set_pc;
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
cc->gdb_num_core_regs = 34;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 66faebea8e..d070879894 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -977,6 +977,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
+#define PMCRLC 0x40
#define PMCRDP 0x10
#define PMCRD 0x8
#define PMCRC 0x4
@@ -1020,6 +1021,13 @@ typedef struct pm_event {
* counters hold a difference from the return value from this function
*/
uint64_t (*get_count)(CPUARMState *);
+ /*
+ * Return how many nanoseconds it will take (at a minimum) for count events
+ * to occur. A negative value indicates the counter will never overflow, or
+ * that the counter has otherwise arranged for the overflow bit to be set
+ * and the PMU interrupt to be raised on overflow.
+ */
+ int64_t (*ns_per_count)(uint64_t);
} pm_event;
static bool event_always_supported(CPUARMState *env)
@@ -1036,6 +1044,11 @@ static uint64_t swinc_get_count(CPUARMState *env)
return 0;
}
+static int64_t swinc_ns_per(uint64_t ignored)
+{
+ return -1;
+}
+
/*
* Return the underlying cycle count for the PMU cycle counters. If we're in
* usermode, simply return 0.
@@ -1051,6 +1064,11 @@ static uint64_t cycles_get_count(CPUARMState *env)
}
#ifndef CONFIG_USER_ONLY
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+ return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
static bool instructions_supported(CPUARMState *env)
{
return use_icount == 1 /* Precise instruction counting */;
@@ -1060,21 +1078,29 @@ static uint64_t instructions_get_count(CPUARMState *env)
{
return (uint64_t)cpu_get_icount_raw();
}
+
+static int64_t instructions_ns_per(uint64_t icount)
+{
+ return cpu_icount_to_ns((int64_t)icount);
+}
#endif
static const pm_event pm_events[] = {
{ .number = 0x000, /* SW_INCR */
.supported = event_always_supported,
.get_count = swinc_get_count,
+ .ns_per_count = swinc_ns_per,
},
#ifndef CONFIG_USER_ONLY
{ .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
.supported = instructions_supported,
.get_count = instructions_get_count,
+ .ns_per_count = instructions_ns_per,
},
{ .number = 0x011, /* CPU_CYCLES, Cycle */
.supported = event_always_supported,
.get_count = cycles_get_count,
+ .ns_per_count = cycles_ns_per,
}
#endif
};
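Note: ns_per_count() inverts get_count() into a deadline. For CPU_CYCLES the
conversion is ns = cycles * NANOSECONDS_PER_SECOND / ARM_CPU_FREQ; assuming
ARM_CPU_FREQ is QEMU's fixed 1 GHz model frequency, that ratio is exactly 1,
so cycles_ns_per() above returns the value it is given (the inverted
frequency/ns-per-second ordering it uses is only equivalent because the
ratio is 1). SW_INCR has no time model at all, hence swinc_ns_per() returns
-1, meaning "never predictable; detect overflow at write time instead".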
@@ -1293,6 +1319,13 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
return enabled && !prohibited && !filtered;
}
+static void pmu_update_irq(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
/*
* Ensure c15_ccnt is the guest-visible count so that operations such as
* enabling/disabling the counter or filtering, modifying the count itself,
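Note: the interrupt line computed in pmu_update_irq() above is "some counter
has both overflowed and been granted an interrupt, and the PMU as a whole is
enabled". Worked example:

    PMOVSR  = 0b0101   (counters 0 and 2 have overflowed)
    PMINTEN = 0b0100   (only counter 2 may interrupt)
    PMOVSR & PMINTEN = 0b0100 != 0  -> assert, provided PMCR.E is set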
@@ -1310,7 +1343,16 @@ void pmccntr_op_start(CPUARMState *env)
eff_cycles /= 64;
}
- env->cp15.c15_ccnt = eff_cycles - env->cp15.c15_ccnt_delta;
+ uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
+
+ uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
+ 1ull << 63 : 1ull << 31;
+ if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1 << 31);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c15_ccnt = new_pmccntr;
}
env->cp15.c15_ccnt_delta = cycles;
}
@@ -1323,13 +1365,27 @@ void pmccntr_op_start(CPUARMState *env)
void pmccntr_op_finish(CPUARMState *env)
{
if (pmu_counter_enabled(env, 31)) {
- uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
+#ifndef CONFIG_USER_ONLY
+ /* Calculate when the counter will next overflow */
+ uint64_t remaining_cycles = -env->cp15.c15_ccnt;
+ if (!(env->cp15.c9_pmcr & PMCRLC)) {
+ remaining_cycles = (uint32_t)remaining_cycles;
+ }
+ int64_t overflow_in = cycles_ns_per(remaining_cycles);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ overflow_in;
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+#endif
+ uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
prev_cycles /= 64;
}
-
env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
}
}
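Note: the remaining-cycles arithmetic above is two's complement: for an
N-bit counter at value c, the distance to the next wrap is 2^N - c, which is
exactly the unsigned negation -c truncated to N bits. Worked example for the
32-bit (PMCR.LC clear) case:

    c = 0xfffffff0
    (uint32_t)-c = 0x00000010 = 16 cycles until overflow

The same identity appears as UINT32_MAX - cnt + 1 in pmevcntr_op_finish()
below. timer_mod_anticipate_ns() only ever moves a pending deadline earlier,
so the single shared pmu_timer fires for whichever counter expires first.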
@@ -1345,8 +1401,13 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
}
if (pmu_counter_enabled(env, counter)) {
- env->cp15.c14_pmevcntr[counter] =
- count - env->cp15.c14_pmevcntr_delta[counter];
+ uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+
+ if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
+ env->cp15.c9_pmovsr |= (1 << counter);
+ pmu_update_irq(env);
+ }
+ env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
}
env->cp15.c14_pmevcntr_delta[counter] = count;
}
@@ -1354,6 +1415,21 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
if (pmu_counter_enabled(env, counter)) {
+#ifndef CONFIG_USER_ONLY
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint16_t event_idx = supported_event_map[event];
+ uint64_t delta = UINT32_MAX -
+ (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
+ int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ overflow_in;
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+#endif
+
env->cp15.c14_pmevcntr_delta[counter] -=
env->cp15.c14_pmevcntr[counter];
}
@@ -1387,6 +1463,20 @@ void pmu_post_el_change(ARMCPU *cpu, void *ignored)
pmu_op_finish(&cpu->env);
}
+void arm_pmu_timer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ /*
+ * Update all the counter values based on the current underlying counts,
+ * triggering interrupts to be raised, if necessary. pmu_op_finish() also
+ * has the effect of setting the cpu->pmu_timer to the next earliest time a
+ * counter may expire.
+ */
+ pmu_op_start(&cpu->env);
+ pmu_op_finish(&cpu->env);
+}
+
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -1423,7 +1513,20 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* counter is SW_INCR */
(env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
pmevcntr_op_start(env, i);
- env->cp15.c14_pmevcntr[i]++;
+
+ /*
+ * Detect if this write causes an overflow since we can't predict
+ * PMSWINC overflows like we can for other events
+ */
+ uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+ env->cp15.c9_pmovsr |= (1 << i);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
+
pmevcntr_op_finish(env, i);
}
}
@@ -1508,6 +1611,7 @@ static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
value &= pmu_counter_mask(env);
env->cp15.c9_pmovsr &= ~value;
+ pmu_update_irq(env);
}
static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1515,6 +1619,7 @@ static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
value &= pmu_counter_mask(env);
env->cp15.c9_pmovsr |= value;
+ pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1701,6 +1806,7 @@ static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* We have no event counters so only the C bit can be changed */
value &= pmu_counter_mask(env);
env->cp15.c9_pminten |= value;
+ pmu_update_irq(env);
}
static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1708,6 +1814,7 @@ static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
value &= pmu_counter_mask(env);
env->cp15.c9_pminten &= ~value;
+ pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1752,6 +1859,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_lor, cpu)) {
valid_mask |= SCR_TLOR;
}
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ valid_mask |= SCR_API | SCR_APK;
+ }
/* Clear all-context RES0 bits. */
value &= valid_mask;
@@ -1846,7 +1956,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenclr_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
- .access = PL0_RW,
+ .access = PL0_RW, .type = ARM_CP_IO,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
.accessfn = pmreg_access,
.writefn = pmovsr_write,
@@ -1854,16 +1964,18 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
{ .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
.access = PL0_RW, .accessfn = pmreg_access,
- .type = ARM_CP_ALIAS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
.writefn = pmovsr_write,
.raw_writefn = raw_write },
{ .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NO_RAW,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
.writefn = pmswinc_write },
{ .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NO_RAW,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
.writefn = pmswinc_write },
{ .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
.access = PL0_RW, .type = ARM_CP_ALIAS,
@@ -2050,14 +2162,14 @@ static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
/* PMOVSSET is not implemented in v7 before v7ve */
{ .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
.access = PL0_RW, .accessfn = pmreg_access,
- .type = ARM_CP_ALIAS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
.writefn = pmovsset_write,
.raw_writefn = raw_write },
{ .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
.access = PL0_RW, .accessfn = pmreg_access,
- .type = ARM_CP_ALIAS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
.writefn = pmovsset_write,
.raw_writefn = raw_write },
@@ -4449,6 +4561,9 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_lor, cpu)) {
valid_mask |= HCR_TLOR;
}
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ valid_mask |= HCR_API | HCR_APK;
+ }
/* Clear RES0 bits. */
value &= valid_mask;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 4d28a27c3b..a1997e3ae2 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2036,7 +2036,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
- if (op3 != 2 || op3 != 3) {
+ if ((op3 & ~1) != 2) {
goto do_unallocated;
}
if (s->pauth_active) {
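Note: the old test op3 != 2 || op3 != 3 is true for every value of op3 (any
value differs from at least one of the two constants), so these pauth branch
encodings were always sent to do_unallocated. Masking off the low bit first
accepts exactly the pair {2, 3}:

    op3 = 2: (2 & ~1) == 2 -> allowed
    op3 = 3: (3 & ~1) == 2 -> allowed
    op3 = 5: (5 & ~1) == 4 -> unallocated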
@@ -2144,7 +2144,11 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
break;
case 0x6a: /* Exception generation / System */
if (insn & (1 << 24)) {
- disas_system(s, insn);
+ if (extract32(insn, 22, 2) == 0) {
+ disas_system(s, insn);
+ } else {
+ unallocated_encoding(s);
+ }
} else {
disas_exc(s, insn);
}
@@ -2799,7 +2803,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
- if (is_unpriv) {
+ if (idx != 0) {
unallocated_encoding(s);
return;
}
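Note: as I read this decoder, idx (insn bits [11:10], decoded earlier in
disas_ldst_reg_imm9) selects the addressing form:

    0 -> unscaled offset (PRFUM exists here)  : valid
    1 -> post-index writeback                 : unallocated for prefetch
    2 -> unprivileged (LDTR/STTR space)       : unallocated for prefetch
    3 -> pre-index writeback                  : unallocated for prefetch

Rejecting only is_unpriv (idx == 2) let the two writeback forms through;
idx != 0 catches all three.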
@@ -3245,6 +3249,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
int size = extract32(insn, 10, 2);
int opcode = extract32(insn, 12, 4);
bool is_store = !extract32(insn, 22, 1);
@@ -3264,6 +3269,11 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
return;
}
+ if (!is_postidx && rm != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
/* From the shared decode logic */
switch (opcode) {
case 0x0:
@@ -3363,7 +3373,6 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
}
if (is_postidx) {
- int rm = extract32(insn, 16, 5);
if (rm == 31) {
tcg_gen_mov_i64(tcg_rn, tcg_addr);
} else {
@@ -3400,6 +3409,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
int size = extract32(insn, 10, 2);
int S = extract32(insn, 12, 1);
int opc = extract32(insn, 13, 3);
@@ -3415,6 +3425,15 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
int ebytes, xs;
TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
+ if (extract32(insn, 31, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!is_postidx && rm != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
switch (scale) {
case 3:
if (!is_load || S) {
@@ -3492,7 +3511,6 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
}
if (is_postidx) {
- int rm = extract32(insn, 16, 5);
if (rm == 31) {
tcg_gen_mov_i64(tcg_rn, tcg_addr);
} else {
@@ -4183,6 +4201,7 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
int imm3 = extract32(insn, 10, 3);
int option = extract32(insn, 13, 3);
int rm = extract32(insn, 16, 5);
+ int opt = extract32(insn, 22, 2);
bool setflags = extract32(insn, 29, 1);
bool sub_op = extract32(insn, 30, 1);
bool sf = extract32(insn, 31, 1);
@@ -4191,7 +4210,7 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
TCGv_i64 tcg_rd;
TCGv_i64 tcg_result;
- if (imm3 > 4) {
+ if (imm3 > 4 || opt != 0) {
unallocated_encoding(s);
return;
}
@@ -5617,11 +5636,17 @@ static void handle_fp_fcvt(DisasContext *s, int opcode,
*/
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
+ int mos = extract32(insn, 29, 3);
int type = extract32(insn, 22, 2);
int opcode = extract32(insn, 15, 6);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
+ if (mos) {
+ unallocated_encoding(s);
+ return;
+ }
+
switch (opcode) {
case 0x4: case 0x5: case 0x7:
{
@@ -5848,13 +5873,14 @@ static void handle_fp_2src_half(DisasContext *s, int opcode,
*/
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
+ int mos = extract32(insn, 29, 3);
int type = extract32(insn, 22, 2);
int rd = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int rm = extract32(insn, 16, 5);
int opcode = extract32(insn, 12, 4);
- if (opcode > 8) {
+ if (opcode > 8 || mos) {
unallocated_encoding(s);
return;
}
@@ -6009,6 +6035,7 @@ static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
*/
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
+ int mos = extract32(insn, 29, 3);
int type = extract32(insn, 22, 2);
int rd = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
@@ -6017,6 +6044,11 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
bool o0 = extract32(insn, 15, 1);
bool o1 = extract32(insn, 21, 1);
+ if (mos) {
+ unallocated_encoding(s);
+ return;
+ }
+
switch (type) {
case 0:
if (!fp_access_check(s)) {
@@ -6086,12 +6118,19 @@ uint64_t vfp_expand_imm(int size, uint8_t imm8)
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
int rd = extract32(insn, 0, 5);
+ int imm5 = extract32(insn, 5, 5);
int imm8 = extract32(insn, 13, 8);
int type = extract32(insn, 22, 2);
+ int mos = extract32(insn, 29, 3);
uint64_t imm;
TCGv_i64 tcg_res;
TCGMemOp sz;
+ if (mos || imm5) {
+ unallocated_encoding(s);
+ return;
+ }
+
switch (type) {
case 0:
sz = MO_32;
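Note: the mos checks added to disas_fp_1src/2src/3src and disas_fp_imm all
tighten the same RES0 fields: bits [31:29] of the scalar FP data-processing
encodings are M:0:S and must be zero, and FMOV (scalar, immediate) also
requires imm5 (bits [9:5]) to be zero. Sketch of the FMOV immediate layout
as I read the A64 encoding:

    31 30 29 | 28-24 | 23-22 | 21 | 20-13 | 12-10 | 9-5  | 4-0
    M  0  S  | 11110 | type  | 1  | imm8  |  100  | imm5 | Rd
    ^-- M, S and imm5 are RES0; nonzero values are now unallocated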
@@ -12602,7 +12641,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
break;
case 0x0e: /* SDOT */
case 0x1e: /* UDOT */
- if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
+ if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
unallocated_encoding(s);
return;
}
@@ -12611,7 +12650,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
case 0x13: /* FCMLA #90 */
case 0x15: /* FCMLA #180 */
case 0x17: /* FCMLA #270 */
- if (!dc_isar_feature(aa64_fcma, s)) {
+ if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
unallocated_encoding(s);
return;
}
@@ -12641,7 +12680,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
case 2: /* complex fp */
/* Each indexable element is a complex pair. */
- size <<= 1;
+ size += 1;
switch (size) {
case MO_32:
if (h && !is_q) {
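Note on the final hunk: TCGMemOp sizes are log2 of the access width in bytes
(MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3). An indexable complex pair is
twice the element width, which in log form is size + 1, not size << 1:

    half pair   (2 x 16 bit): MO_16 + 1 = MO_32  (old code: 1 << 1 = 2, right by luck)
    single pair (2 x 32 bit): MO_32 + 1 = MO_64  (old code: 2 << 1 = 4, not a valid MO_*)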