author    Peter Maydell <peter.maydell@linaro.org>  2014-01-04 22:15:44 +0000
committer Peter Maydell <peter.maydell@linaro.org>  2014-01-04 22:15:44 +0000
commit    f5a0a5a5abe7b72ad14b8884681a25fcf3e91c16
tree      8d315c5d94b16cc4bc3ac8f82fbdc41f26e1033f
parent    6e6efd612f58726189893fd4d948b7fc10acd872
target-arm: Update generic cpreg code for AArch64
Update the generic cpreg support code to also handle AArch64: AArch64-visible
registers coexist in the same hash table with AArch32-visible ones, with a bit
in the hash key distinguishing them.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
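[Editor's note: the hash-key scheme the commit message refers to can be exercised in isolation with the following standalone sketch. The CP_REG_* constants and the two conversion helpers are copied from this patch; main() and the example register IDs are purely illustrative and not part of the commit.]

/* Standalone illustration of the hash-key scheme introduced by this patch:
 * a single bit (CP_REG_AA64_MASK) in the 32 bit hashtable key records
 * whether an entry is the AArch64 or the AArch32 view of a register, so
 * both can live in the same hash table and still map back to distinct
 * 64 bit KVM register IDs.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CP_REG_SIZE_MASK   0x00f0000000000000ULL
#define CP_REG_SIZE_U32    0x0020000000000000ULL
#define CP_REG_SIZE_U64    0x0030000000000000ULL
#define CP_REG_ARM         0x4000000000000000ULL
#define CP_REG_ARM64       0x6000000000000000ULL
#define CP_REG_ARCH_MASK   0xff00000000000000ULL

#define CP_REG_AA64_SHIFT  28
#define CP_REG_AA64_MASK   (1 << CP_REG_AA64_SHIFT)

/* Truncate a 64 bit KVM ID to the 32 bit hashtable key (as in the patch) */
static uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
        cpregid |= (1 << 15);
    }
    return cpregid;
}

/* Reconstruct the full KVM ID from the key (as in the patch) */
static uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}

int main(void)
{
    /* Hypothetical register IDs, chosen only to exercise the three cases */
    uint64_t ids[] = {
        CP_REG_ARM   | CP_REG_SIZE_U32 | 0x1234,  /* 32 bit AArch32 reg */
        CP_REG_ARM   | CP_REG_SIZE_U64 | 0x1234,  /* 64 bit AArch32 reg */
        CP_REG_ARM64 | CP_REG_SIZE_U64 | 0x1234,  /* AArch64 sysreg     */
    };
    for (int i = 0; i < 3; i++) {
        uint32_t key = kvm_to_cpreg_id(ids[i]);
        assert(cpreg_to_kvm_id(key) == ids[i]);   /* round trip is lossless */
        printf("kvmid %#018llx -> key %#010x\n",
               (unsigned long long)ids[i], (unsigned)key);
    }
    return 0;
}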
-rw-r--r--  target-arm/cpu.h         78
-rw-r--r--  target-arm/helper.c     105
-rw-r--r--  target-arm/kvm-consts.h  37
3 files changed, 211 insertions(+), 9 deletions(-)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 56ed591164..943046407d 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -572,18 +572,43 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
* or via MRRC/MCRR?)
* We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
* (In this case crn and opc2 should be zero.)
+ * For AArch64, there is no 32/64 bit size distinction;
+ * instead all registers have a 2 bit op0, 3 bit op1 and op2,
+ * and 4 bit CRn and CRm. The encoding patterns are chosen
+ * to be easy to convert to and from the KVM encodings, and also
+ * so that the hashtable can contain both AArch32 and AArch64
+ * registers (to allow for interprocessing where we might run
+ * 32 bit code on a 64 bit core).
*/
+/* This bit is private to our hashtable cpreg; in KVM register
+ * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
+ * in the upper bits of the 64 bit ID.
+ */
+#define CP_REG_AA64_SHIFT 28
+#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
+
#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \
(((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \
((crm) << 7) | ((opc1) << 3) | (opc2))
+#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
+ (CP_REG_AA64_MASK | \
+ ((cp) << CP_REG_ARM_COPROC_SHIFT) | \
+ ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
+ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
+ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
+ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
+ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
+
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
* version used as a key for the coprocessor register hashtable
*/
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
uint32_t cpregid = kvmid;
- if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+ if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
+ cpregid |= CP_REG_AA64_MASK;
+ } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
cpregid |= (1 << 15);
}
return cpregid;
@@ -594,11 +619,18 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
*/
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
- uint64_t kvmid = cpregid & ~(1 << 15);
- if (cpregid & (1 << 15)) {
- kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
+ uint64_t kvmid;
+
+ if (cpregid & CP_REG_AA64_MASK) {
+ kvmid = cpregid & ~CP_REG_AA64_MASK;
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
} else {
- kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
+ kvmid = cpregid & ~(1 << 15);
+ if (cpregid & (1 << 15)) {
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
+ } else {
+ kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
+ }
}
return kvmid;
}
@@ -634,6 +666,21 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0x7f
+/* Valid values for ARMCPRegInfo state field, indicating which of
+ * the AArch32 and AArch64 execution states this register is visible in.
+ * If the reginfo doesn't explicitly specify then it is AArch32 only.
+ * If the reginfo is declared to be visible in both states then a second
+ * reginfo is synthesised for the AArch32 view of the AArch64 register,
+ * such that the AArch32 view is the lower 32 bits of the AArch64 one.
+ * Note that we rely on the values of these enums as we iterate through
+ * the various states in some places.
+ */
+enum {
+ ARM_CP_STATE_AA32 = 0,
+ ARM_CP_STATE_AA64 = 1,
+ ARM_CP_STATE_BOTH = 2,
+};
+
/* Return true if cptype is a valid type field. This is used to try to
* catch errors where the sentinel has been accidentally left off the end
* of a list of registers.
@@ -655,6 +702,8 @@ static inline bool cptype_valid(int cptype)
* (ie anything visible in PL2 is visible in S-PL1, some things are only
* visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
* terminology a little and call this PL3.
+ * In AArch64 things are somewhat simpler as the PLx bits line up exactly
+ * with the ELx exception levels.
*
* If access permissions for a register are more complex than can be
* described with these bits, then use a laxer set of restrictions, and
@@ -676,6 +725,10 @@ static inline bool cptype_valid(int cptype)
static inline int arm_current_pl(CPUARMState *env)
{
+ if (env->aarch64) {
+ return extract32(env->pstate, 2, 2);
+ }
+
if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) {
return 0;
}
@@ -713,12 +766,22 @@ struct ARMCPRegInfo {
* then behave differently on read/write if necessary.
* For 64 bit registers, only crm and opc1 are relevant; crn and opc2
* must both be zero.
+ * For AArch64-visible registers, opc0 is also used.
+ * Since there are no "coprocessors" in AArch64, cp is purely used as a
+ * way to distinguish (for KVM's benefit) guest-visible system registers
+ * from demuxed ones provided to preserve the "no side effects on
+ * KVM register read/write from QEMU" semantics. cp==0x13 is guest
+ * visible (to match KVM's encoding); cp==0 will be converted to
+ * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
*/
uint8_t cp;
uint8_t crn;
uint8_t crm;
+ uint8_t opc0;
uint8_t opc1;
uint8_t opc2;
+ /* Execution state in which this register is visible: ARM_CP_STATE_* */
+ int state;
/* Register type: ARM_CP_* bits/values */
int type;
/* Access rights: PL*_[RW] */
@@ -798,6 +861,11 @@ int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
/* CPReadFn that can be used for read-as-zero behaviour */
int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value);
+/* CPResetFn that does nothing, for use if no reset is required even
+ * if fieldoffset is non zero.
+ */
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
+
static inline bool cp_access_ok(CPUARMState *env,
const ARMCPRegInfo *ri, int isread)
{
diff --git a/target-arm/helper.c b/target-arm/helper.c
index d833163a09..227a2ea015 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -1938,7 +1938,8 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
- void *opaque, int crm, int opc1, int opc2)
+ void *opaque, int state,
+ int crm, int opc1, int opc2)
{
/* Private utility function for define_one_arm_cp_reg_with_opaque():
* add a single reginfo struct to the hash table.
@@ -1946,7 +1947,34 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
uint32_t *key = g_new(uint32_t, 1);
ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
- *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
+ if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
+ /* The AArch32 view of a shared register sees the lower 32 bits
+ * of a 64 bit backing field. It is not migratable as the AArch64
+ * view handles that. AArch64 also handles reset.
+ * We assume it is a cp15 register.
+ */
+ r2->cp = 15;
+ r2->type |= ARM_CP_NO_MIGRATE;
+ r2->resetfn = arm_cp_reset_ignore;
+#ifdef HOST_WORDS_BIGENDIAN
+ if (r2->fieldoffset) {
+ r2->fieldoffset += sizeof(uint32_t);
+ }
+#endif
+ }
+ if (state == ARM_CP_STATE_AA64) {
+ /* To allow abbreviation of ARMCPRegInfo
+ * definitions, we treat cp == 0 as equivalent to
+ * the value for "standard guest-visible sysreg".
+ */
+ if (r->cp == 0) {
+ r2->cp = CP_REG_ARM64_SYSREG_CP;
+ }
+ *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
+ r2->opc0, opc1, opc2);
+ } else {
+ *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
+ }
if (opaque) {
r2->opaque = opaque;
}
@@ -2002,8 +2030,19 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
* At least one of the original and the second definition should
* include ARM_CP_OVERRIDE in its type bits -- this is just a guard
* against accidental use.
+ *
+ * The state field defines whether the register is to be
+ * visible in the AArch32 or AArch64 execution state. If the
+ * state is set to ARM_CP_STATE_BOTH then we synthesise a
+ * reginfo structure for the AArch32 view, which sees the lower
+ * 32 bits of the 64 bit register.
+ *
+ * Only registers visible in AArch64 may set r->opc0; opc0 cannot
+ * be wildcarded. AArch64 registers are always considered to be 64
+ * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
+ * the register, if any.
*/
- int crm, opc1, opc2;
+ int crm, opc1, opc2, state;
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
@@ -2012,6 +2051,52 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
/* 64 bit registers have only CRm and Opc1 fields */
assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
+ /* op0 only exists in the AArch64 encodings */
+ assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
+ /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
+ assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
+ /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
+ * encodes a minimum access level for the register. We roll this
+ * runtime check into our general permission check code, so check
+ * here that the reginfo's specified permissions are strict enough
+ * to encompass the generic architectural permission check.
+ */
+ if (r->state != ARM_CP_STATE_AA32) {
+ int mask = 0;
+ switch (r->opc1) {
+ case 0: case 1: case 2:
+ /* min_EL EL1 */
+ mask = PL1_RW;
+ break;
+ case 3:
+ /* min_EL EL0 */
+ mask = PL0_RW;
+ break;
+ case 4:
+ /* min_EL EL2 */
+ mask = PL2_RW;
+ break;
+ case 5:
+ /* unallocated encoding, so not possible */
+ assert(false);
+ break;
+ case 6:
+ /* min_EL EL3 */
+ mask = PL3_RW;
+ break;
+ case 7:
+ /* min_EL EL1, secure mode only (we don't check the latter) */
+ mask = PL1_RW;
+ break;
+ default:
+ /* broken reginfo with out-of-range opc1 */
+ assert(false);
+ break;
+ }
+ /* assert our permissions are not too lax (stricter is fine) */
+ assert((r->access & ~mask) == 0);
+ }
+
/* Check that the register definition has enough info to handle
* reads and writes if they are permitted.
*/
@@ -2028,7 +2113,14 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
for (crm = crmmin; crm <= crmmax; crm++) {
for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
- add_cpreg_to_hashtable(cpu, r, opaque, crm, opc1, opc2);
+ for (state = ARM_CP_STATE_AA32;
+ state <= ARM_CP_STATE_AA64; state++) {
+ if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
+ continue;
+ }
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ crm, opc1, opc2);
+ }
}
}
}
@@ -2063,6 +2155,11 @@ int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
return 0;
}
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+{
+ /* Helper coprocessor reset function for do-nothing-on-reset registers */
+}
+
static int bad_mode_switch(CPUARMState *env, int mode)
{
/* Return true if it is not valid for us to switch to
diff --git a/target-arm/kvm-consts.h b/target-arm/kvm-consts.h
index 2bba0bd198..0e7f889cba 100644
--- a/target-arm/kvm-consts.h
+++ b/target-arm/kvm-consts.h
@@ -29,12 +29,14 @@
#define CP_REG_SIZE_U32 0x0020000000000000ULL
#define CP_REG_SIZE_U64 0x0030000000000000ULL
#define CP_REG_ARM 0x4000000000000000ULL
+#define CP_REG_ARCH_MASK 0xff00000000000000ULL
MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT)
MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK)
MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32)
MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64)
MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM)
+MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK)
#define PSCI_FN_BASE 0x95c1ba5e
#define PSCI_FN(n) (PSCI_FN_BASE + (n))
@@ -59,6 +61,41 @@ MISMATCH_CHECK(PSCI_FN_MIGRATE, KVM_PSCI_FN_MIGRATE)
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15)
#endif
+#define CP_REG_ARM64 0x6000000000000000ULL
+#define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define CP_REG_ARM_COPROC_SHIFT 16
+#define CP_REG_ARM64_SYSREG (0x0013 << CP_REG_ARM_COPROC_SHIFT)
+#define CP_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
+#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define CP_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
+#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define CP_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
+#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define CP_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
+#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
+#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+/* No kernel define but it's useful to QEMU */
+#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT)
+
+#ifdef TARGET_AARCH64
+MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64)
+MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK)
+MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT)
+#endif
+
#undef MISMATCH_CHECK
#endif
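
[Editor's note: the helper.c hunk above synthesises an AArch32 view of each ARM_CP_STATE_BOTH register that sees the lower 32 bits of the 64 bit backing field, bumping fieldoffset by sizeof(uint32_t) on big-endian hosts. The following self-contained sketch shows why that adjustment is needed; the ToyCPUState struct, field name and aa32_view helper are hypothetical, not QEMU API, and only the offset arithmetic mirrors the patch.]

/* Sketch of the fieldoffset adjustment made for the AArch32 view of a
 * shared (ARM_CP_STATE_BOTH) register.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    uint64_t some_el1_reg;    /* hypothetical 64 bit backing field */
} ToyCPUState;

/* Read a 32 bit view of the state at the given byte offset, the way the
 * synthesised AArch32 reginfo would via its fieldoffset.
 */
static uint32_t aa32_view(ToyCPUState *env, size_t fieldoffset)
{
    uint32_t val;
    memcpy(&val, (char *)env + fieldoffset, sizeof(val));
    return val;
}

int main(void)
{
    ToyCPUState env = { .some_el1_reg = 0x1122334455667788ULL };
    size_t off = offsetof(ToyCPUState, some_el1_reg);

    /* The AArch32 view must see the *low* 32 bits (0x55667788).  On a
     * little-endian host they sit at the field's base offset; on a
     * big-endian host they sit 4 bytes further on, which is why the
     * patch does "fieldoffset += sizeof(uint32_t)" under
     * HOST_WORDS_BIGENDIAN.
     */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    off += sizeof(uint32_t);
#endif
    printf("AArch32 view reads %#010x\n", (unsigned)aa32_view(&env, off));
    return 0;
}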