Diffstat (limited to 'target-arm'):

 target-arm/cpu.h           |  122
 target-arm/helper-a64.c    |   45
 target-arm/helper-a64.h    |    4
 target-arm/helper.c        |  452
 target-arm/helper.h        |   40
 target-arm/kvm-consts.h    |   37
 target-arm/machine.c       |   12
 target-arm/neon_helper.c   |   12
 target-arm/translate-a64.c | 2790
 target-arm/translate.c     |  112
 target-arm/translate.h     |    2
 11 files changed, 3377 insertions(+), 251 deletions(-)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 56ed591164..f1307eb488 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -66,6 +66,18 @@
/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+/* The usual mapping for an AArch64 system register to its AArch32
+ * counterpart is for the 32 bit world to have access to the lower
+ * half only (with writes leaving the upper half untouched). It's
+ * therefore useful to be able to pass TCG the offset of the least
+ * significant half of a uint64_t struct member.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#else
+#define offsetoflow32(S, M) offsetof(S, M)
+#endif
+
/* Meanings of the ARMCPU object's two inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
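
As a standalone illustration (not part of the patch), the sketch below shows what offsetoflow32() computes: the byte offset of the least significant 32-bit half of a uint64_t member, which moves on big-endian hosts. The struct and field names here are made up.

/* Illustrative only: mirrors the offsetoflow32() idea outside QEMU. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct Regs {
    uint64_t tpidr;   /* hypothetical 64 bit system register field */
};

#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#else
#define offsetoflow32(S, M) offsetof(S, M)
#endif

int main(void)
{
    struct Regs r = { .tpidr = 0x1122334455667788ULL };
    uint32_t low;

    /* Read only the least significant 32 bits of the 64 bit field; a
     * 32 bit write at this offset would leave the upper half untouched.
     */
    memcpy(&low, (char *)&r + offsetoflow32(struct Regs, tpidr), sizeof(low));
    printf("%08x\n", low);   /* 55667788 on a little-endian host */
    return 0;
}
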
@@ -188,9 +200,9 @@ typedef struct CPUARMState {
uint32_t c12_vbar; /* vector base address register */
uint32_t c13_fcse; /* FCSE PID. */
uint32_t c13_context; /* Context ID. */
- uint32_t c13_tls1; /* User RW Thread register. */
- uint32_t c13_tls2; /* User RO Thread register. */
- uint32_t c13_tls3; /* Privileged Thread register. */
+ uint64_t tpidr_el0; /* User RW Thread register. */
+ uint64_t tpidrro_el0; /* User RO Thread register. */
+ uint64_t tpidr_el1; /* Privileged Thread register. */
uint32_t c14_cntfrq; /* Counter Frequency register */
uint32_t c14_cntkctl; /* Timer Control register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
@@ -266,11 +278,11 @@ typedef struct CPUARMState {
float_status fp_status;
float_status standard_fp_status;
} vfp;
- uint32_t exclusive_addr;
- uint32_t exclusive_val;
- uint32_t exclusive_high;
+ uint64_t exclusive_addr;
+ uint64_t exclusive_val;
+ uint64_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
- uint32_t exclusive_test;
+ uint64_t exclusive_test;
uint32_t exclusive_info;
#endif
@@ -475,6 +487,15 @@ static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
vfp_set_fpscr(env, new_fpscr);
}
+enum arm_fprounding {
+ FPROUNDING_TIEEVEN,
+ FPROUNDING_POSINF,
+ FPROUNDING_NEGINF,
+ FPROUNDING_ZERO,
+ FPROUNDING_TIEAWAY,
+ FPROUNDING_ODD
+};
+
enum arm_cpu_mode {
ARM_CPU_MODE_USR = 0x10,
ARM_CPU_MODE_FIQ = 0x11,
@@ -572,18 +593,43 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
* or via MRRC/MCRR?)
* We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
* (In this case crn and opc2 should be zero.)
+ * For AArch64, there is no 32/64 bit size distinction;
+ * instead all registers have a 2 bit op0, 3 bit op1 and op2,
+ * and 4 bit CRn and CRm. The encoding patterns are chosen
+ * to be easy to convert to and from the KVM encodings, and also
+ * so that the hashtable can contain both AArch32 and AArch64
+ * registers (to allow for interprocessing where we might run
+ * 32 bit code on a 64 bit core).
+ */
+/* This bit is private to our hashtable cpreg; in KVM register
+ * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
+ * in the upper bits of the 64 bit ID.
*/
+#define CP_REG_AA64_SHIFT 28
+#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
+
#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \
(((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \
((crm) << 7) | ((opc1) << 3) | (opc2))
+#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
+ (CP_REG_AA64_MASK | \
+ ((cp) << CP_REG_ARM_COPROC_SHIFT) | \
+ ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
+ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
+ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
+ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
+ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
+
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
* version used as a key for the coprocessor register hashtable
*/
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
uint32_t cpregid = kvmid;
- if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+ if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
+ cpregid |= CP_REG_AA64_MASK;
+ } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
cpregid |= (1 << 15);
}
return cpregid;
@@ -594,11 +640,18 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
*/
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
- uint64_t kvmid = cpregid & ~(1 << 15);
- if (cpregid & (1 << 15)) {
- kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
+ uint64_t kvmid;
+
+ if (cpregid & CP_REG_AA64_MASK) {
+ kvmid = cpregid & ~CP_REG_AA64_MASK;
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
} else {
- kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
+ kvmid = cpregid & ~(1 << 15);
+ if (cpregid & (1 << 15)) {
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
+ } else {
+ kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
+ }
}
return kvmid;
}
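
For reference, here is a minimal standalone sketch of the KVM-ID/cpreg-ID round trip implemented by the two inline functions above. The CP_REG_* constants are copied from this patch's cpu.h and kvm-consts.h changes; CP_REG_SIZE_MASK is assumed to match KVM_REG_SIZE_MASK (0x00f0000000000000).

/* Illustrative sketch of the kvm_to_cpreg_id()/cpreg_to_kvm_id() round trip. */
#include <assert.h>
#include <stdint.h>

#define CP_REG_SIZE_MASK   0x00f0000000000000ULL   /* assumed == KVM_REG_SIZE_MASK */
#define CP_REG_SIZE_U32    0x0020000000000000ULL
#define CP_REG_SIZE_U64    0x0030000000000000ULL
#define CP_REG_ARM         0x4000000000000000ULL
#define CP_REG_ARM64       0x6000000000000000ULL
#define CP_REG_ARCH_MASK   0xff00000000000000ULL
#define CP_REG_AA64_MASK   (1U << 28)

static uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
        cpregid |= (1 << 15);
    }
    return cpregid;
}

static uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;
    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}

int main(void)
{
    /* An AArch64 sysreg ID: ARM64 arch, 64 bit size, sysreg coproc field
     * (0x0013 << 16), and an arbitrary encoding in the low bits.
     */
    uint64_t kvmid = CP_REG_ARM64 | CP_REG_SIZE_U64 | 0x00130000 | 0x1234;
    assert(cpreg_to_kvm_id(kvm_to_cpreg_id(kvmid)) == kvmid);
    return 0;
}
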
@@ -628,12 +681,28 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
#define ARM_CP_IO 64
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
-#define ARM_LAST_SPECIAL ARM_CP_WFI
+#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
+#define ARM_LAST_SPECIAL ARM_CP_NZCV
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0x7f
+/* Valid values for ARMCPRegInfo state field, indicating which of
+ * the AArch32 and AArch64 execution states this register is visible in.
+ * If the reginfo doesn't explicitly specify then it is AArch32 only.
+ * If the reginfo is declared to be visible in both states then a second
+ * reginfo is synthesised for the AArch32 view of the AArch64 register,
+ * such that the AArch32 view is the lower 32 bits of the AArch64 one.
+ * Note that we rely on the values of these enums as we iterate through
+ * the various states in some places.
+ */
+enum {
+ ARM_CP_STATE_AA32 = 0,
+ ARM_CP_STATE_AA64 = 1,
+ ARM_CP_STATE_BOTH = 2,
+};
+
/* Return true if cptype is a valid type field. This is used to try to
* catch errors where the sentinel has been accidentally left off the end
* of a list of registers.
@@ -655,6 +724,8 @@ static inline bool cptype_valid(int cptype)
* (ie anything visible in PL2 is visible in S-PL1, some things are only
* visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
* terminology a little and call this PL3.
+ * In AArch64 things are somewhat simpler as the PLx bits line up exactly
+ * with the ELx exception levels.
*
* If access permissions for a register are more complex than can be
* described with these bits, then use a laxer set of restrictions, and
@@ -676,6 +747,10 @@ static inline bool cptype_valid(int cptype)
static inline int arm_current_pl(CPUARMState *env)
{
+ if (env->aarch64) {
+ return extract32(env->pstate, 2, 2);
+ }
+
if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) {
return 0;
}
@@ -713,12 +788,22 @@ struct ARMCPRegInfo {
* then behave differently on read/write if necessary.
* For 64 bit registers, only crm and opc1 are relevant; crn and opc2
* must both be zero.
+ * For AArch64-visible registers, opc0 is also used.
+ * Since there are no "coprocessors" in AArch64, cp is purely used as a
+ * way to distinguish (for KVM's benefit) guest-visible system registers
+ * from demuxed ones provided to preserve the "no side effects on
+ * KVM register read/write from QEMU" semantics. cp==0x13 is guest
+ * visible (to match KVM's encoding); cp==0 will be converted to
+ * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
*/
uint8_t cp;
uint8_t crn;
uint8_t crm;
+ uint8_t opc0;
uint8_t opc1;
uint8_t opc2;
+ /* Execution state in which this register is visible: ARM_CP_STATE_* */
+ int state;
/* Register type: ARM_CP_* bits/values */
int type;
/* Access rights: PL*_[RW] */
@@ -790,7 +875,7 @@ static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
-const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp);
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
/* CPWriteFn that can be used to implement writes-ignored behaviour */
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -798,10 +883,15 @@ int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
/* CPReadFn that can be used for read-as-zero behaviour */
int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value);
-static inline bool cp_access_ok(CPUARMState *env,
+/* CPResetFn that does nothing, for use if no reset is required even
+ * if fieldoffset is non zero.
+ */
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
+
+static inline bool cp_access_ok(int current_pl,
const ARMCPRegInfo *ri, int isread)
{
- return (ri->access >> ((arm_current_pl(env) * 2) + isread)) & 1;
+ return (ri->access >> ((current_pl * 2) + isread)) & 1;
}
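
A small standalone sketch of the bit indexing that cp_access_ok() performs: bit (2 * current_pl + isread) of the access mask must be set. The XPL* masks below are made-up single bits chosen for clarity; they are not QEMU's real PL*_R/W macros (which also imply access at the higher exception levels).

/* Illustrative sketch of the cp_access_ok() bit lookup only. */
#include <assert.h>

#define XPL0_W (1 << 0)   /* bit checked for current_pl == 0, write */
#define XPL0_R (1 << 1)   /* bit checked for current_pl == 0, read */
#define XPL1_W (1 << 2)
#define XPL1_R (1 << 3)

static int access_ok(int access, int current_pl, int isread)
{
    return (access >> ((current_pl * 2) + isread)) & 1;
}

int main(void)
{
    int access = XPL1_R | XPL1_W | XPL0_R;   /* PL1 read/write, PL0 read-only */
    assert(access_ok(access, 0, 1));         /* PL0 read: allowed */
    assert(!access_ok(access, 0, 0));        /* PL0 write: denied */
    assert(access_ok(access, 1, 0));         /* PL1 write: allowed */
    return 0;
}
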
/**
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index d3f706754f..4ce0d01a85 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -77,3 +77,48 @@ uint64_t HELPER(rbit64)(uint64_t x)
return x;
}
+
+/* Convert a softfloat float_relation_ (as returned by
+ * the float*_compare functions) to the correct ARM
+ * NZCV flag state.
+ */
+static inline uint32_t float_rel_to_flags(int res)
+{
+ uint64_t flags;
+ switch (res) {
+ case float_relation_equal:
+ flags = PSTATE_Z | PSTATE_C;
+ break;
+ case float_relation_less:
+ flags = PSTATE_N;
+ break;
+ case float_relation_greater:
+ flags = PSTATE_C;
+ break;
+ case float_relation_unordered:
+ default:
+ flags = PSTATE_C | PSTATE_V;
+ break;
+ }
+ return flags;
+}
+
+uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
+{
+ return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
+{
+ return float_rel_to_flags(float32_compare(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
+{
+ return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
+{
+ return float_rel_to_flags(float64_compare(x, y, fp_status));
+}
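
The mapping above follows the ARM FCMP flag convention: equal sets Z and C, less-than sets N, greater-than sets C, and an unordered result sets C and V. A standalone sketch of the same mapping using host doubles rather than softfloat (the FLAG_* bit positions here are arbitrary, not the PSTATE bit numbers):

/* Illustrative only: comparison result to NZCV, with host doubles. */
#include <assert.h>
#include <math.h>
#include <stdint.h>

#define FLAG_N (1u << 3)
#define FLAG_Z (1u << 2)
#define FLAG_C (1u << 1)
#define FLAG_V (1u << 0)

static uint32_t fcmp_flags(double x, double y)
{
    if (isunordered(x, y)) {
        return FLAG_C | FLAG_V;
    } else if (x == y) {
        return FLAG_Z | FLAG_C;
    } else if (x < y) {
        return FLAG_N;
    }
    return FLAG_C;            /* x > y */
}

int main(void)
{
    assert(fcmp_flags(1.0, 1.0) == (FLAG_Z | FLAG_C));
    assert(fcmp_flags(1.0, 2.0) == FLAG_N);
    assert(fcmp_flags(2.0, 1.0) == FLAG_C);
    assert(fcmp_flags(NAN, 1.0) == (FLAG_C | FLAG_V));
    return 0;
}
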
diff --git a/target-arm/helper-a64.h b/target-arm/helper-a64.h
index a163a94322..bca19f3dea 100644
--- a/target-arm/helper-a64.h
+++ b/target-arm/helper-a64.h
@@ -22,3 +22,7 @@ DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
+DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
+DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
+DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 6ebd7dc7bc..c708f15e27 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -142,11 +142,7 @@ static bool read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
} else if (ri->readfn) {
return (ri->readfn(env, ri, v) == 0);
} else {
- if (ri->type & ARM_CP_64BIT) {
- *v = CPREG_FIELD64(env, ri);
- } else {
- *v = CPREG_FIELD32(env, ri);
- }
+ raw_read(env, ri, v);
}
return true;
}
@@ -167,11 +163,7 @@ static bool write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
} else if (ri->writefn) {
return (ri->writefn(env, ri, v) == 0);
} else {
- if (ri->type & ARM_CP_64BIT) {
- CPREG_FIELD64(env, ri) = v;
- } else {
- CPREG_FIELD32(env, ri) = v;
- }
+ raw_write(env, ri, v);
}
return true;
}
@@ -186,7 +178,7 @@ bool write_cpustate_to_list(ARMCPU *cpu)
uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
const ARMCPRegInfo *ri;
uint64_t v;
- ri = get_arm_cp_reginfo(cpu, regidx);
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!ri) {
ok = false;
continue;
@@ -214,7 +206,7 @@ bool write_list_to_cpustate(ARMCPU *cpu)
uint64_t readback;
const ARMCPRegInfo *ri;
- ri = get_arm_cp_reginfo(cpu, regidx);
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!ri) {
ok = false;
continue;
@@ -242,7 +234,7 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
const ARMCPRegInfo *ri;
regidx = *(uint32_t *)key;
- ri = get_arm_cp_reginfo(cpu, regidx);
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!(ri->type & ARM_CP_NO_MIGRATE)) {
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
@@ -258,7 +250,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
const ARMCPRegInfo *ri;
regidx = *(uint32_t *)key;
- ri = get_arm_cp_reginfo(cpu, regidx);
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!(ri->type & ARM_CP_NO_MIGRATE)) {
cpu->cpreg_array_len++;
@@ -397,7 +389,7 @@ static const ARMCPRegInfo cp_reginfo[] = {
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
.resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
{ .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
.resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
/* ??? This covers not just the impdef TLB lockdown registers but also
* some v7VMSA registers relating to TEX remap, so it is overly broad.
@@ -740,18 +732,26 @@ static const ARMCPRegInfo t2ee_cp_reginfo[] = {
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
+ { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
+ .access = PL0_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
{ .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL0_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c13_tls1),
- .resetvalue = 0 },
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
+ .access = PL0_R|PL1_W,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
{ .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
.access = PL0_R|PL1_W,
- .fieldoffset = offsetof(CPUARMState, cp15.c13_tls2),
- .resetvalue = 0 },
- { .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c13_tls3),
- .resetvalue = 0 },
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1560,6 +1560,64 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
REGINFO_SENTINEL
};
+static int aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t *value)
+{
+ *value = vfp_get_fpcr(env);
+ return 0;
+}
+
+static int aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ vfp_set_fpcr(env, value);
+ return 0;
+}
+
+static int aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t *value)
+{
+ *value = vfp_get_fpsr(env);
+ return 0;
+}
+
+static int aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ vfp_set_fpsr(env, value);
+ return 0;
+}
+
+static const ARMCPRegInfo v8_cp_reginfo[] = {
+ /* Minimal set of EL0-visible registers. This will need to be expanded
+ * significantly for system emulation of AArch64 CPUs.
+ */
+ { .name = "NZCV", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
+ .access = PL0_RW, .type = ARM_CP_NZCV },
+ { .name = "FPCR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
+ .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
+ { .name = "FPSR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
+ .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
+ /* This claims a 32 byte cacheline size for icache and dcache, VIPT icache.
+ * It will eventually need to have a CPU-specified reset value.
+ */
+ { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
+ .access = PL0_R, .type = ARM_CP_CONST,
+ .resetvalue = 0x80030003 },
+ /* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
+ * For system mode the DZP bit here will need to be computed, not constant.
+ */
+ { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
+ .access = PL0_R, .type = ARM_CP_CONST,
+ .resetvalue = 0x10 },
+ REGINFO_SENTINEL
+};
+
static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
env->cp15.c1_sys = value;
@@ -1662,6 +1720,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, v8_cp_reginfo);
+ }
if (arm_feature(env, ARM_FEATURE_MPU)) {
/* These are the MPU registers prior to PMSAv6. Any new
* PMSA core later than the ARM946 will require that we
@@ -1937,6 +1998,85 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
return cpu_list;
}
+static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
+ void *opaque, int state,
+ int crm, int opc1, int opc2)
+{
+ /* Private utility function for define_one_arm_cp_reg_with_opaque():
+ * add a single reginfo struct to the hash table.
+ */
+ uint32_t *key = g_new(uint32_t, 1);
+ ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
+ int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
+ if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
+ /* The AArch32 view of a shared register sees the lower 32 bits
+ * of a 64 bit backing field. It is not migratable as the AArch64
+ * view handles that. AArch64 also handles reset.
+ * We assume it is a cp15 register.
+ */
+ r2->cp = 15;
+ r2->type |= ARM_CP_NO_MIGRATE;
+ r2->resetfn = arm_cp_reset_ignore;
+#ifdef HOST_WORDS_BIGENDIAN
+ if (r2->fieldoffset) {
+ r2->fieldoffset += sizeof(uint32_t);
+ }
+#endif
+ }
+ if (state == ARM_CP_STATE_AA64) {
+ /* To allow abbreviation of ARMCPRegInfo
+ * definitions, we treat cp == 0 as equivalent to
+ * the value for "standard guest-visible sysreg".
+ */
+ if (r->cp == 0) {
+ r2->cp = CP_REG_ARM64_SYSREG_CP;
+ }
+ *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
+ r2->opc0, opc1, opc2);
+ } else {
+ *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
+ }
+ if (opaque) {
+ r2->opaque = opaque;
+ }
+ /* Make sure reginfo passed to helpers for wildcarded regs
+ * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
+ */
+ r2->crm = crm;
+ r2->opc1 = opc1;
+ r2->opc2 = opc2;
+ /* By convention, for wildcarded registers only the first
+ * entry is used for migration; the others are marked as
+ * NO_MIGRATE so we don't try to transfer the register
+ * multiple times. Special registers (ie NOP/WFI) are
+ * never migratable.
+ */
+ if ((r->type & ARM_CP_SPECIAL) ||
+ ((r->crm == CP_ANY) && crm != 0) ||
+ ((r->opc1 == CP_ANY) && opc1 != 0) ||
+ ((r->opc2 == CP_ANY) && opc2 != 0)) {
+ r2->type |= ARM_CP_NO_MIGRATE;
+ }
+
+ /* Overriding of an existing definition must be explicitly
+ * requested.
+ */
+ if (!(r->type & ARM_CP_OVERRIDE)) {
+ ARMCPRegInfo *oldreg;
+ oldreg = g_hash_table_lookup(cpu->cp_regs, key);
+ if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
+ fprintf(stderr, "Register redefined: cp=%d %d bit "
+ "crn=%d crm=%d opc1=%d opc2=%d, "
+ "was %s, now %s\n", r2->cp, 32 + 32 * is64,
+ r2->crn, r2->crm, r2->opc1, r2->opc2,
+ oldreg->name, r2->name);
+ g_assert_not_reached();
+ }
+ }
+ g_hash_table_insert(cpu->cp_regs, key, r2);
+}
+
+
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
const ARMCPRegInfo *r, void *opaque)
{
@@ -1951,8 +2091,19 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
* At least one of the original and the second definition should
* include ARM_CP_OVERRIDE in its type bits -- this is just a guard
* against accidental use.
+ *
+ * The state field defines whether the register is to be
+ * visible in the AArch32 or AArch64 execution state. If the
+ * state is set to ARM_CP_STATE_BOTH then we synthesise a
+ * reginfo structure for the AArch32 view, which sees the lower
+ * 32 bits of the 64 bit register.
+ *
+ * Only registers visible in AArch64 may set r->opc0; opc0 cannot
+ * be wildcarded. AArch64 registers are always considered to be 64
+ * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
+ * the register, if any.
*/
- int crm, opc1, opc2;
+ int crm, opc1, opc2, state;
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
@@ -1961,6 +2112,52 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
/* 64 bit registers have only CRm and Opc1 fields */
assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
+ /* op0 only exists in the AArch64 encodings */
+ assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
+ /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
+ assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
+ /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
+ * encodes a minimum access level for the register. We roll this
+ * runtime check into our general permission check code, so check
+ * here that the reginfo's specified permissions are strict enough
+ * to encompass the generic architectural permission check.
+ */
+ if (r->state != ARM_CP_STATE_AA32) {
+ int mask = 0;
+ switch (r->opc1) {
+ case 0: case 1: case 2:
+ /* min_EL EL1 */
+ mask = PL1_RW;
+ break;
+ case 3:
+ /* min_EL EL0 */
+ mask = PL0_RW;
+ break;
+ case 4:
+ /* min_EL EL2 */
+ mask = PL2_RW;
+ break;
+ case 5:
+ /* unallocated encoding, so not possible */
+ assert(false);
+ break;
+ case 6:
+ /* min_EL EL3 */
+ mask = PL3_RW;
+ break;
+ case 7:
+ /* min_EL EL1, secure mode only (we don't check the latter) */
+ mask = PL1_RW;
+ break;
+ default:
+ /* broken reginfo with out-of-range opc1 */
+ assert(false);
+ break;
+ }
+ /* assert our permissions are not too lax (stricter is fine) */
+ assert((r->access & ~mask) == 0);
+ }
+
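
As a standalone summary of the op1-to-minimum-EL mapping being asserted above (with -1 standing in for the unallocated op1 == 5 encoding):

/* Illustrative sketch of the op1 -> minimum-EL mapping checked above. */
#include <assert.h>

static int aa64_sysreg_min_el(int op1)
{
    switch (op1) {
    case 0: case 1: case 2:
        return 1;     /* accessible from EL1 up */
    case 3:
        return 0;     /* accessible from EL0 up */
    case 4:
        return 2;     /* EL2 and up */
    case 6:
        return 3;     /* EL3 only */
    case 7:
        return 1;     /* EL1, Secure only (not checked here) */
    default:
        return -1;    /* op1 == 5 and out-of-range values: unallocated */
    }
}

int main(void)
{
    assert(aa64_sysreg_min_el(3) == 0);
    assert(aa64_sysreg_min_el(4) == 2);
    return 0;
}
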
/* Check that the register definition has enough info to handle
* reads and writes if they are permitted.
*/
@@ -1977,48 +2174,14 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
for (crm = crmmin; crm <= crmmax; crm++) {
for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
- uint32_t *key = g_new(uint32_t, 1);
- ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
- int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
- *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
- if (opaque) {
- r2->opaque = opaque;
- }
- /* Make sure reginfo passed to helpers for wildcarded regs
- * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
- */
- r2->crm = crm;
- r2->opc1 = opc1;
- r2->opc2 = opc2;
- /* By convention, for wildcarded registers only the first
- * entry is used for migration; the others are marked as
- * NO_MIGRATE so we don't try to transfer the register
- * multiple times. Special registers (ie NOP/WFI) are
- * never migratable.
- */
- if ((r->type & ARM_CP_SPECIAL) ||
- ((r->crm == CP_ANY) && crm != 0) ||
- ((r->opc1 == CP_ANY) && opc1 != 0) ||
- ((r->opc2 == CP_ANY) && opc2 != 0)) {
- r2->type |= ARM_CP_NO_MIGRATE;
- }
-
- /* Overriding of an existing definition must be explicitly
- * requested.
- */
- if (!(r->type & ARM_CP_OVERRIDE)) {
- ARMCPRegInfo *oldreg;
- oldreg = g_hash_table_lookup(cpu->cp_regs, key);
- if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
- fprintf(stderr, "Register redefined: cp=%d %d bit "
- "crn=%d crm=%d opc1=%d opc2=%d, "
- "was %s, now %s\n", r2->cp, 32 + 32 * is64,
- r2->crn, r2->crm, r2->opc1, r2->opc2,
- oldreg->name, r2->name);
- g_assert_not_reached();
+ for (state = ARM_CP_STATE_AA32;
+ state <= ARM_CP_STATE_AA64; state++) {
+ if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
+ continue;
}
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ crm, opc1, opc2);
}
- g_hash_table_insert(cpu->cp_regs, key, r2);
}
}
}
@@ -2034,9 +2197,9 @@ void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
}
}
-const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
- return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
+ return g_hash_table_lookup(cpregs, &encoded_cp);
}
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2053,6 +2216,11 @@ int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
return 0;
}
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+{
+ /* Helper coprocessor reset function for do-nothing-on-reset registers */
+}
+
static int bad_mode_switch(CPUARMState *env, int mode)
{
/* Return true if it is not valid for us to switch to
@@ -3639,16 +3807,16 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
if (changed & (3 << 22)) {
i = (val >> 22) & 3;
switch (i) {
- case 0:
+ case FPROUNDING_TIEEVEN:
i = float_round_nearest_even;
break;
- case 1:
+ case FPROUNDING_POSINF:
i = float_round_up;
break;
- case 2:
+ case FPROUNDING_NEGINF:
i = float_round_down;
break;
- case 3:
+ case FPROUNDING_ZERO:
i = float_round_to_zero;
break;
}
@@ -3688,6 +3856,10 @@ VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
+VFP_BINOP(min)
+VFP_BINOP(max)
+VFP_BINOP(minnum)
+VFP_BINOP(maxnum)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
@@ -3804,37 +3976,77 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
}
/* VFP3 fixed point conversion. */
-#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
-float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
- void *fpstp) \
+#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
+float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
+ void *fpstp) \
{ \
float_status *fpst = fpstp; \
float##fsz tmp; \
- tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
+ tmp = itype##_to_##float##fsz(x, fpst); \
return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
-} \
-uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
- void *fpstp) \
+}
+
+/* Notice that we want only input-denormal exception flags from the
+ * scalbn operation: the other possible flags (overflow+inexact if
+ * we overflow to infinity, output-denormal) aren't correct for the
+ * complete scale-and-convert operation.
+ */
+#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
+uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
+ uint32_t shift, \
+ void *fpstp) \
{ \
float_status *fpst = fpstp; \
+ int old_exc_flags = get_float_exception_flags(fpst); \
float##fsz tmp; \
if (float##fsz##_is_any_nan(x)) { \
float_raise(float_flag_invalid, fpst); \
return 0; \
} \
tmp = float##fsz##_scalbn(x, shift, fpst); \
- return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
-}
-
-VFP_CONV_FIX(sh, d, 64, int16, )
-VFP_CONV_FIX(sl, d, 64, int32, )
-VFP_CONV_FIX(uh, d, 64, uint16, u)
-VFP_CONV_FIX(ul, d, 64, uint32, u)
-VFP_CONV_FIX(sh, s, 32, int16, )
-VFP_CONV_FIX(sl, s, 32, int32, )
-VFP_CONV_FIX(uh, s, 32, uint16, u)
-VFP_CONV_FIX(ul, s, 32, uint32, u)
+ old_exc_flags |= get_float_exception_flags(fpst) \
+ & float_flag_input_denormal; \
+ set_float_exception_flags(old_exc_flags, fpst); \
+ return float##fsz##_to_##itype##round(tmp, fpst); \
+}
+
+#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
+
+#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
+
+VFP_CONV_FIX(sh, d, 64, 64, int16)
+VFP_CONV_FIX(sl, d, 64, 64, int32)
+VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
+VFP_CONV_FIX(uh, d, 64, 64, uint16)
+VFP_CONV_FIX(ul, d, 64, 64, uint32)
+VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
+VFP_CONV_FIX(sh, s, 32, 32, int16)
+VFP_CONV_FIX(sl, s, 32, 32, int32)
+VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
+VFP_CONV_FIX(uh, s, 32, 32, uint16)
+VFP_CONV_FIX(ul, s, 32, 32, uint32)
+VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX
+#undef VFP_CONV_FIX_FLOAT
+#undef VFP_CONV_FLOAT_FIX_ROUND
+
+/* Set the current fp rounding mode and return the old one.
+ * The argument is a softfloat float_round_ value.
+ */
+uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
+{
+ float_status *fp_status = &env->vfp.fp_status;
+
+ uint32_t prev_rmode = get_float_rounding_mode(fp_status);
+ set_float_rounding_mode(rmode, fp_status);
+
+ return prev_rmode;
+}
/* Half precision conversions. */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
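
The fixed-point conversions above reduce to "convert, then scale by 2^-shift" (fixed to float) and "scale by 2^shift, then convert" (float to fixed). A standalone sketch of that arithmetic on host doubles, ignoring the softfloat exception-flag handling the real helpers need:

/* Illustrative only: the scale-and-convert arithmetic behind the
 * VFP_CONV_FIX macros, using host doubles rather than softfloat.
 */
#include <assert.h>
#include <math.h>
#include <stdint.h>

/* Fixed-point value with 'shift' fractional bits -> floating point */
static double fix_to_double(int64_t x, unsigned shift)
{
    return ldexp((double)x, -(int)shift);
}

/* Floating point -> fixed point, truncating toward zero as in the
 * _round_to_zero helpers; NaN inputs would need the explicit check
 * the real code performs.
 */
static int64_t double_to_fix_rtz(double x, unsigned shift)
{
    return (int64_t)ldexp(x, (int)shift);
}

int main(void)
{
    /* 1.5 in a fixed-point format with 8 fractional bits is 384 */
    assert(double_to_fix_rtz(1.5, 8) == 384);
    assert(fix_to_double(384, 8) == 1.5);
    return 0;
}
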
@@ -3877,6 +4089,26 @@ uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
+float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
+{
+ int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+ float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
+ if (ieee) {
+ return float64_maybe_silence_nan(r);
+ }
+ return r;
+}
+
+uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
+{
+ int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+ float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
+ if (ieee) {
+ r = float16_maybe_silence_nan(r);
+ }
+ return float16_val(r);
+}
+
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
@@ -4142,27 +4374,47 @@ float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
return float64_muladd(a, b, c, 0, fpst);
}
-/* ARMv8 VMAXNM/VMINNM */
-float32 VFP_HELPER(maxnm, s)(float32 a, float32 b, void *fpstp)
+/* ARMv8 round to integral */
+float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
- float_status *fpst = fpstp;
- return float32_maxnum(a, b, fpst);
+ return float32_round_to_int(x, fp_status);
}
-float64 VFP_HELPER(maxnm, d)(float64 a, float64 b, void *fpstp)
+float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
- float_status *fpst = fpstp;
- return float64_maxnum(a, b, fpst);
+ return float64_round_to_int(x, fp_status);
}
-float32 VFP_HELPER(minnm, s)(float32 a, float32 b, void *fpstp)
+float32 HELPER(rints)(float32 x, void *fp_status)
{
- float_status *fpst = fpstp;
- return float32_minnum(a, b, fpst);
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float32 ret;
+
+ ret = float32_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
}
-float64 VFP_HELPER(minnm, d)(float64 a, float64 b, void *fpstp)
+float64 HELPER(rintd)(float64 x, void *fp_status)
{
- float_status *fpst = fpstp;
- return float64_minnum(a, b, fpst);
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float64 ret;
+
+ ret = float64_round_to_int(x, fp_status);
+
+ new_flags = get_float_exception_flags(fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
}
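
The non-_exact rint helpers must not leave the Inexact flag set merely because rounding changed the value; only a pre-existing Inexact survives. A standalone sketch of the same save/operate/clear pattern using the host's fenv.h instead of softfloat status flags (needs -lm and, strictly speaking, #pragma STDC FENV_ACCESS ON):

/* Illustrative only: "suppress inexact unless it was already set". */
#include <fenv.h>
#include <math.h>
#include <stdio.h>

static double round_int_no_inexact(double x)
{
    int had_inexact = fetestexcept(FE_INEXACT);
    double ret = rint(x);   /* rounds using the current rounding mode */

    if (!had_inexact) {
        feclearexcept(FE_INEXACT);
    }
    return ret;
}

int main(void)
{
    feclearexcept(FE_ALL_EXCEPT);
    double r = round_int_no_inexact(1.25);
    /* Prints "1 inexact=0": the value was rounded but no flag remains */
    printf("%g inexact=%d\n", r, fetestexcept(FE_INEXACT) != 0);
    return 0;
}
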
diff --git a/target-arm/helper.h b/target-arm/helper.h
index 73d67dcc17..70872dffc6 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -79,6 +79,14 @@ DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
DEF_HELPER_1(vfp_negs, f32, f32)
DEF_HELPER_1(vfp_negd, f64, f64)
DEF_HELPER_1(vfp_abss, f32, f32)
@@ -107,36 +115,51 @@ DEF_HELPER_2(vfp_tosid, i32, f64, ptr)
DEF_HELPER_2(vfp_tosizs, i32, f32, ptr)
DEF_HELPER_2(vfp_tosizd, i32, f64, ptr)
+DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr)
DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr)
DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr)
DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr)
DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr)
DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr)
DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr)
+
+DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, env)
DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env)
DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env)
DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env)
DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env)
+DEF_HELPER_FLAGS_2(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, i32, env)
+DEF_HELPER_FLAGS_2(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, i32, f64, env)
DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_maxnmd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_maxnms, f32, f32, f32, ptr)
-DEF_HELPER_3(vfp_minnmd, f64, f64, f64, ptr)
-DEF_HELPER_3(vfp_minnms, f32, f32, f32, ptr)
-
DEF_HELPER_3(recps_f32, f32, f32, f32, env)
DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
DEF_HELPER_2(recpe_f32, f32, f32, env)
@@ -150,6 +173,11 @@ DEF_HELPER_3(shr_cc, i32, env, i32, i32)
DEF_HELPER_3(sar_cc, i32, env, i32, i32)
DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
+
/* neon_helper.c */
DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
@@ -346,8 +374,6 @@ DEF_HELPER_2(neon_qneg_s8, i32, env, i32)
DEF_HELPER_2(neon_qneg_s16, i32, env, i32)
DEF_HELPER_2(neon_qneg_s32, i32, env, i32)
-DEF_HELPER_3(neon_min_f32, i32, i32, i32, ptr)
-DEF_HELPER_3(neon_max_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr)
diff --git a/target-arm/kvm-consts.h b/target-arm/kvm-consts.h
index 2bba0bd198..0e7f889cba 100644
--- a/target-arm/kvm-consts.h
+++ b/target-arm/kvm-consts.h
@@ -29,12 +29,14 @@
#define CP_REG_SIZE_U32 0x0020000000000000ULL
#define CP_REG_SIZE_U64 0x0030000000000000ULL
#define CP_REG_ARM 0x4000000000000000ULL
+#define CP_REG_ARCH_MASK 0xff00000000000000ULL
MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT)
MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK)
MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32)
MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64)
MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM)
+MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK)
#define PSCI_FN_BASE 0x95c1ba5e
#define PSCI_FN(n) (PSCI_FN_BASE + (n))
@@ -59,6 +61,41 @@ MISMATCH_CHECK(PSCI_FN_MIGRATE, KVM_PSCI_FN_MIGRATE)
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15)
#endif
+#define CP_REG_ARM64 0x6000000000000000ULL
+#define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define CP_REG_ARM_COPROC_SHIFT 16
+#define CP_REG_ARM64_SYSREG (0x0013 << CP_REG_ARM_COPROC_SHIFT)
+#define CP_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
+#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define CP_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
+#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define CP_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
+#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define CP_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
+#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
+#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+/* No kernel define but it's useful to QEMU */
+#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT)
+
+#ifdef TARGET_AARCH64
+MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64)
+MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK)
+MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT)
+#endif
+
#undef MISMATCH_CHECK
#endif
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 74f010f637..8f9e7d4d28 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -222,9 +222,9 @@ static int cpu_post_load(void *opaque, int version_id)
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
- .version_id = 13,
- .minimum_version_id = 13,
- .minimum_version_id_old = 13,
+ .version_id = 14,
+ .minimum_version_id = 14,
+ .minimum_version_id_old = 14,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
@@ -253,9 +253,9 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
cpreg_vmstate_array_len,
0, vmstate_info_uint64, uint64_t),
- VMSTATE_UINT32(env.exclusive_addr, ARMCPU),
- VMSTATE_UINT32(env.exclusive_val, ARMCPU),
- VMSTATE_UINT32(env.exclusive_high, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_val, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_high, ARMCPU),
VMSTATE_UINT64(env.features, ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index b028cc2c93..be6fbd997e 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -1765,18 +1765,6 @@ uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
}
/* NEON Float helpers. */
-uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp)
-{
- float_status *fpst = fpstp;
- return float32_val(float32_min(make_float32(a), make_float32(b), fpst));
-}
-
-uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp)
-{
- float_status *fpst = fpstp;
- return float32_val(float32_max(make_float32(a), make_float32(b), fpst));
-}
-
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
float_status *fpst = fpstp;
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 0a76130bb2..cf80c46b90 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -38,6 +38,15 @@ static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
+/* Load/store exclusive handling */
+static TCGv_i64 cpu_exclusive_addr;
+static TCGv_i64 cpu_exclusive_val;
+static TCGv_i64 cpu_exclusive_high;
+#ifdef CONFIG_USER_ONLY
+static TCGv_i64 cpu_exclusive_test;
+static TCGv_i32 cpu_exclusive_info;
+#endif
+
static const char *regnames[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
@@ -70,6 +79,19 @@ void a64_translate_init(void)
cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
+
+ cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
+ cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, exclusive_val), "exclusive_val");
+ cpu_exclusive_high = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, exclusive_high), "exclusive_high");
+#ifdef CONFIG_USER_ONLY
+ cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, exclusive_test), "exclusive_test");
+ cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUARMState, exclusive_info), "exclusive_info");
+#endif
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
@@ -97,6 +119,31 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
psr & PSTATE_C ? 'C' : '-',
psr & PSTATE_V ? 'V' : '-');
cpu_fprintf(f, "\n");
+
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 32;
+ for (i = 0; i < numvfpregs; i += 2) {
+ uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
+ uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
+ cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
+ i, vhi, vlo);
+ vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
+ vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
+ cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
+ i + 1, vhi, vlo);
+ }
+ cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
+ vfp_get_fpcr(env), vfp_get_fpsr(env));
+ }
+}
+
+static int get_mem_index(DisasContext *s)
+{
+#ifdef CONFIG_USER_ONLY
+ return 1;
+#else
+ return s->user;
+#endif
}
void gen_a64_set_pc_im(uint64_t val)
@@ -250,6 +297,91 @@ static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
return v;
}
+static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
+{
+ TCGv_i64 v = new_tmp_a64(s);
+ if (sf) {
+ tcg_gen_mov_i64(v, cpu_X[reg]);
+ } else {
+ tcg_gen_ext32u_i64(v, cpu_X[reg]);
+ }
+ return v;
+}
+
+/* Return the offset into CPUARMState of a slice (from
+ * the least significant end) of FP register Qn (ie
+ * Dn, Sn, Hn or Bn).
+ * (Note that this is not the same mapping as for A32; see cpu.h)
+ */
+static inline int fp_reg_offset(int regno, TCGMemOp size)
+{
+ int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
+#ifdef HOST_WORDS_BIGENDIAN
+ offs += (8 - (1 << size));
+#endif
+ return offs;
+}
+
+/* Offset of the high half of the 128 bit vector Qn */
+static inline int fp_reg_hi_offset(int regno)
+{
+ return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
+}
+
+/* Convenience accessors for reading and writing single and double
+ * FP registers. Writing clears the upper parts of the associated
+ * 128 bit vector register, as required by the architecture.
+ * Note that unlike the GP register accessors, the values returned
+ * by the read functions must be manually freed.
+ */
+static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
+{
+ TCGv_i64 v = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
+ return v;
+}
+
+static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
+{
+ TCGv_i32 v = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(reg, MO_32));
+ return v;
+}
+
+static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
+{
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+
+ tcg_gen_st_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
+ tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(reg));
+ tcg_temp_free_i64(tcg_zero);
+}
+
+static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tmp, v);
+ write_fp_dreg(s, reg, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+static TCGv_ptr get_fpstatus_ptr(void)
+{
+ TCGv_ptr statusptr = tcg_temp_new_ptr();
+ int offset;
+
+ /* In A64 all instructions (both FP and Neon) use the FPCR;
+ * there is no equivalent of the A32 Neon "standard FPSCR value"
+ * and all operations use vfp.fp_status.
+ */
+ offset = offsetof(CPUARMState, vfp.fp_status);
+ tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ return statusptr;
+}
+
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
* than the 32 bit equivalent.
*/
@@ -277,6 +409,318 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result)
tcg_gen_movi_i32(cpu_VF, 0);
}
+/* dest = T0 + T1; compute C, N, V and Z flags */
+static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ TCGv_i64 result, flag, tmp;
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
+
+ tcg_gen_trunc_i64_i32(cpu_CF, flag);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(flag, flag, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_shri_i64(flag, flag, 32);
+ tcg_gen_trunc_i64_i32(cpu_VF, flag);
+
+ tcg_gen_mov_i64(dest, result);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(flag);
+ } else {
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_trunc_i64_i32(t0_32, t0);
+ tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t0_32);
+ tcg_temp_free_i32(t1_32);
+ }
+}
+
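
The V-flag computation in gen_add_CC() uses the classic identity: for an addition, signed overflow occurred when the operands agree in sign but the result does not, i.e. the top bit of (res ^ a) & ~(a ^ b). A standalone sketch on host integers:

/* Illustrative only: the overflow-flag trick used in gen_add_CC() above. */
#include <assert.h>
#include <stdint.h>

static unsigned add32_overflow(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    return ((res ^ a) & ~(a ^ b)) >> 31;
}

int main(void)
{
    assert(add32_overflow(0x7fffffff, 1) == 1);   /* positive overflow */
    assert(add32_overflow(0xffffffff, 1) == 0);   /* -1 + 1: no signed overflow */
    return 0;
}
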
+/* dest = T0 - T1; compute C, N, V and Z flags */
+static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ /* 64 bit arithmetic */
+ TCGv_i64 result, flag, tmp;
+
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tcg_gen_sub_i64(result, t0, t1);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_CF, flag);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_and_i64(flag, flag, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_shri_i64(flag, flag, 32);
+ tcg_gen_trunc_i64_i32(cpu_VF, flag);
+ tcg_gen_mov_i64(dest, result);
+ tcg_temp_free_i64(flag);
+ tcg_temp_free_i64(result);
+ } else {
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp;
+
+ tcg_gen_trunc_i64_i32(t0_32, t0);
+ tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_temp_free_i32(t0_32);
+ tcg_temp_free_i32(t1_32);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+ }
+}
+
+/* dest = T0 + T1 + CF; do not compute flags. */
+static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ TCGv_i64 flag = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(flag, cpu_CF);
+ tcg_gen_add_i64(dest, t0, t1);
+ tcg_gen_add_i64(dest, dest, flag);
+ tcg_temp_free_i64(flag);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(dest, dest);
+ }
+}
+
+/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
+static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ TCGv_i64 result, cf_64, vf_64, tmp;
+ result = tcg_temp_new_i64();
+ cf_64 = tcg_temp_new_i64();
+ vf_64 = tcg_temp_new_i64();
+ tmp = tcg_const_i64(0);
+
+ tcg_gen_extu_i32_i64(cf_64, cpu_CF);
+ tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
+ tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
+ tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(vf_64, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(vf_64, vf_64, tmp);
+ tcg_gen_shri_i64(vf_64, vf_64, 32);
+ tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
+
+ tcg_gen_mov_i64(dest, result);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(vf_64);
+ tcg_temp_free_i64(cf_64);
+ tcg_temp_free_i64(result);
+ } else {
+ TCGv_i32 t0_32, t1_32, tmp;
+ t0_32 = tcg_temp_new_i32();
+ t1_32 = tcg_temp_new_i32();
+ tmp = tcg_const_i32(0);
+
+ tcg_gen_trunc_i64_i32(t0_32, t0);
+ tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
+
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t1_32);
+ tcg_temp_free_i32(t0_32);
+ }
+}
+
+/*
+ * Load/Store generators
+ */
+
+/*
+ * Store from GPR register to memory
+ */
+static void do_gpr_st(DisasContext *s, TCGv_i64 source,
+ TCGv_i64 tcg_addr, int size)
+{
+ g_assert(size <= 3);
+ tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
+}
+
+/*
+ * Load from memory to GPR register
+ */
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed, bool extend)
+{
+ TCGMemOp memop = MO_TE + size;
+
+ g_assert(size <= 3);
+
+ if (is_signed) {
+ memop += MO_SIGN;
+ }
+
+ tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);
+
+ if (extend && is_signed) {
+ g_assert(size < 3);
+ tcg_gen_ext32u_i64(dest, dest);
+ }
+}
+
+/*
+ * Store from FP register to memory
+ */
+static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
+{
+ /* This writes the bottom N bits of a 128 bit wide vector to memory */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(srcidx, MO_64));
+ if (size < 4) {
+ tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
+ } else {
+ TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
+ tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(srcidx));
+ tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
+ tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
+ tcg_temp_free_i64(tcg_hiaddr);
+ }
+
+ tcg_temp_free_i64(tmp);
+}
+
+/*
+ * Load from memory to FP register
+ */
+static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
+{
+ /* This always zero-extends and writes to a full 128 bit wide vector */
+ TCGv_i64 tmplo = tcg_temp_new_i64();
+ TCGv_i64 tmphi;
+
+ if (size < 4) {
+ TCGMemOp memop = MO_TE + size;
+ tmphi = tcg_const_i64(0);
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
+ } else {
+ TCGv_i64 tcg_hiaddr;
+ tmphi = tcg_temp_new_i64();
+ tcg_hiaddr = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
+ tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
+ tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
+ tcg_temp_free_i64(tcg_hiaddr);
+ }
+
+ tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(destidx, MO_64));
+ tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(destidx));
+
+ tcg_temp_free_i64(tmplo);
+ tcg_temp_free_i64(tmphi);
+}
+
+/*
+ * This utility function is for doing register extension with an
+ * optional shift. You will likely want to pass a temporary for the
+ * destination register. See DecodeRegExtend() in the ARM ARM.
+ */
+static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
+ int option, unsigned int shift)
+{
+ int extsize = extract32(option, 0, 2);
+ bool is_signed = extract32(option, 2, 1);
+
+ if (is_signed) {
+ switch (extsize) {
+ case 0:
+ tcg_gen_ext8s_i64(tcg_out, tcg_in);
+ break;
+ case 1:
+ tcg_gen_ext16s_i64(tcg_out, tcg_in);
+ break;
+ case 2:
+ tcg_gen_ext32s_i64(tcg_out, tcg_in);
+ break;
+ case 3:
+ tcg_gen_mov_i64(tcg_out, tcg_in);
+ break;
+ }
+ } else {
+ switch (extsize) {
+ case 0:
+ tcg_gen_ext8u_i64(tcg_out, tcg_in);
+ break;
+ case 1:
+ tcg_gen_ext16u_i64(tcg_out, tcg_in);
+ break;
+ case 2:
+ tcg_gen_ext32u_i64(tcg_out, tcg_in);
+ break;
+ case 3:
+ tcg_gen_mov_i64(tcg_out, tcg_in);
+ break;
+ }
+ }
+
+ if (shift) {
+ tcg_gen_shli_i64(tcg_out, tcg_out, shift);
+ }
+}
+
+static inline void gen_check_sp_alignment(DisasContext *s)
+{
+ /* The AArch64 architecture mandates that (if enabled via PSTATE
+ * or SCTLR bits) there is a check that SP is 16-aligned on every
+ * SP-relative load or store (with an exception generated if it is not).
+ * In line with general QEMU practice regarding misaligned accesses,
+ * we omit these checks for the sake of guest program performance.
+ * This function is provided as a hook so we can more easily add these
+ * checks in future (possibly as a "favour catching guest program bugs
+ * over speed" user selectable option).
+ */
+}
+
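
ext_and_shift_reg() implements the ARM ARM's DecodeRegExtend(): extend the low 8/16/32/64 bits of the source (signed if option bit 2 is set), then shift left. A standalone sketch of the same computation on host integers:

/* Illustrative only: DecodeRegExtend() on host integers, mirroring
 * ext_and_shift_reg() above (option[1:0] selects width, option[2] sign).
 */
#include <assert.h>
#include <stdint.h>

static uint64_t decode_reg_extend(uint64_t in, int option, unsigned shift)
{
    int extsize = option & 3;
    int is_signed = (option >> 2) & 1;
    uint64_t out;

    switch (extsize) {
    case 0:
        out = is_signed ? (uint64_t)(int64_t)(int8_t)in : (uint8_t)in;
        break;
    case 1:
        out = is_signed ? (uint64_t)(int64_t)(int16_t)in : (uint16_t)in;
        break;
    case 2:
        out = is_signed ? (uint64_t)(int64_t)(int32_t)in : (uint32_t)in;
        break;
    default:
        out = in;
        break;
    }
    return out << shift;
}

int main(void)
{
    /* SXTB of 0xff is -1; shifted left by 2 that is -4 as a 64 bit value */
    assert(decode_reg_extend(0xff, 4, 2) == (uint64_t)-4);
    /* UXTH keeps only the low 16 bits */
    assert(decode_reg_extend(0x12345678, 1, 0) == 0x5678);
    return 0;
}
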
/*
* the instruction disassembly implemented here matches
* the instruction encoding classifications in chapter 3 (C3)
@@ -418,6 +862,11 @@ static void handle_hint(DisasContext *s, uint32_t insn,
}
}
+static void gen_clrex(DisasContext *s, uint32_t insn)
+{
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
unsigned int op1, unsigned int op2, unsigned int crm)
@@ -429,7 +878,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
switch (op2) {
case 2: /* CLREX */
- unsupported_encoding(s, insn);
+ gen_clrex(s, insn);
return;
case 4: /* DSB */
case 5: /* DMB */
@@ -449,28 +898,140 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
unsupported_encoding(s, insn);
}
-/* C5.6.204 SYS */
-static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l,
- unsigned int op1, unsigned int op2,
- unsigned int crn, unsigned int crm, unsigned int rt)
+static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
- unsupported_encoding(s, insn);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 nzcv = tcg_temp_new_i32();
+
+ /* build bit 31, N */
+ tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
+ /* build bit 30, Z */
+ tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
+ tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
+ /* build bit 29, C */
+ tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
+ /* build bit 28, V */
+ tcg_gen_shri_i32(tmp, cpu_VF, 31);
+ tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
+ /* generate result */
+ tcg_gen_extu_i32_i64(tcg_rt, nzcv);
+
+ tcg_temp_free_i32(nzcv);
+ tcg_temp_free_i32(tmp);
}
-/* C5.6.129 MRS - move from system register */
-static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0,
- unsigned int op1, unsigned int op2,
- unsigned int crn, unsigned int crm, unsigned int rt)
+static void gen_set_nzcv(TCGv_i64 tcg_rt)
+
{
- unsupported_encoding(s, insn);
+ TCGv_i32 nzcv = tcg_temp_new_i32();
+
+ /* take NZCV from R[t] */
+ tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
+
+ /* bit 31, N */
+ tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
+ /* bit 30, Z */
+ tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
+ /* bit 29, C */
+ tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
+ tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
+ /* bit 28, V */
+ tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
+ tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
+ tcg_temp_free_i32(nzcv);
}
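
QEMU keeps the four condition flags in separate fields: NF and VF hold their flag in bit 31, CF holds it in bit 0, and ZF is zero exactly when Z is set, so gen_get_nzcv() and gen_set_nzcv() repack rather than copy. A minimal host-side sketch of the same mapping (struct and function names are illustrative only):

    #include <stdint.h>

    /* Illustrative repack of PSTATE.NZCV (bits 31..28) into QEMU's flag
     * representation and back: NF/VF keep the flag in bit 31, CF in bit 0,
     * ZF is 0 iff Z is set.
     */
    struct flags { uint32_t nf, zf, cf, vf; };

    static struct flags unpack_nzcv(uint32_t nzcv)
    {
        struct flags f;
        f.nf = nzcv & (1u << 31);            /* N */
        f.zf = (nzcv & (1u << 30)) ? 0 : 1;  /* Z: zero means "set" */
        f.cf = (nzcv >> 29) & 1;             /* C */
        f.vf = (nzcv & (1u << 28)) << 3;     /* V moved up to bit 31 */
        return f;
    }

    static uint32_t pack_nzcv(struct flags f)
    {
        return (f.nf & (1u << 31)) |
               ((f.zf == 0) << 30) |
               ((f.cf & 1) << 29) |
               ((f.vf >> 31) << 28);
    }
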
-/* C5.6.131 MSR (register) - move to system register */
-static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0,
- unsigned int op1, unsigned int op2,
+/* C5.6.129 MRS - move from system register
+ * C5.6.131 MSR (register) - move to system register
+ * C5.6.204 SYS
+ * C5.6.205 SYSL
+ * These are all essentially the same insn in 'read' and 'write'
+ * versions, with varying op0 fields.
+ */
+static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
+ unsigned int op0, unsigned int op1, unsigned int op2,
unsigned int crn, unsigned int crm, unsigned int rt)
{
- unsupported_encoding(s, insn);
+ const ARMCPRegInfo *ri;
+ TCGv_i64 tcg_rt;
+
+ ri = get_arm_cp_reginfo(s->cp_regs,
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
+ crn, crm, op0, op1, op2));
+
+ if (!ri) {
+ /* Unknown register */
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Check access permissions */
+ if (!cp_access_ok(s->current_pl, ri, isread)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Handle special cases first */
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return;
+ case ARM_CP_NZCV:
+ tcg_rt = cpu_reg(s, rt);
+ if (isread) {
+ gen_get_nzcv(tcg_rt);
+ } else {
+ gen_set_nzcv(tcg_rt);
+ }
+ return;
+ default:
+ break;
+ }
+
+ if (use_icount && (ri->type & ARM_CP_IO)) {
+ gen_io_start();
+ }
+
+ tcg_rt = cpu_reg(s, rt);
+
+ if (isread) {
+ if (ri->type & ARM_CP_CONST) {
+ tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ gen_a64_set_pc_im(s->pc - 4);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ }
+ } else {
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return;
+ } else if (ri->writefn) {
+ TCGv_ptr tmpptr;
+ gen_a64_set_pc_im(s->pc - 4);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ }
+ }
+
+ if (use_icount && (ri->type & ARM_CP_IO)) {
+ /* I/O operations must end the TB here (whether read or write) */
+ gen_io_end();
+ s->is_jmp = DISAS_UPDATE;
+ } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ /* We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ s->is_jmp = DISAS_UPDATE;
+ }
}
/* C3.2.4 System
@@ -511,23 +1072,60 @@ static void disas_system(DisasContext *s, uint32_t insn)
}
return;
}
-
- if (op0 == 1) {
- /* C5.6.204 SYS */
- handle_sys(s, insn, l, op1, op2, crn, crm, rt);
- } else if (l) { /* op0 > 1 */
- /* C5.6.129 MRS - move from system register */
- handle_mrs(s, insn, op0, op1, op2, crn, crm, rt);
- } else {
- /* C5.6.131 MSR (register) - move to system register */
- handle_msr(s, insn, op0, op1, op2, crn, crm, rt);
- }
+ handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
-/* Exception generation */
+/* C3.2.3 Exception generation
+ *
+ * 31 24 23 21 20 5 4 2 1 0
+ * +-----------------+-----+------------------------+-----+----+
+ * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
+ * +-----------------+-----+------------------------+-----+----+
+ */
static void disas_exc(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int opc = extract32(insn, 21, 3);
+ int op2_ll = extract32(insn, 0, 5);
+
+ switch (opc) {
+ case 0:
+ /* SVC, HVC, SMC; since we don't support the Virtualization
+ * or TrustZone extensions these all UNDEF except SVC.
+ */
+ if (op2_ll != 1) {
+ unallocated_encoding(s);
+ break;
+ }
+ gen_exception_insn(s, 0, EXCP_SWI);
+ break;
+ case 1:
+ if (op2_ll != 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* BRK */
+ gen_exception_insn(s, 0, EXCP_BKPT);
+ break;
+ case 2:
+ if (op2_ll != 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* HLT */
+ unsupported_encoding(s, insn);
+ break;
+ case 5:
+ if (op2_ll < 1 || op2_ll > 3) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* DCPS1, DCPS2, DCPS3 */
+ unsupported_encoding(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
}
/* C3.2.7 Unconditional branch (register)
@@ -608,28 +1206,633 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
}
}
-/* Load/store exclusive */
+/*
+ * Load/Store exclusive instructions are implemented by remembering
+ * the value/address loaded, and seeing if these are the same
+ * when the store is performed. This is not actually the architecturally
+ * mandated semantics, but it works for typical guest code sequences
+ * and avoids having to monitor regular stores.
+ *
+ * In system emulation mode only one CPU will be running at once, so
+ * this sequence is effectively atomic. In user emulation mode we
+ * throw an exception and handle the atomic operation elsewhere.
+ */
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+ TCGv_i64 addr, int size, bool is_pair)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGMemOp memop = MO_TE + size;
+
+ g_assert(size <= 3);
+ tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
+
+ if (is_pair) {
+ TCGv_i64 addr2 = tcg_temp_new_i64();
+ TCGv_i64 hitmp = tcg_temp_new_i64();
+
+ g_assert(size >= 2);
+ tcg_gen_addi_i64(addr2, addr, 1 << size);
+ tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
+ tcg_temp_free_i64(addr2);
+ tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
+ tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
+ tcg_temp_free_i64(hitmp);
+ }
+
+ tcg_gen_mov_i64(cpu_exclusive_val, tmp);
+ tcg_gen_mov_i64(cpu_reg(s, rt), tmp);
+
+ tcg_temp_free_i64(tmp);
+ tcg_gen_mov_i64(cpu_exclusive_addr, addr);
+}
+
+#ifdef CONFIG_USER_ONLY
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i64 addr, int size, int is_pair)
+{
+ tcg_gen_mov_i64(cpu_exclusive_test, addr);
+ tcg_gen_movi_i32(cpu_exclusive_info,
+ size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14));
+ gen_exception_insn(s, 4, EXCP_STREX);
+}
+#else
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i64 addr, int size, int is_pair)
+{
+ qemu_log_mask(LOG_UNIMP,
+ "%s:%d: system mode store_exclusive unsupported "
+ "at pc=%016" PRIx64 "\n",
+ __FILE__, __LINE__, s->pc - 4);
+}
+#endif
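
The check described in the comment above, and which the system-mode path would eventually need, amounts to: the store succeeds (status 0) only if the address still matches the one recorded by the load-exclusive and memory still holds the recorded value. A conceptual sketch of that comparison, single-threaded and single-register only; this illustrates the strategy, it is not QEMU code:

    #include <stdint.h>

    /* Conceptual model only (no pair case, no actual memory API):
     * mirrors the remembered-address/value strategy, not the real
     * implementation.
     */
    struct excl_monitor { uint64_t addr; uint64_t val; };

    /* Returns the STXR status: 0 on success, 1 on failure. */
    static int store_exclusive_model(struct excl_monitor *m, uint64_t addr,
                                     uint64_t newval, uint64_t *mem)
    {
        if (m->addr == addr && *mem == m->val) {
            *mem = newval;
            m->addr = ~(uint64_t)0;   /* clear the monitor, as CLREX does */
            return 0;
        }
        m->addr = ~(uint64_t)0;
        return 1;
    }
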
+
+/* C3.3.6 Load/store exclusive
+ *
+ * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
+ * +-----+-------------+----+---+----+------+----+-------+------+------+
+ * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
+ * +-----+-------------+----+---+----+------+----+-------+------+------+
+ *
+ * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
+ * L: 0 -> store, 1 -> load
+ * o2: 0 -> exclusive, 1 -> not
+ * o1: 0 -> single register, 1 -> register pair
+ * o0: 1 -> load-acquire/store-release, 0 -> not
+ *
+ * o0 == 0 AND o2 == 1 is unallocated
+ * o1 == 1 is unallocated except for 32 and 64 bit sizes
+ */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rt2 = extract32(insn, 10, 5);
+ int is_lasr = extract32(insn, 15, 1);
+ int rs = extract32(insn, 16, 5);
+ int is_pair = extract32(insn, 21, 1);
+ int is_store = !extract32(insn, 22, 1);
+ int is_excl = !extract32(insn, 23, 1);
+ int size = extract32(insn, 30, 2);
+ TCGv_i64 tcg_addr;
+
+ if ((!is_excl && !is_lasr) ||
+ (is_pair && size < 2)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ /* Note that since TCG is single threaded load-acquire/store-release
+ * semantics require no extra if (is_lasr) { ... } handling.
+ */
+
+ if (is_excl) {
+ if (!is_store) {
+ gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
+ } else {
+ gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, tcg_addr, size);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
+ }
+ if (is_pair) {
+ TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt2, tcg_addr, size);
+ } else {
+ do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false);
+ }
+ }
+ }
}
-/* Load register (literal) */
+/*
+ * C3.3.5 Load register (literal)
+ *
+ * 31 30 29 27 26 25 24 23 5 4 0
+ * +-----+-------+---+-----+-------------------+-------+
+ * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
+ * +-----+-------+---+-----+-------------------+-------+
+ *
+ * V: 1 -> vector (simd/fp)
+ * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
+ * 10-> 32 bit signed, 11 -> prefetch
+ * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
+ */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rt = extract32(insn, 0, 5);
+ int64_t imm = sextract32(insn, 5, 19) << 2;
+ bool is_vector = extract32(insn, 26, 1);
+ int opc = extract32(insn, 30, 2);
+ bool is_signed = false;
+ int size = 2;
+ TCGv_i64 tcg_rt, tcg_addr;
+
+ if (is_vector) {
+ if (opc == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = 2 + opc;
+ } else {
+ if (opc == 3) {
+ /* PRFM (literal) : prefetch */
+ return;
+ }
+ size = 2 + extract32(opc, 0, 1);
+ is_signed = extract32(opc, 1, 1);
+ }
+
+ tcg_rt = cpu_reg(s, rt);
+
+ tcg_addr = tcg_const_i64((s->pc - 4) + imm);
+ if (is_vector) {
+ do_fp_ld(s, rt, tcg_addr, size);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
+ }
+ tcg_temp_free_i64(tcg_addr);
}
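
The literal address is the address of the instruction itself plus the sign-extended imm19 scaled by 4; since s->pc has already been advanced past the instruction, the code uses s->pc - 4. A worked example of the calculation (the addresses are made up for illustration):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Worked example of the literal address calculation above:
     * target = PC_of_insn + SignExtend(imm19) * 4.
     */
    int main(void)
    {
        uint64_t pc_next = 0x40001004;   /* s->pc, already past the insn */
        int32_t imm19 = -2;              /* sextract32(insn, 5, 19) */
        uint64_t target = (pc_next - 4) + (int64_t)imm19 * 4;
        printf("literal at 0x%" PRIx64 "\n", target);  /* 0x40000ff8 */
        return 0;
    }
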
-/* Load/store pair (all forms) */
+/*
+ * C5.6.80 LDNP (Load Pair - non-temporal hint)
+ * C5.6.81 LDP (Load Pair - non vector)
+ * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
+ * C5.6.176 STNP (Store Pair - non-temporal hint)
+ * C5.6.177 STP (Store Pair - non vector)
+ * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
+ * C6.3.165 LDP (Load Pair of SIMD&FP)
+ * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
+ * C6.3.284 STP (Store Pair of SIMD&FP)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
+ * +-----+-------+---+---+-------+---+-------+-------+------+------+
+ * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
+ * +-----+-------+---+---+-------+---+-------+-------+------+------+
+ *
+ * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
+ * LDPSW 01
+ * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
+ * V: 0 -> GPR, 1 -> Vector
+ * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
+ * 10 -> signed offset, 11 -> pre-index
+ * L: 0 -> Store 1 -> Load
+ *
+ * Rt, Rt2 = GPR or SIMD registers to be stored
+ * Rn = general purpose register containing address
+ * imm7 = signed offset (multiple of 4 or 8 depending on size)
+ */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rt2 = extract32(insn, 10, 5);
+ int64_t offset = sextract32(insn, 15, 7);
+ int index = extract32(insn, 23, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ bool is_load = extract32(insn, 22, 1);
+ int opc = extract32(insn, 30, 2);
+
+ bool is_signed = false;
+ bool postindex = false;
+ bool wback = false;
+
+ TCGv_i64 tcg_addr; /* calculated address */
+ int size;
+
+ if (opc == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_vector) {
+ size = 2 + opc;
+ } else {
+ size = 2 + extract32(opc, 1, 1);
+ is_signed = extract32(opc, 0, 1);
+ if (!is_load && is_signed) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ switch (index) {
+ case 1: /* post-index */
+ postindex = true;
+ wback = true;
+ break;
+ case 0:
+ /* signed offset with "non-temporal" hint. Since we don't emulate
+ * caches we don't care about hints to the cache system about
+ * data access patterns, and handle this identically to plain
+ * signed offset.
+ */
+ if (is_signed) {
+ /* There is no non-temporal-hint version of LDPSW */
+ unallocated_encoding(s);
+ return;
+ }
+ postindex = false;
+ break;
+ case 2: /* signed offset, rn not updated */
+ postindex = false;
+ break;
+ case 3: /* pre-index */
+ postindex = false;
+ wback = true;
+ break;
+ }
+
+ offset <<= size;
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ if (!postindex) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
+ }
+
+ if (is_vector) {
+ if (is_load) {
+ do_fp_ld(s, rt, tcg_addr, size);
+ } else {
+ do_fp_st(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ if (is_load) {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
+ } else {
+ do_gpr_st(s, tcg_rt, tcg_addr, size);
+ }
+ }
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
+ if (is_vector) {
+ if (is_load) {
+ do_fp_ld(s, rt2, tcg_addr, size);
+ } else {
+ do_fp_st(s, rt2, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
+ if (is_load) {
+ do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
+ } else {
+ do_gpr_st(s, tcg_rt2, tcg_addr, size);
+ }
+ }
+
+ if (wback) {
+ if (postindex) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
+ } else {
+ tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
+ }
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
+ }
+}
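
The three index forms differ only in when the scaled imm7 offset is applied and whether Rn is written back: with Rn = 0x1000, imm7 = 2 and size = 3 the offset is 16, so pre-index accesses 0x1010 and writes back 0x1010, post-index accesses 0x1000 and writes back 0x1010, and signed offset accesses 0x1010 with Rn untouched. A small sketch of that bookkeeping (illustrative only, not the TCG sequence):

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative address/writeback calculation for LDP/STP, mirroring
     * the postindex/wback flags decoded above; offset is already scaled
     * by size, and the second register sits at *access + (1 << size).
     */
    static void ldst_pair_addresses(uint64_t rn, int64_t offset,
                                    bool postindex, bool wback,
                                    uint64_t *access, uint64_t *rn_out)
    {
        *access = postindex ? rn : rn + offset;  /* first register address */
        *rn_out = wback ? rn + offset : rn;      /* value written back to Rn */
    }
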
+
+/*
+ * C3.3.8 Load/store (immediate post-indexed)
+ * C3.3.9 Load/store (immediate pre-indexed)
+ * C3.3.12 Load/store (unscaled immediate)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
+ * +----+-------+---+-----+-----+---+--------+-----+------+------+
+ * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
+ * +----+-------+---+-----+-----+---+--------+-----+------+------+
+ *
+ * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback)
+ * V: 0 -> non-vector (GPR), 1 -> vector (simd/fp)
+ * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ */
+static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm9 = sextract32(insn, 12, 9);
+ int opc = extract32(insn, 22, 2);
+ int size = extract32(insn, 30, 2);
+ int idx = extract32(insn, 10, 2);
+ bool is_signed = false;
+ bool is_store = false;
+ bool is_extended = false;
+ bool is_vector = extract32(insn, 26, 1);
+ bool post_index;
+ bool writeback;
+
+ TCGv_i64 tcg_addr;
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = ((opc & 1) == 0);
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = opc & (1<<1);
+ is_extended = (size < 3) && (opc & 1);
+ }
+
+ switch (idx) {
+ case 0:
+ post_index = false;
+ writeback = false;
+ break;
+ case 1:
+ post_index = true;
+ writeback = true;
+ break;
+ case 3:
+ post_index = false;
+ writeback = true;
+ break;
+ case 2:
+ g_assert(false);
+ break;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ if (!post_index) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
+ }
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, tcg_addr, size);
+ } else {
+ do_fp_ld(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, tcg_addr, size);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
+ }
+ }
+
+ if (writeback) {
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
+ if (post_index) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
+ }
+ tcg_gen_mov_i64(tcg_rn, tcg_addr);
+ }
+}
+
+/*
+ * C3.3.10 Load/store (register offset)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
+ * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
+ * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
+ * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
+ *
+ * For non-vector:
+ * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ * For vector:
+ * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
+ * opc<0>: 0 -> store, 1 -> load
+ * V: 1 -> vector/simd
+ * opt: extend encoding (see DecodeRegExtend)
+ * S: if S=1 then scale the offset by the access size (i.e. shift left by "size")
+ * Rt: register to transfer into/out of
+ * Rn: address register or SP for base
+ * Rm: offset register or ZR for offset
+ */
+static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int shift = extract32(insn, 12, 1);
+ int rm = extract32(insn, 16, 5);
+ int opc = extract32(insn, 22, 2);
+ int opt = extract32(insn, 13, 3);
+ int size = extract32(insn, 30, 2);
+ bool is_signed = false;
+ bool is_store = false;
+ bool is_extended = false;
+ bool is_vector = extract32(insn, 26, 1);
+
+ TCGv_i64 tcg_rm;
+ TCGv_i64 tcg_addr;
+
+ if (extract32(opt, 1, 1) == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = !extract32(opc, 0, 1);
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ tcg_rm = read_cpu_reg(s, rm, 1);
+ ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
+
+ tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, tcg_addr, size);
+ } else {
+ do_fp_ld(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, tcg_addr, size);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
+ }
+ }
+}
+
+/*
+ * C3.3.13 Load/store (unsigned immediate)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 10 9 5
+ * +----+-------+---+-----+-----+------------+-------+------+
+ * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
+ * +----+-------+---+-----+-----+------------+-------+------+
+ *
+ * For non-vector:
+ * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ * For vector:
+ * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
+ * opc<0>: 0 -> store, 1 -> load
+ * Rn: base address register (inc SP)
+ * Rt: target register
+ */
+static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ unsigned int imm12 = extract32(insn, 10, 12);
+ bool is_vector = extract32(insn, 26, 1);
+ int size = extract32(insn, 30, 2);
+ int opc = extract32(insn, 22, 2);
+ unsigned int offset;
+
+ TCGv_i64 tcg_addr;
+
+ bool is_store;
+ bool is_signed = false;
+ bool is_extended = false;
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = !extract32(opc, 0, 1);
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+ offset = imm12 << size;
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, tcg_addr, size);
+ } else {
+ do_fp_ld(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, tcg_addr, size);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
+ }
+ }
+}
+
+/* Load/store register (immediate forms) */
+static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 10, 2)) {
+ case 0: case 1: case 3:
+ /* Load/store register (unscaled immediate) */
+ /* Load/store immediate pre/post-indexed */
+ disas_ldst_reg_imm9(s, insn);
+ break;
+ case 2:
+ /* Load/store register unprivileged */
+ unsupported_encoding(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
}
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ switch (extract32(insn, 24, 2)) {
+ case 0:
+ if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
+ disas_ldst_reg_roffset(s, insn);
+ } else {
+ disas_ldst_reg_imm(s, insn);
+ }
+ break;
+ case 1:
+ disas_ldst_reg_unsigned_imm(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
}
/* AdvSIMD load/store multiple structures */
@@ -701,10 +1904,68 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
-/* Add/subtract (immediate) */
+/*
+ * C3.4.1 Add/subtract (immediate)
+ *
+ * 31 30 29 28 24 23 22 21 10 9 5 4 0
+ * +--+--+--+-----------+-----+-------------+-----+-----+
+ * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
+ * +--+--+--+-----------+-----+-------------+-----+-----+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
+ */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ uint64_t imm = extract32(insn, 10, 12);
+ int shift = extract32(insn, 22, 2);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool is_64bit = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
+ TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
+ TCGv_i64 tcg_result;
+
+ switch (shift) {
+ case 0x0:
+ break;
+ case 0x1:
+ imm <<= 12;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_result = tcg_temp_new_i64();
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
+ } else {
+ tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
+ }
+ } else {
+ TCGv_i64 tcg_imm = tcg_const_i64(imm);
+ if (sub_op) {
+ gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
+ } else {
+ gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
+ }
+ tcg_temp_free_i64(tcg_imm);
+ }
+
+ if (is_64bit) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
}
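
The only shift choices are LSL #0 and LSL #12, so a single ADD/SUB immediate reaches any value up to 0xfff or any multiple of 0x1000 up to 0xfff000; for instance "add x0, x1, #5, lsl #12" adds 0x5000. A trivial check of that expansion (worked example only):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t imm12 = 0x5;
        int shift = 1;               /* encoded "shift" field: 01 -> LSL #12 */
        uint64_t imm = shift ? imm12 << 12 : imm12;
        assert(imm == 0x5000);
        return 0;
    }
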
/* The input should be a value in the bottom e bits (with higher
@@ -863,10 +2124,57 @@ static void disas_logic_imm(DisasContext *s, uint32_t insn)
}
}
-/* Move wide (immediate) */
+/*
+ * C3.4.5 Move wide (immediate)
+ *
+ * 31 30 29 28 23 22 21 20 5 4 0
+ * +--+-----+-------------+-----+----------------+------+
+ * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
+ * +--+-----+-------------+-----+----------------+------+
+ *
+ * sf: 0 -> 32 bit, 1 -> 64 bit
+ * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
+ * hw: shift amount / 16 (0 or 16; 32 and 48 are valid only when sf is set)
+ */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rd = extract32(insn, 0, 5);
+ uint64_t imm = extract32(insn, 5, 16);
+ int sf = extract32(insn, 31, 1);
+ int opc = extract32(insn, 29, 2);
+ int pos = extract32(insn, 21, 2) << 4;
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_imm;
+
+ if (!sf && (pos >= 32)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opc) {
+ case 0: /* MOVN */
+ case 2: /* MOVZ */
+ imm <<= pos;
+ if (opc == 0) {
+ imm = ~imm;
+ }
+ if (!sf) {
+ imm &= 0xffffffffu;
+ }
+ tcg_gen_movi_i64(tcg_rd, imm);
+ break;
+ case 3: /* MOVK */
+ tcg_imm = tcg_const_i64(imm);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
+ tcg_temp_free_i64(tcg_imm);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
}
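
MOVZ writes one 16-bit chunk and zeroes the rest, and MOVK patches further chunks in place, which is how arbitrary 64-bit constants get materialised; MOVN writes the bitwise NOT of the shifted chunk. A worked example of the chunk composition (host-side illustration only):

    #include <stdint.h>
    #include <assert.h>

    /* Build 0xdeadbeef00c0ffee the way a movz/movk sequence would:
     *   movz x0, #0xffee
     *   movk x0, #0x00c0, lsl #16
     *   movk x0, #0xbeef, lsl #32
     *   movk x0, #0xdead, lsl #48
     */
    static uint64_t movk(uint64_t rd, uint64_t imm16, int pos)
    {
        return (rd & ~(0xffffULL << pos)) | (imm16 << pos);  /* deposit 16 bits */
    }

    int main(void)
    {
        uint64_t x0 = 0xffee;                 /* MOVZ */
        x0 = movk(x0, 0x00c0, 16);
        x0 = movk(x0, 0xbeef, 32);
        x0 = movk(x0, 0xdead, 48);
        assert(x0 == 0xdeadbeef00c0ffeeULL);
        return 0;
    }
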
/* C3.4.2 Bitfield
@@ -1162,40 +2470,346 @@ static void disas_logic_reg(DisasContext *s, uint32_t insn)
}
}
-/* Add/subtract (extended register) */
+/*
+ * C3.5.1 Add/subtract (extended register)
+ *
+ * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
+ * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
+ * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * opt: 00
+ * option: extension type (see DecodeRegExtend)
+ * imm3: optional shift to Rm
+ *
+ * Rd = Rn + LSL(extend(Rm), amount)
+ */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm3 = extract32(insn, 10, 3);
+ int option = extract32(insn, 13, 3);
+ int rm = extract32(insn, 16, 5);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rm, tcg_rn; /* temps */
+ TCGv_i64 tcg_rd;
+ TCGv_i64 tcg_result;
+
+ if (imm3 > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* non-flag setting ops may use SP */
+ if (!setflags) {
+ tcg_rn = read_cpu_reg_sp(s, rn, sf);
+ tcg_rd = cpu_reg_sp(s, rd);
+ } else {
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_rd = cpu_reg(s, rd);
+ }
+
+ tcg_rm = read_cpu_reg(s, rm, sf);
+ ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
+
+ tcg_result = tcg_temp_new_i64();
+
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
+ }
+ } else {
+ if (sub_op) {
+ gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ }
+ }
+
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
}
-/* Add/subtract (shifted register) */
+/*
+ * C3.5.2 Add/subtract (shifted register)
+ *
+ * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
+ * +--+--+--+-----------+-----+--+-------+---------+------+------+
+ * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
+ * +--+--+--+-----------+-----+--+-------+---------+------+------+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
+ * imm6: Shift amount to apply to Rm before the add/sub
+ */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm6 = extract32(insn, 10, 6);
+ int rm = extract32(insn, 16, 5);
+ int shift_type = extract32(insn, 22, 2);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn, tcg_rm;
+ TCGv_i64 tcg_result;
+
+ if ((shift_type == 3) || (!sf && (imm6 > 31))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_rm = read_cpu_reg(s, rm, sf);
+
+ shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
+
+ tcg_result = tcg_temp_new_i64();
+
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
+ }
+ } else {
+ if (sub_op) {
+ gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ }
+ }
+
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
}
-/* Data-processing (3 source) */
+/* C3.5.9 Data-processing (3 source)
+ *
+ *  31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
+ * +--+------+-----------+------+------+----+------+------+------+
+ * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
+ * +--+------+-----------+------+------+----+------+------+------+
+ */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int ra = extract32(insn, 10, 5);
+ int rm = extract32(insn, 16, 5);
+ int op_id = (extract32(insn, 29, 3) << 4) |
+ (extract32(insn, 21, 3) << 1) |
+ extract32(insn, 15, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool is_sub = extract32(op_id, 0, 1);
+ bool is_high = extract32(op_id, 2, 1);
+ bool is_signed = false;
+ TCGv_i64 tcg_op1;
+ TCGv_i64 tcg_op2;
+ TCGv_i64 tcg_tmp;
+
+ /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
+ switch (op_id) {
+ case 0x42: /* SMADDL */
+ case 0x43: /* SMSUBL */
+ case 0x44: /* SMULH */
+ is_signed = true;
+ break;
+ case 0x0: /* MADD (32bit) */
+ case 0x1: /* MSUB (32bit) */
+ case 0x40: /* MADD (64bit) */
+ case 0x41: /* MSUB (64bit) */
+ case 0x4a: /* UMADDL */
+ case 0x4b: /* UMSUBL */
+ case 0x4c: /* UMULH */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_high) {
+ TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+ TCGv_i64 tcg_rm = cpu_reg(s, rm);
+
+ if (is_signed) {
+ tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ }
+
+ tcg_temp_free_i64(low_bits);
+ return;
+ }
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
+ tcg_tmp = tcg_temp_new_i64();
+
+ if (op_id < 0x42) {
+ tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
+ } else {
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
+ }
+ }
+
+ if (ra == 31 && !is_sub) {
+ /* Special-case MADD with rA == XZR; it is the standard MUL alias */
+ tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
+ } else {
+ tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
+ if (is_sub) {
+ tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ } else {
+ tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ }
+ }
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_tmp);
}
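
op_id packs sf, op54, op31 and o0 into one value so the 32/64-bit flag takes part in opcode selection: 64-bit MADD is sf=1, op54=00, op31=000, o0=0, i.e. 0x40, and SMADDL is the same with op31=001, i.e. 0x42. A quick check of the packing (worked example only):

    #include <assert.h>

    /* Recreate the op_id packing used above: sf:op54:op31:o0. */
    static int make_op_id(int sf, int op54, int op31, int o0)
    {
        return ((sf << 2 | op54) << 4) | (op31 << 1) | o0;
    }

    int main(void)
    {
        assert(make_op_id(1, 0, 0, 0) == 0x40);  /* MADD (64 bit) */
        assert(make_op_id(1, 0, 1, 0) == 0x42);  /* SMADDL */
        assert(make_op_id(1, 0, 6, 0) == 0x4c);  /* UMULH */
        return 0;
    }
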
-/* Add/subtract (with carry) */
+/* C3.5.3 - Add/subtract (with carry)
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
+ * +--+--+--+------------------------+------+---------+------+-----+
+ * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
+ * +--+--+--+------------------------+------+---------+------+-----+
+ * [000000]
+ */
+
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
-}
+ unsigned int sf, op, setflags, rm, rn, rd;
+ TCGv_i64 tcg_y, tcg_rn, tcg_rd;
-/* Conditional compare (immediate) */
-static void disas_cc_imm(DisasContext *s, uint32_t insn)
-{
- unsupported_encoding(s, insn);
+ if (extract32(insn, 10, 6) != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 30, 1);
+ setflags = extract32(insn, 29, 1);
+ rm = extract32(insn, 16, 5);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (op) {
+ tcg_y = new_tmp_a64(s);
+ tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
+ } else {
+ tcg_y = cpu_reg(s, rm);
+ }
+
+ if (setflags) {
+ gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
+ } else {
+ gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
+ }
}
-/* Conditional compare (register) */
-static void disas_cc_reg(DisasContext *s, uint32_t insn)
+/* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
+ * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
+ * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
+ * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
+ * [1] y [0] [0]
+ */
+static void disas_cc(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ unsigned int sf, op, y, cond, rn, nzcv, is_imm;
+ int label_continue = -1;
+ TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
+
+ if (!extract32(insn, 29, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (insn & (1 << 10 | 1 << 4)) {
+ unallocated_encoding(s);
+ return;
+ }
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 30, 1);
+ is_imm = extract32(insn, 11, 1);
+ y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ nzcv = extract32(insn, 0, 4);
+
+ if (cond < 0x0e) { /* not always */
+ int label_match = gen_new_label();
+ label_continue = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ /* nomatch: */
+ tcg_tmp = tcg_temp_new_i64();
+ tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
+ gen_set_nzcv(tcg_tmp);
+ tcg_temp_free_i64(tcg_tmp);
+ tcg_gen_br(label_continue);
+ gen_set_label(label_match);
+ }
+ /* match, or condition is always */
+ if (is_imm) {
+ tcg_y = new_tmp_a64(s);
+ tcg_gen_movi_i64(tcg_y, y);
+ } else {
+ tcg_y = cpu_reg(s, y);
+ }
+ tcg_rn = cpu_reg(s, rn);
+
+ tcg_tmp = tcg_temp_new_i64();
+ if (op) {
+ gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+ } else {
+ gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+ }
+ tcg_temp_free_i64(tcg_tmp);
+
+ if (cond < 0x0e) { /* continue */
+ gen_set_label(label_continue);
+ }
}
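
The generated sequence implements the architectural behaviour of CCMP (op=1) and CCMN (op=0): when the condition holds, the flags come from comparing Rn with the register or 5-bit immediate; when it fails, they are set directly from the nzcv field. A host-side model of the 64-bit CCMP case (illustrative only; the flag formulas are the usual subtract conventions):

    #include <stdint.h>
    #include <stdbool.h>

    /* Model of 64-bit CCMP: if the condition holds, NZCV comes from the
     * subtraction Rn - y; otherwise it is the 4-bit immediate. Returns
     * the flags as an N:Z:C:V nibble.
     */
    static unsigned ccmp64_model(bool cond_holds, uint64_t rn, uint64_t y,
                                 unsigned nzcv_imm)
    {
        if (!cond_holds) {
            return nzcv_imm & 0xf;
        }
        uint64_t res = rn - y;
        unsigned n = res >> 63;
        unsigned z = (res == 0);
        unsigned c = (rn >= y);                            /* no borrow */
        unsigned v = (((rn ^ y) & (rn ^ res)) >> 63) & 1;  /* signed overflow */
        return n << 3 | z << 2 | c << 1 | v;
    }
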
/* C3.5.6 Conditional select
@@ -1549,11 +3163,7 @@ static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
disas_adc_sbc(s, insn);
break;
case 0x2: /* Conditional compare */
- if (insn & (1 << 11)) { /* (immediate) */
- disas_cc_imm(s, insn);
- } else { /* (register) */
- disas_cc_reg(s, insn);
- }
+ disas_cc(s, insn); /* both imm and reg forms */
break;
case 0x4: /* Conditional select */
disas_cond_select(s, insn);
@@ -1576,10 +3186,1062 @@ static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
}
}
+/* Convert ARM rounding mode to softfloat */
+static inline int arm_rmode_to_sf(int rmode)
+{
+ switch (rmode) {
+ case FPROUNDING_TIEAWAY:
+ rmode = float_round_ties_away;
+ break;
+ case FPROUNDING_ODD:
+ /* FIXME: add support for FPROUNDING_ODD */
+ qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
+ rmode);
+ /* fall through */
+ case FPROUNDING_TIEEVEN:
+ default:
+ rmode = float_round_nearest_even;
+ break;
+ case FPROUNDING_POSINF:
+ rmode = float_round_up;
+ break;
+ case FPROUNDING_NEGINF:
+ rmode = float_round_down;
+ break;
+ case FPROUNDING_ZERO:
+ rmode = float_round_to_zero;
+ break;
+ }
+ return rmode;
+}
+
+static void handle_fp_compare(DisasContext *s, bool is_double,
+ unsigned int rn, unsigned int rm,
+ bool cmp_with_zero, bool signal_all_nans)
+{
+ TCGv_i64 tcg_flags = tcg_temp_new_i64();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ if (is_double) {
+ TCGv_i64 tcg_vn, tcg_vm;
+
+ tcg_vn = read_fp_dreg(s, rn);
+ if (cmp_with_zero) {
+ tcg_vm = tcg_const_i64(0);
+ } else {
+ tcg_vm = read_fp_dreg(s, rm);
+ }
+ if (signal_all_nans) {
+ gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ tcg_temp_free_i64(tcg_vn);
+ tcg_temp_free_i64(tcg_vm);
+ } else {
+ TCGv_i32 tcg_vn, tcg_vm;
+
+ tcg_vn = read_fp_sreg(s, rn);
+ if (cmp_with_zero) {
+ tcg_vm = tcg_const_i32(0);
+ } else {
+ tcg_vm = read_fp_sreg(s, rm);
+ }
+ if (signal_all_nans) {
+ gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ tcg_temp_free_i32(tcg_vn);
+ tcg_temp_free_i32(tcg_vm);
+ }
+
+ tcg_temp_free_ptr(fpst);
+
+ gen_set_nzcv(tcg_flags);
+
+ tcg_temp_free_i64(tcg_flags);
+}
+
+/* C3.6.22 Floating point compare
+ * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
+ * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
+ */
+static void disas_fp_compare(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, op, rn, opc, op2r;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
+ rm = extract32(insn, 16, 5);
+ op = extract32(insn, 14, 2);
+ rn = extract32(insn, 5, 5);
+ opc = extract32(insn, 3, 2);
+ op2r = extract32(insn, 0, 3);
+
+ if (mos || op || op2r || type > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
+}
+
+/* C3.6.23 Floating point conditional compare
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
+ * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
+ * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
+ */
+static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, cond, rn, op, nzcv;
+ TCGv_i64 tcg_flags;
+ int label_continue = -1;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ op = extract32(insn, 4, 1);
+ nzcv = extract32(insn, 0, 4);
+
+ if (mos || type > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (cond < 0x0e) { /* not always */
+ int label_match = gen_new_label();
+ label_continue = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ /* nomatch: */
+ tcg_flags = tcg_const_i64(nzcv << 28);
+ gen_set_nzcv(tcg_flags);
+ tcg_temp_free_i64(tcg_flags);
+ tcg_gen_br(label_continue);
+ gen_set_label(label_match);
+ }
+
+ handle_fp_compare(s, type, rn, rm, false, op);
+
+ if (cond < 0x0e) {
+ gen_set_label(label_continue);
+ }
+}
+
+/* copy src FP register to dst FP register; type specifies single or double */
+static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src)
+{
+ if (type) {
+ TCGv_i64 v = read_fp_dreg(s, src);
+ write_fp_dreg(s, dst, v);
+ tcg_temp_free_i64(v);
+ } else {
+ TCGv_i32 v = read_fp_sreg(s, src);
+ write_fp_sreg(s, dst, v);
+ tcg_temp_free_i32(v);
+ }
+}
+
+/* C3.6.24 Floating point conditional select
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+------+-----+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+------+-----+------+------+
+ */
+static void disas_fp_csel(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, cond, rn, rd;
+ int label_continue = -1;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (mos || type > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (cond < 0x0e) { /* not always */
+ int label_match = gen_new_label();
+ label_continue = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ /* nomatch: */
+ gen_mov_fp2fp(s, type, rd, rm);
+ tcg_gen_br(label_continue);
+ gen_set_label(label_match);
+ }
+
+ gen_mov_fp2fp(s, type, rd, rn);
+
+ if (cond < 0x0e) { /* continue */
+ gen_set_label(label_continue);
+ }
+}
+
+/* C3.6.25 Floating-point data-processing (1 source) - single precision */
+static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tcg_op;
+ TCGv_i32 tcg_res;
+
+ fpst = get_fpstatus_ptr();
+ tcg_op = read_fp_sreg(s, rn);
+ tcg_res = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ tcg_gen_mov_i32(tcg_res, tcg_op);
+ break;
+ case 0x1: /* FABS */
+ gen_helper_vfp_abss(tcg_res, tcg_op);
+ break;
+ case 0x2: /* FNEG */
+ gen_helper_vfp_negs(tcg_res, tcg_op);
+ break;
+ case 0x3: /* FSQRT */
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ {
+ TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ gen_helper_rints(tcg_res, tcg_op, fpst);
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ break;
+ }
+ case 0xe: /* FRINTX */
+ gen_helper_rints_exact(tcg_res, tcg_op, fpst);
+ break;
+ case 0xf: /* FRINTI */
+ gen_helper_rints(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ abort();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* C3.6.25 Floating-point data-processing (1 source) - double precision */
+static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tcg_op;
+ TCGv_i64 tcg_res;
+
+ fpst = get_fpstatus_ptr();
+ tcg_op = read_fp_dreg(s, rn);
+ tcg_res = tcg_temp_new_i64();
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ tcg_gen_mov_i64(tcg_res, tcg_op);
+ break;
+ case 0x1: /* FABS */
+ gen_helper_vfp_absd(tcg_res, tcg_op);
+ break;
+ case 0x2: /* FNEG */
+ gen_helper_vfp_negd(tcg_res, tcg_op);
+ break;
+ case 0x3: /* FSQRT */
+ gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ {
+ TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ break;
+ }
+ case 0xe: /* FRINTX */
+ gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
+ break;
+ case 0xf: /* FRINTI */
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ abort();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+}
+
+static void handle_fp_fcvt(DisasContext *s, int opcode,
+ int rd, int rn, int dtype, int ntype)
+{
+ switch (ntype) {
+ case 0x0:
+ {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+ if (dtype == 1) {
+ /* Single to double */
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ } else {
+ /* Single to half */
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
+ /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ }
+ tcg_temp_free_i32(tcg_rn);
+ break;
+ }
+ case 0x1:
+ {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ if (dtype == 0) {
+ /* Double to single */
+ gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
+ } else {
+ /* Double to half */
+ gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
+ /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ }
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ break;
+ }
+ case 0x3:
+ {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+ tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
+ if (dtype == 0) {
+ /* Half to single */
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ } else {
+ /* Half to double */
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ }
+ tcg_temp_free_i32(tcg_rn);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+/* C3.6.25 Floating point data-processing (1 source)
+ * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
+ * +---+---+---+-----------+------+---+--------+-----------+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+--------+-----------+------+------+
+ */
+static void disas_fp_1src(DisasContext *s, uint32_t insn)
+{
+ int type = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 15, 6);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 0x4: case 0x5: case 0x7:
+ {
+ /* FCVT between half, single and double precision */
+ int dtype = extract32(opcode, 0, 2);
+ if (type == 2 || dtype == type) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
+ break;
+ }
+ case 0x0 ... 0x3:
+ case 0x8 ... 0xc:
+ case 0xe ... 0xf:
+ /* 32-to-32 and 64-to-64 ops */
+ switch (type) {
+ case 0:
+ handle_fp_1src_single(s, opcode, rd, rn);
+ break;
+ case 1:
+ handle_fp_1src_double(s, opcode, rd, rn);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.6.26 Floating-point data-processing (2 source) - single precision */
+static void handle_fp_2src_single(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_op1;
+ TCGv_i32 tcg_op2;
+ TCGv_i32 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i32();
+ fpst = get_fpstatus_ptr();
+ tcg_op1 = read_fp_sreg(s, rn);
+ tcg_op2 = read_fp_sreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_negs(tcg_res, tcg_res);
+ break;
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* C3.6.26 Floating-point data-processing (2 source) - double precision */
+static void handle_fp_2src_double(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i64 tcg_op1;
+ TCGv_i64 tcg_op2;
+ TCGv_i64 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i64();
+ fpst = get_fpstatus_ptr();
+ tcg_op1 = read_fp_dreg(s, rn);
+ tcg_op2 = read_fp_dreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_negd(tcg_res, tcg_res);
+ break;
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* C3.6.26 Floating point data-processing (2 source)
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_fp_2src(DisasContext *s, uint32_t insn)
+{
+ int type = extract32(insn, 22, 2);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int opcode = extract32(insn, 12, 4);
+
+ if (opcode > 8) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ handle_fp_2src_single(s, opcode, rd, rn, rm);
+ break;
+ case 1:
+ handle_fp_2src_double(s, opcode, rd, rn, rm);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+}
+
+/* C3.6.27 Floating-point data-processing (3 source) - single precision */
+static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ tcg_op1 = read_fp_sreg(s, rn);
+ tcg_op2 = read_fp_sreg(s, rm);
+ tcg_op3 = read_fp_sreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+ * correct: an input NaN should come out with its sign bit
+ * flipped if it is a negated input.
+ */
+ if (o1 == true) {
+ gen_helper_vfp_negs(tcg_op3, tcg_op3);
+ }
+
+ if (o0 != o1) {
+ gen_helper_vfp_negs(tcg_op1, tcg_op1);
+ }
+
+ gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_op3);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* C3.6.27 Floating-point data-processing (3 source) - double precision */
+static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ tcg_op1 = read_fp_dreg(s, rn);
+ tcg_op2 = read_fp_dreg(s, rm);
+ tcg_op3 = read_fp_dreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+ * correct: an input NaN should come out with its sign bit
+ * flipped if it is a negated input.
+ */
+ if (o1 == true) {
+ gen_helper_vfp_negd(tcg_op3, tcg_op3);
+ }
+
+ if (o0 != o1) {
+ gen_helper_vfp_negd(tcg_op1, tcg_op1);
+ }
+
+ gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_op3);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* C3.6.27 Floating point data-processing (3 source)
+ * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
+ * +---+---+---+-----------+------+----+------+----+------+------+------+
+ * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
+ * +---+---+---+-----------+------+----+------+----+------+------+------+
+ */
+static void disas_fp_3src(DisasContext *s, uint32_t insn)
+{
+ int type = extract32(insn, 22, 2);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int ra = extract32(insn, 10, 5);
+ int rm = extract32(insn, 16, 5);
+ bool o0 = extract32(insn, 15, 1);
+ bool o1 = extract32(insn, 21, 1);
+
+ switch (type) {
+ case 0:
+ handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
+ break;
+ case 1:
+ handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+}
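
The two conditional negations in the handlers above implement the whole FMADD family: o1 selects whether the addend Ra is negated and o0 != o1 whether one multiplicand is negated, giving FMADD, FMSUB, FNMADD and FNMSUB. A host-float restatement of that scheme (illustrative only; it ignores the NaN sign subtleties the comment mentions):

    #include <math.h>

    /* Host-float model of the negation scheme above, single precision:
     *   o1=0,o0=0: FMADD    ra + rn*rm
     *   o1=0,o0=1: FMSUB    ra - rn*rm
     *   o1=1,o0=0: FNMADD  -ra - rn*rm
     *   o1=1,o0=1: FNMSUB  -ra + rn*rm
     */
    static float fp_3src_model(int o0, int o1, float rn, float rm, float ra)
    {
        if (o1) {
            ra = -ra;
        }
        if (o0 != o1) {
            rn = -rn;
        }
        return fmaf(rn, rm, ra);   /* fused multiply-add, single rounding */
    }
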
+
+/* C3.6.28 Floating point immediate
+ * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------------+-------+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
+ * +---+---+---+-----------+------+---+------------+-------+------+------+
+ */
+static void disas_fp_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int imm8 = extract32(insn, 13, 8);
+ int is_double = extract32(insn, 22, 2);
+ uint64_t imm;
+ TCGv_i64 tcg_res;
+
+ if (is_double > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* The imm8 encodes the sign bit, enough bits to represent
+ * an exponent in the range 01....1xx to 10....0xx,
+ * and the most significant 4 bits of the mantissa; see
+ * VFPExpandImm() in the v8 ARM ARM.
+ */
+ if (is_double) {
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
+ extract32(imm8, 0, 6);
+ imm <<= 48;
+ } else {
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
+ (extract32(imm8, 0, 6) << 3);
+ imm <<= 16;
+ }
+
+ tcg_res = tcg_const_i64(imm);
+ write_fp_dreg(s, rd, tcg_res);
+ tcg_temp_free_i64(tcg_res);
+}
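
As a worked example of the single-precision expansion: imm8 = 0x70 (sign 0, bit 6 set, low bits 110000) produces 0x3f800000, i.e. 1.0f, and imm8 = 0x00 produces 0x40000000, i.e. 2.0f. A host-side check of those two cases (the helper name is illustrative only):

    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    /* Same bit construction as the single-precision branch above. */
    static uint32_t vfp_expand_imm_single(uint32_t imm8)
    {
        uint32_t imm;
        imm = ((imm8 >> 7) & 1 ? 0x8000 : 0) |
              ((imm8 >> 6) & 1 ? 0x3e00 : 0x4000) |
              ((imm8 & 0x3f) << 3);
        return imm << 16;
    }

    int main(void)
    {
        float one, two;
        uint32_t a = vfp_expand_imm_single(0x70);
        uint32_t b = vfp_expand_imm_single(0x00);
        memcpy(&one, &a, sizeof(one));
        memcpy(&two, &b, sizeof(two));
        assert(one == 1.0f && two == 2.0f);
        return 0;
    }
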
+
+/* Handle floating point <=> fixed point conversions. Note that we can
+ * also deal with fp <=> integer conversions as a special case (scale == 64)
+ * OPTME: consider handling that special case specially or at least skipping
+ * the call to scalbn in the helpers for zero shifts.
+ */
+static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
+ bool itof, int rmode, int scale, int sf, int type)
+{
+ bool is_signed = !(opcode & 1);
+ bool is_double = type;
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_shift;
+
+ tcg_fpstatus = get_fpstatus_ptr();
+
+ tcg_shift = tcg_const_i32(64 - scale);
+
+ if (itof) {
+ TCGv_i64 tcg_int = cpu_reg(s, rn);
+ if (!sf) {
+ TCGv_i64 tcg_extend = new_tmp_a64(s);
+
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_extend, tcg_int);
+ } else {
+ tcg_gen_ext32u_i64(tcg_extend, tcg_int);
+ }
+
+ tcg_int = tcg_extend;
+ }
+
+ if (is_double) {
+ TCGv_i64 tcg_double = tcg_temp_new_i64();
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_dreg(s, rd, tcg_double);
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_sreg(s, rd, tcg_single);
+ tcg_temp_free_i32(tcg_single);
+ }
+ } else {
+ TCGv_i64 tcg_int = cpu_reg(s, rd);
+ TCGv_i32 tcg_rmode;
+
+ if (extract32(opcode, 2, 1)) {
+ /* There are too many rounding modes to all fit into rmode,
+ * so FCVTA[US] is a special case.
+ */
+ rmode = FPROUNDING_TIEAWAY;
+ }
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+ if (is_double) {
+ TCGv_i64 tcg_double = read_fp_dreg(s, rn);
+ if (is_signed) {
+ if (!sf) {
+ gen_helper_vfp_tosld(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosqd(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ if (!sf) {
+ gen_helper_vfp_tould(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqd(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ }
+ }
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single = read_fp_sreg(s, rn);
+ if (sf) {
+ if (is_signed) {
+ gen_helper_vfp_tosqs(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqs(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ TCGv_i32 tcg_dest = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_tosls(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touls(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
+ tcg_temp_free_i32(tcg_dest);
+ }
+ tcg_temp_free_i32(tcg_single);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_int, tcg_int);
+ }
+ }
+
+ tcg_temp_free_ptr(tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+}
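
The scale field encodes 64 minus the number of fractional bits, so the helpers always receive shift = 64 - scale = fbits, and the plain integer conversions are simply the scale == 64 case where the shift is zero. For example, SCVTF with 8 fractional bits has scale = 56 and converts the integer 300 to 300 / 2^8 = 1.171875 (worked example only):

    #include <assert.h>

    /* fbits = 64 - scale; the converted value is int / 2^fbits, which is
     * what the scalbn inside the vfp conversion helpers applies.
     */
    int main(void)
    {
        int scale = 56;                      /* encodes 8 fractional bits */
        int fbits = 64 - scale;
        double val = (double)300 / (1 << fbits);
        assert(fbits == 8);
        assert(val == 1.171875);
        return 0;
    }
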
+
+/* C3.6.29 Floating point <-> fixed point conversions
+ * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
+ * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
+ * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
+ * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
+ */
+static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int scale = extract32(insn, 10, 6);
+ int opcode = extract32(insn, 16, 3);
+ int rmode = extract32(insn, 19, 2);
+ int type = extract32(insn, 22, 2);
+ bool sbit = extract32(insn, 29, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool itof;
+
+ if (sbit || (type > 1)
+ || (!sf && scale < 32)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch ((rmode << 3) | opcode) {
+ case 0x2: /* SCVTF */
+ case 0x3: /* UCVTF */
+ itof = true;
+ break;
+ case 0x18: /* FCVTZS */
+ case 0x19: /* FCVTZU */
+ itof = false;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
+}
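In this encoding the immediate fraction-bit count is fbits = 64 - scale, which is exactly the tcg_shift that handle_fpfpcvt() computes. For the 32-bit forms (sf == 0) the architecture only allows fbits in 1..32, i.e. scale >= 32, which is what the (!sf && scale < 32) check above rejects as unallocated.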
+
+static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
+{
+ /* FMOV: gpr to or from float, double, or top half of quad fp reg,
+ * without conversion.
+ */
+
+ if (itof) {
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+
+ switch (type) {
+ case 0:
+ {
+ /* 32 bit */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(tmp, tcg_rn);
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(rd, MO_64));
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ case 1:
+ {
+ /* 64 bit */
+ TCGv_i64 tmp = tcg_const_i64(0);
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(rd, MO_64));
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ case 2:
+ /* 64 bit to top half. */
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(rd));
+ break;
+ }
+ } else {
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+
+ switch (type) {
+ case 0:
+ /* 32 bit */
+ tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_32));
+ break;
+ case 1:
+ /* 64 bit */
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_64));
+ break;
+ case 2:
+ /* 64 bits from top half */
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(rn));
+ break;
+ }
+ }
+}
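The three itof forms above follow the usual AArch64 rule that writing a narrower value into a SIMD&FP register clears the rest of the 128-bit register, except for the D[1] form, which only replaces the top half. A minimal model (illustrative names, not QEMU's register layout):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } QRegModel;   /* 128-bit Q register as two halves */

    static void fmov_to_fpreg_model(QRegModel *qd, uint64_t xn, int type)
    {
        switch (type) {
        case 0:                  /* FMOV Sd, Wn: 32 bits in, everything else zeroed */
            qd->lo = (uint32_t)xn;
            qd->hi = 0;
            break;
        case 1:                  /* FMOV Dd, Xn: 64 bits in, top half zeroed */
            qd->lo = xn;
            qd->hi = 0;
            break;
        case 2:                  /* FMOV Vd.D[1], Xn: top half replaced, bottom half kept */
            qd->hi = xn;
            break;
        }
    }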
+
+/* C3.6.30 Floating point <-> integer conversions
+ * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
+ * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
+ * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
+ * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
+ */
+static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 16, 3);
+ int rmode = extract32(insn, 19, 2);
+ int type = extract32(insn, 22, 2);
+ bool sbit = extract32(insn, 29, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ if (sbit) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (opcode > 5) {
+ /* FMOV */
+ bool itof = opcode & 1;
+
+ if (rmode >= 2) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (sf << 3 | type << 1 | rmode) {
+ case 0x0: /* 32 bit */
+ case 0xa: /* 64 bit */
+ case 0xd: /* 64 bit to top half of quad */
+ break;
+ default:
+ /* all other sf/type/rmode combinations are invalid */
+ unallocated_encoding(s);
+            return;
+ }
+
+ handle_fmov(s, rd, rn, type, itof);
+ } else {
+ /* actual FP conversions */
+ bool itof = extract32(opcode, 1, 1);
+
+ if (type > 1 || (rmode != 0 && opcode > 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
+ }
+}
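For reference, the rmode/opcode combinations that survive the checks above map onto instructions roughly as follows (rmode is the raw 2-bit field, which lines up with the FPROUNDING_* values handle_fpfpcvt() passes to arm_rmode_to_sf()):

    opcode   rmode  instruction        rounding
    000/001  00     FCVTNS / FCVTNU    to nearest, ties to even
    000/001  01     FCVTPS / FCVTPU    towards +Inf
    000/001  10     FCVTMS / FCVTMU    towards -Inf
    000/001  11     FCVTZS / FCVTZU    towards zero
    010/011  00     SCVTF  / UCVTF     int -> FP, FPCR rounding
    100/101  00     FCVTAS / FCVTAU    ties away (forced via opcode bit 2)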
+
+/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
+ * 31 30 29 28 25 24 0
+ * +---+---+---+---------+-----------------------------+
+ * | | 0 | | 1 1 1 1 | |
+ * +---+---+---+---------+-----------------------------+
+ */
+static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
+{
+ if (extract32(insn, 24, 1)) {
+ /* Floating point data-processing (3 source) */
+ disas_fp_3src(s, insn);
+ } else if (extract32(insn, 21, 1) == 0) {
+ /* Floating point to fixed point conversions */
+ disas_fp_fixed_conv(s, insn);
+ } else {
+ switch (extract32(insn, 10, 2)) {
+ case 1:
+ /* Floating point conditional compare */
+ disas_fp_ccomp(s, insn);
+ break;
+ case 2:
+ /* Floating point data-processing (2 source) */
+ disas_fp_2src(s, insn);
+ break;
+ case 3:
+ /* Floating point conditional select */
+ disas_fp_csel(s, insn);
+ break;
+ case 0:
+ switch (ctz32(extract32(insn, 12, 4))) {
+ case 0: /* [15:12] == xxx1 */
+ /* Floating point immediate */
+ disas_fp_imm(s, insn);
+ break;
+ case 1: /* [15:12] == xx10 */
+ /* Floating point compare */
+ disas_fp_compare(s, insn);
+ break;
+ case 2: /* [15:12] == x100 */
+ /* Floating point data-processing (1 source) */
+ disas_fp_1src(s, insn);
+ break;
+ case 3: /* [15:12] == 1000 */
+ unallocated_encoding(s);
+ break;
+ default: /* [15:12] == 0000 */
+ /* Floating point <-> integer conversions */
+ disas_fp_int_conv(s, insn);
+ break;
+ }
+ break;
+ }
+ }
+}
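The ctz32() trick above works because the four sub-encodings are distinguished purely by the position of the lowest set bit in insn[15:12], with an all-zero field (ctz32(0) == 32 in QEMU's helper) falling through to the default case. A quick standalone check of that mapping, using the GCC/Clang builtin as a stand-in:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for QEMU's ctz32(): count trailing zeros, with ctz32(0) == 32. */
    static int ctz32_model(uint32_t x)
    {
        return x ? __builtin_ctz(x) : 32;
    }

    int main(void)
    {
        /* The five shapes bits [15:12] can take, as in the switch above. */
        uint32_t fields[] = { 0x3 /* xxx1 */, 0x6 /* xx10 */, 0x4 /* x100 */,
                              0x8 /* 1000 */, 0x0 /* 0000 */ };
        for (int i = 0; i < 5; i++) {
            printf("[15:12]=0x%x -> ctz=%d\n", fields[i], ctz32_model(fields[i]));
        }
        return 0;
    }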
+
+static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
+{
+ /* Note that this is called with all non-FP cases from
+ * table C3-6 so it must UNDEF for entries not specifically
+ * allocated to instructions in that table.
+ */
+ unsupported_encoding(s, insn);
+}
+
/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
+ disas_data_proc_fp(s, insn);
+ } else {
+ /* SIMD, including crypto */
+ disas_data_proc_simd(s, insn);
+ }
}
/* C3.1 A64 instruction index by encoding */
@@ -1661,6 +4323,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->vfp_enabled = 0;
dc->vec_len = 0;
dc->vec_stride = 0;
+ dc->cp_regs = cpu->cp_regs;
+ dc->current_pl = arm_current_pl(env);
init_tmp_a64_array(dc);
@@ -1750,8 +4414,10 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
gen_goto_tb(dc, 1, dc->pc);
break;
default:
- case DISAS_JUMP:
case DISAS_UPDATE:
+ gen_a64_set_pc_im(dc->pc);
+ /* fall through */
+ case DISAS_JUMP:
/* indicate that the hash table must be used to find the next TB */
tcg_gen_exit_tb(0);
break;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 1403ecf216..8d240e160d 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -61,11 +61,10 @@ TCGv_ptr cpu_env;
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
-static TCGv_i32 cpu_exclusive_addr;
-static TCGv_i32 cpu_exclusive_val;
-static TCGv_i32 cpu_exclusive_high;
+static TCGv_i64 cpu_exclusive_addr;
+static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
-static TCGv_i32 cpu_exclusive_test;
+static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif
@@ -96,14 +95,12 @@ void arm_translate_init(void)
cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
- cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
- cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
- cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
- cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUARMState, exclusive_info), "exclusive_info");
@@ -1101,27 +1098,29 @@ VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
-#define VFP_GEN_FIX(name) \
+#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
TCGv_i32 tmp_shift = tcg_const_i32(shift); \
TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
if (dp) { \
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
+ gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
+ statusptr); \
} else { \
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
+ gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
+ statusptr); \
} \
tcg_temp_free_i32(tmp_shift); \
tcg_temp_free_ptr(statusptr); \
}
-VFP_GEN_FIX(tosh)
-VFP_GEN_FIX(tosl)
-VFP_GEN_FIX(touh)
-VFP_GEN_FIX(toul)
-VFP_GEN_FIX(shto)
-VFP_GEN_FIX(slto)
-VFP_GEN_FIX(uhto)
-VFP_GEN_FIX(ulto)
+VFP_GEN_FIX(tosh, _round_to_zero)
+VFP_GEN_FIX(tosl, _round_to_zero)
+VFP_GEN_FIX(touh, _round_to_zero)
+VFP_GEN_FIX(toul, _round_to_zero)
+VFP_GEN_FIX(shto, )
+VFP_GEN_FIX(slto, )
+VFP_GEN_FIX(uhto, )
+VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
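With the extra parameter, the to-fixed-point generators now pick up the _round_to_zero helper variants while the from-fixed-point ones keep the plain helpers (only the float-to-fixed direction mandates round-to-zero); the first expansion, for example, produces calls along the lines of:

    VFP_GEN_FIX(tosh, _round_to_zero)
        -> gen_helper_vfp_toshd_round_to_zero(cpu_F0d, cpu_F0d, tmp_shift, statusptr)   /* dp */
        -> gen_helper_vfp_toshs_round_to_zero(cpu_F0s, cpu_F0s, tmp_shift, statusptr)   /* sp */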
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
@@ -2728,9 +2727,9 @@ static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
if (vmin) {
- gen_helper_vfp_minnmd(dest, frn, frm, fpst);
+ gen_helper_vfp_minnumd(dest, frn, frm, fpst);
} else {
- gen_helper_vfp_maxnmd(dest, frn, frm, fpst);
+ gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
}
tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
tcg_temp_free_i64(frn);
@@ -2746,9 +2745,9 @@ static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
if (vmin) {
- gen_helper_vfp_minnms(dest, frn, frm, fpst);
+ gen_helper_vfp_minnums(dest, frn, frm, fpst);
} else {
- gen_helper_vfp_maxnms(dest, frn, frm, fpst);
+ gen_helper_vfp_maxnums(dest, frn, frm, fpst);
}
tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
tcg_temp_free_i32(frn);
@@ -5124,9 +5123,9 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
if (size == 0) {
- gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
+ gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
} else {
- gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
+ gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
}
tcg_temp_free_ptr(fpstatus);
break;
@@ -5136,9 +5135,9 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
/* VMAXNM/VMINNM */
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
if (size == 0) {
- gen_helper_vfp_maxnms(tmp, tmp, tmp2, fpstatus);
+ gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
} else {
- gen_helper_vfp_minnms(tmp, tmp, tmp2, fpstatus);
+ gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
}
tcg_temp_free_ptr(fpstatus);
} else {
@@ -6498,7 +6497,6 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
const ARMCPRegInfo *ri;
- ARMCPU *cpu = arm_env_get_cpu(env);
cpnum = (insn >> 8) & 0xf;
if (arm_feature(env, ARM_FEATURE_XSCALE)
@@ -6541,11 +6539,11 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
isread = (insn >> 20) & 1;
rt = (insn >> 12) & 0xf;
- ri = get_arm_cp_reginfo(cpu,
+ ri = get_arm_cp_reginfo(s->cp_regs,
ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
if (ri) {
/* Check access permissions */
- if (!cp_access_ok(env, ri, isread)) {
+ if (!cp_access_ok(s->current_pl, ri, isread)) {
return 1;
}
@@ -6759,30 +6757,34 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
default:
abort();
}
- tcg_gen_mov_i32(cpu_exclusive_val, tmp);
- store_reg(s, rt, tmp);
+
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i32 tmp3 = tcg_temp_new_i32();
+
tcg_gen_addi_i32(tmp2, addr, 4);
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
- tcg_gen_mov_i32(cpu_exclusive_high, tmp);
- store_reg(s, rt2, tmp);
+ tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
+ store_reg(s, rt2, tmp3);
+ } else {
+ tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
}
- tcg_gen_mov_i32(cpu_exclusive_addr, addr);
+
+ store_reg(s, rt, tmp);
+ tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
- tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
- tcg_gen_mov_i32(cpu_exclusive_test, addr);
+ tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
tcg_gen_movi_i32(cpu_exclusive_info,
size | (rd << 4) | (rt << 8) | (rt2 << 12));
gen_exception_insn(s, 4, EXCP_STREX);
@@ -6792,6 +6794,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
TCGv_i32 tmp;
+ TCGv_i64 val64, extaddr;
int done_label;
int fail_label;
@@ -6803,7 +6806,11 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
} */
fail_label = gen_new_label();
done_label = gen_new_label();
- tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+ extaddr = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(extaddr, addr);
+ tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
+ tcg_temp_free_i64(extaddr);
+
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
@@ -6819,17 +6826,24 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
default:
abort();
}
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
- tcg_temp_free_i32(tmp);
+
+ val64 = tcg_temp_new_i64();
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
- tcg_temp_free_i32(tmp);
+ tcg_gen_concat_i32_i64(val64, tmp, tmp3);
+ tcg_temp_free_i32(tmp3);
+ } else {
+ tcg_gen_extu_i32_i64(val64, tmp);
}
+ tcg_temp_free_i32(tmp);
+
+ tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
+ tcg_temp_free_i64(val64);
+
tmp = load_reg(s, rt);
switch (size) {
case 0:
@@ -6857,7 +6871,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
gen_set_label(fail_label);
tcg_gen_movi_i32(cpu_R[rd], 1);
gen_set_label(done_label);
- tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
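For the size == 3 (doubleword exclusive) case, the two 32-bit loads are now packed into the single 64-bit exclusive_val rather than being kept in a separate exclusive_high, so the AArch32 and AArch64 paths can share one 64-bit monitor comparison. The packing that tcg_gen_concat_i32_i64() emits is equivalent to this sketch:

    #include <stdint.h>

    /* Low word first, matching tcg_gen_concat_i32_i64(dest, lo, hi). */
    static uint64_t concat32_model(uint32_t lo, uint32_t hi)
    {
        return (uint64_t)lo | ((uint64_t)hi << 32);
    }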
@@ -10269,6 +10283,8 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+ dc->cp_regs = cpu->cp_regs;
+ dc->current_pl = arm_current_pl(env);
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
diff --git a/target-arm/translate.h b/target-arm/translate.h
index a6f6b3e699..67da6996c9 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -24,6 +24,8 @@ typedef struct DisasContext {
int vec_len;
int vec_stride;
int aarch64;
+ int current_pl;
+ GHashTable *cp_regs;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];