Diffstat (limited to 'target-arm/translate-a64.c')
 target-arm/translate-a64.c | 1316 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 1171 insertions(+), 145 deletions(-)
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index d60223af8e..8752e7e16c 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -75,8 +75,12 @@ typedef struct AArch64DecodeTable {
/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
+typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
+typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
+typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
/* initialize TCG globals. */
void a64_translate_init(void)
@@ -614,20 +618,26 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
*/
/*
- * Store from GPR register to memory
+ * Store from GPR register to memory.
*/
+static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
+ TCGv_i64 tcg_addr, int size, int memidx)
+{
+ g_assert(size <= 3);
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, MO_TE + size);
+}
+
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
TCGv_i64 tcg_addr, int size)
{
- g_assert(size <= 3);
- tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
+ do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s));
}
/*
* Load from memory to GPR register
*/
-static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed, bool extend)
+static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed, bool extend, int memidx)
{
TCGMemOp memop = MO_TE + size;
@@ -637,7 +647,7 @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
memop += MO_SIGN;
}
- tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);
+ tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
if (extend && is_signed) {
g_assert(size < 3);
@@ -645,6 +655,13 @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
}
}
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed, bool extend)
+{
+ do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
+ get_mem_index(s));
+}
+
/*
* Store from FP register to memory
*/
@@ -1175,7 +1192,12 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
crn, crm, op0, op1, op2));
if (!ri) {
- /* Unknown register */
+ /* Unknown register; this might be a guest error or a QEMU
+ * unimplemented feature.
+ */
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
+ "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
+ isread ? "read" : "write", op0, op1, crn, crm, op2);
unallocated_encoding(s);
return;
}
@@ -1186,6 +1208,17 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
return;
}
+ if (ri->accessfn) {
+ /* Emit code to perform further access permissions checks at
+ * runtime; this may result in an exception.
+ */
+ TCGv_ptr tmpptr;
+ gen_a64_set_pc_im(s->pc - 4);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ }
+
/* Handle special cases first */
switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
case ARM_CP_NOP:
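Note that gen_a64_set_pc_im(s->pc - 4) is emitted before the helper call so that, if the runtime check fails, the exception the helper raises reports the faulting instruction's PC (which is also why the read/write paths below can drop their own set_pc_im calls). A minimal sketch of the runtime side, assuming the op_helper.c names of this era (access_check_cp_reg, CPAccessResult, raise_exception) from memory rather than from this diff:

    /* Sketch only: the runtime half of the generated call above. */
    void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip)
    {
        const ARMCPRegInfo *ri = rip;

        if (ri->accessfn(env, ri) != CP_ACCESS_OK) {
            /* Access denied: UNDEF at the PC the translator restored */
            raise_exception(env, EXCP_UDEF);
        }
    }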
@@ -1213,7 +1246,6 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
} else if (ri->readfn) {
TCGv_ptr tmpptr;
- gen_a64_set_pc_im(s->pc - 4);
tmpptr = tcg_const_ptr(ri);
gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
tcg_temp_free_ptr(tmpptr);
@@ -1226,7 +1258,6 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
return;
} else if (ri->writefn) {
TCGv_ptr tmpptr;
- gen_a64_set_pc_im(s->pc - 4);
tmpptr = tcg_const_ptr(ri);
gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
tcg_temp_free_ptr(tmpptr);
@@ -1471,12 +1502,68 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
- TCGv_i64 addr, int size, int is_pair)
-{
- qemu_log_mask(LOG_UNIMP,
- "%s:%d: system mode store_exclusive unsupported "
- "at pc=%016" PRIx64 "\n",
- __FILE__, __LINE__, s->pc - 4);
+ TCGv_i64 inaddr, int size, int is_pair)
+{
+ /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
+ * && (!is_pair || env->exclusive_high == [addr + datasize])) {
+ * [addr] = {Rt};
+ * if (is_pair) {
+ * [addr + datasize] = {Rt2};
+ * }
+ * {Rd} = 0;
+ * } else {
+ * {Rd} = 1;
+ * }
+ * env->exclusive_addr = -1;
+ */
+ int fail_label = gen_new_label();
+ int done_label = gen_new_label();
+ TCGv_i64 addr = tcg_temp_local_new_i64();
+ TCGv_i64 tmp;
+
+ /* Copy input into a local temp so it is not trashed when the
+ * basic block ends at the branch insn.
+ */
+ tcg_gen_mov_i64(addr, inaddr);
+ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+
+ tmp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), MO_TE + size);
+ tcg_gen_brcond_i64(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
+ tcg_temp_free_i64(tmp);
+
+ if (is_pair) {
+ TCGv_i64 addrhi = tcg_temp_new_i64();
+ TCGv_i64 tmphi = tcg_temp_new_i64();
+
+ tcg_gen_addi_i64(addrhi, addr, 1 << size);
+ tcg_gen_qemu_ld_i64(tmphi, addrhi, get_mem_index(s), MO_TE + size);
+ tcg_gen_brcond_i64(TCG_COND_NE, tmphi, cpu_exclusive_high, fail_label);
+
+ tcg_temp_free_i64(tmphi);
+ tcg_temp_free_i64(addrhi);
+ }
+
+ /* We seem to still have the exclusive monitor, so do the store */
+ tcg_gen_qemu_st_i64(cpu_reg(s, rt), addr, get_mem_index(s), MO_TE + size);
+ if (is_pair) {
+ TCGv_i64 addrhi = tcg_temp_new_i64();
+
+ tcg_gen_addi_i64(addrhi, addr, 1 << size);
+ tcg_gen_qemu_st_i64(cpu_reg(s, rt2), addrhi,
+ get_mem_index(s), MO_TE + size);
+ tcg_temp_free_i64(addrhi);
+ }
+
+ tcg_temp_free_i64(addr);
+
+ tcg_gen_movi_i64(cpu_reg(s, rd), 0);
+ tcg_gen_br(done_label);
+ gen_set_label(fail_label);
+ tcg_gen_movi_i64(cpu_reg(s, rd), 1);
+ gen_set_label(done_label);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
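In plain C terms the generated sequence is a compare-and-store against the monitor state; a self-contained toy model (not QEMU API, pair case omitted):

    #include <stdint.h>

    /* Toy model of the store-exclusive logic above: succeed only if the
     * monitor is still armed for this address and memory still holds the
     * value seen by the paired load-exclusive; always disarm the monitor.
     */
    struct monitor { uint64_t excl_addr, excl_val; };

    static int store_exclusive_model(struct monitor *m, uint64_t *mem,
                                     uint64_t addr, uint64_t val)
    {
        int rd = 1;                      /* Rd = 1: store failed */

        if (m->excl_addr == addr && mem[addr / 8] == m->excl_val) {
            mem[addr / 8] = val;
            rd = 0;                      /* Rd = 0: store succeeded */
        }
        m->excl_addr = (uint64_t)-1;     /* monitor always cleared */
        return rd;
    }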
@@ -1750,6 +1837,7 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
* +----+-------+---+-----+-----+---+--------+-----+------+------+
*
* idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
+ 10 -> unprivileged
* V = 0 -> non-vector
* size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
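A decode sketch for this format, mirroring the extract32() calls used by disas_ldst_reg_imm9 (bit positions per the diagram above):

    int size = extract32(insn, 30, 2);   /* 00=8, 01=16, 10=32, 11=64 bit */
    int opc  = extract32(insn, 22, 2);
    int imm9 = sextract32(insn, 12, 9);  /* signed 9-bit offset           */
    int idx  = extract32(insn, 10, 2);   /* 2 -> unprivileged LDTR/STTR   */
    int rn   = extract32(insn, 5, 5);
    int rt   = extract32(insn, 0, 5);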
@@ -1765,6 +1853,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
bool is_signed = false;
bool is_store = false;
bool is_extended = false;
+ bool is_unpriv = (idx == 2);
bool is_vector = extract32(insn, 26, 1);
bool post_index;
bool writeback;
@@ -1773,7 +1862,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
if (is_vector) {
size |= (opc & 2) << 1;
- if (size > 4) {
+ if (size > 4 || is_unpriv) {
unallocated_encoding(s);
return;
}
@@ -1781,6 +1870,10 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
+ if (is_unpriv) {
+ unallocated_encoding(s);
+ return;
+ }
return;
}
if (opc == 3 && size > 1) {
@@ -1794,6 +1887,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
switch (idx) {
case 0:
+ case 2:
post_index = false;
writeback = false;
break;
@@ -1805,9 +1899,6 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
post_index = false;
writeback = true;
break;
- case 2:
- g_assert(false);
- break;
}
if (rn == 31) {
@@ -1827,10 +1918,13 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
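+        /* For LDTR/STTR and friends, force MMU index 1 (the user/EL0
+         * index in this tree) so the access is permission-checked as if
+         * it were made from EL0.
+         */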
+ int memidx = is_unpriv ? 1 : get_mem_index(s);
+
if (is_store) {
- do_gpr_st(s, tcg_rt, tcg_addr, size);
+ do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
} else {
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
+ do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
+ is_signed, is_extended, memidx);
}
}
@@ -2010,25 +2104,6 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
}
}
-/* Load/store register (immediate forms) */
-static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
-{
- switch (extract32(insn, 10, 2)) {
- case 0: case 1: case 3:
- /* Load/store register (unscaled immediate) */
- /* Load/store immediate pre/post-indexed */
- disas_ldst_reg_imm9(s, insn);
- break;
- case 2:
- /* Load/store register unprivileged */
- unsupported_encoding(s, insn);
- break;
- default:
- unallocated_encoding(s);
- break;
- }
-}
-
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
@@ -2037,7 +2112,11 @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
disas_ldst_reg_roffset(s, insn);
} else {
- disas_ldst_reg_imm(s, insn);
+ /* Load/store register (unscaled immediate)
+ * Load/store immediate pre/post-indexed
+ * Load/store register unprivileged
+ */
+ disas_ldst_reg_imm9(s, insn);
}
break;
case 1:
@@ -5838,7 +5917,100 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
*/
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
- unsupported_encoding(s, insn);
+ bool is_u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ if (is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ if (size == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == 2) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
+ read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
+
+ tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
+
+ switch (opcode) {
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ break;
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ tcg_gen_neg_i64(tcg_res, tcg_res);
+ /* fall through */
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ read_vec_element(s, tcg_op1, rd, 0, MO_64);
+ gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
+ tcg_res, tcg_op1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
+ read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);
+
+ gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
+
+ switch (opcode) {
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ break;
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ gen_helper_neon_negl_u32(tcg_res, tcg_res);
+ /* fall through */
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ {
+ TCGv_i64 tcg_op3 = tcg_temp_new_i64();
+ read_vec_element(s, tcg_op3, rd, 0, MO_32);
+ gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
+ tcg_res, tcg_op3);
+ tcg_temp_free_i64(tcg_op3);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_ext32u_i64(tcg_res, tcg_res);
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ }
}
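The doubling in SQDMULL is done by saturating-adding the product to itself; a self-contained C model of the 32x32->64 case (illustrative, not the QEMU helper):

    #include <stdint.h>
    #include <stdbool.h>

    /* SQDMULL model: 2 * (a * b) with signed saturation.  The only input
     * pair that can overflow is INT32_MIN * INT32_MIN, whose doubled
     * product would be 2^63.
     */
    static int64_t sqdmull_s32_model(int32_t a, int32_t b, bool *qc)
    {
        int64_t p = (int64_t)a * b;      /* always fits in 64 bits */

        if (p == (int64_t)1 << 62) {
            *qc = true;                  /* sticky saturation (QC) flag */
            return INT64_MAX;
        }
        return 2 * p;
    }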
static void handle_3same_64(DisasContext *s, int opcode, bool u,
@@ -5950,15 +6122,33 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
read_vec_element(s, tcg_op2, rm, pass, MO_64);
switch (fpopcode) {
+ case 0x39: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negd(tcg_op1, tcg_op1);
+ /* fall through */
+ case 0x19: /* FMLA */
+ read_vec_element(s, tcg_res, rd, pass, MO_64);
+ gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
+ tcg_res, fpst);
+ break;
case 0x18: /* FMAXNM */
gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
case 0x1a: /* FADD */
gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x1b: /* FMULX */
+ gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1c: /* FCMEQ */
+ gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x1e: /* FMAX */
gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x1f: /* FRECPS */
+ gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x38: /* FMINNM */
gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -5968,9 +6158,18 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
case 0x3e: /* FMIN */
gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x3f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x5b: /* FMUL */
gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x5c: /* FCMGE */
+ gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5d: /* FACGE */
+ gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x5f: /* FDIV */
gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -5978,6 +6177,12 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
gen_helper_vfp_absd(tcg_res, tcg_res);
break;
+ case 0x7c: /* FCMGT */
+ gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7d: /* FACGT */
+ gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
default:
g_assert_not_reached();
}
@@ -5997,12 +6202,30 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
switch (fpopcode) {
+ case 0x39: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negs(tcg_op1, tcg_op1);
+ /* fall through */
+ case 0x19: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
+ tcg_res, fpst);
+ break;
case 0x1a: /* FADD */
gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x1b: /* FMULX */
+ gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1c: /* FCMEQ */
+ gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x1e: /* FMAX */
gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x1f: /* FRECPS */
+ gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x18: /* FMAXNM */
gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -6015,9 +6238,18 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
case 0x3e: /* FMIN */
gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x3f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x5b: /* FMUL */
gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
break;
+ case 0x5c: /* FCMGE */
+ gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5d: /* FACGE */
+ gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
case 0x5f: /* FDIV */
gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -6025,6 +6257,12 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
gen_helper_vfp_abss(tcg_res, tcg_res);
break;
+ case 0x7c: /* FCMGT */
+ gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7d: /* FACGT */
+ gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
default:
g_assert_not_reached();
}
@@ -6075,15 +6313,13 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
switch (fpopcode) {
case 0x1b: /* FMULX */
- case 0x1c: /* FCMEQ */
case 0x1f: /* FRECPS */
case 0x3f: /* FRSQRTS */
- case 0x5c: /* FCMGE */
case 0x5d: /* FACGE */
- case 0x7c: /* FCMGT */
case 0x7d: /* FACGT */
- unsupported_encoding(s, insn);
- return;
+ case 0x1c: /* FCMEQ */
+ case 0x5c: /* FCMGE */
+ case 0x7c: /* FCMGT */
case 0x7a: /* FABD */
break;
default:
@@ -6268,6 +6504,115 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u,
}
}
+static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
+ bool is_scalar, bool is_u, bool is_q,
+ int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ if (is_double) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ NeonGenTwoDoubleOPFn *genfn;
+ bool swap = false;
+ int pass;
+
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+            /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_neon_cgt_f64;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_neon_ceq_f64;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_neon_cge_f64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ if (swap) {
+ genfn(tcg_res, tcg_zero, tcg_op, fpst);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero, fpst);
+ }
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ }
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_zero);
+ tcg_temp_free_i64(tcg_op);
+ } else {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ NeonGenTwoSingleOPFn *genfn;
+ bool swap = false;
+ int pass, maxpasses;
+
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+ /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_neon_cgt_f32;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_neon_ceq_f32;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_neon_cge_f32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+ if (swap) {
+ genfn(tcg_res, tcg_zero, tcg_op, fpst);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero, fpst);
+ }
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+ }
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_zero);
+ tcg_temp_free_i32(tcg_op);
+ if (!is_q && !is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+
+ tcg_temp_free_ptr(fpst);
+}
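The swap flag lets the less-than/less-equal comparisons reuse the greater-than/greater-equal helpers with the operands reversed, so no dedicated FCMLT/FCMLE helpers are needed:

    /* Operand-swap identities used above:
     *   FCMLT(x, 0)  ==  FCMGT(0, x)
     *   FCMLE(x, 0)  ==  FCMGE(0, x)
     */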
+
/* C3.6.12 AdvSIMD scalar two reg misc
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
* +-----+---+-----------+------+-----------+--------+-----+------+------+
@@ -6297,9 +6642,47 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
break;
+ case 0xc ... 0xf:
+ case 0x16 ... 0x1d:
+ case 0x1f:
+ /* Floating point: U, size[1] and opcode indicate operation;
+ * size[0] indicates single or double precision.
+ */
+ opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
+ size = extract32(size, 0, 1) ? 3 : 2;
+ switch (opcode) {
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
+ return;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x1d: /* SCVTF */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ case 0x3d: /* FRECPE */
+ case 0x3f: /* FRECPX */
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x5d: /* UCVTF */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ case 0x7d: /* FRSQRTE */
+ unsupported_encoding(s, insn);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
default:
/* Other categories of encoding in this class:
- * + floating point (single and double)
* + SUQADD/USQADD/SQABS/SQNEG : size 8, 16, 32 or 64
* + SQXTN/SQXTN2/SQXTUN/SQXTUN2/UQXTN/UQXTN2:
* narrowing saturate ops: size 64/32/16 -> 32/16/8
@@ -6322,17 +6705,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
}
-/* C3.6.13 AdvSIMD scalar x indexed element
- * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
- * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
- * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
- * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
- */
-static void disas_simd_scalar_indexed(DisasContext *s, uint32_t insn)
-{
- unsupported_encoding(s, insn);
-}
-
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
int immh, int immb, int opcode, int rn, int rd)
@@ -6514,6 +6886,24 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
}
}
+/* Generate code to do a "long" addition or subtraction, i.e. one done in a
+ * TCGv_i64 on vector lanes of twice the width specified by size.
+ */
+static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
+ TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
+{
+ static NeonGenTwo64OpFn * const fns[3][2] = {
+ { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
+ { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
+ { tcg_gen_add_i64, tcg_gen_sub_i64 },
+ };
+ NeonGenTwo64OpFn *genfn;
+ assert(size < 3);
+
+ genfn = fns[size][is_sub];
+ genfn(tcg_res, tcg_op1, tcg_op2);
+}
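For sizes 0 and 1 the TCGv_i64 holds several widened lanes at once, so the paired-lane Neon helpers are used; only size 2 (a single 64-bit lane) can take the plain TCG add/sub. For example, a widening subtract of 8-bit elements resolves as:

    /* size 0, is_sub: picks gen_helper_neon_subl_u16, which subtracts
     * the four 16-bit lanes packed in each i64 operand.
     */
    gen_neon_addl(0, true, tcg_res, tcg_op1, tcg_op2);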
+
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
int opcode, int rd, int rn, int rm)
{
@@ -6569,6 +6959,12 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
}
switch (opcode) {
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
{
@@ -6589,15 +6985,31 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
break;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ break;
default:
g_assert_not_reached();
}
- if (accop > 0) {
+ if (opcode == 9 || opcode == 11) {
+ /* saturating accumulate ops */
+ if (accop < 0) {
+ tcg_gen_neg_i64(tcg_passres, tcg_passres);
+ }
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ tcg_res[pass], tcg_passres);
+ } else if (accop > 0) {
tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
- tcg_temp_free_i64(tcg_passres);
} else if (accop < 0) {
tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ }
+
+ if (accop != 0) {
tcg_temp_free_i64(tcg_passres);
}
@@ -6622,6 +7034,23 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
}
switch (opcode) {
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ {
+ TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
+ static NeonGenWidenFn * const widenfns[2][2] = {
+ { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
+ { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
+ };
+ NeonGenWidenFn *widenfn = widenfns[size][is_u];
+
+ widenfn(tcg_op2_64, tcg_op2);
+ widenfn(tcg_passres, tcg_op1);
+ gen_neon_addl(size, (opcode == 2), tcg_passres,
+ tcg_passres, tcg_op2_64);
+ tcg_temp_free_i64(tcg_op2_64);
+ break;
+ }
case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
if (size == 0) {
@@ -6655,28 +7084,32 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
}
}
break;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ assert(size == 1);
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ break;
default:
g_assert_not_reached();
}
tcg_temp_free_i32(tcg_op1);
tcg_temp_free_i32(tcg_op2);
- if (accop > 0) {
- if (size == 0) {
- gen_helper_neon_addl_u16(tcg_res[pass], tcg_res[pass],
- tcg_passres);
- } else {
- gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
- tcg_passres);
- }
- tcg_temp_free_i64(tcg_passres);
- } else if (accop < 0) {
- if (size == 0) {
- gen_helper_neon_subl_u16(tcg_res[pass], tcg_res[pass],
- tcg_passres);
+ if (accop != 0) {
+ if (opcode == 9 || opcode == 11) {
+ /* saturating accumulate ops */
+ if (accop < 0) {
+ gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
+ }
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
} else {
- gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
- tcg_passres);
+ gen_neon_addl(size, (accop < 0), tcg_res[pass],
+ tcg_res[pass], tcg_passres);
}
tcg_temp_free_i64(tcg_passres);
}
@@ -6689,6 +7122,95 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
tcg_temp_free_i64(tcg_res[1]);
}
+static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ TCGv_i64 tcg_res[2];
+ int part = is_q ? 2 : 0;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
+ static NeonGenWidenFn * const widenfns[3][2] = {
+ { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
+ { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
+ { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
+ };
+ NeonGenWidenFn *widenfn = widenfns[size][is_u];
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
+ widenfn(tcg_op2_wide, tcg_op2);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_res[pass] = tcg_temp_new_i64();
+ gen_neon_addl(size, (opcode == 3),
+ tcg_res[pass], tcg_op1, tcg_op2_wide);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2_wide);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+static void do_narrow_high_u32(TCGv_i32 res, TCGv_i64 in)
+{
+ tcg_gen_shri_i64(in, in, 32);
+ tcg_gen_trunc_i64_i32(res, in);
+}
+
+static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
+{
+ tcg_gen_addi_i64(in, in, 1U << 31);
+ do_narrow_high_u32(res, in);
+}
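These implement the 32-bit lane case of the (R)ADDHN/(R)SUBHN narrowing: keep the high half of each 64-bit result, for the rounding forms after adding half the weight of the discarded bits. Plain-C equivalents (illustrative):

    static uint32_t narrow_high_u32(uint64_t in)
    {
        return in >> 32;                      /* truncating: ADDHN/SUBHN */
    }

    static uint32_t narrow_round_high_u32(uint64_t in)
    {
        return (in + (1ULL << 31)) >> 32;     /* rounding: RADDHN/RSUBHN */
    }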
+
+static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_res[2];
+ int part = is_q ? 2 : 0;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_wideres = tcg_temp_new_i64();
+ static NeonGenNarrowFn * const narrowfns[3][2] = {
+ { gen_helper_neon_narrow_high_u8,
+ gen_helper_neon_narrow_round_high_u8 },
+ { gen_helper_neon_narrow_high_u16,
+ gen_helper_neon_narrow_round_high_u16 },
+ { do_narrow_high_u32, do_narrow_round_high_u32 },
+ };
+ NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+
+ tcg_res[pass] = tcg_temp_new_i32();
+ gennarrow(tcg_res[pass], tcg_wideres);
+ tcg_temp_free_i64(tcg_wideres);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
/* C3.6.15 AdvSIMD three different
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
@@ -6719,31 +7241,43 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
/* 64 x 128 -> 128 */
- unsupported_encoding(s, insn);
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
/* 128 x 128 -> 64 */
- unsupported_encoding(s, insn);
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
- case 9:
- case 11:
- case 13:
- case 14:
- if (is_u) {
+ case 14: /* PMULL, PMULL2 */
+ if (is_u || size == 1 || size == 2) {
unallocated_encoding(s);
return;
}
- /* fall through */
- case 0:
- case 2:
unsupported_encoding(s, insn);
break;
- case 5:
- case 7:
- case 8:
- case 10:
- case 12:
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ if (is_u || size == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
/* 64 x 64 -> 128 */
if (size == 3) {
unallocated_encoding(s);
@@ -6856,39 +7390,22 @@ static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
}
-/* Pairwise op subgroup of C3.6.16. */
-static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
+/* Pairwise op subgroup of C3.6.16.
+ *
+ * This is called directly, or from disas_simd_3same_float for the float
+ * pairwise operations, where the opcode and size are decoded differently.
+ */
+static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
+ int size, int rn, int rm, int rd)
{
- int is_q = extract32(insn, 30, 1);
- int u = extract32(insn, 29, 1);
- int size = extract32(insn, 22, 2);
- int opcode = extract32(insn, 11, 5);
- int rm = extract32(insn, 16, 5);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
+ TCGv_ptr fpst;
int pass;
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
-
- switch (opcode) {
- case 0x14: /* SMAXP, UMAXP */
- case 0x15: /* SMINP, UMINP */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x17:
- if (u) {
- unallocated_encoding(s);
- return;
- }
- break;
- default:
- g_assert_not_reached();
+ /* Floating point operations need fpst */
+ if (opcode >= 0x58) {
+ fpst = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_PTR(fpst);
}
/* These operations work on the concatenated rm:rn, with each pair of
@@ -6906,9 +7423,28 @@ static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
read_vec_element(s, tcg_op2, passreg, 1, MO_64);
tcg_res[pass] = tcg_temp_new_i64();
- /* The only 64 bit pairwise integer op is ADDP */
- assert(opcode == 0x17);
- tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ switch (opcode) {
+ case 0x17: /* ADDP */
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 0x58: /* FMAXNMP */
+ gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5a: /* FADDP */
+ gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5e: /* FMAXP */
+ gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x78: /* FMINNMP */
+ gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7e: /* FMINP */
+ gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
tcg_temp_free_i64(tcg_op1);
tcg_temp_free_i64(tcg_op2);
@@ -6925,7 +7461,7 @@ static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
for (pass = 0; pass < maxpass; pass++) {
TCGv_i32 tcg_op1 = tcg_temp_new_i32();
TCGv_i32 tcg_op2 = tcg_temp_new_i32();
- NeonGenTwoOpFn *genfn;
+ NeonGenTwoOpFn *genfn = NULL;
int passreg = pass < (maxpass / 2) ? rn : rm;
int passelt = (is_q && (pass & 1)) ? 2 : 0;
@@ -6964,11 +7500,30 @@ static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
genfn = fns[size][u];
break;
}
+ /* The FP operations are all on single floats (32 bit) */
+ case 0x58: /* FMAXNMP */
+ gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5a: /* FADDP */
+ gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5e: /* FMAXP */
+ gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x78: /* FMINNMP */
+ gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7e: /* FMINP */
+ gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
default:
g_assert_not_reached();
}
- genfn(tcg_res[pass], tcg_op1, tcg_op2);
+            /* The FP cases above called their helpers directly; call the
+             * selected integer helper now.
+             */
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op1, tcg_op2);
+ }
tcg_temp_free_i32(tcg_op1);
tcg_temp_free_i32(tcg_op2);
@@ -6982,6 +7537,10 @@ static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
clear_vec_high(s, rd);
}
}
+
+ if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ tcg_temp_free_ptr(fpst);
+ }
}
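The pass loop works on the concatenation of Rn:Rm, with the low half of the result coming from Rn and the high half from Rm; for a 4x32-bit ADDP the effect is:

    /* Lane layout for 4x32 ADDP (res = Vd), per the pass/passreg logic:
     *   res[0] = rn[0] + rn[1];   res[1] = rn[2] + rn[3];
     *   res[2] = rm[0] + rm[1];   res[3] = rm[2] + rm[3];
     */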
/* Floating point op subgroup of C3.6.16. */
@@ -7015,30 +7574,32 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
case 0x5e: /* FMAXP */
case 0x78: /* FMINNMP */
case 0x7e: /* FMINP */
- /* pairwise ops */
- unsupported_encoding(s, insn);
+ if (size && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
+ rn, rm, rd);
return;
case 0x1b: /* FMULX */
- case 0x1c: /* FCMEQ */
case 0x1f: /* FRECPS */
case 0x3f: /* FRSQRTS */
- case 0x5c: /* FCMGE */
case 0x5d: /* FACGE */
- case 0x7c: /* FCMGT */
case 0x7d: /* FACGT */
case 0x19: /* FMLA */
case 0x39: /* FMLS */
- unsupported_encoding(s, insn);
- return;
case 0x18: /* FMAXNM */
case 0x1a: /* FADD */
+ case 0x1c: /* FCMEQ */
case 0x1e: /* FMAX */
case 0x38: /* FMINNM */
case 0x3a: /* FSUB */
case 0x3e: /* FMIN */
case 0x5b: /* FMUL */
+ case 0x5c: /* FCMGE */
case 0x5f: /* FDIV */
case 0x7a: /* FABD */
+ case 0x7c: /* FCMGT */
handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
return;
default:
@@ -7366,9 +7927,28 @@ static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
case 0x17: /* ADDP */
case 0x14: /* SMAXP, UMAXP */
case 0x15: /* SMINP, UMINP */
+ {
/* Pairwise operations */
- disas_simd_3same_pair(s, insn);
+ int is_q = extract32(insn, 30, 1);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ if (opcode == 0x17) {
+ if (u || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+ handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
break;
+ }
case 0x18 ... 0x31:
/* floating point ops, sz[1] and U are part of opcode */
disas_simd_3same_float(s, insn);
@@ -7618,6 +8198,17 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
break;
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
+ return;
case 0x16: /* FCVTN, FCVTN2 */
case 0x17: /* FCVTL, FCVTL2 */
case 0x18: /* FRINTN */
@@ -7626,9 +8217,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
case 0x1b: /* FCVTMS */
case 0x1c: /* FCVTAS */
case 0x1d: /* SCVTF */
- case 0x2c: /* FCMGT (zero) */
- case 0x2d: /* FCMEQ (zero) */
- case 0x2e: /* FCMLT (zero) */
case 0x38: /* FRINTP */
case 0x39: /* FRINTZ */
case 0x3a: /* FCVTPS */
@@ -7642,8 +8230,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
case 0x5b: /* FCVTMU */
case 0x5c: /* FCVTAU */
case 0x5d: /* UCVTF */
- case 0x6c: /* FCMGE (zero) */
- case 0x6d: /* FCMLE (zero) */
case 0x79: /* FRINTI */
case 0x7a: /* FCVTPU */
case 0x7b: /* FCVTZU */
@@ -7805,15 +8391,455 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
}
}
-/* C3.6.18 AdvSIMD vector x indexed element
+/* C3.6.13 AdvSIMD scalar x indexed element
+ * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
+ * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * C3.6.18 AdvSIMD vector x indexed element
* 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
* +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
* | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
* +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
*/
-static void disas_simd_indexed_vector(DisasContext *s, uint32_t insn)
-{
- unsupported_encoding(s, insn);
+static void disas_simd_indexed(DisasContext *s, uint32_t insn)
+{
+ /* This encoding has two kinds of instruction:
+ * normal, where we perform elt x idxelt => elt for each
+ * element in the vector
+ * long, where we perform elt x idxelt and generate a result of
+ * double the width of the input element
+ * The long ops have a 'part' specifier (i.e. they come in INSN, INSN2 pairs).
+ */
+ bool is_scalar = extract32(insn, 28, 1);
+ bool is_q = extract32(insn, 30, 1);
+ bool u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int l = extract32(insn, 21, 1);
+ int m = extract32(insn, 20, 1);
+ /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
+ int rm = extract32(insn, 16, 4);
+ int opcode = extract32(insn, 12, 4);
+ int h = extract32(insn, 11, 1);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool is_long = false;
+ bool is_fp = false;
+ int index;
+ TCGv_ptr fpst;
+
+ switch (opcode) {
+ case 0x0: /* MLA */
+ case 0x4: /* MLS */
+ if (!u || is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */
+ if (is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_long = true;
+ break;
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ case 0xb: /* SQDMULL, SQDMULL2 */
+ is_long = true;
+ /* fall through */
+ case 0xc: /* SQDMULH */
+ case 0xd: /* SQRDMULH */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x8: /* MUL */
+ if (u || is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1: /* FMLA */
+ case 0x5: /* FMLS */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x9: /* FMUL, FMULX */
+ if (!extract32(size, 1, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_fp = true;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_fp) {
+ /* low bit of size indicates single/double */
+ size = extract32(size, 0, 1) ? 3 : 2;
+ if (size == 2) {
+ index = h << 1 | l;
+ } else {
+ if (l || !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ index = h;
+ }
+ rm |= (m << 4);
+ } else {
+ switch (size) {
+ case 1:
+ index = h << 2 | l << 1 | m;
+ break;
+ case 2:
+ index = h << 1 | l;
+ rm |= (m << 4);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (is_fp) {
+ fpst = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_PTR(fpst);
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_idx = tcg_temp_new_i64();
+ int pass;
+
+ assert(is_fp && is_q && !is_long);
+
+ read_vec_element(s, tcg_idx, rm, index, MO_64);
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+
+ switch (opcode) {
+ case 0x5: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negd(tcg_op, tcg_op);
+ /* fall through */
+ case 0x1: /* FMLA */
+ read_vec_element(s, tcg_res, rd, pass, MO_64);
+ gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
+ break;
+ case 0x9: /* FMUL, FMULX */
+ if (u) {
+ gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
+ } else {
+ gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+ }
+
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_idx);
+ } else if (!is_long) {
+ /* 32 bit floating point, or 16 or 32 bit integer.
+ * For the 16 bit scalar case we use the usual Neon helpers and
+ * rely on the fact that 0 op 0 == 0 with no side effects.
+ */
+ TCGv_i32 tcg_idx = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ read_vec_element_i32(s, tcg_idx, rm, index, size);
+
+ if (size == 1 && !is_scalar) {
+ /* The simplest way to handle the 16x16 indexed ops is to duplicate
+ * the index into both halves of the 32 bit tcg_idx and then use
+ * the usual Neon helpers.
+ */
+ tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
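+            /* e.g. tcg_idx = 0x0000ABCD -> 0xABCDABCD: both 16-bit halves
+             * now hold the indexed element, so a 32-bit helper multiplies
+             * it against both lanes of tcg_op at once.
+             */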
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
+
+ switch (opcode) {
+ case 0x0: /* MLA */
+ case 0x4: /* MLS */
+ case 0x8: /* MUL */
+ {
+ static NeonGenTwoOpFn * const fns[2][2] = {
+ { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
+ { tcg_gen_add_i32, tcg_gen_sub_i32 },
+ };
+ NeonGenTwoOpFn *genfn;
+ bool is_sub = opcode == 0x4;
+
+ if (size == 1) {
+ gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
+ } else {
+ tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
+ }
+ if (opcode == 0x8) {
+ break;
+ }
+ read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ genfn = fns[size - 1][is_sub];
+ genfn(tcg_res, tcg_op, tcg_res);
+ break;
+ }
+ case 0x5: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negs(tcg_op, tcg_op);
+ /* fall through */
+ case 0x1: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
+ break;
+ case 0x9: /* FMUL, FMULX */
+ if (u) {
+ gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
+ } else {
+ gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
+ }
+ break;
+ case 0xc: /* SQDMULH */
+ if (size == 1) {
+ gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ }
+ break;
+ case 0xd: /* SQRDMULH */
+ if (size == 1) {
+ gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ tcg_temp_free_i32(tcg_idx);
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ /* long ops: 16x16->32 or 32x32->64 */
+ TCGv_i64 tcg_res[2];
+ int pass;
+ bool satop = extract32(opcode, 0, 1);
+ TCGMemOp memop = MO_32;
+
+ if (satop || !u) {
+ memop |= MO_SIGN;
+ }
+
+ if (size == 2) {
+ TCGv_i64 tcg_idx = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_idx, rm, index, memop);
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_passres;
+ int passelt;
+
+ if (is_scalar) {
+ passelt = 0;
+ } else {
+ passelt = pass + (is_q * 2);
+ }
+
+ read_vec_element(s, tcg_op, rn, passelt, memop);
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ if (opcode == 0xa || opcode == 0xb) {
+ /* Non-accumulating ops */
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
+ tcg_temp_free_i64(tcg_op);
+
+ if (satop) {
+ /* saturating, doubling */
+ gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ }
+
+ if (opcode == 0xa || opcode == 0xb) {
+ continue;
+ }
+
+ /* Accumulating op: handle accumulate step */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+
+ switch (opcode) {
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ break;
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ break;
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ tcg_gen_neg_i64(tcg_passres, tcg_passres);
+ /* fall through */
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ tcg_temp_free_i64(tcg_idx);
+
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ TCGv_i32 tcg_idx = tcg_temp_new_i32();
+
+ assert(size == 1);
+ read_vec_element_i32(s, tcg_idx, rm, index, size);
+
+ if (!is_scalar) {
+ /* The simplest way to handle the 16x16 indexed ops is to
+ * duplicate the index into both halves of the 32 bit tcg_idx
+ * and then use the usual Neon helpers.
+ */
+ tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
+ }
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i64 tcg_passres;
+
+ if (is_scalar) {
+ read_vec_element_i32(s, tcg_op, rn, pass, size);
+ } else {
+ read_vec_element_i32(s, tcg_op, rn,
+ pass + (is_q * 2), MO_32);
+ }
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ if (opcode == 0xa || opcode == 0xb) {
+ /* Non-accumulating ops */
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ if (memop & MO_SIGN) {
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
+ }
+ if (satop) {
+ gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ }
+ tcg_temp_free_i32(tcg_op);
+
+ if (opcode == 0xa || opcode == 0xb) {
+ continue;
+ }
+
+ /* Accumulating op: handle accumulate step */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+
+ switch (opcode) {
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
+ tcg_passres);
+ break;
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
+ tcg_passres);
+ break;
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
+ /* fall through */
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ tcg_temp_free_i32(tcg_idx);
+
+ if (is_scalar) {
+ tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
+ }
+ }
+
+ if (is_scalar) {
+ tcg_res[1] = tcg_const_i64(0);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ }
+
+ if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ tcg_temp_free_ptr(fpst);
+ }
}
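To summarize the index construction above: the lane index is assembled from H:L:M according to the element size, and M is folded back into Rm as its top bit when it is not needed for the index:

    /* Element index used above:
     *   16-bit: index = (h << 2) | (l << 1) | m        (0..7)
     *   32-bit: index = (h << 1) | l;  rm |= m << 4    (0..3)
     *   64-bit: index = h (L must be 0);  rm |= m << 4
     */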
/* C3.6.19 Crypto AES
@@ -7861,7 +8887,7 @@ static const AArch64DecodeTable data_proc_simd[] = {
{ 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
{ 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
{ 0x0e000400, 0x9fe08400, disas_simd_copy },
- { 0x0f000000, 0x9f000400, disas_simd_indexed_vector },
+ { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
/* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
{ 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
{ 0x0f000400, 0x9f800400, disas_simd_shift_imm },
@@ -7873,7 +8899,7 @@ static const AArch64DecodeTable data_proc_simd[] = {
{ 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
{ 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
{ 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
- { 0x5f000000, 0xdf000400, disas_simd_scalar_indexed },
+ { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
{ 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
{ 0x4e280800, 0xff3e0c00, disas_crypto_aes },
{ 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },