Diffstat (limited to 'target/arm/translate.c')
-rw-r--r--  target/arm/translate.c | 254
1 file changed, 52 insertions(+), 202 deletions(-)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 2eec689c5e..010a158e63 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3645,204 +3645,59 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
     return 1;
 }
 
-static void gen_ceq0_i32(TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(TCG_COND_EQ, d, a, 0);
-    tcg_gen_neg_i32(d, d);
-}
-
-static void gen_ceq0_i64(TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(TCG_COND_EQ, d, a, 0);
-    tcg_gen_neg_i64(d, d);
-}
-
-static void gen_ceq0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
-    tcg_gen_cmp_vec(TCG_COND_EQ, vece, d, a, zero);
-    tcg_temp_free_vec(zero);
-}
+#define GEN_CMP0(NAME, COND) \
+    static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
+    { \
+        tcg_gen_setcondi_i32(COND, d, a, 0); \
+        tcg_gen_neg_i32(d, d); \
+    } \
+    static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
+    { \
+        tcg_gen_setcondi_i64(COND, d, a, 0); \
+        tcg_gen_neg_i64(d, d); \
+    } \
+    static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
+    { \
+        TCGv_vec zero = tcg_const_zeros_vec_matching(d); \
+        tcg_gen_cmp_vec(COND, vece, d, a, zero); \
+        tcg_temp_free_vec(zero); \
+    } \
+    void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
+                            uint32_t opr_sz, uint32_t max_sz) \
+    { \
+        const GVecGen2 op[4] = { \
+            { .fno = gen_helper_gvec_##NAME##0_b, \
+              .fniv = gen_##NAME##0_vec, \
+              .opt_opc = vecop_list_cmp, \
+              .vece = MO_8 }, \
+            { .fno = gen_helper_gvec_##NAME##0_h, \
+              .fniv = gen_##NAME##0_vec, \
+              .opt_opc = vecop_list_cmp, \
+              .vece = MO_16 }, \
+            { .fni4 = gen_##NAME##0_i32, \
+              .fniv = gen_##NAME##0_vec, \
+              .opt_opc = vecop_list_cmp, \
+              .vece = MO_32 }, \
+            { .fni8 = gen_##NAME##0_i64, \
+              .fniv = gen_##NAME##0_vec, \
+              .opt_opc = vecop_list_cmp, \
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
+              .vece = MO_64 }, \
+        }; \
+        tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]); \
+    }
 
 static const TCGOpcode vecop_list_cmp[] = {
     INDEX_op_cmp_vec, 0
 };
 
-const GVecGen2 ceq0_op[4] = {
-    { .fno = gen_helper_gvec_ceq0_b,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_ceq0_h,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_ceq0_i32,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_ceq0_i64,
-      .fniv = gen_ceq0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_cle0_i32(TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(TCG_COND_LE, d, a, 0);
-    tcg_gen_neg_i32(d, d);
-}
-
-static void gen_cle0_i64(TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(TCG_COND_LE, d, a, 0);
-    tcg_gen_neg_i64(d, d);
-}
-
-static void gen_cle0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
-    tcg_gen_cmp_vec(TCG_COND_LE, vece, d, a, zero);
-    tcg_temp_free_vec(zero);
-}
-
-const GVecGen2 cle0_op[4] = {
-    { .fno = gen_helper_gvec_cle0_b,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_cle0_h,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_cle0_i32,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_cle0_i64,
-      .fniv = gen_cle0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_cge0_i32(TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(TCG_COND_GE, d, a, 0);
-    tcg_gen_neg_i32(d, d);
-}
-
-static void gen_cge0_i64(TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(TCG_COND_GE, d, a, 0);
-    tcg_gen_neg_i64(d, d);
-}
-
-static void gen_cge0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
-    tcg_gen_cmp_vec(TCG_COND_GE, vece, d, a, zero);
-    tcg_temp_free_vec(zero);
-}
-
-const GVecGen2 cge0_op[4] = {
-    { .fno = gen_helper_gvec_cge0_b,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_cge0_h,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_cge0_i32,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_cge0_i64,
-      .fniv = gen_cge0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_clt0_i32(TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(TCG_COND_LT, d, a, 0);
-    tcg_gen_neg_i32(d, d);
-}
-
-static void gen_clt0_i64(TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(TCG_COND_LT, d, a, 0);
-    tcg_gen_neg_i64(d, d);
-}
-
-static void gen_clt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
-    tcg_gen_cmp_vec(TCG_COND_LT, vece, d, a, zero);
-    tcg_temp_free_vec(zero);
-}
-
-const GVecGen2 clt0_op[4] = {
-    { .fno = gen_helper_gvec_clt0_b,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_clt0_h,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_clt0_i32,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_clt0_i64,
-      .fniv = gen_clt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
-
-static void gen_cgt0_i32(TCGv_i32 d, TCGv_i32 a)
-{
-    tcg_gen_setcondi_i32(TCG_COND_GT, d, a, 0);
-    tcg_gen_neg_i32(d, d);
-}
-
-static void gen_cgt0_i64(TCGv_i64 d, TCGv_i64 a)
-{
-    tcg_gen_setcondi_i64(TCG_COND_GT, d, a, 0);
-    tcg_gen_neg_i64(d, d);
-}
-
-static void gen_cgt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
-{
-    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
-    tcg_gen_cmp_vec(TCG_COND_GT, vece, d, a, zero);
-    tcg_temp_free_vec(zero);
-}
+GEN_CMP0(ceq, TCG_COND_EQ)
+GEN_CMP0(cle, TCG_COND_LE)
+GEN_CMP0(cge, TCG_COND_GE)
+GEN_CMP0(clt, TCG_COND_LT)
+GEN_CMP0(cgt, TCG_COND_GT)
 
-const GVecGen2 cgt0_op[4] = {
-    { .fno = gen_helper_gvec_cgt0_b,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_8 },
-    { .fno = gen_helper_gvec_cgt0_h,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_16 },
-    { .fni4 = gen_cgt0_i32,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .vece = MO_32 },
-    { .fni8 = gen_cgt0_i64,
-      .fniv = gen_cgt0_vec,
-      .opt_opc = vecop_list_cmp,
-      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-      .vece = MO_64 },
-};
+#undef GEN_CMP0
 
 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
@@ -6772,24 +6627,19 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 break;
 
             case NEON_2RM_VCEQ0:
-                tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
-                               vec_size, &ceq0_op[size]);
+                gen_gvec_ceq0(size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
             case NEON_2RM_VCGT0:
-                tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
-                               vec_size, &cgt0_op[size]);
+                gen_gvec_cgt0(size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
            case NEON_2RM_VCLE0:
-                tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
-                               vec_size, &cle0_op[size]);
+                gen_gvec_cle0(size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
            case NEON_2RM_VCGE0:
-                tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
-                               vec_size, &cge0_op[size]);
+                gen_gvec_cge0(size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
            case NEON_2RM_VCLT0:
-                tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
-                               vec_size, &clt0_op[size]);
+                gen_gvec_clt0(size, rd_ofs, rm_ofs, vec_size, vec_size);
                 break;
 
             default:
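
Note (not part of the commit): as a rough sketch of what one instantiation of the new macro produces, the following is approximately what GEN_CMP0(ceq, TCG_COND_EQ) expands to for the new entry point, reconstructed from the macro body added in the first hunk above rather than copied from the QEMU tree. The NEON decoder then calls it as gen_gvec_ceq0(size, rd_ofs, rm_ofs, vec_size, vec_size), as shown in the second hunk.

/*
 * Illustrative expansion only, derived from the GEN_CMP0 macro body above.
 */
void gen_gvec_ceq0(unsigned vece, uint32_t d, uint32_t m,
                   uint32_t opr_sz, uint32_t max_sz)
{
    /*
     * One GVecGen2 entry per element size.  The scalar forms use
     * setcond + neg to turn 0/1 into an all-zeros/all-ones mask; the
     * vector form compares each element against zero directly.
     */
    const GVecGen2 op[4] = {
        { .fno = gen_helper_gvec_ceq0_b, .fniv = gen_ceq0_vec,
          .opt_opc = vecop_list_cmp, .vece = MO_8 },
        { .fno = gen_helper_gvec_ceq0_h, .fniv = gen_ceq0_vec,
          .opt_opc = vecop_list_cmp, .vece = MO_16 },
        { .fni4 = gen_ceq0_i32, .fniv = gen_ceq0_vec,
          .opt_opc = vecop_list_cmp, .vece = MO_32 },
        { .fni8 = gen_ceq0_i64, .fniv = gen_ceq0_vec,
          .opt_opc = vecop_list_cmp,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64, .vece = MO_64 },
    };
    tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]);
}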