From 015ee81a4c06b644969f621fd9965cc6372b879e Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 2 Nov 2020 16:52:12 +0000
Subject: target/arm: Introduce neon_full_reg_offset

This function makes it clear that we're talking about the whole
register, and not the 32-bit piece at index 0.  This fixes a bug
when running on a big-endian host.

Signed-off-by: Richard Henderson
Message-id: 20201030022618.785675-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell
Signed-off-by: Peter Maydell
---
 target/arm/translate-neon.c.inc | 44 ++++++++++++++++++++----------------------
 target/arm/translate-vfp.c.inc  |  2 +-
 target/arm/translate.c          |  8 ++++++++
 3 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index 4d1a292981..e259e24c05 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -76,7 +76,7 @@ neon_element_offset(int reg, int element, MemOp size)
         ofs ^= 8 - element_size;
     }
 #endif
-    return neon_reg_offset(reg, 0) + ofs;
+    return neon_full_reg_offset(reg) + ofs;
 }
 
 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
@@ -585,12 +585,12 @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
                  * We cannot write 16 bytes at once because the
                  * destination is unaligned.
                  */
-                tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                      8, 8, tmp);
-                tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
-                                 neon_reg_offset(vd, 0), 8, 8);
+                tcg_gen_gvec_mov(0, neon_full_reg_offset(vd + 1),
+                                 neon_full_reg_offset(vd), 8, 8);
             } else {
-                tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
+                tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                      vec_size, vec_size, tmp);
             }
             tcg_gen_addi_i32(addr, addr, 1 << size);
@@ -691,9 +691,9 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
 static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
 {
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rn_ofs = neon_reg_offset(a->vn, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rn_ofs = neon_full_reg_offset(a->vn);
+    int rm_ofs = neon_full_reg_offset(a->vm);
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -1177,8 +1177,8 @@ static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
 {
     /* Handle a 2-reg-shift insn which can be vectorized. */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -1620,8 +1620,8 @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
 {
     /* FP operations in 2-reg-and-shift group */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
     TCGv_ptr fpst;
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
@@ -1756,7 +1756,7 @@ static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
         return true;
     }
 
-    reg_ofs = neon_reg_offset(a->vd, 0);
+    reg_ofs = neon_full_reg_offset(a->vd);
     vec_size = a->q ? 16 : 8;
     imm = asimd_imm_const(a->imm, a->cmode, a->op);
 
@@ -2300,9 +2300,9 @@ static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
         return true;
     }
 
-    tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
-                       neon_reg_offset(a->vn, 0),
-                       neon_reg_offset(a->vm, 0),
+    tcg_gen_gvec_3_ool(neon_full_reg_offset(a->vd),
+                       neon_full_reg_offset(a->vn),
+                       neon_full_reg_offset(a->vm),
                        16, 16, 0, fn_gvec);
     return true;
 }
@@ -2445,8 +2445,8 @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
 {
     /* Two registers and a scalar, using gvec */
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rn_ofs = neon_reg_offset(a->vn, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rn_ofs = neon_full_reg_offset(a->vn);
     int rm_ofs;
     int idx;
     TCGv_ptr fpstatus;
@@ -2477,7 +2477,7 @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
     /* a->vm is M:Vm, which encodes both register and index */
     idx = extract32(a->vm, a->size + 2, 2);
     a->vm = extract32(a->vm, 0, a->size + 2);
-    rm_ofs = neon_reg_offset(a->vm, 0);
+    rm_ofs = neon_full_reg_offset(a->vm);
 
     fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
     tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
@@ -2923,7 +2923,7 @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
         return true;
     }
 
-    tcg_gen_gvec_dup_mem(a->size, neon_reg_offset(a->vd, 0),
+    tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
                          neon_element_offset(a->vm, a->index, a->size),
                          a->q ? 16 : 8, a->q ? 16 : 8);
     return true;
@@ -3412,8 +3412,8 @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
 static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
 {
     int vec_size = a->q ? 16 : 8;
-    int rd_ofs = neon_reg_offset(a->vd, 0);
-    int rm_ofs = neon_reg_offset(a->vm, 0);
+    int rd_ofs = neon_full_reg_offset(a->vd);
+    int rm_ofs = neon_full_reg_offset(a->vm);
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index a7ed9bc81b..368bae0a73 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -653,7 +653,7 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
     }
 
     tmp = load_reg(s, a->rt);
-    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
+    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                          vec_size, vec_size, tmp);
     tcg_temp_free_i32(tmp);
 
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 38371db540..1b61e50f9c 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1094,6 +1094,14 @@ static inline void gen_hlt(DisasContext *s, int imm)
     unallocated_encoding(s);
 }
 
+/*
+ * Return the offset of a "full" NEON Dreg.
+ */
+static long neon_full_reg_offset(unsigned reg)
+{
+    return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
+}
+
 static inline long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
-- 
cgit v1.2.3

From 7ec85c02833f4264840c6ed78b749443a7b4ffe0 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 2 Nov 2020 16:52:13 +0000
Subject: target/arm: Move neon_element_offset to translate.c

This will shortly have users outside of translate-neon.c.inc.
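For readers unfamiliar with the function being moved: its offset
arithmetic is easy to model outside of TCG. Below is a stand-alone
sketch (illustrative only, not from the QEMU tree; BIG_ENDIAN_HOST is a
local stand-in for the HOST_WORDS_BIGENDIAN build-time define) showing
how the XOR places sub-64-bit elements within each 8-byte storage unit:

/* Illustrative sketch, not QEMU code: models the arithmetic of
 * neon_element_offset() so the big-endian XOR can be checked by hand.
 * BIG_ENDIAN_HOST stands in for QEMU's HOST_WORDS_BIGENDIAN define. */
#include <stdio.h>

#define BIG_ENDIAN_HOST 1

static long element_offset(long full_reg_ofs, int element, int size_log2)
{
    int element_size = 1 << size_log2;
    int ofs = element * element_size;
#if BIG_ENDIAN_HOST
    if (element_size < 8) {
        /* Flip the element's position within its 8-byte storage unit. */
        ofs ^= 8 - element_size;
    }
#endif
    return full_reg_ofs + ofs;
}

int main(void)
{
    /* 16-bit elements of one D register: +0,+2,+4,+6 on a little-endian
     * host; the XOR maps them to +6,+4,+2,+0 on a big-endian host. */
    for (int e = 0; e < 4; e++) {
        printf("element %d -> offset %ld\n", e, element_offset(0, e, 1));
    }
    return 0;
}

This also makes plain why using neon_reg_offset(reg, 0) as "start of the
register" was wrong on big-endian hosts: index 0 is the least
significant element, which does not sit at the lowest address there.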
Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-3-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-neon.c.inc | 19 ------------------- target/arm/translate.c | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc index e259e24c05..96ab2248fc 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c.inc @@ -60,25 +60,6 @@ static inline int neon_3same_fp_size(DisasContext *s, int x) #include "decode-neon-ls.c.inc" #include "decode-neon-shared.c.inc" -/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE, - * where 0 is the least significant end of the register. - */ -static inline long -neon_element_offset(int reg, int element, MemOp size) -{ - int element_size = 1 << size; - int ofs = element * element_size; -#ifdef HOST_WORDS_BIGENDIAN - /* Calculate the offset assuming fully little-endian, - * then XOR to account for the order of the 8-byte units. - */ - if (element_size < 8) { - ofs ^= 8 - element_size; - } -#endif - return neon_full_reg_offset(reg) + ofs; -} - static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop) { long offset = neon_element_offset(reg, ele, mop & MO_SIZE); diff --git a/target/arm/translate.c b/target/arm/translate.c index 1b61e50f9c..bf0b5cac61 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1102,6 +1102,26 @@ static long neon_full_reg_offset(unsigned reg) return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]); } +/* + * Return the offset of a 2**SIZE piece of a NEON register, at index ELE, + * where 0 is the least significant end of the register. + */ +static long neon_element_offset(int reg, int element, MemOp size) +{ + int element_size = 1 << size; + int ofs = element * element_size; +#ifdef HOST_WORDS_BIGENDIAN + /* + * Calculate the offset assuming fully little-endian, + * then XOR to account for the order of the 8-byte units. + */ + if (element_size < 8) { + ofs ^= 8 - element_size; + } +#endif + return neon_full_reg_offset(reg) + ofs; +} + static inline long vfp_reg_offset(bool dp, unsigned reg) { if (dp) { -- cgit v1.2.3 From 0f2cdc82276a723ee58562b56b9d537a4bd7bfef Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:13 +0000 Subject: target/arm: Use neon_element_offset in neon_load/store_reg These are the only users of neon_reg_offset, so remove that. Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-4-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/target/arm/translate.c b/target/arm/translate.c index bf0b5cac61..88a926d1df 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1137,26 +1137,16 @@ static inline long vfp_reg_offset(bool dp, unsigned reg) } } -/* Return the offset of a 32-bit piece of a NEON register. - zero is the least significant end of the register. 
*/ -static inline long -neon_reg_offset (int reg, int n) -{ - int sreg; - sreg = reg * 2 + n; - return vfp_reg_offset(0, sreg); -} - static TCGv_i32 neon_load_reg(int reg, int pass) { TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass)); + tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32)); return tmp; } static void neon_store_reg(int reg, int pass, TCGv_i32 var) { - tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass)); + tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32)); tcg_temp_free_i32(var); } -- cgit v1.2.3 From d8719785fde2f5041986853a314c05c6f567d3cb Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:13 +0000 Subject: target/arm: Use neon_element_offset in vfp_reg_offset This seems a bit more readable than using offsetof CPU_DoubleU. Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-5-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/target/arm/translate.c b/target/arm/translate.c index 88a926d1df..88ded4ac2c 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1122,18 +1122,13 @@ static long neon_element_offset(int reg, int element, MemOp size) return neon_full_reg_offset(reg) + ofs; } -static inline long vfp_reg_offset(bool dp, unsigned reg) +/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */ +static long vfp_reg_offset(bool dp, unsigned reg) { if (dp) { - return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]); + return neon_element_offset(reg, 0, MO_64); } else { - long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]); - if (reg & 1) { - ofs += offsetof(CPU_DoubleU, l.upper); - } else { - ofs += offsetof(CPU_DoubleU, l.lower); - } - return ofs; + return neon_element_offset(reg >> 1, reg & 1, MO_32); } } -- cgit v1.2.3 From a712266f5d5a36d04b22fe69fa15592d62bed019 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:13 +0000 Subject: target/arm: Add read/write_neon_element32 Model these off the aa64 read/write_vec_element functions. Use it within translate-neon.c.inc. The new functions do not allocate or free temps, so this rearranges the calling code a bit. Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-6-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-neon.c.inc | 256 ++++++++++++++++++++++++---------------- target/arm/translate.c | 26 ++++ 2 files changed, 183 insertions(+), 99 deletions(-) diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc index 96ab2248fc..549381703e 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c.inc @@ -956,18 +956,24 @@ static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn) * early. Since Q is 0 there are always just two passes, so instead * of a complicated loop over each pass we just unroll. 
*/ - tmp = neon_load_reg(a->vn, 0); - tmp2 = neon_load_reg(a->vn, 1); + tmp = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i32(); + tmp3 = tcg_temp_new_i32(); + + read_neon_element32(tmp, a->vn, 0, MO_32); + read_neon_element32(tmp2, a->vn, 1, MO_32); fn(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - tmp3 = neon_load_reg(a->vm, 0); - tmp2 = neon_load_reg(a->vm, 1); + read_neon_element32(tmp3, a->vm, 0, MO_32); + read_neon_element32(tmp2, a->vm, 1, MO_32); fn(tmp3, tmp3, tmp2); - tcg_temp_free_i32(tmp2); - neon_store_reg(a->vd, 0, tmp); - neon_store_reg(a->vd, 1, tmp3); + write_neon_element32(tmp, a->vd, 0, MO_32); + write_neon_element32(tmp3, a->vd, 1, MO_32); + + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp3); return true; } @@ -1275,7 +1281,7 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, * 2-reg-and-shift operations, size < 3 case, where the * helper needs to be passed cpu_env. */ - TCGv_i32 constimm; + TCGv_i32 constimm, tmp; int pass; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { @@ -1301,12 +1307,14 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, * by immediate using the variable shift operations. */ constimm = tcg_const_i32(dup_const(a->size, a->shift)); + tmp = tcg_temp_new_i32(); for (pass = 0; pass < (a->q ? 4 : 2); pass++) { - TCGv_i32 tmp = neon_load_reg(a->vm, pass); + read_neon_element32(tmp, a->vm, pass, MO_32); fn(tmp, cpu_env, tmp, constimm); - neon_store_reg(a->vd, pass, tmp); + write_neon_element32(tmp, a->vd, pass, MO_32); } + tcg_temp_free_i32(tmp); tcg_temp_free_i32(constimm); return true; } @@ -1364,21 +1372,21 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, constimm = tcg_const_i64(-a->shift); rm1 = tcg_temp_new_i64(); rm2 = tcg_temp_new_i64(); + rd = tcg_temp_new_i32(); /* Load both inputs first to avoid potential overwrite if rm == rd */ neon_load_reg64(rm1, a->vm); neon_load_reg64(rm2, a->vm + 1); shiftfn(rm1, rm1, constimm); - rd = tcg_temp_new_i32(); narrowfn(rd, cpu_env, rm1); - neon_store_reg(a->vd, 0, rd); + write_neon_element32(rd, a->vd, 0, MO_32); shiftfn(rm2, rm2, constimm); - rd = tcg_temp_new_i32(); narrowfn(rd, cpu_env, rm2); - neon_store_reg(a->vd, 1, rd); + write_neon_element32(rd, a->vd, 1, MO_32); + tcg_temp_free_i32(rd); tcg_temp_free_i64(rm1); tcg_temp_free_i64(rm2); tcg_temp_free_i64(constimm); @@ -1428,10 +1436,14 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, constimm = tcg_const_i32(imm); /* Load all inputs first to avoid potential overwrite */ - rm1 = neon_load_reg(a->vm, 0); - rm2 = neon_load_reg(a->vm, 1); - rm3 = neon_load_reg(a->vm + 1, 0); - rm4 = neon_load_reg(a->vm + 1, 1); + rm1 = tcg_temp_new_i32(); + rm2 = tcg_temp_new_i32(); + rm3 = tcg_temp_new_i32(); + rm4 = tcg_temp_new_i32(); + read_neon_element32(rm1, a->vm, 0, MO_32); + read_neon_element32(rm2, a->vm, 1, MO_32); + read_neon_element32(rm3, a->vm, 2, MO_32); + read_neon_element32(rm4, a->vm, 3, MO_32); rtmp = tcg_temp_new_i64(); shiftfn(rm1, rm1, constimm); @@ -1441,7 +1453,8 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, tcg_temp_free_i32(rm2); narrowfn(rm1, cpu_env, rtmp); - neon_store_reg(a->vd, 0, rm1); + write_neon_element32(rm1, a->vd, 0, MO_32); + tcg_temp_free_i32(rm1); shiftfn(rm3, rm3, constimm); shiftfn(rm4, rm4, constimm); @@ -1452,7 +1465,8 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, narrowfn(rm3, cpu_env, rtmp); tcg_temp_free_i64(rtmp); - neon_store_reg(a->vd, 1, rm3); + write_neon_element32(rm3, 
a->vd, 1, MO_32); + tcg_temp_free_i32(rm3); return true; } @@ -1553,8 +1567,10 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, widen_mask = dup_const(a->size + 1, widen_mask); } - rm0 = neon_load_reg(a->vm, 0); - rm1 = neon_load_reg(a->vm, 1); + rm0 = tcg_temp_new_i32(); + rm1 = tcg_temp_new_i32(); + read_neon_element32(rm0, a->vm, 0, MO_32); + read_neon_element32(rm1, a->vm, 1, MO_32); tmp = tcg_temp_new_i64(); widenfn(tmp, rm0); @@ -1808,11 +1824,13 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, if (src1_wide) { neon_load_reg64(rn0_64, a->vn); } else { - TCGv_i32 tmp = neon_load_reg(a->vn, 0); + TCGv_i32 tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vn, 0, MO_32); widenfn(rn0_64, tmp); tcg_temp_free_i32(tmp); } - rm = neon_load_reg(a->vm, 0); + rm = tcg_temp_new_i32(); + read_neon_element32(rm, a->vm, 0, MO_32); widenfn(rm_64, rm); tcg_temp_free_i32(rm); @@ -1825,11 +1843,13 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, if (src1_wide) { neon_load_reg64(rn1_64, a->vn + 1); } else { - TCGv_i32 tmp = neon_load_reg(a->vn, 1); + TCGv_i32 tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vn, 1, MO_32); widenfn(rn1_64, tmp); tcg_temp_free_i32(tmp); } - rm = neon_load_reg(a->vm, 1); + rm = tcg_temp_new_i32(); + read_neon_element32(rm, a->vm, 1, MO_32); neon_store_reg64(rn0_64, a->vd); @@ -1922,9 +1942,11 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a, narrowfn(rd1, rn_64); - neon_store_reg(a->vd, 0, rd0); - neon_store_reg(a->vd, 1, rd1); + write_neon_element32(rd0, a->vd, 0, MO_32); + write_neon_element32(rd1, a->vd, 1, MO_32); + tcg_temp_free_i32(rd0); + tcg_temp_free_i32(rd1); tcg_temp_free_i64(rn_64); tcg_temp_free_i64(rm_64); @@ -1999,14 +2021,14 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, rd0 = tcg_temp_new_i64(); rd1 = tcg_temp_new_i64(); - rn = neon_load_reg(a->vn, 0); - rm = neon_load_reg(a->vm, 0); + rn = tcg_temp_new_i32(); + rm = tcg_temp_new_i32(); + read_neon_element32(rn, a->vn, 0, MO_32); + read_neon_element32(rm, a->vm, 0, MO_32); opfn(rd0, rn, rm); - tcg_temp_free_i32(rn); - tcg_temp_free_i32(rm); - rn = neon_load_reg(a->vn, 1); - rm = neon_load_reg(a->vm, 1); + read_neon_element32(rn, a->vn, 1, MO_32); + read_neon_element32(rm, a->vm, 1, MO_32); opfn(rd1, rn, rm); tcg_temp_free_i32(rn); tcg_temp_free_i32(rm); @@ -2308,16 +2330,16 @@ static void gen_neon_dup_high16(TCGv_i32 var) static inline TCGv_i32 neon_get_scalar(int size, int reg) { - TCGv_i32 tmp; - if (size == 1) { - tmp = neon_load_reg(reg & 7, reg >> 4); + TCGv_i32 tmp = tcg_temp_new_i32(); + if (size == MO_16) { + read_neon_element32(tmp, reg & 7, reg >> 4, MO_32); if (reg & 8) { gen_neon_dup_high16(tmp); } else { gen_neon_dup_low16(tmp); } } else { - tmp = neon_load_reg(reg & 15, reg >> 4); + read_neon_element32(tmp, reg & 15, reg >> 4, MO_32); } return tmp; } @@ -2331,7 +2353,7 @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a, * perform an accumulation operation of that result into the * destination. */ - TCGv_i32 scalar; + TCGv_i32 scalar, tmp; int pass; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { @@ -2358,17 +2380,20 @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a, } scalar = neon_get_scalar(a->size, a->vm); + tmp = tcg_temp_new_i32(); for (pass = 0; pass < (a->q ? 
4 : 2); pass++) { - TCGv_i32 tmp = neon_load_reg(a->vn, pass); + read_neon_element32(tmp, a->vn, pass, MO_32); opfn(tmp, tmp, scalar); if (accfn) { - TCGv_i32 rd = neon_load_reg(a->vd, pass); + TCGv_i32 rd = tcg_temp_new_i32(); + read_neon_element32(rd, a->vd, pass, MO_32); accfn(tmp, rd, tmp); tcg_temp_free_i32(rd); } - neon_store_reg(a->vd, pass, tmp); + write_neon_element32(tmp, a->vd, pass, MO_32); } + tcg_temp_free_i32(tmp); tcg_temp_free_i32(scalar); return true; } @@ -2523,7 +2548,7 @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a, * performs a kind of fused op-then-accumulate using a helper * function that takes all of rd, rn and the scalar at once. */ - TCGv_i32 scalar; + TCGv_i32 scalar, rn, rd; int pass; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { @@ -2554,14 +2579,17 @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a, } scalar = neon_get_scalar(a->size, a->vm); + rn = tcg_temp_new_i32(); + rd = tcg_temp_new_i32(); for (pass = 0; pass < (a->q ? 4 : 2); pass++) { - TCGv_i32 rn = neon_load_reg(a->vn, pass); - TCGv_i32 rd = neon_load_reg(a->vd, pass); + read_neon_element32(rn, a->vn, pass, MO_32); + read_neon_element32(rd, a->vd, pass, MO_32); opfn(rd, cpu_env, rn, scalar, rd); - tcg_temp_free_i32(rn); - neon_store_reg(a->vd, pass, rd); + write_neon_element32(rd, a->vd, pass, MO_32); } + tcg_temp_free_i32(rn); + tcg_temp_free_i32(rd); tcg_temp_free_i32(scalar); return true; @@ -2628,12 +2656,12 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, scalar = neon_get_scalar(a->size, a->vm); /* Load all inputs before writing any outputs, in case of overlap */ - rn = neon_load_reg(a->vn, 0); + rn = tcg_temp_new_i32(); + read_neon_element32(rn, a->vn, 0, MO_32); rn0_64 = tcg_temp_new_i64(); opfn(rn0_64, rn, scalar); - tcg_temp_free_i32(rn); - rn = neon_load_reg(a->vn, 1); + read_neon_element32(rn, a->vn, 1, MO_32); rn1_64 = tcg_temp_new_i64(); opfn(rn1_64, rn, scalar); tcg_temp_free_i32(rn); @@ -2857,30 +2885,34 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a) return false; } n <<= 3; + tmp = tcg_temp_new_i32(); if (a->op) { - tmp = neon_load_reg(a->vd, 0); + read_neon_element32(tmp, a->vd, 0, MO_32); } else { - tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } - tmp2 = neon_load_reg(a->vm, 0); + tmp2 = tcg_temp_new_i32(); + read_neon_element32(tmp2, a->vm, 0, MO_32); ptr1 = vfp_reg_ptr(true, a->vn); tmp4 = tcg_const_i32(n); gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp4); - tcg_temp_free_i32(tmp); + if (a->op) { - tmp = neon_load_reg(a->vd, 1); + read_neon_element32(tmp, a->vd, 1, MO_32); } else { - tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } - tmp3 = neon_load_reg(a->vm, 1); + tmp3 = tcg_temp_new_i32(); + read_neon_element32(tmp3, a->vm, 1, MO_32); gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp4); + tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp4); tcg_temp_free_ptr(ptr1); - neon_store_reg(a->vd, 0, tmp2); - neon_store_reg(a->vd, 1, tmp3); - tcg_temp_free_i32(tmp); + + write_neon_element32(tmp2, a->vd, 0, MO_32); + write_neon_element32(tmp3, a->vd, 1, MO_32); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp3); return true; } @@ -2913,6 +2945,7 @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a) static bool trans_VREV64(DisasContext *s, arg_VREV64 *a) { int pass, half; + TCGv_i32 tmp[2]; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -2936,11 +2969,12 @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a) return true; } - for (pass = 0; pass < (a->q ? 
2 : 1); pass++) { - TCGv_i32 tmp[2]; + tmp[0] = tcg_temp_new_i32(); + tmp[1] = tcg_temp_new_i32(); + for (pass = 0; pass < (a->q ? 2 : 1); pass++) { for (half = 0; half < 2; half++) { - tmp[half] = neon_load_reg(a->vm, pass * 2 + half); + read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32); switch (a->size) { case 0: tcg_gen_bswap32_i32(tmp[half], tmp[half]); @@ -2954,9 +2988,12 @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a) g_assert_not_reached(); } } - neon_store_reg(a->vd, pass * 2, tmp[1]); - neon_store_reg(a->vd, pass * 2 + 1, tmp[0]); + write_neon_element32(tmp[1], a->vd, pass * 2, MO_32); + write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32); } + + tcg_temp_free_i32(tmp[0]); + tcg_temp_free_i32(tmp[1]); return true; } @@ -3001,12 +3038,14 @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a, rm0_64 = tcg_temp_new_i64(); rm1_64 = tcg_temp_new_i64(); rd_64 = tcg_temp_new_i64(); - tmp = neon_load_reg(a->vm, pass * 2); + + tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vm, pass * 2, MO_32); widenfn(rm0_64, tmp); - tcg_temp_free_i32(tmp); - tmp = neon_load_reg(a->vm, pass * 2 + 1); + read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32); widenfn(rm1_64, tmp); tcg_temp_free_i32(tmp); + opfn(rd_64, rm0_64, rm1_64); tcg_temp_free_i64(rm0_64); tcg_temp_free_i64(rm1_64); @@ -3219,8 +3258,10 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a, narrowfn(rd0, cpu_env, rm); neon_load_reg64(rm, a->vm + 1); narrowfn(rd1, cpu_env, rm); - neon_store_reg(a->vd, 0, rd0); - neon_store_reg(a->vd, 1, rd1); + write_neon_element32(rd0, a->vd, 0, MO_32); + write_neon_element32(rd1, a->vd, 1, MO_32); + tcg_temp_free_i32(rd0); + tcg_temp_free_i32(rd1); tcg_temp_free_i64(rm); return true; } @@ -3277,9 +3318,11 @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a) } rd = tcg_temp_new_i64(); + rm0 = tcg_temp_new_i32(); + rm1 = tcg_temp_new_i32(); - rm0 = neon_load_reg(a->vm, 0); - rm1 = neon_load_reg(a->vm, 1); + read_neon_element32(rm0, a->vm, 0, MO_32); + read_neon_element32(rm1, a->vm, 1, MO_32); widenfn(rd, rm0); tcg_gen_shli_i64(rd, rd, 8 << a->size); @@ -3320,21 +3363,25 @@ static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a) fpst = fpstatus_ptr(FPST_STD); ahp = get_ahp_flag(); - tmp = neon_load_reg(a->vm, 0); + tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vm, 0, MO_32); gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp); - tmp2 = neon_load_reg(a->vm, 1); + tmp2 = tcg_temp_new_i32(); + read_neon_element32(tmp2, a->vm, 1, MO_32); gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp); tcg_gen_shli_i32(tmp2, tmp2, 16); tcg_gen_or_i32(tmp2, tmp2, tmp); - tcg_temp_free_i32(tmp); - tmp = neon_load_reg(a->vm, 2); + read_neon_element32(tmp, a->vm, 2, MO_32); gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp); - tmp3 = neon_load_reg(a->vm, 3); - neon_store_reg(a->vd, 0, tmp2); + tmp3 = tcg_temp_new_i32(); + read_neon_element32(tmp3, a->vm, 3, MO_32); + write_neon_element32(tmp2, a->vd, 0, MO_32); + tcg_temp_free_i32(tmp2); gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp); tcg_gen_shli_i32(tmp3, tmp3, 16); tcg_gen_or_i32(tmp3, tmp3, tmp); - neon_store_reg(a->vd, 1, tmp3); + write_neon_element32(tmp3, a->vd, 1, MO_32); + tcg_temp_free_i32(tmp3); tcg_temp_free_i32(tmp); tcg_temp_free_i32(ahp); tcg_temp_free_ptr(fpst); @@ -3369,21 +3416,25 @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a) fpst = fpstatus_ptr(FPST_STD); ahp = get_ahp_flag(); tmp3 = tcg_temp_new_i32(); - tmp = neon_load_reg(a->vm, 0); - tmp2 = 
neon_load_reg(a->vm, 1); + tmp2 = tcg_temp_new_i32(); + tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vm, 0, MO_32); + read_neon_element32(tmp2, a->vm, 1, MO_32); tcg_gen_ext16u_i32(tmp3, tmp); gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp); - neon_store_reg(a->vd, 0, tmp3); + write_neon_element32(tmp3, a->vd, 0, MO_32); tcg_gen_shri_i32(tmp, tmp, 16); gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp); - neon_store_reg(a->vd, 1, tmp); - tmp3 = tcg_temp_new_i32(); + write_neon_element32(tmp, a->vd, 1, MO_32); + tcg_temp_free_i32(tmp); tcg_gen_ext16u_i32(tmp3, tmp2); gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp); - neon_store_reg(a->vd, 2, tmp3); + write_neon_element32(tmp3, a->vd, 2, MO_32); + tcg_temp_free_i32(tmp3); tcg_gen_shri_i32(tmp2, tmp2, 16); gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp); - neon_store_reg(a->vd, 3, tmp2); + write_neon_element32(tmp2, a->vd, 3, MO_32); + tcg_temp_free_i32(tmp2); tcg_temp_free_i32(ahp); tcg_temp_free_ptr(fpst); @@ -3489,6 +3540,7 @@ DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2) static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn) { + TCGv_i32 tmp; int pass; /* Handle a 2-reg-misc operation by iterating 32 bits at a time */ @@ -3514,11 +3566,13 @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn) return true; } + tmp = tcg_temp_new_i32(); for (pass = 0; pass < (a->q ? 4 : 2); pass++) { - TCGv_i32 tmp = neon_load_reg(a->vm, pass); + read_neon_element32(tmp, a->vm, pass, MO_32); fn(tmp, tmp); - neon_store_reg(a->vd, pass, tmp); + write_neon_element32(tmp, a->vd, pass, MO_32); } + tcg_temp_free_i32(tmp); return true; } @@ -3871,25 +3925,29 @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a) return true; } - if (a->size == 2) { + tmp = tcg_temp_new_i32(); + tmp2 = tcg_temp_new_i32(); + if (a->size == MO_32) { for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) { - tmp = neon_load_reg(a->vm, pass); - tmp2 = neon_load_reg(a->vd, pass + 1); - neon_store_reg(a->vm, pass, tmp2); - neon_store_reg(a->vd, pass + 1, tmp); + read_neon_element32(tmp, a->vm, pass, MO_32); + read_neon_element32(tmp2, a->vd, pass + 1, MO_32); + write_neon_element32(tmp2, a->vm, pass, MO_32); + write_neon_element32(tmp, a->vd, pass + 1, MO_32); } } else { for (pass = 0; pass < (a->q ? 
4 : 2); pass++) { - tmp = neon_load_reg(a->vm, pass); - tmp2 = neon_load_reg(a->vd, pass); - if (a->size == 0) { + read_neon_element32(tmp, a->vm, pass, MO_32); + read_neon_element32(tmp2, a->vd, pass, MO_32); + if (a->size == MO_8) { gen_neon_trn_u8(tmp, tmp2); } else { gen_neon_trn_u16(tmp, tmp2); } - neon_store_reg(a->vm, pass, tmp2); - neon_store_reg(a->vd, pass, tmp); + write_neon_element32(tmp2, a->vm, pass, MO_32); + write_neon_element32(tmp, a->vd, pass, MO_32); } } + tcg_temp_free_i32(tmp); + tcg_temp_free_i32(tmp2); return true; } diff --git a/target/arm/translate.c b/target/arm/translate.c index 88ded4ac2c..0ed9eab0b0 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1165,6 +1165,32 @@ static inline void neon_store_reg32(TCGv_i32 var, int reg) tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); } +static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size) +{ + long off = neon_element_offset(reg, ele, size); + + switch (size) { + case MO_32: + tcg_gen_ld_i32(dest, cpu_env, off); + break; + default: + g_assert_not_reached(); + } +} + +static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size) +{ + long off = neon_element_offset(reg, ele, size); + + switch (size) { + case MO_32: + tcg_gen_st_i32(src, cpu_env, off); + break; + default: + g_assert_not_reached(); + } +} + static TCGv_ptr vfp_reg_ptr(bool dp, int reg) { TCGv_ptr ret = tcg_temp_new_ptr(); -- cgit v1.2.3 From 4d5fa5a80ac28f34b8497be1e85371272413a12e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:14 +0000 Subject: target/arm: Expand read/write_neon_element32 to all MemOp We can then use this to improve VMOV (scalar to gp) and VMOV (gp to scalar) so that we simply perform the memory operation that we wanted, rather than inserting or extracting from a 32-bit quantity. These were the last uses of neon_load/store_reg, so remove them. Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-7-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-vfp.c.inc | 71 ++++++------------------------------------ target/arm/translate.c | 50 ++++++++++++++++------------- 2 files changed, 37 insertions(+), 84 deletions(-) diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc index 368bae0a73..28f22f9872 100644 --- a/target/arm/translate-vfp.c.inc +++ b/target/arm/translate-vfp.c.inc @@ -511,11 +511,9 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) { /* VMOV scalar to general purpose register */ TCGv_i32 tmp; - int pass; - uint32_t offset; - /* SIZE == 2 is a VFP instruction; otherwise NEON. */ - if (a->size == 2 + /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */ + if (a->size == MO_32 ? 
!dc_isar_feature(aa32_fpsp_v2, s) : !arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -526,44 +524,12 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) return false; } - offset = a->index << a->size; - pass = extract32(offset, 2, 1); - offset = extract32(offset, 0, 2) * 8; - if (!vfp_access_check(s)) { return true; } - tmp = neon_load_reg(a->vn, pass); - switch (a->size) { - case 0: - if (offset) { - tcg_gen_shri_i32(tmp, tmp, offset); - } - if (a->u) { - gen_uxtb(tmp); - } else { - gen_sxtb(tmp); - } - break; - case 1: - if (a->u) { - if (offset) { - tcg_gen_shri_i32(tmp, tmp, 16); - } else { - gen_uxth(tmp); - } - } else { - if (offset) { - tcg_gen_sari_i32(tmp, tmp, 16); - } else { - gen_sxth(tmp); - } - } - break; - case 2: - break; - } + tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN)); store_reg(s, a->rt, tmp); return true; @@ -572,12 +538,10 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) { /* VMOV general purpose register to scalar */ - TCGv_i32 tmp, tmp2; - int pass; - uint32_t offset; + TCGv_i32 tmp; - /* SIZE == 2 is a VFP instruction; otherwise NEON. */ - if (a->size == 2 + /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */ + if (a->size == MO_32 ? !dc_isar_feature(aa32_fpsp_v2, s) : !arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -588,30 +552,13 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) return false; } - offset = a->index << a->size; - pass = extract32(offset, 2, 1); - offset = extract32(offset, 0, 2) * 8; - if (!vfp_access_check(s)) { return true; } tmp = load_reg(s, a->rt); - switch (a->size) { - case 0: - tmp2 = neon_load_reg(a->vn, pass); - tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8); - tcg_temp_free_i32(tmp2); - break; - case 1: - tmp2 = neon_load_reg(a->vn, pass); - tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16); - tcg_temp_free_i32(tmp2); - break; - case 2: - break; - } - neon_store_reg(a->vn, pass, tmp); + write_neon_element32(tmp, a->vn, a->index, a->size); + tcg_temp_free_i32(tmp); return true; } diff --git a/target/arm/translate.c b/target/arm/translate.c index 0ed9eab0b0..55d5f4ed73 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1106,9 +1106,9 @@ static long neon_full_reg_offset(unsigned reg) * Return the offset of a 2**SIZE piece of a NEON register, at index ELE, * where 0 is the least significant end of the register. 
*/ -static long neon_element_offset(int reg, int element, MemOp size) +static long neon_element_offset(int reg, int element, MemOp memop) { - int element_size = 1 << size; + int element_size = 1 << (memop & MO_SIZE); int ofs = element * element_size; #ifdef HOST_WORDS_BIGENDIAN /* @@ -1132,19 +1132,6 @@ static long vfp_reg_offset(bool dp, unsigned reg) } } -static TCGv_i32 neon_load_reg(int reg, int pass) -{ - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_ld_i32(tmp, cpu_env, neon_element_offset(reg, pass, MO_32)); - return tmp; -} - -static void neon_store_reg(int reg, int pass, TCGv_i32 var) -{ - tcg_gen_st_i32(var, cpu_env, neon_element_offset(reg, pass, MO_32)); - tcg_temp_free_i32(var); -} - static inline void neon_load_reg64(TCGv_i64 var, int reg) { tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg)); @@ -1165,12 +1152,25 @@ static inline void neon_store_reg32(TCGv_i32 var, int reg) tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); } -static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size) +static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop) { - long off = neon_element_offset(reg, ele, size); + long off = neon_element_offset(reg, ele, memop); - switch (size) { - case MO_32: + switch (memop) { + case MO_SB: + tcg_gen_ld8s_i32(dest, cpu_env, off); + break; + case MO_UB: + tcg_gen_ld8u_i32(dest, cpu_env, off); + break; + case MO_SW: + tcg_gen_ld16s_i32(dest, cpu_env, off); + break; + case MO_UW: + tcg_gen_ld16u_i32(dest, cpu_env, off); + break; + case MO_UL: + case MO_SL: tcg_gen_ld_i32(dest, cpu_env, off); break; default: @@ -1178,11 +1178,17 @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp size) } } -static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp size) +static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop) { - long off = neon_element_offset(reg, ele, size); + long off = neon_element_offset(reg, ele, memop); - switch (size) { + switch (memop) { + case MO_8: + tcg_gen_st8_i32(src, cpu_env, off); + break; + case MO_16: + tcg_gen_st16_i32(src, cpu_env, off); + break; case MO_32: tcg_gen_st_i32(src, cpu_env, off); break; -- cgit v1.2.3 From 21c1c0e50b73c580c6bfc8f2314d1b6a14793561 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:14 +0000 Subject: target/arm: Rename neon_load_reg32 to vfp_load_reg32 The only uses of this function are for loading VFP single-precision values, and nothing to do with NEON. 
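The MemOp dispatch added in the previous patch reduces, on the host
side, to a choice of sign- or zero-extending load. A minimal
stand-alone sketch (illustrative only; the local MO_* values are
assumed to mirror QEMU's encoding, with the size in the low bits and
MO_SIGN as a flag, rather than being taken from its headers):

/* Illustrative sketch, not QEMU code: the extension rules that the
 * read_neon_element32() switch implements, using host loads instead
 * of TCG ops. */
#include <stdint.h>
#include <stdio.h>

enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_SIGN = 4 };

static int32_t read_element32(const void *p, int memop)
{
    switch (memop) {
    case MO_8:            return *(const uint8_t *)p;   /* MO_UB */
    case MO_8 | MO_SIGN:  return *(const int8_t *)p;    /* MO_SB */
    case MO_16:           return *(const uint16_t *)p;  /* MO_UW */
    case MO_16 | MO_SIGN: return *(const int16_t *)p;   /* MO_SW */
    case MO_32:
    case MO_32 | MO_SIGN: return *(const int32_t *)p;   /* MO_UL, MO_SL */
    default:              return 0; /* g_assert_not_reached() upstream */
    }
}

int main(void)
{
    uint8_t byte = 0x80;
    printf("unsigned: %d, signed: %d\n",
           read_element32(&byte, MO_8),
           read_element32(&byte, MO_8 | MO_SIGN));
    return 0;
}

This is what lets the VMOV (scalar to gp) patch below pass
a->size | (a->u ? 0 : MO_SIGN) straight through instead of loading a
full 32-bit pass and shifting/extending by hand.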
Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-8-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-vfp.c.inc | 184 ++++++++++++++++++++--------------------- target/arm/translate.c | 4 +- 2 files changed, 94 insertions(+), 94 deletions(-) diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc index 28f22f9872..d2a9b658bb 100644 --- a/target/arm/translate-vfp.c.inc +++ b/target/arm/translate-vfp.c.inc @@ -283,8 +283,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) frn = tcg_temp_new_i32(); frm = tcg_temp_new_i32(); dest = tcg_temp_new_i32(); - neon_load_reg32(frn, rn); - neon_load_reg32(frm, rm); + vfp_load_reg32(frn, rn); + vfp_load_reg32(frm, rm); switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, @@ -315,7 +315,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) if (sz == 1) { tcg_gen_andi_i32(dest, dest, 0xffff); } - neon_store_reg32(dest, rd); + vfp_store_reg32(dest, rd); tcg_temp_free_i32(frn); tcg_temp_free_i32(frm); tcg_temp_free_i32(dest); @@ -395,13 +395,13 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) TCGv_i32 tcg_res; tcg_op = tcg_temp_new_i32(); tcg_res = tcg_temp_new_i32(); - neon_load_reg32(tcg_op, rm); + vfp_load_reg32(tcg_op, rm); if (sz == 1) { gen_helper_rinth(tcg_res, tcg_op, fpst); } else { gen_helper_rints(tcg_res, tcg_op, fpst); } - neon_store_reg32(tcg_res, rd); + vfp_store_reg32(tcg_res, rd); tcg_temp_free_i32(tcg_op); tcg_temp_free_i32(tcg_res); } @@ -470,7 +470,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst); } tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res); - neon_store_reg32(tcg_tmp, rd); + vfp_store_reg32(tcg_tmp, rd); tcg_temp_free_i32(tcg_tmp); tcg_temp_free_i64(tcg_res); tcg_temp_free_i64(tcg_double); @@ -478,7 +478,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) TCGv_i32 tcg_single, tcg_res; tcg_single = tcg_temp_new_i32(); tcg_res = tcg_temp_new_i32(); - neon_load_reg32(tcg_single, rm); + vfp_load_reg32(tcg_single, rm); if (sz == 1) { if (is_signed) { gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst); @@ -492,7 +492,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst); } } - neon_store_reg32(tcg_res, rd); + vfp_store_reg32(tcg_res, rd); tcg_temp_free_i32(tcg_res); tcg_temp_free_i32(tcg_single); } @@ -776,14 +776,14 @@ static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a) if (a->l) { /* VFP to general purpose register */ tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vn); + vfp_load_reg32(tmp, a->vn); tcg_gen_andi_i32(tmp, tmp, 0xffff); store_reg(s, a->rt, tmp); } else { /* general purpose register to VFP */ tmp = load_reg(s, a->rt); tcg_gen_andi_i32(tmp, tmp, 0xffff); - neon_store_reg32(tmp, a->vn); + vfp_store_reg32(tmp, a->vn); tcg_temp_free_i32(tmp); } @@ -805,7 +805,7 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) if (a->l) { /* VFP to general purpose register */ tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vn); + vfp_load_reg32(tmp, a->vn); if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. 
*/ gen_set_nzcv(tmp); @@ -816,7 +816,7 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) } else { /* general purpose register to VFP */ tmp = load_reg(s, a->rt); - neon_store_reg32(tmp, a->vn); + vfp_store_reg32(tmp, a->vn); tcg_temp_free_i32(tmp); } @@ -842,18 +842,18 @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a) if (a->op) { /* fpreg to gpreg */ tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); store_reg(s, a->rt, tmp); tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm + 1); + vfp_load_reg32(tmp, a->vm + 1); store_reg(s, a->rt2, tmp); } else { /* gpreg to fpreg */ tmp = load_reg(s, a->rt); - neon_store_reg32(tmp, a->vm); + vfp_store_reg32(tmp, a->vm); tcg_temp_free_i32(tmp); tmp = load_reg(s, a->rt2); - neon_store_reg32(tmp, a->vm + 1); + vfp_store_reg32(tmp, a->vm + 1); tcg_temp_free_i32(tmp); } @@ -885,18 +885,18 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a) if (a->op) { /* fpreg to gpreg */ tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm * 2); + vfp_load_reg32(tmp, a->vm * 2); store_reg(s, a->rt, tmp); tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm * 2 + 1); + vfp_load_reg32(tmp, a->vm * 2 + 1); store_reg(s, a->rt2, tmp); } else { /* gpreg to fpreg */ tmp = load_reg(s, a->rt); - neon_store_reg32(tmp, a->vm * 2); + vfp_store_reg32(tmp, a->vm * 2); tcg_temp_free_i32(tmp); tmp = load_reg(s, a->rt2); - neon_store_reg32(tmp, a->vm * 2 + 1); + vfp_store_reg32(tmp, a->vm * 2 + 1); tcg_temp_free_i32(tmp); } @@ -927,9 +927,9 @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a) tmp = tcg_temp_new_i32(); if (a->l) { gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); } else { - neon_load_reg32(tmp, a->vd); + vfp_load_reg32(tmp, a->vd); gen_aa32_st16(s, tmp, addr, get_mem_index(s)); } tcg_temp_free_i32(tmp); @@ -961,9 +961,9 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) tmp = tcg_temp_new_i32(); if (a->l) { gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); } else { - neon_load_reg32(tmp, a->vd); + vfp_load_reg32(tmp, a->vd); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } tcg_temp_free_i32(tmp); @@ -1066,10 +1066,10 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) if (a->l) { /* load */ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - neon_store_reg32(tmp, a->vd + i); + vfp_store_reg32(tmp, a->vd + i); } else { /* store */ - neon_load_reg32(tmp, a->vd + i); + vfp_load_reg32(tmp, a->vd + i); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); } tcg_gen_addi_i32(addr, addr, offset); @@ -1285,15 +1285,15 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, fd = tcg_temp_new_i32(); fpst = fpstatus_ptr(FPST_FPCR); - neon_load_reg32(f0, vn); - neon_load_reg32(f1, vm); + vfp_load_reg32(f0, vn); + vfp_load_reg32(f1, vm); for (;;) { if (reads_vd) { - neon_load_reg32(fd, vd); + vfp_load_reg32(fd, vd); } fn(fd, f0, f1, fpst); - neon_store_reg32(fd, vd); + vfp_store_reg32(fd, vd); if (veclen == 0) { break; @@ -1303,10 +1303,10 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, veclen--; vd = vfp_advance_sreg(vd, delta_d); vn = vfp_advance_sreg(vn, delta_d); - neon_load_reg32(f0, vn); + vfp_load_reg32(f0, vn); if (delta_m) { vm = vfp_advance_sreg(vm, delta_m); - neon_load_reg32(f1, vm); + vfp_load_reg32(f1, vm); } } @@ -1349,14 +1349,14 @@ static bool do_vfp_3op_hp(DisasContext *s, 
VFPGen3OpSPFn *fn, fd = tcg_temp_new_i32(); fpst = fpstatus_ptr(FPST_FPCR_F16); - neon_load_reg32(f0, vn); - neon_load_reg32(f1, vm); + vfp_load_reg32(f0, vn); + vfp_load_reg32(f1, vm); if (reads_vd) { - neon_load_reg32(fd, vd); + vfp_load_reg32(fd, vd); } fn(fd, f0, f1, fpst); - neon_store_reg32(fd, vd); + vfp_store_reg32(fd, vd); tcg_temp_free_i32(f0); tcg_temp_free_i32(f1); @@ -1489,11 +1489,11 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) f0 = tcg_temp_new_i32(); fd = tcg_temp_new_i32(); - neon_load_reg32(f0, vm); + vfp_load_reg32(f0, vm); for (;;) { fn(fd, f0); - neon_store_reg32(fd, vd); + vfp_store_reg32(fd, vd); if (veclen == 0) { break; @@ -1503,7 +1503,7 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) /* single source one-many */ while (veclen--) { vd = vfp_advance_sreg(vd, delta_d); - neon_store_reg32(fd, vd); + vfp_store_reg32(fd, vd); } break; } @@ -1512,7 +1512,7 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) veclen--; vd = vfp_advance_sreg(vd, delta_d); vm = vfp_advance_sreg(vm, delta_m); - neon_load_reg32(f0, vm); + vfp_load_reg32(f0, vm); } tcg_temp_free_i32(f0); @@ -1545,9 +1545,9 @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) } f0 = tcg_temp_new_i32(); - neon_load_reg32(f0, vm); + vfp_load_reg32(f0, vm); fn(f0, f0); - neon_store_reg32(f0, vd); + vfp_store_reg32(f0, vd); tcg_temp_free_i32(f0); return true; @@ -2037,20 +2037,20 @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) vm = tcg_temp_new_i32(); vd = tcg_temp_new_i32(); - neon_load_reg32(vn, a->vn); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vn, a->vn); + vfp_load_reg32(vm, a->vm); if (neg_n) { /* VFNMS, VFMS */ gen_helper_vfp_negh(vn, vn); } - neon_load_reg32(vd, a->vd); + vfp_load_reg32(vd, a->vd); if (neg_d) { /* VFNMA, VFNMS */ gen_helper_vfp_negh(vd, vd); } fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst); - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(vn); @@ -2102,20 +2102,20 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) vm = tcg_temp_new_i32(); vd = tcg_temp_new_i32(); - neon_load_reg32(vn, a->vn); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vn, a->vn); + vfp_load_reg32(vm, a->vm); if (neg_n) { /* VFNMS, VFMS */ gen_helper_vfp_negs(vn, vn); } - neon_load_reg32(vd, a->vd); + vfp_load_reg32(vd, a->vd); if (neg_d) { /* VFNMA, VFNMS */ gen_helper_vfp_negs(vd, vd); } fpst = fpstatus_ptr(FPST_FPCR); gen_helper_vfp_muladds(vd, vn, vm, vd, fpst); - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(vn); @@ -2230,7 +2230,7 @@ static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a) } fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm)); - neon_store_reg32(fd, a->vd); + vfp_store_reg32(fd, a->vd); tcg_temp_free_i32(fd); return true; } @@ -2270,7 +2270,7 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a) fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm)); for (;;) { - neon_store_reg32(fd, vd); + vfp_store_reg32(fd, vd); if (veclen == 0) { break; @@ -2397,11 +2397,11 @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a) vd = tcg_temp_new_i32(); vm = tcg_temp_new_i32(); - neon_load_reg32(vd, a->vd); + vfp_load_reg32(vd, a->vd); if (a->z) { tcg_gen_movi_i32(vm, 0); } else { - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); } if (a->e) { 
@@ -2436,11 +2436,11 @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a) vd = tcg_temp_new_i32(); vm = tcg_temp_new_i32(); - neon_load_reg32(vd, a->vd); + vfp_load_reg32(vd, a->vd); if (a->z) { tcg_gen_movi_i32(vm, 0); } else { - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); } if (a->e) { @@ -2519,7 +2519,7 @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) /* The T bit tells us if we want the low or high 16 bits of Vm */ tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t)); gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); tcg_temp_free_i32(ahp_mode); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tmp); @@ -2583,7 +2583,7 @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) ahp_mode = get_ahp_flag(); tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); tcg_temp_free_i32(ahp_mode); @@ -2645,10 +2645,10 @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a) } tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_rinth(tmp, tmp, fpst); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tmp); return true; @@ -2668,10 +2668,10 @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) } tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rints(tmp, tmp, fpst); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tmp); return true; @@ -2724,13 +2724,13 @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a) } tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR_F16); tcg_rmode = tcg_const_i32(float_round_to_zero); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); gen_helper_rinth(tmp, tmp, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tcg_rmode); tcg_temp_free_i32(tmp); @@ -2752,13 +2752,13 @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) } tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); tcg_rmode = tcg_const_i32(float_round_to_zero); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); gen_helper_rints(tmp, tmp, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tcg_rmode); tcg_temp_free_i32(tmp); @@ -2816,10 +2816,10 @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a) } tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_rinth_exact(tmp, tmp, fpst); - neon_store_reg32(tmp, a->vd); + vfp_store_reg32(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tmp); return true; @@ -2839,10 +2839,10 @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) } tmp = tcg_temp_new_i32(); - neon_load_reg32(tmp, a->vm); + vfp_load_reg32(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rints_exact(tmp, tmp, fpst); - neon_store_reg32(tmp, a->vd); + 
vfp_store_reg32(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tmp); return true; @@ -2900,7 +2900,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) vm = tcg_temp_new_i32(); vd = tcg_temp_new_i64(); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); gen_helper_vfp_fcvtds(vd, vm, cpu_env); neon_store_reg64(vd, a->vd); tcg_temp_free_i32(vm); @@ -2930,7 +2930,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) vm = tcg_temp_new_i64(); neon_load_reg64(vm, a->vm); gen_helper_vfp_fcvtsd(vd, vm, cpu_env); - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); tcg_temp_free_i64(vm); return true; @@ -2950,7 +2950,7 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a) } vm = tcg_temp_new_i32(); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); fpst = fpstatus_ptr(FPST_FPCR_F16); if (a->s) { /* i32 -> f16 */ @@ -2959,7 +2959,7 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a) /* u32 -> f16 */ gen_helper_vfp_uitoh(vm, vm, fpst); } - neon_store_reg32(vm, a->vd); + vfp_store_reg32(vm, a->vd); tcg_temp_free_i32(vm); tcg_temp_free_ptr(fpst); return true; @@ -2979,7 +2979,7 @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) } vm = tcg_temp_new_i32(); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); fpst = fpstatus_ptr(FPST_FPCR); if (a->s) { /* i32 -> f32 */ @@ -2988,7 +2988,7 @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) /* u32 -> f32 */ gen_helper_vfp_uitos(vm, vm, fpst); } - neon_store_reg32(vm, a->vd); + vfp_store_reg32(vm, a->vd); tcg_temp_free_i32(vm); tcg_temp_free_ptr(fpst); return true; @@ -3015,7 +3015,7 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) vm = tcg_temp_new_i32(); vd = tcg_temp_new_i64(); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); fpst = fpstatus_ptr(FPST_FPCR); if (a->s) { /* i32 -> f64 */ @@ -3057,7 +3057,7 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) vd = tcg_temp_new_i32(); neon_load_reg64(vm, a->vm); gen_helper_vjcvt(vd, vm, cpu_env); - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_i64(vm); tcg_temp_free_i32(vd); return true; @@ -3080,7 +3080,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm); vd = tcg_temp_new_i32(); - neon_load_reg32(vd, a->vd); + vfp_load_reg32(vd, a->vd); fpst = fpstatus_ptr(FPST_FPCR_F16); shift = tcg_const_i32(frac_bits); @@ -3115,7 +3115,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) g_assert_not_reached(); } - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); tcg_temp_free_i32(shift); tcg_temp_free_ptr(fpst); @@ -3139,7 +3139,7 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) frac_bits = (a->opc & 1) ? 
(32 - a->imm) : (16 - a->imm); vd = tcg_temp_new_i32(); - neon_load_reg32(vd, a->vd); + vfp_load_reg32(vd, a->vd); fpst = fpstatus_ptr(FPST_FPCR); shift = tcg_const_i32(frac_bits); @@ -3174,7 +3174,7 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) g_assert_not_reached(); } - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); tcg_temp_free_i32(shift); tcg_temp_free_ptr(fpst); @@ -3261,7 +3261,7 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a) fpst = fpstatus_ptr(FPST_FPCR_F16); vm = tcg_temp_new_i32(); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); if (a->s) { if (a->rz) { @@ -3276,7 +3276,7 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a) gen_helper_vfp_touih(vm, vm, fpst); } } - neon_store_reg32(vm, a->vd); + vfp_store_reg32(vm, a->vd); tcg_temp_free_i32(vm); tcg_temp_free_ptr(fpst); return true; @@ -3297,7 +3297,7 @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) fpst = fpstatus_ptr(FPST_FPCR); vm = tcg_temp_new_i32(); - neon_load_reg32(vm, a->vm); + vfp_load_reg32(vm, a->vm); if (a->s) { if (a->rz) { @@ -3312,7 +3312,7 @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) gen_helper_vfp_touis(vm, vm, fpst); } } - neon_store_reg32(vm, a->vd); + vfp_store_reg32(vm, a->vd); tcg_temp_free_i32(vm); tcg_temp_free_ptr(fpst); return true; @@ -3355,7 +3355,7 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) gen_helper_vfp_touid(vd, vm, fpst); } } - neon_store_reg32(vd, a->vd); + vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); tcg_temp_free_i64(vm); tcg_temp_free_ptr(fpst); @@ -3468,10 +3468,10 @@ static bool trans_VINS(DisasContext *s, arg_VINS *a) /* Insert low half of Vm into high half of Vd */ rm = tcg_temp_new_i32(); rd = tcg_temp_new_i32(); - neon_load_reg32(rm, a->vm); - neon_load_reg32(rd, a->vd); + vfp_load_reg32(rm, a->vm); + vfp_load_reg32(rd, a->vd); tcg_gen_deposit_i32(rd, rd, rm, 16, 16); - neon_store_reg32(rd, a->vd); + vfp_store_reg32(rd, a->vd); tcg_temp_free_i32(rm); tcg_temp_free_i32(rd); return true; @@ -3495,9 +3495,9 @@ static bool trans_VMOVX(DisasContext *s, arg_VINS *a) /* Set Vd to high half of Vm */ rm = tcg_temp_new_i32(); - neon_load_reg32(rm, a->vm); + vfp_load_reg32(rm, a->vm); tcg_gen_shri_i32(rm, rm, 16); - neon_store_reg32(rm, a->vd); + vfp_store_reg32(rm, a->vd); tcg_temp_free_i32(rm); return true; } diff --git a/target/arm/translate.c b/target/arm/translate.c index 55d5f4ed73..8491ab705b 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1142,12 +1142,12 @@ static inline void neon_store_reg64(TCGv_i64 var, int reg) tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg)); } -static inline void neon_load_reg32(TCGv_i32 var, int reg) +static inline void vfp_load_reg32(TCGv_i32 var, int reg) { tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg)); } -static inline void neon_store_reg32(TCGv_i32 var, int reg) +static inline void vfp_store_reg32(TCGv_i32 var, int reg) { tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); } -- cgit v1.2.3 From 0aa8e700a53b0aa7275ed747b8fa3acb61d35f2d Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:14 +0000 Subject: target/arm: Add read/write_neon_element64 Replace all uses of neon_load/store_reg64 within translate-neon.c.inc. 
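The replacement pattern this patch applies is mechanical:
neon_load_reg64(t, a->vm + pass) becomes
read_neon_element64(t, a->vm, pass, MO_64), naming the same storage by
(register, element) instead of by consecutive D-register number. A
stand-alone sketch of why the two addressings agree (illustrative only;
the structs are simplified stand-ins for CPUARMState, whose zregs are
wider than one Q register once SVE is in the picture):

/* Illustrative sketch, not QEMU code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vreg { uint64_t d[2]; };              /* one 128-bit Q register */
struct cpu_state { struct vreg zregs[32]; };

/* Old addressing: consecutive D registers. */
static size_t dreg_offset(unsigned dreg)
{
    return offsetof(struct cpu_state, zregs[0]) +
           (dreg >> 1) * sizeof(struct vreg) +
           (dreg & 1) * sizeof(uint64_t);
}

/* New addressing: a Q register plus a 64-bit element index. */
static size_t element64_offset(unsigned dreg, int ele)
{
    return dreg_offset(dreg) + ele * sizeof(uint64_t);
}

int main(void)
{
    /* Element 1 of D4 names the same storage as D5. */
    printf("%zu == %zu\n", dreg_offset(5), element64_offset(4, 1));
    return 0;
}

The indexed form is the one that can grow per-element bounds checks and
endian fixups in one place, which is the point of the conversion.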
Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-9-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-neon.c.inc | 94 ++++++++++++++++++++--------------------- target/arm/translate.c | 26 ++++++++++++ 2 files changed, 73 insertions(+), 47 deletions(-) diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc index 549381703e..c2d67160f9 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c.inc @@ -1265,9 +1265,9 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a, for (pass = 0; pass < a->q + 1; pass++) { TCGv_i64 tmp = tcg_temp_new_i64(); - neon_load_reg64(tmp, a->vm + pass); + read_neon_element64(tmp, a->vm, pass, MO_64); fn(tmp, cpu_env, tmp, constimm); - neon_store_reg64(tmp, a->vd + pass); + write_neon_element64(tmp, a->vd, pass, MO_64); tcg_temp_free_i64(tmp); } tcg_temp_free_i64(constimm); @@ -1375,8 +1375,8 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, rd = tcg_temp_new_i32(); /* Load both inputs first to avoid potential overwrite if rm == rd */ - neon_load_reg64(rm1, a->vm); - neon_load_reg64(rm2, a->vm + 1); + read_neon_element64(rm1, a->vm, 0, MO_64); + read_neon_element64(rm2, a->vm, 1, MO_64); shiftfn(rm1, rm1, constimm); narrowfn(rd, cpu_env, rm1); @@ -1579,7 +1579,7 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, tcg_gen_shli_i64(tmp, tmp, a->shift); tcg_gen_andi_i64(tmp, tmp, ~widen_mask); } - neon_store_reg64(tmp, a->vd); + write_neon_element64(tmp, a->vd, 0, MO_64); widenfn(tmp, rm1); tcg_temp_free_i32(rm1); @@ -1587,7 +1587,7 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, tcg_gen_shli_i64(tmp, tmp, a->shift); tcg_gen_andi_i64(tmp, tmp, ~widen_mask); } - neon_store_reg64(tmp, a->vd + 1); + write_neon_element64(tmp, a->vd, 1, MO_64); tcg_temp_free_i64(tmp); return true; } @@ -1822,7 +1822,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, rm_64 = tcg_temp_new_i64(); if (src1_wide) { - neon_load_reg64(rn0_64, a->vn); + read_neon_element64(rn0_64, a->vn, 0, MO_64); } else { TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 0, MO_32); @@ -1841,7 +1841,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, * avoid incorrect results if a narrow input overlaps with the result. 
*/ if (src1_wide) { - neon_load_reg64(rn1_64, a->vn + 1); + read_neon_element64(rn1_64, a->vn, 1, MO_64); } else { TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 1, MO_32); @@ -1851,12 +1851,12 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, rm = tcg_temp_new_i32(); read_neon_element32(rm, a->vm, 1, MO_32); - neon_store_reg64(rn0_64, a->vd); + write_neon_element64(rn0_64, a->vd, 0, MO_64); widenfn(rm_64, rm); tcg_temp_free_i32(rm); opfn(rn1_64, rn1_64, rm_64); - neon_store_reg64(rn1_64, a->vd + 1); + write_neon_element64(rn1_64, a->vd, 1, MO_64); tcg_temp_free_i64(rn0_64); tcg_temp_free_i64(rn1_64); @@ -1928,15 +1928,15 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a, rd0 = tcg_temp_new_i32(); rd1 = tcg_temp_new_i32(); - neon_load_reg64(rn_64, a->vn); - neon_load_reg64(rm_64, a->vm); + read_neon_element64(rn_64, a->vn, 0, MO_64); + read_neon_element64(rm_64, a->vm, 0, MO_64); opfn(rn_64, rn_64, rm_64); narrowfn(rd0, rn_64); - neon_load_reg64(rn_64, a->vn + 1); - neon_load_reg64(rm_64, a->vm + 1); + read_neon_element64(rn_64, a->vn, 1, MO_64); + read_neon_element64(rm_64, a->vm, 1, MO_64); opfn(rn_64, rn_64, rm_64); @@ -2036,16 +2036,16 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, /* Don't store results until after all loads: they might overlap */ if (accfn) { tmp = tcg_temp_new_i64(); - neon_load_reg64(tmp, a->vd); + read_neon_element64(tmp, a->vd, 0, MO_64); accfn(tmp, tmp, rd0); - neon_store_reg64(tmp, a->vd); - neon_load_reg64(tmp, a->vd + 1); + write_neon_element64(tmp, a->vd, 0, MO_64); + read_neon_element64(tmp, a->vd, 1, MO_64); accfn(tmp, tmp, rd1); - neon_store_reg64(tmp, a->vd + 1); + write_neon_element64(tmp, a->vd, 1, MO_64); tcg_temp_free_i64(tmp); } else { - neon_store_reg64(rd0, a->vd); - neon_store_reg64(rd1, a->vd + 1); + write_neon_element64(rd0, a->vd, 0, MO_64); + write_neon_element64(rd1, a->vd, 1, MO_64); } tcg_temp_free_i64(rd0); @@ -2669,16 +2669,16 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, if (accfn) { TCGv_i64 t64 = tcg_temp_new_i64(); - neon_load_reg64(t64, a->vd); + read_neon_element64(t64, a->vd, 0, MO_64); accfn(t64, t64, rn0_64); - neon_store_reg64(t64, a->vd); - neon_load_reg64(t64, a->vd + 1); + write_neon_element64(t64, a->vd, 0, MO_64); + read_neon_element64(t64, a->vd, 1, MO_64); accfn(t64, t64, rn1_64); - neon_store_reg64(t64, a->vd + 1); + write_neon_element64(t64, a->vd, 1, MO_64); tcg_temp_free_i64(t64); } else { - neon_store_reg64(rn0_64, a->vd); - neon_store_reg64(rn1_64, a->vd + 1); + write_neon_element64(rn0_64, a->vd, 0, MO_64); + write_neon_element64(rn1_64, a->vd, 1, MO_64); } tcg_temp_free_i64(rn0_64); tcg_temp_free_i64(rn1_64); @@ -2812,10 +2812,10 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a) right = tcg_temp_new_i64(); dest = tcg_temp_new_i64(); - neon_load_reg64(right, a->vn); - neon_load_reg64(left, a->vm); + read_neon_element64(right, a->vn, 0, MO_64); + read_neon_element64(left, a->vm, 0, MO_64); tcg_gen_extract2_i64(dest, right, left, a->imm * 8); - neon_store_reg64(dest, a->vd); + write_neon_element64(dest, a->vd, 0, MO_64); tcg_temp_free_i64(left); tcg_temp_free_i64(right); @@ -2831,21 +2831,21 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a) destright = tcg_temp_new_i64(); if (a->imm < 8) { - neon_load_reg64(right, a->vn); - neon_load_reg64(middle, a->vn + 1); + read_neon_element64(right, a->vn, 0, MO_64); + read_neon_element64(middle, a->vn, 1, MO_64); tcg_gen_extract2_i64(destright, right, middle, a->imm * 8); - neon_load_reg64(left, 
a->vm); + read_neon_element64(left, a->vm, 0, MO_64); tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8); } else { - neon_load_reg64(right, a->vn + 1); - neon_load_reg64(middle, a->vm); + read_neon_element64(right, a->vn, 1, MO_64); + read_neon_element64(middle, a->vm, 0, MO_64); tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8); - neon_load_reg64(left, a->vm + 1); + read_neon_element64(left, a->vm, 1, MO_64); tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8); } - neon_store_reg64(destright, a->vd); - neon_store_reg64(destleft, a->vd + 1); + write_neon_element64(destright, a->vd, 0, MO_64); + write_neon_element64(destleft, a->vd, 1, MO_64); tcg_temp_free_i64(destright); tcg_temp_free_i64(destleft); @@ -3052,11 +3052,11 @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a, if (accfn) { TCGv_i64 tmp64 = tcg_temp_new_i64(); - neon_load_reg64(tmp64, a->vd + pass); + read_neon_element64(tmp64, a->vd, pass, MO_64); accfn(rd_64, tmp64, rd_64); tcg_temp_free_i64(tmp64); } - neon_store_reg64(rd_64, a->vd + pass); + write_neon_element64(rd_64, a->vd, pass, MO_64); tcg_temp_free_i64(rd_64); } return true; @@ -3254,9 +3254,9 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a, rd0 = tcg_temp_new_i32(); rd1 = tcg_temp_new_i32(); - neon_load_reg64(rm, a->vm); + read_neon_element64(rm, a->vm, 0, MO_64); narrowfn(rd0, cpu_env, rm); - neon_load_reg64(rm, a->vm + 1); + read_neon_element64(rm, a->vm, 1, MO_64); narrowfn(rd1, cpu_env, rm); write_neon_element32(rd0, a->vd, 0, MO_32); write_neon_element32(rd1, a->vd, 1, MO_32); @@ -3326,10 +3326,10 @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a) widenfn(rd, rm0); tcg_gen_shli_i64(rd, rd, 8 << a->size); - neon_store_reg64(rd, a->vd); + write_neon_element64(rd, a->vd, 0, MO_64); widenfn(rd, rm1); tcg_gen_shli_i64(rd, rd, 8 << a->size); - neon_store_reg64(rd, a->vd + 1); + write_neon_element64(rd, a->vd, 1, MO_64); tcg_temp_free_i64(rd); tcg_temp_free_i32(rm0); @@ -3847,10 +3847,10 @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a) rm = tcg_temp_new_i64(); rd = tcg_temp_new_i64(); for (pass = 0; pass < (a->q ? 
2 : 1); pass++) { - neon_load_reg64(rm, a->vm + pass); - neon_load_reg64(rd, a->vd + pass); - neon_store_reg64(rm, a->vd + pass); - neon_store_reg64(rd, a->vm + pass); + read_neon_element64(rm, a->vm, pass, MO_64); + read_neon_element64(rd, a->vd, pass, MO_64); + write_neon_element64(rm, a->vd, pass, MO_64); + write_neon_element64(rd, a->vm, pass, MO_64); } tcg_temp_free_i64(rm); tcg_temp_free_i64(rd); diff --git a/target/arm/translate.c b/target/arm/translate.c index 8491ab705b..4fb0a62200 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1178,6 +1178,19 @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop) } } +static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop) +{ + long off = neon_element_offset(reg, ele, memop); + + switch (memop) { + case MO_Q: + tcg_gen_ld_i64(dest, cpu_env, off); + break; + default: + g_assert_not_reached(); + } +} + static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop) { long off = neon_element_offset(reg, ele, memop); @@ -1197,6 +1210,19 @@ static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop) } } +static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop) +{ + long off = neon_element_offset(reg, ele, memop); + + switch (memop) { + case MO_64: + tcg_gen_st_i64(src, cpu_env, off); + break; + default: + g_assert_not_reached(); + } +} + static TCGv_ptr vfp_reg_ptr(bool dp, int reg) { TCGv_ptr ret = tcg_temp_new_ptr(); -- cgit v1.2.3 From b38b96ca90827012ab8eb045c1337cea83a54c4b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:14 +0000 Subject: target/arm: Rename neon_load_reg64 to vfp_load_reg64 The only uses of this function are for loading VFP double-precision values, and have nothing to do with NEON.
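The rename is easier to follow with the storage layout in mind: D registers are addressed linearly, two 8-byte doublewords per 16-byte backing register, with none of the per-element endianness swizzling that NEON element accesses need. A hedged sketch of that mapping (offsets are illustrative, not QEMU's layout):

    #include <stdio.h>

    /* D registers pack two per 16-byte vector register: D(2n) in the low
     * doubleword, D(2n+1) in the high one. Plain linear addressing. */
    static long vfp_dreg_offset(unsigned dreg)
    {
        return (dreg >> 1) * 16 + (dreg & 1) * 8;
    }

    int main(void)
    {
        printf("D0 at %ld, D1 at %ld, D2 at %ld\n",
               vfp_dreg_offset(0), vfp_dreg_offset(1), vfp_dreg_offset(2));
        return 0;
    }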
Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-10-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-vfp.c.inc | 84 +++++++++++++++++++++--------------------- target/arm/translate.c | 8 ++-- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc index d2a9b658bb..f966de5b1f 100644 --- a/target/arm/translate-vfp.c.inc +++ b/target/arm/translate-vfp.c.inc @@ -236,8 +236,8 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_gen_ext_i32_i64(nf, cpu_NF); tcg_gen_ext_i32_i64(vf, cpu_VF); - neon_load_reg64(frn, rn); - neon_load_reg64(frm, rm); + vfp_load_reg64(frn, rn); + vfp_load_reg64(frm, rm); switch (a->cc) { case 0: /* eq: Z */ tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, @@ -264,7 +264,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_temp_free_i64(tmp); break; } - neon_store_reg64(dest, rd); + vfp_store_reg64(dest, rd); tcg_temp_free_i64(frn); tcg_temp_free_i64(frm); tcg_temp_free_i64(dest); @@ -385,9 +385,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) TCGv_i64 tcg_res; tcg_op = tcg_temp_new_i64(); tcg_res = tcg_temp_new_i64(); - neon_load_reg64(tcg_op, rm); + vfp_load_reg64(tcg_op, rm); gen_helper_rintd(tcg_res, tcg_op, fpst); - neon_store_reg64(tcg_res, rd); + vfp_store_reg64(tcg_res, rd); tcg_temp_free_i64(tcg_op); tcg_temp_free_i64(tcg_res); } else { @@ -463,7 +463,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) tcg_double = tcg_temp_new_i64(); tcg_res = tcg_temp_new_i64(); tcg_tmp = tcg_temp_new_i32(); - neon_load_reg64(tcg_double, rm); + vfp_load_reg64(tcg_double, rm); if (is_signed) { gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst); } else { @@ -1002,9 +1002,9 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) tmp = tcg_temp_new_i64(); if (a->l) { gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); - neon_store_reg64(tmp, a->vd); + vfp_store_reg64(tmp, a->vd); } else { - neon_load_reg64(tmp, a->vd); + vfp_load_reg64(tmp, a->vd); gen_aa32_st64(s, tmp, addr, get_mem_index(s)); } tcg_temp_free_i64(tmp); @@ -1149,10 +1149,10 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) if (a->l) { /* load */ gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); - neon_store_reg64(tmp, a->vd + i); + vfp_store_reg64(tmp, a->vd + i); } else { /* store */ - neon_load_reg64(tmp, a->vd + i); + vfp_load_reg64(tmp, a->vd + i); gen_aa32_st64(s, tmp, addr, get_mem_index(s)); } tcg_gen_addi_i32(addr, addr, offset); @@ -1416,15 +1416,15 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, fd = tcg_temp_new_i64(); fpst = fpstatus_ptr(FPST_FPCR); - neon_load_reg64(f0, vn); - neon_load_reg64(f1, vm); + vfp_load_reg64(f0, vn); + vfp_load_reg64(f1, vm); for (;;) { if (reads_vd) { - neon_load_reg64(fd, vd); + vfp_load_reg64(fd, vd); } fn(fd, f0, f1, fpst); - neon_store_reg64(fd, vd); + vfp_store_reg64(fd, vd); if (veclen == 0) { break; @@ -1433,10 +1433,10 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, veclen--; vd = vfp_advance_dreg(vd, delta_d); vn = vfp_advance_dreg(vn, delta_d); - neon_load_reg64(f0, vn); + vfp_load_reg64(f0, vn); if (delta_m) { vm = vfp_advance_dreg(vm, delta_m); - neon_load_reg64(f1, vm); + vfp_load_reg64(f1, vm); } } @@ -1599,11 +1599,11 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) f0 = tcg_temp_new_i64(); fd = tcg_temp_new_i64(); - neon_load_reg64(f0, vm); + vfp_load_reg64(f0, vm); for (;;) 
{ fn(fd, f0); - neon_store_reg64(fd, vd); + vfp_store_reg64(fd, vd); if (veclen == 0) { break; @@ -1613,7 +1613,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) /* single source one-many */ while (veclen--) { vd = vfp_advance_dreg(vd, delta_d); - neon_store_reg64(fd, vd); + vfp_store_reg64(fd, vd); } break; } @@ -1622,7 +1622,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) veclen--; vd = vfp_advance_dreg(vd, delta_d); vd = vfp_advance_dreg(vm, delta_m); - neon_load_reg64(f0, vm); + vfp_load_reg64(f0, vm); } tcg_temp_free_i64(f0); @@ -2173,20 +2173,20 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) vm = tcg_temp_new_i64(); vd = tcg_temp_new_i64(); - neon_load_reg64(vn, a->vn); - neon_load_reg64(vm, a->vm); + vfp_load_reg64(vn, a->vn); + vfp_load_reg64(vm, a->vm); if (neg_n) { /* VFNMS, VFMS */ gen_helper_vfp_negd(vn, vn); } - neon_load_reg64(vd, a->vd); + vfp_load_reg64(vd, a->vd); if (neg_d) { /* VFNMA, VFNMS */ gen_helper_vfp_negd(vd, vd); } fpst = fpstatus_ptr(FPST_FPCR); gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst); - neon_store_reg64(vd, a->vd); + vfp_store_reg64(vd, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i64(vn); @@ -2325,7 +2325,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm)); for (;;) { - neon_store_reg64(fd, vd); + vfp_store_reg64(fd, vd); if (veclen == 0) { break; @@ -2480,11 +2480,11 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) vd = tcg_temp_new_i64(); vm = tcg_temp_new_i64(); - neon_load_reg64(vd, a->vd); + vfp_load_reg64(vd, a->vd); if (a->z) { tcg_gen_movi_i64(vm, 0); } else { - neon_load_reg64(vm, a->vm); + vfp_load_reg64(vm, a->vm); } if (a->e) { @@ -2557,7 +2557,7 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t)); vd = tcg_temp_new_i64(); gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode); - neon_store_reg64(vd, a->vd); + vfp_store_reg64(vd, a->vd); tcg_temp_free_i32(ahp_mode); tcg_temp_free_ptr(fpst); tcg_temp_free_i32(tmp); @@ -2621,7 +2621,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) tmp = tcg_temp_new_i32(); vm = tcg_temp_new_i64(); - neon_load_reg64(vm, a->vm); + vfp_load_reg64(vm, a->vm); gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode); tcg_temp_free_i64(vm); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); @@ -2700,10 +2700,10 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) } tmp = tcg_temp_new_i64(); - neon_load_reg64(tmp, a->vm); + vfp_load_reg64(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rintd(tmp, tmp, fpst); - neon_store_reg64(tmp, a->vd); + vfp_store_reg64(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i64(tmp); return true; @@ -2789,13 +2789,13 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) } tmp = tcg_temp_new_i64(); - neon_load_reg64(tmp, a->vm); + vfp_load_reg64(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); tcg_rmode = tcg_const_i32(float_round_to_zero); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); gen_helper_rintd(tmp, tmp, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - neon_store_reg64(tmp, a->vd); + vfp_store_reg64(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i64(tmp); tcg_temp_free_i32(tcg_rmode); @@ -2871,10 +2871,10 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) } tmp = tcg_temp_new_i64(); - neon_load_reg64(tmp, a->vm); + 
vfp_load_reg64(tmp, a->vm); fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rintd_exact(tmp, tmp, fpst); - neon_store_reg64(tmp, a->vd); + vfp_store_reg64(tmp, a->vd); tcg_temp_free_ptr(fpst); tcg_temp_free_i64(tmp); return true; @@ -2902,7 +2902,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) vd = tcg_temp_new_i64(); vfp_load_reg32(vm, a->vm); gen_helper_vfp_fcvtds(vd, vm, cpu_env); - neon_store_reg64(vd, a->vd); + vfp_store_reg64(vd, a->vd); tcg_temp_free_i32(vm); tcg_temp_free_i64(vd); return true; @@ -2928,7 +2928,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) vd = tcg_temp_new_i32(); vm = tcg_temp_new_i64(); - neon_load_reg64(vm, a->vm); + vfp_load_reg64(vm, a->vm); gen_helper_vfp_fcvtsd(vd, vm, cpu_env); vfp_store_reg32(vd, a->vd); tcg_temp_free_i32(vd); @@ -3024,7 +3024,7 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) /* u32 -> f64 */ gen_helper_vfp_uitod(vd, vm, fpst); } - neon_store_reg64(vd, a->vd); + vfp_store_reg64(vd, a->vd); tcg_temp_free_i32(vm); tcg_temp_free_i64(vd); tcg_temp_free_ptr(fpst); @@ -3055,7 +3055,7 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) vm = tcg_temp_new_i64(); vd = tcg_temp_new_i32(); - neon_load_reg64(vm, a->vm); + vfp_load_reg64(vm, a->vm); gen_helper_vjcvt(vd, vm, cpu_env); vfp_store_reg32(vd, a->vd); tcg_temp_free_i64(vm); @@ -3204,7 +3204,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm); vd = tcg_temp_new_i64(); - neon_load_reg64(vd, a->vd); + vfp_load_reg64(vd, a->vd); fpst = fpstatus_ptr(FPST_FPCR); shift = tcg_const_i32(frac_bits); @@ -3239,7 +3239,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) g_assert_not_reached(); } - neon_store_reg64(vd, a->vd); + vfp_store_reg64(vd, a->vd); tcg_temp_free_i64(vd); tcg_temp_free_i32(shift); tcg_temp_free_ptr(fpst); @@ -3340,7 +3340,7 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) fpst = fpstatus_ptr(FPST_FPCR); vm = tcg_temp_new_i64(); vd = tcg_temp_new_i32(); - neon_load_reg64(vm, a->vm); + vfp_load_reg64(vm, a->vm); if (a->s) { if (a->rz) { diff --git a/target/arm/translate.c b/target/arm/translate.c index 4fb0a62200..7611c1f0f1 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1132,14 +1132,14 @@ static long vfp_reg_offset(bool dp, unsigned reg) } } -static inline void neon_load_reg64(TCGv_i64 var, int reg) +static inline void vfp_load_reg64(TCGv_i64 var, int reg) { - tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg)); + tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg)); } -static inline void neon_store_reg64(TCGv_i64 var, int reg) +static inline void vfp_store_reg64(TCGv_i64 var, int reg) { - tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg)); + tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg)); } static inline void vfp_load_reg32(TCGv_i32 var, int reg) -- cgit v1.2.3 From 9f1a5f93c2dd345dc6c8fe86ed14bf1485056f6e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:15 +0000 Subject: target/arm: Simplify do_long_3d and do_2scalar_long In both cases, we can sink the write-back and perform the accumulate into the normal destination temps. 
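Reduced to plain C, the shape of the change is this (a sketch of the control flow only, not the actual TCG code): accumulate into the result temporaries so that the accumulating and non-accumulating variants share one unconditional write-back.

    #include <stdint.h>

    typedef uint64_t (*accfn_t)(uint64_t acc, uint64_t val);

    static void finish_pass(uint64_t vd[2], uint64_t rd0, uint64_t rd1,
                            accfn_t accfn)
    {
        if (accfn) {
            /* fold the old destination into the new results ... */
            rd0 = accfn(vd[0], rd0);
            rd1 = accfn(vd[1], rd1);
        }
        /* ... so a single store path serves both variants */
        vd[0] = rd0;
        vd[1] = rd1;
    }

    int main(void)
    {
        uint64_t d[2] = { 1, 2 };
        finish_pass(d, 10, 20, 0);   /* non-accumulating: d becomes {10, 20} */
        return (d[0] == 10 && d[1] == 20) ? 0 : 1;
    }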
Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-11-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-neon.c.inc | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc index c2d67160f9..1c16c56e7e 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c.inc @@ -2037,17 +2037,14 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, if (accfn) { tmp = tcg_temp_new_i64(); read_neon_element64(tmp, a->vd, 0, MO_64); - accfn(tmp, tmp, rd0); - write_neon_element64(tmp, a->vd, 0, MO_64); + accfn(rd0, tmp, rd0); read_neon_element64(tmp, a->vd, 1, MO_64); - accfn(tmp, tmp, rd1); - write_neon_element64(tmp, a->vd, 1, MO_64); + accfn(rd1, tmp, rd1); tcg_temp_free_i64(tmp); - } else { - write_neon_element64(rd0, a->vd, 0, MO_64); - write_neon_element64(rd1, a->vd, 1, MO_64); } + write_neon_element64(rd0, a->vd, 0, MO_64); + write_neon_element64(rd1, a->vd, 1, MO_64); tcg_temp_free_i64(rd0); tcg_temp_free_i64(rd1); @@ -2670,16 +2667,14 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, if (accfn) { TCGv_i64 t64 = tcg_temp_new_i64(); read_neon_element64(t64, a->vd, 0, MO_64); - accfn(t64, t64, rn0_64); - write_neon_element64(t64, a->vd, 0, MO_64); + accfn(rn0_64, t64, rn0_64); read_neon_element64(t64, a->vd, 1, MO_64); - accfn(t64, t64, rn1_64); - write_neon_element64(t64, a->vd, 1, MO_64); + accfn(rn1_64, t64, rn1_64); tcg_temp_free_i64(t64); - } else { - write_neon_element64(rn0_64, a->vd, 0, MO_64); - write_neon_element64(rn1_64, a->vd, 1, MO_64); } + + write_neon_element64(rn0_64, a->vd, 0, MO_64); + write_neon_element64(rn1_64, a->vd, 1, MO_64); tcg_temp_free_i64(rn0_64); tcg_temp_free_i64(rn1_64); return true; -- cgit v1.2.3 From 8aab18a2c5209e4e48998a61fbc2d89f374331ed Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 2 Nov 2020 16:52:15 +0000 Subject: target/arm: Improve do_prewiden_3d We can use proper widening loads to extend 32-bit inputs, and skip the "widenfn" step. 
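What a proper widening load buys: the 32-bit element is sign- or zero-extended to 64 bits as part of the load itself, so the separate widenfn call disappears. A standalone model of the two load flavours, standing in for the tcg_gen_ld32s_i64()/tcg_gen_ld32u_i64() ops the patch below dispatches to:

    #include <stdint.h>
    #include <string.h>

    /* Model of a sign- or zero-extending 32-bit load into a 64-bit temp. */
    static uint64_t load32_widen(const void *base, long off, int sign_extend)
    {
        uint32_t raw;
        memcpy(&raw, (const char *)base + off, sizeof(raw));
        return sign_extend ? (uint64_t)(int64_t)(int32_t)raw : (uint64_t)raw;
    }

    int main(void)
    {
        uint32_t v = 0x80000000u;
        /* signed widen -> 0xffffffff80000000, unsigned -> 0x0000000080000000 */
        return (load32_widen(&v, 0, 1) == 0xffffffff80000000ull &&
                load32_widen(&v, 0, 0) == 0x0000000080000000ull) ? 0 : 1;
    }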
Signed-off-by: Richard Henderson Message-id: 20201030022618.785675-12-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/translate-neon.c.inc | 70 +++++++++++++++++++++++------------------ target/arm/translate.c | 6 ++++ 2 files changed, 45 insertions(+), 31 deletions(-) diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc index 1c16c56e7e..59368cb243 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c.inc @@ -1788,11 +1788,10 @@ static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a) static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, NeonGenWidenFn *widenfn, NeonGenTwo64OpFn *opfn, - bool src1_wide) + int src1_mop, int src2_mop) { /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VAADW/VSUBW) */ TCGv_i64 rn0_64, rn1_64, rm_64; - TCGv_i32 rm; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -1804,12 +1803,12 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, return false; } - if (!widenfn || !opfn) { + if (!opfn) { /* size == 3 case, which is an entirely different insn group */ return false; } - if ((a->vd & 1) || (src1_wide && (a->vn & 1))) { + if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) { return false; } @@ -1821,40 +1820,48 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, rn1_64 = tcg_temp_new_i64(); rm_64 = tcg_temp_new_i64(); - if (src1_wide) { - read_neon_element64(rn0_64, a->vn, 0, MO_64); + if (src1_mop >= 0) { + read_neon_element64(rn0_64, a->vn, 0, src1_mop); } else { TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 0, MO_32); widenfn(rn0_64, tmp); tcg_temp_free_i32(tmp); } - rm = tcg_temp_new_i32(); - read_neon_element32(rm, a->vm, 0, MO_32); + if (src2_mop >= 0) { + read_neon_element64(rm_64, a->vm, 0, src2_mop); + } else { + TCGv_i32 tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vm, 0, MO_32); + widenfn(rm_64, tmp); + tcg_temp_free_i32(tmp); + } - widenfn(rm_64, rm); - tcg_temp_free_i32(rm); opfn(rn0_64, rn0_64, rm_64); /* * Load second pass inputs before storing the first pass result, to * avoid incorrect results if a narrow input overlaps with the result. 
*/ - if (src1_wide) { - read_neon_element64(rn1_64, a->vn, 1, MO_64); + if (src1_mop >= 0) { + read_neon_element64(rn1_64, a->vn, 1, src1_mop); } else { TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 1, MO_32); widenfn(rn1_64, tmp); tcg_temp_free_i32(tmp); } - rm = tcg_temp_new_i32(); - read_neon_element32(rm, a->vm, 1, MO_32); + if (src2_mop >= 0) { + read_neon_element64(rm_64, a->vm, 1, src2_mop); + } else { + TCGv_i32 tmp = tcg_temp_new_i32(); + read_neon_element32(tmp, a->vm, 1, MO_32); + widenfn(rm_64, tmp); + tcg_temp_free_i32(tmp); + } write_neon_element64(rn0_64, a->vd, 0, MO_64); - widenfn(rm_64, rm); - tcg_temp_free_i32(rm); opfn(rn1_64, rn1_64, rm_64); write_neon_element64(rn1_64, a->vd, 1, MO_64); @@ -1865,14 +1872,13 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, return true; } -#define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \ +#define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN) \ static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \ { \ static NeonGenWidenFn * const widenfn[] = { \ gen_helper_neon_widen_##S##8, \ gen_helper_neon_widen_##S##16, \ - tcg_gen_##EXT##_i32_i64, \ - NULL, \ + NULL, NULL, \ }; \ static NeonGenTwo64OpFn * const addfn[] = { \ gen_helper_neon_##OP##l_u16, \ @@ -1880,18 +1886,20 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, tcg_gen_##OP##_i64, \ NULL, \ }; \ - return do_prewiden_3d(s, a, widenfn[a->size], \ - addfn[a->size], SRC1WIDE); \ - } - -DO_PREWIDEN(VADDL_S, s, ext, add, false) -DO_PREWIDEN(VADDL_U, u, extu, add, false) -DO_PREWIDEN(VSUBL_S, s, ext, sub, false) -DO_PREWIDEN(VSUBL_U, u, extu, sub, false) -DO_PREWIDEN(VADDW_S, s, ext, add, true) -DO_PREWIDEN(VADDW_U, u, extu, add, true) -DO_PREWIDEN(VSUBW_S, s, ext, sub, true) -DO_PREWIDEN(VSUBW_U, u, extu, sub, true) + int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \ + return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \ + SRC1WIDE ? MO_Q : narrow_mop, \ + narrow_mop); \ + } + +DO_PREWIDEN(VADDL_S, s, add, false, MO_SIGN) +DO_PREWIDEN(VADDL_U, u, add, false, 0) +DO_PREWIDEN(VSUBL_S, s, sub, false, MO_SIGN) +DO_PREWIDEN(VSUBL_U, u, sub, false, 0) +DO_PREWIDEN(VADDW_S, s, add, true, MO_SIGN) +DO_PREWIDEN(VADDW_U, u, add, true, 0) +DO_PREWIDEN(VSUBW_S, s, sub, true, MO_SIGN) +DO_PREWIDEN(VSUBW_U, u, sub, true, 0) static bool do_narrow_3d(DisasContext *s, arg_3diff *a, NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn) diff --git a/target/arm/translate.c b/target/arm/translate.c index 7611c1f0f1..29ea1eb781 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -1183,6 +1183,12 @@ static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop) long off = neon_element_offset(reg, ele, memop); switch (memop) { + case MO_SL: + tcg_gen_ld32s_i64(dest, cpu_env, off); + break; + case MO_UL: + tcg_gen_ld32u_i64(dest, cpu_env, off); + break; case MO_Q: tcg_gen_ld_i64(dest, cpu_env, off); break; -- cgit v1.2.3 From 552714c0812a10e5cff239bd29928e5fcb8d8b3b Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:15 +0000 Subject: target/arm: Fix float16 pairwise Neon ops on big-endian hosts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the neon_padd/pmax/pmin helpers for float16, a cut-and-paste error meant we were using the H4() address swizzler macro rather than the H2() which is required for 2-byte data. This had no effect on little-endian hosts but meant we put the result data into the destination Dreg in the wrong order on big-endian hosts. 
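For context on the swizzler macros: HN() rewrites an element index on big-endian hosts so that arrays of N-byte elements can be indexed architecturally, and the bug was indexing a 2-byte array with the 4-byte swizzle. A sketch of the idea; the macro bodies are modelled on, not copied from, QEMU's definitions:

    #include <stdio.h>

    /* On a big-endian host, flip the low index bits so element 0 is the
     * architecturally first element of the 64-bit doubleword. */
    #ifdef DEMO_BIG_ENDIAN
    #define H2(x) ((x) ^ 3)   /* four 2-byte elements per doubleword */
    #define H4(x) ((x) ^ 1)   /* two 4-byte elements per doubleword */
    #else
    #define H2(x) (x)
    #define H4(x) (x)
    #endif

    int main(void)
    {
        /* Compile with -DDEMO_BIG_ENDIAN to see why H4() is wrong for
         * 16-bit data: indices 0..3 map to 1,0,3,2 instead of 3,2,1,0. */
        for (int i = 0; i < 4; i++) {
            printf("i=%d  H2->%d  H4->%d\n", i, H2(i), H4(i));
        }
        return 0;
    }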
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20201028191712.4910-2-peter.maydell@linaro.org --- target/arm/vec_helper.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c index a973454e4f..30d76d05be 100644 --- a/target/arm/vec_helper.c +++ b/target/arm/vec_helper.c @@ -1858,10 +1858,10 @@ DO_ABA(gvec_uaba_d, uint64_t) r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \ r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \ \ - d[H4(0)] = r0; \ - d[H4(1)] = r1; \ - d[H4(2)] = r2; \ - d[H4(3)] = r3; \ + d[H2(0)] = r0; \ + d[H2(1)] = r1; \ + d[H2(2)] = r2; \ + d[H2(3)] = r3; \ } DO_NEON_PAIRWISE(neon_padd, add) -- cgit v1.2.3 From d1a9254be5cc93afb15be19f7543da6ff4806256 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:15 +0000 Subject: target/arm: Fix VUDOT/VSDOT (scalar) on big-endian hosts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The helper functions for performing the udot/sdot operations against a scalar were not using an address-swizzling macro when converting the index of the scalar element into a pointer into the vm array. This had no effect on little-endian hosts but meant we generated incorrect results on big-endian hosts. For these insns, the index is indexing over group of 4 8-bit values, so 32 bits per indexed entity, and H4() is therefore what we want. (For Neon the only possible input indexes are 0 and 1.) Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20201028191712.4910-3-peter.maydell@linaro.org --- target/arm/vec_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c index 30d76d05be..0f33127c4c 100644 --- a/target/arm/vec_helper.c +++ b/target/arm/vec_helper.c @@ -293,7 +293,7 @@ void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) intptr_t index = simd_data(desc); uint32_t *d = vd; int8_t *n = vn; - int8_t *m_indexed = (int8_t *)vm + index * 4; + int8_t *m_indexed = (int8_t *)vm + H4(index) * 4; /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. * Otherwise opr_sz is a multiple of 16. @@ -324,7 +324,7 @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) intptr_t index = simd_data(desc); uint32_t *d = vd; uint8_t *n = vn; - uint8_t *m_indexed = (uint8_t *)vm + index * 4; + uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4; /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. * Otherwise opr_sz is a multiple of 16. -- cgit v1.2.3 From 373e7ffde9bae90a20fb5db21b053f23091689f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?= Date: Mon, 2 Nov 2020 16:52:15 +0000 Subject: target/arm: fix handling of HCR.FB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HCR should be applied when NS is set, not when it is cleared. 
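A reduced model of the corrected predicate follows. The FB bit position and the simplified "effective HCR" rule are assumptions for illustration; QEMU's arm_hcr_el2_eff() implements the full architectural rules.

    #include <stdbool.h>
    #include <stdint.h>

    #define HCR_FB (1ULL << 9)   /* assumed bit position for HCR_EL2.FB */

    /* Simplified: HCR_EL2 bits only take effect when EL2 is implemented
     * and the CPU is in Non-secure state. */
    static uint64_t hcr_el2_eff(uint64_t hcr, bool non_secure, bool have_el2)
    {
        return (non_secure && have_el2) ? hcr : 0;
    }

    static bool tlb_force_broadcast(int current_el, uint64_t hcr,
                                    bool non_secure, bool have_el2)
    {
        /* FB applies when NS is set, not, as before, when it is clear */
        return current_el == 1 &&
               (hcr_el2_eff(hcr, non_secure, have_el2) & HCR_FB);
    }

    int main(void)
    {
        /* NS EL1 with FB set -> broadcast; Secure EL1 -> no effect */
        return (tlb_force_broadcast(1, HCR_FB, true, true) &&
                !tlb_force_broadcast(1, HCR_FB, false, true)) ? 0 : 1;
    }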
Signed-off-by: Rémi Denis-Courmont Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/helper.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index 97bb6b8c01..dc51175bf0 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -731,13 +731,12 @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, /* * Non-IS variants of TLB operations are upgraded to - * IS versions if we are at NS EL1 and HCR_EL2.FB is set to + * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to * force broadcast of these operations. */ static bool tlb_force_broadcast(CPUARMState *env) { - return (env->cp15.hcr_el2 & HCR_FB) && - arm_current_el(env) == 1 && arm_is_secure_below_el3(env); + return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); } static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, -- cgit v1.2.3 From 9bd268bae5c4760870522292fb1d46e7da7e372a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?= Date: Mon, 2 Nov 2020 16:52:16 +0000 Subject: target/arm: fix LORID_EL1 access check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Secure mode is not exempted from checking SCR_EL3.TLOR, and in the future HCR_EL2.TLOR when S-EL2 is enabled. Signed-off-by: Rémi Denis-Courmont Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/helper.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index dc51175bf0..6854591986 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -6679,9 +6679,10 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) #endif /* Shared logic between LORID and the rest of the LOR* registers. - * Secure state has already been delt with. + * Secure state exclusion has already been dealt with. */ -static CPAccessResult access_lor_ns(CPUARMState *env) +static CPAccessResult access_lor_ns(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); @@ -6694,16 +6695,6 @@ static CPAccessResult access_lor_ns(CPUARMState *env) return CP_ACCESS_OK; } -static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_is_secure_below_el3(env)) { - /* Access ok in secure mode. */ - return CP_ACCESS_OK; - } - return access_lor_ns(env); -} - static CPAccessResult access_lor_other(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -6711,7 +6702,7 @@ static CPAccessResult access_lor_other(CPUARMState *env, /* Access denied in secure mode. 
*/ return CP_ACCESS_TRAP; } - return access_lor_ns(env); + return access_lor_ns(env, ri, isread); } /* @@ -6738,7 +6729,7 @@ static const ARMCPRegInfo lor_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, - .access = PL1_R, .accessfn = access_lorid, + .access = PL1_R, .accessfn = access_lor_ns, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; -- cgit v1.2.3 From 437588d81d99ac91cb1e4ff060610458e67852d5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:16 +0000 Subject: disas/capstone: Fix monitor disassembly of >32 bytes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we're using the capstone disassembler, disassembly of a run of instructions more than 32 bytes long disassembles the wrong data for instructions beyond the 32 byte mark: (qemu) xp /16x 0x100 0000000000000100: 0x00000005 0x54410001 0x00000001 0x00001000 0000000000000110: 0x00000000 0x00000004 0x54410002 0x3c000000 0000000000000120: 0x00000000 0x00000004 0x54410009 0x74736574 0000000000000130: 0x00000000 0x00000000 0x00000000 0x00000000 (qemu) xp /16i 0x100 0x00000100: 00000005 andeq r0, r0, r5 0x00000104: 54410001 strbpl r0, [r1], #-1 0x00000108: 00000001 andeq r0, r0, r1 0x0000010c: 00001000 andeq r1, r0, r0 0x00000110: 00000000 andeq r0, r0, r0 0x00000114: 00000004 andeq r0, r0, r4 0x00000118: 54410002 strbpl r0, [r1], #-2 0x0000011c: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c 0x00000120: 54410001 strbpl r0, [r1], #-1 0x00000124: 00000001 andeq r0, r0, r1 0x00000128: 00001000 andeq r1, r0, r0 0x0000012c: 00000000 andeq r0, r0, r0 0x00000130: 00000004 andeq r0, r0, r4 0x00000134: 54410002 strbpl r0, [r1], #-2 0x00000138: 3c000000 .byte 0x00, 0x00, 0x00, 0x3c 0x0000013c: 00000000 andeq r0, r0, r0 Here the disassembly of 0x120..0x13f is using the data that is in 0x104..0x123. This is caused by passing the wrong value to the read_memory_func(). The intention is that at this point in the loop the 'cap_buf' buffer already contains 'csize' bytes of data for the instruction at guest addr 'pc', and we want to read in an extra 'tsize' bytes. Those extra bytes are therefore at 'pc + csize', not 'pc'. On the first time through the loop 'csize' happens to be zero, so the initial read of 32 bytes into cap_buf is correct and as long as the disassembly never needs to read more data we return the correct information. Use the correct guest address in the call to read_memory_func(). Cc: qemu-stable@nongnu.org Fixes: https://bugs.launchpad.net/qemu/+bug/1900779 Signed-off-by: Peter Maydell Reviewed-by: Philippe Mathieu-Daudé Message-id: 20201022132445.25039-1-peter.maydell@linaro.org --- disas/capstone.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/disas/capstone.c b/disas/capstone.c index 0a9ef9c892..7462c0e305 100644 --- a/disas/capstone.c +++ b/disas/capstone.c @@ -286,7 +286,7 @@ bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count) /* Make certain that we can make progress. 
*/ assert(tsize != 0); - info->read_memory_func(pc, cap_buf + csize, tsize, info); + info->read_memory_func(pc + csize, cap_buf + csize, tsize, info); csize += tsize; if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) { -- cgit v1.2.3 From 744a790ec01a30033309e6a2155df4d61061e184 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Mon, 2 Nov 2020 16:52:16 +0000 Subject: hw/arm/smmuv3: Fix potential integer overflow (CID 1432363) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the BIT_ULL() macro to ensure we use 64-bit arithmetic. This fixes the following Coverity issue (OVERFLOW_BEFORE_WIDEN): CID 1432363 (#1 of 1): Unintentional integer overflow: overflow_before_widen: Potentially overflowing expression 1 << scale with type int (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type hwaddr (64 bits, unsigned). Signed-off-by: Philippe Mathieu-Daudé Acked-by: Eric Auger Message-id: 20201030144617.1535064-1-philmd@redhat.com Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- hw/arm/smmuv3.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 2017ba7a5a..22607c3784 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -17,6 +17,7 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" #include "hw/irq.h" #include "hw/sysbus.h" #include "migration/vmstate.h" @@ -864,7 +865,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) scale = CMD_SCALE(cmd); num = CMD_NUM(cmd); ttl = CMD_TTL(cmd); - num_pages = (num + 1) * (1 << (scale)); + num_pages = (num + 1) * BIT_ULL(scale); } if (type == SMMU_CMD_TLBI_NH_VA) { -- cgit v1.2.3 From 3f0b59070c89f2261ff5961980ff5203b0c6bea9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?= Date: Mon, 2 Nov 2020 16:52:16 +0000 Subject: hw/arm/boot: fix SVE for EL3 direct kernel boot MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When booting a CPU with EL3 using the -kernel flag, set up CPTR_EL3 so that SVE will not trap to EL3. Signed-off-by: Rémi Denis-Courmont Reviewed-by: Richard Henderson Message-id: 20201030151541.11976-1-remi@remlab.net Signed-off-by: Peter Maydell --- hw/arm/boot.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 3e9816af80..cf97600a91 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -742,6 +742,9 @@ static void do_cpu_reset(void *opaque) if (cpu_isar_feature(aa64_mte, cpu)) { env->cp15.scr_el3 |= SCR_ATA; } + if (cpu_isar_feature(aa64_sve, cpu)) { + env->cp15.cptr_el[3] |= CPTR_EZ; + } /* AArch64 kernels never boot in secure mode */ assert(!info->secure_boot); /* This hook is only supported for AArch32 currently: -- cgit v1.2.3 From 0080edc45e93324e085e93db89180a2945897c5a Mon Sep 17 00:00:00 2001 From: AlexChen Date: Mon, 2 Nov 2020 16:52:17 +0000 Subject: hw/display/omap_lcdc: Fix potential NULL pointer dereference In omap_update_display(), the pointer omap_lcd is dereferenced before being checked for validity, which may lead to a NULL pointer dereference. So move the assignment to surface after checking that omap_lcd is valid, and move the surface_bits_per_pixel(surface) check to after the surface assignment.
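The general shape of the fix, as a standalone sketch with stand-in types: a C initializer runs before any later check, so an initializer that chases a possibly-NULL pointer has to become an ordinary assignment placed after the check.

    #include <stddef.h>

    struct console { int bpp; };
    struct panel   { int enable; struct console *con; };

    static int console_bpp(struct console *con) { return con ? con->bpp : 0; }

    static void update_display(struct panel *p)
    {
        int bpp;                      /* no dereferencing initializer here */

        if (!p || !p->enable) {       /* validate the pointer first ... */
            return;
        }
        bpp = console_bpp(p->con);    /* ... then go through it */
        if (bpp == 0) {
            return;
        }
        /* drawing would proceed here */
    }

    int main(void)
    {
        update_display(NULL);         /* safe: returns before any deref */
        return 0;
    }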
Reported-by: Euler Robot Signed-off-by: AlexChen Message-id: 5F9CDB8A.9000001@huawei.com Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- hw/display/omap_lcdc.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hw/display/omap_lcdc.c b/hw/display/omap_lcdc.c index fa4a381db6..58e659c94f 100644 --- a/hw/display/omap_lcdc.c +++ b/hw/display/omap_lcdc.c @@ -78,14 +78,18 @@ static void omap_lcd_interrupts(struct omap_lcd_panel_s *s) static void omap_update_display(void *opaque) { struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque; - DisplaySurface *surface = qemu_console_surface(omap_lcd->con); + DisplaySurface *surface; draw_line_func draw_line; int size, height, first, last; int width, linesize, step, bpp, frame_offset; hwaddr frame_base; - if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable || - !surface_bits_per_pixel(surface)) { + if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable) { + return; + } + + surface = qemu_console_surface(omap_lcd->con); + if (!surface_bits_per_pixel(surface)) { return; } -- cgit v1.2.3 From 18520fa465a08b81972afd9a25056f102f0180c9 Mon Sep 17 00:00:00 2001 From: AlexChen Date: Mon, 2 Nov 2020 16:52:17 +0000 Subject: hw/display/exynos4210_fimd: Fix potential NULL pointer dereference MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In exynos4210_fimd_update(), the pointer s is dereferenced before being checked for validity, which may lead to NULL pointer dereference. So move the assignment to global_width after checking that s is valid. Reported-by: Euler Robot Signed-off-by: Alex Chen Reviewed-by: Philippe Mathieu-Daudé Message-id: 5F9F8D88.9030102@huawei.com Signed-off-by: Peter Maydell --- hw/display/exynos4210_fimd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c index 4c16e1f5a0..34a960a976 100644 --- a/hw/display/exynos4210_fimd.c +++ b/hw/display/exynos4210_fimd.c @@ -1275,12 +1275,14 @@ static void exynos4210_fimd_update(void *opaque) bool blend = false; uint8_t *host_fb_addr; bool is_dirty = false; - const int global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1; + int global_width; if (!s || !s->console || !s->enabled || surface_bits_per_pixel(qemu_console_surface(s->console)) == 0) { return; } + + global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1; exynos4210_update_resolution(s); surface = qemu_console_surface(s->console); -- cgit v1.2.3 From 7142eb9e24b4aa5118cd67038057f15694d782aa Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:17 +0000 Subject: target/arm: Get correct MMU index for other-security-state In arm_v7m_mmu_idx_for_secstate() we get the 'priv' level to pass to armv7m_mmu_idx_for_secstate_and_priv() by calling arm_current_el(). This is incorrect when the security state being queried is not the current one, because arm_current_el() uses the current security state to determine which of the banked CONTROL.nPRIV bits to look at. The effect was that if (for instance) Secure state was in privileged mode but Non-Secure was not then we would return the wrong MMU index. The only places where we are using this function in a way that could trigger this bug are for the stack loads during a v8M function-return and for the instruction fetch of a v8M SG insn. Fix the bug by expanding out the M-profile version of the arm_current_el() logic inline so it can use the passed-in secstate rather than env->v7m.secure.
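A reduced model of the corrected logic; the register layout and bank indexing below are simplified assumptions for the demo. The point is that privilege for the queried security state must come from that state's banked CONTROL.nPRIV, not from whichever bank happens to be live.

    #include <stdbool.h>

    struct v7m_state {
        bool handler_mode;     /* handler mode is always privileged */
        unsigned control[2];   /* banked CONTROL; [0]=NonSecure, [1]=Secure */
    };

    static bool priv_for_secstate(const struct v7m_state *env, int secstate)
    {
        /* bit 0 of CONTROL is nPRIV: 1 means unprivileged thread mode */
        return env->handler_mode || !(env->control[secstate] & 1);
    }

    int main(void)
    {
        /* Secure privileged, Non-secure unprivileged: the answer must
         * depend on which state is being asked about. */
        struct v7m_state env = { .handler_mode = false,
                                 .control = { 1, 0 } };
        return (priv_for_secstate(&env, 1) &&
                !priv_for_secstate(&env, 0)) ? 0 : 1;
    }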
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20201022164408.13214-1-peter.maydell@linaro.org --- target/arm/m_helper.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c index 036454234c..aad01ea012 100644 --- a/target/arm/m_helper.c +++ b/target/arm/m_helper.c @@ -2719,7 +2719,8 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, /* Return the MMU index for a v7M CPU in the specified security state */ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) { - bool priv = arm_current_el(env) != 0; + bool priv = arm_v7m_is_handler_mode(env) || + !(env->v7m.control[secstate] & 1); return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv); } -- cgit v1.2.3 From 76346b6264a9b01979829a5d4366c3799e2dd15a Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:17 +0000 Subject: configure: Test that gio libs from pkg-config work MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On some hosts (eg Ubuntu Bionic) pkg-config returns a set of libraries for gio-2.0 which don't actually work when compiling statically. (Specifically, the returned library string includes -lmount, but not -lblkid which -lmount depends upon, so linking fails due to missing symbols.) Check that the libraries work, and don't enable gio if they don't, in the same way we do for gnutls. Signed-off-by: Peter Maydell Reviewed-by: Paolo Bonzini Reviewed-by: Philippe Mathieu-Daudé Message-id: 20200928160402.7961-1-peter.maydell@linaro.org --- configure | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/configure b/configure index 6df4306c88..2c3c69f118 100755 --- a/configure +++ b/configure @@ -3489,13 +3489,21 @@ if test "$static" = yes && test "$mingw32" = yes; then fi if $pkg_config --atleast-version=$glib_req_ver gio-2.0; then - gio=yes gio_cflags=$($pkg_config --cflags gio-2.0) gio_libs=$($pkg_config --libs gio-2.0) gdbus_codegen=$($pkg_config --variable=gdbus_codegen gio-2.0) if [ ! -x "$gdbus_codegen" ]; then gdbus_codegen= fi + # Check that the libraries actually work -- Ubuntu 18.04 ships + # with pkg-config --static --libs data for gio-2.0 that is missing + # -lblkid and will give a link error. + write_c_skeleton + if compile_prog "" "gio_libs" ; then + gio=yes + else + gio=no + fi else gio=no fi -- cgit v1.2.3 From c20a135a7a14d846699b0cfc9f74bcc2a226bee6 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:17 +0000 Subject: hw/intc/arm_gicv3_cpuif: Make GIC maintenance interrupts work In gicv3_init_cpuif() we copy the ARMCPU gicv3_maintenance_interrupt into the GICv3CPUState struct's maintenance_irq field. This will only work if the board happens to have already wired up the CPU maintenance IRQ before the GIC was realized. Unfortunately this is not the case for the 'virt' board, and so the value that gets copied is NULL (since a qemu_irq is really a pointer to an IRQState struct under the hood). The effect is that the CPU interface code never actually raises the maintenance interrupt line. Instead, since the GICv3CPUState has a pointer to the CPUState, make the dereference at the point where we want to raise the interrupt, to avoid an implicit requirement on board code to wire things up in a particular order. 
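The underlying trap, sketched standalone with stand-in names (this is not QEMU's API): a qemu_irq is just a pointer, so copying it at realize time snapshots whatever happens to be wired at that moment, possibly NULL. Keeping only the CPU pointer and dereferencing at raise time picks up lines wired later.

    #include <stdio.h>

    typedef struct IRQState { const char *name; } *demo_irq;

    struct cpu       { demo_irq maint_irq; };   /* wired by the board */
    struct gic_state { struct cpu *cpu; };      /* keep the CPU, not the irq */

    static void demo_set_irq(demo_irq irq, int level)
    {
        if (irq) {
            printf("%s <- %d\n", irq->name, level);
        }
    }

    static void raise_maintenance(struct gic_state *gs, int level)
    {
        /* dereference now, at raise time, so late wiring is visible */
        demo_set_irq(gs->cpu->maint_irq, level);
    }

    int main(void)
    {
        struct IRQState line = { "maintenance" };
        struct cpu c = { 0 };           /* not wired yet at "realize" */
        struct gic_state gs = { &c };
        c.maint_irq = &line;            /* board wires it up later */
        raise_maintenance(&gs, 1);      /* still reaches the line */
        return 0;
    }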
Reported-by: Jose Martins Signed-off-by: Peter Maydell Message-id: 20201009153904.28529-1-peter.maydell@linaro.org Reviewed-by: Luc Michel --- hw/intc/arm_gicv3_cpuif.c | 5 ++--- include/hw/intc/arm_gicv3_common.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 08e000e33c..43ef1d7a84 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -399,6 +399,7 @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs) int irqlevel = 0; int fiqlevel = 0; int maintlevel = 0; + ARMCPU *cpu = ARM_CPU(cs->cpu); idx = hppvi_index(cs); trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx); @@ -424,7 +425,7 @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs) qemu_set_irq(cs->parent_vfiq, fiqlevel); qemu_set_irq(cs->parent_virq, irqlevel); - qemu_set_irq(cs->maintenance_irq, maintlevel); + qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel); } static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -2624,8 +2625,6 @@ void gicv3_init_cpuif(GICv3State *s) && cpu->gic_num_lrs) { int j; - cs->maintenance_irq = cpu->gicv3_maintenance_interrupt; - cs->num_list_regs = cpu->gic_num_lrs; cs->vpribits = cpu->gic_vpribits; cs->vprebits = cpu->gic_vprebits; diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index 0331b0ffdb..91491a2f66 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -153,7 +153,6 @@ struct GICv3CPUState { qemu_irq parent_fiq; qemu_irq parent_virq; qemu_irq parent_vfiq; - qemu_irq maintenance_irq; /* Redistributor */ uint32_t level; /* Current IRQ level */ -- cgit v1.2.3 From 92bb29f9b2c3d4a98eef5f0db935d4be291eec72 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:18 +0000 Subject: scripts/kerneldoc: For Sphinx 3 use c:macro for macros with arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The kerneldoc script currently emits Sphinx markup for a macro with arguments that uses the c:function directive. This is correct for Sphinx versions earlier than Sphinx 3, where c:macro doesn't allow documentation of macros with arguments and c:function is not picky about the syntax of what it is passed. However, in Sphinx 3 the c:macro directive was enhanced to support macros with arguments, and c:function was made more picky about what syntax it accepted. When kerneldoc is told that it needs to produce output for Sphinx 3 or later, make it emit c:function only for functions and c:macro for macros with arguments. We assume that anything with a return type is a function and anything without is a macro. This fixes the Sphinx error: /home/petmay01/linaro/qemu-from-laptop/qemu/docs/../include/qom/object.h:155:Error in declarator If declarator-id with parameters (e.g., 'void f(int arg)'): Invalid C declaration: Expected identifier in nested name. [error at 25] DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME) -------------------------^ If parenthesis in noptr-declarator (e.g., 'void (*f(int arg))(double)'): Error in declarator or parameters Invalid C declaration: Expecting "(" in parameters. [error at 39] DECLARE_INSTANCE_CHECKER ( InstanceType, OBJ_NAME, TYPENAME) ---------------------------------------^ Signed-off-by: Peter Maydell Reviewed-by: Daniel P. 
Berrangé Tested-by: Stefan Hajnoczi Message-id: 20201030174700.7204-2-peter.maydell@linaro.org --- scripts/kernel-doc | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 0ff62bb6a2..4fbaaa05e3 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -839,7 +839,23 @@ sub output_function_rst(%) { output_highlight_rst($args{'purpose'}); $start = "\n\n**Syntax**\n\n ``"; } else { - print ".. c:function:: "; + if ((split(/\./, $sphinx_version))[0] >= 3) { + # Sphinx 3 and later distinguish macros and functions and + # complain if you use c:function with something that's not + # syntactically valid as a function declaration. + # We assume that anything with a return type is a function + # and anything without is a macro. + if ($args{'functiontype'} ne "") { + print ".. c:function:: "; + } else { + print ".. c:macro:: "; + } + } else { + # Older Sphinx don't support documenting macros that take + # arguments with c:macro, and don't complain about the use + # of c:function for this. + print ".. c:function:: "; + } } if ($args{'functiontype'} ne "") { $start .= $args{'functiontype'} . " " . $args{'function'} . " ("; -- cgit v1.2.3 From 5a4b0d411283c92c5a394aaa4b7c19f6568a54fd Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:18 +0000 Subject: qemu-option-trace.rst.inc: Don't use option:: markup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sphinx 3.2 is pickier than earlier versions about the option:: markup, and complains about our usage in qemu-option-trace.rst: ../../docs/qemu-option-trace.rst.inc:4:Malformed option description '[enable=]PATTERN', should look like "opt", "-opt args", "--opt args", "/opt args" or "+opt args" In this file, we're really trying to document the different parts of the top-level --trace option, which qemu-nbd.rst and qemu-img.rst have already introduced with an option:: markup. So it's not right to use option:: here anyway. Switch to a different markup (definition lists) which gives about the same formatted output. (Unlike option::, this markup doesn't produce index entries; but at the moment we don't do anything much with indexes anyway, and in any case I think it doesn't make much sense to have individual index entries for the sub-parts of the --trace option.) Signed-off-by: Peter Maydell Reviewed-by: Daniel P. Berrangé Tested-by: Stefan Hajnoczi Message-id: 20201030174700.7204-3-peter.maydell@linaro.org --- docs/qemu-option-trace.rst.inc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/qemu-option-trace.rst.inc b/docs/qemu-option-trace.rst.inc index 7e09773a9c..d7acbe67f7 100644 --- a/docs/qemu-option-trace.rst.inc +++ b/docs/qemu-option-trace.rst.inc @@ -1,7 +1,7 @@ Specify tracing options. -.. option:: [enable=]PATTERN +``[enable=]PATTERN`` Immediately enable events matching *PATTERN* (either event name or a globbing pattern). This option is only @@ -11,7 +11,7 @@ Specify tracing options. Use :option:`-trace help` to print a list of names of trace points. -.. option:: events=FILE +``events=FILE`` Immediately enable events listed in *FILE*. The file must contain one event name (as listed in the ``trace-events-all`` @@ -19,7 +19,7 @@ Specify tracing options. available if QEMU has been compiled with the ``simple``, ``log`` or ``ftrace`` tracing backend. -.. option:: file=FILE +``file=FILE`` Log output traces to *FILE*. 
This option is only available if QEMU has been compiled with -- cgit v1.2.3 From ffb4fbf90a2f63c9cb33e4bb9f854c79bf04ca4a Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 2 Nov 2020 16:52:18 +0000 Subject: tests/qtest/npcm7xx_rng-test: Disable randomness tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The randomness tests in the NPCM7xx RNG test fail intermittently but fairly frequently. On my machine running the test in a loop: while QTEST_QEMU_BINARY=./qemu-system-aarch64 ./tests/qtest/npcm7xx_rng-test; do true; done will fail in less than a minute with an error like: ERROR:../../tests/qtest/npcm7xx_rng-test.c:256:test_first_byte_runs: assertion failed (calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE) > 0.01): (0.00286205989 > 0.01) (Failures have been observed on all 4 of the randomness tests, not just first_byte_runs.) It's not clear why these tests are failing like this, but intermittent failures make CI and merge testing awkward, so disable running them unless a developer specifically sets QEMU_TEST_FLAKY_RNG_TESTS when running the test suite, until we work out the cause. Signed-off-by: Peter Maydell Reviewed-by: Philippe Mathieu-Daudé Message-id: 20201102152454.8287-1-peter.maydell@linaro.org Reviewed-by: Havard Skinnemoen --- tests/qtest/npcm7xx_rng-test.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c index da6e639bf6..e7cde85fbb 100644 --- a/tests/qtest/npcm7xx_rng-test.c +++ b/tests/qtest/npcm7xx_rng-test.c @@ -265,10 +265,16 @@ int main(int argc, char **argv) qtest_add_func("npcm7xx_rng/enable_disable", test_enable_disable); qtest_add_func("npcm7xx_rng/rosel", test_rosel); - qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit); - qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs); - qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit); - qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs); + /* + * These tests fail intermittently; only run them on explicit + * request until we figure out why. + */ + if (getenv("QEMU_TEST_FLAKY_RNG_TESTS")) { + qtest_add_func("npcm7xx_rng/continuous/monobit", test_continuous_monobit); + qtest_add_func("npcm7xx_rng/continuous/runs", test_continuous_runs); + qtest_add_func("npcm7xx_rng/first_byte/monobit", test_first_byte_monobit); + qtest_add_func("npcm7xx_rng/first_byte/runs", test_first_byte_runs); + } qtest_start("-machine npcm750-evb"); ret = g_test_run(); -- cgit v1.2.3
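For the curious, the statistic behind the disabled checks, sketched independently of QEMU's qtest code (this is the standard frequency/monobit computation, not a copy of the test): count one-bits and turn the deviation from 50% into a p-value. A genuine uniform source will occasionally produce a p-value below the 0.01 threshold, which is exactly the flakiness described above.

    /* link with -lm */
    #include <math.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* p-value of the monobit test: small values mean the bit stream is
     * unlikely to have come from a uniform random source. */
    static double monobit_p(const uint8_t *buf, size_t len)
    {
        long ones = 0;
        for (size_t i = 0; i < len; i++) {
            for (int b = 0; b < 8; b++) {
                ones += (buf[i] >> b) & 1;
            }
        }
        long bits = (long)len * 8;
        double s_obs = fabs(2.0 * ones - bits) / sqrt((double)bits);
        return erfc(s_obs / sqrt(2.0));
    }

    int main(void)
    {
        uint8_t buf[64];
        for (size_t i = 0; i < sizeof(buf); i++) {
            buf[i] = 0xaa;   /* perfectly balanced bits -> p = 1.0 */
        }
        printf("p = %f\n", monobit_p(buf, sizeof(buf)));
        return 0;
    }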