author     Peter Maydell <peter.maydell@linaro.org>   2019-06-11 16:39:51 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2019-06-13 15:14:06 +0100
commit     b623d803dda805f07aadcbf098961fde27315c19
tree       0d265674ab44439f5b0a48acf0af224201e1b596 /target/arm
parent     386bba2368842fc74388a3c1651c6c0c0c70adbd
target/arm: Convert the VCVT-from-f16 insns to decodetree
Convert the VCVTT and VCVTB instructions that deal with conversion
from half-precision floats to f32 or f64 to decodetree.

Since we're no longer constrained to the old decoder's style of going
via cpu_F0s and cpu_F0d, we can perform a direct 16-bit load of the
relevant half of the input single-precision register, rather than
loading the full 32 bits and then doing a separate shift or
zero-extension.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
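
As a rough illustration of the change described above (not part of the patch;
both fragments are paraphrased from the code removed from translate.c and the
code added to translate-vfp.inc.c in the diff below, and the if (top) split
stands in for the old decoder's separate case 4 / case 5 arms), the old path
pulled the whole 32-bit single into a TCG temp and then isolated one half,
whereas the decodetree path loads only the 16 bits it needs:

    /* Old style (removed): load the full 32-bit Vm, then extract one half */
    tmp = gen_vfp_mrs();
    if (top) {                           /* VCVTT: top 16 bits */
        tcg_gen_shri_i32(tmp, tmp, 16);
    } else {                             /* VCVTB: bottom 16 bits */
        tcg_gen_ext16u_i32(tmp, tmp);
    }

    /* New style (added): direct 16-bit load of the selected half of Vm */
    tmp = tcg_temp_new_i32();
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));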
Diffstat (limited to 'target/arm')
-rw-r--r--   target/arm/translate-vfp.inc.c   82
-rw-r--r--   target/arm/translate.c           56
-rw-r--r--   target/arm/vfp.decode             6
3 files changed, 89 insertions, 55 deletions
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index ebde86210a..732bf6020a 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -31,6 +31,26 @@
 #include "decode-vfp-uncond.inc.c"
 
 /*
+ * Return the offset of a 16-bit half of the specified VFP single-precision
+ * register. If top is true, returns the top 16 bits; otherwise the bottom
+ * 16 bits.
+ */
+static inline long vfp_f16_offset(unsigned reg, bool top)
+{
+    long offs = vfp_reg_offset(false, reg);
+#ifdef HOST_WORDS_BIGENDIAN
+    if (!top) {
+        offs += 2;
+    }
+#else
+    if (top) {
+        offs += 2;
+    }
+#endif
+    return offs;
+}
+
+/*
  * Check that VFP access is enabled. If it is, do the necessary
  * M-profile lazy-FP handling and then return true.
  * If not, emit code to generate an appropriate exception and
@@ -2013,3 +2033,65 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
 
     return true;
 }
+
+static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 ahp_mode;
+    TCGv_i32 tmp;
+
+    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fpst = get_fpstatus_ptr(false);
+    ahp_mode = get_ahp_flag();
+    tmp = tcg_temp_new_i32();
+    /* The T bit tells us if we want the low or high 16 bits of Vm */
+    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_i32(ahp_mode);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 ahp_mode;
+    TCGv_i32 tmp;
+    TCGv_i64 vd;
+
+    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fpst = get_fpstatus_ptr(false);
+    ahp_mode = get_ahp_flag();
+    tmp = tcg_temp_new_i32();
+    /* The T bit tells us if we want the low or high 16 bits of Vm */
+    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+    vd = tcg_temp_new_i64();
+    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
+    neon_store_reg64(vd, a->vd);
+    tcg_temp_free_i32(ahp_mode);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i64(vd);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index eb16269021..f22b8ddf68 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3066,7 +3066,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             return 1;
         case 15:
             switch (rn) {
-            case 0 ... 3:
+            case 0 ... 5:
             case 8 ... 11:
                 /* Already handled by decodetree */
                 return 1;
@@ -3080,24 +3080,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         if (op == 15) {
             /* rn is opcode, encoded as per VFP_SREG_N. */
             switch (rn) {
-            case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
-            case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
-                /*
-                 * VCVTB, VCVTT: only present with the halfprec extension
-                 * UNPREDICTABLE if bit 8 is set prior to ARMv8
-                 * (we choose to UNDEF)
-                 */
-                if (dp) {
-                    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
-                        return 1;
-                    }
-                } else {
-                    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
-                        return 1;
-                    }
-                }
-                rm_is_dp = false;
-                break;
             case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
             case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                 if (dp) {
@@ -3239,42 +3221,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
                 switch (op) {
                 case 15: /* extension space */
                     switch (rn) {
-                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
-                    {
-                        TCGv_ptr fpst = get_fpstatus_ptr(false);
-                        TCGv_i32 ahp_mode = get_ahp_flag();
-                        tmp = gen_vfp_mrs();
-                        tcg_gen_ext16u_i32(tmp, tmp);
-                        if (dp) {
-                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
-                                                           fpst, ahp_mode);
-                        } else {
-                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
-                                                           fpst, ahp_mode);
-                        }
-                        tcg_temp_free_i32(ahp_mode);
-                        tcg_temp_free_ptr(fpst);
-                        tcg_temp_free_i32(tmp);
-                        break;
-                    }
-                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
-                    {
-                        TCGv_ptr fpst = get_fpstatus_ptr(false);
-                        TCGv_i32 ahp = get_ahp_flag();
-                        tmp = gen_vfp_mrs();
-                        tcg_gen_shri_i32(tmp, tmp, 16);
-                        if (dp) {
-                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
-                                                           fpst, ahp);
-                        } else {
-                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
-                                                           fpst, ahp);
-                        }
-                        tcg_temp_free_i32(tmp);
-                        tcg_temp_free_i32(ahp);
-                        tcg_temp_free_ptr(fpst);
-                        break;
-                    }
                     case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                     {
                         TCGv_ptr fpst = get_fpstatus_ptr(false);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index 9db7aa7021..53d9544f7c 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -181,3 +181,9 @@ VCMP_sp      ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
              vd=%vd_sp vm=%vm_sp
 VCMP_dp      ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
              vd=%vd_dp vm=%vm_dp
+
+# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp
+VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
+             vd=%vd_dp vm=%vm_sp
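
A side note on the vfp_f16_offset() helper added above: the HOST_WORDS_BIGENDIAN
adjustment is needed because the byte offset of each 16-bit half of a 32-bit
register slot depends on host endianness. A minimal standalone sketch
(illustration only, not part of the patch; all names are local to this example):

/*
 * Why vfp_f16_offset() adds 2 for the top half on little-endian hosts
 * but for the bottom half on big-endian hosts.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t s_reg = 0xAAAABBBBu;    /* top half 0xAAAA, bottom half 0xBBBB */
    uint16_t half0, half2;

    /* Read the two 16-bit halves at byte offsets 0 and 2, as the TCG load does */
    memcpy(&half0, (const char *)&s_reg + 0, sizeof(half0));
    memcpy(&half2, (const char *)&s_reg + 2, sizeof(half2));

    /*
     * On a little-endian host offset 0 holds the bottom half (0xBBBB) and
     * offset 2 the top half (0xAAAA); a big-endian host is the reverse,
     * which is exactly what the #ifdef in vfp_f16_offset() accounts for.
     */
    printf("byte offset 0 -> 0x%04x, byte offset 2 -> 0x%04x\n", half0, half2);
    return 0;
}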