Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/helper-a64.c    | 23
-rw-r--r--  target-arm/helper-a64.h    |  1
-rw-r--r--  target-arm/translate-a64.c | 20
3 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index cea2468b30..ec0258295f 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -413,3 +413,26 @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
         return make_float64(sbit | (~exp & 0x7ffULL) << 52);
     }
 }
+
+float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
+{
+    /* Von Neumann rounding is implemented by using round-to-zero
+     * and then setting the LSB of the result if Inexact was raised.
+     */
+    float32 r;
+    float_status *fpst = &env->vfp.fp_status;
+    float_status tstat = *fpst;
+    int exflags;
+
+    set_float_rounding_mode(float_round_to_zero, &tstat);
+    set_float_exception_flags(0, &tstat);
+    r = float64_to_float32(a, &tstat);
+    r = float32_maybe_silence_nan(r);
+    exflags = get_float_exception_flags(&tstat);
+    if (exflags & float_flag_inexact) {
+        r = make_float32(float32_val(r) | 1);
+    }
+    exflags |= get_float_exception_flags(fpst);
+    set_float_exception_flags(exflags, fpst);
+    return r;
+}
diff --git a/target-arm/helper-a64.h b/target-arm/helper-a64.h
index 8cbc3492d4..3f05bedcca 100644
--- a/target-arm/helper-a64.h
+++ b/target-arm/helper-a64.h
@@ -45,3 +45,4 @@ DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
 DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64)
 DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
 DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 5f4c6bf22f..235f880589 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -7278,6 +7278,13 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
                 tcg_temp_free_i32(tcg_hi);
             }
             break;
+        case 0x56: /* FCVTXN, FCVTXN2 */
+            /* 64 bit to 32 bit float conversion
+             * with von Neumann rounding (round to odd)
+             */
+            assert(size == 2);
+            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
+            break;
         default:
             g_assert_not_reached();
         }
@@ -7391,6 +7398,12 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
         rmode = FPROUNDING_TIEAWAY;
         break;
     case 0x56: /* FCVTXN, FCVTXN2 */
+        if (size == 2) {
+            unallocated_encoding(s);
+            return;
+        }
+        handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
+        return;
     case 0x7d: /* FRSQRTE */
         unsupported_encoding(s, insn);
         return;
@@ -9244,6 +9257,12 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
     case 0x3d: /* FRECPE */
         handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
         return;
+    case 0x56: /* FCVTXN, FCVTXN2 */
+        if (size == 2) {
+            unallocated_encoding(s);
+            return;
+        }
+        /* fall through */
     case 0x16: /* FCVTN, FCVTN2 */
         /* handle_2misc_narrow does a 2*size -> size operation, but these
          * instructions encode the source size rather than dest size.
@@ -9277,7 +9296,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
             return;
         }
         break;
-    case 0x56: /* FCVTXN, FCVTXN2 */
    case 0x7c: /* URSQRTE */
    case 0x7d: /* FRSQRTE */
        unsupported_encoding(s, insn);
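
Note on the rounding trick: the helper above performs the conversion with round-to-zero under a scratch float_status, ORs 1 into the result's LSB whenever the conversion was inexact, and then merges the scratch exception flags back into the live fp_status. Below is a minimal standalone sketch of the same "round to odd" idea using the host's <fenv.h>; it is not QEMU code, it does not use softfloat, and it omits the NaN silencing and flag accumulation the real helper performs. The function name cvt_f64_to_f32_round_to_odd is invented for illustration, and the sketch assumes an IEEE-754 host whose rounding-mode control and FE_INEXACT flag behave as specified.

    /* Illustration only: double -> float conversion with round to odd
     * (von Neumann rounding), assuming IEEE-754 host arithmetic.
     */
    #include <fenv.h>
    #include <stdint.h>
    #include <string.h>

    #pragma STDC FENV_ACCESS ON   /* required by the C standard for fenv use;
                                   * some compilers ignore it with a warning */

    static float cvt_f64_to_f32_round_to_odd(double a)
    {
        int old_rm = fegetround();
        float r;

        feclearexcept(FE_INEXACT);
        fesetround(FE_TOWARDZERO);
        r = (float)a;                   /* truncating (round-to-zero) conversion */
        fesetround(old_rm);

        if (fetestexcept(FE_INEXACT)) { /* result was not exact: force LSB to 1 */
            uint32_t bits;
            memcpy(&bits, &r, sizeof(bits));
            bits |= 1;
            memcpy(&r, &bits, sizeof(r));
        }
        return r;
    }

Round to odd is useful because the narrowed result may be rounded again later (for example down to half precision): forcing the LSB on inexact intermediate results keeps the second rounding from introducing a double-rounding error, which is the usual motivation given for FCVTXN.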