author    Will Newton <will.newton@linaro.org>    2014-01-31 14:47:35 +0000
committer Peter Maydell <peter.maydell@linaro.org>    2014-01-31 14:47:35 +0000
commit    c9975a838736c9479b2e17fff7e50a2a83b1009e (patch)
tree      19effbf55bc74b144270d59d81d0d3fa03461916
parent    34f7b0a276ed2679c5e420ed1bc6525689ac5abd (diff)
target-arm: Add AArch32 FP VCVTA, VCVTN, VCVTP and VCVTM
Add support for the AArch32 floating-point VCVTA, VCVTN, VCVTP and VCVTM instructions.

Signed-off-by: Will Newton <will.newton@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--    target-arm/translate.c    61
1 file changed, 61 insertions(+), 0 deletions(-)
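
The four instructions differ only in which rounding direction they apply when converting floating point to integer: VCVTA rounds to nearest with ties away from zero, VCVTN to nearest with ties to even, VCVTP towards plus infinity and VCVTM towards minus infinity. As a rough host-side illustration only (not part of this patch, and not how QEMU models it internally), the C99 fenv.h interface can reproduce three of the four directions; the patch itself instead switches the softfloat rounding mode via gen_helper_set_rmode around the existing vfp_to*l helpers, as shown in the diff below.

#include <fenv.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: convert float -> int32_t under an explicit rounding
 * direction, then restore the previous mode (handle_vcvt below does the
 * analogous save/restore of the softfloat rounding mode).
 * VCVTA (ties away from zero) has no FE_* equivalent, so it is omitted.
 */
static int32_t convert_with_rounding(float f, int fe_mode)
{
    int saved = fegetround();
    fesetround(fe_mode);            /* select the rounding direction */
    int32_t r = (int32_t)rintf(f);  /* rintf() honours the current mode */
    fesetround(saved);              /* put the old mode back */
    return r;
}

int main(void)
{
    float x = 2.5f;
    printf("VCVTN-like (nearest, ties even): %d\n",
           convert_with_rounding(x, FE_TONEAREST)); /* -> 2 */
    printf("VCVTP-like (towards +inf):       %d\n",
           convert_with_rounding(x, FE_UPWARD));    /* -> 3 */
    printf("VCVTM-like (towards -inf):       %d\n",
           convert_with_rounding(x, FE_DOWNWARD));  /* -> 2 */
    return 0;
}
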
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 5a8ca240c2..0fcc159350 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -2797,6 +2797,63 @@ static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
return 0;
}
+static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
+ int rounding)
+{
+ bool is_signed = extract32(insn, 7, 1);
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ TCGv_i32 tcg_rmode, tcg_shift;
+
+ tcg_shift = tcg_const_i32(0);
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+ if (dp) {
+ TCGv_i64 tcg_double, tcg_res;
+ TCGv_i32 tcg_tmp;
+ /* Rd is encoded as a single precision register even when the source
+ * is double precision.
+ */
+ rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
+ tcg_double = tcg_temp_new_i64();
+ tcg_res = tcg_temp_new_i64();
+ tcg_tmp = tcg_temp_new_i32();
+ tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
+ if (is_signed) {
+ gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
+ }
+ tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+ tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
+ tcg_temp_free_i32(tcg_tmp);
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single, tcg_res;
+ tcg_single = tcg_temp_new_i32();
+ tcg_res = tcg_temp_new_i32();
+ tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
+ if (is_signed) {
+ gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
+ }
+ tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_single);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+
+ tcg_temp_free_i32(tcg_shift);
+
+ tcg_temp_free_ptr(fpst);
+
+ return 0;
+}
/* Table for converting the most common AArch32 encoding of
* rounding mode to arm_fprounding order (which matches the
@@ -2835,6 +2892,10 @@ static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
/* VRINTA, VRINTN, VRINTP, VRINTM */
int rounding = fp_decode_rm[extract32(insn, 16, 2)];
return handle_vrint(insn, rd, rm, dp, rounding);
+ } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
+ /* VCVTA, VCVTN, VCVTP, VCVTM */
+ int rounding = fp_decode_rm[extract32(insn, 16, 2)];
+ return handle_vcvt(insn, rd, rm, dp, rounding);
}
return 1;
}
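
For reference, a minimal standalone sketch (not part of the patch) of how the decode above distinguishes these encodings: the mask/value pair 0x0fbc0e50/0x0ebc0a40 leaves bits [17:16] free, and those two bits index fp_decode_rm to pick the rounding mode. The mode names in the table below assume QEMU's VCVTA/N/P/M ordering (tie-away, tie-even, +inf, -inf); extract_bits() is a local stand-in for QEMU's extract32().

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's extract32(); valid for 0 < length < 32. */
static uint32_t extract_bits(uint32_t value, int start, int length)
{
    return (value >> start) & ((1u << length) - 1);
}

int main(void)
{
    /* Hypothetical VCVTP-style word: base pattern with the RM field = 0b10. */
    uint32_t insn = 0x0ebc0a40 | (2u << 16);

    if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {        /* VCVTA/N/P/M group */
        uint32_t rm_field = extract_bits(insn, 16, 2);
        static const char *const mode[4] = {
            "to nearest, ties away from zero (VCVTA)",
            "to nearest, ties to even (VCVTN)",
            "towards +infinity (VCVTP)",
            "towards -infinity (VCVTM)",
        };
        printf("rounding mode: %s\n", mode[rm_field]);
    }
    return 0;
}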