 target/arm/cpu.h               |   6 +
 target/arm/translate-vfp.inc.c | 226 ++++++++++++++++++++++++++++++++
 target/arm/translate.c         |  19 ++++++---
 target/arm/vfp.decode          |   7 +
 4 files changed, 252 insertions(+), 6 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index dc50c86e31..9229862421 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3377,6 +3377,12 @@ static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
     return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;
 }
 
+static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
+{
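+    /* Return true if the CPU supports VFP short-vector operation */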
+    return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
+}
+
 /*
  * We always set the FP and SIMD FP16 fields to indicate identical
  * levels of support (assuming SIMD is implemented at all), so
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 9729946d73..4f922dc840 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -1098,3 +1098,229 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
 
     return true;
 }
+
+/*
+ * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
+ * The callback should emit code to write a value to vd. If
+ * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
+ * will contain the old value of the relevant VFP register;
+ * otherwise it must be written to only.
+ */
+typedef void VFPGen3OpSPFn(TCGv_i32 vd,
+                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
+typedef void VFPGen3OpDPFn(TCGv_i64 vd,
+                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
+
+/*
+ * Perform a 3-operand VFP data processing instruction. fn is the
+ * callback to do the actual operation; this function deals with the
+ * code to handle looping around for VFP vector processing.
+ */
+static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
+                          int vd, int vn, int vm, bool reads_vd)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i32 f0, f1, fd;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
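+    /*
+     * Short-vector operation treats the 32 single-precision registers
+     * as four banks of eight (s0-s7, s8-s15, s16-s23, s24-s31).
+     * A destination in the first bank makes the operation scalar;
+     * a Vm in the first bank supplies a scalar operand to an
+     * otherwise vector operation.
+     */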
+    if (veclen > 0) {
+        bank_mask = 0x18;
+
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = s->vec_stride + 1;
+
+            if ((vm & bank_mask) == 0) {
+                /* mixed scalar/vector */
+                delta_m = 0;
+            } else {
+                /* vector */
+                delta_m = delta_d;
+            }
+        }
+    }
+
+    f0 = tcg_temp_new_i32();
+    f1 = tcg_temp_new_i32();
+    fd = tcg_temp_new_i32();
+    fpst = get_fpstatus_ptr(0);
+
+    neon_load_reg32(f0, vn);
+    neon_load_reg32(f1, vm);
+
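+    /*
+     * Loop over the whole vector: for example, with FPSCR.LEN field 3
+     * and FPSCR.STRIDE field 0 (a length-4, stride-1 vector), a VMLA
+     * with vd = s8 updates s8, s9, s10 and s11 in turn.
+     */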
+    for (;;) {
+        if (reads_vd) {
+            neon_load_reg32(fd, vd);
+        }
+        fn(fd, f0, f1, fpst);
+        neon_store_reg32(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+
+        /* Advance to the next registers, wrapping within each bank */
+        veclen--;
+        vd = ((vd + delta_d) & 0x7) | (vd & ~0x7);
+        vn = ((vn + delta_d) & 0x7) | (vn & ~0x7);
+        neon_load_reg32(f0, vn);
+        if (delta_m) {
+            vm = ((vm + delta_m) & 0x7) | (vm & ~0x7);
+            neon_load_reg32(f1, vm);
+        }
+    }
+
+    tcg_temp_free_i32(f0);
+    tcg_temp_free_i32(f1);
+    tcg_temp_free_i32(fd);
+    tcg_temp_free_ptr(fpst);
+
+    return true;
+}
+
+static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
+                          int vd, int vn, int vm, bool reads_vd)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i64 f0, f1, fd;
+    TCGv_ptr fpst;
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
+        return false;
+    }
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
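+    /*
+     * For double precision the banks are four registers wide:
+     * d0-d3 is the scalar bank, then d4-d7 and so on upwards.
+     */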
+    if (veclen > 0) {
+        bank_mask = 0xc;
+
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = (s->vec_stride >> 1) + 1;
+
+            if ((vm & bank_mask) == 0) {
+                /* mixed scalar/vector */
+                delta_m = 0;
+            } else {
+                /* vector */
+                delta_m = delta_d;
+            }
+        }
+    }
+
+    f0 = tcg_temp_new_i64();
+    f1 = tcg_temp_new_i64();
+    fd = tcg_temp_new_i64();
+    fpst = get_fpstatus_ptr(0);
+
+    neon_load_reg64(f0, vn);
+    neon_load_reg64(f1, vm);
+
+    for (;;) {
+        if (reads_vd) {
+            neon_load_reg64(fd, vd);
+        }
+        fn(fd, f0, f1, fpst);
+        neon_store_reg64(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+        /* Advance to the next registers, wrapping within each bank */
+        veclen--;
+        vd = ((vd + delta_d) & 0x3) | (vd & ~0x3);
+        vn = ((vn + delta_d) & 0x3) | (vn & ~0x3);
+        neon_load_reg64(f0, vn);
+        if (delta_m) {
+            vm = ((vm + delta_m) & 0x3) | (vm & ~0x3);
+            neon_load_reg64(f1, vm);
+        }
+    }
+
+    tcg_temp_free_i64(f0);
+    tcg_temp_free_i64(f1);
+    tcg_temp_free_i64(fd);
+    tcg_temp_free_ptr(fpst);
+
+    return true;
+}
+
+static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* Note that order of inputs to the add matters for NaNs */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
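+    /*
+     * VMLA computes vd = vd + (vn * vm): keep vd as the first
+     * addend so that NaN propagation behaves exactly as it did
+     * in the old gen_vfp_add() based decoder.
+     */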
+    gen_helper_vfp_muls(tmp, vn, vm, fpst);
+    gen_helper_vfp_adds(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /* Note that order of inputs to the add matters for NaNs */
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    gen_helper_vfp_muld(tmp, vn, vm, fpst);
+    gen_helper_vfp_addd(vd, vd, tmp, fpst);
+    tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
+{
+    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 4a462fe094..2215e83c86 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3133,6 +3133,19 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
     rn = VFP_SREG_N(insn);
 
+    switch (op) {
+    case 0:
+        /* Already handled by decodetree */
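+        /*
+         * The decodetree decoder matches every valid op == 0
+         * encoding, so any op == 0 pattern still reaching this
+         * point must UNDEF.
+         */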
+        return 1;
+    default:
+        break;
+    }
+
     if (op == 15) {
         /* rn is opcode, encoded as per VFP_SREG_N. */
         switch (rn) {
@@ -3312,12 +3325,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         for (;;) {
             /* Perform the calculation. */
             switch (op) {
-            case 0: /* VMLA: fd + (fn * fm) */
-                /* Note that order of inputs to the add matters for NaNs */
-                gen_vfp_F1_mul(dp);
-                gen_mov_F0_vreg(dp, rd);
-                gen_vfp_add(dp);
-                break;
             case 1: /* VMLS: fd + -(fn * fm) */
                 gen_vfp_mul(dp);
                 gen_vfp_F1_neg(dp);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index 68c9ffcfd3..9530e17ae0 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -96,3 +96,10 @@ VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
              vd=%vd_sp p=1 u=0 w=1
 VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
              vd=%vd_dp p=1 u=0 w=1
+
+# 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
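+# VMLA is the op == 0 entry in this group.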
+VMLA_sp      ---- 1110 0.00 .... .... 1010 .0.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VMLA_dp      ---- 1110 0.00 .... .... 1011 .0.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp