Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/helper.h     2
-rw-r--r--  target-ppc/op_helper.c  37
-rw-r--r--  target-ppc/translate.c  1
3 files changed, 40 insertions, 0 deletions
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 43d1867d2a..af6c8396c0 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -236,6 +236,8 @@ DEF_HELPER_3(vaddfp, void, avr, avr, avr)
DEF_HELPER_3(vsubfp, void, avr, avr, avr)
DEF_HELPER_3(vmaxfp, void, avr, avr, avr)
DEF_HELPER_3(vminfp, void, avr, avr, avr)
+DEF_HELPER_4(vmaddfp, void, avr, avr, avr, avr)
+DEF_HELPER_4(vnmsubfp, void, avr, avr, avr, avr)
DEF_HELPER_2(vlogefp, void, avr, avr)
DEF_HELPER_2(vrfim, void, avr, avr)
DEF_HELPER_2(vrfin, void, avr, avr)
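For reference, these DEF_HELPER_4 declarations correspond to four-operand helper prototypes; the definitions in op_helper.c below use exactly these signatures (the ppc_avr_t typedef and the DEF_HELPER macro machinery live elsewhere in the tree, so take this expansion as a rough sketch rather than the literal preprocessor output):

    void helper_vmaddfp(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c);
    void helper_vnmsubfp(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c);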
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
index 6cf47a3576..f59cd15cf6 100644
--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -2220,6 +2220,24 @@ VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
+void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
+ /* Need to do the computation in higher precision and round
+ * once at the end. */
+ float64 af, bf, cf, t;
+ af = float32_to_float64(a->f[i], &env->vec_status);
+ bf = float32_to_float64(b->f[i], &env->vec_status);
+ cf = float32_to_float64(c->f[i], &env->vec_status);
+ t = float64_mul(af, cf, &env->vec_status);
+ t = float64_add(t, bf, &env->vec_status);
+ r->f[i] = float64_to_float32(t, &env->vec_status);
+ }
+ }
+}
+
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
int sat = 0;
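The comment in the new helper is the key point: vmaddfp must round to single precision only once. Widening to float64 makes the 24-bit by 24-bit product exact (48 significand bits fit comfortably in float64's 53), so the only rounding back to single precision happens at the final conversion. A minimal host-float sketch of the same pattern, ignoring QEMU's softfloat state, rounding modes, and the HANDLE_NAN3 special-casing, might look like this:

    /* Illustrative only: host doubles instead of softfloat, no NaN or
     * rounding-mode handling.  One element of vmaddfp: r = (a * c) + b,
     * rounded to single precision once at the end. */
    static float madd_elem(float a, float b, float c)
    {
        double t = (double)a * (double)c;   /* product is exact in double */
        t += (double)b;                     /* add at the wider precision */
        return (float)t;                    /* single narrowing round */
    }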
@@ -2456,6 +2474,25 @@ VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
+void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
+ /* Need to do the computation in higher precision and round
+ * once at the end. */
+ float64 af, bf, cf, t;
+ af = float32_to_float64(a->f[i], &env->vec_status);
+ bf = float32_to_float64(b->f[i], &env->vec_status);
+ cf = float32_to_float64(c->f[i], &env->vec_status);
+ t = float64_mul(af, cf, &env->vec_status);
+ t = float64_sub(t, bf, &env->vec_status);
+ t = float64_chs(t);
+ r->f[i] = float64_to_float32(t, &env->vec_status);
+ }
+ }
+}
+
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
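vnmsubfp is the negated multiply-subtract: each element computes -((a * c) - b), again via a float64 intermediate. The final float64_chs is only a sign flip, so it adds no extra rounding step. The same kind of host-float sketch for one element, under the same caveats as above:

    /* Illustrative only: one element of vnmsubfp, r = -((a * c) - b). */
    static float nmsub_elem(float a, float b, float c)
    {
        double t = (double)a * (double)c;   /* product is exact in double */
        t -= (double)b;                     /* subtract at the wider precision */
        return (float)-t;                   /* negate, then round to single */
    }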
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index af83b198ef..88426af946 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -6584,6 +6584,7 @@ GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
GEN_VAFORM_PAIRED(vsel, vperm, 21)
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
/*** SPE extension ***/
/* Register moves */