author    Aurelien Jarno <aurelien@aurel32.net>      2012-09-11 08:47:12 +0000
committer Alexander Graf <agraf@suse.de>             2012-10-04 15:54:17 +0200
commit    2f93c23fe71420e5095f2fae1877fe747ad9f876 (patch)
tree      2a65026bea93e2b6d356aeae7bbe3ef97cf334ce /target-ppc/int_helper.c
parent    db1babb8dbb6d18433a51f1b4c3d186ea7057a6f (diff)
target-ppc: use the softfloat float32_muladd function
Use the new softfloat float32_muladd() function to implement the vmaddfp
and vnmsubfp instructions. As a bonus we can get rid of the call to the
HANDLE_NAN3 macro, as the NaN handling is done directly at the softfloat
level.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Alexander Graf <agraf@suse.de>
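Where the semantics come from: float32_muladd(x, y, z, flags, status) in
QEMU's softfloat computes (x * y) + z with a single rounding, and the flags
negate individual operands or the final result. With the operand order used
in the diff below, vmaddfp is (a * c) + b with no flags, and vnmsubfp passes
float_muladd_negate_c | float_muladd_negate_result to obtain -((a * c) - b).
Here is a minimal standalone sketch of that flag algebra, using the host's
fmaf() from <math.h> as a stand-in for softfloat; muladd_sketch is a
hypothetical name for illustration, not QEMU code:

/* muladd_sketch: hypothetical stand-in for softfloat's flag handling,
 * built on the host fused multiply-add; not QEMU code. */
#include <math.h>
#include <stdio.h>

static float muladd_sketch(float x, float y, float z,
                           int negate_c, int negate_result)
{
    float addend = negate_c ? -z : z;   /* float_muladd_negate_c */
    float r = fmaf(x, y, addend);       /* (x * y) + addend, one rounding */
    return negate_result ? -r : r;      /* float_muladd_negate_result */
}

int main(void)
{
    float a = 1.5f, c = 2.0f, b = 0.25f;  /* illustrative values */
    /* vmaddfp: (a * c) + b */
    printf("maddfp  = %g\n", muladd_sketch(a, c, b, 0, 0));  /* 3.25 */
    /* vnmsubfp: -((a * c) - b) */
    printf("nmsubfp = %g\n", muladd_sketch(a, c, b, 1, 1));  /* -2.75 */
    return 0;
}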
Diffstat (limited to 'target-ppc/int_helper.c')
 target-ppc/int_helper.c | 57 ++++++++++++++-------------------------------------------
 1 file changed, 14 insertions(+), 43 deletions(-)
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index 61412433d0..6f9beffb2a 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -418,6 +418,20 @@ VARITHFP(minfp, float32_min)
VARITHFP(maxfp, float32_max)
#undef VARITHFP
+#define VARITHFPFMA(suffix, type)                                       \
+    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+                          ppc_avr_t *b, ppc_avr_t *c)                   \
+    {                                                                   \
+        int i;                                                          \
+        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
+            r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
+                                     type, &env->vec_status);           \
+        }                                                               \
+    }
+VARITHFPFMA(maddfp, 0);
+VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
+#undef VARITHFPFMA
+
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
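For readability, here is what the first instantiation above,
VARITHFPFMA(maddfp, 0), expands to once the preprocessor is done. This is a
hand-written sketch of the expansion that assumes QEMU's internal
definitions (CPUPPCState, ppc_avr_t, ARRAY_SIZE, float32_muladd), so it is
not standalone; in the tree the helper is generated by the macro:

/* Hand expansion of VARITHFPFMA(maddfp, 0); generated, not written, in QEMU. */
void helper_vmaddfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                    ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        /* (a * c) + b in one rounding step; 0 means no negation flags,
         * and softfloat propagates NaNs itself, so HANDLE_NAN3 is gone. */
        r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],
                                 0, &env->vec_status);
    }
}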
@@ -649,27 +663,6 @@ VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
-void helper_vmaddfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
-                    ppc_avr_t *c)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
-            /* Need to do the computation in higher precision and round
-             * once at the end. */
-            float64 af, bf, cf, t;
-
-            af = float32_to_float64(a->f[i], &env->vec_status);
-            bf = float32_to_float64(b->f[i], &env->vec_status);
-            cf = float32_to_float64(c->f[i], &env->vec_status);
-            t = float64_mul(af, cf, &env->vec_status);
-            t = float64_add(t, bf, &env->vec_status);
-            r->f[i] = float64_to_float32(t, &env->vec_status);
-        }
-    }
-}
-
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                      ppc_avr_t *b, ppc_avr_t *c)
{
@@ -909,28 +902,6 @@ VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
-void helper_vnmsubfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
-                     ppc_avr_t *b, ppc_avr_t *c)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
-            /* Need to do the computation in higher precision and round
-             * once at the end. */
-            float64 af, bf, cf, t;
-
-            af = float32_to_float64(a->f[i], &env->vec_status);
-            bf = float32_to_float64(b->f[i], &env->vec_status);
-            cf = float32_to_float64(c->f[i], &env->vec_status);
-            t = float64_mul(af, cf, &env->vec_status);
-            t = float64_sub(t, bf, &env->vec_status);
-            t = float64_chs(t);
-            r->f[i] = float64_to_float32(t, &env->vec_status);
-        }
-    }
-}
-
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                  ppc_avr_t *c)
{
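A side note on the removed code paths: their comment says the float64
detour rounds "once at the end", but strictly speaking it can round twice.
The float32 product is exact in float64 (a 24-bit by 24-bit significand
product needs at most 48 bits, well within float64's 53), yet float64_add
rounds once and float64_to_float32 rounds again, and that double rounding
can differ from the single rounding of float32_muladd by one ulp in rare
borderline cases. A standalone sketch of the two paths on the host FPU,
where double, (float) casts, and fmaf stand in for the softfloat calls and
the inputs are arbitrary assumptions, not taken from the commit:

#include <math.h>
#include <stdio.h>

/* The removed path: exact double product, rounded double add, then a
 * rounded conversion back to float -- two roundings in total. */
static float old_path(float a, float c, float b)
{
    double t = (double)a * (double)c; /* exact: product fits in 53 bits */
    t += (double)b;                   /* rounding #1 (to float64) */
    return (float)t;                  /* rounding #2 (to float32) */
}

int main(void)
{
    /* Illustrative inputs; the two paths agree for most values and
     * differ only in rare borderline cases. */
    float a = 1.0f + 0x1p-23f, c = 1.0f - 0x1p-23f, b = -1.0f;

    printf("two-step: %a\n", old_path(a, c, b));
    printf("fused:    %a\n", fmaf(a, c, b)); /* one rounding, like float32_muladd */
    return 0;
}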