Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/cpu.h         1
-rw-r--r--  target-ppc/exec.h        1
-rw-r--r--  target-ppc/helper.h     42
-rw-r--r--  target-ppc/op.c        280
-rw-r--r--  target-ppc/op_helper.c 705
-rw-r--r--  target-ppc/translate.c 157
6 files changed, 550 insertions, 636 deletions
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 35c824aaa1..b0265937c5 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -571,7 +571,6 @@ struct CPUPPCState {
/* temporary float registers */
float64 ft0;
float64 ft1;
- float64 ft2;
float_status fp_status;
/* floating point registers */
float64 fpr[32];
diff --git a/target-ppc/exec.h b/target-ppc/exec.h
index 2b1cfe2586..d3df2f2da7 100644
--- a/target-ppc/exec.h
+++ b/target-ppc/exec.h
@@ -61,7 +61,6 @@ register target_ulong T2 asm(AREG3);
#define FT0 (env->ft0)
#define FT1 (env->ft1)
-#define FT2 (env->ft2)
#if defined (DEBUG_OP)
# define RETURN() __asm__ __volatile__("nop" : : : "memory");
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index c58fb444d3..9620e0d239 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -1,7 +1,7 @@
#include "def-helper.h"
-DEF_HELPER_0(fcmpo, i32)
-DEF_HELPER_0(fcmpu, i32)
+DEF_HELPER_2(fcmpo, i32, i64, i64)
+DEF_HELPER_2(fcmpu, i32, i64, i64)
DEF_HELPER_0(load_cr, tl)
DEF_HELPER_2(store_cr, void, tl, i32)
@@ -25,4 +25,42 @@ DEF_HELPER_1(cntlsw32, i32, i32)
DEF_HELPER_1(cntlzw32, i32, i32)
DEF_HELPER_2(brinc, tl, tl, tl)
+DEF_HELPER_0(float_check_status, void)
+#ifdef CONFIG_SOFTFLOAT
+DEF_HELPER_0(reset_fpstatus, void)
+#endif
+DEF_HELPER_2(compute_fprf, i32, i64, i32)
+DEF_HELPER_2(store_fpscr, void, i64, i32)
+DEF_HELPER_1(fpscr_setbit, void, i32)
+
+DEF_HELPER_1(fctiw, i64, i64)
+DEF_HELPER_1(fctiwz, i64, i64)
+#if defined(TARGET_PPC64)
+DEF_HELPER_1(fcfid, i64, i64)
+DEF_HELPER_1(fctid, i64, i64)
+DEF_HELPER_1(fctidz, i64, i64)
+#endif
+DEF_HELPER_1(frsp, i64, i64)
+DEF_HELPER_1(frin, i64, i64)
+DEF_HELPER_1(friz, i64, i64)
+DEF_HELPER_1(frip, i64, i64)
+DEF_HELPER_1(frim, i64, i64)
+
+DEF_HELPER_2(fadd, i64, i64, i64)
+DEF_HELPER_2(fsub, i64, i64, i64)
+DEF_HELPER_2(fmul, i64, i64, i64)
+DEF_HELPER_2(fdiv, i64, i64, i64)
+DEF_HELPER_3(fmadd, i64, i64, i64, i64)
+DEF_HELPER_3(fmsub, i64, i64, i64, i64)
+DEF_HELPER_3(fnmadd, i64, i64, i64, i64)
+DEF_HELPER_3(fnmsub, i64, i64, i64, i64)
+DEF_HELPER_1(fabs, i64, i64)
+DEF_HELPER_1(fnabs, i64, i64)
+DEF_HELPER_1(fneg, i64, i64)
+DEF_HELPER_1(fsqrt, i64, i64)
+DEF_HELPER_1(fre, i64, i64)
+DEF_HELPER_1(fres, i64, i64)
+DEF_HELPER_1(frsqrte, i64, i64)
+DEF_HELPER_3(fsel, i64, i64, i64, i64)
+
#include "def-helper.h"
diff --git a/target-ppc/op.c b/target-ppc/op.c
index 10a22eba5e..1ef5dbd3a1 100644
--- a/target-ppc/op.c
+++ b/target-ppc/op.c
@@ -261,71 +261,6 @@ void OPPROTO op_store_dbatl (void)
}
#endif /* !defined(CONFIG_USER_ONLY) */
-/* FPSCR */
-#ifdef CONFIG_SOFTFLOAT
-void OPPROTO op_reset_fpstatus (void)
-{
- env->fp_status.float_exception_flags = 0;
- RETURN();
-}
-#endif
-
-void OPPROTO op_compute_fprf (void)
-{
- do_compute_fprf(PARAM1);
- RETURN();
-}
-
-#ifdef CONFIG_SOFTFLOAT
-void OPPROTO op_float_check_status (void)
-{
- do_float_check_status();
- RETURN();
-}
-#else
-void OPPROTO op_float_check_status (void)
-{
- if (env->exception_index == POWERPC_EXCP_PROGRAM &&
- (env->error_code & POWERPC_EXCP_FP)) {
- /* Differred floating-point exception after target FPR update */
- if (msr_fe0 != 0 || msr_fe1 != 0)
- do_raise_exception_err(env->exception_index, env->error_code);
- }
- RETURN();
-}
-#endif
-
-void OPPROTO op_load_fpscr_FT0 (void)
-{
- /* The 32 MSB of the target fpr are undefined.
- * They'll be zero...
- */
- CPU_DoubleU u;
-
- u.l.upper = 0;
- u.l.lower = env->fpscr;
- FT0 = u.d;
- RETURN();
-}
-
-void OPPROTO op_fpscr_resetbit (void)
-{
- env->fpscr &= PARAM1;
- RETURN();
-}
-
-void OPPROTO op_fpscr_setbit (void)
-{
- do_fpscr_setbit(PARAM1);
- RETURN();
-}
-
-void OPPROTO op_store_fpscr (void)
-{
- do_store_fpscr(PARAM1);
- RETURN();
-}
-
/*** Integer shift ***/
void OPPROTO op_srli_T1 (void)
{
@@ -333,221 +268,6 @@ void OPPROTO op_srli_T1 (void)
RETURN();
}
-/*** Floating-Point arithmetic ***/
-/* fadd - fadd. */
-void OPPROTO op_fadd (void)
-{
-#if USE_PRECISE_EMULATION
- do_fadd();
-#else
- FT0 = float64_add(FT0, FT1, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fsub - fsub. */
-void OPPROTO op_fsub (void)
-{
-#if USE_PRECISE_EMULATION
- do_fsub();
-#else
- FT0 = float64_sub(FT0, FT1, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fmul - fmul. */
-void OPPROTO op_fmul (void)
-{
-#if USE_PRECISE_EMULATION
- do_fmul();
-#else
- FT0 = float64_mul(FT0, FT1, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fdiv - fdiv. */
-void OPPROTO op_fdiv (void)
-{
-#if USE_PRECISE_EMULATION
- do_fdiv();
-#else
- FT0 = float64_div(FT0, FT1, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fsqrt - fsqrt. */
-void OPPROTO op_fsqrt (void)
-{
- do_fsqrt();
- RETURN();
-}
-
-/* fre - fre. */
-void OPPROTO op_fre (void)
-{
- do_fre();
- RETURN();
-}
-
-/* fres - fres. */
-void OPPROTO op_fres (void)
-{
- do_fres();
- RETURN();
-}
-
-/* frsqrte - frsqrte. */
-void OPPROTO op_frsqrte (void)
-{
- do_frsqrte();
- RETURN();
-}
-
-/* fsel - fsel. */
-void OPPROTO op_fsel (void)
-{
- do_fsel();
- RETURN();
-}
-
-/*** Floating-Point multiply-and-add ***/
-/* fmadd - fmadd. */
-void OPPROTO op_fmadd (void)
-{
-#if USE_PRECISE_EMULATION
- do_fmadd();
-#else
- FT0 = float64_mul(FT0, FT1, &env->fp_status);
- FT0 = float64_add(FT0, FT2, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fmsub - fmsub. */
-void OPPROTO op_fmsub (void)
-{
-#if USE_PRECISE_EMULATION
- do_fmsub();
-#else
- FT0 = float64_mul(FT0, FT1, &env->fp_status);
- FT0 = float64_sub(FT0, FT2, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fnmadd - fnmadd. - fnmadds - fnmadds. */
-void OPPROTO op_fnmadd (void)
-{
- do_fnmadd();
- RETURN();
-}
-
-/* fnmsub - fnmsub. */
-void OPPROTO op_fnmsub (void)
-{
- do_fnmsub();
- RETURN();
-}
-
-/*** Floating-Point round & convert ***/
-/* frsp - frsp. */
-void OPPROTO op_frsp (void)
-{
-#if USE_PRECISE_EMULATION
- do_frsp();
-#else
- FT0 = float64_to_float32(FT0, &env->fp_status);
-#endif
- RETURN();
-}
-
-/* fctiw - fctiw. */
-void OPPROTO op_fctiw (void)
-{
- do_fctiw();
- RETURN();
-}
-
-/* fctiwz - fctiwz. */
-void OPPROTO op_fctiwz (void)
-{
- do_fctiwz();
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-/* fcfid - fcfid. */
-void OPPROTO op_fcfid (void)
-{
- do_fcfid();
- RETURN();
-}
-
-/* fctid - fctid. */
-void OPPROTO op_fctid (void)
-{
- do_fctid();
- RETURN();
-}
-
-/* fctidz - fctidz. */
-void OPPROTO op_fctidz (void)
-{
- do_fctidz();
- RETURN();
-}
-#endif
-
-void OPPROTO op_frin (void)
-{
- do_frin();
- RETURN();
-}
-
-void OPPROTO op_friz (void)
-{
- do_friz();
- RETURN();
-}
-
-void OPPROTO op_frip (void)
-{
- do_frip();
- RETURN();
-}
-
-void OPPROTO op_frim (void)
-{
- do_frim();
- RETURN();
-}
-
-/*** Floating-point move ***/
-/* fabs */
-void OPPROTO op_fabs (void)
-{
- FT0 = float64_abs(FT0);
- RETURN();
-}
-
-/* fnabs */
-void OPPROTO op_fnabs (void)
-{
- FT0 = float64_abs(FT0);
- FT0 = float64_chs(FT0);
- RETURN();
-}
-
-/* fneg */
-void OPPROTO op_fneg (void)
-{
- FT0 = float64_chs(FT0);
- RETURN();
-}
-
/* Load and store */
#define MEMSUFFIX _raw
#include "op_helper.h"
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
index b8f4b5eed6..daaceecf07 100644
--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -299,59 +299,62 @@ static always_inline int isnormal (float64 d)
}
#endif
-void do_compute_fprf (int set_fprf)
+uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
+ CPU_DoubleU farg;
int isneg;
-
- isneg = fpisneg(FT0);
- if (unlikely(float64_is_nan(FT0))) {
- if (float64_is_signaling_nan(FT0)) {
+ int ret;
+ farg.ll = arg;
+ isneg = fpisneg(farg.d);
+ if (unlikely(float64_is_nan(farg.d))) {
+ if (float64_is_signaling_nan(farg.d)) {
/* Signaling NaN: flags are undefined */
- T0 = 0x00;
+ ret = 0x00;
} else {
/* Quiet NaN */
- T0 = 0x11;
+ ret = 0x11;
}
- } else if (unlikely(isinfinity(FT0))) {
+ } else if (unlikely(isinfinity(farg.d))) {
/* +/- infinity */
if (isneg)
- T0 = 0x09;
+ ret = 0x09;
else
- T0 = 0x05;
+ ret = 0x05;
} else {
- if (iszero(FT0)) {
+ if (iszero(farg.d)) {
/* +/- zero */
if (isneg)
- T0 = 0x12;
+ ret = 0x12;
else
- T0 = 0x02;
+ ret = 0x02;
} else {
- if (isden(FT0)) {
+ if (isden(farg.d)) {
/* Denormalized numbers */
- T0 = 0x10;
+ ret = 0x10;
} else {
/* Normalized numbers */
- T0 = 0x00;
+ ret = 0x00;
}
if (isneg) {
- T0 |= 0x08;
+ ret |= 0x08;
} else {
- T0 |= 0x04;
+ ret |= 0x04;
}
}
}
if (set_fprf) {
/* We update FPSCR_FPRF */
env->fpscr &= ~(0x1F << FPSCR_FPRF);
- env->fpscr |= T0 << FPSCR_FPRF;
+ env->fpscr |= ret << FPSCR_FPRF;
}
/* We just need fpcc to update Rc1 */
- T0 &= 0xF;
+ return ret & 0xF;
}
/* Floating-point invalid operations exception */
-static always_inline void fload_invalid_op_excp (int op)
+static always_inline uint64_t fload_invalid_op_excp (int op)
{
+ uint64_t ret = 0;
int ve;
ve = fpscr_ve;
@@ -402,7 +405,7 @@ static always_inline void fload_invalid_op_excp (int op)
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
if (ve == 0) {
/* Set the result to quiet NaN */
- FT0 = UINT64_MAX;
+ ret = UINT64_MAX;
env->fpscr &= ~(0xF << FPSCR_FPCC);
env->fpscr |= 0x11 << FPSCR_FPCC;
}
@@ -413,7 +416,7 @@ static always_inline void fload_invalid_op_excp (int op)
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
if (ve == 0) {
/* Set the result to quiet NaN */
- FT0 = UINT64_MAX;
+ ret = UINT64_MAX;
env->fpscr &= ~(0xF << FPSCR_FPCC);
env->fpscr |= 0x11 << FPSCR_FPCC;
}
@@ -429,12 +432,11 @@ static always_inline void fload_invalid_op_excp (int op)
if (msr_fe0 != 0 || msr_fe1 != 0)
do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
}
+ return ret;
}
-static always_inline void float_zero_divide_excp (void)
+static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
{
- CPU_DoubleU u0, u1;
-
env->fpscr |= 1 << FPSCR_ZX;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
/* Update the floating-point exception summary */
@@ -448,12 +450,10 @@ static always_inline void float_zero_divide_excp (void)
}
} else {
/* Set the result to infinity */
- u0.d = FT0;
- u1.d = FT1;
- u0.ll = ((u0.ll ^ u1.ll) & 0x8000000000000000ULL);
- u0.ll |= 0x7FFULL << 52;
- FT0 = u0.d;
+ arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
+ arg1 |= 0x7FFULL << 52;
}
+ return arg1;
}
static always_inline void float_overflow_excp (void)
@@ -530,7 +530,7 @@ static always_inline void fpscr_set_rounding_mode (void)
set_float_rounding_mode(rnd_type, &env->fp_status);
}
-void do_fpscr_setbit (int bit)
+void helper_fpscr_setbit (uint32_t bit)
{
int prev;
@@ -645,25 +645,16 @@ void do_fpscr_setbit (int bit)
}
}
-#if defined(WORDS_BIGENDIAN)
-#define WORD0 0
-#define WORD1 1
-#else
-#define WORD0 1
-#define WORD1 0
-#endif
-void do_store_fpscr (uint32_t mask)
+void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
/*
* We use only the 32 LSB of the incoming fpr
*/
- CPU_DoubleU u;
uint32_t prev, new;
int i;
- u.d = FT0;
prev = env->fpscr;
- new = u.l.lower;
+ new = (uint32_t)arg;
new &= ~0x90000000;
new |= prev & 0x90000000;
for (i = 0; i < 7; i++) {
@@ -687,12 +678,10 @@ void do_store_fpscr (uint32_t mask)
env->fpscr &= ~(1 << FPSCR_FEX);
fpscr_set_rounding_mode();
}
-#undef WORD0
-#undef WORD1
-#ifdef CONFIG_SOFTFLOAT
-void do_float_check_status (void)
+void helper_float_check_status (void)
{
+#ifdef CONFIG_SOFTFLOAT
if (env->exception_index == POWERPC_EXCP_PROGRAM &&
(env->error_code & POWERPC_EXCP_FP)) {
/* Differred floating-point exception after target FPR update */
@@ -705,455 +694,618 @@ void do_float_check_status (void)
} else if (env->fp_status.float_exception_flags & float_flag_inexact) {
float_inexact_excp();
}
+#else
+ if (env->exception_index == POWERPC_EXCP_PROGRAM &&
+ (env->error_code & POWERPC_EXCP_FP)) {
+ /* Differred floating-point exception after target FPR update */
+ if (msr_fe0 != 0 || msr_fe1 != 0)
+ do_raise_exception_err(env->exception_index, env->error_code);
+ }
+ RETURN();
+#endif
+}
+
+#ifdef CONFIG_SOFTFLOAT
+void helper_reset_fpstatus (void)
+{
+ env->fp_status.float_exception_flags = 0;
}
#endif
-#if USE_PRECISE_EMULATION
-void do_fadd (void)
+/* fadd - fadd. */
+uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1))) {
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+#if USE_PRECISE_EMULATION
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
/* sNaN addition */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (likely(isfinite(FT0) || isfinite(FT1) ||
- fpisneg(FT0) == fpisneg(FT1))) {
- FT0 = float64_add(FT0, FT1, &env->fp_status);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
+ fpisneg(farg1.d) == fpisneg(farg2.d))) {
+ farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
} else {
/* Magnitude subtraction of infinities */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
}
+#else
+ farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
+#endif
+ return farg1.ll;
}
-void do_fsub (void)
+/* fsub - fsub. */
+uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1))) {
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+#if USE_PRECISE_EMULATION
+{
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
/* sNaN subtraction */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (likely(isfinite(FT0) || isfinite(FT1) ||
- fpisneg(FT0) != fpisneg(FT1))) {
- FT0 = float64_sub(FT0, FT1, &env->fp_status);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
+ fpisneg(farg1.d) != fpisneg(farg2.d))) {
+ farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
} else {
/* Magnitude subtraction of infinities */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
}
}
+#else
+ farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
+#endif
+ return farg1.ll;
+}
-void do_fmul (void)
+/* fmul - fmul. */
+uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1))) {
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+#if USE_PRECISE_EMULATION
+{
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
/* sNaN multiplication */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (unlikely((isinfinity(FT0) && iszero(FT1)) ||
- (iszero(FT0) && isinfinity(FT1)))) {
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
+ (iszero(farg1.d) && isinfinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
} else {
- FT0 = float64_mul(FT0, FT1, &env->fp_status);
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
}
}
+#else
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+#endif
+ return farg1.ll;
+}
-void do_fdiv (void)
+/* fdiv - fdiv. */
+uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1))) {
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+#if USE_PRECISE_EMULATION
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
/* sNaN division */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (unlikely(isinfinity(FT0) && isinfinity(FT1))) {
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
/* Division of infinity by infinity */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
- } else if (unlikely(iszero(FT1))) {
- if (iszero(FT0)) {
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
+ } else if (unlikely(iszero(farg2.d))) {
+ if (iszero(farg1.d)) {
/* Division of zero by zero */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
} else {
/* Division by zero */
- float_zero_divide_excp();
+ farg1.ll = float_zero_divide_excp(farg1.ll, farg2.ll);
}
} else {
- FT0 = float64_div(FT0, FT1, &env->fp_status);
+ farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
}
+#else
+ farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
+#endif
+ return farg1.ll;
}
-#endif /* USE_PRECISE_EMULATION */
-void do_fctiw (void)
+/* fabs */
+uint64_t helper_fabs (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ farg.ll = arg;
+ farg.d = float64_abs(farg.d);
+ return farg.ll;
+}
+
+/* fnabs */
+uint64_t helper_fnabs (uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+ farg.d = float64_abs(farg.d);
+ farg.d = float64_chs(farg.d);
+ return farg.ll;
+}
+
+/* fneg */
+uint64_t helper_fneg (uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+ farg.d = float64_chs(farg.d);
+ return farg.ll;
+}
+
+/* fctiw - fctiw. */
+uint64_t helper_fctiw (uint64_t arg)
+{
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
- } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
/* qNan / infinity conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
} else {
- p.ll = float64_to_int32(FT0, &env->fp_status);
+ farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
/* XXX: higher bits are not supposed to be significant.
* to make tests easier, return the same as a real PowerPC 750
*/
- p.ll |= 0xFFF80000ULL << 32;
+ farg.ll |= 0xFFF80000ULL << 32;
#endif
- FT0 = p.d;
}
+ return farg.ll;
}
-void do_fctiwz (void)
+/* fctiwz - fctiwz. */
+uint64_t helper_fctiwz (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
+ farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
- } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
/* qNan / infinity conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
} else {
- p.ll = float64_to_int32_round_to_zero(FT0, &env->fp_status);
+ farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
/* XXX: higher bits are not supposed to be significant.
* to make tests easier, return the same as a real PowerPC 750
*/
- p.ll |= 0xFFF80000ULL << 32;
+ farg.ll |= 0xFFF80000ULL << 32;
#endif
- FT0 = p.d;
}
+ return farg.ll;
}
#if defined(TARGET_PPC64)
-void do_fcfid (void)
+/* fcfid - fcfid. */
+uint64_t helper_fcfid (uint64_t arg)
{
- CPU_DoubleU p;
-
- p.d = FT0;
- FT0 = int64_to_float64(p.ll, &env->fp_status);
+ CPU_DoubleU farg;
+ farg.d = int64_to_float64(arg, &env->fp_status);
+ return farg.ll;
}
-void do_fctid (void)
+/* fctid - fctid. */
+uint64_t helper_fctid (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
+ farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
- } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
/* qNan / infinity conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
} else {
- p.ll = float64_to_int64(FT0, &env->fp_status);
- FT0 = p.d;
+ farg.ll = float64_to_int64(farg.d, &env->fp_status);
}
+ return farg.ll;
}
-void do_fctidz (void)
+/* fctidz - fctidz. */
+uint64_t helper_fctidz (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
+ farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
- } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
/* qNan / infinity conversion */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
} else {
- p.ll = float64_to_int64_round_to_zero(FT0, &env->fp_status);
- FT0 = p.d;
+ farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
}
+ return farg.ll;
}
#endif
-static always_inline void do_fri (int rounding_mode)
+static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN round */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
- } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
+ } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
/* qNan / infinity round */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
} else {
set_float_rounding_mode(rounding_mode, &env->fp_status);
- FT0 = float64_round_to_int(FT0, &env->fp_status);
+ farg.ll = float64_round_to_int(farg.d, &env->fp_status);
/* Restore rounding mode from FPSCR */
fpscr_set_rounding_mode();
}
+ return farg.ll;
}
-void do_frin (void)
+uint64_t helper_frin (uint64_t arg)
{
- do_fri(float_round_nearest_even);
+ return do_fri(arg, float_round_nearest_even);
}
-void do_friz (void)
+uint64_t helper_friz (uint64_t arg)
{
- do_fri(float_round_to_zero);
+ return do_fri(arg, float_round_to_zero);
}
-void do_frip (void)
+uint64_t helper_frip (uint64_t arg)
{
- do_fri(float_round_up);
+ return do_fri(arg, float_round_up);
}
-void do_frim (void)
+uint64_t helper_frim (uint64_t arg)
{
- do_fri(float_round_down);
+ return do_fri(arg, float_round_down);
}
-#if USE_PRECISE_EMULATION
-void do_fmadd (void)
+/* fmadd - fmadd. */
+uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1) ||
- float64_is_signaling_nan(FT2))) {
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+#if USE_PRECISE_EMULATION
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
} else {
#ifdef FLOAT128
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
- ft0_128 = float64_to_float128(FT0, &env->fp_status);
- ft1_128 = float64_to_float128(FT1, &env->fp_status);
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
- ft1_128 = float64_to_float128(FT2, &env->fp_status);
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
- FT0 = float128_to_float64(ft0_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
#else
/* This is OK on x86 hosts */
- FT0 = (FT0 * FT1) + FT2;
+ farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
}
+#else
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+ farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
+#endif
+ return farg1.ll;
}
-void do_fmsub (void)
+/* fmsub - fmsub. */
+uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1) ||
- float64_is_signaling_nan(FT2))) {
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+#if USE_PRECISE_EMULATION
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
} else {
#ifdef FLOAT128
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
- ft0_128 = float64_to_float128(FT0, &env->fp_status);
- ft1_128 = float64_to_float128(FT1, &env->fp_status);
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
- ft1_128 = float64_to_float128(FT2, &env->fp_status);
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
- FT0 = float128_to_float64(ft0_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
#else
/* This is OK on x86 hosts */
- FT0 = (FT0 * FT1) - FT2;
+ farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
}
+#else
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+ farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
+#endif
+ return farg1.ll;
}
-#endif /* USE_PRECISE_EMULATION */
-void do_fnmadd (void)
+/* fnmadd - fnmadd. */
+uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1) ||
- float64_is_signaling_nan(FT2))) {
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
} else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
- ft0_128 = float64_to_float128(FT0, &env->fp_status);
- ft1_128 = float64_to_float128(FT1, &env->fp_status);
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
- ft1_128 = float64_to_float128(FT2, &env->fp_status);
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
- FT0 = float128_to_float64(ft0_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
#else
/* This is OK on x86 hosts */
- FT0 = (FT0 * FT1) + FT2;
+ farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
- FT0 = float64_mul(FT0, FT1, &env->fp_status);
- FT0 = float64_add(FT0, FT2, &env->fp_status);
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+ farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
- if (likely(!isnan(FT0)))
- FT0 = float64_chs(FT0);
+ if (likely(!isnan(farg1.d)))
+ farg1.d = float64_chs(farg1.d);
}
+ return farg1.ll;
}
-void do_fnmsub (void)
+/* fnmsub - fnmsub. */
+uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1) ||
- float64_is_signaling_nan(FT2))) {
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d) ||
+ float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
} else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
- ft0_128 = float64_to_float128(FT0, &env->fp_status);
- ft1_128 = float64_to_float128(FT1, &env->fp_status);
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
- ft1_128 = float64_to_float128(FT2, &env->fp_status);
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
- FT0 = float128_to_float64(ft0_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
#else
/* This is OK on x86 hosts */
- FT0 = (FT0 * FT1) - FT2;
+ farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
- FT0 = float64_mul(FT0, FT1, &env->fp_status);
- FT0 = float64_sub(FT0, FT2, &env->fp_status);
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+ farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
- if (likely(!isnan(FT0)))
- FT0 = float64_chs(FT0);
+ if (likely(!isnan(farg1.d)))
+ farg1.d = float64_chs(farg1.d);
}
+ return farg1.ll;
}
-#if USE_PRECISE_EMULATION
-void do_frsp (void)
+
+/* frsp - frsp. */
+uint64_t helper_frsp (uint64_t arg)
{
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+#if USE_PRECISE_EMULATION
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN square root */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
} else {
- FT0 = float64_to_float32(FT0, &env->fp_status);
+ farg.d = float64_to_float32(farg.d, &env->fp_status);
}
+#else
+ farg.d = float64_to_float32(farg.d, &env->fp_status);
+#endif
+ return farg.ll;
}
-#endif /* USE_PRECISE_EMULATION */
-void do_fsqrt (void)
+/* fsqrt - fsqrt. */
+uint64_t helper_fsqrt (uint64_t arg)
{
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ CPU_DoubleU farg;
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN square root */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
/* Square root of a negative nonzero number */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
} else {
- FT0 = float64_sqrt(FT0, &env->fp_status);
+ farg.d = float64_sqrt(farg.d, &env->fp_status);
}
+ return farg.ll;
}
-void do_fre (void)
+/* fre - fre. */
+uint64_t helper_fre (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
+ farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (unlikely(iszero(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (unlikely(iszero(farg.d))) {
/* Zero reciprocal */
- float_zero_divide_excp();
- } else if (likely(isnormal(FT0))) {
- FT0 = float64_div(1.0, FT0, &env->fp_status);
+ farg.ll = float_zero_divide_excp(0x3FF0000000000000ULL, farg.ll);
+ } else if (likely(isnormal(farg.d))) {
+ farg.d = float64_div(1.0, farg.d, &env->fp_status);
} else {
- p.d = FT0;
- if (p.ll == 0x8000000000000000ULL) {
- p.ll = 0xFFF0000000000000ULL;
- } else if (p.ll == 0x0000000000000000ULL) {
- p.ll = 0x7FF0000000000000ULL;
- } else if (isnan(FT0)) {
- p.ll = 0x7FF8000000000000ULL;
- } else if (fpisneg(FT0)) {
- p.ll = 0x8000000000000000ULL;
+ if (farg.ll == 0x8000000000000000ULL) {
+ farg.ll = 0xFFF0000000000000ULL;
+ } else if (farg.ll == 0x0000000000000000ULL) {
+ farg.ll = 0x7FF0000000000000ULL;
+ } else if (isnan(farg.d)) {
+ farg.ll = 0x7FF8000000000000ULL;
+ } else if (fpisneg(farg.d)) {
+ farg.ll = 0x8000000000000000ULL;
} else {
- p.ll = 0x0000000000000000ULL;
+ farg.ll = 0x0000000000000000ULL;
}
- FT0 = p.d;
}
+ return farg.ll;
}
-void do_fres (void)
+/* fres - fres. */
+uint64_t helper_fres (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
+ farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (unlikely(iszero(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (unlikely(iszero(farg.d))) {
/* Zero reciprocal */
- float_zero_divide_excp();
- } else if (likely(isnormal(FT0))) {
+ farg.ll = float_zero_divide_excp(0x3FF0000000000000ULL, farg.ll);
+ } else if (likely(isnormal(farg.d))) {
#if USE_PRECISE_EMULATION
- FT0 = float64_div(1.0, FT0, &env->fp_status);
- FT0 = float64_to_float32(FT0, &env->fp_status);
+ farg.d = float64_div(1.0, farg.d, &env->fp_status);
+ farg.d = float64_to_float32(farg.d, &env->fp_status);
#else
- FT0 = float32_div(1.0, FT0, &env->fp_status);
+ farg.d = float32_div(1.0, farg.d, &env->fp_status);
#endif
} else {
- p.d = FT0;
- if (p.ll == 0x8000000000000000ULL) {
- p.ll = 0xFFF0000000000000ULL;
- } else if (p.ll == 0x0000000000000000ULL) {
- p.ll = 0x7FF0000000000000ULL;
- } else if (isnan(FT0)) {
- p.ll = 0x7FF8000000000000ULL;
- } else if (fpisneg(FT0)) {
- p.ll = 0x8000000000000000ULL;
+ if (farg.ll == 0x8000000000000000ULL) {
+ farg.ll = 0xFFF0000000000000ULL;
+ } else if (farg.ll == 0x0000000000000000ULL) {
+ farg.ll = 0x7FF0000000000000ULL;
+ } else if (isnan(farg.d)) {
+ farg.ll = 0x7FF8000000000000ULL;
+ } else if (fpisneg(farg.d)) {
+ farg.ll = 0x8000000000000000ULL;
} else {
- p.ll = 0x0000000000000000ULL;
+ farg.ll = 0x0000000000000000ULL;
}
- FT0 = p.d;
}
+ return farg.ll;
}
-void do_frsqrte (void)
+/* frsqrte - frsqrte. */
+uint64_t helper_frsqrte (uint64_t arg)
{
- CPU_DoubleU p;
+ CPU_DoubleU farg;
+ farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(FT0))) {
+ if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal square root */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
- } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
/* Reciprocal square root of a negative nonzero number */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
- } else if (likely(isnormal(FT0))) {
- FT0 = float64_sqrt(FT0, &env->fp_status);
- FT0 = float32_div(1.0, FT0, &env->fp_status);
+ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
+ } else if (likely(isnormal(farg.d))) {
+ farg.d = float64_sqrt(farg.d, &env->fp_status);
+ farg.d = float32_div(1.0, farg.d, &env->fp_status);
} else {
- p.d = FT0;
- if (p.ll == 0x8000000000000000ULL) {
- p.ll = 0xFFF0000000000000ULL;
- } else if (p.ll == 0x0000000000000000ULL) {
- p.ll = 0x7FF0000000000000ULL;
- } else if (isnan(FT0)) {
- p.ll |= 0x000FFFFFFFFFFFFFULL;
- } else if (fpisneg(FT0)) {
- p.ll = 0x7FF8000000000000ULL;
+ if (farg.ll == 0x8000000000000000ULL) {
+ farg.ll = 0xFFF0000000000000ULL;
+ } else if (farg.ll == 0x0000000000000000ULL) {
+ farg.ll = 0x7FF0000000000000ULL;
+ } else if (isnan(farg.d)) {
+ farg.ll |= 0x000FFFFFFFFFFFFFULL;
+ } else if (fpisneg(farg.d)) {
+ farg.ll = 0x7FF8000000000000ULL;
} else {
- p.ll = 0x0000000000000000ULL;
+ farg.ll = 0x0000000000000000ULL;
}
- FT0 = p.d;
}
+ return farg.ll;
}
-void do_fsel (void)
+/* fsel - fsel. */
+uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
- if (!fpisneg(FT0) || iszero(FT0))
- FT0 = FT1;
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (!fpisneg(farg1.d) || iszero(farg1.d))
+ return farg2.ll;
else
- FT0 = FT2;
+ return farg3.ll;
}
-uint32_t helper_fcmpu (void)
+uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
{
+ CPU_DoubleU farg1, farg2;
uint32_t ret = 0;
+ farg1.ll = arg1;
+ farg2.ll = arg2;
- if (unlikely(float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d))) {
/* sNaN comparison */
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
} else {
- if (float64_lt(FT0, FT1, &env->fp_status)) {
+ if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
ret = 0x08UL;
- } else if (!float64_le(FT0, FT1, &env->fp_status)) {
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
ret = 0x04UL;
} else {
ret = 0x02UL;
@@ -1164,14 +1316,17 @@ uint32_t helper_fcmpu (void)
return ret;
}
-uint32_t helper_fcmpo (void)
+uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
{
+ CPU_DoubleU farg1, farg2;
uint32_t ret = 0;
+ farg1.ll = arg1;
+ farg2.ll = arg2;
- if (unlikely(float64_is_nan(FT0) ||
- float64_is_nan(FT1))) {
- if (float64_is_signaling_nan(FT0) ||
- float64_is_signaling_nan(FT1)) {
+ if (unlikely(float64_is_nan(farg1.d) ||
+ float64_is_nan(farg2.d))) {
+ if (float64_is_signaling_nan(farg1.d) ||
+ float64_is_signaling_nan(farg2.d)) {
/* sNaN comparison */
fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXVC);
@@ -1180,9 +1335,9 @@ uint32_t helper_fcmpo (void)
fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
}
} else {
- if (float64_lt(FT0, FT1, &env->fp_status)) {
+ if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
ret = 0x08UL;
- } else if (!float64_le(FT0, FT1, &env->fp_status)) {
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
ret = 0x04UL;
} else {
ret = 0x02UL;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 085c7a1533..18db6604aa 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -75,7 +75,7 @@ static TCGv cpu_T[3];
#else
static TCGv_i64 cpu_T64[3];
#endif
-static TCGv_i64 cpu_FT[3];
+static TCGv_i64 cpu_FT[2];
static TCGv_i64 cpu_AVRh[3], cpu_AVRl[3];
#include "gen-icount.h"
@@ -120,8 +120,6 @@ void ppc_translate_init(void)
offsetof(CPUState, ft0), "FT0");
cpu_FT[1] = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUState, ft1), "FT1");
- cpu_FT[2] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, ft2), "FT2");
cpu_AVRh[0] = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUState, avr0.u64[0]), "AVR0H");
@@ -245,27 +243,31 @@ static always_inline void gen_reset_fpstatus (void)
#endif
}
-static always_inline void gen_compute_fprf (int set_fprf, int set_rc)
+static always_inline void gen_compute_fprf (TCGv_i64 arg, int set_fprf, int set_rc)
{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
if (set_fprf != 0) {
/* This case might be optimized later */
#if defined(OPTIMIZE_FPRF_UPDATE)
*gen_fprf_ptr++ = gen_opc_ptr;
#endif
- gen_op_compute_fprf(1);
+ tcg_gen_movi_i32(t0, 1);
+ gen_helper_compute_fprf(t0, arg, t0);
if (unlikely(set_rc)) {
- tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_T[0]);
- tcg_gen_andi_i32(cpu_crf[1], cpu_crf[1], 0xf);
+ tcg_gen_mov_i32(cpu_crf[1], t0);
}
- gen_op_float_check_status();
+ gen_helper_float_check_status();
} else if (unlikely(set_rc)) {
/* We always need to compute fpcc */
- gen_op_compute_fprf(0);
- tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_T[0]);
- tcg_gen_andi_i32(cpu_crf[1], cpu_crf[1], 0xf);
+ tcg_gen_movi_i32(t0, 0);
+ gen_helper_compute_fprf(t0, arg, t0);
+ tcg_gen_mov_i32(cpu_crf[1], t0);
if (set_fprf)
- gen_op_float_check_status();
+ gen_helper_float_check_status();
}
+
+ tcg_temp_free_i32(t0);
}
static always_inline void gen_optimize_fprf (void)
@@ -2096,16 +2098,14 @@ GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type) \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]); \
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rC(ctx->opcode)]); \
- tcg_gen_mov_i64(cpu_FT[2], cpu_fpr[rB(ctx->opcode)]); \
gen_reset_fpstatus(); \
- gen_op_f##op(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
if (isfloat) { \
- gen_op_frsp(); \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
} \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- gen_compute_fprf(set_fprf, Rc(ctx->opcode) != 0); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
+ Rc(ctx->opcode) != 0); \
}
#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
@@ -2119,15 +2119,14 @@ GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]); \
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rB(ctx->opcode)]); \
gen_reset_fpstatus(); \
- gen_op_f##op(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rB(ctx->opcode)]); \
if (isfloat) { \
- gen_op_frsp(); \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
} \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- gen_compute_fprf(set_fprf, Rc(ctx->opcode) != 0); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
@@ -2140,15 +2139,14 @@ GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]); \
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rC(ctx->opcode)]); \
gen_reset_fpstatus(); \
- gen_op_f##op(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)]); \
if (isfloat) { \
- gen_op_frsp(); \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
} \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- gen_compute_fprf(set_fprf, Rc(ctx->opcode) != 0); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
@@ -2161,11 +2159,10 @@ GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type) \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rB(ctx->opcode)]); \
gen_reset_fpstatus(); \
- gen_op_f##name(); \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- gen_compute_fprf(set_fprf, Rc(ctx->opcode) != 0); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
}
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
@@ -2175,11 +2172,10 @@ GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type) \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rB(ctx->opcode)]); \
gen_reset_fpstatus(); \
- gen_op_f##name(); \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- gen_compute_fprf(set_fprf, Rc(ctx->opcode) != 0); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
+ set_fprf, Rc(ctx->opcode) != 0); \
}
/* fadd - fadds */
@@ -2199,12 +2195,17 @@ GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
/* frsqrtes */
-static always_inline void gen_op_frsqrtes (void)
+GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES)
{
- gen_op_frsqrte();
- gen_op_frsp();
+ if (unlikely(!ctx->fpu_enabled)) {
+ GEN_EXCP_NO_FP(ctx);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
-GEN_FLOAT_BS(rsqrtes, 0x3B, 0x1A, 1, PPC_FLOAT_FRSQRTES);
/* fsel */
_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
@@ -2218,11 +2219,9 @@ GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT)
GEN_EXCP_NO_FP(ctx);
return;
}
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rB(ctx->opcode)]);
gen_reset_fpstatus();
- gen_op_fsqrt();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- gen_compute_fprf(1, Rc(ctx->opcode) != 0);
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT)
@@ -2231,12 +2230,10 @@ GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT)
GEN_EXCP_NO_FP(ctx);
return;
}
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rB(ctx->opcode)]);
gen_reset_fpstatus();
- gen_op_fsqrt();
- gen_op_frsp();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- gen_compute_fprf(1, Rc(ctx->opcode) != 0);
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
/*** Floating-Point multiply-and-add ***/
@@ -2282,11 +2279,10 @@ GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT)
GEN_EXCP_NO_FP(ctx);
return;
}
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rB(ctx->opcode)]);
gen_reset_fpstatus();
- gen_helper_fcmpo(cpu_crf[crfD(ctx->opcode)]);
- gen_op_float_check_status();
+ gen_helper_fcmpo(cpu_crf[crfD(ctx->opcode)],
+ cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_float_check_status();
}
/* fcmpu */
@@ -2296,11 +2292,10 @@ GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT)
GEN_EXCP_NO_FP(ctx);
return;
}
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rA(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rB(ctx->opcode)]);
gen_reset_fpstatus();
- gen_helper_fcmpu(cpu_crf[crfD(ctx->opcode)]);
- gen_op_float_check_status();
+ gen_helper_fcmpu(cpu_crf[crfD(ctx->opcode)],
+ cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_float_check_status();
}
/*** Floating-point move ***/
@@ -2316,9 +2311,8 @@ GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT)
GEN_EXCP_NO_FP(ctx);
return;
}
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rB(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- gen_compute_fprf(0, Rc(ctx->opcode) != 0);
+ tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
}
/* fnabs */
@@ -2342,7 +2336,7 @@ GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT)
bfa = 4 * (7 - crfS(ctx->opcode));
tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
- gen_op_fpscr_resetbit(~(0xF << bfa));
+ tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
}
/* mffs */
@@ -2354,9 +2348,8 @@ GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT)
}
gen_optimize_fprf();
gen_reset_fpstatus();
- gen_op_load_fpscr_FT0();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- gen_compute_fprf(0, Rc(ctx->opcode) != 0);
+ tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
}
/* mtfsb0 */
@@ -2372,7 +2365,7 @@ GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT)
gen_optimize_fprf();
gen_reset_fpstatus();
if (likely(crb != 30 && crb != 29))
- gen_op_fpscr_resetbit(~(1 << crb));
+ tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(1 << crb));
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
@@ -2391,37 +2384,44 @@ GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT)
gen_optimize_fprf();
gen_reset_fpstatus();
/* XXX: we pretend we can only do IEEE floating-point computations */
- if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI))
- gen_op_fpscr_setbit(crb);
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
+ TCGv_i32 t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_setbit(t0);
+ tcg_temp_free_i32(t0);
+ }
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
/* We can raise a differed exception */
- gen_op_float_check_status();
+ gen_helper_float_check_status();
}
/* mtfsf */
GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x02010000, PPC_FLOAT)
{
+ TCGv_i32 t0;
+
if (unlikely(!ctx->fpu_enabled)) {
GEN_EXCP_NO_FP(ctx);
return;
}
gen_optimize_fprf();
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rB(ctx->opcode)]);
gen_reset_fpstatus();
- gen_op_store_fpscr(FM(ctx->opcode));
+ t0 = tcg_const_i32(FM(ctx->opcode));
+ gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
/* We can raise a differed exception */
- gen_op_float_check_status();
+ gen_helper_float_check_status();
}
/* mtfsfi */
GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006f0800, PPC_FLOAT)
{
int bf, sh;
+ TCGv_i64 t0;
+ TCGv_i32 t1;
if (unlikely(!ctx->fpu_enabled)) {
GEN_EXCP_NO_FP(ctx);
@@ -2430,14 +2430,17 @@ GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006f0800, PPC_FLOAT)
bf = crbD(ctx->opcode) >> 2;
sh = 7 - bf;
gen_optimize_fprf();
- tcg_gen_movi_i64(cpu_FT[0], FPIMM(ctx->opcode) << (4 * sh));
gen_reset_fpstatus();
- gen_op_store_fpscr(1 << sh);
+ t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
+ t1 = tcg_const_i32(1 << sh);
+ gen_helper_store_fpscr(t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i32(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
/* We can raise a differed exception */
- gen_op_float_check_status();
+ gen_helper_float_check_status();
}
/*** Addressing modes ***/