| author | aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162> | 2008-11-23 10:54:04 +0000 |
|---|---|---|
| committer | aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162> | 2008-11-23 10:54:04 +0000 |
| commit | 1c97856dcc4557f75eb9a86ec5300f9450a1e1a0 (patch) | |
| tree | 0875ad311ee098fc2e7845eadbbffa90ba735774 /target-ppc | |
| parent | f48879196272e8e966d4666e05ed4c11422f3baf (diff) | |
target-ppc: convert SPE FP ops to TCG
Including a few bug fixes:
- Don't clear the high part of the destination register for instructions with a 32-bit destination
- Fix the efscmp*/efstst* comparison return values (both fixes are sketched below)
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5783 c046a42c-6fe2-441c-8c8c-71466251a162
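
For reference, a minimal sketch of the two fixes, using host floats and hypothetical function names (merge_low32, tstlt) instead of QEMU's softfloat and helper API: it shows how a 32-bit result is merged into the 64-bit GPR without clobbering the high half, and how per-element comparison results are folded into one condition-register field with the same merge formula the patch adds (evcmp_merge).

```c
#include <stdint.h>
#include <stdio.h>

/* Fix 1: an op with a 32-bit destination (efsadd, efsctsi, ...) writes only
 * the low 32 bits of the 64-bit GPR; the high half is preserved, mirroring
 * the andi/or sequence in the GEN_SPEFPUOP_*_32_32 macros below. */
static uint64_t merge_low32(uint64_t old_gpr, uint32_t result)
{
    return (old_gpr & 0xFFFFFFFF00000000ULL) | (uint64_t)result;
}

/* Fix 2: the scalar tests return a condition value (4 for true, 0 for false),
 * and the vector forms combine the two 32-bit element results with the merge
 * formula used by the patch. */
static uint32_t tstlt(float a, float b)
{
    return a < b ? 4 : 0;
}

/* Copied from the patch: bit 3 = high-element result, bit 2 = low-element
 * result, bit 1 = OR of both, bit 0 = AND of both (for 0/1 inputs). */
static uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

int main(void)
{
    uint64_t gpr = 0x1122334455667788ULL;
    /* High word 0x11223344 is kept, low word becomes the 32-bit result. */
    printf("%016llx\n", (unsigned long long)merge_low32(gpr, 0xDEADBEEFu));

    /* Per-element truth values (0/1 here for readability) merged into one
     * 4-bit condition field: prints "a". */
    printf("%x\n", evcmp_merge(tstlt(1.0f, 2.0f) != 0, tstlt(3.0f, 2.0f) != 0));
    return 0;
}
```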
Diffstat (limited to 'target-ppc')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | target-ppc/helper.h | 75 |
| -rw-r--r-- | target-ppc/op.c | 464 |
| -rw-r--r-- | target-ppc/op_helper.c | 565 |
| -rw-r--r-- | target-ppc/op_helper.h | 151 |
| -rw-r--r-- | target-ppc/translate.c | 433 |
5 files changed, 681 insertions, 1007 deletions
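
The shape of the conversion, condensed from the hunks below for readability (these are excerpts from the patch, not standalone code): the dyngen-style micro-ops in op.c, which operated on the implicit T0_64/T1_64 globals, are replaced by typed helpers declared in helper.h, defined in op_helper.c, and emitted directly by the translator with gen_helper_*. For example, for efsadd:

```c
/* Before: dyngen micro-op on implicit globals (op.c) */
void OPPROTO op_efsadd (void)
{
    T0_64 = _do_efsadd(T0_64, T1_64);
    RETURN();
}

/* After: typed helper declaration (helper.h) and definition (op_helper.c) */
DEF_HELPER_2(efsadd, i32, i32, i32)

uint32_t helper_efsadd (uint32_t op1, uint32_t op2)
{
    return efsadd(op1, op2);
}

/* ...called from translate.c (32-bit target case of GEN_SPEFPUOP_ARITH2_32_32) */
gen_helper_efsadd(cpu_gpr[rD(ctx->opcode)],
                  cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
```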
diff --git a/target-ppc/helper.h b/target-ppc/helper.h index 6178a5620d..3fe568169d 100644 --- a/target-ppc/helper.h +++ b/target-ppc/helper.h @@ -60,10 +60,77 @@ DEF_HELPER_3(fnmsub, i64, i64, i64, i64) DEF_HELPER_1(fabs, i64, i64) DEF_HELPER_1(fnabs, i64, i64) DEF_HELPER_1(fneg, i64, i64) -DEF_HELPER_1(fsqrt, i64, i64); -DEF_HELPER_1(fre, i64, i64); -DEF_HELPER_1(fres, i64, i64); -DEF_HELPER_1(frsqrte, i64, i64); +DEF_HELPER_1(fsqrt, i64, i64) +DEF_HELPER_1(fre, i64, i64) +DEF_HELPER_1(fres, i64, i64) +DEF_HELPER_1(frsqrte, i64, i64) DEF_HELPER_3(fsel, i64, i64, i64, i64) +DEF_HELPER_1(efscfsi, i32, i32) +DEF_HELPER_1(efscfui, i32, i32) +DEF_HELPER_1(efscfuf, i32, i32) +DEF_HELPER_1(efscfsf, i32, i32) +DEF_HELPER_1(efsctsi, i32, i32) +DEF_HELPER_1(efsctui, i32, i32) +DEF_HELPER_1(efsctsiz, i32, i32) +DEF_HELPER_1(efsctuiz, i32, i32) +DEF_HELPER_1(efsctsf, i32, i32) +DEF_HELPER_1(efsctuf, i32, i32) +DEF_HELPER_1(evfscfsi, i64, i64) +DEF_HELPER_1(evfscfui, i64, i64) +DEF_HELPER_1(evfscfuf, i64, i64) +DEF_HELPER_1(evfscfsf, i64, i64) +DEF_HELPER_1(evfsctsi, i64, i64) +DEF_HELPER_1(evfsctui, i64, i64) +DEF_HELPER_1(evfsctsiz, i64, i64) +DEF_HELPER_1(evfsctuiz, i64, i64) +DEF_HELPER_1(evfsctsf, i64, i64) +DEF_HELPER_1(evfsctuf, i64, i64) +DEF_HELPER_2(efsadd, i32, i32, i32) +DEF_HELPER_2(efssub, i32, i32, i32) +DEF_HELPER_2(efsmul, i32, i32, i32) +DEF_HELPER_2(efsdiv, i32, i32, i32) +DEF_HELPER_2(evfsadd, i64, i64, i64) +DEF_HELPER_2(evfssub, i64, i64, i64) +DEF_HELPER_2(evfsmul, i64, i64, i64) +DEF_HELPER_2(evfsdiv, i64, i64, i64) +DEF_HELPER_2(efststlt, i32, i32, i32) +DEF_HELPER_2(efststgt, i32, i32, i32) +DEF_HELPER_2(efststeq, i32, i32, i32) +DEF_HELPER_2(efscmplt, i32, i32, i32) +DEF_HELPER_2(efscmpgt, i32, i32, i32) +DEF_HELPER_2(efscmpeq, i32, i32, i32) +DEF_HELPER_2(evfststlt, i32, i64, i64) +DEF_HELPER_2(evfststgt, i32, i64, i64) +DEF_HELPER_2(evfststeq, i32, i64, i64) +DEF_HELPER_2(evfscmplt, i32, i64, i64) +DEF_HELPER_2(evfscmpgt, i32, i64, i64) +DEF_HELPER_2(evfscmpeq, i32, i64, i64) +DEF_HELPER_1(efdcfsi, i64, i32) +DEF_HELPER_1(efdcfsid, i64, i64) +DEF_HELPER_1(efdcfui, i64, i32) +DEF_HELPER_1(efdcfuid, i64, i64) +DEF_HELPER_1(efdctsi, i32, i64) +DEF_HELPER_1(efdctui, i32, i64) +DEF_HELPER_1(efdctsiz, i32, i64) +DEF_HELPER_1(efdctsidz, i64, i64) +DEF_HELPER_1(efdctuiz, i32, i64) +DEF_HELPER_1(efdctuidz, i64, i64) +DEF_HELPER_1(efdcfsf, i64, i32) +DEF_HELPER_1(efdcfuf, i64, i32) +DEF_HELPER_1(efdctsf, i32, i64) +DEF_HELPER_1(efdctuf, i32, i64) +DEF_HELPER_1(efscfd, i32, i64) +DEF_HELPER_1(efdcfs, i64, i32) +DEF_HELPER_2(efdadd, i64, i64, i64) +DEF_HELPER_2(efdsub, i64, i64, i64) +DEF_HELPER_2(efdmul, i64, i64, i64) +DEF_HELPER_2(efddiv, i64, i64, i64) +DEF_HELPER_2(efdtstlt, i32, i64, i64) +DEF_HELPER_2(efdtstgt, i32, i64, i64) +DEF_HELPER_2(efdtsteq, i32, i64, i64) +DEF_HELPER_2(efdcmplt, i32, i64, i64) +DEF_HELPER_2(efdcmpgt, i32, i64, i64) +DEF_HELPER_2(efdcmpeq, i32, i64, i64) + #include "def-helper.h" diff --git a/target-ppc/op.c b/target-ppc/op.c index 8b3dbcaa04..ee2f36ece7 100644 --- a/target-ppc/op.c +++ b/target-ppc/op.c @@ -960,468 +960,4 @@ void OPPROTO op_srli32_T1_64 (void) RETURN(); } -void OPPROTO op_evfssub (void) -{ - do_evfssub(); - RETURN(); -} - -void OPPROTO op_evfsadd (void) -{ - do_evfsadd(); - RETURN(); -} - -void OPPROTO op_evfsnabs (void) -{ - do_evfsnabs(); - RETURN(); -} - -void OPPROTO op_evfsabs (void) -{ - do_evfsabs(); - RETURN(); -} - -void OPPROTO op_evfsneg (void) -{ - do_evfsneg(); - RETURN(); -} - -void OPPROTO op_evfsdiv (void) -{ - 
do_evfsdiv(); - RETURN(); -} - -void OPPROTO op_evfsmul (void) -{ - do_evfsmul(); - RETURN(); -} - -void OPPROTO op_evfscmplt (void) -{ - do_evfscmplt(); - RETURN(); -} - -void OPPROTO op_evfscmpgt (void) -{ - do_evfscmpgt(); - RETURN(); -} - -void OPPROTO op_evfscmpeq (void) -{ - do_evfscmpeq(); - RETURN(); -} - -void OPPROTO op_evfscfsi (void) -{ - do_evfscfsi(); - RETURN(); -} - -void OPPROTO op_evfscfui (void) -{ - do_evfscfui(); - RETURN(); -} - -void OPPROTO op_evfscfsf (void) -{ - do_evfscfsf(); - RETURN(); -} - -void OPPROTO op_evfscfuf (void) -{ - do_evfscfuf(); - RETURN(); -} - -void OPPROTO op_evfsctsi (void) -{ - do_evfsctsi(); - RETURN(); -} - -void OPPROTO op_evfsctui (void) -{ - do_evfsctui(); - RETURN(); -} - -void OPPROTO op_evfsctsf (void) -{ - do_evfsctsf(); - RETURN(); -} - -void OPPROTO op_evfsctuf (void) -{ - do_evfsctuf(); - RETURN(); -} - -void OPPROTO op_evfsctuiz (void) -{ - do_evfsctuiz(); - RETURN(); -} - -void OPPROTO op_evfsctsiz (void) -{ - do_evfsctsiz(); - RETURN(); -} - -void OPPROTO op_evfststlt (void) -{ - do_evfststlt(); - RETURN(); -} - -void OPPROTO op_evfststgt (void) -{ - do_evfststgt(); - RETURN(); -} - -void OPPROTO op_evfststeq (void) -{ - do_evfststeq(); - RETURN(); -} - -void OPPROTO op_efssub (void) -{ - T0_64 = _do_efssub(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efsadd (void) -{ - T0_64 = _do_efsadd(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efsnabs (void) -{ - T0_64 = _do_efsnabs(T0_64); - RETURN(); -} - -void OPPROTO op_efsabs (void) -{ - T0_64 = _do_efsabs(T0_64); - RETURN(); -} - -void OPPROTO op_efsneg (void) -{ - T0_64 = _do_efsneg(T0_64); - RETURN(); -} - -void OPPROTO op_efsdiv (void) -{ - T0_64 = _do_efsdiv(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efsmul (void) -{ - T0_64 = _do_efsmul(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efscmplt (void) -{ - do_efscmplt(); - RETURN(); -} - -void OPPROTO op_efscmpgt (void) -{ - do_efscmpgt(); - RETURN(); -} - -void OPPROTO op_efscfd (void) -{ - do_efscfd(); - RETURN(); -} - -void OPPROTO op_efscmpeq (void) -{ - do_efscmpeq(); - RETURN(); -} - -void OPPROTO op_efscfsi (void) -{ - do_efscfsi(); - RETURN(); -} - -void OPPROTO op_efscfui (void) -{ - do_efscfui(); - RETURN(); -} - -void OPPROTO op_efscfsf (void) -{ - do_efscfsf(); - RETURN(); -} - -void OPPROTO op_efscfuf (void) -{ - do_efscfuf(); - RETURN(); -} - -void OPPROTO op_efsctsi (void) -{ - do_efsctsi(); - RETURN(); -} - -void OPPROTO op_efsctui (void) -{ - do_efsctui(); - RETURN(); -} - -void OPPROTO op_efsctsf (void) -{ - do_efsctsf(); - RETURN(); -} - -void OPPROTO op_efsctuf (void) -{ - do_efsctuf(); - RETURN(); -} - -void OPPROTO op_efsctsiz (void) -{ - do_efsctsiz(); - RETURN(); -} - -void OPPROTO op_efsctuiz (void) -{ - do_efsctuiz(); - RETURN(); -} - -void OPPROTO op_efststlt (void) -{ - T0 = _do_efststlt(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efststgt (void) -{ - T0 = _do_efststgt(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efststeq (void) -{ - T0 = _do_efststeq(T0_64, T1_64); - RETURN(); -} -void OPPROTO op_efdsub (void) -{ - CPU_DoubleU u1, u2; - u1.ll = T0_64; - u2.ll = T1_64; - u1.d = float64_sub(u1.d, u2.d, &env->spe_status); - T0_64 = u1.ll; - RETURN(); -} - -void OPPROTO op_efdadd (void) -{ - CPU_DoubleU u1, u2; - u1.ll = T0_64; - u2.ll = T1_64; - u1.d = float64_add(u1.d, u2.d, &env->spe_status); - T0_64 = u1.ll; - RETURN(); -} - -void OPPROTO op_efdcfsid (void) -{ - do_efdcfsi(); - RETURN(); -} - -void OPPROTO op_efdcfuid (void) -{ - do_efdcfui(); - RETURN(); -} - -void OPPROTO 
op_efdnabs (void) -{ - T0_64 |= 0x8000000000000000ULL; - RETURN(); -} - -void OPPROTO op_efdabs (void) -{ - T0_64 &= ~0x8000000000000000ULL; - RETURN(); -} - -void OPPROTO op_efdneg (void) -{ - T0_64 ^= 0x8000000000000000ULL; - RETURN(); -} - -void OPPROTO op_efddiv (void) -{ - CPU_DoubleU u1, u2; - u1.ll = T0_64; - u2.ll = T1_64; - u1.d = float64_div(u1.d, u2.d, &env->spe_status); - T0_64 = u1.ll; - RETURN(); -} - -void OPPROTO op_efdmul (void) -{ - CPU_DoubleU u1, u2; - u1.ll = T0_64; - u2.ll = T1_64; - u1.d = float64_mul(u1.d, u2.d, &env->spe_status); - T0_64 = u1.ll; - RETURN(); -} - -void OPPROTO op_efdctsidz (void) -{ - do_efdctsiz(); - RETURN(); -} - -void OPPROTO op_efdctuidz (void) -{ - do_efdctuiz(); - RETURN(); -} - -void OPPROTO op_efdcmplt (void) -{ - do_efdcmplt(); - RETURN(); -} - -void OPPROTO op_efdcmpgt (void) -{ - do_efdcmpgt(); - RETURN(); -} - -void OPPROTO op_efdcfs (void) -{ - do_efdcfs(); - RETURN(); -} - -void OPPROTO op_efdcmpeq (void) -{ - do_efdcmpeq(); - RETURN(); -} - -void OPPROTO op_efdcfsi (void) -{ - do_efdcfsi(); - RETURN(); -} - -void OPPROTO op_efdcfui (void) -{ - do_efdcfui(); - RETURN(); -} - -void OPPROTO op_efdcfsf (void) -{ - do_efdcfsf(); - RETURN(); -} - -void OPPROTO op_efdcfuf (void) -{ - do_efdcfuf(); - RETURN(); -} - -void OPPROTO op_efdctsi (void) -{ - do_efdctsi(); - RETURN(); -} - -void OPPROTO op_efdctui (void) -{ - do_efdctui(); - RETURN(); -} - -void OPPROTO op_efdctsf (void) -{ - do_efdctsf(); - RETURN(); -} - -void OPPROTO op_efdctuf (void) -{ - do_efdctuf(); - RETURN(); -} - -void OPPROTO op_efdctuiz (void) -{ - do_efdctuiz(); - RETURN(); -} - -void OPPROTO op_efdctsiz (void) -{ - do_efdctsiz(); - RETURN(); -} - -void OPPROTO op_efdtstlt (void) -{ - T0 = _do_efdtstlt(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efdtstgt (void) -{ - T0 = _do_efdtstgt(T0_64, T1_64); - RETURN(); -} - -void OPPROTO op_efdtsteq (void) -{ - T0 = _do_efdtsteq(T0_64, T1_64); - RETURN(); -} diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c index 4f7521bac7..6ea3ba3dae 100644 --- a/target-ppc/op_helper.c +++ b/target-ppc/op_helper.c @@ -1757,6 +1757,7 @@ void do_440_dlmzb (void) T0 = i; } +/*****************************************************************************/ /* SPE extension helpers */ /* Use a table to make this quicker */ static uint8_t hbrev[16] = { @@ -1800,36 +1801,8 @@ uint32_t helper_cntlzw32 (uint32_t val) return clz32(val); } -#define DO_SPE_OP1(name) \ -void do_ev##name (void) \ -{ \ - T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \ - (uint64_t)_do_e##name(T0_64); \ -} - -#define DO_SPE_OP2(name) \ -void do_ev##name (void) \ -{ \ - T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \ - (uint64_t)_do_e##name(T0_64, T1_64); \ -} - -/* Fixed-point vector comparisons */ -#define DO_SPE_CMP(name) \ -void do_ev##name (void) \ -{ \ - T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \ - T1_64 >> 32) << 32, \ - _do_e##name(T0_64, T1_64)); \ -} - -static always_inline uint32_t _do_evcmp_merge (int t0, int t1) -{ - return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); -} - -/* Single precision floating-point conversions from/to integer */ -static always_inline uint32_t _do_efscfsi (int32_t val) +/* Single-precision floating-point conversions */ +static always_inline uint32_t efscfsi (uint32_t val) { CPU_FloatU u; @@ -1838,7 +1811,7 @@ static always_inline uint32_t _do_efscfsi (int32_t val) return u.l; } -static always_inline uint32_t _do_efscfui (uint32_t val) +static always_inline uint32_t efscfui 
(uint32_t val) { CPU_FloatU u; @@ -1847,7 +1820,7 @@ static always_inline uint32_t _do_efscfui (uint32_t val) return u.l; } -static always_inline int32_t _do_efsctsi (uint32_t val) +static always_inline int32_t efsctsi (uint32_t val) { CPU_FloatU u; @@ -1859,7 +1832,7 @@ static always_inline int32_t _do_efsctsi (uint32_t val) return float32_to_int32(u.f, &env->spe_status); } -static always_inline uint32_t _do_efsctui (uint32_t val) +static always_inline uint32_t efsctui (uint32_t val) { CPU_FloatU u; @@ -1871,7 +1844,7 @@ static always_inline uint32_t _do_efsctui (uint32_t val) return float32_to_uint32(u.f, &env->spe_status); } -static always_inline int32_t _do_efsctsiz (uint32_t val) +static always_inline uint32_t efsctsiz (uint32_t val) { CPU_FloatU u; @@ -1883,7 +1856,7 @@ static always_inline int32_t _do_efsctsiz (uint32_t val) return float32_to_int32_round_to_zero(u.f, &env->spe_status); } -static always_inline uint32_t _do_efsctuiz (uint32_t val) +static always_inline uint32_t efsctuiz (uint32_t val) { CPU_FloatU u; @@ -1895,38 +1868,7 @@ static always_inline uint32_t _do_efsctuiz (uint32_t val) return float32_to_uint32_round_to_zero(u.f, &env->spe_status); } -void do_efscfsi (void) -{ - T0_64 = _do_efscfsi(T0_64); -} - -void do_efscfui (void) -{ - T0_64 = _do_efscfui(T0_64); -} - -void do_efsctsi (void) -{ - T0_64 = _do_efsctsi(T0_64); -} - -void do_efsctui (void) -{ - T0_64 = _do_efsctui(T0_64); -} - -void do_efsctsiz (void) -{ - T0_64 = _do_efsctsiz(T0_64); -} - -void do_efsctuiz (void) -{ - T0_64 = _do_efsctuiz(T0_64); -} - -/* Single precision floating-point conversion to/from fractional */ -static always_inline uint32_t _do_efscfsf (uint32_t val) +static always_inline uint32_t efscfsf (uint32_t val) { CPU_FloatU u; float32 tmp; @@ -1938,7 +1880,7 @@ static always_inline uint32_t _do_efscfsf (uint32_t val) return u.l; } -static always_inline uint32_t _do_efscfuf (uint32_t val) +static always_inline uint32_t efscfuf (uint32_t val) { CPU_FloatU u; float32 tmp; @@ -1950,7 +1892,7 @@ static always_inline uint32_t _do_efscfuf (uint32_t val) return u.l; } -static always_inline int32_t _do_efsctsf (uint32_t val) +static always_inline uint32_t efsctsf (uint32_t val) { CPU_FloatU u; float32 tmp; @@ -1965,7 +1907,7 @@ static always_inline int32_t _do_efsctsf (uint32_t val) return float32_to_int32(u.f, &env->spe_status); } -static always_inline uint32_t _do_efsctuf (uint32_t val) +static always_inline uint32_t efsctuf (uint32_t val) { CPU_FloatU u; float32 tmp; @@ -1980,102 +1922,220 @@ static always_inline uint32_t _do_efsctuf (uint32_t val) return float32_to_uint32(u.f, &env->spe_status); } -static always_inline int32_t _do_efsctsfz (uint32_t val) -{ - CPU_FloatU u; - float32 tmp; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(isnan(u.f))) - return 0; - tmp = uint64_to_float32(1ULL << 32, &env->spe_status); - u.f = float32_mul(u.f, tmp, &env->spe_status); - - return float32_to_int32_round_to_zero(u.f, &env->spe_status); +#define HELPER_SPE_SINGLE_CONV(name) \ +uint32_t helper_e##name (uint32_t val) \ +{ \ + return e##name(val); \ +} +/* efscfsi */ +HELPER_SPE_SINGLE_CONV(fscfsi); +/* efscfui */ +HELPER_SPE_SINGLE_CONV(fscfui); +/* efscfuf */ +HELPER_SPE_SINGLE_CONV(fscfuf); +/* efscfsf */ +HELPER_SPE_SINGLE_CONV(fscfsf); +/* efsctsi */ +HELPER_SPE_SINGLE_CONV(fsctsi); +/* efsctui */ +HELPER_SPE_SINGLE_CONV(fsctui); +/* efsctsiz */ +HELPER_SPE_SINGLE_CONV(fsctsiz); +/* efsctuiz */ +HELPER_SPE_SINGLE_CONV(fsctuiz); +/* efsctsf */ 
+HELPER_SPE_SINGLE_CONV(fsctsf); +/* efsctuf */ +HELPER_SPE_SINGLE_CONV(fsctuf); + +#define HELPER_SPE_VECTOR_CONV(name) \ +uint64_t helper_ev##name (uint64_t val) \ +{ \ + return ((uint64_t)e##name(val >> 32) << 32) | \ + (uint64_t)e##name(val); \ } +/* evfscfsi */ +HELPER_SPE_VECTOR_CONV(fscfsi); +/* evfscfui */ +HELPER_SPE_VECTOR_CONV(fscfui); +/* evfscfuf */ +HELPER_SPE_VECTOR_CONV(fscfuf); +/* evfscfsf */ +HELPER_SPE_VECTOR_CONV(fscfsf); +/* evfsctsi */ +HELPER_SPE_VECTOR_CONV(fsctsi); +/* evfsctui */ +HELPER_SPE_VECTOR_CONV(fsctui); +/* evfsctsiz */ +HELPER_SPE_VECTOR_CONV(fsctsiz); +/* evfsctuiz */ +HELPER_SPE_VECTOR_CONV(fsctuiz); +/* evfsctsf */ +HELPER_SPE_VECTOR_CONV(fsctsf); +/* evfsctuf */ +HELPER_SPE_VECTOR_CONV(fsctuf); -static always_inline uint32_t _do_efsctufz (uint32_t val) +/* Single-precision floating-point arithmetic */ +static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2) { - CPU_FloatU u; - float32 tmp; - - u.l = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(isnan(u.f))) - return 0; - tmp = uint64_to_float32(1ULL << 32, &env->spe_status); - u.f = float32_mul(u.f, tmp, &env->spe_status); - - return float32_to_uint32_round_to_zero(u.f, &env->spe_status); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + u1.f = float32_add(u1.f, u2.f, &env->spe_status); + return u1.l; } -void do_efscfsf (void) +static always_inline uint32_t efssub (uint32_t op1, uint32_t op2) { - T0_64 = _do_efscfsf(T0_64); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + u1.f = float32_sub(u1.f, u2.f, &env->spe_status); + return u1.l; } -void do_efscfuf (void) +static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2) { - T0_64 = _do_efscfuf(T0_64); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + u1.f = float32_mul(u1.f, u2.f, &env->spe_status); + return u1.l; } -void do_efsctsf (void) +static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2) { - T0_64 = _do_efsctsf(T0_64); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + u1.f = float32_div(u1.f, u2.f, &env->spe_status); + return u1.l; } -void do_efsctuf (void) +#define HELPER_SPE_SINGLE_ARITH(name) \ +uint32_t helper_e##name (uint32_t op1, uint32_t op2) \ +{ \ + return e##name(op1, op2); \ +} +/* efsadd */ +HELPER_SPE_SINGLE_ARITH(fsadd); +/* efssub */ +HELPER_SPE_SINGLE_ARITH(fssub); +/* efsmul */ +HELPER_SPE_SINGLE_ARITH(fsmul); +/* efsdiv */ +HELPER_SPE_SINGLE_ARITH(fsdiv); + +#define HELPER_SPE_VECTOR_ARITH(name) \ +uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \ +{ \ + return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \ + (uint64_t)e##name(op1, op2); \ +} +/* evfsadd */ +HELPER_SPE_VECTOR_ARITH(fsadd); +/* evfssub */ +HELPER_SPE_VECTOR_ARITH(fssub); +/* evfsmul */ +HELPER_SPE_VECTOR_ARITH(fsmul); +/* evfsdiv */ +HELPER_SPE_VECTOR_ARITH(fsdiv); + +/* Single-precision floating-point comparisons */ +static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2) { - T0_64 = _do_efsctuf(T0_64); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0; } -void do_efsctsfz (void) +static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2) { - T0_64 = _do_efsctsfz(T0_64); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4; } -void do_efsctufz (void) +static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2) { - T0_64 = _do_efsctufz(T0_64); + CPU_FloatU u1, u2; + u1.l = op1; + u2.l = op2; + return float32_eq(u1.f, u2.f, &env->spe_status) ? 
4 : 0; } -/* Double precision floating point helpers */ -static always_inline int _do_efdcmplt (uint64_t op1, uint64_t op2) +static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2) { /* XXX: TODO: test special values (NaN, infinites, ...) */ - return _do_efdtstlt(op1, op2); + return efststlt(op1, op2); } -static always_inline int _do_efdcmpgt (uint64_t op1, uint64_t op2) +static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2) { /* XXX: TODO: test special values (NaN, infinites, ...) */ - return _do_efdtstgt(op1, op2); + return efststgt(op1, op2); } -static always_inline int _do_efdcmpeq (uint64_t op1, uint64_t op2) +static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2) { /* XXX: TODO: test special values (NaN, infinites, ...) */ - return _do_efdtsteq(op1, op2); + return efststeq(op1, op2); } -void do_efdcmplt (void) +#define HELPER_SINGLE_SPE_CMP(name) \ +uint32_t helper_e##name (uint32_t op1, uint32_t op2) \ +{ \ + return e##name(op1, op2) << 2; \ +} +/* efststlt */ +HELPER_SINGLE_SPE_CMP(fststlt); +/* efststgt */ +HELPER_SINGLE_SPE_CMP(fststgt); +/* efststeq */ +HELPER_SINGLE_SPE_CMP(fststeq); +/* efscmplt */ +HELPER_SINGLE_SPE_CMP(fscmplt); +/* efscmpgt */ +HELPER_SINGLE_SPE_CMP(fscmpgt); +/* efscmpeq */ +HELPER_SINGLE_SPE_CMP(fscmpeq); + +static always_inline uint32_t evcmp_merge (int t0, int t1) { - T0 = _do_efdcmplt(T0_64, T1_64); + return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); } -void do_efdcmpgt (void) -{ - T0 = _do_efdcmpgt(T0_64, T1_64); +#define HELPER_VECTOR_SPE_CMP(name) \ +uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \ +{ \ + return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \ } +/* evfststlt */ +HELPER_VECTOR_SPE_CMP(fststlt); +/* evfststgt */ +HELPER_VECTOR_SPE_CMP(fststgt); +/* evfststeq */ +HELPER_VECTOR_SPE_CMP(fststeq); +/* evfscmplt */ +HELPER_VECTOR_SPE_CMP(fscmplt); +/* evfscmpgt */ +HELPER_VECTOR_SPE_CMP(fscmpgt); +/* evfscmpeq */ +HELPER_VECTOR_SPE_CMP(fscmpeq); -void do_efdcmpeq (void) +/* Double-precision floating-point conversion */ +uint64_t helper_efdcfsi (uint32_t val) { - T0 = _do_efdcmpeq(T0_64, T1_64); + CPU_DoubleU u; + + u.d = int32_to_float64(val, &env->spe_status); + + return u.ll; } -/* Double precision floating-point conversion to/from integer */ -static always_inline uint64_t _do_efdcfsi (int64_t val) +uint64_t helper_efdcfsid (uint64_t val) { CPU_DoubleU u; @@ -2084,7 +2144,16 @@ static always_inline uint64_t _do_efdcfsi (int64_t val) return u.ll; } -static always_inline uint64_t _do_efdcfui (uint64_t val) +uint64_t helper_efdcfui (uint32_t val) +{ + CPU_DoubleU u; + + u.d = uint32_to_float64(val, &env->spe_status); + + return u.ll; +} + +uint64_t helper_efdcfuid (uint64_t val) { CPU_DoubleU u; @@ -2093,7 +2162,7 @@ static always_inline uint64_t _do_efdcfui (uint64_t val) return u.ll; } -static always_inline int64_t _do_efdctsi (uint64_t val) +uint32_t helper_efdctsi (uint64_t val) { CPU_DoubleU u; @@ -2102,10 +2171,10 @@ static always_inline int64_t _do_efdctsi (uint64_t val) if (unlikely(isnan(u.d))) return 0; - return float64_to_int64(u.d, &env->spe_status); + return float64_to_int32(u.d, &env->spe_status); } -static always_inline uint64_t _do_efdctui (uint64_t val) +uint32_t helper_efdctui (uint64_t val) { CPU_DoubleU u; @@ -2114,10 +2183,10 @@ static always_inline uint64_t _do_efdctui (uint64_t val) if (unlikely(isnan(u.d))) return 0; - return float64_to_uint64(u.d, &env->spe_status); + return float64_to_uint32(u.d, &env->spe_status); } -static always_inline 
int64_t _do_efdctsiz (uint64_t val) +uint32_t helper_efdctsiz (uint64_t val) { CPU_DoubleU u; @@ -2126,10 +2195,10 @@ static always_inline int64_t _do_efdctsiz (uint64_t val) if (unlikely(isnan(u.d))) return 0; - return float64_to_int64_round_to_zero(u.d, &env->spe_status); + return float64_to_int32_round_to_zero(u.d, &env->spe_status); } -static always_inline uint64_t _do_efdctuiz (uint64_t val) +uint64_t helper_efdctsidz (uint64_t val) { CPU_DoubleU u; @@ -2138,41 +2207,34 @@ static always_inline uint64_t _do_efdctuiz (uint64_t val) if (unlikely(isnan(u.d))) return 0; - return float64_to_uint64_round_to_zero(u.d, &env->spe_status); + return float64_to_int64_round_to_zero(u.d, &env->spe_status); } -void do_efdcfsi (void) +uint32_t helper_efdctuiz (uint64_t val) { - T0_64 = _do_efdcfsi(T0_64); -} + CPU_DoubleU u; -void do_efdcfui (void) -{ - T0_64 = _do_efdcfui(T0_64); -} + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.d))) + return 0; -void do_efdctsi (void) -{ - T0_64 = _do_efdctsi(T0_64); + return float64_to_uint32_round_to_zero(u.d, &env->spe_status); } -void do_efdctui (void) +uint64_t helper_efdctuidz (uint64_t val) { - T0_64 = _do_efdctui(T0_64); -} + CPU_DoubleU u; -void do_efdctsiz (void) -{ - T0_64 = _do_efdctsiz(T0_64); -} + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.d))) + return 0; -void do_efdctuiz (void) -{ - T0_64 = _do_efdctuiz(T0_64); + return float64_to_uint64_round_to_zero(u.d, &env->spe_status); } -/* Double precision floating-point conversion to/from fractional */ -static always_inline uint64_t _do_efdcfsf (int64_t val) +uint64_t helper_efdcfsf (uint32_t val) { CPU_DoubleU u; float64 tmp; @@ -2184,7 +2246,7 @@ static always_inline uint64_t _do_efdcfsf (int64_t val) return u.ll; } -static always_inline uint64_t _do_efdcfuf (uint64_t val) +uint64_t helper_efdcfuf (uint32_t val) { CPU_DoubleU u; float64 tmp; @@ -2196,7 +2258,7 @@ static always_inline uint64_t _do_efdcfuf (uint64_t val) return u.ll; } -static always_inline int64_t _do_efdctsf (uint64_t val) +uint32_t helper_efdctsf (uint64_t val) { CPU_DoubleU u; float64 tmp; @@ -2211,7 +2273,7 @@ static always_inline int64_t _do_efdctsf (uint64_t val) return float64_to_int32(u.d, &env->spe_status); } -static always_inline uint64_t _do_efdctuf (uint64_t val) +uint32_t helper_efdctuf (uint64_t val) { CPU_DoubleU u; float64 tmp; @@ -2226,68 +2288,7 @@ static always_inline uint64_t _do_efdctuf (uint64_t val) return float64_to_uint32(u.d, &env->spe_status); } -static always_inline int64_t _do_efdctsfz (uint64_t val) -{ - CPU_DoubleU u; - float64 tmp; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(isnan(u.d))) - return 0; - tmp = uint64_to_float64(1ULL << 32, &env->spe_status); - u.d = float64_mul(u.d, tmp, &env->spe_status); - - return float64_to_int32_round_to_zero(u.d, &env->spe_status); -} - -static always_inline uint64_t _do_efdctufz (uint64_t val) -{ - CPU_DoubleU u; - float64 tmp; - - u.ll = val; - /* NaN are not treated the same way IEEE 754 does */ - if (unlikely(isnan(u.d))) - return 0; - tmp = uint64_to_float64(1ULL << 32, &env->spe_status); - u.d = float64_mul(u.d, tmp, &env->spe_status); - - return float64_to_uint32_round_to_zero(u.d, &env->spe_status); -} - -void do_efdcfsf (void) -{ - T0_64 = _do_efdcfsf(T0_64); -} - -void do_efdcfuf (void) -{ - T0_64 = _do_efdcfuf(T0_64); -} - -void do_efdctsf (void) -{ - T0_64 = _do_efdctsf(T0_64); -} - -void do_efdctuf (void) -{ - T0_64 = 
_do_efdctuf(T0_64); -} - -void do_efdctsfz (void) -{ - T0_64 = _do_efdctsfz(T0_64); -} - -void do_efdctufz (void) -{ - T0_64 = _do_efdctufz(T0_64); -} - -/* Floating point conversion between single and double precision */ -static always_inline uint32_t _do_efscfd (uint64_t val) +uint32_t helper_efscfd (uint64_t val) { CPU_DoubleU u1; CPU_FloatU u2; @@ -2298,7 +2299,7 @@ static always_inline uint32_t _do_efscfd (uint64_t val) return u2.l; } -static always_inline uint64_t _do_efdcfs (uint32_t val) +uint64_t helper_efdcfs (uint32_t val) { CPU_DoubleU u2; CPU_FloatU u1; @@ -2309,101 +2310,85 @@ static always_inline uint64_t _do_efdcfs (uint32_t val) return u2.ll; } -void do_efscfd (void) +/* Double precision fixed-point arithmetic */ +uint64_t helper_efdadd (uint64_t op1, uint64_t op2) { - T0_64 = _do_efscfd(T0_64); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + u1.d = float64_add(u1.d, u2.d, &env->spe_status); + return u1.ll; } -void do_efdcfs (void) +uint64_t helper_efdsub (uint64_t op1, uint64_t op2) { - T0_64 = _do_efdcfs(T0_64); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + u1.d = float64_sub(u1.d, u2.d, &env->spe_status); + return u1.ll; } -/* Single precision fixed-point vector arithmetic */ -/* evfsabs */ -DO_SPE_OP1(fsabs); -/* evfsnabs */ -DO_SPE_OP1(fsnabs); -/* evfsneg */ -DO_SPE_OP1(fsneg); -/* evfsadd */ -DO_SPE_OP2(fsadd); -/* evfssub */ -DO_SPE_OP2(fssub); -/* evfsmul */ -DO_SPE_OP2(fsmul); -/* evfsdiv */ -DO_SPE_OP2(fsdiv); - -/* Single-precision floating-point comparisons */ -static always_inline int _do_efscmplt (uint32_t op1, uint32_t op2) +uint64_t helper_efdmul (uint64_t op1, uint64_t op2) { - /* XXX: TODO: test special values (NaN, infinites, ...) */ - return _do_efststlt(op1, op2); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + u1.d = float64_mul(u1.d, u2.d, &env->spe_status); + return u1.ll; } -static always_inline int _do_efscmpgt (uint32_t op1, uint32_t op2) +uint64_t helper_efddiv (uint64_t op1, uint64_t op2) { - /* XXX: TODO: test special values (NaN, infinites, ...) */ - return _do_efststgt(op1, op2); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + u1.d = float64_div(u1.d, u2.d, &env->spe_status); + return u1.ll; } -static always_inline int _do_efscmpeq (uint32_t op1, uint32_t op2) +/* Double precision floating point helpers */ +uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2) { - /* XXX: TODO: test special values (NaN, infinites, ...) */ - return _do_efststeq(op1, op2); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0; } -void do_efscmplt (void) +uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2) { - T0 = _do_efscmplt(T0_64, T1_64); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4; } -void do_efscmpgt (void) +uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2) { - T0 = _do_efscmpgt(T0_64, T1_64); + CPU_DoubleU u1, u2; + u1.ll = op1; + u2.ll = op2; + return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0; } -void do_efscmpeq (void) +uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2) { - T0 = _do_efscmpeq(T0_64, T1_64); + /* XXX: TODO: test special values (NaN, infinites, ...) 
*/ + return helper_efdtstlt(op1, op2); } -/* Single-precision floating-point vector comparisons */ -/* evfscmplt */ -DO_SPE_CMP(fscmplt); -/* evfscmpgt */ -DO_SPE_CMP(fscmpgt); -/* evfscmpeq */ -DO_SPE_CMP(fscmpeq); -/* evfststlt */ -DO_SPE_CMP(fststlt); -/* evfststgt */ -DO_SPE_CMP(fststgt); -/* evfststeq */ -DO_SPE_CMP(fststeq); +uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return helper_efdtstgt(op1, op2); +} -/* Single-precision floating-point vector conversions */ -/* evfscfsi */ -DO_SPE_OP1(fscfsi); -/* evfscfui */ -DO_SPE_OP1(fscfui); -/* evfscfuf */ -DO_SPE_OP1(fscfuf); -/* evfscfsf */ -DO_SPE_OP1(fscfsf); -/* evfsctsi */ -DO_SPE_OP1(fsctsi); -/* evfsctui */ -DO_SPE_OP1(fsctui); -/* evfsctsiz */ -DO_SPE_OP1(fsctsiz); -/* evfsctuiz */ -DO_SPE_OP1(fsctuiz); -/* evfsctsf */ -DO_SPE_OP1(fsctsf); -/* evfsctuf */ -DO_SPE_OP1(fsctuf); +uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return helper_efdtsteq(op1, op2); +} /*****************************************************************************/ /* Softmmu support */ diff --git a/target-ppc/op_helper.h b/target-ppc/op_helper.h index 39b4cac780..30f9ca1eb6 100644 --- a/target-ppc/op_helper.h +++ b/target-ppc/op_helper.h @@ -125,155 +125,4 @@ void do_load_403_pb (int num); void do_store_403_pb (int num); #endif -/* SPE extension helpers */ -/* Single precision floating-point helpers */ -void do_efscmplt (void); -void do_efscmpgt (void); -void do_efscmpeq (void); -void do_efscfsf (void); -void do_efscfuf (void); -void do_efsctsf (void); -void do_efsctuf (void); - -void do_efscfsi (void); -void do_efscfui (void); -void do_efsctsi (void); -void do_efsctui (void); -void do_efsctsiz (void); -void do_efsctuiz (void); - -/* Double precision floating-point helpers */ -void do_efdcmplt (void); -void do_efdcmpgt (void); -void do_efdcmpeq (void); -void do_efdcfsf (void); -void do_efdcfuf (void); -void do_efdctsf (void); -void do_efdctuf (void); - -void do_efdcfsi (void); -void do_efdcfui (void); -void do_efdctsi (void); -void do_efdctui (void); -void do_efdctsiz (void); -void do_efdctuiz (void); - -void do_efdcfs (void); -void do_efscfd (void); - -/* Floating-point vector helpers */ -void do_evfsabs (void); -void do_evfsnabs (void); -void do_evfsneg (void); -void do_evfsadd (void); -void do_evfssub (void); -void do_evfsmul (void); -void do_evfsdiv (void); -void do_evfscmplt (void); -void do_evfscmpgt (void); -void do_evfscmpeq (void); -void do_evfststlt (void); -void do_evfststgt (void); -void do_evfststeq (void); -void do_evfscfsi (void); -void do_evfscfui (void); -void do_evfscfsf (void); -void do_evfscfuf (void); -void do_evfsctsf (void); -void do_evfsctuf (void); -void do_evfsctsi (void); -void do_evfsctui (void); -void do_evfsctsiz (void); -void do_evfsctuiz (void); - -/* SPE extension */ -/* Single precision floating-point helpers */ -static always_inline uint32_t _do_efsabs (uint32_t val) -{ - return val & ~0x80000000; -} -static always_inline uint32_t _do_efsnabs (uint32_t val) -{ - return val | 0x80000000; -} -static always_inline uint32_t _do_efsneg (uint32_t val) -{ - return val ^ 0x80000000; -} -static always_inline uint32_t _do_efsadd (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f = float32_add(u1.f, u2.f, &env->spe_status); - return u1.l; -} -static always_inline uint32_t _do_efssub (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - 
u2.l = op2; - u1.f = float32_sub(u1.f, u2.f, &env->spe_status); - return u1.l; -} -static always_inline uint32_t _do_efsmul (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f = float32_mul(u1.f, u2.f, &env->spe_status); - return u1.l; -} -static always_inline uint32_t _do_efsdiv (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - u1.f = float32_div(u1.f, u2.f, &env->spe_status); - return u1.l; -} - -static always_inline int _do_efststlt (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0; -} -static always_inline int _do_efststgt (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4; -} -static always_inline int _do_efststeq (uint32_t op1, uint32_t op2) -{ - CPU_FloatU u1, u2; - u1.l = op1; - u2.l = op2; - return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0; -} -/* Double precision floating-point helpers */ -static always_inline int _do_efdtstlt (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0; -} -static always_inline int _do_efdtstgt (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4; -} -static always_inline int _do_efdtsteq (uint64_t op1, uint64_t op2) -{ - CPU_DoubleU u1, u2; - u1.ll = op1; - u2.ll = op2; - return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0; -} #endif diff --git a/target-ppc/translate.c b/target-ppc/translate.c index e9ed627d78..aaec6d8e16 100644 --- a/target-ppc/translate.c +++ b/target-ppc/translate.c @@ -6861,81 +6861,261 @@ GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0x00000000, PPC_SPE); #endif /*** SPE floating-point extension ***/ -#define GEN_SPEFPUOP_CONV(name) \ +#if defined(TARGET_PPC64) +#define GEN_SPEFPUOP_CONV_32_32(name) \ static always_inline void gen_##name (DisasContext *ctx) \ { \ - gen_load_gpr64(cpu_T64[0], rB(ctx->opcode)); \ - gen_op_##name(); \ - gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]); \ + TCGv_i32 t0; \ + TCGv t1; \ + t0 = tcg_temp_new_i32(); \ + tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(t0, t0); \ + t1 = tcg_temp_new(); \ + tcg_gen_extu_i32_tl(t1, t0); \ + tcg_temp_free_i32(t0); \ + tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], \ + 0xFFFFFFFF00000000ULL); \ + tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t1); \ + tcg_temp_free(t1); \ } - -#define GEN_SPEFPUOP_ARITH1(name) \ +#define GEN_SPEFPUOP_CONV_32_64(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i32 t0; \ + TCGv t1; \ + t0 = tcg_temp_new_i32(); \ + gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \ + t1 = tcg_temp_new(); \ + tcg_gen_extu_i32_tl(t1, t0); \ + tcg_temp_free_i32(t0); \ + tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], \ + 0xFFFFFFFF00000000ULL); \ + tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t1); \ + tcg_temp_free(t1); \ +} +#define GEN_SPEFPUOP_CONV_64_32(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i32 t0 = tcg_temp_new_i32(); \ + tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \ + tcg_temp_free_i32(t0); \ +} +#define GEN_SPEFPUOP_CONV_64_64(name) \ +static always_inline void gen_##name (DisasContext 
*ctx) \ +{ \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ +} +#define GEN_SPEFPUOP_ARITH2_32_32(name) \ static always_inline void gen_##name (DisasContext *ctx) \ { \ + TCGv_i32 t0, t1; \ + TCGv_i64 t2; \ if (unlikely(!ctx->spe_enabled)) { \ GEN_EXCP_NO_AP(ctx); \ return; \ } \ - gen_load_gpr64(cpu_T64[0], rA(ctx->opcode)); \ - gen_op_##name(); \ - gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]); \ + t0 = tcg_temp_new_i32(); \ + t1 = tcg_temp_new_i32(); \ + tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(t0, t0, t1); \ + tcg_temp_free_i32(t1); \ + t2 = tcg_temp_new(); \ + tcg_gen_extu_i32_tl(t2, t0); \ + tcg_temp_free_i32(t0); \ + tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], \ + 0xFFFFFFFF00000000ULL); \ + tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t2); \ + tcg_temp_free(t2); \ } - -#define GEN_SPEFPUOP_ARITH2(name) \ +#define GEN_SPEFPUOP_ARITH2_64_64(name) \ static always_inline void gen_##name (DisasContext *ctx) \ { \ if (unlikely(!ctx->spe_enabled)) { \ GEN_EXCP_NO_AP(ctx); \ return; \ } \ - gen_load_gpr64(cpu_T64[0], rA(ctx->opcode)); \ - gen_load_gpr64(cpu_T64[1], rB(ctx->opcode)); \ - gen_op_##name(); \ - gen_store_gpr64(rD(ctx->opcode), cpu_T64[0]); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \ + cpu_gpr[rB(ctx->opcode)]); \ } - -#define GEN_SPEFPUOP_COMP(name) \ +#define GEN_SPEFPUOP_COMP_32(name) \ static always_inline void gen_##name (DisasContext *ctx) \ { \ - TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)]; \ + TCGv_i32 t0, t1; \ if (unlikely(!ctx->spe_enabled)) { \ GEN_EXCP_NO_AP(ctx); \ return; \ } \ - gen_load_gpr64(cpu_T64[0], rA(ctx->opcode)); \ - gen_load_gpr64(cpu_T64[1], rB(ctx->opcode)); \ - gen_op_##name(); \ - tcg_gen_trunc_tl_i32(crf, cpu_T[0]); \ - tcg_gen_andi_i32(crf, crf, 0xf); \ + t0 = tcg_temp_new_i32(); \ + t1 = tcg_temp_new_i32(); \ + tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \ + tcg_temp_free_i32(t0); \ + tcg_temp_free_i32(t1); \ +} +#define GEN_SPEFPUOP_COMP_64(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + GEN_EXCP_NO_AP(ctx); \ + return; \ + } \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ +} +#else +#define GEN_SPEFPUOP_CONV_32_32(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ } +#define GEN_SPEFPUOP_CONV_32_64(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i64 t0 = tcg_temp_new_i64(); \ + gen_load_gpr64(t0, rB(ctx->opcode)); \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \ + tcg_temp_free_i64(t0); \ +} +#define GEN_SPEFPUOP_CONV_64_32(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i64 t0 = tcg_temp_new_i64(); \ + gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \ + gen_store_gpr64(rD(ctx->opcode), t0); \ + tcg_temp_free_i64(t0); \ +} +#define GEN_SPEFPUOP_CONV_64_64(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i64 t0 = tcg_temp_new_i64(); \ + gen_load_gpr64(t0, rB(ctx->opcode)); \ + gen_helper_##name(t0, t0); \ + gen_store_gpr64(rD(ctx->opcode), t0); \ + tcg_temp_free_i64(t0); \ +} +#define GEN_SPEFPUOP_ARITH2_32_32(name) 
\ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + GEN_EXCP_NO_AP(ctx); \ + return; \ + } \ + gen_helper_##name(cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ +} +#define GEN_SPEFPUOP_ARITH2_64_64(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i64 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + GEN_EXCP_NO_AP(ctx); \ + return; \ + } \ + t0 = tcg_temp_new_i64(); \ + t1 = tcg_temp_new_i64(); \ + gen_load_gpr64(t0, rA(ctx->opcode)); \ + gen_load_gpr64(t1, rB(ctx->opcode)); \ + gen_helper_##name(t0, t0, t1); \ + gen_store_gpr64(rD(ctx->opcode), t0); \ + tcg_temp_free_i64(t0); \ + tcg_temp_free_i64(t1); \ +} +#define GEN_SPEFPUOP_COMP_32(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + GEN_EXCP_NO_AP(ctx); \ + return; \ + } \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ +} +#define GEN_SPEFPUOP_COMP_64(name) \ +static always_inline void gen_##name (DisasContext *ctx) \ +{ \ + TCGv_i64 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + GEN_EXCP_NO_AP(ctx); \ + return; \ + } \ + t0 = tcg_temp_new_i64(); \ + t1 = tcg_temp_new_i64(); \ + gen_load_gpr64(t0, rA(ctx->opcode)); \ + gen_load_gpr64(t1, rB(ctx->opcode)); \ + gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \ + tcg_temp_free_i64(t0); \ + tcg_temp_free_i64(t1); \ +} +#endif /* Single precision floating-point vectors operations */ /* Arithmetic */ -GEN_SPEFPUOP_ARITH2(evfsadd); -GEN_SPEFPUOP_ARITH2(evfssub); -GEN_SPEFPUOP_ARITH2(evfsmul); -GEN_SPEFPUOP_ARITH2(evfsdiv); -GEN_SPEFPUOP_ARITH1(evfsabs); -GEN_SPEFPUOP_ARITH1(evfsnabs); -GEN_SPEFPUOP_ARITH1(evfsneg); +GEN_SPEFPUOP_ARITH2_64_64(evfsadd); +GEN_SPEFPUOP_ARITH2_64_64(evfssub); +GEN_SPEFPUOP_ARITH2_64_64(evfsmul); +GEN_SPEFPUOP_ARITH2_64_64(evfsdiv); +static always_inline void gen_evfsabs (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } +#if defined(TARGET_PPC64) + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x8000000080000000LL); +#else + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x80000000); + tcg_gen_andi_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], ~0x80000000); +#endif +} +static always_inline void gen_evfsnabs (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } +#if defined(TARGET_PPC64) + tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000080000000LL); +#else + tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); + tcg_gen_ori_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); +#endif +} +static always_inline void gen_evfsneg (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } +#if defined(TARGET_PPC64) + tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000080000000LL); +#else + tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); + tcg_gen_xori_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); +#endif +} + /* Conversion */ -GEN_SPEFPUOP_CONV(evfscfui); -GEN_SPEFPUOP_CONV(evfscfsi); -GEN_SPEFPUOP_CONV(evfscfuf); -GEN_SPEFPUOP_CONV(evfscfsf); -GEN_SPEFPUOP_CONV(evfsctui); -GEN_SPEFPUOP_CONV(evfsctsi); -GEN_SPEFPUOP_CONV(evfsctuf); -GEN_SPEFPUOP_CONV(evfsctsf); 
-GEN_SPEFPUOP_CONV(evfsctuiz); -GEN_SPEFPUOP_CONV(evfsctsiz); +GEN_SPEFPUOP_CONV_64_64(evfscfui); +GEN_SPEFPUOP_CONV_64_64(evfscfsi); +GEN_SPEFPUOP_CONV_64_64(evfscfuf); +GEN_SPEFPUOP_CONV_64_64(evfscfsf); +GEN_SPEFPUOP_CONV_64_64(evfsctui); +GEN_SPEFPUOP_CONV_64_64(evfsctsi); +GEN_SPEFPUOP_CONV_64_64(evfsctuf); +GEN_SPEFPUOP_CONV_64_64(evfsctsf); +GEN_SPEFPUOP_CONV_64_64(evfsctuiz); +GEN_SPEFPUOP_CONV_64_64(evfsctsiz); + /* Comparison */ -GEN_SPEFPUOP_COMP(evfscmpgt); -GEN_SPEFPUOP_COMP(evfscmplt); -GEN_SPEFPUOP_COMP(evfscmpeq); -GEN_SPEFPUOP_COMP(evfststgt); -GEN_SPEFPUOP_COMP(evfststlt); -GEN_SPEFPUOP_COMP(evfststeq); +GEN_SPEFPUOP_COMP_64(evfscmpgt); +GEN_SPEFPUOP_COMP_64(evfscmplt); +GEN_SPEFPUOP_COMP_64(evfscmpeq); +GEN_SPEFPUOP_COMP_64(evfststgt); +GEN_SPEFPUOP_COMP_64(evfststlt); +GEN_SPEFPUOP_COMP_64(evfststeq); /* Opcodes definitions */ GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, PPC_SPEFPU); // @@ -6955,32 +7135,55 @@ GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, PPC_SPEFPU); // /* Single precision floating-point operations */ /* Arithmetic */ -GEN_SPEFPUOP_ARITH2(efsadd); -GEN_SPEFPUOP_ARITH2(efssub); -GEN_SPEFPUOP_ARITH2(efsmul); -GEN_SPEFPUOP_ARITH2(efsdiv); -GEN_SPEFPUOP_ARITH1(efsabs); -GEN_SPEFPUOP_ARITH1(efsnabs); -GEN_SPEFPUOP_ARITH1(efsneg); +GEN_SPEFPUOP_ARITH2_32_32(efsadd); +GEN_SPEFPUOP_ARITH2_32_32(efssub); +GEN_SPEFPUOP_ARITH2_32_32(efsmul); +GEN_SPEFPUOP_ARITH2_32_32(efsdiv); +static always_inline void gen_efsabs (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL); +} +static always_inline void gen_efsnabs (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } + tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); +} +static always_inline void gen_efsneg (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } + tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); +} + /* Conversion */ -GEN_SPEFPUOP_CONV(efscfui); -GEN_SPEFPUOP_CONV(efscfsi); -GEN_SPEFPUOP_CONV(efscfuf); -GEN_SPEFPUOP_CONV(efscfsf); -GEN_SPEFPUOP_CONV(efsctui); -GEN_SPEFPUOP_CONV(efsctsi); -GEN_SPEFPUOP_CONV(efsctuf); -GEN_SPEFPUOP_CONV(efsctsf); -GEN_SPEFPUOP_CONV(efsctuiz); -GEN_SPEFPUOP_CONV(efsctsiz); -GEN_SPEFPUOP_CONV(efscfd); +GEN_SPEFPUOP_CONV_32_32(efscfui); +GEN_SPEFPUOP_CONV_32_32(efscfsi); +GEN_SPEFPUOP_CONV_32_32(efscfuf); +GEN_SPEFPUOP_CONV_32_32(efscfsf); +GEN_SPEFPUOP_CONV_32_32(efsctui); +GEN_SPEFPUOP_CONV_32_32(efsctsi); +GEN_SPEFPUOP_CONV_32_32(efsctuf); +GEN_SPEFPUOP_CONV_32_32(efsctsf); +GEN_SPEFPUOP_CONV_32_32(efsctuiz); +GEN_SPEFPUOP_CONV_32_32(efsctsiz); +GEN_SPEFPUOP_CONV_32_64(efscfd); + /* Comparison */ -GEN_SPEFPUOP_COMP(efscmpgt); -GEN_SPEFPUOP_COMP(efscmplt); -GEN_SPEFPUOP_COMP(efscmpeq); -GEN_SPEFPUOP_COMP(efststgt); -GEN_SPEFPUOP_COMP(efststlt); -GEN_SPEFPUOP_COMP(efststeq); +GEN_SPEFPUOP_COMP_32(efscmpgt); +GEN_SPEFPUOP_COMP_32(efscmplt); +GEN_SPEFPUOP_COMP_32(efscmpeq); +GEN_SPEFPUOP_COMP_32(efststgt); +GEN_SPEFPUOP_COMP_32(efststlt); +GEN_SPEFPUOP_COMP_32(efststeq); /* Opcodes definitions */ GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, PPC_SPEFPU); // @@ -7000,37 +7203,71 @@ GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, PPC_SPEFPU); // /* Double precision floating-point operations */ /* Arithmetic */ -GEN_SPEFPUOP_ARITH2(efdadd); -GEN_SPEFPUOP_ARITH2(efdsub); 
-GEN_SPEFPUOP_ARITH2(efdmul); -GEN_SPEFPUOP_ARITH2(efddiv); -GEN_SPEFPUOP_ARITH1(efdabs); -GEN_SPEFPUOP_ARITH1(efdnabs); -GEN_SPEFPUOP_ARITH1(efdneg); +GEN_SPEFPUOP_ARITH2_64_64(efdadd); +GEN_SPEFPUOP_ARITH2_64_64(efdsub); +GEN_SPEFPUOP_ARITH2_64_64(efdmul); +GEN_SPEFPUOP_ARITH2_64_64(efddiv); +static always_inline void gen_efdabs (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } +#if defined(TARGET_PPC64) + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~0x8000000000000000LL); +#else + tcg_gen_andi_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], ~0x80000000); +#endif +} +static always_inline void gen_efdnabs (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } +#if defined(TARGET_PPC64) + tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000000000000LL); +#else + tcg_gen_ori_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); +#endif +} +static always_inline void gen_efdneg (DisasContext *ctx) +{ + if (unlikely(!ctx->spe_enabled)) { + GEN_EXCP_NO_AP(ctx); + return; + } +#if defined(TARGET_PPC64) + tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000000000000LL); +#else + tcg_gen_xori_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); +#endif +} + /* Conversion */ +GEN_SPEFPUOP_CONV_64_32(efdcfui); +GEN_SPEFPUOP_CONV_64_32(efdcfsi); +GEN_SPEFPUOP_CONV_64_32(efdcfuf); +GEN_SPEFPUOP_CONV_64_32(efdcfsf); +GEN_SPEFPUOP_CONV_32_64(efdctui); +GEN_SPEFPUOP_CONV_32_64(efdctsi); +GEN_SPEFPUOP_CONV_32_64(efdctuf); +GEN_SPEFPUOP_CONV_32_64(efdctsf); +GEN_SPEFPUOP_CONV_32_64(efdctuiz); +GEN_SPEFPUOP_CONV_32_64(efdctsiz); +GEN_SPEFPUOP_CONV_64_32(efdcfs); +GEN_SPEFPUOP_CONV_64_64(efdcfuid); +GEN_SPEFPUOP_CONV_64_64(efdcfsid); +GEN_SPEFPUOP_CONV_64_64(efdctuidz); +GEN_SPEFPUOP_CONV_64_64(efdctsidz); -GEN_SPEFPUOP_CONV(efdcfui); -GEN_SPEFPUOP_CONV(efdcfsi); -GEN_SPEFPUOP_CONV(efdcfuf); -GEN_SPEFPUOP_CONV(efdcfsf); -GEN_SPEFPUOP_CONV(efdctui); -GEN_SPEFPUOP_CONV(efdctsi); -GEN_SPEFPUOP_CONV(efdctuf); -GEN_SPEFPUOP_CONV(efdctsf); -GEN_SPEFPUOP_CONV(efdctuiz); -GEN_SPEFPUOP_CONV(efdctsiz); -GEN_SPEFPUOP_CONV(efdcfs); -GEN_SPEFPUOP_CONV(efdcfuid); -GEN_SPEFPUOP_CONV(efdcfsid); -GEN_SPEFPUOP_CONV(efdctuidz); -GEN_SPEFPUOP_CONV(efdctsidz); /* Comparison */ -GEN_SPEFPUOP_COMP(efdcmpgt); -GEN_SPEFPUOP_COMP(efdcmplt); -GEN_SPEFPUOP_COMP(efdcmpeq); -GEN_SPEFPUOP_COMP(efdtstgt); -GEN_SPEFPUOP_COMP(efdtstlt); -GEN_SPEFPUOP_COMP(efdtsteq); +GEN_SPEFPUOP_COMP_64(efdcmpgt); +GEN_SPEFPUOP_COMP_64(efdcmplt); +GEN_SPEFPUOP_COMP_64(efdcmpeq); +GEN_SPEFPUOP_COMP_64(efdtstgt); +GEN_SPEFPUOP_COMP_64(efdtstlt); +GEN_SPEFPUOP_COMP_64(efdtsteq); /* Opcodes definitions */ GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, PPC_SPEFPU); // |