path: root/target-ppc/op_helper.c
author    aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-11-10 11:10:23 +0000
committer aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-11-10 11:10:23 +0000
commit    57951c27422fc41586a5d4032e05bb300cced78d (patch)
tree      7f5e53762c9cfb56438bcc471e95b7c5a8f00bcd /target-ppc/op_helper.c
parent    970d622e8ab1de8fdf5762e23e92a2dea9d7d36c (diff)
target-ppc: convert most SPE integer instructions to TCG
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5668 c046a42c-6fe2-441c-8c8c-71466251a162
Diffstat (limited to 'target-ppc/op_helper.c')
-rw-r--r--  target-ppc/op_helper.c  160
1 file changed, 16 insertions, 144 deletions
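
Note: the diff below replaces the old dyngen-style helpers that operated on the implicit T0/T1/T0_64 registers with TCG-style helpers taking explicit arguments (for example do_brinc becomes helper_brinc). As a quick illustration of the bit-reversed increment that helper_brinc computes, here is a minimal standalone C sketch; it is not QEMU code: word_reverse is assumed to be the usual 32-bit bit reversal (its body is not part of this hunk), and MASKBITS keeps the same placeholder value used in op_helper.c.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define MASKBITS 16  /* same placeholder value as in op_helper.c */

/* Standard 32-bit bit reversal; assumed to match QEMU's word_reverse(). */
static uint32_t word_reverse(uint32_t val)
{
    val = ((val & 0x55555555) << 1) | ((val >> 1) & 0x55555555);
    val = ((val & 0x33333333) << 2) | ((val >> 2) & 0x33333333);
    val = ((val & 0x0F0F0F0F) << 4) | ((val >> 4) & 0x0F0F0F0F);
    val = ((val & 0x00FF00FF) << 8) | ((val >> 8) & 0x00FF00FF);
    return (val << 16) | (val >> 16);
}

/* Same algorithm as helper_brinc in the patch: increment arg1 in
 * bit-reversed order, but only within the bits selected by arg2. */
static uint32_t brinc(uint32_t arg1, uint32_t arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

int main(void)
{
    /* Walk an 8-entry buffer in bit-reversed order: prints 0 4 2 6 1 5 3 7. */
    uint32_t index = 0, mask = 0x7;
    int i;

    for (i = 0; i < 8; i++) {
        printf("%" PRIu32 "\n", index & mask);
        index = brinc(index, mask);
    }
    return 0;
}
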
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
index b1a88b5271..9582f4f835 100644
--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -1624,46 +1624,18 @@ static always_inline uint32_t word_reverse (uint32_t val)
}
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
-void do_brinc (void)
+target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
uint32_t a, b, d, mask;
mask = UINT32_MAX >> (32 - MASKBITS);
- a = T0 & mask;
- b = T1 & mask;
+ a = arg1 & mask;
+ b = arg2 & mask;
d = word_reverse(1 + word_reverse(a | ~b));
- T0 = (T0 & ~mask) | (d & b);
+ return (arg1 & ~mask) | (d & b);
}
-#define DO_SPE_OP2(name) \
-void do_ev##name (void) \
-{ \
- T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
- (uint64_t)_do_e##name(T0_64, T1_64); \
-}
-
-#define DO_SPE_OP1(name) \
-void do_ev##name (void) \
-{ \
- T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
- (uint64_t)_do_e##name(T0_64); \
-}
-
-/* Fixed-point vector arithmetic */
-static always_inline uint32_t _do_eabs (uint32_t val)
-{
- if ((val & 0x80000000) && val != 0x80000000)
- val -= val;
-
- return val;
-}
-
-static always_inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
-{
- return op1 + op2;
-}
-
-static always_inline int _do_ecntlsw (uint32_t val)
+uint32_t helper_cntlsw32 (uint32_t val)
{
if (val & 0x80000000)
return clz32(~val);
@@ -1671,88 +1643,23 @@ static always_inline int _do_ecntlsw (uint32_t val)
return clz32(val);
}
-static always_inline int _do_ecntlzw (uint32_t val)
+uint32_t helper_cntlzw32 (uint32_t val)
{
return clz32(val);
}
-static always_inline uint32_t _do_eneg (uint32_t val)
-{
- if (val != 0x80000000)
- val -= val;
-
- return val;
-}
-
-static always_inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
-{
- return rotl32(op1, op2);
-}
-
-static always_inline uint32_t _do_erndw (uint32_t val)
-{
- return (val + 0x000080000000) & 0xFFFF0000;
-}
-
-static always_inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
-{
- /* No error here: 6 bits are used */
- return op1 << (op2 & 0x3F);
-}
-
-static always_inline int32_t _do_esrws (int32_t op1, uint32_t op2)
-{
- /* No error here: 6 bits are used */
- return op1 >> (op2 & 0x3F);
-}
-
-static always_inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
-{
- /* No error here: 6 bits are used */
- return op1 >> (op2 & 0x3F);
-}
-
-static always_inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
-{
- return op2 - op1;
-}
-
-/* evabs */
-DO_SPE_OP1(abs);
-/* evaddw */
-DO_SPE_OP2(addw);
-/* evcntlsw */
-DO_SPE_OP1(cntlsw);
-/* evcntlzw */
-DO_SPE_OP1(cntlzw);
-/* evneg */
-DO_SPE_OP1(neg);
-/* evrlw */
-DO_SPE_OP2(rlw);
-/* evrnd */
-DO_SPE_OP1(rndw);
-/* evslw */
-DO_SPE_OP2(slw);
-/* evsrws */
-DO_SPE_OP2(srws);
-/* evsrwu */
-DO_SPE_OP2(srwu);
-/* evsubfw */
-DO_SPE_OP2(subfw);
-
-/* evsel is a little bit more complicated... */
-static always_inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
-{
- if (n)
- return op1;
- else
- return op2;
+#define DO_SPE_OP1(name) \
+void do_ev##name (void) \
+{ \
+ T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
+ (uint64_t)_do_e##name(T0_64); \
}
-void do_evsel (void)
-{
- T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
- (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
+#define DO_SPE_OP2(name) \
+void do_ev##name (void) \
+{ \
+ T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
+ (uint64_t)_do_e##name(T0_64, T1_64); \
}
/* Fixed-point vector comparisons */
@@ -1768,41 +1675,6 @@ static always_inline uint32_t _do_evcmp_merge (int t0, int t1)
{
return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
-static always_inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
-{
- return op1 == op2 ? 1 : 0;
-}
-
-static always_inline int _do_ecmpgts (int32_t op1, int32_t op2)
-{
- return op1 > op2 ? 1 : 0;
-}
-
-static always_inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
-{
- return op1 > op2 ? 1 : 0;
-}
-
-static always_inline int _do_ecmplts (int32_t op1, int32_t op2)
-{
- return op1 < op2 ? 1 : 0;
-}
-
-static always_inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
-{
- return op1 < op2 ? 1 : 0;
-}
-
-/* evcmpeq */
-DO_SPE_CMP(cmpeq);
-/* evcmpgts */
-DO_SPE_CMP(cmpgts);
-/* evcmpgtu */
-DO_SPE_CMP(cmpgtu);
-/* evcmplts */
-DO_SPE_CMP(cmplts);
-/* evcmpltu */
-DO_SPE_CMP(cmpltu);
/* Single precision floating-point conversions from/to integer */
static always_inline uint32_t _do_efscfsi (int32_t val)