-rw-r--r--  target/arm/iwmmxt_helper.c  234
1 file changed, 117 insertions(+), 117 deletions(-)
diff --git a/target/arm/iwmmxt_helper.c b/target/arm/iwmmxt_helper.c
index f6a4fc5b7f..24244d012c 100644
--- a/target/arm/iwmmxt_helper.c
+++ b/target/arm/iwmmxt_helper.c
@@ -27,30 +27,30 @@
/* iwMMXt macros extracted from GNU gdb. */
/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */
-#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n)))
-#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
-#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
-#define SIMD64_SET(v, n) ((v != 0) << (32 + (n)))
+#define SIMD8_SET(v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n)))
+#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
+#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
+#define SIMD64_SET(v, n) ((v != 0) << (32 + (n)))
/* Flags to pass as "n" above. */
-#define SIMD_NBIT -1
-#define SIMD_ZBIT -2
-#define SIMD_CBIT -3
-#define SIMD_VBIT -4
+#define SIMD_NBIT -1
+#define SIMD_ZBIT -2
+#define SIMD_CBIT -3
+#define SIMD_VBIT -4
/* Various status bit macros. */
-#define NBIT8(x) ((x) & 0x80)
-#define NBIT16(x) ((x) & 0x8000)
-#define NBIT32(x) ((x) & 0x80000000)
-#define NBIT64(x) ((x) & 0x8000000000000000ULL)
-#define ZBIT8(x) (((x) & 0xff) == 0)
-#define ZBIT16(x) (((x) & 0xffff) == 0)
-#define ZBIT32(x) (((x) & 0xffffffff) == 0)
-#define ZBIT64(x) (x == 0)
+#define NBIT8(x) ((x) & 0x80)
+#define NBIT16(x) ((x) & 0x8000)
+#define NBIT32(x) ((x) & 0x80000000)
+#define NBIT64(x) ((x) & 0x8000000000000000ULL)
+#define ZBIT8(x) (((x) & 0xff) == 0)
+#define ZBIT16(x) (((x) & 0xffff) == 0)
+#define ZBIT32(x) (((x) & 0xffffffff) == 0)
+#define ZBIT64(x) (x == 0)
/* Sign extension macros. */
-#define EXTEND8H(a) ((uint16_t) (int8_t) (a))
-#define EXTEND8(a) ((uint32_t) (int8_t) (a))
-#define EXTEND16(a) ((uint32_t) (int16_t) (a))
-#define EXTEND16S(a) ((int32_t) (int16_t) (a))
-#define EXTEND32(a) ((uint64_t) (int32_t) (a))
+#define EXTEND8H(a) ((uint16_t) (int8_t) (a))
+#define EXTEND8(a) ((uint32_t) (int8_t) (a))
+#define EXTEND16(a) ((uint32_t) (int16_t) (a))
+#define EXTEND16S(a) ((int32_t) (int16_t) (a))
+#define EXTEND32(a) ((uint64_t) (int32_t) (a))
uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b)
{
@@ -159,141 +159,141 @@ uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b)
#define NZBIT64(x) \
SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
-#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
+#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \
uint64_t a, uint64_t b) \
-{ \
- a = \
- (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
- (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
- (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
- (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
- NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
- NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
- NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+{ \
+ a = \
+ (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
+ (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
+ (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
+ (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
return a; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \
uint64_t a, uint64_t b) \
-{ \
- a = \
- (((a >> SH0) & 0xffff) << 0) | \
- (((b >> SH0) & 0xffff) << 16) | \
- (((a >> SH2) & 0xffff) << 32) | \
- (((b >> SH2) & 0xffff) << 48); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \
- NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \
+{ \
+ a = \
+ (((a >> SH0) & 0xffff) << 0) | \
+ (((b >> SH0) & 0xffff) << 16) | \
+ (((a >> SH2) & 0xffff) << 32) | \
+ (((b >> SH2) & 0xffff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \
+ NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \
return a; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \
uint64_t a, uint64_t b) \
-{ \
- a = \
- (((a >> SH0) & 0xffffffff) << 0) | \
- (((b >> SH0) & 0xffffffff) << 32); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+{ \
+ a = \
+ (((a >> SH0) & 0xffffffff) << 0) | \
+ (((b >> SH0) & 0xffffffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
return a; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \
uint64_t x) \
-{ \
- x = \
- (((x >> SH0) & 0xff) << 0) | \
- (((x >> SH1) & 0xff) << 16) | \
- (((x >> SH2) & 0xff) << 32) | \
- (((x >> SH3) & 0xff) << 48); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
- NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+{ \
+ x = \
+ (((x >> SH0) & 0xff) << 0) | \
+ (((x >> SH1) & 0xff) << 16) | \
+ (((x >> SH2) & 0xff) << 32) | \
+ (((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
return x; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \
uint64_t x) \
-{ \
- x = \
- (((x >> SH0) & 0xffff) << 0) | \
- (((x >> SH2) & 0xffff) << 32); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+{ \
+ x = \
+ (((x >> SH0) & 0xffff) << 0) | \
+ (((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
return x; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \
uint64_t x) \
-{ \
- x = (((x >> SH0) & 0xffffffff) << 0); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+{ \
+ x = (((x >> SH0) & 0xffffffff) << 0); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
return x; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \
uint64_t x) \
-{ \
- x = \
- ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
- ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
- ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
- ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
- NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+{ \
+ x = \
+ ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
+ ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
+ ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
+ ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
return x; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \
uint64_t x) \
-{ \
- x = \
- ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
- ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+{ \
+ x = \
+ ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
+ ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
return x; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \
uint64_t x) \
-{ \
- x = EXTEND32((x >> SH0) & 0xffffffff); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+{ \
+ x = EXTEND32((x >> SH0) & 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
return x; \
}
IWMMXT_OP_UNPACK(l, 0, 8, 16, 24)
IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
-#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
+#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \
uint64_t a, uint64_t b) \
-{ \
- a = \
- CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
- CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
- CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
- CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
- NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
- NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
- NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+{ \
+ a = \
+ CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
+ CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
+ CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
+ CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
return a; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \
uint64_t a, uint64_t b) \
-{ \
- a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
- CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
- NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+{ \
+ a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
+ CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
return a; \
-} \
+} \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \
uint64_t a, uint64_t b) \
-{ \
- a = CMP(0, Tl, O, 0xffffffff) | \
- CMP(32, Tl, O, 0xffffffff); \
- env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
- NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+{ \
+ a = CMP(0, Tl, O, 0xffffffff) | \
+ CMP(32, Tl, O, 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
return a; \
}
#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
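
The diff above only realigns trailing backslashes and macro spacing; the 117 removed and 117 added lines are identical apart from whitespace. For readers unfamiliar with these macros, the standalone sketch below shows how the per-lane N/Z bits are folded into a wCASF-style status word for an 8x8-bit vector. SIMD8_SET, SIMD_NBIT, SIMD_ZBIT, NBIT8 and ZBIT8 are copied from the first hunk; NZBIT8 is reconstructed here by analogy with the visible NZBIT64 definition and may differ in detail from the one in the file, and main() plus the test input are demo scaffolding only.

/*
 * Standalone illustration (not part of the patch above): folding per-lane
 * negative/zero flags into a wCASF-style status word, lane by lane.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Cast added so the shift is well defined when OR-ed into a 64-bit word. */
#define SIMD8_SET(v, n, b)  ((uint64_t)((v) != 0) << ((((b) + 1) * 4) + (n)))
#define SIMD_NBIT           -1
#define SIMD_ZBIT           -2
#define NBIT8(x)            ((x) & 0x80)
#define ZBIT8(x)            (((x) & 0xff) == 0)

/* Per-lane N and Z flags, packed into lane i's nibble of the status word
 * (reconstructed by analogy with NZBIT64 shown in the diff context). */
#define NZBIT8(x, i)                        \
    (SIMD8_SET(NBIT8(x), SIMD_NBIT, i) |    \
     SIMD8_SET(ZBIT8(x), SIMD_ZBIT, i))

int main(void)
{
    /* Lane 0 holds 0x80 (negative), lanes 1-7 hold 0x00 (zero). */
    uint64_t a = 0x0000000000000080ULL;
    uint64_t wcasf = 0;

    for (int i = 0; i < 8; i++) {
        wcasf |= NZBIT8(a >> (i * 8), i);
    }
    /* Prints 0x0000000044444448: N set for lane 0, Z set for lanes 1-7. */
    printf("wCASF = 0x%016" PRIx64 "\n", wcasf);
    return 0;
}

The per-lane nibble layout (N at bit ((b) + 1) * 4 - 1, Z one bit below it) is why the helper bodies are written as long aligned macro chains; keeping those continuation backslashes in a single column is all this whitespace-only change does.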