author     Richard Henderson <rth@twiddle.net>   2011-08-17 14:11:46 -0700
committer  Blue Swirl <blauwirbel@gmail.com>     2011-08-21 18:52:24 +0000
commit     25c4d9cc845fb58f624dae8c0f690e20c70e7a1d (patch)
tree       79ff5484f3fe9995d2e28c9e0931fe855b099a6c /tcg/optimize.c
parent     8399ad59e73a0f48af3edef62f021e2cb6220e12 (diff)
tcg: Always define all of the TCGOpcode enum members.
By always defining these symbols, we can eliminate a lot of ifdefs.  To allow
this to be checked reliably, the semantics of the TCG_TARGET_HAS_* macros must
be changed from def/undef to true/false.  This allows even more ifdefs to be
removed, converting them into C if statements.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
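As a concrete illustration of the conversion the message describes, here is a
minimal stand-alone sketch.  It is not code from this patch; only the
TCG_TARGET_HAS_rot_i32 macro name is taken from the real tree, and the
function below is invented.  Under the new convention the feature macro is
always defined to 0 or 1, so an #ifdef guard becomes an ordinary C if
statement that the compiler folds away (in optimize.c itself, as the diff
below shows, the guards are simply dropped, because every opcode case can now
always be compiled):

    #include <stdio.h>

    /* Sketch only: TCG_TARGET_HAS_rot_i32 mirrors the real macro name,
     * but the function below is an invented stand-in, not QEMU code. */

    /* Old convention: a backend either #defines the macro or leaves it
     * undefined, so generic code needs "#ifdef TCG_TARGET_HAS_rot_i32". */

    /* New convention: every backend defines the macro, to either 0 or 1. */
    #define TCG_TARGET_HAS_rot_i32 1

    static void fold_rotate_example(void)
    {
        /* What used to be "#ifdef ... #endif" is now plain C.  When the
         * macro expands to 0 the body is still parsed and type-checked,
         * but the compiler emits no code for the dead branch. */
        if (TCG_TARGET_HAS_rot_i32) {
            printf("host has 32-bit rotates: keep the rot case\n");
        } else {
            printf("no rotate support: fall back to shift/or\n");
        }
    }

    int main(void)
    {
        fold_rotate_example();
        return 0;
    }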
Diffstat (limited to 'tcg/optimize.c')
-rw-r--r--  tcg/optimize.c | 156
1 file changed, 22 insertions(+), 134 deletions(-)
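The first hunk below also simplifies CASE_OP_32_64, which uses glue() token
pasting to emit both the _i32 and _i64 case labels from a single name; with
the 64-bit opcodes now always defined, the 32-bit-only variant of the macro
can go.  Here is a self-contained sketch of the same trick, with invented
enum members standing in for the real TCGOpcode values:

    #include <stdio.h>

    /* Same token-pasting idiom as QEMU's glue()/CASE_OP_32_64; the enum
     * members below are made-up stand-ins, not the real TCGOpcode list. */
    #define xglue(a, b) a##b
    #define glue(a, b) xglue(a, b)

    #define CASE_OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32): \
        glue(glue(case INDEX_op_, x), _i64)

    enum {
        INDEX_op_shl_i32,
        INDEX_op_shl_i64,
        INDEX_op_mov_i32,
    };

    static const char *classify(int op)
    {
        switch (op) {
        CASE_OP_32_64(shl):   /* expands to both _i32 and _i64 labels */
            return "shift-left";
        default:
            return "other";
        }
    }

    int main(void)
    {
        printf("%s / %s\n",
               classify(INDEX_op_shl_i64), classify(INDEX_op_mov_i32));
        return 0;
    }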
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 98c7e3f41c..32f928f980 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -31,14 +31,9 @@
#include "qemu-common.h"
#include "tcg-op.h"
-#if TCG_TARGET_REG_BITS == 64
#define CASE_OP_32_64(x) \
glue(glue(case INDEX_op_, x), _i32): \
glue(glue(case INDEX_op_, x), _i64)
-#else
-#define CASE_OP_32_64(x) \
- glue(glue(case INDEX_op_, x), _i32)
-#endif
typedef enum {
TCG_TEMP_UNDEF = 0,
@@ -103,10 +98,8 @@ static int op_to_movi(int op)
switch (op_bits(op)) {
case 32:
return INDEX_op_movi_i32;
-#if TCG_TARGET_REG_BITS == 64
case 64:
return INDEX_op_movi_i64;
-#endif
default:
fprintf(stderr, "op_to_movi: unexpected return value of "
"function op_bits.\n");
@@ -155,10 +148,8 @@ static int op_to_mov(int op)
switch (op_bits(op)) {
case 32:
return INDEX_op_mov_i32;
-#if TCG_TARGET_REG_BITS == 64
case 64:
return INDEX_op_mov_i64;
-#endif
default:
fprintf(stderr, "op_to_mov: unexpected return value of "
"function op_bits.\n");
@@ -190,124 +181,57 @@ static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
case INDEX_op_shl_i32:
return (uint32_t)x << (uint32_t)y;
-#if TCG_TARGET_REG_BITS == 64
case INDEX_op_shl_i64:
return (uint64_t)x << (uint64_t)y;
-#endif
case INDEX_op_shr_i32:
return (uint32_t)x >> (uint32_t)y;
-#if TCG_TARGET_REG_BITS == 64
case INDEX_op_shr_i64:
return (uint64_t)x >> (uint64_t)y;
-#endif
case INDEX_op_sar_i32:
return (int32_t)x >> (int32_t)y;
-#if TCG_TARGET_REG_BITS == 64
case INDEX_op_sar_i64:
return (int64_t)x >> (int64_t)y;
-#endif
-#ifdef TCG_TARGET_HAS_rot_i32
case INDEX_op_rotr_i32:
-#if TCG_TARGET_REG_BITS == 64
- x &= 0xffffffff;
- y &= 0xffffffff;
-#endif
- x = (x << (32 - y)) | (x >> y);
+ x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
return x;
-#endif
-#ifdef TCG_TARGET_HAS_rot_i64
-#if TCG_TARGET_REG_BITS == 64
case INDEX_op_rotr_i64:
- x = (x << (64 - y)) | (x >> y);
+ x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
return x;
-#endif
-#endif
-#ifdef TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
-#if TCG_TARGET_REG_BITS == 64
- x &= 0xffffffff;
- y &= 0xffffffff;
-#endif
- x = (x << y) | (x >> (32 - y));
+ x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
return x;
-#endif
-#ifdef TCG_TARGET_HAS_rot_i64
-#if TCG_TARGET_REG_BITS == 64
case INDEX_op_rotl_i64:
- x = (x << y) | (x >> (64 - y));
+ x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
return x;
-#endif
-#endif
-
-#if defined(TCG_TARGET_HAS_not_i32) || defined(TCG_TARGET_HAS_not_i64)
-#ifdef TCG_TARGET_HAS_not_i32
- case INDEX_op_not_i32:
-#endif
-#ifdef TCG_TARGET_HAS_not_i64
- case INDEX_op_not_i64:
-#endif
+
+ CASE_OP_32_64(not):
return ~x;
-#endif
-
-#if defined(TCG_TARGET_HAS_ext8s_i32) || defined(TCG_TARGET_HAS_ext8s_i64)
-#ifdef TCG_TARGET_HAS_ext8s_i32
- case INDEX_op_ext8s_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext8s_i64
- case INDEX_op_ext8s_i64:
-#endif
+
+ CASE_OP_32_64(ext8s):
return (int8_t)x;
-#endif
-
-#if defined(TCG_TARGET_HAS_ext16s_i32) || defined(TCG_TARGET_HAS_ext16s_i64)
-#ifdef TCG_TARGET_HAS_ext16s_i32
- case INDEX_op_ext16s_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext16s_i64
- case INDEX_op_ext16s_i64:
-#endif
+
+ CASE_OP_32_64(ext16s):
return (int16_t)x;
-#endif
-
-#if defined(TCG_TARGET_HAS_ext8u_i32) || defined(TCG_TARGET_HAS_ext8u_i64)
-#ifdef TCG_TARGET_HAS_ext8u_i32
- case INDEX_op_ext8u_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext8u_i64
- case INDEX_op_ext8u_i64:
-#endif
+
+ CASE_OP_32_64(ext8u):
return (uint8_t)x;
-#endif
-
-#if defined(TCG_TARGET_HAS_ext16u_i32) || defined(TCG_TARGET_HAS_ext16u_i64)
-#ifdef TCG_TARGET_HAS_ext16u_i32
- case INDEX_op_ext16u_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext16u_i64
- case INDEX_op_ext16u_i64:
-#endif
+
+ CASE_OP_32_64(ext16u):
return (uint16_t)x;
-#endif
-#if TCG_TARGET_REG_BITS == 64
-#ifdef TCG_TARGET_HAS_ext32s_i64
case INDEX_op_ext32s_i64:
return (int32_t)x;
-#endif
-#ifdef TCG_TARGET_HAS_ext32u_i64
case INDEX_op_ext32u_i64:
return (uint32_t)x;
-#endif
-#endif
default:
fprintf(stderr,
@@ -319,11 +243,9 @@ static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
static TCGArg do_constant_folding(int op, TCGArg x, TCGArg y)
{
TCGArg res = do_constant_folding_2(op, x, y);
-#if TCG_TARGET_REG_BITS == 64
if (op_bits(op) == 32) {
res &= 0xffffffff;
}
-#endif
return res;
}
@@ -385,14 +307,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
CASE_OP_32_64(shl):
CASE_OP_32_64(shr):
CASE_OP_32_64(sar):
-#ifdef TCG_TARGET_HAS_rot_i32
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
-#endif
-#ifdef TCG_TARGET_HAS_rot_i64
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
-#endif
+ CASE_OP_32_64(rotl):
+ CASE_OP_32_64(rotr):
if (temps[args[1]].state == TCG_TEMP_CONST) {
/* Proceed with possible constant folding. */
break;
@@ -473,34 +389,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
args += 2;
break;
CASE_OP_32_64(not):
-#ifdef TCG_TARGET_HAS_ext8s_i32
- case INDEX_op_ext8s_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext8s_i64
- case INDEX_op_ext8s_i64:
-#endif
-#ifdef TCG_TARGET_HAS_ext16s_i32
- case INDEX_op_ext16s_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext16s_i64
- case INDEX_op_ext16s_i64:
-#endif
-#ifdef TCG_TARGET_HAS_ext8u_i32
- case INDEX_op_ext8u_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext8u_i64
- case INDEX_op_ext8u_i64:
-#endif
-#ifdef TCG_TARGET_HAS_ext16u_i32
- case INDEX_op_ext16u_i32:
-#endif
-#ifdef TCG_TARGET_HAS_ext16u_i64
- case INDEX_op_ext16u_i64:
-#endif
-#if TCG_TARGET_REG_BITS == 64
+ CASE_OP_32_64(ext8s):
+ CASE_OP_32_64(ext8u):
+ CASE_OP_32_64(ext16s):
+ CASE_OP_32_64(ext16u):
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
-#endif
if (temps[args[1]].state == TCG_TEMP_CONST) {
gen_opc_buf[op_index] = op_to_movi(op);
tmp = do_constant_folding(op, temps[args[1]].val, 0);
@@ -525,14 +419,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
CASE_OP_32_64(shl):
CASE_OP_32_64(shr):
CASE_OP_32_64(sar):
-#ifdef TCG_TARGET_HAS_rot_i32
- case INDEX_op_rotl_i32:
- case INDEX_op_rotr_i32:
-#endif
-#ifdef TCG_TARGET_HAS_rot_i64
- case INDEX_op_rotl_i64:
- case INDEX_op_rotr_i64:
-#endif
+ CASE_OP_32_64(rotl):
+ CASE_OP_32_64(rotr):
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[2]].state == TCG_TEMP_CONST) {
gen_opc_buf[op_index] = op_to_movi(op);