author     Richard Henderson <richard.henderson@linaro.org>  2021-08-26 09:03:59 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2021-10-28 20:55:07 -0700
commit     faa2e10045ef82f4a1a24a7f69e285736143b469 (patch)
tree       28093111895c2b3829c30ccb9d6d8aab2caa3a0b /tcg/optimize.c
parent     18cf3d07a2556700895c626754937e90f8e972cf (diff)
tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
This "garbage" setting pre-dates the addition of the type changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64, and INDEX_op_extr{l,h}_i64_i32. So now we have a definitive points at which to adjust z_mask to eliminate such bits from the 32-bit operands. Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Reviewed-by: Luis Pires <luis.pires@eldorado.org.br> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg/optimize.c')
-rw-r--r--  tcg/optimize.c  35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e42f5a145f..e0abf769d0 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -124,10 +124,6 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
-            /* High bits of a 32-bit quantity are garbage. */
-            ti->z_mask |= ~0xffffffffull;
-        }
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
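As an aside, not part of the patch: z_mask records which bits of a temp may be nonzero. With the deleted lines, an I32 constant's z_mask had all high bits forced on, deliberately discarding information; after the change it simply mirrors the sign-extended value. A minimal C illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t val = 0xff;                    /* small I32 constant */
        uint64_t z_new = val;                   /* after the patch */
        uint64_t z_old = val | ~0xffffffffull;  /* old "garbage" form */

        /* The old mask claimed bits 32..63 might be anything. */
        printf("new z_mask %#llx, old z_mask %#llx\n",
               (unsigned long long)z_new, (unsigned long long)z_old);
        return 0;
    }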
@@ -192,7 +188,6 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     TCGTemp *src_ts = arg_temp(src);
     TempOptInfo *di;
     TempOptInfo *si;
-    uint64_t z_mask;
     TCGOpcode new_op;
 
     if (ts_are_copies(dst_ts, src_ts)) {
@@ -224,12 +219,7 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     op->args[0] = dst;
     op->args[1] = src;
 
-    z_mask = si->z_mask;
-    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
-        /* High bits of the destination are now garbage. */
-        z_mask |= ~0xffffffffull;
-    }
-    di->z_mask = z_mask;
+    di->z_mask = si->z_mask;
 
     if (src_ts->type == dst_ts->type) {
         TempOptInfo *ni = ts_info(si->next_copy);
@@ -247,9 +237,14 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
-    /* Convert movi to mov with constant temp. */
-    TCGTemp *tv = tcg_constant_internal(ctx->type, val);
+    TCGTemp *tv;
 
+    if (ctx->type == TCG_TYPE_I32) {
+        val = (int32_t)val;
+    }
+
+    /* Convert movi to mov with constant temp. */
+    tv = tcg_constant_internal(ctx->type, val);
     init_ts_info(ctx, tv);
     return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }
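The (int32_t)val cast above canonicalizes every I32 constant to its sign-extended 64-bit image, so tcg_constant_internal sees a single spelling per constant and init_ts_info's "ti->z_mask = ts->val" stays accurate. A sketch under those assumptions (movi_canon is a hypothetical stand-in, plain C rather than QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the cast added to tcg_opt_gen_movi. */
    static uint64_t movi_canon(uint64_t val)
    {
        return (uint64_t)(int64_t)(int32_t)val;   /* val = (int32_t)val */
    }

    int main(void)
    {
        /* A 32-bit -1 may arrive zero-extended from a 32-bit source... */
        uint64_t from_u32 = 0xffffffffull;
        /* ...or already sign-extended from a 64-bit computation. */
        uint64_t from_i64 = (uint64_t)-1;

        /* After canonicalization both name the same constant temp. */
        assert(movi_canon(from_u32) == movi_canon(from_i64));
        return 0;
    }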
@@ -721,14 +716,16 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     uint64_t z_mask = ctx->z_mask;
 
     /*
-     * 32-bit ops generate 32-bit results. For the result is zero test
-     * below, we can ignore high bits, but for further optimizations we
-     * need to record that the high bits contain garbage.
+     * 32-bit ops generate 32-bit results, which for the purpose of
+     * simplifying tcg are sign-extended. Certainly that's how we
+     * represent our constants elsewhere. Note that the bits will
+     * be reset properly for a 64-bit value when encountering the
+     * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
-        a_mask &= MAKE_64BIT_MASK(0, 32);
-        z_mask &= MAKE_64BIT_MASK(0, 32);
+        a_mask = (int32_t)a_mask;
+        z_mask = (int32_t)z_mask;
+        ctx->z_mask = z_mask;
     }
 
     if (z_mask == 0) {
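The (int32_t) folds above also keep the "z_mask == 0" test working for 32-bit ops: any stale bits above bit 31 vanish when the mask is sign-extended, so a result whose low 32 bits are known zero is still detected. A small check of that arithmetic (plain C, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Possibly-nonzero bits that all lie above bit 31: as a
           32-bit result this value is known to be zero. */
        uint64_t z_mask = 0xab00000000ull;

        /* Sign-extend from bit 31, as fold_masks now does. */
        int64_t folded = (int32_t)z_mask;

        /* The "z_mask == 0" fold in fold_masks still fires. */
        assert(folded == 0);
        return 0;
    }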