From 196601e759b89e70c7a4470c5ef8b9f0b307298a Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 4 Apr 2024 20:53:50 +0000
Subject: tcg/optimize: Do not attempt to constant fold neg_vec

Split out the tail of fold_neg to fold_neg_no_const so that we can
avoid attempting to constant fold vector negate.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2150
Signed-off-by: Richard Henderson
(cherry picked from commit e25fe886b89a396bae5847520b70c148587d490a)
Signed-off-by: Michael Tokarev
---
 tcg/optimize.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 6fcdda68ef..5ead14972a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1830,16 +1830,10 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_neg(OptContext *ctx, TCGOp *op)
+static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask;
-
-    if (fold_const1(ctx, op)) {
-        return true;
-    }
-
     /* Set to 1 all bits to the left of the rightmost. */
-    z_mask = arg_info(op->args[1])->z_mask;
+    uint64_t z_mask = arg_info(op->args[1])->z_mask;
     ctx->z_mask = -(z_mask & -z_mask);
 
     /*
@@ -1850,6 +1844,11 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_neg(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
+}
+
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2_commutative(ctx, op) ||
@@ -2165,7 +2164,7 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
     if (have_neg) {
         op->opc = neg_op;
         op->args[1] = op->args[2];
-        return fold_neg(ctx, op);
+        return fold_neg_no_const(ctx, op);
     }
     return false;
 }
-- 
cgit v1.2.3
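
Note (not part of the patch): a minimal sketch of how the two helpers look after
this change, assuming the surrounding tcg/optimize.c definitions (OptContext,
arg_info, fold_const1) and omitting the code that follows the z_mask computation
in the real function. The scalar constant-folding attempt stays in fold_neg,
while fold_neg_no_const keeps only the known-zero-bits bookkeeping:

/*
 * Sketch only: mirrors the post-patch shape of the two helpers.
 */
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
{
    /* Set to 1 all bits to the left of the rightmost. */
    uint64_t z_mask = arg_info(op->args[1])->z_mask;
    ctx->z_mask = -(z_mask & -z_mask);

    /* Always succeed, so callers that rewrote the op keep their rewrite. */
    return true;
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    /* Only the scalar neg path reaches fold_const1. */
    return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
}

The call-site change in fold_sub_to_neg follows from this split: it can rewrite a
subtraction into neg or neg_vec and reuse the mask logic via fold_neg_no_const
without ever routing a vector opcode through the scalar constant folder.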