author     Paolo Bonzini <pbonzini@redhat.com>  2024-05-23 09:39:08 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2024-06-11 14:29:22 +0200
commit     cc155f19717ced44d70df3cd5f149a5b9f9a13f1 (patch)
tree       563f08d7d674949d29869ab03aea41c9d1141c17 /target/i386
parent     4228eb8cc6ba44d35cd52b05508a47e780668051 (diff)
target/i386: rewrite flags writeback for ADCX/ADOX
Avoid using set_cc_op() in preparation for implementing APX; treat
CC_OP_EFLAGS similar to the case where we have the "opposite" cc_op
(CC_OP_ADOX for ADCX and CC_OP_ADCX for ADOX), except the resulting
cc_op is not CC_OP_ADCOX.  This is written easily as two "if"s, whose
conditions are both false for CC_OP_EFLAGS, both true for CC_OP_ADCOX,
and one each true for CC_OP_ADCX/ADOX.

The new logic also makes it easy to drop usage of tmp0.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
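To make that rule concrete, here is a minimal standalone sketch of the two
conditions and the cc_op they produce. It is not part of the patch: the
helper and struct names are invented, and the enum is reduced to the four
relevant values.

    #include <stdbool.h>

    /* Reduced model of the relevant cc_op values. */
    typedef enum { CC_OP_EFLAGS, CC_OP_ADCX, CC_OP_ADOX, CC_OP_ADCOX } cc_op_t;

    typedef struct {
        bool reuse_carry;   /* carry-in can be taken from the previous carry-out */
        cc_op_t result;     /* cc_op recorded for the flags writeback */
    } adcox_plan_t;

    /* this_op is CC_OP_ADCX or CC_OP_ADOX; prev is the cc_op left by the
     * previous instruction. */
    static adcox_plan_t plan_adcox(cc_op_t prev, cc_op_t this_op)
    {
        adcox_plan_t p = { .reuse_carry = false, .result = this_op };

        /* "if" #1 -- re-use the carry-out from a previous round?
         * False for CC_OP_EFLAGS, true for CC_OP_ADCOX and for prev == this_op. */
        if (prev == this_op || prev == CC_OP_ADCOX) {
            p.reuse_carry = true;
        }

        /* "if" #2 -- preserve the opposite carry from previous rounds?
         * False for CC_OP_EFLAGS, true for CC_OP_ADCOX and for the opposite op. */
        if (prev != this_op && prev != CC_OP_EFLAGS) {
            p.result = CC_OP_ADCOX;
        }

        return p;
    }

With a CC_OP_EFLAGS input both branches are skipped, so the carry-in is
extracted from the saved flags and the result stays CC_OP_ADCX/ADOX,
mirroring what the rewritten gen_ADCOX() below does.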
Diffstat (limited to 'target/i386')
-rw-r--r--   target/i386/cpu.h            9
-rw-r--r--   target/i386/tcg/emit.c.inc  61
2 files changed, 40 insertions, 30 deletions
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 8fe28b67e0..7e2a9b56ae 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1260,6 +1260,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
/* Use a clearer name for this. */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
+#define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX)
+
/* Instead of computing the condition codes after each x86 instruction,
* QEMU just stores one operand (called CC_SRC), the result
* (called CC_DST) and the type of operation (called CC_OP). When the
@@ -1270,6 +1272,9 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
+ CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
+ CC_OP_ADOX, /* CC_SRC2 = O, CC_SRC = rest. */
+ CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
CC_OP_MULW,
@@ -1326,10 +1331,6 @@ typedef enum {
CC_OP_BMILGL,
CC_OP_BMILGQ,
- CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
- CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
- CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
-
CC_OP_CLR, /* Z set, all other flags clear. */
CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index df7597c7e2..2041ea9d04 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1122,24 +1122,41 @@ static void gen_ADC(DisasContext *s, X86DecodedInsn *decode)
prepare_update3_cc(decode, s, CC_OP_ADCB + ot, c_in);
}
-/* ADCX/ADOX do not have memory operands and can use set_cc_op. */
-static void gen_ADCOX(DisasContext *s, MemOp ot, int cc_op)
+static void gen_ADCOX(DisasContext *s, X86DecodedInsn *decode, int cc_op)
{
- int opposite_cc_op;
+ MemOp ot = decode->op[0].ot;
TCGv carry_in = NULL;
- TCGv carry_out = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
+ TCGv *carry_out = (cc_op == CC_OP_ADCX ? &decode->cc_dst : &decode->cc_src2);
TCGv zero;
- if (cc_op == s->cc_op || s->cc_op == CC_OP_ADCOX) {
- /* Re-use the carry-out from a previous round. */
- carry_in = carry_out;
- } else {
- /* We don't have a carry-in, get it out of EFLAGS. */
- if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
- gen_compute_eflags(s);
+ decode->cc_op = cc_op;
+ *carry_out = tcg_temp_new();
+ if (CC_OP_HAS_EFLAGS(s->cc_op)) {
+ decode->cc_src = cpu_cc_src;
+
+ /* Re-use the carry-out from a previous round? */
+ if (s->cc_op == cc_op || s->cc_op == CC_OP_ADCOX) {
+ carry_in = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
+ }
+
+ /* Preserve the opposite carry from previous rounds? */
+ if (s->cc_op != cc_op && s->cc_op != CC_OP_EFLAGS) {
+ decode->cc_op = CC_OP_ADCOX;
+ if (carry_out == &decode->cc_dst) {
+ decode->cc_src2 = cpu_cc_src2;
+ } else {
+ decode->cc_dst = cpu_cc_dst;
+ }
}
- carry_in = s->tmp0;
- tcg_gen_extract_tl(carry_in, cpu_cc_src,
+ } else {
+ decode->cc_src = tcg_temp_new();
+ gen_mov_eflags(s, decode->cc_src);
+ }
+
+ if (!carry_in) {
+ /* Get carry_in out of EFLAGS. */
+ carry_in = tcg_temp_new();
+ tcg_gen_extract_tl(carry_in, decode->cc_src,
ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1);
}
@@ -1151,28 +1168,20 @@ static void gen_ADCOX(DisasContext *s, MemOp ot, int cc_op)
tcg_gen_ext32u_tl(s->T1, s->T1);
tcg_gen_add_i64(s->T0, s->T0, s->T1);
tcg_gen_add_i64(s->T0, s->T0, carry_in);
- tcg_gen_shri_i64(carry_out, s->T0, 32);
+ tcg_gen_shri_i64(*carry_out, s->T0, 32);
break;
#endif
default:
zero = tcg_constant_tl(0);
- tcg_gen_add2_tl(s->T0, carry_out, s->T0, zero, carry_in, zero);
- tcg_gen_add2_tl(s->T0, carry_out, s->T0, carry_out, s->T1, zero);
+ tcg_gen_add2_tl(s->T0, *carry_out, s->T0, zero, carry_in, zero);
+ tcg_gen_add2_tl(s->T0, *carry_out, s->T0, *carry_out, s->T1, zero);
break;
}
-
- opposite_cc_op = cc_op == CC_OP_ADCX ? CC_OP_ADOX : CC_OP_ADCX;
- if (s->cc_op == CC_OP_ADCOX || s->cc_op == opposite_cc_op) {
- /* Merge with the carry-out from the opposite instruction. */
- set_cc_op(s, CC_OP_ADCOX);
- } else {
- set_cc_op(s, cc_op);
- }
}
static void gen_ADCX(DisasContext *s, X86DecodedInsn *decode)
{
- gen_ADCOX(s, decode->op[0].ot, CC_OP_ADCX);
+ gen_ADCOX(s, decode, CC_OP_ADCX);
}
static void gen_ADD(DisasContext *s, X86DecodedInsn *decode)
@@ -1190,7 +1199,7 @@ static void gen_ADD(DisasContext *s, X86DecodedInsn *decode)
static void gen_ADOX(DisasContext *s, X86DecodedInsn *decode)
{
- gen_ADCOX(s, decode->op[0].ot, CC_OP_ADOX);
+ gen_ADCOX(s, decode, CC_OP_ADOX);
}
static void gen_AND(DisasContext *s, X86DecodedInsn *decode)
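
As a footnote to the cpu.h hunk above: moving CC_OP_ADCX/ADOX/ADCOX next to
CC_OP_EFLAGS is what lets CC_OP_HAS_EFLAGS() be a simple range check, since
for every op in that range CC_SRC already holds the remaining flags in EFLAGS
format. The sketch below illustrates how EFLAGS would be reassembled under
the storage conventions documented in the enum comments; it is not QEMU's
actual helper (the function name is invented, the enum is the reduced one
from the earlier sketch, and only CC_C/CC_O use the real x86 bit masks).

    #include <stdint.h>

    #define CC_C 0x0001   /* x86 CF mask */
    #define CC_O 0x0800   /* x86 OF mask */

    typedef enum { CC_OP_EFLAGS, CC_OP_ADCX, CC_OP_ADOX, CC_OP_ADCOX } cc_op_t;

    /* Hypothetical helper: rebuild EFLAGS from the lazy-flags fields for the
     * ops covered by CC_OP_HAS_EFLAGS().  CC_SRC is the "rest" of the flags,
     * CC_DST holds C (0 or 1) for ADCX/ADCOX, CC_SRC2 holds O (0 or 1) for
     * ADOX/ADCOX. */
    static uint64_t eflags_from_adcox_state(cc_op_t op, uint64_t cc_dst,
                                            uint64_t cc_src, uint64_t cc_src2)
    {
        uint64_t flags = cc_src;              /* CC_OP_EFLAGS: nothing more to do */

        if (op == CC_OP_ADCX || op == CC_OP_ADCOX) {
            flags = (flags & ~(uint64_t)CC_C) | (cc_dst ? CC_C : 0);
        }
        if (op == CC_OP_ADOX || op == CC_OP_ADCOX) {
            flags = (flags & ~(uint64_t)CC_O) | (cc_src2 ? CC_O : 0);
        }
        return flags;
    }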