path: root/target-arm/translate-a64.c
author     Richard Henderson <rth@twiddle.net>        2015-09-14 14:39:48 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2015-09-14 14:39:48 +0100
commit     7dd03d773e0dafae9271318fc8d6b2b14de74403 (patch)
tree       3a1b73b986895f7ea2cac8ce462df143808ff7c3 /target-arm/translate-a64.c
parent     259cb68491ab36427e7e5d820fe543d53b006ec6 (diff)
target-arm: Implement ccmp branchless
This can allow much of a ccmp to be elided when particular flags are
subsequently dead.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Message-id: 1441909103-24666-6-git-send-email-rth@twiddle.net
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
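The branchless trick is to turn the condition into masks rather than control flow: with T0 = !COND, T1 = -T0 is all-ones exactly when the condition fails, and T2 = T0 - 1 is its complement, so each flag can be forced with a plain AND/OR that the TCG optimizer is free to delete when that flag is overwritten before being read. A minimal sketch of the idea in plain C (illustrative names only, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Branchlessly replace 'value' with 'forced' when cond is false.
 * The temporaries mirror the ones in the patch:
 *   t0 = !cond     -> 0 or 1
 *   t1 = -t0       -> 0 or all-ones (all-ones when cond is false)
 *   t2 = t0 - 1    -> all-ones or 0 (the complement of t1)
 */
static uint32_t force_if_false(uint32_t value, uint32_t forced, int cond)
{
    uint32_t t0 = !cond;
    uint32_t t1 = -t0;
    uint32_t t2 = t0 - 1;

    return (value & t2) | (forced & t1);
}

int main(void)
{
    printf("%08x\n", force_if_false(0x80000000u, 0, 1)); /* cond true: keep value */
    printf("%08x\n", force_if_false(0x80000000u, 0, 0)); /* cond false: forced to 0 */
    return 0;
}

In the patch itself the forced value for each flag is a constant taken from the #nzcv immediate, so this generic select degenerates into a single OR (force the flag on) or AND/ANDC (force it off) per flag.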
Diffstat (limited to 'target-arm/translate-a64.c')
-rw-r--r--   target-arm/translate-a64.c   74
1 file changed, 58 insertions, 16 deletions
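Which operation the patch emits per flag follows from how target-arm keeps the flags in 32-bit TCG globals: N and V live in bit 31 of NF/VF, C is a 0/1 value in CF, and Z is set exactly when ZF is zero. The sketch below reproduces the same per-flag choices in plain C (hypothetical struct and function names, not QEMU code); note that the if statements test only the #nzcv immediate, which is fixed per instruction, so no branch on the run-time condition is generated:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag container mirroring target-arm's storage convention:
 * N is bit 31 of nf, Z is set iff zf == 0, C is bit 0 of cf, V is bit 31 of vf. */
struct flags {
    uint32_t nf, zf, cf, vf;
};

/* Force the flags to #nzcv when cond is false, without branching on cond.
 * Same masks as the patch: t0 = !cond, t1 = -t0, t2 = t0 - 1.  Since
 * t2 == ~t1, a host with an andc op can compute x & ~t1 directly and the
 * unused t2 is optimized away. */
static void force_nzcv_if_false(struct flags *f, unsigned nzcv, int cond)
{
    uint32_t t0 = !cond;
    uint32_t t1 = -t0;
    uint32_t t2 = t0 - 1;

    if (nzcv & 8) {             /* N requested set: OR bit 31 in */
        f->nf |= t1;
    } else {                    /* N requested clear: mask it off */
        f->nf &= t2;
    }
    if (nzcv & 4) {             /* Z requested set: zf must become 0 */
        f->zf &= t2;
    } else {                    /* Z requested clear: zf must be non-zero */
        f->zf |= t0;
    }
    if (nzcv & 2) {             /* C requested set: OR the 0/1 bit in */
        f->cf |= t0;
    } else {                    /* C requested clear */
        f->cf &= t2;
    }
    if (nzcv & 1) {             /* V requested set: OR bit 31 in */
        f->vf |= t1;
    } else {                    /* V requested clear */
        f->vf &= t2;
    }
}

int main(void)
{
    struct flags f = { 0x80000000u, 1, 1, 0 };  /* N=1 Z=0 C=1 V=0 from a compare */

    force_nzcv_if_false(&f, 0x5, 0);            /* cond false: force flags to 0101 */
    printf("N=%u Z=%u C=%u V=%u\n",
           (unsigned)(f.nf >> 31), (unsigned)(f.zf == 0),
           (unsigned)(f.cf & 1), (unsigned)(f.vf >> 31));
    return 0;
}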
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 1ab2c8d9aa..b48cd97314 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -3570,8 +3570,9 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn)
 static void disas_cc(DisasContext *s, uint32_t insn)
 {
     unsigned int sf, op, y, cond, rn, nzcv, is_imm;
-    TCGLabel *label_continue = NULL;
+    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
     TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
+    DisasCompare c;
 
     if (!extract32(insn, 29, 1)) {
         unallocated_encoding(s);
@@ -3589,19 +3590,13 @@ static void disas_cc(DisasContext *s, uint32_t insn)
     rn = extract32(insn, 5, 5);
     nzcv = extract32(insn, 0, 4);
 
-    if (cond < 0x0e) { /* not always */
-        TCGLabel *label_match = gen_new_label();
-        label_continue = gen_new_label();
-        arm_gen_test_cc(cond, label_match);
-        /* nomatch: */
-        tcg_tmp = tcg_temp_new_i64();
-        tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
-        gen_set_nzcv(tcg_tmp);
-        tcg_temp_free_i64(tcg_tmp);
-        tcg_gen_br(label_continue);
-        gen_set_label(label_match);
-    }
-    /* match, or condition is always */
+    /* Set T0 = !COND. */
+    tcg_t0 = tcg_temp_new_i32();
+    arm_test_cc(&c, cond);
+    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
+    arm_free_cc(&c);
+
+    /* Load the arguments for the new comparison. */
     if (is_imm) {
         tcg_y = new_tmp_a64(s);
         tcg_gen_movi_i64(tcg_y, y);
@@ -3610,6 +3605,7 @@ static void disas_cc(DisasContext *s, uint32_t insn)
     }
     tcg_rn = cpu_reg(s, rn);
 
+    /* Set the flags for the new comparison. */
     tcg_tmp = tcg_temp_new_i64();
     if (op) {
         gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
@@ -3618,9 +3614,55 @@ static void disas_cc(DisasContext *s, uint32_t insn)
     }
     tcg_temp_free_i64(tcg_tmp);
 
-    if (cond < 0x0e) { /* continue */
-        gen_set_label(label_continue);
+    /* If COND was false, force the flags to #nzcv.  Compute two masks
+     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
+     * For tcg hosts that support ANDC, we can make do with just T1.
+     * In either case, allow the tcg optimizer to delete any unused mask.
+     */
+    tcg_t1 = tcg_temp_new_i32();
+    tcg_t2 = tcg_temp_new_i32();
+    tcg_gen_neg_i32(tcg_t1, tcg_t0);
+    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
+
+    if (nzcv & 8) { /* N */
+        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
+    } else {
+        if (TCG_TARGET_HAS_andc_i32) {
+            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
+        } else {
+            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
+        }
+    }
+    if (nzcv & 4) { /* Z */
+        if (TCG_TARGET_HAS_andc_i32) {
+            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
+        } else {
+            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
+        }
+    } else {
+        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
+    }
+    if (nzcv & 2) { /* C */
+        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
+    } else {
+        if (TCG_TARGET_HAS_andc_i32) {
+            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
+        } else {
+            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
+        }
+    }
+    if (nzcv & 1) { /* V */
+        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
+    } else {
+        if (TCG_TARGET_HAS_andc_i32) {
+            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
+        } else {
+            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
+        }
     }
+    tcg_temp_free_i32(tcg_t0);
+    tcg_temp_free_i32(tcg_t1);
+    tcg_temp_free_i32(tcg_t2);
 }
 
 /* C3.5.6 Conditional select
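As a quick sanity check of the mask algebra used above (plain C again, nothing here is QEMU code), the identities behind the per-flag operations can be verified for both condition outcomes; the t2 == ~t1 assertion is why hosts with an andc op never need to materialize T2:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (int cond = 0; cond <= 1; cond++) {
        uint32_t t0 = !cond;
        uint32_t t1 = -t0;        /* cond ? 0 : all-ones */
        uint32_t t2 = t0 - 1;     /* cond ? all-ones : 0 */

        assert(t2 == ~t1);        /* andc(x, t1) == and(x, t2) */

        for (uint32_t value = 0; value < 4; value++) {
            /* forcing a 0/1 flag on (the C-flag case): OR in t0 */
            assert(((value & 1) | t0) == (uint32_t)(cond ? (value & 1) : 1));
            /* forcing any word off (the and/andc cases): AND with t2 */
            assert((value & t2) == (cond ? value : 0));
        }
    }
    printf("mask identities hold\n");
    return 0;
}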