-rw-r--r--  target-arm/helper.h     2
-rw-r--r--  target-arm/op_helper.c  15
-rw-r--r--  target-arm/translate.c  47
3 files changed, 39 insertions, 25 deletions
diff --git a/target-arm/helper.h b/target-arm/helper.h
index 507bb9cf2b..63ae13acff 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -140,8 +140,6 @@ DEF_HELPER_2(recpe_u32, i32, i32, env)
DEF_HELPER_2(rsqrte_u32, i32, i32, env)
DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32)

-DEF_HELPER_3(sbc_cc, i32, env, i32, i32)
-
DEF_HELPER_3(shl_cc, i32, env, i32, i32)
DEF_HELPER_3(shr_cc, i32, env, i32, i32)
DEF_HELPER_3(sar_cc, i32, env, i32, i32)
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 49fc036cc9..a52231346c 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -315,21 +315,6 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

-uint32_t HELPER(sbc_cc)(CPUARMState *env, uint32_t a, uint32_t b)
-{
-    uint32_t result;
-    if (!env->CF) {
-        result = a - b - 1;
-        env->CF = a > b;
-    } else {
-        result = a - b;
-        env->CF = a >= b;
-    }
-    env->VF = (a ^ b) & (a ^ result);
-    env->NF = env->ZF = result;
-    return result;
-}
-
/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 493448a637..9993aea93e 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -464,6 +464,35 @@ static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
    tcg_gen_mov_i32(dest, cpu_NF);
}

+/* dest = T0 + ~T1 + CF = T0 - T1 + CF - 1. Compute C, N, V and Z flags */
+static void gen_sbc_CC(TCGv dest, TCGv t0, TCGv t1)
+{
+    TCGv tmp = tcg_temp_new_i32();
+    tcg_gen_subi_i32(cpu_CF, cpu_CF, 1);
+    if (TCG_TARGET_HAS_add2_i32) {
+        tcg_gen_movi_i32(tmp, 0);
+        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
+        tcg_gen_sub2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
+    } else {
+        TCGv_i64 q0 = tcg_temp_new_i64();
+        TCGv_i64 q1 = tcg_temp_new_i64();
+        tcg_gen_extu_i32_i64(q0, t0);
+        tcg_gen_extu_i32_i64(q1, t1);
+        tcg_gen_sub_i64(q0, q0, q1);
+        tcg_gen_extu_i32_i64(q1, cpu_CF);
+        tcg_gen_add_i64(q0, q0, q1);
+        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
+        tcg_temp_free_i64(q0);
+        tcg_temp_free_i64(q1);
+    }
+    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+    tcg_gen_xor_i32(tmp, t0, t1);
+    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+    tcg_temp_free_i32(tmp);
+    tcg_gen_mov_i32(dest, cpu_NF);
+}
+
#define GEN_SHIFT(name) \
static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
{ \
@@ -7109,7 +7138,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
            break;
        case 0x06:
            if (set_cc) {
-                gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
+                gen_sbc_CC(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
@@ -7117,7 +7146,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
            break;
        case 0x07:
            if (set_cc) {
-                gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
+                gen_sbc_CC(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
@@ -7947,10 +7976,11 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCG
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
-        if (conds)
-            gen_helper_sbc_cc(t0, cpu_env, t0, t1);
-        else
+        if (conds) {
+            gen_sbc_CC(t0, t0, t1);
+        } else {
            gen_sub_carry(t0, t0, t1);
+        }
        break;
    case 13: /* sub */
        if (conds)
@@ -9267,10 +9297,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
            }
            break;
        case 0x6: /* sbc */
-            if (s->condexec_mask)
+            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
-            else
-                gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
+            } else {
+                gen_sbc_CC(tmp, tmp, tmp2);
+            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
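
For reference (illustrative only, not part of the patch): the flag semantics deleted from op_helper.c, which the new inline gen_sbc_CC sequence reproduces, amount to computing a + ~b + CF and deriving NZCV from the wide result. A minimal standalone C sketch of that reference model, using hypothetical names, could look like this:

/* Illustrative reference model only -- names are hypothetical, not QEMU API. */
#include <stdint.h>
#include <stdio.h>

/* SBC: result = a - b - (1 - carry_in) == a + ~b + carry_in. */
static uint32_t sbc_flags(uint32_t a, uint32_t b, unsigned carry_in,
                          int *n, int *z, int *c, int *v)
{
    /* Do the add in 64 bits so the carry-out is directly visible. */
    uint64_t wide = (uint64_t)a + (uint32_t)~b + carry_in;
    uint32_t result = (uint32_t)wide;

    *n = (int32_t)result < 0;                    /* N: bit 31 of the result */
    *z = result == 0;                            /* Z: result is zero */
    *c = (int)(wide >> 32);                      /* C: carry-out, i.e. no borrow */
    *v = (int)(((a ^ b) & (a ^ result)) >> 31);  /* V: signed overflow */
    return result;
}

int main(void)
{
    int n, z, c, v;
    /* 5 - 7 with the carry flag set: a borrow occurs, so C comes back 0. */
    uint32_t r = sbc_flags(5, 7, 1, &n, &z, &c, &v);
    printf("result=0x%08x N=%d Z=%d C=%d V=%d\n", (unsigned)r, n, z, c, v);
    return 0;
}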