 target/arm/translate-a64.c | 38 +++++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index a39b9d3633..f6b364c04b 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -348,7 +348,8 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
/* No direct tb linking with singlestep (either QEMU's or the ARM
* debug architecture kind) or deterministic io
*/
- if (s->base.singlestep_enabled || s->ss_active || (s->base.tb->cflags & CF_LAST_IO)) {
+ if (s->base.singlestep_enabled || s->ss_active ||
+ (tb_cflags(s->base.tb) & CF_LAST_IO)) {
return false;
}
@@ -1335,13 +1336,18 @@ static void handle_hint(DisasContext *s, uint32_t insn,
case 3: /* WFI */
s->base.is_jmp = DISAS_WFI;
return;
+ /* When running in MTTCG we don't generate jumps to the yield and
+ * WFE helpers as it won't affect the scheduling of other vCPUs.
+ * If we wanted to more completely model WFE/SEV so we don't busy
+ * spin unnecessarily we would need to do something more involved.
+ */
case 1: /* YIELD */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
s->base.is_jmp = DISAS_YIELD;
}
return;
case 2: /* WFE */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
s->base.is_jmp = DISAS_WFE;
}
return;
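The YIELD/WFE change keys the decision off the TB's own compile flags (CF_PARALLEL) rather than the mutable global parallel_cpus, so a block's behaviour always matches the flags it was translated under. A rough stand-alone sketch of that translation-time decision follows; the names, hint encodings and bit position are illustrative, not QEMU's.

    /* Sketch only: a per-block flag, not a global, decides whether the
     * generated code bothers exiting to the main loop for YIELD/WFE. */
    #include <stdint.h>

    #define CF_PARALLEL_SKETCH  (1u << 19)   /* hypothetical bit position */

    enum jmp_kind { JMP_NEXT, JMP_YIELD, JMP_WFE };

    static enum jmp_kind translate_hint_sketch(uint32_t tb_cflags, int hint)
    {
        switch (hint) {
        case 1: /* YIELD */
            /* only useful when all vCPUs share one host thread */
            return (tb_cflags & CF_PARALLEL_SKETCH) ? JMP_NEXT : JMP_YIELD;
        case 2: /* WFE */
            return (tb_cflags & CF_PARALLEL_SKETCH) ? JMP_NEXT : JMP_WFE;
        default:
            return JMP_NEXT;
        }
    }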
@@ -1561,7 +1567,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
break;
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
@@ -1592,7 +1598,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
}
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
s->base.is_jmp = DISAS_UPDATE;
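The two handle_sys hunks are the usual icount bracketing, now reading cflags through tb_cflags(): if the TB was compiled with CF_USE_ICOUNT and the system register is an I/O one, the access is wrapped in gen_io_start()/gen_io_end() and the TB is ended. A self-contained sketch of that shape, with stub emit_* functions standing in for the real code generators and a hypothetical bit value:

    /* Sketch only: bracket an I/O register access when precise instruction
     * counting is in use, and end the block afterwards. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CF_USE_ICOUNT_SKETCH  (1u << 17)   /* hypothetical bit position */

    static void emit_io_start(void)     { puts("gen_io_start"); }
    static void emit_sysreg_helper(void){ puts("sysreg helper call"); }
    static void emit_io_end(void)       { puts("gen_io_end"); }
    static void emit_end_of_tb(void)    { puts("end TB (DISAS_UPDATE)"); }

    static void emit_sysreg_access_sketch(uint32_t cflags, bool reg_is_io)
    {
        bool bracket = (cflags & CF_USE_ICOUNT_SKETCH) && reg_is_io;

        if (bracket) {
            emit_io_start();      /* icount: I/O may happen from here on */
        }
        emit_sysreg_helper();     /* the register read or write itself */
        if (bracket) {
            emit_io_end();        /* icount: the I/O window is closed */
            emit_end_of_tb();     /* I/O must end the TB, read or write */
        }
    }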
@@ -1930,11 +1936,25 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
MO_64 | MO_ALIGN | s->be_data);
tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
} else if (s->be_data == MO_LE) {
- gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
- cpu_reg(s, rt), cpu_reg(s, rt2));
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
+ cpu_exclusive_addr,
+ cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ }
} else {
- gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
- cpu_reg(s, rt), cpu_reg(s, rt2));
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
+ cpu_exclusive_addr,
+ cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ }
}
} else {
tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
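Finally, the store-exclusive hunk picks a _parallel variant of the paired cmpxchg helper when CF_PARALLEL is set. The sketch below shows the intent of the split under the assumption that only the parallel case needs the 128-bit compare-and-swap to be atomic against other vCPU threads; it serializes crudely with a mutex purely for illustration, which is not how the QEMU helpers are implemented, and the types and names are hypothetical.

    /* Sketch only: one plain path for single-threaded execution, one
     * thread-safe path for the CF_PARALLEL case. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } pair64;

    static pthread_mutex_t pair_lock = PTHREAD_MUTEX_INITIALIZER;

    /* serial path: safe only because no other vCPU thread is running */
    static bool paired_cmpxchg_sketch(pair64 *mem, pair64 expected,
                                      pair64 desired)
    {
        if (mem->lo == expected.lo && mem->hi == expected.hi) {
            *mem = desired;
            return true;
        }
        return false;
    }

    /* parallel path: the same operation, made atomic against other threads */
    static bool paired_cmpxchg_parallel_sketch(pair64 *mem, pair64 expected,
                                               pair64 desired)
    {
        pthread_mutex_lock(&pair_lock);
        bool ok = paired_cmpxchg_sketch(mem, expected, desired);
        pthread_mutex_unlock(&pair_lock);
        return ok;
    }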