author     Alex Bennée <alex.bennee@linaro.org>             2021-02-24 16:58:08 +0000
committer  Richard Henderson <richard.henderson@linaro.org> 2021-03-06 11:50:50 -0800
commit     c0ae396a81e13e5a09846f86a702bc61733a8885 (patch)
tree       a1bbd21ab556043602530b72685a093ca9fd2abf /accel/tcg
parent     6f04cb1c8f481cf02fbc4657fefba985a1fe725f (diff)
accel/tcg: move CF_CLUSTER calculation to curr_cflags
There is nothing special about this compile flag that means we can't just
compute it with curr_cflags(), which we should be using when building a new
set.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-3-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
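The hunks below only update the call sites to pass the CPUState; the new body
of curr_cflags() itself is not visible in this excerpt. As a rough,
non-authoritative sketch of the shape the commit message describes (assuming
the existing cpu->cluster_index field, CF_CLUSTER_SHIFT, the parallel_cpus
flag and icount_enabled(); the real patch may differ in detail):

/*
 * Sketch only: curr_cflags() now owns the CF_CLUSTER calculation,
 * so callers stop or-ing the cluster bits in themselves.
 */
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    if (parallel_cpus) {
        cflags |= CF_PARALLEL;   /* generated code must be MTTCG-safe */
    }
    if (icount_enabled()) {
        cflags |= CF_USE_ICOUNT; /* instruction counting is active */
    }
    return cflags;
}

With the cluster bits folded in here, call sites such as
cpu_exec_step_atomic() below can start from curr_cflags(cpu) and just adjust
the bits they care about, e.g. clearing CF_PARALLEL and forcing a
single-instruction count of 1.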
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cpu-exec.c       9
-rw-r--r--  accel/tcg/tcg-runtime.c    2
-rw-r--r--  accel/tcg/translate-all.c  6
3 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index ef96b312a1..45286dc4b3 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -249,8 +249,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- uint32_t cflags = 1;
- uint32_t cf_mask = cflags & CF_HASH_MASK;
+ uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -260,7 +259,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu->running = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
@@ -497,7 +496,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (replay_has_exception()
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */
- cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
}
#endif
return false;
@@ -794,7 +793,7 @@ int cpu_exec(CPUState *cpu)
have CF_INVALID set, -1 is a convenient invalid value that
does not require tcg headers for cpu_common_reset. */
if (cflags == -1) {
- cflags = curr_cflags();
+ cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index 05e3d52c2f..99403e3eb3 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -154,7 +154,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags());
+ tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bbd919a393..f29b47f090 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2194,7 +2194,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
@@ -2362,7 +2362,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
return true;
}
#endif
@@ -2438,7 +2438,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* operations only (which execute after completion) so we don't
* double instrument the instruction.
*/
- cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n;
+ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"cpu_io_recompile: rewound execution of TB to "