author     Richard Henderson <richard.henderson@linaro.org>    2021-07-19 12:40:57 -1000
committer  Richard Henderson <richard.henderson@linaro.org>    2021-07-21 07:47:05 -1000
commit     11c1d5f8ca7a72818b3c24093a2c40856022fe0f (patch)
tree       621154150d82949105b21d00292b328e9b83b8f7 /accel
parent     e64cb6c231e0de00f88d4cd0c4dd3481dacfc0d9 (diff)
accel/tcg: Merge tb_find into its only caller
We are going to want two things: (1) the check for breakpoints will want to break out of the loop here, and (2) cflags can only be calculated with pc in hand.

Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
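For orientation, here is a minimal, self-contained sketch of the control flow that this patch leaves behind in cpu_exec() once tb_find() has been inlined: choose cflags (honouring an exact cpu->cflags_next_tb request), read the guest pc, look up or generate a TB, decide whether it may be chained to the previous TB, then execute it. Every ToyCPU, ToyTB and toy_* name below is invented for the example and is not a QEMU API; only the ordering of the steps mirrors the diff that follows.

/*
 * Illustrative sketch only (not QEMU code): a self-contained model of the
 * loop body that results from inlining tb_find() into cpu_exec().  All
 * ToyCPU, ToyTB and toy_* names are invented; only the order of the steps
 * mirrors the diff below.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct ToyTB {
    uint64_t pc;
    bool spans_two_pages;        /* models tb->page_addr[1] != -1 */
} ToyTB;

typedef struct ToyCPU {
    uint32_t cflags_next_tb;     /* (uint32_t)-1 means "no exact request" */
    uint64_t pc;
} ToyCPU;

/* Stand-in for tb_lookup(): pretend the caches always miss. */
static ToyTB *toy_tb_lookup(uint64_t pc, uint32_t cflags)
{
    (void)pc;
    (void)cflags;
    return NULL;
}

/* Stand-in for tb_gen_code(): "translate" a block for this pc. */
static ToyTB *toy_tb_gen_code(uint64_t pc, uint32_t cflags)
{
    static ToyTB pool[16];
    static unsigned next;
    ToyTB *tb = &pool[next++ % 16];
    (void)cflags;
    tb->pc = pc;
    tb->spans_two_pages = false;
    return tb;
}

/* Stand-in for tb_add_jump(): QEMU patches last_tb's exit; here we just log. */
static void toy_tb_add_jump(ToyTB *last_tb, int tb_exit, ToyTB *tb)
{
    printf("chain TB@0x%" PRIx64 " exit %d -> TB@0x%" PRIx64 "\n",
           last_tb->pc, tb_exit, tb->pc);
}

/* One iteration of the merged loop body, in the same order as the diff. */
static void toy_exec_one(ToyCPU *cpu, ToyTB **last_tb, int tb_exit)
{
    uint32_t cflags = cpu->cflags_next_tb;
    if (cflags == (uint32_t)-1) {
        cflags = 0;              /* stands in for curr_cflags(cpu) */
    } else {
        cpu->cflags_next_tb = (uint32_t)-1;
    }

    uint64_t pc = cpu->pc;       /* stands in for cpu_get_tb_cpu_state() */

    ToyTB *tb = toy_tb_lookup(pc, cflags);
    if (tb == NULL) {
        tb = toy_tb_gen_code(pc, cflags);
        /* the real code also stores tb in the virtual-pc hash table here */
    }

    if (tb->spans_two_pages) {
        *last_tb = NULL;         /* not safe to chain across a page boundary */
    }
    if (*last_tb) {
        toy_tb_add_jump(*last_tb, tb_exit, tb);
    }

    /* "execute" the block and remember it for chaining next time */
    *last_tb = tb;
}

int main(void)
{
    ToyCPU cpu = { .cflags_next_tb = (uint32_t)-1, .pc = 0x1000 };
    ToyTB *last_tb = NULL;

    toy_exec_one(&cpu, &last_tb, 0);
    cpu.pc = 0x2000;
    toy_exec_one(&cpu, &last_tb, 0);   /* prints one "chain ..." line */
    return 0;
}

In the real code the lookup and generation go through tb_lookup()/tb_gen_code() under mmap_lock(), and a freshly generated TB is also stored in cpu->tb_jmp_cache, as shown in the diff below.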
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cpu-exec.c | 85
1 file changed, 42 insertions(+), 43 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 5bb099174f..cde7069eb7 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -500,41 +500,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     return;
 }
 
-static inline TranslationBlock *tb_find(CPUState *cpu,
-                                        TranslationBlock *last_tb,
-                                        int tb_exit, uint32_t cflags)
-{
-    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
-    TranslationBlock *tb;
-    target_ulong cs_base, pc;
-    uint32_t flags;
-
-    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-
-    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
-    if (tb == NULL) {
-        mmap_lock();
-        tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
-        mmap_unlock();
-        /* We add the TB in the virtual pc hash table for the fast lookup */
-        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
-    }
-#ifndef CONFIG_USER_ONLY
-    /* We don't take care of direct jumps when address mapping changes in
-     * system emulation. So it's not safe to make a direct jump to a TB
-     * spanning two pages because the mapping for the second page can change.
-     */
-    if (tb->page_addr[1] != -1) {
-        last_tb = NULL;
-    }
-#endif
-    /* See if we can patch the calling TB. */
-    if (last_tb) {
-        tb_add_jump(last_tb, tb_exit, tb);
-    }
-    return tb;
-}
-
 static inline bool cpu_handle_halt(CPUState *cpu)
 {
     if (cpu->halted) {
@@ -868,22 +833,56 @@ int cpu_exec(CPUState *cpu)
         int tb_exit = 0;
 
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
-            uint32_t cflags = cpu->cflags_next_tb;
             TranslationBlock *tb;
-
-            /* When requested, use an exact setting for cflags for the next
-               execution. This is used for icount, precise smc, and stop-
-               after-access watchpoints. Since this request should never
-               have CF_INVALID set, -1 is a convenient invalid value that
-               does not require tcg headers for cpu_common_reset. */
+            target_ulong cs_base, pc;
+            uint32_t flags, cflags;
+
+            /*
+             * When requested, use an exact setting for cflags for the next
+             * execution. This is used for icount, precise smc, and stop-
+             * after-access watchpoints. Since this request should never
+             * have CF_INVALID set, -1 is a convenient invalid value that
+             * does not require tcg headers for cpu_common_reset.
+             */
+            cflags = cpu->cflags_next_tb;
             if (cflags == -1) {
                 cflags = curr_cflags(cpu);
             } else {
                 cpu->cflags_next_tb = -1;
             }
 
-            tb = tb_find(cpu, last_tb, tb_exit, cflags);
+            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+            if (tb == NULL) {
+                mmap_lock();
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+                mmap_unlock();
+                /*
+                 * We add the TB in the virtual pc hash table
+                 * for the fast lookup
+                 */
+                qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+            }
+
+#ifndef CONFIG_USER_ONLY
+            /*
+             * We don't take care of direct jumps when address mapping
+             * changes in system emulation. So it's not safe to make a
+             * direct jump to a TB spanning two pages because the mapping
+             * for the second page can change.
+             */
+            if (tb->page_addr[1] != -1) {
+                last_tb = NULL;
+            }
+#endif
+            /* See if we can patch the calling TB. */
+            if (last_tb) {
+                tb_add_jump(last_tb, tb_exit, tb);
+            }
+
             cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+
             /* Try to align the host and virtual clocks
                if the guest is in advance */
             align_clocks(&sc, cpu);
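A note on the "virtual pc hash table" mentioned in the comments above: cpu->tb_jmp_cache is a per-CPU table indexed by a hash of the guest pc, so the next execution of the same pc can find its TB without re-translating. The sketch below only illustrates that direct-mapped idea, assuming invented toy_* names and sizes rather than QEMU's real tb_jmp_cache_hash_func and cache definitions; a real hit must also be validated against the full TB key (cs_base, flags, cflags), not just pc as the toy does.

/*
 * Illustrative sketch only: a direct-mapped cache indexed by a hash of the
 * guest pc, in the spirit of cpu->tb_jmp_cache.  All names here are
 * invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_JMP_CACHE_BITS 12
#define TOY_JMP_CACHE_SIZE (1u << TOY_JMP_CACHE_BITS)

typedef struct ToyTB {
    uint64_t pc;
} ToyTB;

static ToyTB *toy_jmp_cache[TOY_JMP_CACHE_SIZE];

/* Fold the pc down to an index; any cheap mixing function works here. */
static unsigned toy_jmp_cache_hash(uint64_t pc)
{
    return (unsigned)((pc ^ (pc >> TOY_JMP_CACHE_BITS)) & (TOY_JMP_CACHE_SIZE - 1));
}

/* Fast path: one load and one compare per lookup; NULL means "miss". */
static ToyTB *toy_jmp_cache_lookup(uint64_t pc)
{
    ToyTB *tb = toy_jmp_cache[toy_jmp_cache_hash(pc)];
    return (tb && tb->pc == pc) ? tb : NULL;
}

int main(void)
{
    static ToyTB tb = { .pc = 0x1000 };

    /* plays the role of the qatomic_set(...) after tb_gen_code() above */
    toy_jmp_cache[toy_jmp_cache_hash(tb.pc)] = &tb;

    printf("hit:  %p\n", (void *)toy_jmp_cache_lookup(0x1000));
    printf("miss: %p\n", (void *)toy_jmp_cache_lookup(0x2000));
    return 0;
}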