author     Richard Henderson <richard.henderson@linaro.org>  2022-08-12 09:53:53 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2022-10-04 12:13:16 -0700
commit     8ed558ec0cbcc29ecf490e93c54dd65d276e8e69 (patch)
tree       ff425842023c3c01ba85c1f5feef862ac6159fb0 /accel/tcg/cpu-exec.c
parent     fbf59aad178d98afe193fa872a2d880266a75269 (diff)
accel/tcg: Introduce TARGET_TB_PCREL
Prepare for targets to be able to produce TBs that can run in more than one virtual context.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
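For context (an illustrative reading of the change, not text from the commit): when TARGET_TB_PCREL is enabled, a translation block may execute in more than one virtual context, so it no longer identifies a single absolute guest pc. Consumers such as the lookup and jump-cache code in this file must therefore either skip pc checks or obtain the pc from somewhere other than the TB. A minimal sketch of that constraint, assuming a build-time TARGET_TB_PCREL define as introduced by this series:

/*
 * Illustrative sketch only -- not the literal header change in this commit.
 * With TARGET_TB_PCREL a TB cannot record a single absolute pc, so tb_pc()
 * is only meaningful on builds where TBs stay tied to one virtual address.
 */
static inline target_ulong tb_pc(const TranslationBlock *tb)
{
    assert(!TARGET_TB_PCREL);
    return tb->pc;
}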
Diffstat (limited to 'accel/tcg/cpu-exec.c')
-rw-r--r--  accel/tcg/cpu-exec.c  16
1 file changed, 10 insertions, 6 deletions
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 8b3f8435fb..f9e5cc9ba0 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -186,7 +186,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
- if (tb_pc(tb) == desc->pc &&
+ if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
tb->page_addr[0] == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
@@ -237,7 +237,8 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
desc.page_addr0 = phys_pc;
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
+ flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
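The two hunks above make the physical hash-table lookup pc-agnostic under TARGET_TB_PCREL: the hash is computed with a zero pc and the comparator no longer rejects a TB on a pc mismatch, so candidates are matched on physical address, cs_base, flags and cflags only. A compact way to read the key change (hypothetical helper, purely for illustration):

/* Hypothetical helper, for illustration only: the virtual pc contributes
 * to the hash-table key only when a TB is tied to a single virtual pc. */
static inline target_ulong tb_hash_key_pc(target_ulong pc)
{
    return TARGET_TB_PCREL ? 0 : pc;
}

/* e.g.  h = tb_hash_func(phys_pc, tb_hash_key_pc(pc), flags, cflags, ...); */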
@@ -247,16 +248,18 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
uint32_t flags, uint32_t cflags)
{
TranslationBlock *tb;
+ CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
+ jc = cpu->tb_jmp_cache;
+ tb = tb_jmp_cache_get_tb(jc, hash);
if (likely(tb &&
- tb->pc == pc &&
+ tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
@@ -267,7 +270,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
if (tb == NULL) {
return NULL;
}
- qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
+ tb_jmp_cache_set(jc, hash, tb, pc);
return tb;
}
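The tb_lookup() hunk stops reading tb->pc and the raw jump-cache array directly and goes through accessor helpers instead, because with TARGET_TB_PCREL the virtual pc can no longer be recovered from the TB and has to be remembered per cache entry. A minimal sketch of what those helpers might look like (the real definitions are added elsewhere in this series; entry layout and ordering details here are assumptions):

/* Illustrative sketch of the jump-cache accessors used above.  The pc is
 * cached per entry only when the TB itself cannot provide it. */
struct CPUJumpCache {
    struct {
        TranslationBlock *tb;
#if TARGET_TB_PCREL
        target_ulong pc;
#endif
    } array[TB_JMP_CACHE_SIZE];
};

static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
    return qatomic_rcu_read(&jc->array[hash].tb);
}

static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
    return jc->array[hash].pc;
#else
    return tb_pc(tb);
#endif
}

static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
                 TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
    /* The real code must also order this store against the tb store;
     * that detail is omitted from this sketch. */
    jc->array[hash].pc = pc;
#endif
    qatomic_set(&jc->array[hash].tb, tb);
}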
@@ -453,6 +456,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
+ assert(!TARGET_TB_PCREL);
assert(cc->set_pc);
cc->set_pc(cpu, tb_pc(last_tb));
}
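The new assert records a contract rather than new behaviour: the generic fallback cc->set_pc(cpu, tb_pc(last_tb)) cannot work when the TB carries no absolute pc, so any target that enables TARGET_TB_PCREL must supply its own synchronize_from_tb hook. A hypothetical target-side hook (the "foo" target, FooCPU, FOO_CPU() and env.pc are invented for illustration) might look like:

/* Hypothetical hook for an imaginary "foo" target, for illustration only.
 * A TARGET_TB_PCREL target cannot read the pc from the TB, so it must keep
 * or reconstruct the pc in its own CPU state instead. */
static void foo_cpu_synchronize_from_tb(CPUState *cs,
                                        const TranslationBlock *tb)
{
    FooCPU *cpu = FOO_CPU(cs);

    if (!TARGET_TB_PCREL) {
        /* Non-PCREL builds: the TB still records an absolute pc. */
        cpu->env.pc = tb_pc(tb);
    }
    /* PCREL builds: env.pc is assumed to be kept up to date by the
     * generated code before the TB is exited. */
}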
@@ -1002,7 +1006,7 @@ int cpu_exec(CPUState *cpu)
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
- qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
+ tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
}
#ifndef CONFIG_USER_ONLY