author    Richard Henderson <richard.henderson@linaro.org>  2022-08-12 09:53:53 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2022-10-04 12:13:16 -0700
commit    8ed558ec0cbcc29ecf490e93c54dd65d276e8e69 (patch)
tree      ff425842023c3c01ba85c1f5feef862ac6159fb0 /accel/tcg
parent    fbf59aad178d98afe193fa872a2d880266a75269 (diff)
accel/tcg: Introduce TARGET_TB_PCREL
Prepare for targets to be able to produce TBs that can run in
more than one virtual context.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cpu-exec.c      | 16
-rw-r--r--  accel/tcg/internal.h      |  4
-rw-r--r--  accel/tcg/tb-jmp-cache.h  | 41
-rw-r--r--  accel/tcg/translate-all.c | 64
4 files changed, 98 insertions(+), 27 deletions(-)
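The common thread across the four files below: with TARGET_TB_PCREL enabled, a
TranslationBlock no longer stores the guest virtual pc, so every place that
used tb_pc() as an identity key either drops it from the key (tb_hash_func,
tb_cmp) or fetches the pc from elsewhere (the jump cache entry, or
cpu->cc->get_pc). A minimal standalone sketch of the key change, assuming a
hypothetical hash_words() mixer in place of the real tb_hash_func():

#include <stdint.h>

/* Sketch only, not QEMU code: a pc-relative TB is valid at any virtual
 * address, so the virtual pc must not participate in the lookup key. */
#ifndef TARGET_TB_PCREL
#define TARGET_TB_PCREL 0
#endif

typedef uint64_t tb_page_addr_t;
typedef uint64_t target_ulong;

static uint32_t hash_words(uint64_t a, uint64_t b, uint32_t c)
{
    /* Stand-in mixer; the patch itself calls tb_hash_func(). */
    return (uint32_t)(a ^ (b * 0x9e3779b97f4a7c15ull) ^ c);
}

static uint32_t make_tb_hash_key(tb_page_addr_t phys_pc, target_ulong pc,
                                 uint32_t flags)
{
    /* Under PCREL the pc is forced to 0, exactly as in the
     * tb_hash_func() call sites changed below. */
    return hash_words(phys_pc, TARGET_TB_PCREL ? 0 : pc, flags);
}

All TBs then hash under pc 0, and matching relies on the remaining fields
(physical page, cs_base, flags, cflags, trace state).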
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 8b3f8435fb..f9e5cc9ba0 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -186,7 +186,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
- if (tb_pc(tb) == desc->pc &&
+ if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
tb->page_addr[0] == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
@@ -237,7 +237,8 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
desc.page_addr0 = phys_pc;
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
+ flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@@ -247,16 +248,18 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
uint32_t flags, uint32_t cflags)
{
TranslationBlock *tb;
+ CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
+ jc = cpu->tb_jmp_cache;
+ tb = tb_jmp_cache_get_tb(jc, hash);
if (likely(tb &&
- tb->pc == pc &&
+ tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
@@ -267,7 +270,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
if (tb == NULL) {
return NULL;
}
- qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
+ tb_jmp_cache_set(jc, hash, tb, pc);
return tb;
}
@@ -453,6 +456,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
+ assert(!TARGET_TB_PCREL);
assert(cc->set_pc);
cc->set_pc(cpu, tb_pc(last_tb));
}
@@ -1002,7 +1006,7 @@ int cpu_exec(CPUState *cpu)
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
- qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
+ tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
}
#ifndef CONFIG_USER_ONLY
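The new assert in cpu_tb_exec() above captures a constraint: the generic
fallback recovers the pc via tb_pc(last_tb), which pc-relative TBs do not
carry, so any target enabling TARGET_TB_PCREL must supply a
synchronize_from_tb hook. A standalone sketch of that decision, with stub
types standing in for the QEMU ones:

#include <assert.h>

/* Sketch only; 'Cpu', 'Tb' and 'sync_hook' are stand-ins, not QEMU types. */
#ifndef TARGET_TB_PCREL
#define TARGET_TB_PCREL 0
#endif

typedef struct Tb { unsigned long pc; } Tb;
typedef struct Cpu {
    void (*sync_hook)(struct Cpu *, const Tb *); /* like synchronize_from_tb */
    void (*set_pc)(struct Cpu *, unsigned long); /* like cc->set_pc */
} Cpu;

static void sync_pc_after_exit(Cpu *cpu, const Tb *last_tb)
{
    if (cpu->sync_hook) {
        /* Target-specific recovery; works with or without a stored pc. */
        cpu->sync_hook(cpu, last_tb);
    } else {
        /* Generic fallback needs tb->pc, which PCREL TBs lack. */
        assert(!TARGET_TB_PCREL);
        cpu->set_pc(cpu, last_tb->pc);
    }
}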
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index a3875a3b5a..dc800fd485 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -21,7 +21,11 @@ void tb_htable_init(void);
/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
+#if TARGET_TB_PCREL
+ return cpu->cc->get_pc(cpu);
+#else
return tb_pc(tb);
+#endif
}
#endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index 2d8fbb1bfe..ff5ffc8fc2 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -14,11 +14,52 @@
/*
* Accessed in parallel; all accesses to 'tb' must be atomic.
+ * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
+ * a load_acquire/store_release to 'tb'.
*/
struct CPUJumpCache {
struct {
TranslationBlock *tb;
+#if TARGET_TB_PCREL
+ target_ulong pc;
+#endif
} array[TB_JMP_CACHE_SIZE];
};
+static inline TranslationBlock *
+tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
+{
+#if TARGET_TB_PCREL
+ /* Use acquire to ensure current load of pc from jc. */
+ return qatomic_load_acquire(&jc->array[hash].tb);
+#else
+ /* Use rcu_read to ensure current load of pc from *tb. */
+ return qatomic_rcu_read(&jc->array[hash].tb);
+#endif
+}
+
+static inline target_ulong
+tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
+{
+#if TARGET_TB_PCREL
+ return jc->array[hash].pc;
+#else
+ return tb_pc(tb);
+#endif
+}
+
+static inline void
+tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
+ TranslationBlock *tb, target_ulong pc)
+{
+#if TARGET_TB_PCREL
+ jc->array[hash].pc = pc;
+ /* Use store_release on tb to ensure pc is written first. */
+ qatomic_store_release(&jc->array[hash].tb, tb);
+#else
+ /* Use the pc value already stored in tb->pc. */
+ qatomic_set(&jc->array[hash].tb, tb);
+#endif
+}
+
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
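The pair tb_jmp_cache_set() / tb_jmp_cache_get_tb() above is a two-word
publication protocol: the writer stores pc first and then publishes tb with
store_release; the reader acquires tb before reading pc, so a non-NULL tb
guarantees the pc stored with it is visible. The same pattern, reduced to
portable C11 atomics (hypothetical names, not the QEMU API):

#include <stdatomic.h>
#include <stddef.h>

struct entry {
    _Atomic(void *) tb;   /* published pointer */
    unsigned long   pc;   /* payload ordered by the tb store/load */
};

static void publish(struct entry *e, void *tb, unsigned long pc)
{
    e->pc = pc;                                              /* 1: payload */
    atomic_store_explicit(&e->tb, tb, memory_order_release); /* 2: publish */
}

static void *lookup(struct entry *e, unsigned long *pc_out)
{
    void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);
    if (tb != NULL) {
        /* The acquire pairs with the release above: this pc is at least
         * as new as the one stored before tb was published. */
        *pc_out = e->pc;
    }
    return tb;
}

A concurrent overwrite can still replace pc between the two reads; as in the
patch, such a stale pair is harmless because a pc mismatch simply falls
through to the slow htable lookup.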
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 13c964dcd8..4ed75a13e1 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -299,7 +299,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) {
- prev = (j == 0 ? tb_pc(tb) : 0);
+ prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
} else {
prev = tcg_ctx->gen_insn_data[i - 1][j];
}
@@ -327,7 +327,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc, bool reset_icount)
{
- target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
+ target_ulong data[TARGET_INSN_START_WORDS];
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
CPUArchState *env = cpu->env_ptr;
const uint8_t *p = tb->tc.ptr + tb->tc.size;
@@ -343,6 +343,11 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
return -1;
}
+ memset(data, 0, sizeof(data));
+ if (!TARGET_TB_PCREL) {
+ data[0] = tb_pc(tb);
+ }
+
/* Reconstruct the stored insn data while looking for the point at
which the end of the insn exceeds the searched_pc. */
for (i = 0; i < num_insns; ++i) {
@@ -885,13 +890,13 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap;
const TranslationBlock *b = bp;
- return tb_pc(a) == tb_pc(b) &&
- a->cs_base == b->cs_base &&
- a->flags == b->flags &&
- (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
- a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
- a->page_addr[0] == b->page_addr[0] &&
- a->page_addr[1] == b->page_addr[1];
+ return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
+ a->cs_base == b->cs_base &&
+ a->flags == b->flags &&
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
+ a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
+ a->page_addr[0] == b->page_addr[0] &&
+ a->page_addr[1] == b->page_addr[1]);
}
void tb_htable_init(void)
@@ -1148,6 +1153,28 @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
qemu_spin_unlock(&dest->jmp_lock);
}
+static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
+{
+ CPUState *cpu;
+
+ if (TARGET_TB_PCREL) {
+ /* A TB may be at any virtual address */
+ CPU_FOREACH(cpu) {
+ tcg_flush_jmp_cache(cpu);
+ }
+ } else {
+ uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
+
+ CPU_FOREACH(cpu) {
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
+
+ if (qatomic_read(&jc->array[h].tb) == tb) {
+ qatomic_set(&jc->array[h].tb, NULL);
+ }
+ }
+ }
+}
+
/*
* In user-mode, call with mmap_lock held.
* In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
@@ -1155,7 +1182,6 @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
*/
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
- CPUState *cpu;
PageDesc *p;
uint32_t h;
tb_page_addr_t phys_pc;
@@ -1170,8 +1196,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0];
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
- tb->trace_vcpu_dstate);
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
+ tb->flags, orig_cflags, tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
}
@@ -1187,13 +1213,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
}
/* remove the TB from the hash list */
- h = tb_jmp_cache_hash_func(tb->pc);
- CPU_FOREACH(cpu) {
- CPUJumpCache *jc = cpu->tb_jmp_cache;
- if (qatomic_read(&jc->array[h].tb) == tb) {
- qatomic_set(&jc->array[h].tb, NULL);
- }
- }
+ tb_jmp_cache_inval_tb(tb);
/* suppress this TB from the two jump lists */
tb_remove_from_jmp_list(tb, 0);
@@ -1302,8 +1322,8 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
}
/* add in the hash table */
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
- tb->trace_vcpu_dstate);
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
+ tb->flags, tb->cflags, tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
/* remove TB from the page(s) if we couldn't insert it */
@@ -1373,7 +1393,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
+#if !TARGET_TB_PCREL
tb->pc = pc;
+#endif
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
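The new tb_jmp_cache_inval_tb() earlier in translate-all.c shows the
invalidation cost of pc-relative TBs: the slot index is a hash of the virtual
pc, so a PCREL TB may occupy any slot on any CPU and the whole per-CPU cache
must be flushed. A standalone model of that decision (stand-in types, not
QEMU code):

#include <stdatomic.h>
#include <stddef.h>

#define CACHE_SIZE 4096

struct jmp_cache { _Atomic(void *) tb[CACHE_SIZE]; };

static void inval(struct jmp_cache *jc, void *tb, int pcrel, size_t hash)
{
    if (pcrel) {
        /* tb may have been installed under any pc hash: clear every slot,
         * as tcg_flush_jmp_cache() does per CPU in the patch. */
        for (size_t i = 0; i < CACHE_SIZE; i++) {
            atomic_store(&jc->tb[i], NULL);
        }
    } else if (atomic_load(&jc->tb[hash]) == tb) {
        /* Absolute pc: the slot is determined by hash(tb->pc). */
        atomic_store(&jc->tb[hash], NULL);
    }
}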