Diffstat (limited to 'accel'):
 accel/tcg/cpu-exec.c      |  41
 accel/tcg/translate-all.c | 118
 2 files changed, 98 insertions(+), 61 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 45f6ebc65e..c482008bc7 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -352,28 +352,43 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
}
}
-/* Called with tb_lock held. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next)
{
+ uintptr_t old;
+
assert(n < ARRAY_SIZE(tb->jmp_list_next));
- if (tb->jmp_list_next[n]) {
- /* Another thread has already done this while we were
- * outside of the lock; nothing to do in this case */
- return;
+ qemu_spin_lock(&tb_next->jmp_lock);
+
+ /* make sure the destination TB is valid */
+ if (tb_next->cflags & CF_INVALID) {
+ goto out_unlock_next;
+ }
+ /* Atomically claim the jump destination slot only if it was NULL */
+ old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
+ if (old) {
+ goto out_unlock_next;
}
+
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
+
+ /* add in TB jmp list */
+ tb->jmp_list_next[n] = tb_next->jmp_list_head;
+ tb_next->jmp_list_head = (uintptr_t)tb | n;
+
+ qemu_spin_unlock(&tb_next->jmp_lock);
+
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"Linking TBs %p [" TARGET_FMT_lx
"] index %d -> %p [" TARGET_FMT_lx "]\n",
tb->tc.ptr, tb->pc, n,
tb_next->tc.ptr, tb_next->pc);
+ return;
- /* patch the native jump address */
- tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
-
- /* add in TB jmp circular list */
- tb->jmp_list_next[n] = tb_next->jmp_list_first;
- tb_next->jmp_list_first = (uintptr_t)tb | n;
+ out_unlock_next:
+ qemu_spin_unlock(&tb_next->jmp_lock);
+ return;
}
static inline TranslationBlock *tb_find(CPUState *cpu,
@@ -416,9 +431,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
tb_lock();
acquired_tb_lock = true;
}
- if (!(tb->cflags & CF_INVALID)) {
- tb_add_jump(last_tb, tb_exit, tb);
- }
+ tb_add_jump(last_tb, tb_exit, tb);
}
if (acquired_tb_lock) {
tb_unlock();
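
The cpu-exec.c side of the change moves the CF_INVALID check from the tb_find() call site into tb_add_jump() itself, where it runs under the destination's jmp_lock, and replaces the old "already linked?" test with an atomic claim of the jmp_dest slot. A minimal standalone sketch of that claim pattern using C11 atomics (names here are illustrative; QEMU's atomic_cmpxchg is a wrapper with similar semantics):

    #include <stdatomic.h>
    #include <stdint.h>

    /* One outgoing-jump slot; 0 means "not linked yet". */
    static _Atomic uintptr_t jmp_dest_slot;

    /* Returns nonzero iff this caller won the race and may patch the jump. */
    static int claim_jump_slot(uintptr_t next_tb)
    {
        uintptr_t expected = 0;
        /*
         * Succeeds only while the slot is exactly 0; a sealed slot
         * (low bit set) or an already-installed destination makes the
         * compare fail and the caller backs off, as tb_add_jump does.
         */
        return atomic_compare_exchange_strong(&jmp_dest_slot, &expected,
                                              next_tb);
    }
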
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 3f977532bf..55c9e1b196 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -170,6 +170,9 @@ struct page_collection {
#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
+#define TB_FOR_EACH_JMP(head_tb, tb, n) \
+ TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
+
/* In system mode we want L1_MAP to be based on ram offsets,
while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
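
TB_FOR_EACH_JMP walks the list of TBs whose n-th jump targets head_tb. Each link word packs a TranslationBlock pointer together with the jump index in its low bits, which works because TBs are aligned and those bits are otherwise always zero. A self-contained sketch of walking such a tagged list (hypothetical node type; the real TB_FOR_EACH_TAGGED macro lives elsewhere in this file):

    #include <stdint.h>
    #include <stddef.h>

    struct node {
        uintptr_t next[2];  /* tagged links: (struct node *) | jump index */
    };

    /* Visit every (node, index) pair on a tagged list; a zero word ends
     * the list. Bit 0 carries the jump index, which is free because
     * nodes are at least 2-byte aligned. */
    static void walk_tagged(uintptr_t head, void (*visit)(struct node *, int))
    {
        uintptr_t cur = head;

        while (cur) {
            struct node *nd = (struct node *)(cur & ~(uintptr_t)1);
            int n = (int)(cur & 1);

            visit(nd, n);
            cur = nd->next[n];  /* follow the slot named by the tag */
        }
    }
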
@@ -389,7 +392,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
return -1;
found:
- if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
+ if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
assert(use_icount);
/* Reset the cycle counter to the start of the block
and shift it to the number of actually executed instructions */
@@ -432,7 +435,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
tb = tcg_tb_lookup(host_pc);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
- if (tb->cflags & CF_NOCACHE) {
+ if (tb_cflags(tb) & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
tb_phys_invalidate(tb, -1);
tcg_tb_remove(tb);
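
The tb->cflags reads throughout this patch become tb_cflags(tb) because cflags is now written concurrently: do_tb_phys_invalidate() sets CF_INVALID while other threads read the field without holding any lock. The accessor is roughly an atomic load; a standalone sketch of its shape (see include/exec/exec-all.h for the real definition):

    #include <stdatomic.h>
    #include <stdint.h>

    struct tb {                       /* stand-in for TranslationBlock */
        _Atomic uint32_t cflags;
    };

    /* Roughly what tb_cflags() does: an atomic load, so a concurrent
     * CF_INVALID store can never tear or constitute a data race. */
    static inline uint32_t tb_cflags_sketch(struct tb *tb)
    {
        return atomic_load_explicit(&tb->cflags, memory_order_relaxed);
    }
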
@@ -1360,34 +1363,53 @@ static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
g_assert_not_reached();
}
-/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
-static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
+/* remove @orig from its @n_orig-th jump list */
+static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
- TranslationBlock *tb1;
- uintptr_t *ptb, ntb;
- unsigned int n1;
+ uintptr_t ptr, ptr_locked;
+ TranslationBlock *dest;
+ TranslationBlock *tb;
+ uintptr_t *pprev;
+ int n;
- ptb = &tb->jmp_list_next[n];
- if (*ptb) {
- /* find tb(n) in circular list */
- for (;;) {
- ntb = *ptb;
- n1 = ntb & 3;
- tb1 = (TranslationBlock *)(ntb & ~3);
- if (n1 == n && tb1 == tb) {
- break;
- }
- if (n1 == 2) {
- ptb = &tb1->jmp_list_first;
- } else {
- ptb = &tb1->jmp_list_next[n1];
- }
- }
- /* now we can suppress tb(n) from the list */
- *ptb = tb->jmp_list_next[n];
+ /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
+ ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
+ dest = (TranslationBlock *)(ptr & ~1);
+ if (dest == NULL) {
+ return;
+ }
- tb->jmp_list_next[n] = (uintptr_t)NULL;
+ qemu_spin_lock(&dest->jmp_lock);
+ /*
+ * While acquiring the lock, the jump might have been removed if the
+ * destination TB was invalidated; check again.
+ */
+ ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
+ if (ptr_locked != ptr) {
+ qemu_spin_unlock(&dest->jmp_lock);
+ /*
+ * The only possibility is that the jump was unlinked via
+ * tb_jmp_unlink(dest). Seeing a different destination here would be a
+ * bug, because we set the LSB above.
+ */
+ g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
+ return;
}
+ /*
+ * We first acquired the lock, and since the destination pointer matches,
+ * we know for sure that @orig is in the jmp list.
+ */
+ pprev = &dest->jmp_list_head;
+ TB_FOR_EACH_JMP(dest, tb, n) {
+ if (tb == orig && n == n_orig) {
+ *pprev = tb->jmp_list_next[n];
+ /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
+ qemu_spin_unlock(&dest->jmp_lock);
+ return;
+ }
+ pprev = &tb->jmp_list_next[n];
+ }
+ g_assert_not_reached();
}
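
The removal protocol above has two phases: first atomic_or_fetch sets bit 0 of jmp_dest[n], so tb_add_jump's cmpxchg (which expects exactly NULL) can never install a new jump into this slot again; only then is the destination's jmp_lock taken and the slot re-read to detect a racing tb_jmp_unlink(). A standalone sketch of the seal-then-fetch step (illustrative names):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uintptr_t jmp_dest;  /* destination pointer | seal bit */

    /*
     * Seal the slot so no new jump can be installed, and report the old
     * destination so the caller can lock it and unlink; returns 0 when
     * nothing was linked. The caller must still re-read the slot under
     * the destination's lock, since an unlink may race with the seal.
     */
    static uintptr_t seal_and_fetch_dest(void)
    {
        uintptr_t old = atomic_fetch_or(&jmp_dest, 1);
        return old & ~(uintptr_t)1;
    }
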
/* reset the jump entry 'n' of a TB so that it is not chained to
@@ -1399,24 +1421,21 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
}
/* remove any jumps to the TB */
-static inline void tb_jmp_unlink(TranslationBlock *tb)
+static inline void tb_jmp_unlink(TranslationBlock *dest)
{
- TranslationBlock *tb1;
- uintptr_t *ptb, ntb;
- unsigned int n1;
+ TranslationBlock *tb;
+ int n;
- ptb = &tb->jmp_list_first;
- for (;;) {
- ntb = *ptb;
- n1 = ntb & 3;
- tb1 = (TranslationBlock *)(ntb & ~3);
- if (n1 == 2) {
- break;
- }
- tb_reset_jump(tb1, n1);
- *ptb = tb1->jmp_list_next[n1];
- tb1->jmp_list_next[n1] = (uintptr_t)NULL;
+ qemu_spin_lock(&dest->jmp_lock);
+
+ TB_FOR_EACH_JMP(dest, tb, n) {
+ tb_reset_jump(tb, n);
+ atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
+ /* No need to clear the list entry; setting the dest ptr is enough */
}
+ dest->jmp_list_head = (uintptr_t)NULL;
+
+ qemu_spin_unlock(&dest->jmp_lock);
}
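
In tb_jmp_unlink() above, the atomic_and masks each origin's jmp_dest[n] with 1 rather than storing 0: this clears the destination pointer while preserving bit 0, which a racing tb_remove_from_jmp_list() may have just set as its seal. That racer then re-reads the slot under the lock, sees it changed, and takes the assertion path (ptr_locked == 1 with CF_INVALID set). A one-line sketch of the masked clear (illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uintptr_t jmp_dest;

    /* Drop the destination pointer but keep a concurrent seal bit. */
    static void clear_dest_keep_seal(void)
    {
        atomic_fetch_and(&jmp_dest, (uintptr_t)1);
    }
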
/* If @rm_from_page_list is set, call with the TB's pages' locks held */
@@ -1429,11 +1448,14 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
assert_tb_locked();
+ /* make sure no further incoming jumps will be chained to this TB */
+ qemu_spin_lock(&tb->jmp_lock);
atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
+ qemu_spin_unlock(&tb->jmp_lock);
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
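
Taking jmp_lock around the CF_INVALID store looks redundant at first, but it is what makes the check in tb_add_jump() sound: a linker holding tb_next->jmp_lock that sees CF_INVALID clear knows the tb_jmp_unlink() pass has not started, so the entry it inserts will be seen and removed by that pass. A compact sketch of the two sides of this handshake (pthread spinlocks and an illustrative flag bit stand in for QEMU's primitives):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define CF_INVALID_BIT (1u << 31)  /* illustrative, not QEMU's value */

    struct block {
        /* pthread_spin_init(&b->jmp_lock, 0) assumed done at creation */
        pthread_spinlock_t jmp_lock;
        _Atomic uint32_t cflags;
    };

    /* Invalidator: publish the flag while holding jmp_lock, so anyone
     * who takes the lock afterwards is guaranteed to observe it. */
    static void invalidate(struct block *b)
    {
        pthread_spin_lock(&b->jmp_lock);
        atomic_fetch_or(&b->cflags, CF_INVALID_BIT);
        pthread_spin_unlock(&b->jmp_lock);
        /* ...then unlink incoming jumps; no new ones can appear. */
    }

    /* Linker: call with b->jmp_lock held; a clear flag means the unlink
     * pass has not run, so inserting into b's jump list is safe. */
    static int may_link(struct block *b)
    {
        return !(atomic_load(&b->cflags) & CF_INVALID_BIT);
    }
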
@@ -1773,10 +1795,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
CODE_GEN_ALIGN));
/* init jump list */
- assert(((uintptr_t)tb & 3) == 0);
- tb->jmp_list_first = (uintptr_t)tb | 2;
+ qemu_spin_init(&tb->jmp_lock);
+ tb->jmp_list_head = (uintptr_t)NULL;
tb->jmp_list_next[0] = (uintptr_t)NULL;
tb->jmp_list_next[1] = (uintptr_t)NULL;
+ tb->jmp_dest[0] = (uintptr_t)NULL;
+ tb->jmp_dest[1] = (uintptr_t)NULL;
/* init original jump addresses which have been set during tcg_gen_code() */
if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
@@ -1868,7 +1892,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
}
}
if (current_tb == tb &&
- (current_tb->cflags & CF_COUNT_MASK) != 1) {
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
@@ -2067,7 +2091,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
- (current_tb->cflags & CF_COUNT_MASK) != 1) {
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
@@ -2192,7 +2216,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
/* Generate a new TB executing the I/O insn. */
cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
- if (tb->cflags & CF_NOCACHE) {
+ if (tb_cflags(tb) & CF_NOCACHE) {
if (tb->orig_tb) {
/* Invalidate original TB if this TB was generated in
* cpu_exec_nocache() */