Diffstat (limited to 'translate-all.c')
 translate-all.c | 177 ++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 150 insertions(+), 27 deletions(-)
diff --git a/translate-all.c b/translate-all.c
index 76fc18c98f..e6a8b07363 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -31,6 +31,7 @@
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
+#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
@@ -56,16 +57,39 @@
#include "qemu/timer.h"
#include "exec/log.h"
-//#define DEBUG_TB_INVALIDATE
-//#define DEBUG_FLUSH
+/* #define DEBUG_TB_INVALIDATE */
+/* #define DEBUG_TB_FLUSH */
+/* #define DEBUG_LOCKING */
/* make various TB consistency checks */
-//#define DEBUG_TB_CHECK
+/* #define DEBUG_TB_CHECK */
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif
+/* Access to the various translation structures needs to be serialised
+ * via locks for consistency. This is automatic for SoftMMU-based
+ * system emulation due to its single-threaded nature. In user-mode
+ * emulation, access to the memory-related structures is protected by
+ * the mmap_lock.
+ */
+#ifdef DEBUG_LOCKING
+#define DEBUG_MEM_LOCKS 1
+#else
+#define DEBUG_MEM_LOCKS 0
+#endif
+
+#ifdef CONFIG_SOFTMMU
+#define assert_memory_lock() do { /* nothing */ } while (0)
+#else
+#define assert_memory_lock() do { \
+ if (DEBUG_MEM_LOCKS) { \
+ g_assert(have_mmap_lock()); \
+ } \
+ } while (0)
+#endif
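
As a usage note: assert_memory_lock() is meant to sit at the top of functions whose callers must hold the mmap_lock. A minimal user-mode sketch with hypothetical function names (only mmap_lock()/mmap_unlock() and the macro above are real QEMU interfaces):

    /* Hypothetical sketch: the callee asserts the lock its caller holds. */
    static void touch_translation_structures(void)
    {
        assert_memory_lock();   /* no-op on SoftMMU; with DEBUG_LOCKING in
                                 * user-mode, checks have_mmap_lock() */
        /* ... modify l1_map / PageDesc state here ... */
    }

    static void example_caller(void)
    {
        mmap_lock();
        touch_translation_structures();
        mmap_unlock();
    }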
+
#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
@@ -173,6 +197,23 @@ void tb_lock_reset(void)
#endif
}
+#ifdef DEBUG_LOCKING
+#define DEBUG_TB_LOCKS 1
+#else
+#define DEBUG_TB_LOCKS 0
+#endif
+
+#ifdef CONFIG_SOFTMMU
+#define assert_tb_lock() do { /* nothing */ } while (0)
+#else
+#define assert_tb_lock() do { \
+ if (DEBUG_TB_LOCKS) { \
+ g_assert(have_tb_lock); \
+ } \
+ } while (0)
+#endif
+
+
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
@@ -267,7 +308,9 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
return p - block;
}
-/* The cpu state corresponding to 'searched_pc' is restored. */
+/* The cpu state corresponding to 'searched_pc' is restored.
+ * Called with tb_lock held.
+ */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc)
{
@@ -320,7 +363,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
TranslationBlock *tb;
+ bool r = false;
+ tb_lock();
tb = tb_find_pc(retaddr);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, retaddr);
@@ -329,9 +374,11 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
- return true;
+ r = true;
}
- return false;
+ tb_unlock();
+
+ return r;
}
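
The division of labour here is that cpu_restore_state_from_tb() requires tb_lock to be held (per its new comment), while the public cpu_restore_state() acquires and releases it itself. A hedged caller-side sketch (hypothetical fault-handling path, not from this patch):

    /* Hypothetical host-fault path: no locking needed at the call site,
     * since cpu_restore_state() takes tb_lock around the lookup itself. */
    static void on_host_fault(CPUState *cpu, uintptr_t host_retaddr)
    {
        if (cpu_restore_state(cpu, host_retaddr)) {
            cpu_loop_exit(cpu);     /* guest state is now consistent */
        }
    }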
void page_size_init(void)
@@ -421,6 +468,7 @@ static void page_init(void)
}
/* If alloc=1:
+ * Called with tb_lock held for system emulation.
* Called with mmap_lock held for user-mode emulation.
*/
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
@@ -429,6 +477,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
void **lp;
int i;
+ if (alloc) {
+ assert_memory_lock();
+ }
+
/* Level 1. Always allocated. */
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
@@ -785,12 +837,18 @@ bool tcg_enabled(void)
return tcg_ctx.code_gen_buffer != NULL;
}
-/* Allocate a new translation block. Flush the translation buffer if
- too many translation blocks or too much generated code. */
+/*
+ * Allocate a new translation block. Flush the translation buffer if
+ * too many translation blocks or too much generated code.
+ *
+ * Called with tb_lock held.
+ */
static TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
+ assert_tb_lock();
+
if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
return NULL;
}
@@ -801,8 +859,11 @@ static TranslationBlock *tb_alloc(target_ulong pc)
return tb;
}
+/* Called with tb_lock held. */
void tb_free(TranslationBlock *tb)
{
+ assert_tb_lock();
+
/* In practice this is mostly used for single-use temporary TBs.
 Ignore the hard cases and just back up if this TB happens to
 be the last one generated. */
@@ -856,20 +917,18 @@ static void page_flush_tb(void)
}
/* flush all the translation blocks */
-static void do_tb_flush(CPUState *cpu, void *data)
+static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
- unsigned tb_flush_req = (unsigned) (uintptr_t) data;
-
tb_lock();
- /* If it's already been done on request of another CPU,
+ /* If it has already been done at the request of another CPU,
* just retry.
*/
- if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
+ if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
goto done;
}
-#if defined(DEBUG_FLUSH)
+#if defined(DEBUG_TB_FLUSH)
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
(unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
@@ -906,8 +965,9 @@ done:
void tb_flush(CPUState *cpu)
{
if (tcg_enabled()) {
- uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
- async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
+ unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+ async_safe_run_on_cpu(cpu, do_tb_flush,
+ RUN_ON_CPU_HOST_INT(tb_flush_count));
}
}
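
The flush-count comparison implements a generation-counter handshake: tb_flush() samples the count without holding tb_lock, and do_tb_flush() re-checks it under the lock so that concurrent requests collapse into a single flush. A stripped-down sketch of the idiom using plain pthreads instead of QEMU's run-on-CPU machinery (all names here are illustrative):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned flush_count;    /* bumped once per flush actually done */

    /* Requester: sample the generation outside the lock. */
    static unsigned flush_generation(void)
    {
        return __atomic_load_n(&flush_count, __ATOMIC_SEQ_CST);
    }

    /* Worker: flush only if nobody else has flushed since we were asked. */
    static void do_flush(unsigned requested_at)
    {
        pthread_mutex_lock(&lock);
        if (flush_count == requested_at) {
            /* ... the expensive flush itself ... */
            flush_count++;
        }
        pthread_mutex_unlock(&lock);
    }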
@@ -925,6 +985,10 @@ do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
}
}
+/* verify that all the pages have correct rights for code
+ *
+ * Called with tb_lock held.
+ */
static void tb_invalidate_check(target_ulong address)
{
address &= TARGET_PAGE_MASK;
@@ -1029,7 +1093,10 @@ static inline void tb_jmp_unlink(TranslationBlock *tb)
}
}
-/* invalidate one TB */
+/* invalidate one TB
+ *
+ * Called with tb_lock held.
+ */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
CPUState *cpu;
@@ -1037,6 +1104,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
uint32_t h;
tb_page_addr_t phys_pc;
+ assert_tb_lock();
+
atomic_set(&tb->invalid, true);
/* remove the TB from the hash list */
@@ -1094,7 +1163,7 @@ static void build_page_bitmap(PageDesc *p)
tb_end = tb_start + tb->size;
if (tb_end > TARGET_PAGE_SIZE) {
tb_end = TARGET_PAGE_SIZE;
- }
+ }
} else {
tb_start = 0;
tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
@@ -1117,6 +1186,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
bool page_already_protected;
#endif
+ assert_memory_lock();
+
tb->page_addr[n] = page_addr;
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
@@ -1173,6 +1244,8 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
{
uint32_t h;
+ assert_memory_lock();
+
/* add in the page list */
tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
if (phys_page2 != -1) {
@@ -1204,6 +1277,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
+ assert_memory_lock();
phys_pc = get_page_addr_code(env, pc);
if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
@@ -1328,9 +1402,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
- * Called with mmap_lock held for user-mode emulation
+ * Called with mmap_lock held for user-mode emulation, grabs tb_lock
+ * Called with tb_lock held for system-mode emulation
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
while (start < end) {
tb_invalidate_phys_page_range(start, end, 0);
@@ -1339,6 +1414,21 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
}
}
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+ assert_tb_lock();
+ tb_invalidate_phys_range_1(start, end);
+}
+#else
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+ assert_memory_lock();
+ tb_lock();
+ tb_invalidate_phys_range_1(start, end);
+ tb_unlock();
+}
+#endif
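
Together the two variants give callers a single entry point with per-configuration lock preconditions, matching the comment above. An illustrative caller-side view (sketch only, not part of this patch):

    /* Illustrative only: which lock each configuration's caller holds. */
    #ifdef CONFIG_SOFTMMU
        tb_lock();                              /* caller provides tb_lock */
        tb_invalidate_phys_range(start, end);   /* asserts tb_lock */
        tb_unlock();
    #else
        mmap_lock();                            /* caller provides mmap_lock */
        tb_invalidate_phys_range(start, end);   /* takes tb_lock internally */
        mmap_unlock();
    #endif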
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end must refer to the *same* physical page.
@@ -1346,7 +1436,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
- * Called with mmap_lock held for user-mode emulation
+ * Called with tb_lock/mmap_lock held for user-mode emulation
+ * Called with tb_lock held for system-mode emulation
*/
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
@@ -1368,6 +1459,9 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
+ assert_memory_lock();
+ assert_tb_lock();
+
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
@@ -1443,7 +1537,10 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
}
#ifdef CONFIG_SOFTMMU
-/* len must be <= 8 and start must be a multiple of len */
+/* len must be <= 8 and start must be a multiple of len.
+ * Called via softmmu_template.h when code areas are written to,
+ * with tb_lock held.
+ */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
PageDesc *p;
@@ -1457,13 +1554,17 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
(intptr_t)cpu_single_env->segs[R_CS].base);
}
#endif
+ assert_memory_lock();
+
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
- /* build code bitmap */
+ /* build code bitmap. FIXME: writes should be protected by
+ * tb_lock, reads by tb_lock or RCU.
+ */
build_page_bitmap(p);
}
if (p->code_bitmap) {
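
The hunk is truncated just as the bitmap is consulted. For context, a hedged paraphrase of that fast path (not part of this patch): once built, the bitmap answers "does this len-byte write overlap translated code?" with a shift and a mask instead of a walk over the page's TB list.

        /* Hedged paraphrase of the bitmap fast path, not the exact code: */
        unsigned long nr = start & ~TARGET_PAGE_MASK;   /* offset in page */
        unsigned long b = p->code_bitmap[BIT_WORD(nr)]
                              >> (nr & (BITS_PER_LONG - 1));
        if (b & (((unsigned long)1 << len) - 1)) {
            /* the write hits translated code: invalidate the range */
            tb_invalidate_phys_page_range(start, start + len, 1);
        }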
@@ -1502,11 +1603,15 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
uint32_t current_flags = 0;
#endif
+ assert_memory_lock();
+
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
return false;
}
+
+ tb_lock();
tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
@@ -1544,9 +1649,13 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
modifying the memory. It will ensure that it cannot modify
itself */
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+ /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
+ * back into the cpu_exec loop. */
return true;
}
#endif
+ tb_unlock();
+
return false;
}
#endif
@@ -1599,11 +1708,14 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
+ tb_lock();
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+ tb_unlock();
rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
+/* Called with tb_lock held. */
void tb_check_watchpoint(CPUState *cpu)
{
TranslationBlock *tb;
@@ -1640,6 +1752,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
target_ulong pc, cs_base;
uint32_t flags;
+ tb_lock();
tb = tb_find_pc(retaddr);
if (!tb) {
cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
@@ -1691,11 +1804,16 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
/* FIXME: In theory this could raise an exception. In practice
we have already translated the block once so it's probably ok. */
tb_gen_code(cpu, pc, cs_base, flags, cflags);
+
/* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
- the first in the TB) then we end up generating a whole new TB and
- repeating the fault, which is horribly inefficient.
- Better would be to execute just this insn uncached, or generate a
- second new TB. */
+ * the first in the TB) then we end up generating a whole new TB and
+ * repeating the fault, which is horribly inefficient.
+ * Better would be to execute just this insn uncached, or generate a
+ * second new TB.
+ *
+ * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
+ * tb_lock gets reset.
+ */
cpu_loop_exit_noexc(cpu);
}
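
The tb_lock comment above leans on tb_lock_reset() from an earlier hunk: cpu_loop_exit_noexc() longjmps past any unlock call, so the exec loop must restore a known lock state when it lands. A hedged sketch of that recovery site (placement inside cpu_exec is assumed, not shown by this patch):

    /* Hypothetical recovery site in cpu_exec: */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
        /* Landed here via a longjmp such as cpu_loop_exit_noexc(); any
         * tb_lock taken before the jump is still formally held. */
        tb_lock_reset();    /* drop tb_lock iff this thread holds it */
    }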
@@ -1759,6 +1877,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
TranslationBlock *tb;
struct qht_stats hst;
+ tb_lock();
+
target_code_size = 0;
max_target_code_size = 0;
cross_page = 0;
@@ -1820,6 +1940,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
tcg_ctx.tb_ctx.tb_phys_invalidate_count);
cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
tcg_dump_info(f, cpu_fprintf);
+
+ tb_unlock();
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
@@ -1972,6 +2094,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
assert(start < end);
+ assert_memory_lock();
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);