From ba051fb5e56d5ff5e4fa672d37954452e58543b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alex=20Benn=C3=A9e?=
Date: Thu, 27 Oct 2016 16:10:16 +0100
Subject: tcg: move locking for tb_invalidate_phys_page_range up
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the linux-user case all things that involve 'l1_map' and PageDesc
tweaks are protected by the memory lock (mmap_lock). For SoftMMU mode
we previously relied on single-threaded behaviour; with MTTCG we now
use the tb_lock().

As a result we need to do a little re-factoring and push the taking
of this lock up the call tree. This requires a slightly different
entry for the SoftMMU and user-mode cases from tb_invalidate_phys_range.

This also means user-mode breakpoint insertion needs to take two locks,
but it hadn't taken any previously, so this is an improvement.

Signed-off-by: Alex Bennée
Message-Id: <20161027151030.20863-20-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini
---
 translate-all.c | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/translate-all.c b/translate-all.c
index 01b1604130..e6a8b07363 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1402,12 +1402,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
  *
- * Called with mmap_lock held for user-mode emulation
+ * Called with mmap_lock held for user-mode emulation, grabs tb_lock
+ * Called with tb_lock held for system-mode emulation
  */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
 {
-    assert_memory_lock();
-
     while (start < end) {
         tb_invalidate_phys_page_range(start, end, 0);
         start &= TARGET_PAGE_MASK;
@@ -1415,6 +1414,21 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
     }
 }
 
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+    assert_tb_lock();
+    tb_invalidate_phys_range_1(start, end);
+}
+#else
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+    assert_memory_lock();
+    tb_lock();
+    tb_invalidate_phys_range_1(start, end);
+    tb_unlock();
+}
+#endif
 /*
  * Invalidate all TBs which intersect with the target physical address range
  * [start;end[. NOTE: start and end must refer to the *same* physical page.
@@ -1422,7 +1436,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
  *
- * Called with mmap_lock held for user-mode emulation
+ * Called with tb_lock/mmap_lock held for user-mode emulation
+ * Called with tb_lock held for system-mode emulation
  */
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
@@ -1445,6 +1460,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 #endif /* TARGET_HAS_PRECISE_SMC */
 
     assert_memory_lock();
+    assert_tb_lock();
 
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p) {
@@ -1459,7 +1475,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to
        invalidate all the code */
-    tb_lock();
     tb = p->first_tb;
     while (tb != NULL) {
         n = (uintptr_t)tb & 3;
@@ -1519,11 +1534,13 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
         cpu_loop_exit_noexc(cpu);
     }
 #endif
-    tb_unlock();
 }
 
 #ifdef CONFIG_SOFTMMU
-/* len must be <= 8 and start must be a multiple of len */
+/* len must be <= 8 and start must be a multiple of len.
+ * Called via softmmu_template.h when code areas are written to with
+ * tb_lock held.
+ */
 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
 {
     PageDesc *p;
@@ -1537,6 +1554,8 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
                   (intptr_t)cpu_single_env->segs[R_CS].base);
     }
 #endif
+    assert_memory_lock();
+
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p) {
         return;
@@ -1584,6 +1603,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
     uint32_t current_flags = 0;
 #endif
 
+    assert_memory_lock();
+
     addr &= TARGET_PAGE_MASK;
     p = page_find(addr >> TARGET_PAGE_BITS);
     if (!p) {
@@ -1687,7 +1708,9 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
         return;
     }
     ram_addr = memory_region_get_ram_addr(mr) + addr;
+    tb_lock();
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+    tb_unlock();
     rcu_read_unlock();
 }
 #endif /* !defined(CONFIG_USER_ONLY) */
--
cgit v1.2.3
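
The core of the change is the "push the lock up the call tree" pattern: a lock-free worker (tb_invalidate_phys_range_1) does the page walk, and two thin entry points differ only in which locks they expect to hold or acquire. Below is a minimal, self-contained sketch of that pattern, not QEMU code: memory_lock, tb_lock_impl, invalidate_range() and invalidate_range_1() are stand-in names invented for illustration, with pthread mutexes playing the roles of mmap_lock and tb_lock.

```c
/* Sketch of the "push locking up the call tree" pattern from the patch.
 * Stand-ins: memory_lock ~ mmap_lock, tb_lock_impl ~ tb_lock,
 * invalidate_range() ~ tb_invalidate_phys_range().  Not QEMU code. */
#include <pthread.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static pthread_mutex_t memory_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~ mmap_lock */
static pthread_mutex_t tb_lock_impl = PTHREAD_MUTEX_INITIALIZER; /* ~ tb_lock   */

/* Worker: assumes the caller already holds whatever locks are required,
 * exactly like tb_invalidate_phys_range_1() after the patch. */
static void invalidate_range_1(unsigned long start, unsigned long end)
{
    while (start < end) {
        printf("invalidate page at 0x%lx\n", start & PAGE_MASK);
        start = (start & PAGE_MASK) + PAGE_SIZE;
    }
}

#ifdef SOFTMMU_BUILD
/* System-mode entry: the translation lock is already held by the caller,
 * so the entry point only delegates (the patch asserts this). */
static void invalidate_range(unsigned long start, unsigned long end)
{
    invalidate_range_1(start, end);
}
#else
/* User-mode entry: the caller holds the memory lock; take the translation
 * lock around the worker, mirroring the #else branch in the patch. */
static void invalidate_range(unsigned long start, unsigned long end)
{
    pthread_mutex_lock(&tb_lock_impl);
    invalidate_range_1(start, end);
    pthread_mutex_unlock(&tb_lock_impl);
}
#endif

int main(void)
{
    pthread_mutex_lock(&memory_lock);   /* user-mode caller's mmap_lock */
    invalidate_range(0x1000, 0x3001);   /* spans three pages */
    pthread_mutex_unlock(&memory_lock);
    return 0;
}
```

The point of the split is that lock ordering (memory lock first, then translation lock) is decided once at the entry points rather than inside the page walk, which is what lets the patch drop the tb_lock()/tb_unlock() calls from the middle of tb_invalidate_phys_page_range().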