Diffstat (limited to 'cputlb.c')
-rw-r--r--  cputlb.c  463
1 file changed, 389 insertions(+), 74 deletions(-)
diff --git a/cputlb.c b/cputlb.c
index 6c39927455..7fa7fefa05 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
@@ -57,6 +58,40 @@
} \
} while (0)
+#define assert_cpu_is_self(this_cpu) do { \
+ if (DEBUG_TLB_GATE) { \
+ g_assert(!this_cpu->created || qemu_cpu_is_self(this_cpu)); \
+ } \
+ } while (0)
+
+/* run_on_cpu_data.target_ptr should always be big enough for a
+ * target_ulong even on 32 bit builds */
+QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
+
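
A minimal sketch of the round-trip this assert protects; RUN_ON_CPU_TARGET_PTR is the wrapper the async helpers below use to pack a target_ulong into the run_on_cpu_data union:

    run_on_cpu_data d = RUN_ON_CPU_TARGET_PTR(addr);  /* pack on the source vCPU */
    target_ulong back = (target_ulong)d.target_ptr;   /* unpack in the async worker */
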
+/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
+QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
+#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
+
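
A worked example of the bitmask (assuming a hypothetical target with NB_MMU_MODES = 3, so ALL_MMUIDX_BITS = (1 << 3) - 1 = 0x7):

    uint16_t idxmap = (1 << 0) | (1 << 2);  /* flush mmu_idx 0 and 2: 0x5 */
    /* the async worker recovers each index with test_bit(mmu_idx, &bitmap) */
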
+/* flush_all_helper: run fn across all cpus
+ *
+ * The helper is queued as async work on every vCPU other than the
+ * source. The caller then runs fn on the source vCPU itself, either
+ * directly or queued as "safe" work; the latter creates a
+ * synchronisation point where all queued work is finished before
+ * execution starts again.
+ */
+static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
+ run_on_cpu_data d)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ if (cpu != src) {
+ async_run_on_cpu(cpu, fn, d);
+ }
+ }
+}
+
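
A sketch (not itself part of the patch) of the calling pattern the *_all_cpus and *_all_cpus_synced variants below layer on top of this helper:

    /* Queue fn on every remote vCPU, then deal with the source vCPU:
     * either run fn inline now, or queue it as "safe" work so that
     * execution only resumes once all queued flushes have finished. */
    static void example_flush_all(CPUState *src, run_on_cpu_func fn,
                                  run_on_cpu_data d, bool synced)
    {
        flush_all_helper(src, fn, d);
        if (synced) {
            async_safe_run_on_cpu(src, fn, d);  /* synchronisation point */
        } else {
            fn(src, d);                         /* flush src immediately */
        }
    }
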
/* statistics */
int tlb_flush_count;
@@ -65,10 +100,22 @@ int tlb_flush_count;
* flushing more entries than required is only an efficiency issue,
* not a correctness issue.
*/
-void tlb_flush(CPUState *cpu)
+static void tlb_flush_nocheck(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
+ /* The QOM tests will trigger tlb_flushes without setting up TCG
+ * so we bug out here in that case.
+ */
+ if (!tcg_enabled()) {
+ return;
+ }
+
+ assert_cpu_is_self(cpu);
+ /* Don't increment inside tlb_debug(): its arguments are not
+  * evaluated when the debug gate is compiled out. */
+ tlb_flush_count++;
+ tlb_debug("(count: %d)\n", tlb_flush_count);
+
+ tb_lock();
+
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
@@ -76,39 +123,117 @@ void tlb_flush(CPUState *cpu)
env->vtlb_index = 0;
env->tlb_flush_addr = -1;
env->tlb_flush_mask = 0;
- tlb_flush_count++;
+
+ tb_unlock();
+
+ atomic_mb_set(&cpu->pending_tlb_flush, 0);
+}
+
+static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
+{
+ tlb_flush_nocheck(cpu);
+}
+
+void tlb_flush(CPUState *cpu)
+{
+ if (cpu->created && !qemu_cpu_is_self(cpu)) {
+ if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
+ atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
+ async_run_on_cpu(cpu, tlb_flush_global_async_work,
+ RUN_ON_CPU_NULL);
+ }
+ } else {
+ tlb_flush_nocheck(cpu);
+ }
+}
+
+void tlb_flush_all_cpus(CPUState *src_cpu)
+{
+ const run_on_cpu_func fn = tlb_flush_global_async_work;
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
+ fn(src_cpu, RUN_ON_CPU_NULL);
+}
+
+void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+ const run_on_cpu_func fn = tlb_flush_global_async_work;
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
+ async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}
-static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
+ unsigned long mmu_idx_bitmask = data.host_int;
+ int mmu_idx;
- tlb_debug("start\n");
+ assert_cpu_is_self(cpu);
- for (;;) {
- int mmu_idx = va_arg(argp, int);
+ tb_lock();
- if (mmu_idx < 0) {
- break;
- }
+ tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
- tlb_debug("%d\n", mmu_idx);
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+
+ if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
+ tlb_debug("%d\n", mmu_idx);
- memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
- memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+ memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
+ memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+ }
}
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+
+ tlb_debug("done\n");
+
+ tb_unlock();
}
-void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
- va_list argp;
- va_start(argp, cpu);
- v_tlb_flush_by_mmuidx(cpu, argp);
- va_end(argp);
+ tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
+
+ if (!qemu_cpu_is_self(cpu)) {
+ uint16_t pending_flushes = idxmap;
+ pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);
+
+ if (pending_flushes) {
+ tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);
+
+ atomic_or(&cpu->pending_tlb_flush, pending_flushes);
+ async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
+ RUN_ON_CPU_HOST_INT(pending_flushes));
+ }
+ } else {
+ tlb_flush_by_mmuidx_async_work(cpu,
+ RUN_ON_CPU_HOST_INT(idxmap));
+ }
+}
+
+void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
+
+ tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
+ fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
+
+ tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
+ async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
+}
+
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->addr_read &
@@ -121,12 +246,15 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
}
}
-void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
+ target_ulong addr = (target_ulong) data.target_ptr;
int i;
int mmu_idx;
+ assert_cpu_is_self(cpu);
+
tlb_debug("page :" TARGET_FMT_lx "\n", addr);
/* Check if we need to flush due to large pages. */
@@ -156,15 +284,62 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tb_flush_jmp_cache(cpu, addr);
}
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
+void tlb_flush_page(CPUState *cpu, target_ulong addr)
+{
+ tlb_debug("page :" TARGET_FMT_lx "\n", addr);
+
+ if (!qemu_cpu_is_self(cpu)) {
+ async_run_on_cpu(cpu, tlb_flush_page_async_work,
+ RUN_ON_CPU_TARGET_PTR(addr));
+ } else {
+ tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
+ }
+}
+
+/* As we are going to hijack the bottom bits of the page address for an
+ * mmuidx bit mask we need to fail the build if we can't do that.
+ */
+QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
+
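
A worked example of the encoding (hypothetical values, assuming TARGET_PAGE_BITS = 12):

    /* encode: page-aligned address in the high bits, idxmap in the low bits */
    target_ulong v = (0x40001000 & TARGET_PAGE_MASK) | 0x5;  /* = 0x40001005 */
    /* decode, as the async workers below do */
    target_ulong addr    = v & TARGET_PAGE_MASK;  /* 0x40001000 */
    unsigned long bitmap = v & ALL_MMUIDX_BITS;   /* mmu_idx 0 and 2 */
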
+static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
+ run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
- int i, k;
- va_list argp;
+ target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
+ target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
+ unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
+ int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ int mmu_idx;
+ int i;
+
+ assert_cpu_is_self(cpu);
- va_start(argp, addr);
+ tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
+ page, addr, mmu_idx_bitmap);
+
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
+ tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
+
+ /* check whether there are vtlb entries that need to be flushed */
+ for (i = 0; i < CPU_VTLB_SIZE; i++) {
+ tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
+ }
+ }
+ }
+
+ tb_flush_jmp_cache(cpu, addr);
+}
+
+static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ CPUArchState *env = cpu->env_ptr;
+ target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
+ target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
+ unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
- tlb_debug("addr "TARGET_FMT_lx"\n", addr);
+ tlb_debug("addr:"TARGET_FMT_lx" mmu_idx:0x%04lx\n", addr, mmu_idx_bitmap);
/* Check if we need to flush due to large pages. */
if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
@@ -172,33 +347,80 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
env->tlb_flush_addr, env->tlb_flush_mask);
- v_tlb_flush_by_mmuidx(cpu, argp);
- va_end(argp);
- return;
+ tlb_flush_by_mmuidx_async_work(cpu,
+ RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
+ } else {
+ tlb_flush_page_by_mmuidx_async_work(cpu, data);
}
+}
- addr &= TARGET_PAGE_MASK;
- i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
+{
+ target_ulong addr_and_mmu_idx;
- for (;;) {
- int mmu_idx = va_arg(argp, int);
+ tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
- if (mmu_idx < 0) {
- break;
- }
+ /* This should already be page aligned */
+ addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
+ addr_and_mmu_idx |= idxmap;
- tlb_debug("idx %d\n", mmu_idx);
+ if (!qemu_cpu_is_self(cpu)) {
+ async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
+ RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ } else {
+ tlb_check_page_and_flush_by_mmuidx_async_work(
+ cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ }
+}
- tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
+ uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
+ target_ulong addr_and_mmu_idx;
- /* check whether there are vltb entries that need to be flushed */
- for (k = 0; k < CPU_VTLB_SIZE; k++) {
- tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
- }
- }
- va_end(argp);
+ tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
- tb_flush_jmp_cache(cpu, addr);
+ /* This should already be page aligned */
+ addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
+ addr_and_mmu_idx |= idxmap;
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+}
+
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap)
+{
+ const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
+ target_ulong addr_and_mmu_idx;
+
+ tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+
+ /* This should already be page aligned */
+ addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
+ addr_and_mmu_idx |= idxmap;
+
+ flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+}
+
+void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+{
+ const run_on_cpu_func fn = tlb_flush_page_async_work;
+
+ flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
+ fn(src, RUN_ON_CPU_TARGET_PTR(addr));
+}
+
+void tlb_flush_page_all_cpus_synced(CPUState *src,
+ target_ulong addr)
+{
+ const run_on_cpu_func fn = tlb_flush_page_async_work;
+
+ flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
+ async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}
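
Hypothetical target-code usage of the new API (MMU_KERNEL_IDX, MMU_USER_IDX, cs and addr are made-up names for illustration):

    /* Invalidate one page in the kernel and user translation regimes
     * on every vCPU, resuming only once all flushes have completed. */
    uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, addr, idxmap);
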
/* update the TLBs so that writes to code in the virtual page 'addr'
@@ -216,36 +438,84 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
-static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
-{
- return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
-}
-void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
+/*
+ * Dirty write flag handling
+ *
+ * When the TCG code writes to a location it looks up the address in
+ * the TLB and uses that data to compute the final address. If any of
+ * the lower bits of the address are set then the slow path is forced.
+ * There are a number of reasons to do this but for normal RAM the
+ * most usual is detecting writes to code regions which may invalidate
+ * generated code.
+ *
+ * Because we want other vCPUs to respond to changes straight away we
+ * update the te->addr_write field atomically. If the TLB entry has
+ * been changed by the vCPU in the meantime we skip the update.
+ *
+ * As this function uses atomic accesses we also need to ensure
+ * updates to tlb_entries follow the same access rules. We don't need
+ * to worry about this for oversized guests as MTTCG is disabled for
+ * them.
+ */
+
+static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
uintptr_t length)
{
- uintptr_t addr;
+#if TCG_OVERSIZED_GUEST
+ uintptr_t addr = tlb_entry->addr_write;
- if (tlb_is_dirty_ram(tlb_entry)) {
- addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
+ if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
+ addr &= TARGET_PAGE_MASK;
+ addr += tlb_entry->addend;
if ((addr - start) < length) {
tlb_entry->addr_write |= TLB_NOTDIRTY;
}
}
+#else
+ /* paired with atomic_mb_set in tlb_set_page_with_attrs */
+ uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
+ uintptr_t addr = orig_addr;
+
+ if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
+ addr &= TARGET_PAGE_MASK;
+ addr += atomic_read(&tlb_entry->addend);
+ if ((addr - start) < length) {
+ uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
+ atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
+ }
+ }
+#endif
}
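
A self-contained model of the lock-free update above, in plain C11 atomics rather than QEMU's wrappers (a sketch, not the patch's code):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Set a flag bit only if the word still holds the value we read;
     * if the owning vCPU replaced the entry in the meantime, the
     * compare-exchange fails and the update is deliberately skipped. */
    static void set_flag_if_unchanged(_Atomic uintptr_t *word, uintptr_t flag)
    {
        uintptr_t old = atomic_load(word);
        atomic_compare_exchange_strong(word, &old, old | flag);
    }
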
-static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
+/* For atomic correctness when running MTTCG we need to use the right
+ * primitives when copying entries */
+static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
+ bool atomic_set)
{
- ram_addr_t ram_addr;
-
- ram_addr = qemu_ram_addr_from_host(ptr);
- if (ram_addr == RAM_ADDR_INVALID) {
- fprintf(stderr, "Bad ram pointer %p\n", ptr);
- abort();
+#if TCG_OVERSIZED_GUEST
+ *d = *s;
+#else
+ if (atomic_set) {
+ d->addr_read = s->addr_read;
+ d->addr_code = s->addr_code;
+ atomic_set(&d->addend, atomic_read(&s->addend));
+ /* Pairs with flag setting in tlb_reset_dirty_range */
+ atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
+ } else {
+ d->addr_read = s->addr_read;
+ d->addr_write = atomic_read(&s->addr_write);
+ d->addr_code = s->addr_code;
+ d->addend = atomic_read(&s->addend);
}
- return ram_addr;
+#endif
}
+/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
+ * the target vCPU). As such care needs to be taken that we don't
+ * dangerously race with another vCPU update. The only thing actually
+ * updated is the target TLB entry's ->addr_write flags.
+ */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
CPUArchState *env;
@@ -283,6 +553,8 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
int i;
int mmu_idx;
+ assert_cpu_is_self(cpu);
+
vaddr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -337,11 +609,12 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
target_ulong address;
target_ulong code_address;
uintptr_t addend;
- CPUTLBEntry *te;
+ CPUTLBEntry *te, *tv, tn;
hwaddr iotlb, xlat, sz;
unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
int asidx = cpu_asidx_from_attrs(cpu, attrs);
+ assert_cpu_is_self(cpu);
assert(size >= TARGET_PAGE_SIZE);
if (size != TARGET_PAGE_SIZE) {
tlb_add_large_page(env, vaddr, size);
@@ -371,41 +644,50 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
te = &env->tlb_table[mmu_idx][index];
-
/* do not discard the translation in te, evict it into a victim tlb */
- env->tlb_v_table[mmu_idx][vidx] = *te;
+ tv = &env->tlb_v_table[mmu_idx][vidx];
+
+ /* addr_write can race with tlb_reset_dirty_range */
+ copy_tlb_helper(tv, te, true);
+
env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
/* refill the tlb */
env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
env->iotlb[mmu_idx][index].attrs = attrs;
- te->addend = addend - vaddr;
+
+ /* Now calculate the new entry */
+ tn.addend = addend - vaddr;
if (prot & PAGE_READ) {
- te->addr_read = address;
+ tn.addr_read = address;
} else {
- te->addr_read = -1;
+ tn.addr_read = -1;
}
if (prot & PAGE_EXEC) {
- te->addr_code = code_address;
+ tn.addr_code = code_address;
} else {
- te->addr_code = -1;
+ tn.addr_code = -1;
}
+
+ tn.addr_write = -1;
if (prot & PAGE_WRITE) {
if ((memory_region_is_ram(section->mr) && section->readonly)
|| memory_region_is_romd(section->mr)) {
/* Write access calls the I/O callback. */
- te->addr_write = address | TLB_MMIO;
+ tn.addr_write = address | TLB_MMIO;
} else if (memory_region_is_ram(section->mr)
&& cpu_physical_memory_is_clean(
memory_region_get_ram_addr(section->mr) + xlat)) {
- te->addr_write = address | TLB_NOTDIRTY;
+ tn.addr_write = address | TLB_NOTDIRTY;
} else {
- te->addr_write = address;
+ tn.addr_write = address;
}
- } else {
- te->addr_write = -1;
}
+
+ /* Pairs with flag setting in tlb_reset_dirty_range */
+ copy_tlb_helper(te, &tn, true);
}
/* Add a new TLB entry, but without specifying the memory
@@ -452,6 +734,18 @@ static void report_bad_exec(CPUState *cpu, target_ulong addr)
log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}
+static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
+{
+ ram_addr_t ram_addr;
+
+ ram_addr = qemu_ram_addr_from_host(ptr);
+ if (ram_addr == RAM_ADDR_INVALID) {
+ error_report("Bad ram pointer %p", ptr);
+ abort();
+ }
+ return ram_addr;
+}
+
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
* is actually a ram_addr_t (in system mode; the user mode emulation
@@ -495,6 +789,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
hwaddr physaddr = iotlbentry->addr;
MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
uint64_t val;
+ bool locked = false;
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
@@ -503,7 +798,16 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
cpu->mem_io_vaddr = addr;
+
+ if (mr->global_locking) {
+ qemu_mutex_lock_iothread();
+ locked = true;
+ }
memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
+
return val;
}
@@ -514,15 +818,23 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
CPUState *cpu = ENV_GET_CPU(env);
hwaddr physaddr = iotlbentry->addr;
MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+ bool locked = false;
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
-
cpu->mem_io_vaddr = addr;
cpu->mem_io_pc = retaddr;
+
+ if (mr->global_locking) {
+ qemu_mutex_lock_iothread();
+ locked = true;
+ }
memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
}
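
The BQL is only taken for regions that declare global_locking (the default). A device model that does its own locking can opt out so that its MMIO callbacks run lock-free under MTTCG; a sketch, assuming a device state s and MemoryRegionOps my_ops defined elsewhere:

    memory_region_init_io(&s->iomem, OBJECT(s), &my_ops, s, "my-dev", 0x1000);
    memory_region_clear_global_locking(&s->iomem);  /* ops handle their own locking */
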
/* Return true if ADDR is present in the victim tlb, and has been copied
@@ -538,10 +850,13 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
+
+ copy_tlb_helper(&tmptlb, tlb, false);
+ copy_tlb_helper(tlb, vtlb, true);
+ copy_tlb_helper(vtlb, &tmptlb, true);
+
CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
-
- tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
tmpio = *io; *io = *vio; *vio = tmpio;
return true;
}