author     Peter Maydell <peter.maydell@linaro.org>  2018-08-14 17:17:19 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2018-08-14 17:17:19 +0100
commit     dbea78a4d696e35d28a35db95cb29ff075626150 (patch)
tree       df8a5230e1e960b252449cd11b795f24400f30f6
parent     c4379b4874f4c522f6818f1720f295205d7cf34d (diff)
accel/tcg: Pass read access type through to io_readx()
The io_readx() function needs to know whether the load it is doing is an
MMU_DATA_LOAD or an MMU_INST_FETCH, so that it can pass the right value
to the cpu_transaction_failed() function. Plumb this information through
from the softmmu code.

This is currently not often going to give the wrong answer, because
usually instruction fetches go via get_page_addr_code(). However once we
switch over to handling execution from non-RAM by creating single-insn
TBs, the path for an insn fetch to generate a bus error will be through
cpu_ld*_code() and io_readx(), so without this change we will generate a
d-side fault when we should generate an i-side fault.

We also have to pass the access type via a CPU struct global down to
unassigned_mem_read(), for the benefit of the targets which still use
the cpu_unassigned_access() hook (m68k, mips, sparc, xtensa).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180710160013.26559-2-peter.maydell@linaro.org
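[Editor's note] For reference, the access type being plumbed through is
QEMU's MMUAccessType enum, declared in include/qom/cpu.h at this point
in the tree; the patch's effect is that io_readx() reports whichever of
these values the caller was using instead of hard-coding MMU_DATA_LOAD:

    typedef enum MMUAccessType {
        MMU_DATA_LOAD  = 0,
        MMU_DATA_STORE = 1,
        MMU_INST_FETCH = 2
    } MMUAccessType;

A target's cpu_transaction_failed() implementation can then raise an
i-side rather than a d-side fault when it sees MMU_INST_FETCH.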
-rw-r--r--  accel/tcg/cputlb.c            |  5 +++--
-rw-r--r--  accel/tcg/softmmu_template.h  | 11 +++++++----
-rw-r--r--  include/qom/cpu.h             |  6 ++++++
-rw-r--r--  memory.c                      |  3 ++-
4 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 563fa30117..51b1193044 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -789,7 +789,7 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx,
                          target_ulong addr, uintptr_t retaddr,
-                         bool recheck, int size)
+                         bool recheck, MMUAccessType access_type, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     hwaddr mr_offset;
@@ -831,6 +831,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     }
 
     cpu->mem_io_vaddr = addr;
+    cpu->mem_io_access_type = access_type;
 
     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
@@ -843,7 +844,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
             section->offset_within_address_space -
             section->offset_within_region;
 
-        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
+        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
     if (locked) {
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index badbf14880..f060a693d4 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -99,11 +99,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               size_t mmu_idx, size_t index,
                                               target_ulong addr,
                                               uintptr_t retaddr,
-                                              bool recheck)
+                                              bool recheck,
+                                              MMUAccessType access_type)
 {
     CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
     return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
-                    DATA_SIZE);
+                    access_type, DATA_SIZE);
 }
 #endif
 
@@ -140,7 +141,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
         res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
-                                    tlb_addr & TLB_RECHECK);
+                                    tlb_addr & TLB_RECHECK,
+                                    READ_ACCESS_TYPE);
         res = TGT_LE(res);
         return res;
     }
@@ -207,7 +209,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
-                                    tlb_addr & TLB_RECHECK,
+                                    tlb_addr & TLB_RECHECK,
+                                    READ_ACCESS_TYPE);
        res = TGT_BE(res);
        return res;
    }
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index bd796579ee..ecf6ed556a 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -386,6 +386,12 @@ struct CPUState {
      */
     uintptr_t mem_io_pc;
     vaddr mem_io_vaddr;
+    /*
+     * This is only needed for the legacy cpu_unassigned_access() hook;
+     * when all targets using it have been converted to use
+     * cpu_transaction_failed() instead it can be removed.
+     */
+    MMUAccessType mem_io_access_type;
 
     int kvm_fd;
     struct KVMState *kvm_state;
diff --git a/memory.c b/memory.c
index e9cd446968..2ea16e7bfb 100644
--- a/memory.c
+++ b/memory.c
@@ -1249,7 +1249,8 @@ static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
 #endif
     if (current_cpu != NULL) {
-        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
+        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
+        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
     }
     return 0;
 }
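
[Editor's note] On the consuming side, targets still using the legacy
hook receive the derived is_exec flag through their registered
cpu_unassigned_access callback. A minimal sketch of such a consumer
(hypothetical illustration only, not taken from any of the in-tree
users; the parameters mirror the cpu_unassigned_access() call above):

    /* Hypothetical hook implementation: distinguishes i-side from
     * d-side bus errors using the is_exec flag that
     * unassigned_mem_read() now derives from cpu->mem_io_access_type.
     */
    static void demo_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                           bool is_write, bool is_exec,
                                           int opaque, unsigned size)
    {
        if (is_exec) {
            /* report an instruction-fetch (i-side) bus error */
        } else {
            /* report a data access (d-side) bus error */
        }
    }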