Diffstat (limited to 'accel/tcg')
 accel/tcg/cputlb.c           | 13 ++++++++-----
 accel/tcg/softmmu_template.h | 14 ++++++++------
 accel/tcg/user-exec.c        |  2 +-
 3 files changed, 17 insertions(+), 12 deletions(-)
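
For reference, every call site changed below gains an access-size argument in the third position. A sketch of the post-patch interfaces as these files expect them (probe_write()'s new signature appears verbatim in the cputlb.c hunk; the tlb_fill() declaration lives outside this directory, so its exact spelling here is an assumption inferred from the call sites):

    /* Size is the access width in bytes; 0 is passed where no meaningful
     * width is available, e.g. the instruction-fetch probe in
     * get_page_addr_code(). */
    void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                  MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
    void probe_write(CPUArchState *env, target_ulong addr, int size,
                     int mmu_idx, uintptr_t retaddr);
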
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8fd84209df..05439039e9 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -880,7 +880,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
(addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
if (!VICTIM_TLB_HIT(addr_read, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
+ tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
}
}
iotlbentry = &env->iotlb[mmu_idx][index];
@@ -928,7 +928,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
* Otherwise the function will return, and there will be a valid
* entry in the TLB for this access.
*/
-void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
+void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -938,7 +938,8 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
/* TLB entry is for a different page */
if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
}
}
}
@@ -981,7 +982,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
+ mmu_idx, retaddr);
}
tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
}
@@ -995,7 +997,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Let the guest notice RMW on a write-only page. */
if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
- tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
+ mmu_idx, retaddr);
/* Since we don't support reads and writes to different addresses,
and we do have the proper page loaded for write, this shouldn't
ever return. But just in case, handle via stop-the-world. */
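
In atomic_mmu_lookup() there is no per-helper size macro to hand, so the hunks above pass "1 << s_bits", i.e. the access width in bytes recovered from the memory-operation descriptor. An illustrative sketch of where that value comes from (names follow the cputlb.c of this period and are shown for orientation, not as part of this patch):

    /* atomic_mmu_lookup() receives a TCGMemOpIdx 'oi'; the size bits of the
     * decoded memop give log2 of the access width, so 1 << s_bits is the
     * byte count handed to tlb_fill(). */
    TCGMemOp mop = get_memop(oi);
    int s_bits = mop & MO_SIZE;     /* 0..3  ->  1, 2, 4 or 8 bytes */
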
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index 3fc5144316..239ea6692b 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -124,7 +124,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@@ -191,7 +191,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@@ -283,7 +283,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
+ mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
}
@@ -316,7 +317,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
&& !VICTIM_TLB_HIT(addr_write, page2)) {
- tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+ tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -359,7 +360,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
+ mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
}
@@ -392,7 +394,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
&& !VICTIM_TLB_HIT(addr_write, page2)) {
- tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+ tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
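
In the softmmu helpers the size is simply DATA_SIZE: softmmu_template.h is included once per access width with SHIFT predefined, and DATA_SIZE expands accordingly. For orientation, the template's own convention (shown here as a sketch, not part of this patch):

    #define DATA_SIZE (1 << SHIFT)   /* SHIFT = 0,1,2,3 -> 1-, 2-, 4-, 8-byte helpers */
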
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index c973752562..a0a4a1924e 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -149,7 +149,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
cc = CPU_GET_CLASS(cpu);
/* see if it is an MMU fault */
g_assert(cc->handle_mmu_fault);
- ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
+ ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
if (ret == 0) {
/* The MMU fault was handled without causing real CPU fault.