Diffstat (limited to 'cputlb.c')
-rw-r--r--  cputlb.c | 203
1 file changed, 192 insertions(+), 11 deletions(-)
@@ -23,15 +23,15 @@
 #include "exec/memory.h"
 #include "exec/address-spaces.h"
 #include "exec/cpu_ldst.h"
-
 #include "exec/cputlb.h"
-
 #include "exec/memory-internal.h"
 #include "exec/ram_addr.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
 #include "qemu/error-report.h"
 #include "exec/log.h"
+#include "exec/helper-proto.h"
+#include "qemu/atomic.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -498,6 +498,43 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     return qemu_ram_addr_from_host_nofail(p);
 }
 
+static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+                         target_ulong addr, uintptr_t retaddr, int size)
+{
+    CPUState *cpu = ENV_GET_CPU(env);
+    hwaddr physaddr = iotlbentry->addr;
+    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    uint64_t val;
+
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    cpu->mem_io_pc = retaddr;
+    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+        cpu_io_recompile(cpu, retaddr);
+    }
+
+    cpu->mem_io_vaddr = addr;
+    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
+    return val;
+}
+
+static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+                      uint64_t val, target_ulong addr,
+                      uintptr_t retaddr, int size)
+{
+    CPUState *cpu = ENV_GET_CPU(env);
+    hwaddr physaddr = iotlbentry->addr;
+    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+        cpu_io_recompile(cpu, retaddr);
+    }
+
+    cpu->mem_io_vaddr = addr;
+    cpu->mem_io_pc = retaddr;
+    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
+}
+
 /* Return true if ADDR is present in the victim tlb, and has been copied
    back to the main tlb.  */
 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
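The io_readx/io_writex pair above factors the MMIO dispatch out of the per-size
softmmu templates. Assuming the companion softmmu_template.h change from the
same series, each generated per-size I/O wrapper is expected to collapse to a
single sized call; a minimal sketch (not the verbatim template text):

    /* Sketch of the per-size wrapper softmmu_template.h can now generate;
       DATA_TYPE, SUFFIX and DATA_SIZE are the template's own macros. */
    static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                                  CPUIOTLBEntry *iotlbentry,
                                                  target_ulong addr,
                                                  uintptr_t retaddr)
    {
        return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
    }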
@@ -527,34 +564,178 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                    (ADDR) & TARGET_PAGE_MASK)
 
+/* Probe for whether the specified guest write access is permitted.
+ * If it is not permitted then an exception will be taken in the same
+ * way as if this were a real write access (and we will not return).
+ * Otherwise the function will return, and there will be a valid
+ * entry in the TLB for this access.
+ */
+void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
+                 uintptr_t retaddr)
+{
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        /* TLB entry is for a different page */
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+    }
+}
+
+/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
+ * operations, or io operations to proceed.  Return the host address.  */
+static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
+                               TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    size_t mmu_idx = get_mmuidx(oi);
+    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
+    target_ulong tlb_addr = tlbe->addr_write;
+    TCGMemOp mop = get_memop(oi);
+    int a_bits = get_alignment_bits(mop);
+    int s_bits = mop & MO_SIZE;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* Enforce guest required alignment.  */
+    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
+        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    /* Enforce qemu required alignment.  */
+    if (unlikely(addr & ((1 << s_bits) - 1))) {
+        /* We get here if guest alignment was not requested,
+           or was not enforced by cpu_unaligned_access above.
+           We might widen the access and emulate, but for now
+           mark an exception and exit the cpu loop.  */
+        goto stop_the_world;
+    }
+
+    /* Check TLB entry and enforce page permissions.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+        tlb_addr = tlbe->addr_write;
+    }
+
+    /* Notice an IO access, or a notdirty page.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        /* There's really nothing that can be done to
+           support this apart from stop-the-world.  */
+        goto stop_the_world;
+    }
+
+    /* Let the guest notice RMW on a write-only page.  */
+    if (unlikely(tlbe->addr_read != tlb_addr)) {
+        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
+        /* Since we don't support reads and writes to different addresses,
+           and we do have the proper page loaded for write, this shouldn't
+           ever return.  But just in case, handle via stop-the-world.  */
+        goto stop_the_world;
+    }
+
+    return (void *)((uintptr_t)addr + tlbe->addend);
+
+ stop_the_world:
+    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
+}
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define TGT_BE(X)  (X)
+# define TGT_LE(X)  BSWAP(X)
+#else
+# define TGT_BE(X)  BSWAP(X)
+# define TGT_LE(X)  (X)
+#endif
+
 #define MMUSUFFIX _mmu
 
-#define SHIFT 0
+#define DATA_SIZE 1
 #include "softmmu_template.h"
 
-#define SHIFT 1
+#define DATA_SIZE 2
 #include "softmmu_template.h"
 
-#define SHIFT 2
+#define DATA_SIZE 4
 #include "softmmu_template.h"
 
-#define SHIFT 3
+#define DATA_SIZE 8
 #include "softmmu_template.h"
-#undef MMUSUFFIX
 
+/* First set of helpers allows passing in of OI and RETADDR.  This makes
+   them callable from other helpers.  */
+
+#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
+#define ATOMIC_NAME(X) \
+    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)
+
+#define DATA_SIZE 1
+#include "atomic_template.h"
+
+#define DATA_SIZE 2
+#include "atomic_template.h"
+
+#define DATA_SIZE 4
+#include "atomic_template.h"
+
+#ifdef CONFIG_ATOMIC64
+#define DATA_SIZE 8
+#include "atomic_template.h"
+#endif
+
+#ifdef CONFIG_ATOMIC128
+#define DATA_SIZE 16
+#include "atomic_template.h"
+#endif
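Each inclusion of atomic_template.h above stamps out one size family of
read-modify-write helpers, named via ATOMIC_NAME; with DATA_SIZE 4, for
example, ATOMIC_NAME(cmpxchg) yields helper_atomic_cmpxchgl_le_mmu plus a
big-endian twin built with the TGT_BE/TGT_LE swaps. A simplified sketch of
the shape of the generated helper, assuming the atomic_template.h added
elsewhere in this series (not shown in this diff):

    /* Simplified sketch of what the template expands to for DATA_SIZE 4. */
    uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                           uint32_t cmpv, uint32_t newv,
                                           TCGMemOpIdx oi, uintptr_t retaddr)
    {
        /* Validate alignment and permissions, obtain the host address. */
        uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, retaddr);
        /* Perform the operation on the host via qemu/atomic.h. */
        return atomic_cmpxchg(haddr, cmpv, newv);
    }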
+/* Second set of helpers are directly callable from TCG as helpers.  */
+
+#undef EXTRA_ARGS
+#undef ATOMIC_NAME
+#undef ATOMIC_MMU_LOOKUP
+#define EXTRA_ARGS         , TCGMemOpIdx oi
+#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
+
+#define DATA_SIZE 1
+#include "atomic_template.h"
+
+#define DATA_SIZE 2
+#include "atomic_template.h"
+
+#define DATA_SIZE 4
+#include "atomic_template.h"
+
+#ifdef CONFIG_ATOMIC64
+#define DATA_SIZE 8
+#include "atomic_template.h"
+#endif
+
+/* Code access functions.  */
+
+#undef MMUSUFFIX
 #define MMUSUFFIX _cmmu
 #undef GETPC
 #define GETPC() ((uintptr_t)0)
 #define SOFTMMU_CODE_ACCESS
 
-#define SHIFT 0
+#define DATA_SIZE 1
 #include "softmmu_template.h"
 
-#define SHIFT 1
+#define DATA_SIZE 2
 #include "softmmu_template.h"
 
-#define SHIFT 2
+#define DATA_SIZE 4
 #include "softmmu_template.h"
 
-#define SHIFT 3
+#define DATA_SIZE 8
 #include "softmmu_template.h"
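For illustration only: a target helper that wants a 4-byte compare-and-swap
with precise exception reporting could chain to the first-set (_mmu) variant,
building the TCGMemOpIdx itself. The caller below is hypothetical and not part
of this patch; it assumes the helper declarations visible via tcg/tcg.h:

    /* Hypothetical caller, not part of the patch. */
    uint32_t do_casl(CPUArchState *env, target_ulong addr,
                     uint32_t cmpv, uint32_t newv, int mmu_idx)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);
        /* GETPC() supplies the return address for precise unwinding;
           it is only meaningful in a function invoked from TCG code. */
        return helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
                                             oi, GETPC());
    }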