author | Paolo Bonzini <pbonzini@redhat.com> | 2013-05-24 12:59:37 +0200
---|---|---
committer | Paolo Bonzini <pbonzini@redhat.com> | 2013-05-29 16:26:50 +0200
commit | 149f54b53b7666a3facd45e86eece60ce7d3b114 (patch) |
tree | 716ff4eeaca8647f328eb06730476d46adbe6bc0 /cputlb.c |
parent | b018ddf633f77195e9ae859c6d940a334e68879f (diff) |
memory: add address_space_translate
Using phys_page_find to translate an address within an AddressSpace to a
MemoryRegionSection is unwieldy. It requires passing the page index rather
than the address, and memory_region_section_addr then has to be called
separately. Replace memory_region_section_addr with a function that does all
of it: call phys_page_find, compute the offset within the region, and check
how big the current mapping is. This way, a large flat region can be written
with a single lookup rather than a page at a time.
address_space_translate will also provide a single point where IOMMU
forwarding is implemented.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
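
For orientation, below is a minimal sketch (not part of this commit) of how a caller could use the address_space_translate() signature introduced here, which returns a MemoryRegionSection * together with the offset into its MemoryRegion (xlat) and the contiguous length clamped on return (sz). The helper write_to_region() and the function write_phys_range() are hypothetical names used only for illustration.

```c
#include "exec/memory.h"   /* QEMU-internal header assumed for AddressSpace, hwaddr, etc. */

/* Sketch only: walk a guest-physical range one contiguous mapping at a
 * time with the address_space_translate() signature from this commit.
 * write_to_region() is a hypothetical helper, not a QEMU API. */
static void write_phys_range(AddressSpace *as, hwaddr addr,
                             const uint8_t *buf, hwaddr len)
{
    while (len > 0) {
        hwaddr xlat, sz = len;
        MemoryRegionSection *section;

        /* One call resolves the section, the offset within its
         * MemoryRegion (xlat), and how much of the mapping is
         * contiguous (sz is clamped on return). */
        section = address_space_translate(as, addr, &xlat, &sz, true);

        write_to_region(section->mr, xlat, buf, sz);   /* hypothetical I/O */

        addr += sz;
        buf  += sz;
        len  -= sz;
    }
}
```

With the old phys_page_find() path this loop would have had to advance a page at a time; here a flat RAM mapping costs a single lookup, which is the point the commit message makes.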
Diffstat (limited to 'cputlb.c')
-rw-r--r-- | cputlb.c | 20 |
1 file changed, 11 insertions(+), 9 deletions(-)
@@ -248,13 +248,18 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
     target_ulong code_address;
     uintptr_t addend;
     CPUTLBEntry *te;
-    hwaddr iotlb;
+    hwaddr iotlb, xlat, sz;
 
     assert(size >= TARGET_PAGE_SIZE);
     if (size != TARGET_PAGE_SIZE) {
         tlb_add_large_page(env, vaddr, size);
     }
-    section = phys_page_find(address_space_memory.dispatch, paddr >> TARGET_PAGE_BITS);
+
+    sz = size;
+    section = address_space_translate(&address_space_memory, paddr, &xlat, &sz,
+                                      false);
+    assert(sz >= TARGET_PAGE_SIZE);
+
 #if defined(DEBUG_TLB)
     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
            " prot=%x idx=%d pd=0x%08lx\n",
@@ -268,13 +273,12 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
         addend = 0;
     } else {
         /* TLB_MMIO for rom/romd handled below */
-        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr)
-            + memory_region_section_addr(section, paddr);
+        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
     }
 
     code_address = address;
-    iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, prot,
-                                            &address);
+    iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, xlat,
+                                            prot, &address);
 
     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     env->iotlb[mmu_idx][index] = iotlb - vaddr;
@@ -297,9 +301,7 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
         /* Write access calls the I/O callback.  */
         te->addr_write = address | TLB_MMIO;
     } else if (memory_region_is_ram(section->mr)
-               && !cpu_physical_memory_is_dirty(
-                   section->mr->ram_addr
-                   + memory_region_section_addr(section, paddr))) {
+               && !cpu_physical_memory_is_dirty(section->mr->ram_addr + xlat)) {
         te->addr_write = address | TLB_NOTDIRTY;
     } else {
         te->addr_write = address;