Diffstat (limited to 'exec.c')
-rw-r--r-- | exec.c | 555
1 file changed, 260 insertions(+), 295 deletions(-)
@@ -59,8 +59,7 @@
 #include "cputlb.h"
 
-#define WANT_EXEC_OBSOLETE
-#include "exec-obsolete.h"
+#include "memory-internal.h"
 
 //#define DEBUG_TB_INVALIDATE
 //#define DEBUG_FLUSH
@@ -86,26 +85,11 @@ static int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
 
-#if defined(__arm__) || defined(__sparc__)
-/* The prologue must be reachable with a direct jump. ARM and Sparc64
-   have limited branch ranges (possibly also PPC) so place it in a
-   section close to code segment. */
-#define code_gen_section                            \
-    __attribute__((__section__(".gen_code")))       \
-    __attribute__((aligned (32)))
-#elif defined(_WIN32) && !defined(_WIN64)
-#define code_gen_section                            \
-    __attribute__((aligned (16)))
-#else
-#define code_gen_section                            \
-    __attribute__((aligned (32)))
-#endif
-
-uint8_t code_gen_prologue[1024] code_gen_section;
+uint8_t *code_gen_prologue;
 static uint8_t *code_gen_buffer;
-static unsigned long code_gen_buffer_size;
+static size_t code_gen_buffer_size;
 /* threshold to flush the translated code buffer */
-static unsigned long code_gen_buffer_max_size;
+static size_t code_gen_buffer_max_size;
 static uint8_t *code_gen_ptr;
 
 #if !defined(CONFIG_USER_ONLY)
@@ -117,6 +101,9 @@ RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
 static MemoryRegion *system_memory;
 static MemoryRegion *system_io;
 
+AddressSpace address_space_io;
+AddressSpace address_space_memory;
+
 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
 static MemoryRegion io_mem_subpage_ram;
 
@@ -185,7 +172,6 @@ uintptr_t qemu_host_page_mask;
 static void *l1_map[V_L1_SIZE];
 
 #if !defined(CONFIG_USER_ONLY)
-typedef struct PhysPageEntry PhysPageEntry;
 
 static MemoryRegionSection *phys_sections;
 static unsigned phys_sections_nb, phys_sections_nb_alloc;
@@ -194,22 +180,12 @@ static uint16_t phys_section_notdirty;
 static uint16_t phys_section_rom;
 static uint16_t phys_section_watch;
 
-struct PhysPageEntry {
-    uint16_t is_leaf : 1;
-    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
-    uint16_t ptr : 15;
-};
-
 /* Simple allocator for PhysPageEntry nodes */
 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
 
 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
 
-/* This is a multi-level map on the physical address space.
-   The bottom level has pointers to MemoryRegionSections. */
-static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
-
 static void io_mem_init(void);
 static void memory_map_init(void);
 
@@ -221,7 +197,7 @@ static int tb_flush_count;
 static int tb_phys_invalidate_count;
 
 #ifdef _WIN32
-static void map_exec(void *addr, long size)
+static inline void map_exec(void *addr, long size)
 {
     DWORD old_protect;
     VirtualProtect(addr, size,
@@ -229,7 +205,7 @@ static void map_exec(void *addr, long size)
 }
 #else
-static void map_exec(void *addr, long size)
+static inline void map_exec(void *addr, long size)
 {
     unsigned long start, end, page_size;
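Note: the PhysPageEntry definition deleted above moves into memory-internal.h, since the dispatch trie is no longer private to exec.c. A minimal standalone sketch of how one such node resolves a page index; the L2_BITS/level values here are illustrative assumptions, not the values QEMU derives from the target's physical address width:

    #include <stdint.h>
    #include <stdio.h>

    /* Mock trie parameters (assumed; QEMU computes these per target). */
    #define L2_BITS           4
    #define L2_SIZE           (1 << L2_BITS)
    #define P_L2_LEVELS       3
    #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

    typedef struct PhysPageEntry {
        uint16_t is_leaf : 1;
        /* index into sections (is_leaf) or nodes (!is_leaf) */
        uint16_t ptr : 15;
    } PhysPageEntry;

    static PhysPageEntry nodes[16][L2_SIZE];   /* interior node pool */

    /* Walk the trie, consuming L2_BITS of the page index per level. */
    static uint16_t lookup(PhysPageEntry root, uint32_t index)
    {
        PhysPageEntry lp = root;
        for (int i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
            if (lp.ptr == PHYS_MAP_NODE_NIL) {
                return 0;   /* unassigned section, by convention index 0 */
            }
            lp = nodes[lp.ptr][(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
        }
        return lp.ptr;      /* leaf: index into the sections table */
    }

    int main(void)
    {
        PhysPageEntry root = { .is_leaf = 0, .ptr = PHYS_MAP_NODE_NIL };
        printf("section index: %u\n", lookup(root, 0x1234));
        return 0;
    }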
@@ -457,18 +433,19 @@ static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
     }
 }
 
-static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
+static void phys_page_set(AddressSpaceDispatch *d,
+                          target_phys_addr_t index, target_phys_addr_t nb,
                           uint16_t leaf)
 {
     /* Wildly overreserve - it doesn't matter much. */
     phys_map_node_reserve(3 * P_L2_LEVELS);
 
-    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
+    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
 }
 
-MemoryRegionSection *phys_page_find(target_phys_addr_t index)
+MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, target_phys_addr_t index)
 {
-    PhysPageEntry lp = phys_map;
+    PhysPageEntry lp = d->phys_map;
     PhysPageEntry *p;
     int i;
     uint16_t s_index = phys_section_unassigned;
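Note: call sites migrate mechanically; the only new decision is which dispatch table to consult. Illustrative fragment built from the two signatures above (addr is assumed to be a valid target_phys_addr_t):

    /* Before this patch: one implicit global map. */
    MemoryRegionSection *sec_old = phys_page_find(addr >> TARGET_PAGE_BITS);

    /* After: the address space is explicit; exec.c's helpers pass
       address_space_memory.dispatch for the system-memory view. */
    MemoryRegionSection *sec_new = phys_page_find(address_space_memory.dispatch,
                                                  addr >> TARGET_PAGE_BITS);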
@@ -497,111 +474,142 @@ bool memory_region_is_unassigned(MemoryRegion *mr)
 #define mmap_unlock() do { } while(0)
 #endif
 
-#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
-
 #if defined(CONFIG_USER_ONLY)
 /* Currently it is not recommended to allocate big chunks of data in
-   user mode. It will change when a dedicated libc will be used */
+   user mode. It will change when a dedicated libc will be used. */
+/* ??? 64-bit hosts ought to have no problem mmaping data outside the
+   region in which the guest needs to run.  Revisit this. */
 #define USE_STATIC_CODE_GEN_BUFFER
 #endif
 
-#ifdef USE_STATIC_CODE_GEN_BUFFER
-static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
-    __attribute__((aligned (CODE_GEN_ALIGN)));
+/* ??? Should configure for this, not list operating systems here. */
+#if (defined(__linux__) \
+     || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
+     || defined(__DragonFly__) || defined(__OpenBSD__) \
+     || defined(__NetBSD__))
+# define USE_MMAP
 #endif
 
-static void code_gen_alloc(unsigned long tb_size)
-{
-#ifdef USE_STATIC_CODE_GEN_BUFFER
-    code_gen_buffer = static_code_gen_buffer;
-    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
-    map_exec(code_gen_buffer, code_gen_buffer_size);
-#else
-    code_gen_buffer_size = tb_size;
-    if (code_gen_buffer_size == 0) {
-#if defined(CONFIG_USER_ONLY)
-        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
-#else
-        /* XXX: needs adjustments */
-        code_gen_buffer_size = (unsigned long)(ram_size / 4);
-#endif
-    }
-    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
-        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
-    /* The code gen buffer location may have constraints depending on
-       the host cpu and OS */
-#if defined(__linux__)
-    {
-        int flags;
-        void *start = NULL;
+/* Minimum size of the code gen buffer.  This number is randomly chosen,
+   but not so small that we can't have a fair number of TB's live. */
+#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
 
-        flags = MAP_PRIVATE | MAP_ANONYMOUS;
+/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
+   indicated, this is constrained by the range of direct branches on the
+   host cpu, as used by the TCG implementation of goto_tb. */
 #if defined(__x86_64__)
-        flags |= MAP_32BIT;
-        /* Cannot map more than that */
-        if (code_gen_buffer_size > (800 * 1024 * 1024))
-            code_gen_buffer_size = (800 * 1024 * 1024);
-#elif defined(__sparc__) && HOST_LONG_BITS == 64
-        // Map the buffer below 2G, so we can use direct calls and branches
-        start = (void *) 0x40000000UL;
-        if (code_gen_buffer_size > (512 * 1024 * 1024))
-            code_gen_buffer_size = (512 * 1024 * 1024);
+# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
+#elif defined(__sparc__)
+# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 #elif defined(__arm__)
-        /* Keep the buffer no bigger than 16MB to branch between blocks */
-        if (code_gen_buffer_size > 16 * 1024 * 1024)
-            code_gen_buffer_size = 16 * 1024 * 1024;
+# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
 #elif defined(__s390x__)
-        /* Map the buffer so that we can use direct calls and branches. */
-        /* We have a +- 4GB range on the branches; leave some slop. */
-        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
-            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
-        }
-        start = (void *)0x90000000UL;
+  /* We have a +- 4GB range on the branches; leave some slop. */
+# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
+#else
+# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
 #endif
-        code_gen_buffer = mmap(start, code_gen_buffer_size,
-                               PROT_WRITE | PROT_READ | PROT_EXEC,
-                               flags, -1, 0);
-        if (code_gen_buffer == MAP_FAILED) {
-            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
-            exit(1);
-        }
-    }
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
-    || defined(__DragonFly__) || defined(__OpenBSD__) \
-    || defined(__NetBSD__)
-    {
-        int flags;
-        void *addr = NULL;
-        flags = MAP_PRIVATE | MAP_ANONYMOUS;
-#if defined(__x86_64__)
-        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
-         * 0x40000000 is free */
-        flags |= MAP_FIXED;
-        addr = (void *)0x40000000;
-        /* Cannot map more than that */
-        if (code_gen_buffer_size > (800 * 1024 * 1024))
-            code_gen_buffer_size = (800 * 1024 * 1024);
-#elif defined(__sparc__) && HOST_LONG_BITS == 64
-        // Map the buffer below 2G, so we can use direct calls and branches
-        addr = (void *) 0x40000000UL;
-        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
-            code_gen_buffer_size = (512 * 1024 * 1024);
-        }
+
+#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
+
+#define DEFAULT_CODE_GEN_BUFFER_SIZE \
+  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
+   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
+
+static inline size_t size_code_gen_buffer(size_t tb_size)
+{
+    /* Size the buffer. */
+    if (tb_size == 0) {
+#ifdef USE_STATIC_CODE_GEN_BUFFER
+        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
+#else
+        /* ??? Needs adjustments. */
+        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
+           static buffer, we could size this on RESERVED_VA, on the text
+           segment size of the executable, or continue to use the default. */
+        tb_size = (unsigned long)(ram_size / 4);
 #endif
-        code_gen_buffer = mmap(addr, code_gen_buffer_size,
-                               PROT_WRITE | PROT_READ | PROT_EXEC,
-                               flags, -1, 0);
-        if (code_gen_buffer == MAP_FAILED) {
-            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
-            exit(1);
-        }
     }
+    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
+        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
+    }
+    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
+        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
+    }
+    code_gen_buffer_size = tb_size;
+    return tb_size;
+}
+
+#ifdef USE_STATIC_CODE_GEN_BUFFER
+static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
+    __attribute__((aligned(CODE_GEN_ALIGN)));
+
+static inline void *alloc_code_gen_buffer(void)
+{
+    map_exec(static_code_gen_buffer, code_gen_buffer_size);
+    return static_code_gen_buffer;
+}
+#elif defined(USE_MMAP)
+static inline void *alloc_code_gen_buffer(void)
+{
+    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+    uintptr_t start = 0;
+    void *buf;
+
+    /* Constrain the position of the buffer based on the host cpu.
+       Note that these addresses are chosen in concert with the
+       addresses assigned in the relevant linker script file. */
+# if defined(__PIE__) || defined(__PIC__)
+    /* Don't bother setting a preferred location if we're building
+       a position-independent executable.  We're more likely to get
+       an address near the main executable if we let the kernel
+       choose the address. */
+# elif defined(__x86_64__) && defined(MAP_32BIT)
+    /* Force the memory down into low memory with the executable.
+       Leave the choice of exact location with the kernel. */
+    flags |= MAP_32BIT;
+    /* Cannot expect to map more than 800MB in low memory. */
+    if (code_gen_buffer_size > 800u * 1024 * 1024) {
+        code_gen_buffer_size = 800u * 1024 * 1024;
+    }
+# elif defined(__sparc__)
+    start = 0x40000000ul;
+# elif defined(__s390x__)
+    start = 0x90000000ul;
+# endif
+
+    buf = mmap((void *)start, code_gen_buffer_size,
+               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
+    return buf == MAP_FAILED ? NULL : buf;
+}
 #else
-    code_gen_buffer = g_malloc(code_gen_buffer_size);
-    map_exec(code_gen_buffer, code_gen_buffer_size);
-#endif
-#endif /* !USE_STATIC_CODE_GEN_BUFFER */
-    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
+static inline void *alloc_code_gen_buffer(void)
+{
+    void *buf = g_malloc(code_gen_buffer_size);
+    if (buf) {
+        map_exec(buf, code_gen_buffer_size);
+    }
+    return buf;
+}
+#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
+
+static inline void code_gen_alloc(size_t tb_size)
+{
+    code_gen_buffer_size = size_code_gen_buffer(tb_size);
+    code_gen_buffer = alloc_code_gen_buffer();
+    if (code_gen_buffer == NULL) {
+        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+        exit(1);
+    }
+
+    /* Steal room for the prologue at the end of the buffer.  This ensures
+       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
+       from TB's to the prologue are going to be in range.  It also means
+       that we don't need to mark (additional) portions of the data segment
+       as executable. */
+    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
+    code_gen_buffer_size -= 1024;
+
     code_gen_buffer_max_size = code_gen_buffer_size -
         (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
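Note: taken together, sizing, allocation, and the prologue steal produce one contiguous layout. A self-contained model of the arithmetic; malloc stands in for alloc_code_gen_buffer(), and the x86-64 constants are assumed (system mode really sizes the default from ram_size / 4):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_SIZE     (1024u * 1024)             /* MIN_CODE_GEN_BUFFER_SIZE */
    #define MAX_SIZE     (2ul * 1024 * 1024 * 1024) /* x86-64 MAX_CODE_GEN_BUFFER_SIZE */
    #define DEFAULT_SIZE (32u * 1024 * 1024)

    static size_t size_buffer(size_t tb_size)
    {
        if (tb_size == 0) {
            tb_size = DEFAULT_SIZE;   /* no explicit -tb-size given */
        }
        if (tb_size < MIN_SIZE) {
            tb_size = MIN_SIZE;       /* clamp up */
        }
        if (tb_size > MAX_SIZE) {
            tb_size = MAX_SIZE;       /* clamp down to direct-branch range */
        }
        return tb_size;
    }

    int main(void)
    {
        size_t size = size_buffer(0);
        uint8_t *buf = malloc(size);
        if (!buf) {
            return 1;
        }
        /* Steal the last 1024 bytes for the prologue: every TB can reach
           it with a direct branch because the whole mapping fits in
           MAX_SIZE. */
        uint8_t *prologue = buf + size - 1024;
        size -= 1024;
        printf("TBs: [%p, %p), prologue at %p\n",
               (void *)buf, (void *)(buf + size), (void *)prologue);
        free(buf);
        return 0;
    }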
@@ -1470,7 +1478,7 @@ void tb_invalidate_phys_addr(target_phys_addr_t addr)
     ram_addr_t ram_addr;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
     if (!(memory_region_is_ram(section->mr)
           || (section->mr->rom_device && section->mr->readable))) {
         return;
@@ -2208,9 +2216,9 @@ static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
     lp->ptr = PHYS_MAP_NODE_NIL;
 }
 
-static void destroy_all_mappings(void)
+static void destroy_all_mappings(AddressSpaceDispatch *d)
 {
-    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
+    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
     phys_map_nodes_reset();
 }
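Note: destroy_l2_mapping() (context above) recurses through interior nodes and resets each entry to PHYS_MAP_NODE_NIL; with per-dispatch roots, the same walk simply starts from d->phys_map. A toy version over the node layout sketched earlier (same assumed constants, fragment only):

    /* Recursively clear the subtree rooted at *lp.  Interior entries
       point into the node pool, leaves into the sections table. */
    static void destroy_level(PhysPageEntry *lp, int level)
    {
        if (lp->ptr == PHYS_MAP_NODE_NIL) {
            return;
        }
        if (!lp->is_leaf && level > 0) {
            for (int i = 0; i < L2_SIZE; i++) {
                destroy_level(&nodes[lp->ptr][i], level - 1);
            }
        }
        lp->is_leaf = 0;
        lp->ptr = PHYS_MAP_NODE_NIL;
    }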
@@ -2230,12 +2238,12 @@ static void phys_sections_clear(void)
     phys_sections_nb = 0;
 }
 
-static void register_subpage(MemoryRegionSection *section)
+static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
 {
     subpage_t *subpage;
     target_phys_addr_t base = section->offset_within_address_space
         & TARGET_PAGE_MASK;
-    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
+    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
     MemoryRegionSection subsection = {
         .offset_within_address_space = base,
         .size = TARGET_PAGE_SIZE,
@@ -2247,7 +2255,7 @@ static void register_subpage(MemoryRegionSection *section)
     if (!(existing->mr->subpage)) {
         subpage = subpage_init(base);
         subsection.mr = &subpage->iomem;
-        phys_page_set(base >> TARGET_PAGE_BITS, 1,
+        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                       phys_section_add(&subsection));
     } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
@@ -2258,7 +2266,7 @@ static void register_subpage(MemoryRegionSection *section)
 }
 
-static void register_multipage(MemoryRegionSection *section)
+static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
 {
     target_phys_addr_t start_addr = section->offset_within_address_space;
     ram_addr_t size = section->size;
@@ -2268,13 +2276,13 @@ static void register_multipage(MemoryRegionSection *section)
     assert(size);
 
     addr = start_addr;
-    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
+    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                   section_index);
 }
 
-void cpu_register_physical_memory_log(MemoryRegionSection *section,
-                                      bool readonly)
+static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
 {
+    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
     MemoryRegionSection now = *section, remain = *section;
 
     if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
@@ -2282,7 +2290,7 @@ void cpu_register_physical_memory_log(MemoryRegionSection *section,
         now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space,
                        now.size);
-        register_subpage(&now);
+        register_subpage(d, &now);
         remain.size -= now.size;
         remain.offset_within_address_space += now.size;
         remain.offset_within_region += now.size;
@@ -2291,10 +2299,10 @@
         now = remain;
         if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
             now.size = TARGET_PAGE_SIZE;
-            register_subpage(&now);
+            register_subpage(d, &now);
         } else {
             now.size &= TARGET_PAGE_MASK;
-            register_multipage(&now);
+            register_multipage(d, &now);
         }
         remain.size -= now.size;
         remain.offset_within_address_space += now.size;
@@ -2302,23 +2310,10 @@
     }
     now = remain;
     if (now.size) {
-        register_subpage(&now);
+        register_subpage(d, &now);
     }
 }
 
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
-    if (kvm_enabled())
-        kvm_coalesce_mmio_region(addr, size);
-}
-
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
-    if (kvm_enabled())
-        kvm_uncoalesce_mmio_region(addr, size);
-}
-
 void qemu_flush_coalesced_mmio_buffer(void)
 {
     if (kvm_enabled())
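Note: mem_add() (the renamed cpu_register_physical_memory_log) decomposes a section at page granularity. A toy standalone version of that decomposition; it collapses the loop and omits offset_within_region tracking, so it only models the head/middle/tail split:

    #include <stdio.h>

    #define PAGE 4096ull

    /* Unaligned head and tail take the subpage path; the page-aligned
       middle is one multipage registration. */
    static void add_section(unsigned long long start, unsigned long long size)
    {
        unsigned long long head = (PAGE - (start % PAGE)) % PAGE;
        if (head > size) {
            head = size;
        }
        if (head) {
            printf("subpage   %#llx+%#llx\n", start, head);
            start += head;
            size -= head;
        }
        unsigned long long mid = size - (size % PAGE);
        if (mid) {
            printf("multipage %#llx+%#llx\n", start, mid);
            start += mid;
            size -= mid;
        }
        if (size) {
            printf("subpage   %#llx+%#llx\n", start, size);
        }
    }

    int main(void)
    {
        add_section(0x1f00, 0x3200);  /* head 0x100, middle 0x3000, tail 0x100 */
        return 0;
    }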
@@ -3166,18 +3161,24 @@ static void io_mem_init(void)
                           "watch", UINT64_MAX);
 }
 
+static void mem_begin(MemoryListener *listener)
+{
+    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
+
+    destroy_all_mappings(d);
+    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
+}
+
 static void core_begin(MemoryListener *listener)
 {
-    destroy_all_mappings();
     phys_sections_clear();
-    phys_map.ptr = PHYS_MAP_NODE_NIL;
     phys_section_unassigned = dummy_section(&io_mem_unassigned);
     phys_section_notdirty = dummy_section(&io_mem_notdirty);
     phys_section_rom = dummy_section(&io_mem_rom);
     phys_section_watch = dummy_section(&io_mem_watch);
 }
 
-static void core_commit(MemoryListener *listener)
+static void tcg_commit(MemoryListener *listener)
 {
     CPUArchState *env;
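Note: the patch relies on the memory core bracketing each topology change with begin/.../commit, so a dispatch map can be rebuilt from scratch every time. A standalone mock of that ordering; the callback names mirror the MemoryListener fields used here, and the driver loop in main() is a stand-in for QEMU's memory core:

    #include <stdio.h>

    typedef struct Listener {
        void (*begin)(struct Listener *);
        void (*region_add)(struct Listener *, const char *region);
        void (*commit)(struct Listener *);
    } Listener;

    static void mock_begin(Listener *l)  { (void)l; printf("begin: clear old map\n"); }
    static void mock_add(Listener *l, const char *r) { (void)l; printf("add: map %s\n", r); }
    static void mock_commit(Listener *l) { (void)l; printf("commit: flush CPU TLBs\n"); }

    int main(void)
    {
        Listener l = { mock_begin, mock_add, mock_commit };
        const char *topology[] = { "ram", "mmio" };

        l.begin(&l);                       /* like mem_begin: drop the stale trie */
        for (int i = 0; i < 2; i++) {
            l.region_add(&l, topology[i]); /* like mem_add: repopulate per section */
        }
        l.commit(&l);                      /* like tcg_commit: CPUs see the new map */
        return 0;
    }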
@@ -3189,38 +3190,6 @@
     }
 }
 
-static void core_region_add(MemoryListener *listener,
-                            MemoryRegionSection *section)
-{
-    cpu_register_physical_memory_log(section, section->readonly);
-}
-
-static void core_region_del(MemoryListener *listener,
-                            MemoryRegionSection *section)
-{
-}
-
-static void core_region_nop(MemoryListener *listener,
-                            MemoryRegionSection *section)
-{
-    cpu_register_physical_memory_log(section, section->readonly);
-}
-
-static void core_log_start(MemoryListener *listener,
-                           MemoryRegionSection *section)
-{
-}
-
-static void core_log_stop(MemoryListener *listener,
-                          MemoryRegionSection *section)
-{
-}
-
-static void core_log_sync(MemoryListener *listener,
-                          MemoryRegionSection *section)
-{
-}
-
 static void core_log_global_start(MemoryListener *listener)
 {
     cpu_physical_memory_set_dirty_tracking(1);
@@ -3231,26 +3200,6 @@
 static void core_log_global_stop(MemoryListener *listener)
 {
     cpu_physical_memory_set_dirty_tracking(0);
 }
 
-static void core_eventfd_add(MemoryListener *listener,
-                             MemoryRegionSection *section,
-                             bool match_data, uint64_t data, EventNotifier *e)
-{
-}
-
-static void core_eventfd_del(MemoryListener *listener,
-                             MemoryRegionSection *section,
-                             bool match_data, uint64_t data, EventNotifier *e)
-{
-}
-
-static void io_begin(MemoryListener *listener)
-{
-}
-
-static void io_commit(MemoryListener *listener)
-{
-}
-
 static void io_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
 {
@@ -3269,90 +3218,63 @@ static void io_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
 {
     isa_unassign_ioport(section->offset_within_address_space, section->size);
 }
 
-static void io_region_nop(MemoryListener *listener,
-                          MemoryRegionSection *section)
-{
-}
-
-static void io_log_start(MemoryListener *listener,
-                         MemoryRegionSection *section)
-{
-}
-
-static void io_log_stop(MemoryListener *listener,
-                        MemoryRegionSection *section)
-{
-}
-
-static void io_log_sync(MemoryListener *listener,
-                        MemoryRegionSection *section)
-{
-}
-
-static void io_log_global_start(MemoryListener *listener)
-{
-}
-
-static void io_log_global_stop(MemoryListener *listener)
-{
-}
-
-static void io_eventfd_add(MemoryListener *listener,
-                           MemoryRegionSection *section,
-                           bool match_data, uint64_t data, EventNotifier *e)
-{
-}
-
-static void io_eventfd_del(MemoryListener *listener,
-                           MemoryRegionSection *section,
-                           bool match_data, uint64_t data, EventNotifier *e)
-{
-}
-
 static MemoryListener core_memory_listener = {
     .begin = core_begin,
-    .commit = core_commit,
-    .region_add = core_region_add,
-    .region_del = core_region_del,
-    .region_nop = core_region_nop,
-    .log_start = core_log_start,
-    .log_stop = core_log_stop,
-    .log_sync = core_log_sync,
     .log_global_start = core_log_global_start,
     .log_global_stop = core_log_global_stop,
-    .eventfd_add = core_eventfd_add,
-    .eventfd_del = core_eventfd_del,
-    .priority = 0,
+    .priority = 1,
 };
 
 static MemoryListener io_memory_listener = {
-    .begin = io_begin,
-    .commit = io_commit,
     .region_add = io_region_add,
     .region_del = io_region_del,
-    .region_nop = io_region_nop,
-    .log_start = io_log_start,
-    .log_stop = io_log_stop,
-    .log_sync = io_log_sync,
-    .log_global_start = io_log_global_start,
-    .log_global_stop = io_log_global_stop,
-    .eventfd_add = io_eventfd_add,
-    .eventfd_del = io_eventfd_del,
     .priority = 0,
 };
 
+static MemoryListener tcg_memory_listener = {
+    .commit = tcg_commit,
+};
+
+void address_space_init_dispatch(AddressSpace *as)
+{
+    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
+
+    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
+    d->listener = (MemoryListener) {
+        .begin = mem_begin,
+        .region_add = mem_add,
+        .region_nop = mem_add,
+        .priority = 0,
+    };
+    as->dispatch = d;
+    memory_listener_register(&d->listener, as);
+}
+
+void address_space_destroy_dispatch(AddressSpace *as)
+{
+    AddressSpaceDispatch *d = as->dispatch;
+
+    memory_listener_unregister(&d->listener);
+    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
+    g_free(d);
+    as->dispatch = NULL;
+}
+
 static void memory_map_init(void)
 {
     system_memory = g_malloc(sizeof(*system_memory));
     memory_region_init(system_memory, "system", INT64_MAX);
-    set_system_memory_map(system_memory);
+    address_space_init(&address_space_memory, system_memory);
+    address_space_memory.name = "memory";
 
     system_io = g_malloc(sizeof(*system_io));
     memory_region_init(system_io, "io", 65536);
-    set_system_io_map(system_io);
+    address_space_init(&address_space_io, system_io);
+    address_space_io.name = "I/O";
 
-    memory_listener_register(&core_memory_listener, system_memory);
-    memory_listener_register(&io_memory_listener, system_io);
+    memory_listener_register(&core_memory_listener, &address_space_memory);
+    memory_listener_register(&io_memory_listener, &address_space_io);
+    memory_listener_register(&tcg_memory_listener, &address_space_memory);
 }
 
 MemoryRegion *get_system_memory(void)
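Note: address_space_init_dispatch() gives each AddressSpace its own dispatch object carrying a trie root plus a private listener. The struct itself is not defined in this file; a sketch inferred from the fields used here, which presumably lives in memory-internal.h:

    typedef struct AddressSpaceDispatch {
        /* Multi-level map on the physical address space; the bottom
           level holds pointers to MemoryRegionSections. */
        PhysPageEntry phys_map;
        MemoryListener listener;
    } AddressSpaceDispatch;

The container_of(listener, AddressSpaceDispatch, listener) idiom in mem_begin()/mem_add() is what lets one callback implementation serve any number of address spaces.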
@@ -3422,9 +3344,10 @@ static void invalidate_and_set_dirty(target_phys_addr_t addr,
     xen_modified_memory(addr, length);
 }
 
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
-                            int len, int is_write)
+void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
+                      int len, bool is_write)
 {
+    AddressSpaceDispatch *d = as->dispatch;
     int l;
     uint8_t *ptr;
     uint32_t val;
@@ -3436,7 +3359,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)
             l = len;
-        section = phys_page_find(page >> TARGET_PAGE_BITS);
+        section = phys_page_find(d, page >> TARGET_PAGE_BITS);
 
         if (is_write) {
             if (!memory_region_is_ram(section->mr)) {
@@ -3507,10 +3430,36 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
     }
 }
 
+void address_space_write(AddressSpace *as, target_phys_addr_t addr,
+                         const uint8_t *buf, int len)
+{
+    address_space_rw(as, addr, (uint8_t *)buf, len, true);
+}
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+void address_space_read(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, int len)
+{
+    address_space_rw(as, addr, buf, len, false);
+}
+
+
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+                            int len, int is_write)
+{
+    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
+}
+
 /* used for ROM loading : can write in RAM and ROM */
 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                    const uint8_t *buf, int len)
 {
+    AddressSpaceDispatch *d = address_space_memory.dispatch;
     int l;
     uint8_t *ptr;
     target_phys_addr_t page;
@@ -3521,7 +3470,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
         l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)
             l = len;
-        section = phys_page_find(page >> TARGET_PAGE_BITS);
+        section = phys_page_find(d, page >> TARGET_PAGE_BITS);
 
         if (!(memory_region_is_ram(section->mr) ||
               memory_region_is_romd(section->mr))) {
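Note: the compatibility wrapper keeps old callers working while new code names the address space and the direction explicitly. Illustrative fragment using only the signatures added above (assumes a valid guest-physical addr):

    uint8_t buf[4] = { 1, 2, 3, 4 };

    address_space_write(&address_space_memory, addr, buf, sizeof(buf));
    address_space_read(&address_space_memory, addr, buf, sizeof(buf));

    /* Equivalent legacy spelling, now routed through address_space_rw(): */
    cpu_physical_memory_rw(addr, buf, sizeof(buf), 1 /* is_write */);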
@@ -3595,10 +3544,12 @@ static void cpu_notify_map_clients(void)
  * Use cpu_register_map_client() to know when retrying the map operation is
  * likely to succeed.
  */
-void *cpu_physical_memory_map(target_phys_addr_t addr,
-                              target_phys_addr_t *plen,
-                              int is_write)
+void *address_space_map(AddressSpace *as,
+                        target_phys_addr_t addr,
+                        target_phys_addr_t *plen,
+                        bool is_write)
 {
+    AddressSpaceDispatch *d = as->dispatch;
     target_phys_addr_t len = *plen;
     target_phys_addr_t todo = 0;
     int l;
@@ -3613,7 +3564,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
         l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)
             l = len;
-        section = phys_page_find(page >> TARGET_PAGE_BITS);
+        section = phys_page_find(d, page >> TARGET_PAGE_BITS);
 
         if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
             if (todo || bounce.buffer) {
@@ -3623,7 +3574,7 @@
             bounce.addr = addr;
             bounce.len = l;
             if (!is_write) {
-                cpu_physical_memory_read(addr, bounce.buffer, l);
+                address_space_read(as, addr, bounce.buffer, l);
             }
 
             *plen = l;
@@ -3644,12 +3595,12 @@
     return ret;
 }
 
-/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
-void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
-                               int is_write, target_phys_addr_t access_len)
+void address_space_unmap(AddressSpace *as, void *buffer, target_phys_addr_t len,
+                         int is_write, target_phys_addr_t access_len)
 {
     if (buffer != bounce.buffer) {
         if (is_write) {
@@ -3670,13 +3621,26 @@
         return;
     }
     if (is_write) {
-        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
 }
 
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write)
+{
+    return address_space_map(&address_space_memory, addr, plen, is_write);
+}
+
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len)
+{
+    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
+}
+
 /* warning: addr must be aligned */
 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
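Note: the map/unmap pair keeps its single-bounce-buffer fallback, now per address space. Typical pairing, as a sketch (assumes addr targets RAM and that <string.h> is available for memset):

    target_phys_addr_t len = 4096;
    void *p = address_space_map(&address_space_memory, addr, &len, true);
    if (p) {
        /* len may have been reduced, e.g. when the bounce buffer is used. */
        memset(p, 0, len);
        address_space_unmap(&address_space_memory, p, len, true, len);
    }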
@@ -3685,7 +3649,7 @@
 {
     uint32_t val;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!(memory_region_is_ram(section->mr) ||
           memory_region_is_romd(section->mr))) {
@@ -3744,7 +3708,7 @@ static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
     uint64_t val;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!(memory_region_is_ram(section->mr) ||
           memory_region_is_romd(section->mr))) {
@@ -3811,7 +3775,7 @@ static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
     uint64_t val;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!(memory_region_is_ram(section->mr) ||
           memory_region_is_romd(section->mr))) {
@@ -3870,7 +3834,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
     uint8_t *ptr;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!memory_region_is_ram(section->mr) || section->readonly) {
         addr = memory_region_section_addr(section, addr);
@@ -3902,7 +3866,7 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
     uint8_t *ptr;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!memory_region_is_ram(section->mr) || section->readonly) {
         addr = memory_region_section_addr(section, addr);
@@ -3931,7 +3895,7 @@ static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
     uint8_t *ptr;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!memory_region_is_ram(section->mr) || section->readonly) {
         addr = memory_region_section_addr(section, addr);
@@ -3998,7 +3962,7 @@ static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
     uint8_t *ptr;
     MemoryRegionSection *section;
 
-    section = phys_page_find(addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
 
     if (!memory_region_is_ram(section->mr) || section->readonly) {
         addr = memory_region_section_addr(section, addr);
@@ -4188,7 +4152,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     }
     /* XXX: avoid using doubles ? */
     cpu_fprintf(f, "Translation buffer state:\n");
-    cpu_fprintf(f, "gen code size       %td/%ld\n",
+    cpu_fprintf(f, "gen code size       %td/%zd\n",
                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
     cpu_fprintf(f, "TB count            %d/%d\n",
                 nb_tbs, code_gen_max_blocks);
@@ -4234,7 +4198,8 @@ bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
 {
     MemoryRegionSection *section;
 
-    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
+    section = phys_page_find(address_space_memory.dispatch,
+                             phys_addr >> TARGET_PAGE_BITS);
 
     return !(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr));
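Note: the word-sized helpers keep their implicit system-memory signatures; only their section lookup changed, now going through address_space_memory.dispatch. Illustrative fragment; ldl_phys and stl_phys_notdirty carry the signatures used in this tree at the time, and the is-io guard is an assumption about the caller's intent rather than a required pattern:

    if (!cpu_physical_memory_is_io(phys_addr)) {
        uint32_t word = ldl_phys(phys_addr);   /* aligned 32-bit load */
        stl_phys_notdirty(phys_addr, word);    /* store without dirty marking */
    }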