| author    | Michael S. Tsirkin <mst@redhat.com>              | 2011-06-14 17:51:11 +0300 |
| committer | Michael S. Tsirkin <mst@redhat.com>              | 2011-06-15 18:27:15 +0300 |
| commit    | befeac45d4d9afb587eca9a27d975db4a7950960 (patch)  | |
| tree      | aab24c856a3ea944e287d7f2591bb4bab6a56eb4 /exec.c  | |
| parent    | 35f754620615138aaae0ef72602f84c88fd8de0f (diff)   | |
| parent    | 0b862cedf36d927818c50584ddd611b0370673df (diff)   | |
Merge remote-tracking branch 'origin/master' into pci
Conflicts:
hw/virtio-pci.c
Diffstat (limited to 'exec.c')
-rw-r--r-- | exec.c | 128 |
1 file changed, 110 insertions, 18 deletions
@@ -32,10 +32,10 @@
 #include "hw/qdev.h"
 #include "osdep.h"
 #include "kvm.h"
+#include "hw/xen.h"
 #include "qemu-timer.h"
 #if defined(CONFIG_USER_ONLY)
 #include <qemu.h>
-#include <signal.h>
 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 #include <sys/param.h>
 #if __FreeBSD_version >= 700104
@@ -51,6 +51,8 @@
 #include <libutil.h>
 #endif
 #endif
+#else /* !CONFIG_USER_ONLY */
+#include "xen-mapcache.h"
 #endif
 
 //#define DEBUG_TB_INVALIDATE
@@ -2085,7 +2087,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
     start1 = (unsigned long)qemu_safe_ram_ptr(start);
-    /* Chek that we don't span multiple blocks - this breaks the
+    /* Check that we don't span multiple blocks - this breaks the
        address comparisons below.  */
     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
         != (end - 1) - start) {
@@ -2916,6 +2918,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
         }
     }
 
+    new_block->offset = find_ram_offset(size);
     if (host) {
         new_block->host = host;
         new_block->flags |= RAM_PREALLOC_MASK;
@@ -2933,18 +2936,28 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
 #endif
         } else {
 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
-            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
-            new_block->host = mmap((void*)0x1000000, size,
+            /* S390 KVM requires the topmost vma of the RAM to be smaller than
+               an system defined value, which is at least 256GB. Larger systems
+               have larger values. We put the guest between the end of data
+               segment (system break) and this value. We use 32GB as a base to
+               have enough room for the system break to grow. */
+            new_block->host = mmap((void*)0x800000000, size,
                                    PROT_EXEC|PROT_READ|PROT_WRITE,
-                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+            if (new_block->host == MAP_FAILED) {
+                fprintf(stderr, "Allocating RAM failed\n");
+                abort();
+            }
 #else
-            new_block->host = qemu_vmalloc(size);
+            if (xen_mapcache_enabled()) {
+                xen_ram_alloc(new_block->offset, size);
+            } else {
+                new_block->host = qemu_vmalloc(size);
+            }
 #endif
             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
         }
     }
-
-    new_block->offset = find_ram_offset(size);
     new_block->length = size;
 
     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
@@ -2965,6 +2978,19 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
     return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
 }
 
+void qemu_ram_free_from_ptr(ram_addr_t addr)
+{
+    RAMBlock *block;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        if (addr == block->offset) {
+            QLIST_REMOVE(block, next);
+            qemu_free(block);
+            return;
+        }
+    }
+}
+
 void qemu_ram_free(ram_addr_t addr)
 {
     RAMBlock *block;
@@ -2989,7 +3015,11 @@ void qemu_ram_free(ram_addr_t addr)
 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
                 munmap(block->host, block->length);
 #else
-                qemu_vfree(block->host);
+                if (xen_mapcache_enabled()) {
+                    qemu_invalidate_entry(block->host);
+                } else {
+                    qemu_vfree(block->host);
+                }
 #endif
             }
             qemu_free(block);
@@ -3078,6 +3108,16 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
                 QLIST_REMOVE(block, next);
                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
             }
+            if (xen_mapcache_enabled()) {
+                /* We need to check if the requested address is in the RAM
+                 * because we don't want to map the entire memory in QEMU.
+                 */
+                if (block->offset == 0) {
+                    return qemu_map_cache(addr, 0, 1);
+                } else if (block->host == NULL) {
+                    block->host = xen_map_block(block->offset, block->length);
+                }
+            }
             return block->host + (addr - block->offset);
         }
     }
@@ -3097,6 +3137,16 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->length) {
+            if (xen_mapcache_enabled()) {
+                /* We need to check if the requested address is in the RAM
+                 * because we don't want to map the entire memory in QEMU.
+                 */
+                if (block->offset == 0) {
+                    return qemu_map_cache(addr, 0, 1);
+                } else if (block->host == NULL) {
+                    block->host = xen_map_block(block->offset, block->length);
+                }
+            }
             return block->host + (addr - block->offset);
         }
     }
@@ -3107,17 +3157,48 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+void qemu_put_ram_ptr(void *addr)
+{
+    trace_qemu_put_ram_ptr(addr);
+
+    if (xen_mapcache_enabled()) {
+        RAMBlock *block;
+
+        QLIST_FOREACH(block, &ram_list.blocks, next) {
+            if (addr == block->host) {
+                break;
+            }
+        }
+        if (block && block->host) {
+            xen_unmap_block(block->host, block->length);
+            block->host = NULL;
+        } else {
+            qemu_map_cache_unlock(addr);
+        }
+    }
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
     uint8_t *host = ptr;
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
+        /* This case append when the block is not mapped. */
+        if (block->host == NULL) {
+            continue;
+        }
         if (host - block->host < block->length) {
             *ram_addr = block->offset + (host - block->host);
             return 0;
         }
     }
+
+    if (xen_mapcache_enabled()) {
+        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+        return 0;
+    }
+
     return -1;
 }
 
@@ -3139,7 +3220,7 @@ static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
 #endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
     do_unassigned_access(addr, 0, 0, 0, 1);
 #endif
     return 0;
@@ -3150,7 +3231,7 @@ static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
 #endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
     do_unassigned_access(addr, 0, 0, 0, 2);
 #endif
     return 0;
@@ -3161,7 +3242,7 @@ static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
 #endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
     do_unassigned_access(addr, 0, 0, 0, 4);
 #endif
     return 0;
@@ -3172,7 +3253,7 @@ static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
 #endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
     do_unassigned_access(addr, 1, 0, 0, 1);
 #endif
 }
@@ -3182,7 +3263,7 @@ static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
 #endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
     do_unassigned_access(addr, 1, 0, 0, 2);
 #endif
 }
@@ -3192,7 +3273,7 @@ static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
 #endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
     do_unassigned_access(addr, 1, 0, 0, 4);
 #endif
 }
@@ -3812,6 +3893,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 cpu_physical_memory_set_dirty_flags(
                     addr1, (0xff & ~CODE_DIRTY_FLAG));
             }
+            qemu_put_ram_ptr(ptr);
         }
     } else {
         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3839,9 +3921,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 }
             } else {
                 /* RAM case */
-                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
-                    (addr & ~TARGET_PAGE_MASK);
-                memcpy(buf, ptr, l);
+                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
+                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
+                qemu_put_ram_ptr(ptr);
             }
         }
         len -= l;
@@ -3882,6 +3964,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
             /* ROM/RAM case */
             ptr = qemu_get_ram_ptr(addr1);
             memcpy(ptr, buf, l);
+            qemu_put_ram_ptr(ptr);
         }
         len -= l;
         buf += l;
@@ -4023,6 +4106,15 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
             access_len -= l;
         }
     }
+    if (xen_mapcache_enabled()) {
+        uint8_t *buffer1 = buffer;
+        uint8_t *end_buffer = buffer + len;
+
+        while (buffer1 < end_buffer) {
+            qemu_put_ram_ptr(buffer1);
+            buffer1 += TARGET_PAGE_SIZE;
+        }
+    }
     return;
     }
     if (is_write) {
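The thread running through the exec.c hunks above is that a pointer obtained with qemu_get_ram_ptr()/qemu_safe_ram_ptr() is no longer guaranteed to be a permanent host mapping: with the Xen mapcache enabled, a block may be mapped lazily through qemu_map_cache() or xen_map_block(), so callers such as cpu_physical_memory_rw(), cpu_physical_memory_write_rom() and cpu_physical_memory_unmap() now hand the pointer back with qemu_put_ram_ptr() when they are done. The stand-alone C sketch below is only a minimal model of that get/put lifetime discipline: ToyRamBlock, toy_get_ram_ptr() and toy_put_ram_ptr() are invented stand-ins rather than QEMU or Xen APIs, and calloc()/free() merely take the place of xen_map_block()/xen_unmap_block(), so unlike real guest RAM the data written here does not survive the unmap.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for a RAM block whose host mapping is created on demand. */
typedef struct {
    void  *host;    /* lazily created host mapping, NULL while unmapped */
    size_t length;  /* size of the block in bytes */
} ToyRamBlock;

/* cf. qemu_get_ram_ptr(): map the block on first use, return a host pointer. */
static void *toy_get_ram_ptr(ToyRamBlock *block, size_t offset)
{
    if (block->host == NULL) {
        block->host = calloc(1, block->length); /* stands in for xen_map_block() */
    }
    return (char *)block->host + offset;
}

/* cf. qemu_put_ram_ptr()/xen_unmap_block(): drop the mapping once the caller
 * is finished, so the emulator need not keep all guest memory mapped at once. */
static void toy_put_ram_ptr(ToyRamBlock *block)
{
    free(block->host);
    block->host = NULL;
}

int main(void)
{
    ToyRamBlock blk = { .host = NULL, .length = 4096 };
    unsigned char buf[6] = "hello";

    /* Mirrors the RAM case of cpu_physical_memory_rw() after the patch:
     * get the block pointer, copy at the page offset, then put it back. */
    void *ptr = toy_get_ram_ptr(&blk, 0);
    memcpy((char *)ptr + 0x40, buf, sizeof(buf));
    toy_put_ram_ptr(&blk);

    printf("block unmapped again: %s\n", blk.host == NULL ? "yes" : "no");
    return 0;
}
```

Seen through this model, the page-by-page loop added to cpu_physical_memory_unmap() is the same discipline applied to a multi-page mapping: each TARGET_PAGE_SIZE chunk of the buffer gets its own qemu_put_ram_ptr() call so that every mapcache entry the mapping touched can be released.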