Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  140
1 files changed, 79 insertions, 61 deletions
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -80,7 +80,7 @@
 #define SMC_BITMAP_USE_THRESHOLD 10
 
 static TranslationBlock *tbs;
-int code_gen_max_blocks;
+static int code_gen_max_blocks;
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 static int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
@@ -107,7 +107,7 @@ static uint8_t *code_gen_buffer;
 static unsigned long code_gen_buffer_size;
 /* threshold to flush the translated code buffer */
 static unsigned long code_gen_buffer_max_size;
-uint8_t *code_gen_ptr;
+static uint8_t *code_gen_ptr;
 
 #if !defined(CONFIG_USER_ONLY)
 int phys_ram_fd;
@@ -1706,8 +1706,8 @@ static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
     = QLIST_HEAD_INITIALIZER(memory_client_list);
 
 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
-				  ram_addr_t size,
-				  ram_addr_t phys_offset)
+                                  ram_addr_t size,
+                                  ram_addr_t phys_offset)
 {
     CPUPhysMemoryClient *client;
     QLIST_FOREACH(client, &memory_client_list, list) {
@@ -1716,7 +1716,7 @@ static void cpu_notify_set_memory(target_phys_addr_t start_addr,
 }
 
 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
-					target_phys_addr_t end)
+                                        target_phys_addr_t end)
 {
     CPUPhysMemoryClient *client;
     QLIST_FOREACH(client, &memory_client_list, list) {
@@ -1803,17 +1803,17 @@ int cpu_str_to_log_mask(const char *str)
         p1 = strchr(p, ',');
         if (!p1)
             p1 = p + strlen(p);
-	if(cmp1(p,p1-p,"all")) {
-		for(item = cpu_log_items; item->mask != 0; item++) {
-			mask |= item->mask;
-		}
-	} else {
-	for(item = cpu_log_items; item->mask != 0; item++) {
-	    if (cmp1(p, p1 - p, item->name))
-		goto found;
+        if(cmp1(p,p1-p,"all")) {
+            for(item = cpu_log_items; item->mask != 0; item++) {
+                mask |= item->mask;
+            }
+        } else {
+            for(item = cpu_log_items; item->mask != 0; item++) {
+                if (cmp1(p, p1 - p, item->name))
+                    goto found;
+            }
+            return 0;
         }
-	return 0;
-	}
     found:
         mask |= item->mask;
         if (*p1 != ',')
@@ -1907,11 +1907,11 @@ static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
        overlap the flushed page.  */
     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
     memset (&env->tb_jmp_cache[i], 0,
-	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
+            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 
     i = tb_jmp_cache_hash_page(addr);
     memset (&env->tb_jmp_cache[i], 0,
-	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
+            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 }
 
 static CPUTLBEntry s_cputlb_empty_entry = {
@@ -2085,7 +2085,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
             + tlb_entry->addend);
-        ram_addr = qemu_ram_addr_from_host(p);
+        ram_addr = qemu_ram_addr_from_host_nofail(p);
         if (!cpu_physical_memory_is_dirty(ram_addr)) {
             tlb_entry->addr_write |= TLB_NOTDIRTY;
         }
@@ -2173,8 +2173,9 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
         pd = p->phys_offset;
     }
 #if defined(DEBUG_TLB)
-    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
-           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
+    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
+           " prot=%x idx=%d pd=0x%08lx\n",
+           vaddr, paddr, prot, mmu_idx, pd);
 #endif
 
     address = vaddr;
@@ -2687,16 +2688,16 @@ static long gethugepagesize(const char *path)
     int ret;
 
     do {
-	    ret = statfs(path, &fs);
+        ret = statfs(path, &fs);
     } while (ret != 0 && errno == EINTR);
 
     if (ret != 0) {
-	    perror(path);
-	    return 0;
+        perror(path);
+        return 0;
     }
 
     if (fs.f_type != HUGETLBFS_MAGIC)
-	    fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
+        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
 
     return fs.f_bsize;
 }
@@ -2715,7 +2716,7 @@ static void *file_ram_alloc(RAMBlock *block,
 
     hpagesize = gethugepagesize(path);
     if (!hpagesize) {
-	return NULL;
+        return NULL;
     }
 
     if (memory < hpagesize) {
@@ -2728,14 +2729,14 @@ static void *file_ram_alloc(RAMBlock *block,
     }
 
     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
-	return NULL;
+        return NULL;
     }
 
     fd = mkstemp(filename);
     if (fd < 0) {
-	perror("unable to create backing store for hugepages");
-	free(filename);
-	return NULL;
+        perror("unable to create backing store for hugepages");
+        free(filename);
+        return NULL;
     }
     unlink(filename);
     free(filename);
@@ -2749,7 +2750,7 @@ static void *file_ram_alloc(RAMBlock *block,
      * mmap will fail.
      */
     if (ftruncate(fd, memory))
-	perror("ftruncate");
+        perror("ftruncate");
 
 #ifdef MAP_POPULATE
     /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
@@ -2762,9 +2763,9 @@ static void *file_ram_alloc(RAMBlock *block,
     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 #endif
     if (area == MAP_FAILED) {
-	perror("file_ram_alloc: can't mmap RAM pages");
-	close(fd);
-	return (NULL);
+        perror("file_ram_alloc: can't mmap RAM pages");
+        close(fd);
+        return (NULL);
     }
     block->fd = fd;
     return area;
@@ -2808,7 +2809,8 @@ static ram_addr_t last_ram_offset(void)
     return last;
 }
 
-ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
+                                   ram_addr_t size, void *host)
 {
     RAMBlock *new_block, *block;
 
@@ -2832,32 +2834,33 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
         }
     }
 
-    if (mem_path) {
+    if (host) {
+        new_block->host = host;
+    } else {
+        if (mem_path) {
 #if defined (__linux__) && !defined(TARGET_S390X)
-        new_block->host = file_ram_alloc(new_block, size, mem_path);
-        if (!new_block->host) {
-            new_block->host = qemu_vmalloc(size);
-#ifdef MADV_MERGEABLE
-            madvise(new_block->host, size, MADV_MERGEABLE);
-#endif
-        }
+            new_block->host = file_ram_alloc(new_block, size, mem_path);
+            if (!new_block->host) {
+                new_block->host = qemu_vmalloc(size);
+                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+            }
 #else
-        fprintf(stderr, "-mem-path option unsupported\n");
-        exit(1);
+            fprintf(stderr, "-mem-path option unsupported\n");
+            exit(1);
 #endif
-    } else {
+        } else {
 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
-        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
-        new_block->host = mmap((void*)0x1000000, size,
-                               PROT_EXEC|PROT_READ|PROT_WRITE,
-                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+            new_block->host = mmap((void*)0x1000000, size,
+                                   PROT_EXEC|PROT_READ|PROT_WRITE,
+                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 #else
-        new_block->host = qemu_vmalloc(size);
-#endif
-#ifdef MADV_MERGEABLE
-        madvise(new_block->host, size, MADV_MERGEABLE);
+            new_block->host = qemu_vmalloc(size);
 #endif
+            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+        }
     }
+
     new_block->offset = find_ram_offset(size);
     new_block->length = size;
 
@@ -2874,6 +2877,11 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
     return new_block->offset;
 }
 
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+{
+    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
+}
+
 void qemu_ram_free(ram_addr_t addr)
 {
     RAMBlock *block;
@@ -2930,23 +2938,31 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
-/* Some of the softmmu routines need to translate from a host pointer
-   (typically a TLB entry) back to a ram offset. */
-ram_addr_t qemu_ram_addr_from_host(void *ptr)
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
     uint8_t *host = ptr;
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (host - block->host < block->length) {
-            return block->offset + (host - block->host);
+            *ram_addr = block->offset + (host - block->host);
+            return 0;
         }
     }
+    return -1;
+}
 
-    fprintf(stderr, "Bad ram pointer %p\n", ptr);
-    abort();
+/* Some of the softmmu routines need to translate from a host pointer
+   (typically a TLB entry) back to a ram offset. */
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
+{
+    ram_addr_t ram_addr;
 
-    return 0;
+    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
+        fprintf(stderr, "Bad ram pointer %p\n", ptr);
+        abort();
+    }
+    return ram_addr;
 }
 
 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
@@ -3282,6 +3298,8 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n",
            __func__, mmio, start, end, idx, eidx, memory);
 #endif
+    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
+        memory = IO_MEM_UNASSIGNED;
     memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
     for (; idx <= eidx; idx++) {
         mmio->sub_io_index[idx] = memory;
@@ -3693,7 +3711,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
 {
     if (buffer != bounce.buffer) {
         if (is_write) {
-            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
+            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
             while (access_len) {
                 unsigned l;
                 l = TARGET_PAGE_SIZE;
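The central API change above replaces the aborting qemu_ram_addr_from_host() with a fallible variant plus an explicit _nofail wrapper. A minimal caller-side sketch (not part of the patch; the function and pointer names below are invented for illustration, and the declarations are assumed to be visible to code inside the QEMU tree of that era):

/* Hypothetical caller illustrating the two lookup flavours introduced above. */
static void example_translate(void *host_ptr)
{
    ram_addr_t ram_addr;

    /* Fallible variant: returns 0 and fills ram_addr when host_ptr lies
     * inside a registered RAMBlock, -1 otherwise. */
    if (qemu_ram_addr_from_host(host_ptr, &ram_addr) == 0) {
        /* ... use ram_addr ... */
    }

    /* Aborting variant: for callers that already know the pointer refers to
     * guest RAM; it prints "Bad ram pointer" and calls abort() otherwise. */
    ram_addr = qemu_ram_addr_from_host_nofail(host_ptr);
}

Within the patch itself, the softmmu callers (tlb_update_dirty() and cpu_physical_memory_unmap()) keep the old aborting behaviour by switching to the _nofail form.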