Diffstat (limited to 'exec.c')
-rw-r--r-- | exec.c | 83 |
1 files changed, 39 insertions, 44 deletions
@@ -1824,11 +1824,29 @@ void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 }
 
+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
+                                      uintptr_t length)
+{
+    uintptr_t start1;
+
+    /* we modify the TLB cache so that the dirty bit will be set again
+       when accessing the range */
+    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
+    /* Check that we don't span multiple blocks - this breaks the
+       address comparisons below.  */
+    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
+        != (end - 1) - start) {
+        abort();
+    }
+    cpu_tlb_reset_dirty_all(start1, length);
+
+}
+
 /* Note: start and end must be within the same ram block.  */
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
 {
-    uintptr_t length, start1;
+    uintptr_t length;
 
     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
@@ -1838,16 +1856,9 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         return;
     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
 
-    /* we modify the TLB cache so that the dirty bit will be set again
-       when accessing the range */
-    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
-    /* Check that we don't span multiple blocks - this breaks the
-       address comparisons below.  */
-    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
-        != (end - 1) - start) {
-        abort();
+    if (tcg_enabled()) {
+        tlb_reset_dirty_range_all(start, end, length);
     }
-    cpu_tlb_reset_dirty_all(start1, length);
 }
 
 int cpu_physical_memory_set_dirty_tracking(int enable)
@@ -2229,14 +2240,6 @@ static void phys_sections_clear(void)
     phys_sections_nb = 0;
 }
 
-/* register physical memory.
-   For RAM, 'size' must be a multiple of the target page size.
-   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
-   io memory page.  The address used when calling the IO function is
-   the offset from the start of the region, plus region_offset.  Both
-   start_addr and region_offset are rounded down to a page boundary
-   before calculating this offset.  This should not be a problem unless
-   the low bits of start_addr and region_offset differ.  */
 static void register_subpage(MemoryRegionSection *section)
 {
     subpage_t *subpage;
@@ -2260,7 +2263,7 @@ static void register_subpage(MemoryRegionSection *section)
         subpage = container_of(existing->mr, subpage_t, iomem);
     }
     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
-    end = start + section->size;
+    end = start + section->size - 1;
     subpage_register(subpage, start, end, phys_section_add(section));
 }
 
@@ -2294,10 +2297,15 @@ void cpu_register_physical_memory_log(MemoryRegionSection *section,
         remain.offset_within_address_space += now.size;
         remain.offset_within_region += now.size;
     }
-    now = remain;
-    now.size &= TARGET_PAGE_MASK;
-    if (now.size) {
-        register_multipage(&now);
+    while (remain.size >= TARGET_PAGE_SIZE) {
+        now = remain;
+        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
+            now.size = TARGET_PAGE_SIZE;
+            register_subpage(&now);
+        } else {
+            now.size &= TARGET_PAGE_MASK;
+            register_multipage(&now);
+        }
         remain.size -= now.size;
         remain.offset_within_address_space += now.size;
         remain.offset_within_region += now.size;
     }
@@ -2525,26 +2533,14 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
             exit(1);
 #endif
         } else {
-#if defined(TARGET_S390X) && defined(CONFIG_KVM)
-            /* S390 KVM requires the topmost vma of the RAM to be smaller than
-               an system defined value, which is at least 256GB. Larger systems
-               have larger values. We put the guest between the end of data
-               segment (system break) and this value. We use 32GB as a base to
-               have enough room for the system break to grow. */
-            new_block->host = mmap((void*)0x800000000, size,
-                                   PROT_EXEC|PROT_READ|PROT_WRITE,
-                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-            if (new_block->host == MAP_FAILED) {
-                fprintf(stderr, "Allocating RAM failed\n");
-                abort();
-            }
-#else
             if (xen_enabled()) {
                 xen_ram_alloc(new_block->offset, size, mr);
+            } else if (kvm_enabled()) {
+                /* some s390/kvm configurations have special constraints */
+                new_block->host = kvm_vmalloc(size);
             } else {
                 new_block->host = qemu_vmalloc(size);
             }
-#endif
             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
         }
     }
@@ -2554,8 +2550,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
 
     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                        last_ram_offset() >> TARGET_PAGE_BITS);
-    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
-           0xff, size >> TARGET_PAGE_BITS);
+    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
 
     if (kvm_enabled())
         kvm_setup_guest_memory(new_block->host, size);
@@ -3212,13 +3207,13 @@ static void core_log_global_stop(MemoryListener *listener)
 
 static void core_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
-                             bool match_data, uint64_t data, int fd)
+                             bool match_data, uint64_t data, EventNotifier *e)
 {
 }
 
 static void core_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
-                             bool match_data, uint64_t data, int fd)
+                             bool match_data, uint64_t data, EventNotifier *e)
 {
 }
 
@@ -3278,13 +3273,13 @@ static void io_log_global_stop(MemoryListener *listener)
 
 static void io_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
-                           bool match_data, uint64_t data, int fd)
+                           bool match_data, uint64_t data, EventNotifier *e)
 {
 }
 
 static void io_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
-                           bool match_data, uint64_t data, int fd)
+                           bool match_data, uint64_t data, EventNotifier *e)
 {
 }
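Note on the cpu_register_physical_memory_log() hunk above: the old code registered the page-aligned tail of a section with a single register_multipage() call, which quietly assumed the section's offset within its MemoryRegion was page aligned. The new while loop re-checks remain.offset_within_region on every iteration and falls back to one-page register_subpage() calls when it is not. The sketch below is a minimal standalone illustration, not QEMU code: Section, PAGE_SIZE, split() and the printf() calls are stand-ins for MemoryRegionSection, TARGET_PAGE_SIZE and the register_subpage()/register_multipage() calls, chosen only to show how the loop splits a section.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))   /* high bits select the page */

/* Toy stand-in for MemoryRegionSection: only the fields the loop uses. */
typedef struct {
    uint64_t offset_within_region;
    uint64_t offset_within_address_space;
    uint64_t size;
} Section;

static void split(Section remain)
{
    Section now;

    while (remain.size >= PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~PAGE_MASK) {
            /* not page aligned inside the region: one subpage at a time */
            now.size = PAGE_SIZE;
            printf("subpage   @ %#10llx, %llu bytes\n",
                   (unsigned long long)now.offset_within_address_space,
                   (unsigned long long)now.size);
        } else {
            /* aligned: register the whole page-aligned run at once */
            now.size &= PAGE_MASK;
            printf("multipage @ %#10llx, %llu bytes\n",
                   (unsigned long long)now.offset_within_address_space,
                   (unsigned long long)now.size);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
}

int main(void)
{
    /* Aligned inside its region: one "multipage" covering all three pages. */
    split((Section){ .offset_within_region = 0,
                     .offset_within_address_space = 0x10000,
                     .size = 3 * PAGE_SIZE });

    /* Misaligned inside its region: three single-page "subpages". */
    split((Section){ .offset_within_region = 0x200,
                     .offset_within_address_space = 0x20000,
                     .size = 3 * PAGE_SIZE });
    return 0;
}

Relatedly, the register_subpage() hunk changes end from start + section->size to start + section->size - 1, i.e. it now passes the offset of the section's last byte rather than one past it to subpage_register().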