Diffstat (limited to 'accel')
-rw-r--r-- | accel/Makefile.objs          |    4
-rw-r--r-- | accel/accel.c                |  128
-rw-r--r-- | accel/kvm/Makefile.objs      |    1
-rw-r--r-- | accel/kvm/kvm-all.c          | 2633
-rw-r--r-- | accel/kvm/trace-events       |   15
-rw-r--r-- | accel/stubs/Makefile.objs    |    1
-rw-r--r-- | accel/stubs/kvm-stub.c       |  158
-rw-r--r-- | accel/tcg/Makefile.objs      |    3
-rw-r--r-- | accel/tcg/cpu-exec-common.c  |   82
-rw-r--r-- | accel/tcg/cpu-exec.c         |  683
-rw-r--r-- | accel/tcg/cputlb.c           | 1051
-rw-r--r-- | accel/tcg/tcg-all.c          |   61
-rw-r--r-- | accel/tcg/trace-events       |   10
-rw-r--r-- | accel/tcg/translate-all.c    | 2227
-rw-r--r-- | accel/tcg/translate-all.h    |   36
-rw-r--r-- | accel/tcg/translate-common.c |   56
16 files changed, 7149 insertions, 0 deletions
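accel/accel.c below introduces the generic TYPE_ACCEL QOM type that configure_accelerator() drives: it looks an accelerator class up by name via ACCEL_CLASS_NAME(), instantiates it, and calls its init_machine() hook. As a rough, hypothetical sketch of what a backend registering against that interface could look like (the "noop" accelerator and every identifier containing "noop" are invented for illustration; only TYPE_ACCEL, AccelClass, ACCEL_CLASS_NAME and the name/init_machine/allowed fields come from the code below):

/*
 * Hypothetical minimal accelerator, sketched against the AccelClass
 * interface consumed by accel/accel.c in this patch.  Not part of the
 * patch itself.
 */
#include "qemu/osdep.h"
#include "sysemu/accel.h"
#include "hw/boards.h"

static bool noop_allowed;

static int noop_accel_init_machine(MachineState *ms)
{
    /* Nothing to set up; a real accelerator would open its backend here. */
    return 0;
}

static void noop_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "noop";
    ac->init_machine = noop_accel_init_machine;
    ac->allowed = &noop_allowed;   /* flipped to true by accel_init_machine() */
}

static const TypeInfo noop_accel_type = {
    .name = ACCEL_CLASS_NAME("noop"),   /* what accel_find("noop") looks up */
    .parent = TYPE_ACCEL,
    .class_init = noop_accel_class_init,
};

static void noop_accel_register_types(void)
{
    type_register_static(&noop_accel_type);
}
type_init(noop_accel_register_types);

With such a type registered, "-machine accel=noop" would be resolved by accel_find("noop") and brought up through accel_init_machine(), the same path taken by the kvm and tcg accelerators this commit moves under accel/.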
diff --git a/accel/Makefile.objs b/accel/Makefile.objs new file mode 100644 index 0000000000..cd5702f347 --- /dev/null +++ b/accel/Makefile.objs @@ -0,0 +1,4 @@ +obj-$(CONFIG_SOFTMMU) += accel.o +obj-y += kvm/ +obj-y += tcg/ +obj-y += stubs/ diff --git a/accel/accel.c b/accel/accel.c new file mode 100644 index 0000000000..7c079a5611 --- /dev/null +++ b/accel/accel.c @@ -0,0 +1,128 @@ +/* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "sysemu/accel.h" +#include "hw/boards.h" +#include "qemu-common.h" +#include "sysemu/arch_init.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/qtest.h" +#include "hw/xen/xen.h" +#include "qom/object.h" + +static const TypeInfo accel_type = { + .name = TYPE_ACCEL, + .parent = TYPE_OBJECT, + .class_size = sizeof(AccelClass), + .instance_size = sizeof(AccelState), +}; + +/* Lookup AccelClass from opt_name. 
Returns NULL if not found */ +static AccelClass *accel_find(const char *opt_name) +{ + char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name); + AccelClass *ac = ACCEL_CLASS(object_class_by_name(class_name)); + g_free(class_name); + return ac; +} + +static int accel_init_machine(AccelClass *acc, MachineState *ms) +{ + ObjectClass *oc = OBJECT_CLASS(acc); + const char *cname = object_class_get_name(oc); + AccelState *accel = ACCEL(object_new(cname)); + int ret; + ms->accelerator = accel; + *(acc->allowed) = true; + ret = acc->init_machine(ms); + if (ret < 0) { + ms->accelerator = NULL; + *(acc->allowed) = false; + object_unref(OBJECT(accel)); + } + return ret; +} + +void configure_accelerator(MachineState *ms) +{ + const char *p; + char buf[10]; + int ret; + bool accel_initialised = false; + bool init_failed = false; + AccelClass *acc = NULL; + + p = qemu_opt_get(qemu_get_machine_opts(), "accel"); + if (p == NULL) { + /* Use the default "accelerator", tcg */ + p = "tcg"; + } + + while (!accel_initialised && *p != '\0') { + if (*p == ':') { + p++; + } + p = get_opt_name(buf, sizeof(buf), p, ':'); + acc = accel_find(buf); + if (!acc) { + fprintf(stderr, "\"%s\" accelerator not found.\n", buf); + continue; + } + if (acc->available && !acc->available()) { + printf("%s not supported for this target\n", + acc->name); + continue; + } + ret = accel_init_machine(acc, ms); + if (ret < 0) { + init_failed = true; + fprintf(stderr, "failed to initialize %s: %s\n", + acc->name, + strerror(-ret)); + } else { + accel_initialised = true; + } + } + + if (!accel_initialised) { + if (!init_failed) { + fprintf(stderr, "No accelerator found!\n"); + } + exit(1); + } + + if (init_failed) { + fprintf(stderr, "Back to %s accelerator.\n", acc->name); + } +} + +static void register_accel_types(void) +{ + type_register_static(&accel_type); +} + +type_init(register_accel_types); diff --git a/accel/kvm/Makefile.objs b/accel/kvm/Makefile.objs new file mode 100644 index 0000000000..85351e7de7 --- /dev/null +++ b/accel/kvm/Makefile.objs @@ -0,0 +1 @@ +obj-$(CONFIG_KVM) += kvm-all.o diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c new file mode 100644 index 0000000000..75feffa504 --- /dev/null +++ b/accel/kvm/kvm-all.c @@ -0,0 +1,2633 @@ +/* + * QEMU KVM support + * + * Copyright IBM, Corp. 2008 + * Red Hat, Inc. 2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Glauber Costa <gcosta@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> + +#include <linux/kvm.h> + +#include "qemu-common.h" +#include "qemu/atomic.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/hw.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/s390x/adapter.h" +#include "exec/gdbstub.h" +#include "sysemu/kvm_int.h" +#include "sysemu/cpus.h" +#include "qemu/bswap.h" +#include "exec/memory.h" +#include "exec/ram_addr.h" +#include "exec/address-spaces.h" +#include "qemu/event_notifier.h" +#include "trace.h" +#include "hw/irq.h" + +#include "hw/boards.h" + +/* This check must be after config-host.h is included */ +#ifdef CONFIG_EVENTFD +#include <sys/eventfd.h> +#endif + +/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We + * need to use the real host PAGE_SIZE, as that's what KVM will use. 
+ */ +#define PAGE_SIZE getpagesize() + +//#define DEBUG_KVM + +#ifdef DEBUG_KVM +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +#define KVM_MSI_HASHTAB_SIZE 256 + +struct KVMParkedVcpu { + unsigned long vcpu_id; + int kvm_fd; + QLIST_ENTRY(KVMParkedVcpu) node; +}; + +struct KVMState +{ + AccelState parent_obj; + + int nr_slots; + int fd; + int vmfd; + int coalesced_mmio; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + bool coalesced_flush_in_progress; + int broken_set_mem_region; + int vcpu_events; + int robust_singlestep; + int debugregs; +#ifdef KVM_CAP_SET_GUEST_DEBUG + struct kvm_sw_breakpoint_head kvm_sw_breakpoints; +#endif + int many_ioeventfds; + int intx_set_mask; + /* The man page (and posix) say ioctl numbers are signed int, but + * they're not. Linux, glibc and *BSD all treat ioctl numbers as + * unsigned, and treating them as signed here can break things */ + unsigned irq_set_ioctl; + unsigned int sigmask_len; + GHashTable *gsimap; +#ifdef KVM_CAP_IRQ_ROUTING + struct kvm_irq_routing *irq_routes; + int nr_allocated_irq_routes; + unsigned long *used_gsi_bitmap; + unsigned int gsi_count; + QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; +#endif + KVMMemoryListener memory_listener; + QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; +}; + +KVMState *kvm_state; +bool kvm_kernel_irqchip; +bool kvm_split_irqchip; +bool kvm_async_interrupts_allowed; +bool kvm_halt_in_kernel_allowed; +bool kvm_eventfds_allowed; +bool kvm_irqfds_allowed; +bool kvm_resamplefds_allowed; +bool kvm_msi_via_irqfd_allowed; +bool kvm_gsi_routing_allowed; +bool kvm_gsi_direct_mapping; +bool kvm_allowed; +bool kvm_readonly_mem_allowed; +bool kvm_vm_attributes_allowed; +bool kvm_direct_msi_allowed; +bool kvm_ioeventfd_any_length_allowed; +bool kvm_msi_use_devid; +static bool kvm_immediate_exit; + +static const KVMCapabilityInfo kvm_required_capabilites[] = { + KVM_CAP_INFO(USER_MEMORY), + KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS), + KVM_CAP_LAST_INFO +}; + +int kvm_get_max_memslots(void) +{ + KVMState *s = KVM_STATE(current_machine->accelerator); + + return s->nr_slots; +} + +static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) +{ + KVMState *s = kvm_state; + int i; + + for (i = 0; i < s->nr_slots; i++) { + if (kml->slots[i].memory_size == 0) { + return &kml->slots[i]; + } + } + + return NULL; +} + +bool kvm_has_free_slot(MachineState *ms) +{ + KVMState *s = KVM_STATE(ms->accelerator); + + return kvm_get_free_slot(&s->memory_listener); +} + +static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml) +{ + KVMSlot *slot = kvm_get_free_slot(kml); + + if (slot) { + return slot; + } + + fprintf(stderr, "%s: no free slot available\n", __func__); + abort(); +} + +static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml, + hwaddr start_addr, + hwaddr end_addr) +{ + KVMState *s = kvm_state; + int i; + + for (i = 0; i < s->nr_slots; i++) { + KVMSlot *mem = &kml->slots[i]; + + if (start_addr == mem->start_addr && + end_addr == mem->start_addr + mem->memory_size) { + return mem; + } + } + + return NULL; +} + +/* + * Find overlapping slot with lowest start address + */ +static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml, + hwaddr start_addr, + hwaddr end_addr) +{ + KVMState *s = kvm_state; + KVMSlot *found = NULL; + int i; + + for (i = 0; i < s->nr_slots; i++) { + KVMSlot *mem = &kml->slots[i]; + + if (mem->memory_size == 0 || + (found && found->start_addr < 
mem->start_addr)) { + continue; + } + + if (end_addr > mem->start_addr && + start_addr < mem->start_addr + mem->memory_size) { + found = mem; + } + } + + return found; +} + +int kvm_physical_memory_addr_from_host(KVMState *s, void *ram, + hwaddr *phys_addr) +{ + KVMMemoryListener *kml = &s->memory_listener; + int i; + + for (i = 0; i < s->nr_slots; i++) { + KVMSlot *mem = &kml->slots[i]; + + if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { + *phys_addr = mem->start_addr + (ram - mem->ram); + return 1; + } + } + + return 0; +} + +static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot) +{ + KVMState *s = kvm_state; + struct kvm_userspace_memory_region mem; + + mem.slot = slot->slot | (kml->as_id << 16); + mem.guest_phys_addr = slot->start_addr; + mem.userspace_addr = (unsigned long)slot->ram; + mem.flags = slot->flags; + + if (slot->memory_size && mem.flags & KVM_MEM_READONLY) { + /* Set the slot size to 0 before setting the slot to the desired + * value. This is needed based on KVM commit 75d61fbc. */ + mem.memory_size = 0; + kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); + } + mem.memory_size = slot->memory_size; + return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); +} + +int kvm_destroy_vcpu(CPUState *cpu) +{ + KVMState *s = kvm_state; + long mmap_size; + struct KVMParkedVcpu *vcpu = NULL; + int ret = 0; + + DPRINTF("kvm_destroy_vcpu\n"); + + mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); + if (mmap_size < 0) { + ret = mmap_size; + DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n"); + goto err; + } + + ret = munmap(cpu->kvm_run, mmap_size); + if (ret < 0) { + goto err; + } + + vcpu = g_malloc0(sizeof(*vcpu)); + vcpu->vcpu_id = kvm_arch_vcpu_id(cpu); + vcpu->kvm_fd = cpu->kvm_fd; + QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node); +err: + return ret; +} + +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id) +{ + struct KVMParkedVcpu *cpu; + + QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { + if (cpu->vcpu_id == vcpu_id) { + int kvm_fd; + + QLIST_REMOVE(cpu, node); + kvm_fd = cpu->kvm_fd; + g_free(cpu); + return kvm_fd; + } + } + + return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id); +} + +int kvm_init_vcpu(CPUState *cpu) +{ + KVMState *s = kvm_state; + long mmap_size; + int ret; + + DPRINTF("kvm_init_vcpu\n"); + + ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu)); + if (ret < 0) { + DPRINTF("kvm_create_vcpu failed\n"); + goto err; + } + + cpu->kvm_fd = ret; + cpu->kvm_state = s; + cpu->kvm_vcpu_dirty = true; + + mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); + if (mmap_size < 0) { + ret = mmap_size; + DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n"); + goto err; + } + + cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, + cpu->kvm_fd, 0); + if (cpu->kvm_run == MAP_FAILED) { + ret = -errno; + DPRINTF("mmap'ing vcpu state failed\n"); + goto err; + } + + if (s->coalesced_mmio && !s->coalesced_mmio_ring) { + s->coalesced_mmio_ring = + (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; + } + + ret = kvm_arch_init_vcpu(cpu); +err: + return ret; +} + +/* + * dirty pages logging control + */ + +static int kvm_mem_flags(MemoryRegion *mr) +{ + bool readonly = mr->readonly || memory_region_is_romd(mr); + int flags = 0; + + if (memory_region_get_dirty_log_mask(mr) != 0) { + flags |= KVM_MEM_LOG_DIRTY_PAGES; + } + if (readonly && kvm_readonly_mem_allowed) { + flags |= KVM_MEM_READONLY; + } + return flags; +} + +static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem, + MemoryRegion *mr) +{ + int 
old_flags; + + old_flags = mem->flags; + mem->flags = kvm_mem_flags(mr); + + /* If nothing changed effectively, no need to issue ioctl */ + if (mem->flags == old_flags) { + return 0; + } + + return kvm_set_user_memory_region(kml, mem); +} + +static int kvm_section_update_flags(KVMMemoryListener *kml, + MemoryRegionSection *section) +{ + hwaddr phys_addr = section->offset_within_address_space; + ram_addr_t size = int128_get64(section->size); + KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size); + + if (mem == NULL) { + return 0; + } else { + return kvm_slot_update_flags(kml, mem, section->mr); + } +} + +static void kvm_log_start(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + int r; + + if (old != 0) { + return; + } + + r = kvm_section_update_flags(kml, section); + if (r < 0) { + abort(); + } +} + +static void kvm_log_stop(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + int r; + + if (new != 0) { + return; + } + + r = kvm_section_update_flags(kml, section); + if (r < 0) { + abort(); + } +} + +/* get kvm's dirty pages bitmap and update qemu's */ +static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section, + unsigned long *bitmap) +{ + ram_addr_t start = section->offset_within_region + + memory_region_get_ram_addr(section->mr); + ram_addr_t pages = int128_get64(section->size) / getpagesize(); + + cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages); + return 0; +} + +#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1)) + +/** + * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space + * This function updates qemu's dirty bitmap using + * memory_region_set_dirty(). This means all bits are set + * to dirty. + * + * @start_add: start of logged region. + * @end_addr: end of logged region. + */ +static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, + MemoryRegionSection *section) +{ + KVMState *s = kvm_state; + unsigned long size, allocated_size = 0; + struct kvm_dirty_log d = {}; + KVMSlot *mem; + int ret = 0; + hwaddr start_addr = section->offset_within_address_space; + hwaddr end_addr = start_addr + int128_get64(section->size); + + d.dirty_bitmap = NULL; + while (start_addr < end_addr) { + mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr); + if (mem == NULL) { + break; + } + + /* XXX bad kernel interface alert + * For dirty bitmap, kernel allocates array of size aligned to + * bits-per-long. But for case when the kernel is 64bits and + * the userspace is 32bits, userspace can't align to the same + * bits-per-long, since sizeof(long) is different between kernel + * and user space. This way, userspace will provide buffer which + * may be 4 bytes less than the kernel will use, resulting in + * userspace memory corruption (which is not detectable by valgrind + * too, in most cases). + * So for now, let's align to 64 instead of HOST_LONG_BITS here, in + * a hope that sizeof(long) won't become >8 any time soon. 
+ */ + size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), + /*HOST_LONG_BITS*/ 64) / 8; + if (!d.dirty_bitmap) { + d.dirty_bitmap = g_malloc(size); + } else if (size > allocated_size) { + d.dirty_bitmap = g_realloc(d.dirty_bitmap, size); + } + allocated_size = size; + memset(d.dirty_bitmap, 0, allocated_size); + + d.slot = mem->slot | (kml->as_id << 16); + if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) { + DPRINTF("ioctl failed %d\n", errno); + ret = -1; + break; + } + + kvm_get_dirty_pages_log_range(section, d.dirty_bitmap); + start_addr = mem->start_addr + mem->memory_size; + } + g_free(d.dirty_bitmap); + + return ret; +} + +static void kvm_coalesce_mmio_region(MemoryListener *listener, + MemoryRegionSection *secion, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_mmio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pad = 0; + + (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone); + } +} + +static void kvm_uncoalesce_mmio_region(MemoryListener *listener, + MemoryRegionSection *secion, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_mmio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pad = 0; + + (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone); + } +} + +int kvm_check_extension(KVMState *s, unsigned int extension) +{ + int ret; + + ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension); + if (ret < 0) { + ret = 0; + } + + return ret; +} + +int kvm_vm_check_extension(KVMState *s, unsigned int extension) +{ + int ret; + + ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension); + if (ret < 0) { + /* VM wide version not implemented, use global one instead */ + ret = kvm_check_extension(s, extension); + } + + return ret; +} + +static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size) +{ +#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) + /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN + * endianness, but the memory core hands them in target endianness. + * For example, PPC is always treated as big-endian even if running + * on KVM and on PPC64LE. Correct here. + */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + } +#endif + return val; +} + +static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val, + bool assign, uint32_t size, bool datamatch) +{ + int ret; + struct kvm_ioeventfd iofd = { + .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0, + .addr = addr, + .len = size, + .flags = 0, + .fd = fd, + }; + + if (!kvm_enabled()) { + return -ENOSYS; + } + + if (datamatch) { + iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH; + } + if (!assign) { + iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd); + + if (ret < 0) { + return -errno; + } + + return 0; +} + +static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val, + bool assign, uint32_t size, bool datamatch) +{ + struct kvm_ioeventfd kick = { + .datamatch = datamatch ? 
adjust_ioeventfd_endianness(val, size) : 0, + .addr = addr, + .flags = KVM_IOEVENTFD_FLAG_PIO, + .len = size, + .fd = fd, + }; + int r; + if (!kvm_enabled()) { + return -ENOSYS; + } + if (datamatch) { + kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH; + } + if (!assign) { + kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; + } + r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); + if (r < 0) { + return r; + } + return 0; +} + + +static int kvm_check_many_ioeventfds(void) +{ + /* Userspace can use ioeventfd for io notification. This requires a host + * that supports eventfd(2) and an I/O thread; since eventfd does not + * support SIGIO it cannot interrupt the vcpu. + * + * Older kernels have a 6 device limit on the KVM io bus. Find out so we + * can avoid creating too many ioeventfds. + */ +#if defined(CONFIG_EVENTFD) + int ioeventfds[7]; + int i, ret = 0; + for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) { + ioeventfds[i] = eventfd(0, EFD_CLOEXEC); + if (ioeventfds[i] < 0) { + break; + } + ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true); + if (ret < 0) { + close(ioeventfds[i]); + break; + } + } + + /* Decide whether many devices are supported or not */ + ret = i == ARRAY_SIZE(ioeventfds); + + while (i-- > 0) { + kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true); + close(ioeventfds[i]); + } + return ret; +#else + return 0; +#endif +} + +static const KVMCapabilityInfo * +kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) +{ + while (list->name) { + if (!kvm_check_extension(s, list->value)) { + return list; + } + list++; + } + return NULL; +} + +static void kvm_set_phys_mem(KVMMemoryListener *kml, + MemoryRegionSection *section, bool add) +{ + KVMState *s = kvm_state; + KVMSlot *mem, old; + int err; + MemoryRegion *mr = section->mr; + bool writeable = !mr->readonly && !mr->rom_device; + hwaddr start_addr = section->offset_within_address_space; + ram_addr_t size = int128_get64(section->size); + void *ram = NULL; + unsigned delta; + + /* kvm works in page size chunks, but the function may be called + with sub-page size and unaligned start address. Pad the start + address to next and truncate size to previous page boundary. */ + delta = qemu_real_host_page_size - (start_addr & ~qemu_real_host_page_mask); + delta &= ~qemu_real_host_page_mask; + if (delta > size) { + return; + } + start_addr += delta; + size -= delta; + size &= qemu_real_host_page_mask; + if (!size || (start_addr & ~qemu_real_host_page_mask)) { + return; + } + + if (!memory_region_is_ram(mr)) { + if (writeable || !kvm_readonly_mem_allowed) { + return; + } else if (!mr->romd_mode) { + /* If the memory device is not in romd_mode, then we actually want + * to remove the kvm memory slot so all accesses will trap. */ + add = false; + } + } + + ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta; + + while (1) { + mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size); + if (!mem) { + break; + } + + if (add && start_addr >= mem->start_addr && + (start_addr + size <= mem->start_addr + mem->memory_size) && + (ram - start_addr == mem->ram - mem->start_addr)) { + /* The new slot fits into the existing one and comes with + * identical parameters - update flags and done. 
*/ + kvm_slot_update_flags(kml, mem, mr); + return; + } + + old = *mem; + + if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { + kvm_physical_sync_dirty_bitmap(kml, section); + } + + /* unregister the overlapping slot */ + mem->memory_size = 0; + err = kvm_set_user_memory_region(kml, mem); + if (err) { + fprintf(stderr, "%s: error unregistering overlapping slot: %s\n", + __func__, strerror(-err)); + abort(); + } + + /* Workaround for older KVM versions: we can't join slots, even not by + * unregistering the previous ones and then registering the larger + * slot. We have to maintain the existing fragmentation. Sigh. + * + * This workaround assumes that the new slot starts at the same + * address as the first existing one. If not or if some overlapping + * slot comes around later, we will fail (not seen in practice so far) + * - and actually require a recent KVM version. */ + if (s->broken_set_mem_region && + old.start_addr == start_addr && old.memory_size < size && add) { + mem = kvm_alloc_slot(kml); + mem->memory_size = old.memory_size; + mem->start_addr = old.start_addr; + mem->ram = old.ram; + mem->flags = kvm_mem_flags(mr); + + err = kvm_set_user_memory_region(kml, mem); + if (err) { + fprintf(stderr, "%s: error updating slot: %s\n", __func__, + strerror(-err)); + abort(); + } + + start_addr += old.memory_size; + ram += old.memory_size; + size -= old.memory_size; + continue; + } + + /* register prefix slot */ + if (old.start_addr < start_addr) { + mem = kvm_alloc_slot(kml); + mem->memory_size = start_addr - old.start_addr; + mem->start_addr = old.start_addr; + mem->ram = old.ram; + mem->flags = kvm_mem_flags(mr); + + err = kvm_set_user_memory_region(kml, mem); + if (err) { + fprintf(stderr, "%s: error registering prefix slot: %s\n", + __func__, strerror(-err)); +#ifdef TARGET_PPC + fprintf(stderr, "%s: This is probably because your kernel's " \ + "PAGE_SIZE is too big. 
Please try to use 4k " \ + "PAGE_SIZE!\n", __func__); +#endif + abort(); + } + } + + /* register suffix slot */ + if (old.start_addr + old.memory_size > start_addr + size) { + ram_addr_t size_delta; + + mem = kvm_alloc_slot(kml); + mem->start_addr = start_addr + size; + size_delta = mem->start_addr - old.start_addr; + mem->memory_size = old.memory_size - size_delta; + mem->ram = old.ram + size_delta; + mem->flags = kvm_mem_flags(mr); + + err = kvm_set_user_memory_region(kml, mem); + if (err) { + fprintf(stderr, "%s: error registering suffix slot: %s\n", + __func__, strerror(-err)); + abort(); + } + } + } + + /* in case the KVM bug workaround already "consumed" the new slot */ + if (!size) { + return; + } + if (!add) { + return; + } + mem = kvm_alloc_slot(kml); + mem->memory_size = size; + mem->start_addr = start_addr; + mem->ram = ram; + mem->flags = kvm_mem_flags(mr); + + err = kvm_set_user_memory_region(kml, mem); + if (err) { + fprintf(stderr, "%s: error registering slot: %s\n", __func__, + strerror(-err)); + abort(); + } +} + +static void kvm_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + + memory_region_ref(section->mr); + kvm_set_phys_mem(kml, section, true); +} + +static void kvm_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + + kvm_set_phys_mem(kml, section, false); + memory_region_unref(section->mr); +} + +static void kvm_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); + int r; + + r = kvm_physical_sync_dirty_bitmap(kml, section); + if (r < 0) { + abort(); + } +} + +static void kvm_mem_ioeventfd_add(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, + data, true, int128_get64(section->size), + match_data); + if (r < 0) { + fprintf(stderr, "%s: error adding ioeventfd: %s\n", + __func__, strerror(-r)); + abort(); + } +} + +static void kvm_mem_ioeventfd_del(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, + data, false, int128_get64(section->size), + match_data); + if (r < 0) { + abort(); + } +} + +static void kvm_io_ioeventfd_add(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, + data, true, int128_get64(section->size), + match_data); + if (r < 0) { + fprintf(stderr, "%s: error adding ioeventfd: %s\n", + __func__, strerror(-r)); + abort(); + } +} + +static void kvm_io_ioeventfd_del(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) + +{ + int fd = event_notifier_get_fd(e); + int r; + + r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, + data, false, int128_get64(section->size), + match_data); + if (r < 0) { + abort(); + } +} + +void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, + AddressSpace *as, int as_id) +{ + int i; + + kml->slots 
= g_malloc0(s->nr_slots * sizeof(KVMSlot)); + kml->as_id = as_id; + + for (i = 0; i < s->nr_slots; i++) { + kml->slots[i].slot = i; + } + + kml->listener.region_add = kvm_region_add; + kml->listener.region_del = kvm_region_del; + kml->listener.log_start = kvm_log_start; + kml->listener.log_stop = kvm_log_stop; + kml->listener.log_sync = kvm_log_sync; + kml->listener.priority = 10; + + memory_listener_register(&kml->listener, as); +} + +static MemoryListener kvm_io_listener = { + .eventfd_add = kvm_io_ioeventfd_add, + .eventfd_del = kvm_io_ioeventfd_del, + .priority = 10, +}; + +static void kvm_handle_interrupt(CPUState *cpu, int mask) +{ + cpu->interrupt_request |= mask; + + if (!qemu_cpu_is_self(cpu)) { + qemu_cpu_kick(cpu); + } +} + +int kvm_set_irq(KVMState *s, int irq, int level) +{ + struct kvm_irq_level event; + int ret; + + assert(kvm_async_interrupts_enabled()); + + event.level = level; + event.irq = irq; + ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event); + if (ret < 0) { + perror("kvm_set_irq"); + abort(); + } + + return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status; +} + +#ifdef KVM_CAP_IRQ_ROUTING +typedef struct KVMMSIRoute { + struct kvm_irq_routing_entry kroute; + QTAILQ_ENTRY(KVMMSIRoute) entry; +} KVMMSIRoute; + +static void set_gsi(KVMState *s, unsigned int gsi) +{ + set_bit(gsi, s->used_gsi_bitmap); +} + +static void clear_gsi(KVMState *s, unsigned int gsi) +{ + clear_bit(gsi, s->used_gsi_bitmap); +} + +void kvm_init_irq_routing(KVMState *s) +{ + int gsi_count, i; + + gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; + if (gsi_count > 0) { + /* Round up so we can search ints using ffs */ + s->used_gsi_bitmap = bitmap_new(gsi_count); + s->gsi_count = gsi_count; + } + + s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); + s->nr_allocated_irq_routes = 0; + + if (!kvm_direct_msi_allowed) { + for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) { + QTAILQ_INIT(&s->msi_hashtab[i]); + } + } + + kvm_arch_init_irq_routing(s); +} + +void kvm_irqchip_commit_routes(KVMState *s) +{ + int ret; + + if (kvm_gsi_direct_mapping()) { + return; + } + + if (!kvm_gsi_routing_enabled()) { + return; + } + + s->irq_routes->flags = 0; + trace_kvm_irqchip_commit_routes(); + ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); + assert(ret == 0); +} + +static void kvm_add_routing_entry(KVMState *s, + struct kvm_irq_routing_entry *entry) +{ + struct kvm_irq_routing_entry *new; + int n, size; + + if (s->irq_routes->nr == s->nr_allocated_irq_routes) { + n = s->nr_allocated_irq_routes * 2; + if (n < 64) { + n = 64; + } + size = sizeof(struct kvm_irq_routing); + size += n * sizeof(*new); + s->irq_routes = g_realloc(s->irq_routes, size); + s->nr_allocated_irq_routes = n; + } + n = s->irq_routes->nr++; + new = &s->irq_routes->entries[n]; + + *new = *entry; + + set_gsi(s, entry->gsi); +} + +static int kvm_update_routing_entry(KVMState *s, + struct kvm_irq_routing_entry *new_entry) +{ + struct kvm_irq_routing_entry *entry; + int n; + + for (n = 0; n < s->irq_routes->nr; n++) { + entry = &s->irq_routes->entries[n]; + if (entry->gsi != new_entry->gsi) { + continue; + } + + if(!memcmp(entry, new_entry, sizeof *entry)) { + return 0; + } + + *entry = *new_entry; + + return 0; + } + + return -ESRCH; +} + +void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin) +{ + struct kvm_irq_routing_entry e = {}; + + assert(pin < s->gsi_count); + + e.gsi = irq; + e.type = KVM_IRQ_ROUTING_IRQCHIP; + e.flags = 0; + e.u.irqchip.irqchip = irqchip; + e.u.irqchip.pin = pin; + 
kvm_add_routing_entry(s, &e); +} + +void kvm_irqchip_release_virq(KVMState *s, int virq) +{ + struct kvm_irq_routing_entry *e; + int i; + + if (kvm_gsi_direct_mapping()) { + return; + } + + for (i = 0; i < s->irq_routes->nr; i++) { + e = &s->irq_routes->entries[i]; + if (e->gsi == virq) { + s->irq_routes->nr--; + *e = s->irq_routes->entries[s->irq_routes->nr]; + } + } + clear_gsi(s, virq); + kvm_arch_release_virq_post(virq); + trace_kvm_irqchip_release_virq(virq); +} + +static unsigned int kvm_hash_msi(uint32_t data) +{ + /* This is optimized for IA32 MSI layout. However, no other arch shall + * repeat the mistake of not providing a direct MSI injection API. */ + return data & 0xff; +} + +static void kvm_flush_dynamic_msi_routes(KVMState *s) +{ + KVMMSIRoute *route, *next; + unsigned int hash; + + for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) { + QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) { + kvm_irqchip_release_virq(s, route->kroute.gsi); + QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry); + g_free(route); + } + } +} + +static int kvm_irqchip_get_virq(KVMState *s) +{ + int next_virq; + + /* + * PIC and IOAPIC share the first 16 GSI numbers, thus the available + * GSI numbers are more than the number of IRQ route. Allocating a GSI + * number can succeed even though a new route entry cannot be added. + * When this happens, flush dynamic MSI entries to free IRQ route entries. + */ + if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) { + kvm_flush_dynamic_msi_routes(s); + } + + /* Return the lowest unused GSI in the bitmap */ + next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); + if (next_virq >= s->gsi_count) { + return -ENOSPC; + } else { + return next_virq; + } +} + +static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg) +{ + unsigned int hash = kvm_hash_msi(msg.data); + KVMMSIRoute *route; + + QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) { + if (route->kroute.u.msi.address_lo == (uint32_t)msg.address && + route->kroute.u.msi.address_hi == (msg.address >> 32) && + route->kroute.u.msi.data == le32_to_cpu(msg.data)) { + return route; + } + } + return NULL; +} + +int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) +{ + struct kvm_msi msi; + KVMMSIRoute *route; + + if (kvm_direct_msi_allowed) { + msi.address_lo = (uint32_t)msg.address; + msi.address_hi = msg.address >> 32; + msi.data = le32_to_cpu(msg.data); + msi.flags = 0; + memset(msi.pad, 0, sizeof(msi.pad)); + + return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); + } + + route = kvm_lookup_msi_route(s, msg); + if (!route) { + int virq; + + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + route = g_malloc0(sizeof(KVMMSIRoute)); + route->kroute.gsi = virq; + route->kroute.type = KVM_IRQ_ROUTING_MSI; + route->kroute.flags = 0; + route->kroute.u.msi.address_lo = (uint32_t)msg.address; + route->kroute.u.msi.address_hi = msg.address >> 32; + route->kroute.u.msi.data = le32_to_cpu(msg.data); + + kvm_add_routing_entry(s, &route->kroute); + kvm_irqchip_commit_routes(s); + + QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route, + entry); + } + + assert(route->kroute.type == KVM_IRQ_ROUTING_MSI); + + return kvm_set_irq(s, route->kroute.gsi, 1); +} + +int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +{ + struct kvm_irq_routing_entry kroute = {}; + int virq; + MSIMessage msg = {0, 0}; + + if (dev) { + msg = pci_get_msi_message(dev, vector); + } + + if (kvm_gsi_direct_mapping()) { + return 
kvm_arch_msi_data_to_gsi(msg.data); + } + + if (!kvm_gsi_routing_enabled()) { + return -ENOSYS; + } + + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_MSI; + kroute.flags = 0; + kroute.u.msi.address_lo = (uint32_t)msg.address; + kroute.u.msi.address_hi = msg.address >> 32; + kroute.u.msi.data = le32_to_cpu(msg.data); + if (kvm_msi_devid_required()) { + kroute.flags = KVM_MSI_VALID_DEVID; + kroute.u.msi.devid = pci_requester_id(dev); + } + if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) { + kvm_irqchip_release_virq(s, virq); + return -EINVAL; + } + + trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", + vector, virq); + + kvm_add_routing_entry(s, &kroute); + kvm_arch_add_msi_route_post(&kroute, vector, dev); + kvm_irqchip_commit_routes(s); + + return virq; +} + +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, + PCIDevice *dev) +{ + struct kvm_irq_routing_entry kroute = {}; + + if (kvm_gsi_direct_mapping()) { + return 0; + } + + if (!kvm_irqchip_in_kernel()) { + return -ENOSYS; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_MSI; + kroute.flags = 0; + kroute.u.msi.address_lo = (uint32_t)msg.address; + kroute.u.msi.address_hi = msg.address >> 32; + kroute.u.msi.data = le32_to_cpu(msg.data); + if (kvm_msi_devid_required()) { + kroute.flags = KVM_MSI_VALID_DEVID; + kroute.u.msi.devid = pci_requester_id(dev); + } + if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) { + return -EINVAL; + } + + trace_kvm_irqchip_update_msi_route(virq); + + return kvm_update_routing_entry(s, &kroute); +} + +static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq, + bool assign) +{ + struct kvm_irqfd irqfd = { + .fd = fd, + .gsi = virq, + .flags = assign ? 
0 : KVM_IRQFD_FLAG_DEASSIGN, + }; + + if (rfd != -1) { + irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE; + irqfd.resamplefd = rfd; + } + + if (!kvm_irqfds_enabled()) { + return -ENOSYS; + } + + return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd); +} + +int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) +{ + struct kvm_irq_routing_entry kroute = {}; + int virq; + + if (!kvm_gsi_routing_enabled()) { + return -ENOSYS; + } + + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER; + kroute.flags = 0; + kroute.u.adapter.summary_addr = adapter->summary_addr; + kroute.u.adapter.ind_addr = adapter->ind_addr; + kroute.u.adapter.summary_offset = adapter->summary_offset; + kroute.u.adapter.ind_offset = adapter->ind_offset; + kroute.u.adapter.adapter_id = adapter->adapter_id; + + kvm_add_routing_entry(s, &kroute); + + return virq; +} + +int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint) +{ + struct kvm_irq_routing_entry kroute = {}; + int virq; + + if (!kvm_gsi_routing_enabled()) { + return -ENOSYS; + } + if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) { + return -ENOSYS; + } + virq = kvm_irqchip_get_virq(s); + if (virq < 0) { + return virq; + } + + kroute.gsi = virq; + kroute.type = KVM_IRQ_ROUTING_HV_SINT; + kroute.flags = 0; + kroute.u.hv_sint.vcpu = vcpu; + kroute.u.hv_sint.sint = sint; + + kvm_add_routing_entry(s, &kroute); + kvm_irqchip_commit_routes(s); + + return virq; +} + +#else /* !KVM_CAP_IRQ_ROUTING */ + +void kvm_init_irq_routing(KVMState *s) +{ +} + +void kvm_irqchip_release_virq(KVMState *s, int virq) +{ +} + +int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) +{ + abort(); +} + +int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +{ + return -ENOSYS; +} + +int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) +{ + return -ENOSYS; +} + +int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint) +{ + return -ENOSYS; +} + +static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign) +{ + abort(); +} + +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg) +{ + return -ENOSYS; +} +#endif /* !KVM_CAP_IRQ_ROUTING */ + +int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq) +{ + return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), + rn ? 
event_notifier_get_fd(rn) : -1, virq, true); +} + +int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + int virq) +{ + return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq, + false); +} + +int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, + EventNotifier *rn, qemu_irq irq) +{ + gpointer key, gsi; + gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); + + if (!found) { + return -ENXIO; + } + return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi)); +} + +int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, + qemu_irq irq) +{ + gpointer key, gsi; + gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); + + if (!found) { + return -ENXIO; + } + return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi)); +} + +void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi) +{ + g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi)); +} + +static void kvm_irqchip_create(MachineState *machine, KVMState *s) +{ + int ret; + + if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) { + ; + } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) { + ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0); + if (ret < 0) { + fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret)); + exit(1); + } + } else { + return; + } + + /* First probe and see if there's a arch-specific hook to create the + * in-kernel irqchip for us */ + ret = kvm_arch_irqchip_create(machine, s); + if (ret == 0) { + if (machine_kernel_irqchip_split(machine)) { + perror("Split IRQ chip mode not supported."); + exit(1); + } else { + ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP); + } + } + if (ret < 0) { + fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret)); + exit(1); + } + + kvm_kernel_irqchip = true; + /* If we have an in-kernel IRQ chip then we must have asynchronous + * interrupt delivery (though the reverse is not necessarily true) + */ + kvm_async_interrupts_allowed = true; + kvm_halt_in_kernel_allowed = true; + + kvm_init_irq_routing(s); + + s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal); +} + +/* Find number of supported CPUs using the recommended + * procedure from the kernel API documentation to cope with + * older kernels that may be missing capabilities. + */ +static int kvm_recommended_vcpus(KVMState *s) +{ + int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS); + return (ret) ? ret : 4; +} + +static int kvm_max_vcpus(KVMState *s) +{ + int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS); + return (ret) ? ret : kvm_recommended_vcpus(s); +} + +static int kvm_max_vcpu_id(KVMState *s) +{ + int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID); + return (ret) ? 
ret : kvm_max_vcpus(s); +} + +bool kvm_vcpu_id_is_valid(int vcpu_id) +{ + KVMState *s = KVM_STATE(current_machine->accelerator); + return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s); +} + +static int kvm_init(MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + static const char upgrade_note[] = + "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" + "(see http://sourceforge.net/projects/kvm).\n"; + struct { + const char *name; + int num; + } num_cpus[] = { + { "SMP", smp_cpus }, + { "hotpluggable", max_cpus }, + { NULL, } + }, *nc = num_cpus; + int soft_vcpus_limit, hard_vcpus_limit; + KVMState *s; + const KVMCapabilityInfo *missing_cap; + int ret; + int type = 0; + const char *kvm_type; + + s = KVM_STATE(ms->accelerator); + + /* + * On systems where the kernel can support different base page + * sizes, host page size may be different from TARGET_PAGE_SIZE, + * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum + * page size for the system though. + */ + assert(TARGET_PAGE_SIZE <= getpagesize()); + + s->sigmask_len = 8; + +#ifdef KVM_CAP_SET_GUEST_DEBUG + QTAILQ_INIT(&s->kvm_sw_breakpoints); +#endif + QLIST_INIT(&s->kvm_parked_vcpus); + s->vmfd = -1; + s->fd = qemu_open("/dev/kvm", O_RDWR); + if (s->fd == -1) { + fprintf(stderr, "Could not access KVM kernel module: %m\n"); + ret = -errno; + goto err; + } + + ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); + if (ret < KVM_API_VERSION) { + if (ret >= 0) { + ret = -EINVAL; + } + fprintf(stderr, "kvm version too old\n"); + goto err; + } + + if (ret > KVM_API_VERSION) { + ret = -EINVAL; + fprintf(stderr, "kvm version not supported\n"); + goto err; + } + + kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); + s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); + + /* If unspecified, use the default value */ + if (!s->nr_slots) { + s->nr_slots = 32; + } + + /* check the vcpu limits */ + soft_vcpus_limit = kvm_recommended_vcpus(s); + hard_vcpus_limit = kvm_max_vcpus(s); + + while (nc->name) { + if (nc->num > soft_vcpus_limit) { + fprintf(stderr, + "Warning: Number of %s cpus requested (%d) exceeds " + "the recommended cpus supported by KVM (%d)\n", + nc->name, nc->num, soft_vcpus_limit); + + if (nc->num > hard_vcpus_limit) { + fprintf(stderr, "Number of %s cpus requested (%d) exceeds " + "the maximum cpus supported by KVM (%d)\n", + nc->name, nc->num, hard_vcpus_limit); + exit(1); + } + } + nc++; + } + + kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type"); + if (mc->kvm_type) { + type = mc->kvm_type(kvm_type); + } else if (kvm_type) { + ret = -EINVAL; + fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type); + goto err; + } + + do { + ret = kvm_ioctl(s, KVM_CREATE_VM, type); + } while (ret == -EINTR); + + if (ret < 0) { + fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, + strerror(-ret)); + +#ifdef TARGET_S390X + if (ret == -EINVAL) { + fprintf(stderr, + "Host kernel setup problem detected. 
Please verify:\n"); + fprintf(stderr, "- for kernels supporting the switch_amode or" + " user_mode parameters, whether\n"); + fprintf(stderr, + " user space is running in primary address space\n"); + fprintf(stderr, + "- for kernels supporting the vm.allocate_pgste sysctl, " + "whether it is enabled\n"); + } +#endif + goto err; + } + + s->vmfd = ret; + missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); + if (!missing_cap) { + missing_cap = + kvm_check_extension_list(s, kvm_arch_required_capabilities); + } + if (missing_cap) { + ret = -EINVAL; + fprintf(stderr, "kvm does not support %s\n%s", + missing_cap->name, upgrade_note); + goto err; + } + + s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); + + s->broken_set_mem_region = 1; + ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS); + if (ret > 0) { + s->broken_set_mem_region = 0; + } + +#ifdef KVM_CAP_VCPU_EVENTS + s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); +#endif + + s->robust_singlestep = + kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); + +#ifdef KVM_CAP_DEBUGREGS + s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); +#endif + +#ifdef KVM_CAP_IRQ_ROUTING + kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); +#endif + + s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); + + s->irq_set_ioctl = KVM_IRQ_LINE; + if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { + s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; + } + +#ifdef KVM_CAP_READONLY_MEM + kvm_readonly_mem_allowed = + (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); +#endif + + kvm_eventfds_allowed = + (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); + + kvm_irqfds_allowed = + (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); + + kvm_resamplefds_allowed = + (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); + + kvm_vm_attributes_allowed = + (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); + + kvm_ioeventfd_any_length_allowed = + (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0); + + kvm_state = s; + + ret = kvm_arch_init(ms, s); + if (ret < 0) { + goto err; + } + + if (machine_kernel_irqchip_allowed(ms)) { + kvm_irqchip_create(ms, s); + } + + if (kvm_eventfds_allowed) { + s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; + s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; + } + s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region; + s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region; + + kvm_memory_listener_register(s, &s->memory_listener, + &address_space_memory, 0); + memory_listener_register(&kvm_io_listener, + &address_space_io); + + s->many_ioeventfds = kvm_check_many_ioeventfds(); + + cpu_interrupt_handler = kvm_handle_interrupt; + + return 0; + +err: + assert(ret < 0); + if (s->vmfd >= 0) { + close(s->vmfd); + } + if (s->fd != -1) { + close(s->fd); + } + g_free(s->memory_listener.slots); + + return ret; +} + +void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len) +{ + s->sigmask_len = sigmask_len; +} + +static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction, + int size, uint32_t count) +{ + int i; + uint8_t *ptr = data; + + for (i = 0; i < count; i++) { + address_space_rw(&address_space_io, port, attrs, + ptr, size, + direction == KVM_EXIT_IO_OUT); + ptr += size; + } +} + +static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) +{ + fprintf(stderr, "KVM internal error. 
Suberror: %d\n", + run->internal.suberror); + + if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { + int i; + + for (i = 0; i < run->internal.ndata; ++i) { + fprintf(stderr, "extra data[%d]: %"PRIx64"\n", + i, (uint64_t)run->internal.data[i]); + } + } + if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { + fprintf(stderr, "emulation failure\n"); + if (!kvm_arch_stop_on_emulation_error(cpu)) { + cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE); + return EXCP_INTERRUPT; + } + } + /* FIXME: Should trigger a qmp message to let management know + * something went wrong. + */ + return -1; +} + +void kvm_flush_coalesced_mmio_buffer(void) +{ + KVMState *s = kvm_state; + + if (s->coalesced_flush_in_progress) { + return; + } + + s->coalesced_flush_in_progress = true; + + if (s->coalesced_mmio_ring) { + struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring; + while (ring->first != ring->last) { + struct kvm_coalesced_mmio *ent; + + ent = &ring->coalesced_mmio[ring->first]; + + cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); + smp_wmb(); + ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; + } + } + + s->coalesced_flush_in_progress = false; +} + +static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) +{ + if (!cpu->kvm_vcpu_dirty) { + kvm_arch_get_registers(cpu); + cpu->kvm_vcpu_dirty = true; + } +} + +void kvm_cpu_synchronize_state(CPUState *cpu) +{ + if (!cpu->kvm_vcpu_dirty) { + run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL); + } +} + +static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) +{ + kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); + cpu->kvm_vcpu_dirty = false; +} + +void kvm_cpu_synchronize_post_reset(CPUState *cpu) +{ + run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL); +} + +static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) +{ + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + cpu->kvm_vcpu_dirty = false; +} + +void kvm_cpu_synchronize_post_init(CPUState *cpu) +{ + run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL); +} + +static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) +{ + cpu->kvm_vcpu_dirty = true; +} + +void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu) +{ + run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); +} + +#ifdef KVM_HAVE_MCE_INJECTION +static __thread void *pending_sigbus_addr; +static __thread int pending_sigbus_code; +static __thread bool have_sigbus_pending; +#endif + +static void kvm_cpu_kick(CPUState *cpu) +{ + atomic_set(&cpu->kvm_run->immediate_exit, 1); +} + +static void kvm_cpu_kick_self(void) +{ + if (kvm_immediate_exit) { + kvm_cpu_kick(current_cpu); + } else { + qemu_cpu_kick_self(); + } +} + +static void kvm_eat_signals(CPUState *cpu) +{ + struct timespec ts = { 0, 0 }; + siginfo_t siginfo; + sigset_t waitset; + sigset_t chkset; + int r; + + if (kvm_immediate_exit) { + atomic_set(&cpu->kvm_run->immediate_exit, 0); + /* Write kvm_run->immediate_exit before the cpu->exit_request + * write in kvm_cpu_exec. 
+ */ + smp_wmb(); + return; + } + + sigemptyset(&waitset); + sigaddset(&waitset, SIG_IPI); + + do { + r = sigtimedwait(&waitset, &siginfo, &ts); + if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { + perror("sigtimedwait"); + exit(1); + } + + r = sigpending(&chkset); + if (r == -1) { + perror("sigpending"); + exit(1); + } + } while (sigismember(&chkset, SIG_IPI)); +} + +int kvm_cpu_exec(CPUState *cpu) +{ + struct kvm_run *run = cpu->kvm_run; + int ret, run_ret; + + DPRINTF("kvm_cpu_exec()\n"); + + if (kvm_arch_process_async_events(cpu)) { + atomic_set(&cpu->exit_request, 0); + return EXCP_HLT; + } + + qemu_mutex_unlock_iothread(); + cpu_exec_start(cpu); + + do { + MemTxAttrs attrs; + + if (cpu->kvm_vcpu_dirty) { + kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); + cpu->kvm_vcpu_dirty = false; + } + + kvm_arch_pre_run(cpu, run); + if (atomic_read(&cpu->exit_request)) { + DPRINTF("interrupt exit requested\n"); + /* + * KVM requires us to reenter the kernel after IO exits to complete + * instruction emulation. This self-signal will ensure that we + * leave ASAP again. + */ + kvm_cpu_kick_self(); + } + + /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. + * Matching barrier in kvm_eat_signals. + */ + smp_rmb(); + + run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0); + + attrs = kvm_arch_post_run(cpu, run); + +#ifdef KVM_HAVE_MCE_INJECTION + if (unlikely(have_sigbus_pending)) { + qemu_mutex_lock_iothread(); + kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code, + pending_sigbus_addr); + have_sigbus_pending = false; + qemu_mutex_unlock_iothread(); + } +#endif + + if (run_ret < 0) { + if (run_ret == -EINTR || run_ret == -EAGAIN) { + DPRINTF("io window exit\n"); + kvm_eat_signals(cpu); + ret = EXCP_INTERRUPT; + break; + } + fprintf(stderr, "error: kvm run failed %s\n", + strerror(-run_ret)); +#ifdef TARGET_PPC + if (run_ret == -EBUSY) { + fprintf(stderr, + "This is probably because your SMT is enabled.\n" + "VCPU can only run on primary threads with all " + "secondary threads offline.\n"); + } +#endif + ret = -1; + break; + } + + trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); + switch (run->exit_reason) { + case KVM_EXIT_IO: + DPRINTF("handle_io\n"); + /* Called outside BQL */ + kvm_handle_io(run->io.port, attrs, + (uint8_t *)run + run->io.data_offset, + run->io.direction, + run->io.size, + run->io.count); + ret = 0; + break; + case KVM_EXIT_MMIO: + DPRINTF("handle_mmio\n"); + /* Called outside BQL */ + address_space_rw(&address_space_memory, + run->mmio.phys_addr, attrs, + run->mmio.data, + run->mmio.len, + run->mmio.is_write); + ret = 0; + break; + case KVM_EXIT_IRQ_WINDOW_OPEN: + DPRINTF("irq_window_open\n"); + ret = EXCP_INTERRUPT; + break; + case KVM_EXIT_SHUTDOWN: + DPRINTF("shutdown\n"); + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + ret = EXCP_INTERRUPT; + break; + case KVM_EXIT_UNKNOWN: + fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n", + (uint64_t)run->hw.hardware_exit_reason); + ret = -1; + break; + case KVM_EXIT_INTERNAL_ERROR: + ret = kvm_handle_internal_error(cpu, run); + break; + case KVM_EXIT_SYSTEM_EVENT: + switch (run->system_event.type) { + case KVM_SYSTEM_EVENT_SHUTDOWN: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + ret = EXCP_INTERRUPT; + break; + case KVM_SYSTEM_EVENT_RESET: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + ret = EXCP_INTERRUPT; + break; + case KVM_SYSTEM_EVENT_CRASH: + kvm_cpu_synchronize_state(cpu); + qemu_mutex_lock_iothread(); + 
qemu_system_guest_panicked(cpu_get_crash_info(cpu)); + qemu_mutex_unlock_iothread(); + ret = 0; + break; + default: + DPRINTF("kvm_arch_handle_exit\n"); + ret = kvm_arch_handle_exit(cpu, run); + break; + } + break; + default: + DPRINTF("kvm_arch_handle_exit\n"); + ret = kvm_arch_handle_exit(cpu, run); + break; + } + } while (ret == 0); + + cpu_exec_end(cpu); + qemu_mutex_lock_iothread(); + + if (ret < 0) { + cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE); + vm_stop(RUN_STATE_INTERNAL_ERROR); + } + + atomic_set(&cpu->exit_request, 0); + return ret; +} + +int kvm_ioctl(KVMState *s, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_ioctl(type, arg); + ret = ioctl(s->fd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_vm_ioctl(KVMState *s, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_vm_ioctl(type, arg); + ret = ioctl(s->vmfd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_vcpu_ioctl(CPUState *cpu, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg); + ret = ioctl(cpu->kvm_fd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_device_ioctl(int fd, int type, ...) +{ + int ret; + void *arg; + va_list ap; + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + trace_kvm_device_ioctl(fd, type, arg); + ret = ioctl(fd, type, arg); + if (ret == -1) { + ret = -errno; + } + return ret; +} + +int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr) +{ + int ret; + struct kvm_device_attr attribute = { + .group = group, + .attr = attr, + }; + + if (!kvm_vm_attributes_allowed) { + return 0; + } + + ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute); + /* kvm returns 0 on success for HAS_DEVICE_ATTR */ + return ret ? 0 : 1; +} + +int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr) +{ + struct kvm_device_attr attribute = { + .group = group, + .attr = attr, + .flags = 0, + }; + + return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1; +} + +int kvm_device_access(int fd, int group, uint64_t attr, + void *val, bool write, Error **errp) +{ + struct kvm_device_attr kvmattr; + int err; + + kvmattr.flags = 0; + kvmattr.group = group; + kvmattr.attr = attr; + kvmattr.addr = (uintptr_t)val; + + err = kvm_device_ioctl(fd, + write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR, + &kvmattr); + if (err < 0) { + error_setg_errno(errp, -err, + "KVM_%s_DEVICE_ATTR failed: Group %d " + "attr 0x%016" PRIx64, + write ? 
"SET" : "GET", group, attr); + } + return err; +} + +/* Return 1 on success, 0 on failure */ +int kvm_has_sync_mmu(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); +} + +int kvm_has_vcpu_events(void) +{ + return kvm_state->vcpu_events; +} + +int kvm_has_robust_singlestep(void) +{ + return kvm_state->robust_singlestep; +} + +int kvm_has_debugregs(void) +{ + return kvm_state->debugregs; +} + +int kvm_has_many_ioeventfds(void) +{ + if (!kvm_enabled()) { + return 0; + } + return kvm_state->many_ioeventfds; +} + +int kvm_has_gsi_routing(void) +{ +#ifdef KVM_CAP_IRQ_ROUTING + return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING); +#else + return false; +#endif +} + +int kvm_has_intx_set_mask(void) +{ + return kvm_state->intx_set_mask; +} + +#ifdef KVM_CAP_SET_GUEST_DEBUG +struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, + target_ulong pc) +{ + struct kvm_sw_breakpoint *bp; + + QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) { + if (bp->pc == pc) { + return bp; + } + } + return NULL; +} + +int kvm_sw_breakpoints_active(CPUState *cpu) +{ + return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints); +} + +struct kvm_set_guest_debug_data { + struct kvm_guest_debug dbg; + int err; +}; + +static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data) +{ + struct kvm_set_guest_debug_data *dbg_data = + (struct kvm_set_guest_debug_data *) data.host_ptr; + + dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, + &dbg_data->dbg); +} + +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + struct kvm_set_guest_debug_data data; + + data.dbg.control = reinject_trap; + + if (cpu->singlestep_enabled) { + data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; + } + kvm_arch_update_guest_debug(cpu, &data.dbg); + + run_on_cpu(cpu, kvm_invoke_set_guest_debug, + RUN_ON_CPU_HOST_PTR(&data)); + return data.err; +} + +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + struct kvm_sw_breakpoint *bp; + int err; + + if (type == GDB_BREAKPOINT_SW) { + bp = kvm_find_sw_breakpoint(cpu, addr); + if (bp) { + bp->use_count++; + return 0; + } + + bp = g_malloc(sizeof(struct kvm_sw_breakpoint)); + bp->pc = addr; + bp->use_count = 1; + err = kvm_arch_insert_sw_breakpoint(cpu, bp); + if (err) { + g_free(bp); + return err; + } + + QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); + } else { + err = kvm_arch_insert_hw_breakpoint(addr, len, type); + if (err) { + return err; + } + } + + CPU_FOREACH(cpu) { + err = kvm_update_guest_debug(cpu, 0); + if (err) { + return err; + } + } + return 0; +} + +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + struct kvm_sw_breakpoint *bp; + int err; + + if (type == GDB_BREAKPOINT_SW) { + bp = kvm_find_sw_breakpoint(cpu, addr); + if (!bp) { + return -ENOENT; + } + + if (bp->use_count > 1) { + bp->use_count--; + return 0; + } + + err = kvm_arch_remove_sw_breakpoint(cpu, bp); + if (err) { + return err; + } + + QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); + g_free(bp); + } else { + err = kvm_arch_remove_hw_breakpoint(addr, len, type); + if (err) { + return err; + } + } + + CPU_FOREACH(cpu) { + err = kvm_update_guest_debug(cpu, 0); + if (err) { + return err; + } + } + return 0; +} + +void kvm_remove_all_breakpoints(CPUState *cpu) +{ + struct kvm_sw_breakpoint *bp, *next; + KVMState *s = cpu->kvm_state; + CPUState *tmpcpu; + + QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, 
next) { + if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) { + /* Try harder to find a CPU that currently sees the breakpoint. */ + CPU_FOREACH(tmpcpu) { + if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) { + break; + } + } + } + QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry); + g_free(bp); + } + kvm_arch_remove_all_hw_breakpoints(); + + CPU_FOREACH(cpu) { + kvm_update_guest_debug(cpu, 0); + } +} + +#else /* !KVM_CAP_SET_GUEST_DEBUG */ + +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + return -EINVAL; +} + +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +void kvm_remove_all_breakpoints(CPUState *cpu) +{ +} +#endif /* !KVM_CAP_SET_GUEST_DEBUG */ + +static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset) +{ + KVMState *s = kvm_state; + struct kvm_signal_mask *sigmask; + int r; + + sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset)); + + sigmask->len = s->sigmask_len; + memcpy(sigmask->sigset, sigset, sizeof(*sigset)); + r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask); + g_free(sigmask); + + return r; +} + +static void kvm_ipi_signal(int sig) +{ + if (current_cpu) { + assert(kvm_immediate_exit); + kvm_cpu_kick(current_cpu); + } +} + +void kvm_init_cpu_signals(CPUState *cpu) +{ + int r; + sigset_t set; + struct sigaction sigact; + + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_handler = kvm_ipi_signal; + sigaction(SIG_IPI, &sigact, NULL); + + pthread_sigmask(SIG_BLOCK, NULL, &set); +#if defined KVM_HAVE_MCE_INJECTION + sigdelset(&set, SIGBUS); + pthread_sigmask(SIG_SETMASK, &set, NULL); +#endif + sigdelset(&set, SIG_IPI); + if (kvm_immediate_exit) { + r = pthread_sigmask(SIG_SETMASK, &set, NULL); + } else { + r = kvm_set_signal_mask(cpu, &set); + } + if (r) { + fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); + exit(1); + } +} + +/* Called asynchronously in VCPU thread. */ +int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) +{ +#ifdef KVM_HAVE_MCE_INJECTION + if (have_sigbus_pending) { + return 1; + } + have_sigbus_pending = true; + pending_sigbus_addr = addr; + pending_sigbus_code = code; + atomic_set(&cpu->exit_request, 1); + return 0; +#else + return 1; +#endif +} + +/* Called synchronously (via signalfd) in main thread. */ +int kvm_on_sigbus(int code, void *addr) +{ +#ifdef KVM_HAVE_MCE_INJECTION + /* Action required MCE kills the process if SIGBUS is blocked. Because + * that's what happens in the I/O thread, where we handle MCE via signalfd, + * we can only get action optional here. + */ + assert(code != BUS_MCEERR_AR); + kvm_arch_on_sigbus_vcpu(first_cpu, code, addr); + return 0; +#else + return 1; +#endif +} + +int kvm_create_device(KVMState *s, uint64_t type, bool test) +{ + int ret; + struct kvm_create_device create_dev; + + create_dev.type = type; + create_dev.fd = -1; + create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0; + + if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) { + return -ENOTSUP; + } + + ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev); + if (ret) { + return ret; + } + + return test ? 
0 : create_dev.fd; +} + +bool kvm_device_supported(int vmfd, uint64_t type) +{ + struct kvm_create_device create_dev = { + .type = type, + .fd = -1, + .flags = KVM_CREATE_DEVICE_TEST, + }; + + if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) { + return false; + } + + return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0); +} + +int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source) +{ + struct kvm_one_reg reg; + int r; + + reg.id = id; + reg.addr = (uintptr_t) source; + r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); + if (r) { + trace_kvm_failed_reg_set(id, strerror(-r)); + } + return r; +} + +int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) +{ + struct kvm_one_reg reg; + int r; + + reg.id = id; + reg.addr = (uintptr_t) target; + r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); + if (r) { + trace_kvm_failed_reg_get(id, strerror(-r)); + } + return r; +} + +static void kvm_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "KVM"; + ac->init_machine = kvm_init; + ac->allowed = &kvm_allowed; +} + +static const TypeInfo kvm_accel_type = { + .name = TYPE_KVM_ACCEL, + .parent = TYPE_ACCEL, + .class_init = kvm_accel_class_init, + .instance_size = sizeof(KVMState), +}; + +static void kvm_type_init(void) +{ + type_register_static(&kvm_accel_type); +} + +type_init(kvm_type_init); diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events new file mode 100644 index 0000000000..f89ba5578d --- /dev/null +++ b/accel/kvm/trace-events @@ -0,0 +1,15 @@ +# Trace events for debugging and performance instrumentation + +# kvm-all.c +kvm_ioctl(int type, void *arg) "type 0x%x, arg %p" +kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p" +kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p" +kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d" +kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p" +kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s" +kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s" +kvm_irqchip_commit_routes(void) "" +kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d" +kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d" +kvm_irqchip_release_virq(int virq) "virq %d" + diff --git a/accel/stubs/Makefile.objs b/accel/stubs/Makefile.objs new file mode 100644 index 0000000000..bd5794f222 --- /dev/null +++ b/accel/stubs/Makefile.objs @@ -0,0 +1 @@ +obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c new file mode 100644 index 0000000000..ef0c7346af --- /dev/null +++ b/accel/stubs/kvm-stub.c @@ -0,0 +1,158 @@ +/* + * QEMU KVM stub + * + * Copyright Red Hat, Inc. 2010 + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
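
The kvm_ioctl(), kvm_vm_ioctl(), kvm_vcpu_ioctl() and kvm_device_ioctl() wrappers earlier in kvm-all.c all follow the same convention: pull a single pointer argument out of the va_list, trace it, and fold errno into the return value so callers can simply test for ret < 0. A standalone sketch of that convention follows; wrapped_ioctl() is an invented name and the snippet is illustrative, not QEMU code.

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Return the ioctl() result, or -errno on failure, mirroring the
 * kvm_*_ioctl() convention used above. */
static int wrapped_ioctl(int fd, unsigned long request, ...)
{
    void *arg;
    va_list ap;
    int ret;

    va_start(ap, request);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(fd, request, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int main(void)
{
    /* fd -1 is deliberately invalid; expect a negative errno back. */
    printf("wrapped_ioctl returned %d\n", wrapped_ioctl(-1, 0, NULL));
    return 0;
}

Returning -errno rather than -1 is what lets the error paths above print strerror(-ret) without consulting errno again later.
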
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "sysemu/kvm.h" + +#ifndef CONFIG_USER_ONLY +#include "hw/pci/msi.h" +#endif + +KVMState *kvm_state; +bool kvm_kernel_irqchip; +bool kvm_async_interrupts_allowed; +bool kvm_eventfds_allowed; +bool kvm_irqfds_allowed; +bool kvm_resamplefds_allowed; +bool kvm_msi_via_irqfd_allowed; +bool kvm_gsi_routing_allowed; +bool kvm_gsi_direct_mapping; +bool kvm_allowed; +bool kvm_readonly_mem_allowed; +bool kvm_ioeventfd_any_length_allowed; +bool kvm_msi_use_devid; + +int kvm_destroy_vcpu(CPUState *cpu) +{ + return -ENOSYS; +} + +int kvm_init_vcpu(CPUState *cpu) +{ + return -ENOSYS; +} + +void kvm_flush_coalesced_mmio_buffer(void) +{ +} + +void kvm_cpu_synchronize_state(CPUState *cpu) +{ +} + +void kvm_cpu_synchronize_post_reset(CPUState *cpu) +{ +} + +void kvm_cpu_synchronize_post_init(CPUState *cpu) +{ +} + +int kvm_cpu_exec(CPUState *cpu) +{ + abort(); +} + +int kvm_has_sync_mmu(void) +{ + return 0; +} + +int kvm_has_many_ioeventfds(void) +{ + return 0; +} + +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + return -ENOSYS; +} + +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, + target_ulong len, int type) +{ + return -EINVAL; +} + +void kvm_remove_all_breakpoints(CPUState *cpu) +{ +} + +int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) +{ + return 1; +} + +int kvm_on_sigbus(int code, void *addr) +{ + return 1; +} + +#ifndef CONFIG_USER_ONLY +int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev) +{ + return -ENOSYS; +} + +void kvm_init_irq_routing(KVMState *s) +{ +} + +void kvm_irqchip_release_virq(KVMState *s, int virq) +{ +} + +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, + PCIDevice *dev) +{ + return -ENOSYS; +} + +void kvm_irqchip_commit_routes(KVMState *s) +{ +} + +int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) +{ + return -ENOSYS; +} + +int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq) +{ + return -ENOSYS; +} + +int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + int virq) +{ + return -ENOSYS; +} + +bool kvm_has_free_slot(MachineState *ms) +{ + return false; +} + +void kvm_init_cpu_signals(CPUState *cpu) +{ + abort(); +} +#endif diff --git a/accel/tcg/Makefile.objs b/accel/tcg/Makefile.objs new file mode 100644 index 0000000000..f173cd5397 --- /dev/null +++ b/accel/tcg/Makefile.objs @@ -0,0 +1,3 @@ +obj-$(CONFIG_SOFTMMU) += tcg-all.o +obj-$(CONFIG_SOFTMMU) += cputlb.o +obj-y += cpu-exec.o cpu-exec-common.o translate-all.o translate-common.o diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c new file mode 100644 index 0000000000..e81da276bb --- /dev/null +++ b/accel/tcg/cpu-exec-common.c @@ -0,0 +1,82 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "sysemu/cpus.h" +#include "exec/exec-all.h" +#include "exec/memory-internal.h" + +/* exit the current TB, but without causing any exception to be raised */ +void cpu_loop_exit_noexc(CPUState *cpu) +{ + /* XXX: restore cpu registers saved in host registers */ + + cpu->exception_index = -1; + siglongjmp(cpu->jmp_env, 1); +} + +#if defined(CONFIG_SOFTMMU) +void cpu_reloading_memory_map(void) +{ + if (qemu_in_vcpu_thread() && current_cpu->running) { + /* The guest can in theory prolong the RCU critical section as long + * as it feels like. The major problem with this is that because it + * can do multiple reconfigurations of the memory map within the + * critical section, we could potentially accumulate an unbounded + * collection of memory data structures awaiting reclamation. + * + * Because the only thing we're currently protecting with RCU is the + * memory data structures, it's sufficient to break the critical section + * in this callback, which we know will get called every time the + * memory map is rearranged. + * + * (If we add anything else in the system that uses RCU to protect + * its data structures, we will need to implement some other mechanism + * to force TCG CPUs to exit the critical section, at which point this + * part of this callback might become unnecessary.) + * + * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which + * only protects cpu->as->dispatch. Since we know our caller is about + * to reload it, it's safe to split the critical section. + */ + rcu_read_unlock(); + rcu_read_lock(); + } +} +#endif + +void cpu_loop_exit(CPUState *cpu) +{ + siglongjmp(cpu->jmp_env, 1); +} + +void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) +{ + if (pc) { + cpu_restore_state(cpu, pc); + } + siglongjmp(cpu->jmp_env, 1); +} + +void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) +{ + cpu->exception_index = EXCP_ATOMIC; + cpu_loop_exit_restore(cpu, pc); +} diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c new file mode 100644 index 0000000000..3581618bc0 --- /dev/null +++ b/accel/tcg/cpu-exec.c @@ -0,0 +1,683 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
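
cpu_loop_exit(), cpu_loop_exit_restore() and cpu_loop_exit_noexc() above never return: they siglongjmp() back to the sigsetjmp() point that cpu_exec() establishes further down in cpu-exec.c. A minimal, self-contained illustration of that unwinding pattern (all names invented, not QEMU code):

#define _POSIX_C_SOURCE 200809L
#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf jmp_env;

/* Deeply nested code "exits the loop" by jumping straight back to the
 * sigsetjmp() point instead of returning through every caller, just as
 * cpu_loop_exit() does with cpu->jmp_env. */
static void fake_loop_exit(void)
{
    siglongjmp(jmp_env, 1);
}

static void run_one_block(int i)
{
    if (i == 3) {
        fake_loop_exit();            /* e.g. an exception was raised */
    }
    printf("executed block %d\n", i);
}

int main(void)
{
    int i;

    if (sigsetjmp(jmp_env, 0) != 0) {
        /* i is not read after the jump, so it needs no volatile here. */
        printf("unwound back to the top of the loop\n");
        return 0;
    }
    for (i = 0; ; i++) {
        run_one_block(i);
    }
}

Passing 0 as the second sigsetjmp() argument skips saving the signal mask, matching how cpu->jmp_env is used in cpu_exec().
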
+ */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "trace.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg.h" +#include "qemu/atomic.h" +#include "sysemu/qtest.h" +#include "qemu/timer.h" +#include "exec/address-spaces.h" +#include "qemu/rcu.h" +#include "exec/tb-hash.h" +#include "exec/log.h" +#include "qemu/main-loop.h" +#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) +#include "hw/i386/apic.h" +#endif +#include "sysemu/cpus.h" +#include "sysemu/replay.h" + +/* -icount align implementation. */ + +typedef struct SyncClocks { + int64_t diff_clk; + int64_t last_cpu_icount; + int64_t realtime_clock; +} SyncClocks; + +#if !defined(CONFIG_USER_ONLY) +/* Allow the guest to have a max 3ms advance. + * The difference between the 2 clocks could therefore + * oscillate around 0. + */ +#define VM_CLOCK_ADVANCE 3000000 +#define THRESHOLD_REDUCE 1.5 +#define MAX_DELAY_PRINT_RATE 2000000000LL +#define MAX_NB_PRINTS 100 + +static void align_clocks(SyncClocks *sc, const CPUState *cpu) +{ + int64_t cpu_icount; + + if (!icount_align_option) { + return; + } + + cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low; + sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount); + sc->last_cpu_icount = cpu_icount; + + if (sc->diff_clk > VM_CLOCK_ADVANCE) { +#ifndef _WIN32 + struct timespec sleep_delay, rem_delay; + sleep_delay.tv_sec = sc->diff_clk / 1000000000LL; + sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL; + if (nanosleep(&sleep_delay, &rem_delay) < 0) { + sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec; + } else { + sc->diff_clk = 0; + } +#else + Sleep(sc->diff_clk / SCALE_MS); + sc->diff_clk = 0; +#endif + } +} + +static void print_delay(const SyncClocks *sc) +{ + static float threshold_delay; + static int64_t last_realtime_clock; + static int nb_prints; + + if (icount_align_option && + sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE && + nb_prints < MAX_NB_PRINTS) { + if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) || + (-sc->diff_clk / (float)1000000000LL < + (threshold_delay - THRESHOLD_REDUCE))) { + threshold_delay = (-sc->diff_clk / 1000000000LL) + 1; + printf("Warning: The guest is now late by %.1f to %.1f seconds\n", + threshold_delay - 1, + threshold_delay); + nb_prints++; + last_realtime_clock = sc->realtime_clock; + } + } +} + +static void init_delay_params(SyncClocks *sc, + const CPUState *cpu) +{ + if (!icount_align_option) { + return; + } + sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); + sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; + sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low; + if (sc->diff_clk < max_delay) { + max_delay = sc->diff_clk; + } + if (sc->diff_clk > max_advance) { + max_advance = sc->diff_clk; + } + + /* Print every 2s max if the guest is late. 
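
align_clocks() above turns the guest's surplus over real time into a nanosleep(), keeping whatever was left unslept when the sleep is interrupted. The conversion on its own, as a hedged sketch (sleep_surplus_ns() is an invented helper, not QEMU code):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

/* Sleep away a positive nanosecond surplus; on interruption return the
 * part that was not slept, the same bookkeeping align_clocks() applies
 * to sc->diff_clk. */
static long long sleep_surplus_ns(long long diff_ns)
{
    struct timespec delay, rem;

    if (diff_ns <= 0) {
        return diff_ns;
    }
    delay.tv_sec = diff_ns / 1000000000LL;
    delay.tv_nsec = diff_ns % 1000000000LL;
    if (nanosleep(&delay, &rem) < 0) {
        return rem.tv_sec * 1000000000LL + rem.tv_nsec;
    }
    return 0;
}

int main(void)
{
    printf("left unslept: %lld ns\n", sleep_surplus_ns(5 * 1000000LL));
    return 0;
}
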
We limit the number + of printed messages to NB_PRINT_MAX(currently 100) */ + print_delay(sc); +} +#else +static void align_clocks(SyncClocks *sc, const CPUState *cpu) +{ +} + +static void init_delay_params(SyncClocks *sc, const CPUState *cpu) +{ +} +#endif /* CONFIG USER ONLY */ + +/* Execute a TB, and fix up the CPU state afterwards if necessary */ +static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb) +{ + CPUArchState *env = cpu->env_ptr; + uintptr_t ret; + TranslationBlock *last_tb; + int tb_exit; + uint8_t *tb_ptr = itb->tc_ptr; + + qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc, + "Trace %p [%d: " TARGET_FMT_lx "] %s\n", + itb->tc_ptr, cpu->cpu_index, itb->pc, + lookup_symbol(itb->pc)); + +#if defined(DEBUG_DISAS) + if (qemu_loglevel_mask(CPU_LOG_TB_CPU) + && qemu_log_in_addr_range(itb->pc)) { + qemu_log_lock(); +#if defined(TARGET_I386) + log_cpu_state(cpu, CPU_DUMP_CCOP); +#else + log_cpu_state(cpu, 0); +#endif + qemu_log_unlock(); + } +#endif /* DEBUG_DISAS */ + + cpu->can_do_io = !use_icount; + ret = tcg_qemu_tb_exec(env, tb_ptr); + cpu->can_do_io = 1; + last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); + tb_exit = ret & TB_EXIT_MASK; + trace_exec_tb_exit(last_tb, tb_exit); + + if (tb_exit > TB_EXIT_IDX1) { + /* We didn't start executing this TB (eg because the instruction + * counter hit zero); we must restore the guest PC to the address + * of the start of the TB. + */ + CPUClass *cc = CPU_GET_CLASS(cpu); + qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, + "Stopped execution of TB chain before %p [" + TARGET_FMT_lx "] %s\n", + last_tb->tc_ptr, last_tb->pc, + lookup_symbol(last_tb->pc)); + if (cc->synchronize_from_tb) { + cc->synchronize_from_tb(cpu, last_tb); + } else { + assert(cc->set_pc); + cc->set_pc(cpu, last_tb->pc); + } + } + return ret; +} + +#ifndef CONFIG_USER_ONLY +/* Execute the code without caching the generated code. An interpreter + could be used if available. */ +static void cpu_exec_nocache(CPUState *cpu, int max_cycles, + TranslationBlock *orig_tb, bool ignore_icount) +{ + TranslationBlock *tb; + + /* Should never happen. + We only end up here when an existing TB is too long. */ + if (max_cycles > CF_COUNT_MASK) + max_cycles = CF_COUNT_MASK; + + tb_lock(); + tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags, + max_cycles | CF_NOCACHE + | (ignore_icount ? CF_IGNORE_ICOUNT : 0)); + tb->orig_tb = orig_tb; + tb_unlock(); + + /* execute the generated code */ + trace_exec_tb_nocache(tb, tb->pc); + cpu_tb_exec(cpu, tb); + + tb_lock(); + tb_phys_invalidate(tb, -1); + tb_free(tb); + tb_unlock(); +} +#endif + +static void cpu_exec_step(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUArchState *env = (CPUArchState *)cpu->env_ptr; + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + if (sigsetjmp(cpu->jmp_env, 0) == 0) { + mmap_lock(); + tb_lock(); + tb = tb_gen_code(cpu, pc, cs_base, flags, + 1 | CF_NOCACHE | CF_IGNORE_ICOUNT); + tb->orig_tb = NULL; + tb_unlock(); + mmap_unlock(); + + cc->cpu_exec_enter(cpu); + /* execute the generated code */ + trace_exec_tb_nocache(tb, pc); + cpu_tb_exec(cpu, tb); + cc->cpu_exec_exit(cpu); + + tb_lock(); + tb_phys_invalidate(tb, -1); + tb_free(tb); + tb_unlock(); + } else { + /* We may have exited due to another problem here, so we need + * to reset any tb_locks we may have taken but didn't release. + * The mmap_lock is dropped by tb_gen_code if it runs out of + * memory. 
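
The value cpu_tb_exec() gets back from tcg_qemu_tb_exec() above is a TranslationBlock pointer with the exit status folded into its low bits (TB_EXIT_MASK), which works because TBs are aligned well past two bits. The packing trick in isolation, with toy types and invented names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXIT_MASK 3u            /* two low bits carry the exit status */

struct block { int id; };

/* Pack a pointer and a 2-bit status into one word; valid because the
 * pointed-to object is aligned to at least 4 bytes. */
static uintptr_t pack(struct block *b, unsigned status)
{
    assert(((uintptr_t)b & EXIT_MASK) == 0);
    assert(status <= EXIT_MASK);
    return (uintptr_t)b | status;
}

int main(void)
{
    static struct block blk = { .id = 42 };
    uintptr_t ret = pack(&blk, 2);

    struct block *last = (struct block *)(ret & ~(uintptr_t)EXIT_MASK);
    unsigned exit_status = ret & EXIT_MASK;

    printf("block %d, exit status %u\n", last->id, exit_status);
    return 0;
}
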
+ */ +#ifndef CONFIG_SOFTMMU + tcg_debug_assert(!have_mmap_lock()); +#endif + tb_lock_reset(); + } +} + +void cpu_exec_step_atomic(CPUState *cpu) +{ + start_exclusive(); + + /* Since we got here, we know that parallel_cpus must be true. */ + parallel_cpus = false; + cpu_exec_step(cpu); + parallel_cpus = true; + + end_exclusive(); +} + +struct tb_desc { + target_ulong pc; + target_ulong cs_base; + CPUArchState *env; + tb_page_addr_t phys_page1; + uint32_t flags; +}; + +static bool tb_cmp(const void *p, const void *d) +{ + const TranslationBlock *tb = p; + const struct tb_desc *desc = d; + + if (tb->pc == desc->pc && + tb->page_addr[0] == desc->phys_page1 && + tb->cs_base == desc->cs_base && + tb->flags == desc->flags && + !atomic_read(&tb->invalid)) { + /* check next page if needed */ + if (tb->page_addr[1] == -1) { + return true; + } else { + tb_page_addr_t phys_page2; + target_ulong virt_page2; + + virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + phys_page2 = get_page_addr_code(desc->env, virt_page2); + if (tb->page_addr[1] == phys_page2) { + return true; + } + } + } + return false; +} + +TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags) +{ + tb_page_addr_t phys_pc; + struct tb_desc desc; + uint32_t h; + + desc.env = (CPUArchState *)cpu->env_ptr; + desc.cs_base = cs_base; + desc.flags = flags; + desc.pc = pc; + phys_pc = get_page_addr_code(desc.env, pc); + desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; + h = tb_hash_func(phys_pc, pc, flags); + return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h); +} + +static inline TranslationBlock *tb_find(CPUState *cpu, + TranslationBlock *last_tb, + int tb_exit) +{ + CPUArchState *env = (CPUArchState *)cpu->env_ptr; + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags; + bool have_tb_lock = false; + + /* we record a subset of the CPU state. It will + always be the same before a given translated block + is executed. */ + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]); + if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || + tb->flags != flags)) { + tb = tb_htable_lookup(cpu, pc, cs_base, flags); + if (!tb) { + + /* mmap_lock is needed by tb_gen_code, and mmap_lock must be + * taken outside tb_lock. As system emulation is currently + * single threaded the locks are NOPs. + */ + mmap_lock(); + tb_lock(); + have_tb_lock = true; + + /* There's a chance that our desired tb has been translated while + * taking the locks so we check again inside the lock. + */ + tb = tb_htable_lookup(cpu, pc, cs_base, flags); + if (!tb) { + /* if no translated code available, then translate it now */ + tb = tb_gen_code(cpu, pc, cs_base, flags, 0); + } + + mmap_unlock(); + } + + /* We add the TB in the virtual pc hash table for the fast lookup */ + atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); + } +#ifndef CONFIG_USER_ONLY + /* We don't take care of direct jumps when address mapping changes in + * system emulation. So it's not safe to make a direct jump to a TB + * spanning two pages because the mapping for the second page can change. + */ + if (tb->page_addr[1] != -1) { + last_tb = NULL; + } +#endif + /* See if we can patch the calling TB. 
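
tb_find() above is a two-level lookup: a direct-mapped tb_jmp_cache slot keyed on the guest PC is tried first, and only on a miss does it fall back to the global hash table (and, failing that, to translation), refilling the cache slot afterwards. A toy version of that structure, with invented names, no locking and no invalidation:

#include <stdio.h>
#include <stdlib.h>

#define CACHE_BITS 8
#define CACHE_SIZE (1u << CACHE_BITS)

struct tblock { unsigned long pc; };

static struct tblock *jmp_cache[CACHE_SIZE];

static unsigned cache_hash(unsigned long pc)
{
    return (pc >> 2) & (CACHE_SIZE - 1);
}

/* Stand-in for the expensive path (QHT lookup plus translation in QEMU);
 * blocks are never freed in this toy. */
static struct tblock *slow_lookup_or_create(unsigned long pc)
{
    struct tblock *tb = malloc(sizeof(*tb));
    tb->pc = pc;
    return tb;
}

static struct tblock *find_block(unsigned long pc)
{
    unsigned h = cache_hash(pc);
    struct tblock *tb = jmp_cache[h];

    if (!tb || tb->pc != pc) {          /* miss or stale slot */
        tb = slow_lookup_or_create(pc);
        jmp_cache[h] = tb;              /* refill the fast path */
    }
    return tb;
}

int main(void)
{
    struct tblock *a = find_block(0x1000);
    struct tblock *b = find_block(0x1000);   /* served from the cache */
    printf("same block: %s\n", a == b ? "yes" : "no");
    return 0;
}

In the real code the slow path re-checks under tb_lock because another vCPU may have translated the same block in the meantime.
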
*/ + if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + if (!have_tb_lock) { + tb_lock(); + have_tb_lock = true; + } + if (!tb->invalid) { + tb_add_jump(last_tb, tb_exit, tb); + } + } + if (have_tb_lock) { + tb_unlock(); + } + return tb; +} + +static inline bool cpu_handle_halt(CPUState *cpu) +{ + if (cpu->halted) { +#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) + if ((cpu->interrupt_request & CPU_INTERRUPT_POLL) + && replay_interrupt()) { + X86CPU *x86_cpu = X86_CPU(cpu); + qemu_mutex_lock_iothread(); + apic_poll_irq(x86_cpu->apic_state); + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); + qemu_mutex_unlock_iothread(); + } +#endif + if (!cpu_has_work(cpu)) { + return true; + } + + cpu->halted = 0; + } + + return false; +} + +static inline void cpu_handle_debug_exception(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUWatchpoint *wp; + + if (!cpu->watchpoint_hit) { + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } + + cc->debug_excp_handler(cpu); +} + +static inline bool cpu_handle_exception(CPUState *cpu, int *ret) +{ + if (cpu->exception_index >= 0) { + if (cpu->exception_index >= EXCP_INTERRUPT) { + /* exit request from the cpu execution loop */ + *ret = cpu->exception_index; + if (*ret == EXCP_DEBUG) { + cpu_handle_debug_exception(cpu); + } + cpu->exception_index = -1; + return true; + } else { +#if defined(CONFIG_USER_ONLY) + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ +#if defined(TARGET_I386) + CPUClass *cc = CPU_GET_CLASS(cpu); + cc->do_interrupt(cpu); +#endif + *ret = cpu->exception_index; + cpu->exception_index = -1; + return true; +#else + if (replay_exception()) { + CPUClass *cc = CPU_GET_CLASS(cpu); + qemu_mutex_lock_iothread(); + cc->do_interrupt(cpu); + qemu_mutex_unlock_iothread(); + cpu->exception_index = -1; + } else if (!replay_has_interrupt()) { + /* give a chance to iothread in replay mode */ + *ret = EXCP_INTERRUPT; + return true; + } +#endif + } +#ifndef CONFIG_USER_ONLY + } else if (replay_has_exception() + && cpu->icount_decr.u16.low + cpu->icount_extra == 0) { + /* try to cause an exception pending in the log */ + cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true); + *ret = -1; + return true; +#endif + } + + return false; +} + +static inline bool cpu_handle_interrupt(CPUState *cpu, + TranslationBlock **last_tb) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (unlikely(atomic_read(&cpu->interrupt_request))) { + int interrupt_request; + qemu_mutex_lock_iothread(); + interrupt_request = cpu->interrupt_request; + if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { + /* Mask out external interrupts for this step. 
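
cpu_handle_interrupt(), continued below, snapshots cpu->interrupt_request and then services one source at a time, clearing that source's bit before returning. A simplified check-and-clear sketch with C11 atomics; note that QEMU clears the bits under the iothread lock rather than with atomic read-modify-write, and the names below are invented:

#include <stdatomic.h>
#include <stdio.h>

enum {
    REQ_DEBUG = 1u << 0,
    REQ_HALT  = 1u << 1,
};

static _Atomic unsigned interrupt_request;

/* Handle and clear one pending request bit, highest priority first. */
static unsigned handle_requests(void)
{
    unsigned req = atomic_load(&interrupt_request);

    if (req & REQ_DEBUG) {
        atomic_fetch_and(&interrupt_request, ~REQ_DEBUG);
        return REQ_DEBUG;
    }
    if (req & REQ_HALT) {
        atomic_fetch_and(&interrupt_request, ~REQ_HALT);
        return REQ_HALT;
    }
    return 0;
}

int main(void)
{
    atomic_fetch_or(&interrupt_request, REQ_HALT | REQ_DEBUG);
    printf("handled 0x%x first\n", handle_requests());
    printf("handled 0x%x next\n", handle_requests());
    return 0;
}
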
*/ + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; + } + if (interrupt_request & CPU_INTERRUPT_DEBUG) { + cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; + cpu->exception_index = EXCP_DEBUG; + qemu_mutex_unlock_iothread(); + return true; + } + if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { + /* Do nothing */ + } else if (interrupt_request & CPU_INTERRUPT_HALT) { + replay_interrupt(); + cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; + cpu->halted = 1; + cpu->exception_index = EXCP_HLT; + qemu_mutex_unlock_iothread(); + return true; + } +#if defined(TARGET_I386) + else if (interrupt_request & CPU_INTERRUPT_INIT) { + X86CPU *x86_cpu = X86_CPU(cpu); + CPUArchState *env = &x86_cpu->env; + replay_interrupt(); + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); + do_cpu_init(x86_cpu); + cpu->exception_index = EXCP_HALTED; + qemu_mutex_unlock_iothread(); + return true; + } +#else + else if (interrupt_request & CPU_INTERRUPT_RESET) { + replay_interrupt(); + cpu_reset(cpu); + qemu_mutex_unlock_iothread(); + return true; + } +#endif + /* The target hook has 3 exit conditions: + False when the interrupt isn't processed, + True when it is, and we should restart on a new TB, + and via longjmp via cpu_loop_exit. */ + else { + if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { + replay_interrupt(); + *last_tb = NULL; + } + /* The target hook may have updated the 'cpu->interrupt_request'; + * reload the 'interrupt_request' value */ + interrupt_request = cpu->interrupt_request; + } + if (interrupt_request & CPU_INTERRUPT_EXITTB) { + cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + /* ensure that no TB jump will be modified as + the program flow was changed */ + *last_tb = NULL; + } + + /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ + qemu_mutex_unlock_iothread(); + } + + /* Finally, check if we need to exit to the main loop. */ + if (unlikely(atomic_read(&cpu->exit_request) + || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) { + atomic_set(&cpu->exit_request, 0); + cpu->exception_index = EXCP_INTERRUPT; + return true; + } + + return false; +} + +static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, + TranslationBlock **last_tb, int *tb_exit) +{ + uintptr_t ret; + int32_t insns_left; + + trace_exec_tb(tb, tb->pc); + ret = cpu_tb_exec(cpu, tb); + tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); + *tb_exit = ret & TB_EXIT_MASK; + if (*tb_exit != TB_EXIT_REQUESTED) { + *last_tb = tb; + return; + } + + *last_tb = NULL; + insns_left = atomic_read(&cpu->icount_decr.u32); + atomic_set(&cpu->icount_decr.u16.high, 0); + if (insns_left < 0) { + /* Something asked us to stop executing chained TBs; just + * continue round the main loop. Whatever requested the exit + * will also have set something else (eg exit_request or + * interrupt_request) which we will handle next time around + * the loop. But we need to ensure the zeroing of icount_decr + * comes before the next read of cpu->exit_request + * or cpu->interrupt_request. + */ + smp_mb(); + return; + } + + /* Instruction counter expired. */ + assert(use_icount); +#ifndef CONFIG_USER_ONLY + /* Ensure global icount has gone forward */ + cpu_update_icount(cpu); + /* Refill decrementer and continue execution. */ + insns_left = MIN(0xffff, cpu->icount_budget); + cpu->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + if (!cpu->icount_extra) { + /* Execute any remaining instructions, then let the main loop + * handle the next event. 
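
The icount refill above splits the remaining instruction budget between the 16-bit u16.low decrementer and icount_extra, so the hot path only ever decrements a small counter. The arithmetic on its own (invented helper name, illustrative only):

#include <stdio.h>

/* Split a budget of guest instructions between a 16-bit decrementer and
 * an "extra" counter, as the refill above does with MIN(0xffff, budget). */
static void refill(unsigned long budget, unsigned *low16, unsigned long *extra)
{
    unsigned long in_decr = budget < 0xffff ? budget : 0xffff;

    *low16 = (unsigned)in_decr;
    *extra = budget - in_decr;
}

int main(void)
{
    unsigned low;
    unsigned long extra;

    refill(200000, &low, &extra);
    printf("decrementer=%u extra=%lu (total %lu)\n", low, extra, low + extra);
    return 0;
}
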
+ */ + if (insns_left > 0) { + cpu_exec_nocache(cpu, insns_left, tb, false); + } + } +#endif +} + +/* main execution loop */ + +int cpu_exec(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + int ret; + SyncClocks sc = { 0 }; + + /* replay_interrupt may need current_cpu */ + current_cpu = cpu; + + if (cpu_handle_halt(cpu)) { + return EXCP_HALTED; + } + + rcu_read_lock(); + + cc->cpu_exec_enter(cpu); + + /* Calculate difference between guest clock and host clock. + * This delay includes the delay of the last cycle, so + * what we have to do is sleep until it is 0. As for the + * advance/delay we gain here, we try to fix it next time. + */ + init_delay_params(&sc, cpu); + + /* prepare setjmp context for exception handling */ + if (sigsetjmp(cpu->jmp_env, 0) != 0) { +#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6) + /* Some compilers wrongly smash all local variables after + * siglongjmp. There were bug reports for gcc 4.5.0 and clang. + * Reload essential local variables here for those compilers. + * Newer versions of gcc would complain about this code (-Wclobbered). */ + cpu = current_cpu; + cc = CPU_GET_CLASS(cpu); +#else /* buggy compiler */ + /* Assert that the compiler does not smash local variables. */ + g_assert(cpu == current_cpu); + g_assert(cc == CPU_GET_CLASS(cpu)); +#endif /* buggy compiler */ + cpu->can_do_io = 1; + tb_lock_reset(); + if (qemu_mutex_iothread_locked()) { + qemu_mutex_unlock_iothread(); + } + } + + /* if an exception is pending, we execute it here */ + while (!cpu_handle_exception(cpu, &ret)) { + TranslationBlock *last_tb = NULL; + int tb_exit = 0; + + while (!cpu_handle_interrupt(cpu, &last_tb)) { + TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit); + cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); + /* Try to align the host and virtual clocks + if the guest is in advance */ + align_clocks(&sc, cpu); + } + } + + cc->cpu_exec_exit(cpu); + rcu_read_unlock(); + + return ret; +} diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c new file mode 100644 index 0000000000..743776ae19 --- /dev/null +++ b/accel/tcg/cputlb.c @@ -0,0 +1,1051 @@ +/* + * Common CPU TLB handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "tcg/tcg.h" +#include "qemu/error-report.h" +#include "exec/log.h" +#include "exec/helper-proto.h" +#include "qemu/atomic.h" + +/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ +/* #define DEBUG_TLB */ +/* #define DEBUG_TLB_LOG */ + +#ifdef DEBUG_TLB +# define DEBUG_TLB_GATE 1 +# ifdef DEBUG_TLB_LOG +# define DEBUG_TLB_LOG_GATE 1 +# else +# define DEBUG_TLB_LOG_GATE 0 +# endif +#else +# define DEBUG_TLB_GATE 0 +# define DEBUG_TLB_LOG_GATE 0 +#endif + +#define tlb_debug(fmt, ...) do { \ + if (DEBUG_TLB_LOG_GATE) { \ + qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ + ## __VA_ARGS__); \ + } else if (DEBUG_TLB_GATE) { \ + fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ + } \ +} while (0) + +#define assert_cpu_is_self(this_cpu) do { \ + if (DEBUG_TLB_GATE) { \ + g_assert(!cpu->created || qemu_cpu_is_self(cpu)); \ + } \ + } while (0) + +/* run_on_cpu_data.target_ptr should always be big enough for a + * target_ulong even on 32 bit builds */ +QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); + +/* We currently can't handle more than 16 bits in the MMUIDX bitmask. + */ +QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); +#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) + +/* flush_all_helper: run fn across all cpus + * + * If the wait flag is set then the src cpu's helper will be queued as + * "safe" work and the loop exited creating a synchronisation point + * where all queued work will be finished before execution starts + * again. + */ +static void flush_all_helper(CPUState *src, run_on_cpu_func fn, + run_on_cpu_data d) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu != src) { + async_run_on_cpu(cpu, fn, d); + } + } +} + +/* statistics */ +int tlb_flush_count; + +/* This is OK because CPU architectures generally permit an + * implementation to drop entries from the TLB at any time, so + * flushing more entries than required is only an efficiency issue, + * not a correctness issue. + */ +static void tlb_flush_nocheck(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + + /* The QOM tests will trigger tlb_flushes without setting up TCG + * so we bug out here in that case. 
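
The tlb_debug() macro above gates its output on compile-time constants inside a do { } while (0) instead of #ifdef'ing every call site, so the format strings stay type-checked even when debugging is off and the dead branch is optimised away. A minimal standalone version of the same pattern (the ## __VA_ARGS__ form is a GCC/Clang extension, as in the original):

#include <stdio.h>

/* Flip to 1 to enable output; either way the arguments are compiled
 * and checked. */
#define DEBUG_GATE 0

#define debug_log(fmt, ...) do {                                    \
        if (DEBUG_GATE) {                                           \
            fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__);  \
        }                                                           \
    } while (0)

int main(void)
{
    debug_log("flushing %d entries\n", 256);   /* silent while the gate is 0 */
    return 0;
}
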
+ */ + if (!tcg_enabled()) { + return; + } + + assert_cpu_is_self(cpu); + tlb_debug("(count: %d)\n", tlb_flush_count++); + + tb_lock(); + + memset(env->tlb_table, -1, sizeof(env->tlb_table)); + memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); + memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); + + env->vtlb_index = 0; + env->tlb_flush_addr = -1; + env->tlb_flush_mask = 0; + + tb_unlock(); + + atomic_mb_set(&cpu->pending_tlb_flush, 0); +} + +static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data) +{ + tlb_flush_nocheck(cpu); +} + +void tlb_flush(CPUState *cpu) +{ + if (cpu->created && !qemu_cpu_is_self(cpu)) { + if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) { + atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS); + async_run_on_cpu(cpu, tlb_flush_global_async_work, + RUN_ON_CPU_NULL); + } + } else { + tlb_flush_nocheck(cpu); + } +} + +void tlb_flush_all_cpus(CPUState *src_cpu) +{ + const run_on_cpu_func fn = tlb_flush_global_async_work; + flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL); + fn(src_cpu, RUN_ON_CPU_NULL); +} + +void tlb_flush_all_cpus_synced(CPUState *src_cpu) +{ + const run_on_cpu_func fn = tlb_flush_global_async_work; + flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL); + async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL); +} + +static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + unsigned long mmu_idx_bitmask = data.host_int; + int mmu_idx; + + assert_cpu_is_self(cpu); + + tb_lock(); + + tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask); + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + + if (test_bit(mmu_idx, &mmu_idx_bitmask)) { + tlb_debug("%d\n", mmu_idx); + + memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0])); + memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0])); + } + } + + memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); + + tlb_debug("done\n"); + + tb_unlock(); +} + +void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +{ + tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); + + if (!qemu_cpu_is_self(cpu)) { + uint16_t pending_flushes = idxmap; + pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush); + + if (pending_flushes) { + tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes); + + atomic_or(&cpu->pending_tlb_flush, pending_flushes); + async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, + RUN_ON_CPU_HOST_INT(pending_flushes)); + } + } else { + tlb_flush_by_mmuidx_async_work(cpu, + RUN_ON_CPU_HOST_INT(idxmap)); + } +} + +void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); +} + + + +static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) +{ + if (addr == (tlb_entry->addr_read & + (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || + addr == (tlb_entry->addr_write & + (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || + addr == (tlb_entry->addr_code & + (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + memset(tlb_entry, -1, 
sizeof(*tlb_entry)); + } +} + +static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + target_ulong addr = (target_ulong) data.target_ptr; + int i; + int mmu_idx; + + assert_cpu_is_self(cpu); + + tlb_debug("page :" TARGET_FMT_lx "\n", addr); + + /* Check if we need to flush due to large pages. */ + if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { + tlb_debug("forcing full flush (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + env->tlb_flush_addr, env->tlb_flush_mask); + + tlb_flush(cpu); + return; + } + + addr &= TARGET_PAGE_MASK; + i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); + } + + /* check whether there are entries that need to be flushed in the vtlb */ + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr); + } + } + + tb_flush_jmp_cache(cpu, addr); +} + +void tlb_flush_page(CPUState *cpu, target_ulong addr) +{ + tlb_debug("page :" TARGET_FMT_lx "\n", addr); + + if (!qemu_cpu_is_self(cpu)) { + async_run_on_cpu(cpu, tlb_flush_page_async_work, + RUN_ON_CPU_TARGET_PTR(addr)); + } else { + tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr)); + } +} + +/* As we are going to hijack the bottom bits of the page address for a + * mmuidx bit mask we need to fail to build if we can't do that + */ +QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN); + +static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu, + run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; + target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; + unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; + int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + int mmu_idx; + int i; + + assert_cpu_is_self(cpu); + + tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n", + page, addr, mmu_idx_bitmap); + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + if (test_bit(mmu_idx, &mmu_idx_bitmap)) { + tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr); + + /* check whether there are vltb entries that need to be flushed */ + for (i = 0; i < CPU_VTLB_SIZE; i++) { + tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr); + } + } + } + + tb_flush_jmp_cache(cpu, addr); +} + +static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu, + run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; + target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; + unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; + + tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap); + + /* Check if we need to flush due to large pages. 
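
As the "hijack the bottom bits" comment above explains, the per-mmuidx page flush packs the MMU-index bitmap into the sub-page bits of a page-aligned address so that one pointer-sized value can be posted to the async worker; the QEMU_BUILD_BUG_ON guarantees the bitmap fits below the page size. The pack/unpack step in isolation, with constants invented for a 4K page:

#include <assert.h>
#include <stdio.h>

#define PAGE_BITS    12
#define PAGE_MASK    (~((1ul << PAGE_BITS) - 1))
#define ALL_IDX_BITS ((1ul << 6) - 1)        /* pretend there are 6 MMU modes */

int main(void)
{
    unsigned long addr = 0x7f00d000ul;       /* already page aligned */
    unsigned long idxmap = 0x5;              /* flush MMU indexes 0 and 2 */

    /* Pack: the low bits of an aligned address are free. */
    unsigned long packed = (addr & PAGE_MASK) | idxmap;

    /* Unpack on the worker side. */
    unsigned long page = packed & PAGE_MASK;
    unsigned long idx  = packed & ALL_IDX_BITS;

    assert(page == addr);
    printf("page=0x%lx idxmap=0x%lx\n", page, idx);
    return 0;
}
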
*/ + if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { + tlb_debug("forced full flush (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + env->tlb_flush_addr, env->tlb_flush_mask); + + tlb_flush_by_mmuidx_async_work(cpu, + RUN_ON_CPU_HOST_INT(mmu_idx_bitmap)); + } else { + tlb_flush_page_by_mmuidx_async_work(cpu, data); + } +} + +void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) +{ + target_ulong addr_and_mmu_idx; + + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); + + /* This should already be page aligned */ + addr_and_mmu_idx = addr & TARGET_PAGE_MASK; + addr_and_mmu_idx |= idxmap; + + if (!qemu_cpu_is_self(cpu)) { + async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work, + RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); + } else { + tlb_check_page_and_flush_by_mmuidx_async_work( + cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); + } +} + +void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, + uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work; + target_ulong addr_and_mmu_idx; + + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); + + /* This should already be page aligned */ + addr_and_mmu_idx = addr & TARGET_PAGE_MASK; + addr_and_mmu_idx |= idxmap; + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); + fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); +} + +void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work; + target_ulong addr_and_mmu_idx; + + tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); + + /* This should already be page aligned */ + addr_and_mmu_idx = addr & TARGET_PAGE_MASK; + addr_and_mmu_idx |= idxmap; + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); + async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); +} + +void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) +{ + const run_on_cpu_func fn = tlb_flush_page_async_work; + + flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); + fn(src, RUN_ON_CPU_TARGET_PTR(addr)); +} + +void tlb_flush_page_all_cpus_synced(CPUState *src, + target_ulong addr) +{ + const run_on_cpu_func fn = tlb_flush_page_async_work; + + flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); + async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); +} + +/* update the TLBs so that writes to code in the virtual page 'addr' + can be detected */ +void tlb_protect_code(ram_addr_t ram_addr) +{ + cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, + DIRTY_MEMORY_CODE); +} + +/* update the TLB so that writes in physical page 'phys_addr' are no longer + tested for self modifying code */ +void tlb_unprotect_code(ram_addr_t ram_addr) +{ + cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); +} + + +/* + * Dirty write flag handling + * + * When the TCG code writes to a location it looks up the address in + * the TLB and uses that data to compute the final address. If any of + * the lower bits of the address are set then the slow path is forced. + * There are a number of reasons to do this but for normal RAM the + * most usual is detecting writes to code regions which may invalidate + * generated code. + * + * Because we want other vCPUs to respond to changes straight away we + * update the te->addr_write field atomically. 
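
The compare-and-swap described in the paragraph above, and implemented just below in tlb_reset_dirty_range(), only installs the TLB_NOTDIRTY-flagged value if the entry still holds the value that was snapshotted; if the vCPU changed it in the meantime the update is skipped. A self-contained sketch with C11 atomics (names and flag value invented, not QEMU code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_NOTDIRTY 0x1u

/* Set a flag bit in a word another thread may rewrite concurrently:
 * snapshot it, then install the flagged value only if the word still
 * matches the snapshot. */
static void mark_notdirty(_Atomic uint32_t *entry)
{
    uint32_t orig = atomic_load(entry);
    uint32_t flagged = orig | FLAG_NOTDIRTY;

    /* On failure the other side already replaced the entry and the
     * update is deliberately dropped. */
    atomic_compare_exchange_strong(entry, &orig, flagged);
}

int main(void)
{
    _Atomic uint32_t addr_write = 0x1000;

    mark_notdirty(&addr_write);
    printf("addr_write = 0x%x\n", (unsigned)atomic_load(&addr_write));
    return 0;
}
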
If the TLB entry has + * been changed by the vCPU in the mean time we skip the update. + * + * As this function uses atomic accesses we also need to ensure + * updates to tlb_entries follow the same access rules. We don't need + * to worry about this for oversized guests as MTTCG is disabled for + * them. + */ + +static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start, + uintptr_t length) +{ +#if TCG_OVERSIZED_GUEST + uintptr_t addr = tlb_entry->addr_write; + + if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { + addr &= TARGET_PAGE_MASK; + addr += tlb_entry->addend; + if ((addr - start) < length) { + tlb_entry->addr_write |= TLB_NOTDIRTY; + } + } +#else + /* paired with atomic_mb_set in tlb_set_page_with_attrs */ + uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write); + uintptr_t addr = orig_addr; + + if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { + addr &= TARGET_PAGE_MASK; + addr += atomic_read(&tlb_entry->addend); + if ((addr - start) < length) { + uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY; + atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr); + } + } +#endif +} + +/* For atomic correctness when running MTTCG we need to use the right + * primitives when copying entries */ +static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s, + bool atomic_set) +{ +#if TCG_OVERSIZED_GUEST + *d = *s; +#else + if (atomic_set) { + d->addr_read = s->addr_read; + d->addr_code = s->addr_code; + atomic_set(&d->addend, atomic_read(&s->addend)); + /* Pairs with flag setting in tlb_reset_dirty_range */ + atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write)); + } else { + d->addr_read = s->addr_read; + d->addr_write = atomic_read(&s->addr_write); + d->addr_code = s->addr_code; + d->addend = atomic_read(&s->addend); + } +#endif +} + +/* This is a cross vCPU call (i.e. another vCPU resetting the flags of + * the target vCPU). As such care needs to be taken that we don't + * dangerously race with another vCPU update. The only thing actually + * updated is the target TLB entry ->addr_write flags. + */ +void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) +{ + CPUArchState *env; + + int mmu_idx; + + env = cpu->env_ptr; + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + unsigned int i; + + for (i = 0; i < CPU_TLB_SIZE; i++) { + tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], + start1, length); + } + + for (i = 0; i < CPU_VTLB_SIZE; i++) { + tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], + start1, length); + } + } +} + +static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) +{ + if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { + tlb_entry->addr_write = vaddr; + } +} + +/* update the TLB corresponding to virtual page vaddr + so that it is no longer dirty */ +void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) +{ + CPUArchState *env = cpu->env_ptr; + int i; + int mmu_idx; + + assert_cpu_is_self(cpu); + + vaddr &= TARGET_PAGE_MASK; + i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); + } + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); + } + } +} + +/* Our TLB does not support large pages, so remember the area covered by + large pages and trigger a full TLB flush if these are invalidated. 
*/ +static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, + target_ulong size) +{ + target_ulong mask = ~(size - 1); + + if (env->tlb_flush_addr == (target_ulong)-1) { + env->tlb_flush_addr = vaddr & mask; + env->tlb_flush_mask = mask; + return; + } + /* Extend the existing region to include the new page. + This is a compromise between unnecessary flushes and the cost + of maintaining a full variable size TLB. */ + mask &= env->tlb_flush_mask; + while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { + mask <<= 1; + } + env->tlb_flush_addr &= mask; + env->tlb_flush_mask = mask; +} + +/* Add a new TLB entry. At most one entry for a given virtual address + * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the + * supplied size is only used by tlb_flush_page. + * + * Called from TCG-generated code, which is under an RCU read-side + * critical section. + */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) +{ + CPUArchState *env = cpu->env_ptr; + MemoryRegionSection *section; + unsigned int index; + target_ulong address; + target_ulong code_address; + uintptr_t addend; + CPUTLBEntry *te, *tv, tn; + hwaddr iotlb, xlat, sz; + unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; + int asidx = cpu_asidx_from_attrs(cpu, attrs); + + assert_cpu_is_self(cpu); + assert(size >= TARGET_PAGE_SIZE); + if (size != TARGET_PAGE_SIZE) { + tlb_add_large_page(env, vaddr, size); + } + + sz = size; + section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz); + assert(sz >= TARGET_PAGE_SIZE); + + tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx + " prot=%x idx=%d\n", + vaddr, paddr, prot, mmu_idx); + + address = vaddr; + if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { + /* IO memory case */ + address |= TLB_MMIO; + addend = 0; + } else { + /* TLB_MMIO for rom/romd handled below */ + addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; + } + + code_address = address; + iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat, + prot, &address); + + index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + te = &env->tlb_table[mmu_idx][index]; + /* do not discard the translation in te, evict it into a victim tlb */ + tv = &env->tlb_v_table[mmu_idx][vidx]; + + /* addr_write can race with tlb_reset_dirty_range */ + copy_tlb_helper(tv, te, true); + + env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; + + /* refill the tlb */ + env->iotlb[mmu_idx][index].addr = iotlb - vaddr; + env->iotlb[mmu_idx][index].attrs = attrs; + + /* Now calculate the new entry */ + tn.addend = addend - vaddr; + if (prot & PAGE_READ) { + tn.addr_read = address; + } else { + tn.addr_read = -1; + } + + if (prot & PAGE_EXEC) { + tn.addr_code = code_address; + } else { + tn.addr_code = -1; + } + + tn.addr_write = -1; + if (prot & PAGE_WRITE) { + if ((memory_region_is_ram(section->mr) && section->readonly) + || memory_region_is_romd(section->mr)) { + /* Write access calls the I/O callback. 
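
tlb_add_large_page() above widens the remembered region by shifting the mask left until the existing base and the new address fall inside the same aligned block, trading extra full flushes for not tracking every large page individually. A worked standalone example of that mask-widening loop (simplified: both mappings are assumed to use the same page size):

#include <stdio.h>

/* Widen an aligned region (addr, mask) until it also covers new_addr. */
static void widen(unsigned long *addr, unsigned long *mask,
                  unsigned long new_addr)
{
    unsigned long m = *mask;

    while (((*addr ^ new_addr) & m) != 0) {
        m <<= 1;                     /* double the covered range */
    }
    *addr &= m;
    *mask = m;
}

int main(void)
{
    /* Existing 2MB region at 0x40000000, new 2MB page at 0x40600000. */
    unsigned long addr = 0x40000000ul;
    unsigned long mask = ~(0x200000ul - 1);

    widen(&addr, &mask, 0x40600000ul);
    printf("merged region: base=0x%lx mask=0x%lx (size %lu MB)\n",
           addr, mask, (~mask + 1) >> 20);
    return 0;
}

Here the two 2MB mappings merge into one 8MB region; any page flush landing inside it falls back to a full TLB flush, as the large-page check in tlb_flush_page_async_work() above shows.
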
*/ + tn.addr_write = address | TLB_MMIO; + } else if (memory_region_is_ram(section->mr) + && cpu_physical_memory_is_clean( + memory_region_get_ram_addr(section->mr) + xlat)) { + tn.addr_write = address | TLB_NOTDIRTY; + } else { + tn.addr_write = address; + } + } + + /* Pairs with flag setting in tlb_reset_dirty_range */ + copy_tlb_helper(te, &tn, true); + /* atomic_mb_set(&te->addr_write, write_address); */ +} + +/* Add a new TLB entry, but without specifying the memory + * transaction attributes to be used. + */ +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size) +{ + tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, + prot, mmu_idx, size); +} + +static void report_bad_exec(CPUState *cpu, target_ulong addr) +{ + /* Accidentally executing outside RAM or ROM is quite common for + * several user-error situations, so report it in a way that + * makes it clear that this isn't a QEMU bug and provide suggestions + * about what a user could do to fix things. + */ + error_report("Trying to execute code outside RAM or ROM at 0x" + TARGET_FMT_lx, addr); + error_printf("This usually means one of the following happened:\n\n" + "(1) You told QEMU to execute a kernel for the wrong machine " + "type, and it crashed on startup (eg trying to run a " + "raspberry pi kernel on a versatilepb QEMU machine)\n" + "(2) You didn't give QEMU a kernel or BIOS filename at all, " + "and QEMU executed a ROM full of no-op instructions until " + "it fell off the end\n" + "(3) Your guest kernel has a bug and crashed by jumping " + "off into nowhere\n\n" + "This is almost always one of the first two, so check your " + "command line and that you are using the right type of kernel " + "for this machine.\n" + "If you think option (3) is likely then you can try debugging " + "your guest with the -d debug options; in particular " + "-d guest_errors will cause the log to include a dump of the " + "guest register state at this point.\n\n" + "Execution cannot continue; stopping here.\n\n"); + + /* Report also to the logs, with more detail including register dump */ + qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code " + "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); + log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); +} + +static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) +{ + ram_addr_t ram_addr; + + ram_addr = qemu_ram_addr_from_host(ptr); + if (ram_addr == RAM_ADDR_INVALID) { + error_report("Bad ram pointer %p", ptr); + abort(); + } + return ram_addr; +} + +/* NOTE: this function can trigger an exception */ +/* NOTE2: the returned address is not exactly the physical address: it + * is actually a ram_addr_t (in system mode; the user mode emulation + * version of this function returns a guest virtual address). 
+ */ +tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) +{ + int mmu_idx, page_index, pd; + void *p; + MemoryRegion *mr; + CPUState *cpu = ENV_GET_CPU(env1); + CPUIOTLBEntry *iotlbentry; + + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = cpu_mmu_index(env1, true); + if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != + (addr & TARGET_PAGE_MASK))) { + cpu_ldub_code(env1, addr); + } + iotlbentry = &env1->iotlb[mmu_idx][page_index]; + pd = iotlbentry->addr & ~TARGET_PAGE_MASK; + mr = iotlb_to_region(cpu, pd, iotlbentry->attrs); + if (memory_region_is_unassigned(mr)) { + cpu_unassigned_access(cpu, addr, false, true, 0, 4); + /* The CPU's unassigned access hook might have longjumped out + * with an exception. If it didn't (or there was no hook) then + * we can't proceed further. + */ + report_bad_exec(cpu, addr); + exit(1); + } + p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); + return qemu_ram_addr_from_host_nofail(p); +} + +static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + target_ulong addr, uintptr_t retaddr, int size) +{ + CPUState *cpu = ENV_GET_CPU(env); + hwaddr physaddr = iotlbentry->addr; + MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs); + uint64_t val; + bool locked = false; + + physaddr = (physaddr & TARGET_PAGE_MASK) + addr; + cpu->mem_io_pc = retaddr; + if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + + cpu->mem_io_vaddr = addr; + + if (mr->global_locking) { + qemu_mutex_lock_iothread(); + locked = true; + } + memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs); + if (locked) { + qemu_mutex_unlock_iothread(); + } + + return val; +} + +static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + uint64_t val, target_ulong addr, + uintptr_t retaddr, int size) +{ + CPUState *cpu = ENV_GET_CPU(env); + hwaddr physaddr = iotlbentry->addr; + MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs); + bool locked = false; + + physaddr = (physaddr & TARGET_PAGE_MASK) + addr; + if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + cpu->mem_io_vaddr = addr; + cpu->mem_io_pc = retaddr; + + if (mr->global_locking) { + qemu_mutex_lock_iothread(); + locked = true; + } + memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs); + if (locked) { + qemu_mutex_unlock_iothread(); + } +} + +/* Return true if ADDR is present in the victim tlb, and has been copied + back to the main tlb. */ +static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, + size_t elt_ofs, target_ulong page) +{ + size_t vidx; + for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { + CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; + target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); + + if (cmp == page) { + /* Found entry in victim tlb, swap tlb and iotlb. */ + CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index]; + + copy_tlb_helper(&tmptlb, tlb, false); + copy_tlb_helper(tlb, vtlb, true); + copy_tlb_helper(vtlb, &tmptlb, true); + + CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index]; + CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx]; + tmpio = *io; *io = *vio; *vio = tmpio; + return true; + } + } + return false; +} + +/* Macro to call the above, with local variables from the use context. 
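One detail of victim_tlb_hit() above that is easy to miss: instead of three near-identical functions for addr_read/addr_write/addr_code, the caller passes the byte offset of the field to compare and the function reads it through offsetof()-style pointer arithmetic. A self-contained illustration with a stand-in struct (ToyTLBEntry and entry_field() are hypothetical names, not the real CPUTLBEntry):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {                /* cut-down stand-in for CPUTLBEntry */
        uint64_t addr_read;
        uint64_t addr_write;
        uint64_t addr_code;
    } ToyTLBEntry;

    /* Read whichever field lives at byte offset 'elt_ofs', the way
     * victim_tlb_hit() picks its comparand. */
    static uint64_t entry_field(const ToyTLBEntry *e, size_t elt_ofs)
    {
        return *(const uint64_t *)((const char *)e + elt_ofs);
    }

    int main(void)
    {
        ToyTLBEntry e = { .addr_read = 0x1000, .addr_write = 0x2000, .addr_code = 0x3000 };
        printf("read:  0x%llx\n",
               (unsigned long long)entry_field(&e, offsetof(ToyTLBEntry, addr_read)));
        printf("write: 0x%llx\n",
               (unsigned long long)entry_field(&e, offsetof(ToyTLBEntry, addr_write)));
        return 0;
    }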
*/ +#define VICTIM_TLB_HIT(TY, ADDR) \ + victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ + (ADDR) & TARGET_PAGE_MASK) + +/* Probe for whether the specified guest write access is permitted. + * If it is not permitted then an exception will be taken in the same + * way as if this were a real write access (and we will not return). + * Otherwise the function will return, and there will be a valid + * entry in the TLB for this access. + */ +void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx, + uintptr_t retaddr) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + /* TLB entry is for a different page */ + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + } +} + +/* Probe for a read-modify-write atomic operation. Do not allow unaligned + * operations, or io operations to proceed. Return the host address. */ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + size_t mmu_idx = get_mmuidx(oi); + size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index]; + target_ulong tlb_addr = tlbe->addr_write; + TCGMemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + int s_bits = mop & MO_SIZE; + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* Enforce guest required alignment. */ + if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { + /* ??? Maybe indicate atomic op to cpu_unaligned_access */ + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* Enforce qemu required alignment. */ + if (unlikely(addr & ((1 << s_bits) - 1))) { + /* We get here if guest alignment was not requested, + or was not enforced by cpu_unaligned_access above. + We might widen the access and emulate, but for now + mark an exception and exit the cpu loop. */ + goto stop_the_world; + } + + /* Check TLB entry and enforce page permissions. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + tlb_addr = tlbe->addr_write; + } + + /* Check notdirty */ + if (unlikely(tlb_addr & TLB_NOTDIRTY)) { + tlb_set_dirty(ENV_GET_CPU(env), addr); + tlb_addr = tlb_addr & ~TLB_NOTDIRTY; + } + + /* Notice an IO access */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + /* There's really nothing that can be done to + support this apart from stop-the-world. */ + goto stop_the_world; + } + + /* Let the guest notice RMW on a write-only page. */ + if (unlikely(tlbe->addr_read != tlb_addr)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr); + /* Since we don't support reads and writes to different addresses, + and we do have the proper page loaded for write, this shouldn't + ever return. But just in case, handle via stop-the-world. 
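Both alignment tests in atomic_mmu_lookup() above are power-of-two mask checks: a_bits carries the alignment the guest requires, s_bits the natural alignment of the access size. A standalone sketch of the arithmetic (misaligned() and the hard-coded addresses are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if 'addr' is not aligned to 2^bits bytes - the same mask test the
     * patch applies for the guest alignment (a_bits) and the size (s_bits). */
    static bool misaligned(uint64_t addr, int bits)
    {
        return bits > 0 && (addr & ((1ull << bits) - 1)) != 0;
    }

    int main(void)
    {
        /* A 4-byte atomic (s_bits == 2) at 0x1002 fails the size check... */
        printf("0x1002: %s\n", misaligned(0x1002, 2) ? "misaligned" : "ok");
        /* ...while the same access at 0x1004 passes. */
        printf("0x1004: %s\n", misaligned(0x1004, 2) ? "misaligned" : "ok");
        return 0;
    }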
*/ + goto stop_the_world; + } + + return (void *)((uintptr_t)addr + tlbe->addend); + + stop_the_world: + cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); +} + +#ifdef TARGET_WORDS_BIGENDIAN +# define TGT_BE(X) (X) +# define TGT_LE(X) BSWAP(X) +#else +# define TGT_BE(X) BSWAP(X) +# define TGT_LE(X) (X) +#endif + +#define MMUSUFFIX _mmu + +#define DATA_SIZE 1 +#include "softmmu_template.h" + +#define DATA_SIZE 2 +#include "softmmu_template.h" + +#define DATA_SIZE 4 +#include "softmmu_template.h" + +#define DATA_SIZE 8 +#include "softmmu_template.h" + +/* First set of helpers allows passing in of OI and RETADDR. This makes + them callable from other helpers. */ + +#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr +#define ATOMIC_NAME(X) \ + HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr) + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +#ifdef CONFIG_ATOMIC128 +#define DATA_SIZE 16 +#include "atomic_template.h" +#endif + +/* Second set of helpers are directly callable from TCG as helpers. */ + +#undef EXTRA_ARGS +#undef ATOMIC_NAME +#undef ATOMIC_MMU_LOOKUP +#define EXTRA_ARGS , TCGMemOpIdx oi +#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC()) + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +/* Code access functions. */ + +#undef MMUSUFFIX +#define MMUSUFFIX _cmmu +#undef GETPC +#define GETPC() ((uintptr_t)0) +#define SOFTMMU_CODE_ACCESS + +#define DATA_SIZE 1 +#include "softmmu_template.h" + +#define DATA_SIZE 2 +#include "softmmu_template.h" + +#define DATA_SIZE 4 +#include "softmmu_template.h" + +#define DATA_SIZE 8 +#include "softmmu_template.h" diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c new file mode 100644 index 0000000000..dba99315e3 --- /dev/null +++ b/accel/tcg/tcg-all.c @@ -0,0 +1,61 @@ +/* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "sysemu/accel.h" +#include "sysemu/sysemu.h" +#include "qom/object.h" + +int tcg_tb_size; +static bool tcg_allowed = true; + +static int tcg_init(MachineState *ms) +{ + tcg_exec_init(tcg_tb_size * 1024 * 1024); + return 0; +} + +static void tcg_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "tcg"; + ac->init_machine = tcg_init; + ac->allowed = &tcg_allowed; +} + +#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg") + +static const TypeInfo tcg_accel_type = { + .name = TYPE_TCG_ACCEL, + .parent = TYPE_ACCEL, + .class_init = tcg_accel_class_init, +}; + +static void register_accel_types(void) +{ + type_register_static(&tcg_accel_type); +} + +type_init(register_accel_types); diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events new file mode 100644 index 0000000000..2de8359670 --- /dev/null +++ b/accel/tcg/trace-events @@ -0,0 +1,10 @@ +# Trace events for debugging and performance instrumentation + +# TCG related tracing (mostly disabled by default) +# cpu-exec.c +disable exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR +disable exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR +disable exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=%x" + +# translate-all.c +translate_block(void *tb, uintptr_t pc, uint8_t *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p" diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c new file mode 100644 index 0000000000..f6ad46b613 --- /dev/null +++ b/accel/tcg/translate-all.c @@ -0,0 +1,2227 @@ +/* + * Host code generation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifdef _WIN32 +#include <windows.h> +#endif +#include "qemu/osdep.h" + + +#include "qemu-common.h" +#define NO_CPU_IO_DEFS +#include "cpu.h" +#include "trace.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg.h" +#if defined(CONFIG_USER_ONLY) +#include "qemu.h" +#include "exec/exec-all.h" +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <sys/param.h> +#if __FreeBSD_version >= 700104 +#define HAVE_KINFO_GETVMMAP +#define sigqueue sigqueue_freebsd /* avoid redefinition */ +#include <sys/proc.h> +#include <machine/profile.h> +#define _KERNEL +#include <sys/user.h> +#undef _KERNEL +#undef sigqueue +#include <libutil.h> +#endif +#endif +#else +#include "exec/address-spaces.h" +#endif + +#include "exec/cputlb.h" +#include "exec/tb-hash.h" +#include "translate-all.h" +#include "qemu/bitmap.h" +#include "qemu/timer.h" +#include "qemu/main-loop.h" +#include "exec/log.h" +#include "sysemu/cpus.h" + +/* #define DEBUG_TB_INVALIDATE */ +/* #define DEBUG_TB_FLUSH */ +/* make various TB consistency checks */ +/* #define DEBUG_TB_CHECK */ + +#if !defined(CONFIG_USER_ONLY) +/* TB consistency checks only implemented for usermode emulation. 
*/ +#undef DEBUG_TB_CHECK +#endif + +/* Access to the various translations structures need to be serialised via locks + * for consistency. This is automatic for SoftMMU based system + * emulation due to its single threaded nature. In user-mode emulation + * access to the memory related structures are protected with the + * mmap_lock. + */ +#ifdef CONFIG_SOFTMMU +#define assert_memory_lock() tcg_debug_assert(have_tb_lock) +#else +#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) +#endif + +#define SMC_BITMAP_USE_THRESHOLD 10 + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + TranslationBlock *first_tb; +#ifdef CONFIG_SOFTMMU + /* in order to optimize self modifying code, we count the number + of lookups we do to a given page to use a bitmap */ + unsigned int code_write_count; + unsigned long *code_bitmap; +#else + unsigned long flags; +#endif +} PageDesc; + +/* In system mode we want L1_MAP to be based on ram offsets, + while in user mode we want it to be based on virtual addresses. */ +#if !defined(CONFIG_USER_ONLY) +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS +# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS +#endif +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS +#endif + +/* Size of the L2 (and L3, etc) page tables. */ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +uintptr_t qemu_host_page_size; +intptr_t qemu_host_page_mask; + +/* + * L1 Mapping properties + */ +static int v_l1_size; +static int v_l1_shift; +static int v_l2_levels; + +/* The bottom level has pointers to PageDesc, and is indexed by + * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. + */ +#define V_L1_MIN_BITS 4 +#define V_L1_MAX_BITS (V_L2_BITS + 3) +#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) + +static void *l1_map[V_L1_MAX_SIZE]; + +/* code generation context */ +TCGContext tcg_ctx; +bool parallel_cpus; + +/* translation block context */ +__thread int have_tb_lock; + +static void page_table_config_init(void) +{ + uint32_t v_l1_bits; + + assert(TARGET_PAGE_BITS); + /* The bits remaining after N lower levels of page tables. */ + v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; + if (v_l1_bits < V_L1_MIN_BITS) { + v_l1_bits += V_L2_BITS; + } + + v_l1_size = 1 << v_l1_bits; + v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; + v_l2_levels = v_l1_shift / V_L2_BITS - 1; + + assert(v_l1_bits <= V_L1_MAX_BITS); + assert(v_l1_shift % V_L2_BITS == 0); + assert(v_l2_levels >= 0); +} + +#define assert_tb_locked() tcg_debug_assert(have_tb_lock) +#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock) + +void tb_lock(void) +{ + assert_tb_unlocked(); + qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock); + have_tb_lock++; +} + +void tb_unlock(void) +{ + assert_tb_locked(); + have_tb_lock--; + qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); +} + +void tb_lock_reset(void) +{ + if (have_tb_lock) { + qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); + have_tb_lock = 0; + } +} + +static TranslationBlock *tb_find_pc(uintptr_t tc_ptr); + +void cpu_gen_init(void) +{ + tcg_context_init(&tcg_ctx); +} + +/* Encode VAL as a signed leb128 sequence at P. + Return P incremented past the encoded value. 
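To make page_table_config_init() above concrete, here is the same computation run for one plausible geometry; 48 usable map bits and 4 KiB target pages are assumptions for the example, while V_L2_BITS and V_L1_MIN_BITS are the values fixed in this file:

    #include <assert.h>
    #include <stdio.h>

    #define V_L2_BITS     10
    #define V_L1_MIN_BITS 4

    int main(void)
    {
        int l1_map_addr_space_bits = 48;    /* example value */
        int target_page_bits = 12;          /* example value */

        int v_l1_bits = (l1_map_addr_space_bits - target_page_bits) % V_L2_BITS;
        if (v_l1_bits < V_L1_MIN_BITS) {
            v_l1_bits += V_L2_BITS;
        }
        int v_l1_size = 1 << v_l1_bits;
        int v_l1_shift = l1_map_addr_space_bits - target_page_bits - v_l1_bits;
        int v_l2_levels = v_l1_shift / V_L2_BITS - 1;

        /* 48 - 12 = 36 bits to cover: a 64-entry L1 (6 bits), two intermediate
         * 10-bit levels, and a final 10-bit level of PageDesc leaves. */
        printf("v_l1_size=%d v_l1_shift=%d v_l2_levels=%d\n",
               v_l1_size, v_l1_shift, v_l2_levels);
        assert(v_l1_bits + (v_l2_levels + 1) * V_L2_BITS
               == l1_map_addr_space_bits - target_page_bits);
        return 0;
    }

which prints v_l1_size=64 v_l1_shift=30 v_l2_levels=2.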
*/ +static uint8_t *encode_sleb128(uint8_t *p, target_long val) +{ + int more, byte; + + do { + byte = val & 0x7f; + val >>= 7; + more = !((val == 0 && (byte & 0x40) == 0) + || (val == -1 && (byte & 0x40) != 0)); + if (more) { + byte |= 0x80; + } + *p++ = byte; + } while (more); + + return p; +} + +/* Decode a signed leb128 sequence at *PP; increment *PP past the + decoded value. Return the decoded value. */ +static target_long decode_sleb128(uint8_t **pp) +{ + uint8_t *p = *pp; + target_long val = 0; + int byte, shift = 0; + + do { + byte = *p++; + val |= (target_ulong)(byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + if (shift < TARGET_LONG_BITS && (byte & 0x40)) { + val |= -(target_ulong)1 << shift; + } + + *pp = p; + return val; +} + +/* Encode the data collected about the instructions while compiling TB. + Place the data at BLOCK, and return the number of bytes consumed. + + The logical table consisits of TARGET_INSN_START_WORDS target_ulong's, + which come from the target's insn_start data, followed by a uintptr_t + which comes from the host pc of the end of the code implementing the insn. + + Each line of the table is encoded as sleb128 deltas from the previous + line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }. + That is, the first column is seeded with the guest pc, the last column + with the host pc, and the middle columns with zeros. */ + +static int encode_search(TranslationBlock *tb, uint8_t *block) +{ + uint8_t *highwater = tcg_ctx.code_gen_highwater; + uint8_t *p = block; + int i, j, n; + + tb->tc_search = block; + + for (i = 0, n = tb->icount; i < n; ++i) { + target_ulong prev; + + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + if (i == 0) { + prev = (j == 0 ? tb->pc : 0); + } else { + prev = tcg_ctx.gen_insn_data[i - 1][j]; + } + p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev); + } + prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]); + p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev); + + /* Test for (pending) buffer overflow. The assumption is that any + one row beginning below the high water mark cannot overrun + the buffer completely. Thus we can test for overflow after + encoding a row without having to check during encoding. */ + if (unlikely(p > highwater)) { + return -1; + } + } + + return p - block; +} + +/* The cpu state corresponding to 'searched_pc' is restored. + * Called with tb_lock held. + */ +static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc) +{ + target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; + uintptr_t host_pc = (uintptr_t)tb->tc_ptr; + CPUArchState *env = cpu->env_ptr; + uint8_t *p = tb->tc_search; + int i, j, num_insns = tb->icount; +#ifdef CONFIG_PROFILER + int64_t ti = profile_getclock(); +#endif + + searched_pc -= GETPC_ADJ; + + if (searched_pc < host_pc) { + return -1; + } + + /* Reconstruct the stored insn data while looking for the point at + which the end of the insn exceeds the searched_pc. */ + for (i = 0; i < num_insns; ++i) { + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + data[j] += decode_sleb128(&p); + } + host_pc += decode_sleb128(&p); + if (host_pc > searched_pc) { + goto found; + } + } + return -1; + + found: + if (tb->cflags & CF_USE_ICOUNT) { + assert(use_icount); + /* Reset the cycle counter to the start of the block. */ + cpu->icount_decr.u16.low += num_insns; + /* Clear the IO flag. 
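The encode_sleb128()/decode_sleb128() pair earlier in this hunk is ordinary signed LEB128, used here for the per-insn deltas of the search table. A small round-trip demo using local copies of the two helpers (int64_t stands in for target_long; sleb_enc/sleb_dec are renamed only so the sketch is self-contained):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t *sleb_enc(uint8_t *p, int64_t val)
    {
        int more, byte;
        do {
            byte = val & 0x7f;
            val >>= 7;
            more = !((val == 0 && (byte & 0x40) == 0)
                     || (val == -1 && (byte & 0x40) != 0));
            if (more) {
                byte |= 0x80;
            }
            *p++ = byte;
        } while (more);
        return p;
    }

    static int64_t sleb_dec(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        int64_t val = 0;
        int byte, shift = 0;
        do {
            byte = *p++;
            val |= (uint64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= -(uint64_t)1 << shift;   /* sign-extend the last group */
        }
        *pp = p;
        return val;
    }

    int main(void)
    {
        uint8_t buf[32], *end = buf;
        /* Typical search-table deltas: small, and often negative. */
        end = sleb_enc(end, 4);
        end = sleb_enc(end, -300);
        const uint8_t *rp = buf;
        assert(sleb_dec(&rp) == 4);
        assert(sleb_dec(&rp) == -300);
        assert(rp == end);
        printf("round trip ok, %d bytes\n", (int)(end - buf));
        return 0;
    }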
*/ + cpu->can_do_io = 0; + } + cpu->icount_decr.u16.low -= i; + restore_state_to_opc(env, tb, data); + +#ifdef CONFIG_PROFILER + tcg_ctx.restore_time += profile_getclock() - ti; + tcg_ctx.restore_count++; +#endif + return 0; +} + +bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr) +{ + TranslationBlock *tb; + bool r = false; + + /* A retaddr of zero is invalid so we really shouldn't have ended + * up here. The target code has likely forgotten to check retaddr + * != 0 before attempting to restore state. We return early to + * avoid blowing up on a recursive tb_lock(). The target must have + * previously survived a failed cpu_restore_state because + * tb_find_pc(0) would have failed anyway. It still should be + * fixed though. + */ + + if (!retaddr) { + return r; + } + + tb_lock(); + tb = tb_find_pc(retaddr); + if (tb) { + cpu_restore_state_from_tb(cpu, tb, retaddr); + if (tb->cflags & CF_NOCACHE) { + /* one-shot translation, invalidate it immediately */ + tb_phys_invalidate(tb, -1); + tb_free(tb); + } + r = true; + } + tb_unlock(); + + return r; +} + +void page_size_init(void) +{ + /* NOTE: we can always suppose that qemu_host_page_size >= + TARGET_PAGE_SIZE */ + qemu_real_host_page_size = getpagesize(); + qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size; + if (qemu_host_page_size == 0) { + qemu_host_page_size = qemu_real_host_page_size; + } + if (qemu_host_page_size < TARGET_PAGE_SIZE) { + qemu_host_page_size = TARGET_PAGE_SIZE; + } + qemu_host_page_mask = -(intptr_t)qemu_host_page_size; +} + +static void page_init(void) +{ + page_size_init(); + page_table_config_init(); + +#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) + { +#ifdef HAVE_KINFO_GETVMMAP + struct kinfo_vmentry *freep; + int i, cnt; + + freep = kinfo_getvmmap(getpid(), &cnt); + if (freep) { + mmap_lock(); + for (i = 0; i < cnt; i++) { + unsigned long startaddr, endaddr; + + startaddr = freep[i].kve_start; + endaddr = freep[i].kve_end; + if (h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } else { +#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS + endaddr = ~0ul; + page_set_flags(startaddr, endaddr, PAGE_RESERVED); +#endif + } + } + } + free(freep); + mmap_unlock(); + } +#else + FILE *f; + + last_brk = (unsigned long)sbrk(0); + + f = fopen("/compat/linux/proc/self/maps", "r"); + if (f) { + mmap_lock(); + + do { + unsigned long startaddr, endaddr; + int n; + + n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); + + if (n == 2 && h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + } else { + endaddr = ~0ul; + } + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } + } while (!feof(f)); + + fclose(f); + mmap_unlock(); + } +#endif + } +#endif +} + +/* If alloc=1: + * Called with tb_lock held for system emulation. + * Called with mmap_lock held for user-mode emulation. + */ +static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) +{ + PageDesc *pd; + void **lp; + int i; + + if (alloc) { + assert_memory_lock(); + } + + /* Level 1. Always allocated. */ + lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); + + /* Level 2..N-1. 
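page_find_alloc() slices the page index into one L1 slot plus a fixed-width slot per lower level. A standalone sketch of that index arithmetic, with the geometry hard-coded to the example worked out after page_table_config_init() above (the real values are computed at runtime):

    #include <stdint.h>
    #include <stdio.h>

    #define V_L2_BITS 10
    #define V_L2_SIZE (1 << V_L2_BITS)

    int main(void)
    {
        int v_l1_size = 64, v_l1_shift = 30, v_l2_levels = 2;   /* example geometry */
        uint64_t page_index = 0x123456789ull;                   /* arbitrary page number */

        printf("L1 slot: %u\n",
               (unsigned)((page_index >> v_l1_shift) & (v_l1_size - 1)));
        /* i counts down exactly like the loop in page_find_alloc(). */
        for (int i = v_l2_levels; i > 0; i--) {
            printf("intermediate level %d slot: %u\n", i,
                   (unsigned)((page_index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)));
        }
        printf("leaf slot: %u\n", (unsigned)(page_index & (V_L2_SIZE - 1)));
        return 0;
    }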
*/ + for (i = v_l2_levels; i > 0; i--) { + void **p = atomic_rcu_read(lp); + + if (p == NULL) { + if (!alloc) { + return NULL; + } + p = g_new0(void *, V_L2_SIZE); + atomic_rcu_set(lp, p); + } + + lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); + } + + pd = atomic_rcu_read(lp); + if (pd == NULL) { + if (!alloc) { + return NULL; + } + pd = g_new0(PageDesc, V_L2_SIZE); + atomic_rcu_set(lp, pd); + } + + return pd + (index & (V_L2_SIZE - 1)); +} + +static inline PageDesc *page_find(tb_page_addr_t index) +{ + return page_find_alloc(index, 0); +} + +#if defined(CONFIG_USER_ONLY) +/* Currently it is not recommended to allocate big chunks of data in + user mode. It will change when a dedicated libc will be used. */ +/* ??? 64-bit hosts ought to have no problem mmaping data outside the + region in which the guest needs to run. Revisit this. */ +#define USE_STATIC_CODE_GEN_BUFFER +#endif + +/* Minimum size of the code gen buffer. This number is randomly chosen, + but not so small that we can't have a fair number of TB's live. */ +#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) + +/* Maximum size of the code gen buffer we'd like to use. Unless otherwise + indicated, this is constrained by the range of direct branches on the + host cpu, as used by the TCG implementation of goto_tb. */ +#if defined(__x86_64__) +# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) +#elif defined(__sparc__) +# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) +#elif defined(__powerpc64__) +# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) +#elif defined(__powerpc__) +# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024) +#elif defined(__aarch64__) +# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) +#elif defined(__s390x__) + /* We have a +- 4GB range on the branches; leave some slop. */ +# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) +#elif defined(__mips__) + /* We have a 256MB branch region, but leave room to make sure the + main executable is also within that region. */ +# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) +#else +# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) +#endif + +#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024) + +#define DEFAULT_CODE_GEN_BUFFER_SIZE \ + (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ + ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) + +static inline size_t size_code_gen_buffer(size_t tb_size) +{ + /* Size the buffer. */ + if (tb_size == 0) { +#ifdef USE_STATIC_CODE_GEN_BUFFER + tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; +#else + /* ??? Needs adjustments. */ + /* ??? If we relax the requirement that CONFIG_USER_ONLY use the + static buffer, we could size this on RESERVED_VA, on the text + segment size of the executable, or continue to use the default. */ + tb_size = (unsigned long)(ram_size / 4); +#endif + } + if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { + tb_size = MIN_CODE_GEN_BUFFER_SIZE; + } + if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { + tb_size = MAX_CODE_GEN_BUFFER_SIZE; + } + return tb_size; +} + +#ifdef __mips__ +/* In order to use J and JAL within the code_gen_buffer, we require + that the buffer not cross a 256MB boundary. */ +static inline bool cross_256mb(void *addr, size_t size) +{ + return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; +} + +/* We weren't able to allocate a buffer without crossing that boundary, + so make do with the larger portion of the buffer that doesn't cross. + Returns the new base of the buffer, and adjusts code_gen_buffer_size. 
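The MIPS-only cross_256mb() test above is worth unpacking: XOR-ing the first address with the one-past-the-end address leaves bits set exactly where the two differ, so masking off the low 28 bits is non-zero iff they sit in different 256 MB windows. A quick standalone check (the two sample regions are arbitrary):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same conservative test as cross_256mb(): non-zero iff 'addr' and
     * 'addr + size' fall in different 256 MB (2^28) windows. */
    static bool crosses_256mb(uintptr_t addr, size_t size)
    {
        return ((addr ^ (addr + size)) & ~(uintptr_t)0x0fffffff) != 0;
    }

    int main(void)
    {
        printf("%d\n", crosses_256mb(0x10000000, 0x08000000));  /* 0: stays inside */
        printf("%d\n", crosses_256mb(0x1c000000, 0x08000000));  /* 1: crosses 0x20000000 */
        return 0;
    }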
*/ +static inline void *split_cross_256mb(void *buf1, size_t size1) +{ + void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); + size_t size2 = buf1 + size1 - buf2; + + size1 = buf2 - buf1; + if (size1 < size2) { + size1 = size2; + buf1 = buf2; + } + + tcg_ctx.code_gen_buffer_size = size1; + return buf1; +} +#endif + +#ifdef USE_STATIC_CODE_GEN_BUFFER +static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] + __attribute__((aligned(CODE_GEN_ALIGN))); + +# ifdef _WIN32 +static inline void do_protect(void *addr, long size, int prot) +{ + DWORD old_protect; + VirtualProtect(addr, size, prot, &old_protect); +} + +static inline void map_exec(void *addr, long size) +{ + do_protect(addr, size, PAGE_EXECUTE_READWRITE); +} + +static inline void map_none(void *addr, long size) +{ + do_protect(addr, size, PAGE_NOACCESS); +} +# else +static inline void do_protect(void *addr, long size, int prot) +{ + uintptr_t start, end; + + start = (uintptr_t)addr; + start &= qemu_real_host_page_mask; + + end = (uintptr_t)addr + size; + end = ROUND_UP(end, qemu_real_host_page_size); + + mprotect((void *)start, end - start, prot); +} + +static inline void map_exec(void *addr, long size) +{ + do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); +} + +static inline void map_none(void *addr, long size) +{ + do_protect(addr, size, PROT_NONE); +} +# endif /* WIN32 */ + +static inline void *alloc_code_gen_buffer(void) +{ + void *buf = static_code_gen_buffer; + size_t full_size, size; + + /* The size of the buffer, rounded down to end on a page boundary. */ + full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer)) + & qemu_real_host_page_mask) - (uintptr_t)buf; + + /* Reserve a guard page. */ + size = full_size - qemu_real_host_page_size; + + /* Honor a command-line option limiting the size of the buffer. */ + if (size > tcg_ctx.code_gen_buffer_size) { + size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size) + & qemu_real_host_page_mask) - (uintptr_t)buf; + } + tcg_ctx.code_gen_buffer_size = size; + +#ifdef __mips__ + if (cross_256mb(buf, size)) { + buf = split_cross_256mb(buf, size); + size = tcg_ctx.code_gen_buffer_size; + } +#endif + + map_exec(buf, size); + map_none(buf + size, qemu_real_host_page_size); + qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); + + return buf; +} +#elif defined(_WIN32) +static inline void *alloc_code_gen_buffer(void) +{ + size_t size = tcg_ctx.code_gen_buffer_size; + void *buf1, *buf2; + + /* Perform the allocation in two steps, so that the guard page + is reserved but uncommitted. */ + buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size, + MEM_RESERVE, PAGE_NOACCESS); + if (buf1 != NULL) { + buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE); + assert(buf1 == buf2); + } + + return buf1; +} +#else +static inline void *alloc_code_gen_buffer(void) +{ + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + uintptr_t start = 0; + size_t size = tcg_ctx.code_gen_buffer_size; + void *buf; + + /* Constrain the position of the buffer based on the host cpu. + Note that these addresses are chosen in concert with the + addresses assigned in the relevant linker script file. */ +# if defined(__PIE__) || defined(__PIC__) + /* Don't bother setting a preferred location if we're building + a position-independent executable. We're more likely to get + an address near the main executable if we let the kernel + choose the address. */ +# elif defined(__x86_64__) && defined(MAP_32BIT) + /* Force the memory down into low memory with the executable. 
+ Leave the choice of exact location with the kernel. */ + flags |= MAP_32BIT; + /* Cannot expect to map more than 800MB in low memory. */ + if (size > 800u * 1024 * 1024) { + tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024; + } +# elif defined(__sparc__) + start = 0x40000000ul; +# elif defined(__s390x__) + start = 0x90000000ul; +# elif defined(__mips__) +# if _MIPS_SIM == _ABI64 + start = 0x128000000ul; +# else + start = 0x08000000ul; +# endif +# endif + + buf = mmap((void *)start, size + qemu_real_host_page_size, + PROT_NONE, flags, -1, 0); + if (buf == MAP_FAILED) { + return NULL; + } + +#ifdef __mips__ + if (cross_256mb(buf, size)) { + /* Try again, with the original still mapped, to avoid re-acquiring + that 256mb crossing. This time don't specify an address. */ + size_t size2; + void *buf2 = mmap(NULL, size + qemu_real_host_page_size, + PROT_NONE, flags, -1, 0); + switch ((int)(buf2 != MAP_FAILED)) { + case 1: + if (!cross_256mb(buf2, size)) { + /* Success! Use the new buffer. */ + munmap(buf, size + qemu_real_host_page_size); + break; + } + /* Failure. Work with what we had. */ + munmap(buf2, size + qemu_real_host_page_size); + /* fallthru */ + default: + /* Split the original buffer. Free the smaller half. */ + buf2 = split_cross_256mb(buf, size); + size2 = tcg_ctx.code_gen_buffer_size; + if (buf == buf2) { + munmap(buf + size2 + qemu_real_host_page_size, size - size2); + } else { + munmap(buf, size - size2); + } + size = size2; + break; + } + buf = buf2; + } +#endif + + /* Make the final buffer accessible. The guard page at the end + will remain inaccessible with PROT_NONE. */ + mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC); + + /* Request large pages for the buffer. */ + qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); + + return buf; +} +#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ + +static inline void code_gen_alloc(size_t tb_size) +{ + tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size); + tcg_ctx.code_gen_buffer = alloc_code_gen_buffer(); + if (tcg_ctx.code_gen_buffer == NULL) { + fprintf(stderr, "Could not allocate dynamic translator buffer\n"); + exit(1); + } + + /* size this conservatively -- realloc later if needed */ + tcg_ctx.tb_ctx.tbs_size = + tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8; + if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) { + tcg_ctx.tb_ctx.tbs_size = 64 * 1024; + } + tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size); + + qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock); +} + +static void tb_htable_init(void) +{ + unsigned int mode = QHT_MODE_AUTO_RESIZE; + + qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode); +} + +/* Must be called before using the QEMU cpus. 'tb_size' is the size + (in bytes) allocated to the translation buffer. Zero means default + size. */ +void tcg_exec_init(unsigned long tb_size) +{ + cpu_gen_init(); + page_init(); + tb_htable_init(); + code_gen_alloc(tb_size); +#if defined(CONFIG_SOFTMMU) + /* There's no guest base to take into account, so go ahead and + initialize the prologue now. */ + tcg_prologue_init(&tcg_ctx); +#endif +} + +bool tcg_enabled(void) +{ + return tcg_ctx.code_gen_buffer != NULL; +} + +/* + * Allocate a new translation block. Flush the translation buffer if + * too many translation blocks or too much generated code. + * + * Called with tb_lock held. 
+ */ +static TranslationBlock *tb_alloc(target_ulong pc) +{ + TranslationBlock *tb; + TBContext *ctx; + + assert_tb_locked(); + + tb = tcg_tb_alloc(&tcg_ctx); + if (unlikely(tb == NULL)) { + return NULL; + } + ctx = &tcg_ctx.tb_ctx; + if (unlikely(ctx->nb_tbs == ctx->tbs_size)) { + ctx->tbs_size *= 2; + ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size); + } + ctx->tbs[ctx->nb_tbs++] = tb; + return tb; +} + +/* Called with tb_lock held. */ +void tb_free(TranslationBlock *tb) +{ + assert_tb_locked(); + + /* In practice this is mostly used for single use temporary TB + Ignore the hard cases and just back up if this TB happens to + be the last one generated. */ + if (tcg_ctx.tb_ctx.nb_tbs > 0 && + tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) { + size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize); + + tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size; + tcg_ctx.tb_ctx.nb_tbs--; + } +} + +static inline void invalidate_page_bitmap(PageDesc *p) +{ +#ifdef CONFIG_SOFTMMU + g_free(p->code_bitmap); + p->code_bitmap = NULL; + p->code_write_count = 0; +#endif +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. */ +static void page_flush_tb_1(int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + pd[i].first_tb = NULL; + invalidate_page_bitmap(pd + i); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(level - 1, pp + i); + } + } +} + +static void page_flush_tb(void) +{ + int i, l1_sz = v_l1_size; + + for (i = 0; i < l1_sz; i++) { + page_flush_tb_1(v_l2_levels, l1_map + i); + } +} + +/* flush all the translation blocks */ +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + tb_lock(); + + /* If it is already been done on request of another CPU, + * just retry. + */ + if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) { + goto done; + } + +#if defined(DEBUG_TB_FLUSH) + printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", + (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer), + tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ? 
+ ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) / + tcg_ctx.tb_ctx.nb_tbs : 0); +#endif + if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) + > tcg_ctx.code_gen_buffer_size) { + cpu_abort(cpu, "Internal error: code buffer overflow\n"); + } + + CPU_FOREACH(cpu) { + int i; + + for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) { + atomic_set(&cpu->tb_jmp_cache[i], NULL); + } + } + + tcg_ctx.tb_ctx.nb_tbs = 0; + qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE); + page_flush_tb(); + + tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; + /* XXX: flush processor icache at this point if cache flush is + expensive */ + atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count, + tcg_ctx.tb_ctx.tb_flush_count + 1); + +done: + tb_unlock(); +} + +void tb_flush(CPUState *cpu) +{ + if (tcg_enabled()) { + unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count); + async_safe_run_on_cpu(cpu, do_tb_flush, + RUN_ON_CPU_HOST_INT(tb_flush_count)); + } +} + +#ifdef DEBUG_TB_CHECK + +static void +do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp) +{ + TranslationBlock *tb = p; + target_ulong addr = *(target_ulong *)userp; + + if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { + printf("ERROR invalidate: address=" TARGET_FMT_lx + " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); + } +} + +/* verify that all the pages have correct rights for code + * + * Called with tb_lock held. + */ +static void tb_invalidate_check(target_ulong address) +{ + address &= TARGET_PAGE_MASK; + qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address); +} + +static void +do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp) +{ + TranslationBlock *tb = p; + int flags1, flags2; + + flags1 = page_get_flags(tb->pc); + flags2 = page_get_flags(tb->pc + tb->size - 1); + if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { + printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", + (long)tb->pc, tb->size, flags1, flags2); + } +} + +/* verify that all the pages have correct rights for code */ +static void tb_page_check(void) +{ + qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL); +} + +#endif + +static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) +{ + TranslationBlock *tb1; + unsigned int n1; + + for (;;) { + tb1 = *ptb; + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); + if (tb1 == tb) { + *ptb = tb1->page_next[n1]; + break; + } + ptb = &tb1->page_next[n1]; + } +} + +/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */ +static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n) +{ + TranslationBlock *tb1; + uintptr_t *ptb, ntb; + unsigned int n1; + + ptb = &tb->jmp_list_next[n]; + if (*ptb) { + /* find tb(n) in circular list */ + for (;;) { + ntb = *ptb; + n1 = ntb & 3; + tb1 = (TranslationBlock *)(ntb & ~3); + if (n1 == n && tb1 == tb) { + break; + } + if (n1 == 2) { + ptb = &tb1->jmp_list_first; + } else { + ptb = &tb1->jmp_list_next[n1]; + } + } + /* now we can suppress tb(n) from the list */ + *ptb = tb->jmp_list_next[n]; + + tb->jmp_list_next[n] = (uintptr_t)NULL; + } +} + +/* reset the jump entry 'n' of a TB so that it is not chained to + another TB */ +static inline void tb_reset_jump(TranslationBlock *tb, int n) +{ + uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]); + tb_set_jmp_target(tb, n, addr); +} + +/* remove any jumps to the TB */ +static inline void tb_jmp_unlink(TranslationBlock *tb) 
+{ + TranslationBlock *tb1; + uintptr_t *ptb, ntb; + unsigned int n1; + + ptb = &tb->jmp_list_first; + for (;;) { + ntb = *ptb; + n1 = ntb & 3; + tb1 = (TranslationBlock *)(ntb & ~3); + if (n1 == 2) { + break; + } + tb_reset_jump(tb1, n1); + *ptb = tb1->jmp_list_next[n1]; + tb1->jmp_list_next[n1] = (uintptr_t)NULL; + } +} + +/* invalidate one TB + * + * Called with tb_lock held. + */ +void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) +{ + CPUState *cpu; + PageDesc *p; + uint32_t h; + tb_page_addr_t phys_pc; + + assert_tb_locked(); + + atomic_set(&tb->invalid, true); + + /* remove the TB from the hash list */ + phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + h = tb_hash_func(phys_pc, tb->pc, tb->flags); + qht_remove(&tcg_ctx.tb_ctx.htable, tb, h); + + /* remove the TB from the page list */ + if (tb->page_addr[0] != page_addr) { + p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); + tb_page_remove(&p->first_tb, tb); + invalidate_page_bitmap(p); + } + if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { + p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); + tb_page_remove(&p->first_tb, tb); + invalidate_page_bitmap(p); + } + + /* remove the TB from the hash list */ + h = tb_jmp_cache_hash_func(tb->pc); + CPU_FOREACH(cpu) { + if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { + atomic_set(&cpu->tb_jmp_cache[h], NULL); + } + } + + /* suppress this TB from the two jump lists */ + tb_remove_from_jmp_list(tb, 0); + tb_remove_from_jmp_list(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb_jmp_unlink(tb); + + tcg_ctx.tb_ctx.tb_phys_invalidate_count++; +} + +#ifdef CONFIG_SOFTMMU +static void build_page_bitmap(PageDesc *p) +{ + int n, tb_start, tb_end; + TranslationBlock *tb; + + p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); + + tb = p->first_tb; + while (tb != NULL) { + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->pc & ~TARGET_PAGE_MASK; + tb_end = tb_start + tb->size; + if (tb_end > TARGET_PAGE_SIZE) { + tb_end = TARGET_PAGE_SIZE; + } + } else { + tb_start = 0; + tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); + tb = tb->page_next[n]; + } +} +#endif + +/* add the tb in the target page and protect it if necessary + * + * Called with mmap_lock held for user-mode emulation. 
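A recurring trick in this file, visible in tb_jmp_unlink() above and again in tb_alloc_page() just below: list links such as page_next[] and first_tb carry a small index in their two low bits, relying on the pointee being at least 4-byte aligned. A minimal standalone sketch (ToyTB is a stand-in type; only its alignment matters):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {            /* stand-in; TranslationBlock is aligned too */
        int dummy;
    } ToyTB;

    int main(void)
    {
        static ToyTB tb;        /* &tb is at least 4-byte aligned */
        unsigned n = 1;         /* which of the TB's two pages this link is for */

        /* Pack: pointer | index, like p->first_tb = (TranslationBlock *)((uintptr_t)tb | n). */
        uintptr_t tagged = (uintptr_t)&tb | n;

        /* Unpack: the low two bits are the index, the rest is the pointer. */
        unsigned idx = tagged & 3;
        ToyTB *ptr = (ToyTB *)(tagged & ~(uintptr_t)3);

        printf("idx=%u ptr_ok=%d\n", idx, ptr == &tb);
        return 0;
    }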
+ */ +static inline void tb_alloc_page(TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ + PageDesc *p; +#ifndef CONFIG_USER_ONLY + bool page_already_protected; +#endif + + assert_memory_lock(); + + tb->page_addr[n] = page_addr; + p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); + tb->page_next[n] = p->first_tb; +#ifndef CONFIG_USER_ONLY + page_already_protected = p->first_tb != NULL; +#endif + p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); + invalidate_page_bitmap(p); + +#if defined(CONFIG_USER_ONLY) + if (p->flags & PAGE_WRITE) { + target_ulong addr; + PageDesc *p2; + int prot; + + /* force the host page as non writable (writes will have a + page fault + mprotect overhead) */ + page_addr &= qemu_host_page_mask; + prot = 0; + for (addr = page_addr; addr < page_addr + qemu_host_page_size; + addr += TARGET_PAGE_SIZE) { + + p2 = page_find(addr >> TARGET_PAGE_BITS); + if (!p2) { + continue; + } + prot |= p2->flags; + p2->flags &= ~PAGE_WRITE; + } + mprotect(g2h(page_addr), qemu_host_page_size, + (prot & PAGE_BITS) & ~PAGE_WRITE); +#ifdef DEBUG_TB_INVALIDATE + printf("protecting code page: 0x" TARGET_FMT_lx "\n", + page_addr); +#endif + } +#else + /* if some code is already present, then the pages are already + protected. So we handle the case where only the first TB is + allocated in a physical page */ + if (!page_already_protected) { + tlb_protect_code(page_addr); + } +#endif +} + +/* add a new TB and link it to the physical page tables. phys_page2 is + * (-1) to indicate that only one page contains the TB. + * + * Called with mmap_lock held for user-mode emulation. + */ +static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2) +{ + uint32_t h; + + assert_memory_lock(); + + /* add in the page list */ + tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); + if (phys_page2 != -1) { + tb_alloc_page(tb, 1, phys_page2); + } else { + tb->page_addr[1] = -1; + } + + /* add in the hash table */ + h = tb_hash_func(phys_pc, tb->pc, tb->flags); + qht_insert(&tcg_ctx.tb_ctx.htable, tb, h); + +#ifdef DEBUG_TB_CHECK + tb_page_check(); +#endif +} + +/* Called with mmap_lock held for user mode emulation. */ +TranslationBlock *tb_gen_code(CPUState *cpu, + target_ulong pc, target_ulong cs_base, + uint32_t flags, int cflags) +{ + CPUArchState *env = cpu->env_ptr; + TranslationBlock *tb; + tb_page_addr_t phys_pc, phys_page2; + target_ulong virt_page2; + tcg_insn_unit *gen_code_buf; + int gen_code_size, search_size; +#ifdef CONFIG_PROFILER + int64_t ti; +#endif + assert_memory_lock(); + + phys_pc = get_page_addr_code(env, pc); + if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) { + cflags |= CF_USE_ICOUNT; + } + + tb = tb_alloc(pc); + if (unlikely(!tb)) { + buffer_overflow: + /* flush must be done */ + tb_flush(cpu); + mmap_unlock(); + /* Make the execution loop process the flush as soon as possible. 
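tb_link_page() above only registers the TB on a second page when the translated guest code really straddles a page boundary; tb_gen_code() further down detects that by comparing the page of the first and of the last byte. The check in isolation (4 KiB pages and the sample values are assumptions for the example):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(uint64_t)((1 << TARGET_PAGE_BITS) - 1))

    /* True if a block of 'size' bytes starting at 'pc' touches two pages,
     * i.e. phys_page2 would not stay -1 in tb_link_page() terms. */
    static bool spans_two_pages(uint64_t pc, uint64_t size)
    {
        return (pc & TARGET_PAGE_MASK) != ((pc + size - 1) & TARGET_PAGE_MASK);
    }

    int main(void)
    {
        printf("%d\n", spans_two_pages(0x1000, 0x20));   /* 0: fits in one page */
        printf("%d\n", spans_two_pages(0x1ff8, 0x20));   /* 1: spills into the next page */
        return 0;
    }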
*/ + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cpu); + } + + gen_code_buf = tcg_ctx.code_gen_ptr; + tb->tc_ptr = gen_code_buf; + tb->pc = pc; + tb->cs_base = cs_base; + tb->flags = flags; + tb->cflags = cflags; + tb->invalid = false; + +#ifdef CONFIG_PROFILER + tcg_ctx.tb_count1++; /* includes aborted translations because of + exceptions */ + ti = profile_getclock(); +#endif + + tcg_func_start(&tcg_ctx); + + tcg_ctx.cpu = ENV_GET_CPU(env); + gen_intermediate_code(env, tb); + tcg_ctx.cpu = NULL; + + trace_translate_block(tb, tb->pc, tb->tc_ptr); + + /* generate machine code */ + tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; + tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; + tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset; +#ifdef USE_DIRECT_JUMP + tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset; + tcg_ctx.tb_jmp_target_addr = NULL; +#else + tcg_ctx.tb_jmp_insn_offset = NULL; + tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr; +#endif + +#ifdef CONFIG_PROFILER + tcg_ctx.tb_count++; + tcg_ctx.interm_time += profile_getclock() - ti; + tcg_ctx.code_time -= profile_getclock(); +#endif + + /* ??? Overflow could be handled better here. In particular, we + don't need to re-do gen_intermediate_code, nor should we re-do + the tcg optimization currently hidden inside tcg_gen_code. All + that should be required is to flush the TBs, allocate a new TB, + re-initialize it per above, and re-do the actual code generation. */ + gen_code_size = tcg_gen_code(&tcg_ctx, tb); + if (unlikely(gen_code_size < 0)) { + goto buffer_overflow; + } + search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); + if (unlikely(search_size < 0)) { + goto buffer_overflow; + } + +#ifdef CONFIG_PROFILER + tcg_ctx.code_time += profile_getclock(); + tcg_ctx.code_in_len += tb->size; + tcg_ctx.code_out_len += gen_code_size; + tcg_ctx.search_out_len += search_size; +#endif + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && + qemu_log_in_addr_range(tb->pc)) { + qemu_log_lock(); + qemu_log("OUT: [size=%d]\n", gen_code_size); + log_disas(tb->tc_ptr, gen_code_size); + qemu_log("\n"); + qemu_log_flush(); + qemu_log_unlock(); + } +#endif + + tcg_ctx.code_gen_ptr = (void *) + ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, + CODE_GEN_ALIGN); + + /* init jump list */ + assert(((uintptr_t)tb & 3) == 0); + tb->jmp_list_first = (uintptr_t)tb | 2; + tb->jmp_list_next[0] = (uintptr_t)NULL; + tb->jmp_list_next[1] = (uintptr_t)NULL; + + /* init original jump addresses wich has been set during tcg_gen_code() */ + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 0); + } + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 1); + } + + /* check next page if needed */ + virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; + phys_page2 = -1; + if ((pc & TARGET_PAGE_MASK) != virt_page2) { + phys_page2 = get_page_addr_code(env, virt_page2); + } + /* As long as consistency of the TB stuff is provided by tb_lock in user + * mode and is implicit in single-threaded softmmu emulation, no explicit + * memory barrier is required before tb_link_page() makes the TB visible + * through the physical hash table and physical page list. + */ + tb_link_page(tb, phys_pc, phys_page2); + return tb; +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. 
+ * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + * + * Called with mmap_lock held for user-mode emulation, grabs tb_lock + * Called with tb_lock held for system-mode emulation + */ +static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end) +{ + while (start < end) { + tb_invalidate_phys_page_range(start, end, 0); + start &= TARGET_PAGE_MASK; + start += TARGET_PAGE_SIZE; + } +} + +#ifdef CONFIG_SOFTMMU +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +{ + assert_tb_locked(); + tb_invalidate_phys_range_1(start, end); +} +#else +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) +{ + assert_memory_lock(); + tb_lock(); + tb_invalidate_phys_range_1(start, end); + tb_unlock(); +} +#endif +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end must refer to the *same* physical page. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + * + * Called with tb_lock/mmap_lock held for user-mode emulation + * Called with tb_lock held for system-mode emulation + */ +void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access) +{ + TranslationBlock *tb, *tb_next; +#if defined(TARGET_HAS_PRECISE_SMC) + CPUState *cpu = current_cpu; + CPUArchState *env = NULL; +#endif + tb_page_addr_t tb_start, tb_end; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + int current_tb_not_found = is_cpu_write_access; + TranslationBlock *current_tb = NULL; + int current_tb_modified = 0; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + uint32_t current_flags = 0; +#endif /* TARGET_HAS_PRECISE_SMC */ + + assert_memory_lock(); + assert_tb_locked(); + + p = page_find(start >> TARGET_PAGE_BITS); + if (!p) { + return; + } +#if defined(TARGET_HAS_PRECISE_SMC) + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + + /* we remove all the TBs in the range [start, end[ */ + /* XXX: see if in some cases it could be faster to invalidate all + the code */ + tb = p->first_tb; + while (tb != NULL) { + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); + tb_next = tb->page_next[n]; + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + tb_end = tb_start + tb->size; + } else { + tb_start = tb->page_addr[1]; + tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = 0; + current_tb = NULL; + if (cpu->mem_io_pc) { + /* now we have a real cpu fault */ + current_tb = tb_find_pc(cpu->mem_io_pc); + } + } + if (current_tb == tb && + (current_tb->cflags & CF_COUNT_MASK) != 1) { + /* If we are modifying the current TB, we must stop + its execution. 
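The `!(tb_end <= start || tb_start >= end)` condition in the invalidation loop above is the usual overlap predicate for half-open ranges [start, end[. Spelled out as a standalone helper with two sample queries (values chosen for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if [a_start, a_end[ and [b_start, b_end[ share at least one byte. */
    static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
                               uint64_t b_start, uint64_t b_end)
    {
        return !(a_end <= b_start || a_start >= b_end);
    }

    int main(void)
    {
        /* A TB covering [0x1000, 0x1040[ vs. a write to [0x103c, 0x1044[ ... */
        printf("%d\n", ranges_overlap(0x1000, 0x1040, 0x103c, 0x1044));  /* 1 */
        /* ... and vs. a write that starts exactly where the TB ends. */
        printf("%d\n", ranges_overlap(0x1000, 0x1040, 0x1040, 0x1048));  /* 0 */
        return 0;
    }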
We could be more precise by checking + that the modification is after the current PC, but it + would require a specialized function to partially + restore the CPU state */ + + current_tb_modified = 1; + cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc); + cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, + ¤t_flags); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate(tb, -1); + } + tb = tb_next; + } +#if !defined(CONFIG_USER_ONLY) + /* if no code remaining, no need to continue to use slow writes */ + if (!p->first_tb) { + invalidate_page_bitmap(p); + tlb_unprotect_code(start); + } +#endif +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* we generate a block containing just the instruction + modifying the memory. It will ensure that it cannot modify + itself */ + tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); + cpu_loop_exit_noexc(cpu); + } +#endif +} + +#ifdef CONFIG_SOFTMMU +/* len must be <= 8 and start must be a multiple of len. + * Called via softmmu_template.h when code areas are written to with + * iothread mutex not held. + */ +void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) +{ + PageDesc *p; + +#if 0 + if (1) { + qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", + cpu_single_env->mem_io_vaddr, len, + cpu_single_env->eip, + cpu_single_env->eip + + (intptr_t)cpu_single_env->segs[R_CS].base); + } +#endif + assert_memory_lock(); + + p = page_find(start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + if (!p->code_bitmap && + ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { + /* build code bitmap. FIXME: writes should be protected by + * tb_lock, reads by tb_lock or RCU. + */ + build_page_bitmap(p); + } + if (p->code_bitmap) { + unsigned int nr; + unsigned long b; + + nr = start & ~TARGET_PAGE_MASK; + b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); + if (b & ((1 << len) - 1)) { + goto do_invalidate; + } + } else { + do_invalidate: + tb_invalidate_phys_page_range(start, start + len, 1); + } +} +#else +/* Called with mmap_lock held. If pc is not 0 then it indicates the + * host PC of the faulting store instruction that caused this invalidate. + * Returns true if the caller needs to abort execution of the current + * TB (because it was modified by this store and the guest CPU has + * precise-SMC semantics). + */ +static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) +{ + TranslationBlock *tb; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = NULL; + CPUState *cpu = current_cpu; + CPUArchState *env = NULL; + int current_tb_modified = 0; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + uint32_t current_flags = 0; +#endif + + assert_memory_lock(); + + addr &= TARGET_PAGE_MASK; + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return false; + } + + tb_lock(); + tb = p->first_tb; +#ifdef TARGET_HAS_PRECISE_SMC + if (tb && pc != 0) { + current_tb = tb_find_pc(pc); + } + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + while (tb != NULL) { + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb == tb && + (current_tb->cflags & CF_COUNT_MASK) != 1) { + /* If we are modifying the current TB, we must stop + its execution. 
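The bitmap probe in tb_invalidate_phys_page_fast() above shifts the word holding the first touched bit down by the in-word offset and masks off 'len' bits, so only writes that land on bytes some TB was translated from take the slow invalidation path. A cut-down standalone version (one page of bits, no use-count threshold; mark_code()/write_hits_code() are illustrative names, and as in the original the write is assumed to be at most 8 bytes and aligned to its own size, so it never crosses a word):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

    static unsigned long code_bitmap[PAGE_SIZE / BITS_PER_LONG];

    static void mark_code(unsigned start, unsigned len)   /* cf. build_page_bitmap() */
    {
        for (unsigned i = start; i < start + len; i++) {
            code_bitmap[BIT_WORD(i)] |= 1ul << (i % BITS_PER_LONG);
        }
    }

    /* Same test as tb_invalidate_phys_page_fast(): does a write of 'len'
     * bytes at page offset 'nr' hit any byte that was translated? */
    static bool write_hits_code(unsigned nr, int len)
    {
        unsigned long b = code_bitmap[BIT_WORD(nr)] >> (nr % BITS_PER_LONG);
        return (b & ((1ul << len) - 1)) != 0;
    }

    int main(void)
    {
        mark_code(0x100, 0x40);                      /* pretend a TB covers [0x100, 0x140[ */
        printf("%d\n", write_hits_code(0x13c, 4));   /* 1: 4-byte write overlaps the TB */
        printf("%d\n", write_hits_code(0x200, 4));   /* 0: clean data write elsewhere */
        return 0;
    }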
We could be more precise by checking + that the modification is after the current PC, but it + would require a specialized function to partially + restore the CPU state */ + + current_tb_modified = 1; + cpu_restore_state_from_tb(cpu, current_tb, pc); + cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, + ¤t_flags); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate(tb, addr); + tb = tb->page_next[n]; + } + p->first_tb = NULL; +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* we generate a block containing just the instruction + modifying the memory. It will ensure that it cannot modify + itself */ + tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); + /* tb_lock will be reset after cpu_loop_exit_noexc longjmps + * back into the cpu_exec loop. */ + return true; + } +#endif + tb_unlock(); + + return false; +} +#endif + +/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < + tb[1].tc_ptr. Return NULL if not found */ +static TranslationBlock *tb_find_pc(uintptr_t tc_ptr) +{ + int m_min, m_max, m; + uintptr_t v; + TranslationBlock *tb; + + if (tcg_ctx.tb_ctx.nb_tbs <= 0) { + return NULL; + } + if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer || + tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) { + return NULL; + } + /* binary search (cf Knuth) */ + m_min = 0; + m_max = tcg_ctx.tb_ctx.nb_tbs - 1; + while (m_min <= m_max) { + m = (m_min + m_max) >> 1; + tb = tcg_ctx.tb_ctx.tbs[m]; + v = (uintptr_t)tb->tc_ptr; + if (v == tc_ptr) { + return tb; + } else if (tc_ptr < v) { + m_max = m - 1; + } else { + m_min = m + 1; + } + } + return tcg_ctx.tb_ctx.tbs[m_max]; +} + +#if !defined(CONFIG_USER_ONLY) +void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) +{ + ram_addr_t ram_addr; + MemoryRegion *mr; + hwaddr l = 1; + + rcu_read_lock(); + mr = address_space_translate(as, addr, &addr, &l, false); + if (!(memory_region_is_ram(mr) + || memory_region_is_romd(mr))) { + rcu_read_unlock(); + return; + } + ram_addr = memory_region_get_ram_addr(mr) + addr; + tb_lock(); + tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); + tb_unlock(); + rcu_read_unlock(); +} +#endif /* !defined(CONFIG_USER_ONLY) */ + +/* Called with tb_lock held. */ +void tb_check_watchpoint(CPUState *cpu) +{ + TranslationBlock *tb; + + tb = tb_find_pc(cpu->mem_io_pc); + if (tb) { + /* We can use retranslation to find the PC. */ + cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); + tb_phys_invalidate(tb, -1); + } else { + /* The exception probably happened in a helper. The CPU state should + have been saved before calling it. Fetch the PC from there. */ + CPUArchState *env = cpu->env_ptr; + target_ulong pc, cs_base; + tb_page_addr_t addr; + uint32_t flags; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + addr = get_page_addr_code(env, pc); + tb_invalidate_phys_range(addr, addr + 1); + } +} + +#ifndef CONFIG_USER_ONLY +/* in deterministic execution mode, instructions doing device I/Os + * must be at the end of the TB. + * + * Called by softmmu_template.h, with iothread mutex not held. 
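tb_find_pc() above can use a plain binary search because tbs[] is filled in the order blocks are carved out of the linearly advancing code buffer, so it is sorted by tc_ptr. The search pattern in isolation, returning the greatest start address <= the probed host PC (array contents are made up for the demo):

    #include <stdint.h>
    #include <stdio.h>

    /* Index of the greatest element <= key; assumes 'a' is sorted ascending
     * and a[0] <= key (the caller has already range-checked key, as
     * tb_find_pc() does against the code buffer bounds). */
    static int find_le(const uintptr_t *a, int n, uintptr_t key)
    {
        int lo = 0, hi = n - 1;
        while (lo <= hi) {
            int m = (lo + hi) >> 1;
            if (a[m] == key) {
                return m;
            } else if (key < a[m]) {
                hi = m - 1;
            } else {
                lo = m + 1;
            }
        }
        return hi;   /* m_max in tb_find_pc() terms */
    }

    int main(void)
    {
        /* Pretend these are the tc_ptr start addresses of four TBs. */
        uintptr_t starts[] = { 0x1000, 0x1200, 0x1500, 0x1900 };
        printf("%d\n", find_le(starts, 4, 0x1234));   /* 1: inside the second TB */
        printf("%d\n", find_le(starts, 4, 0x1500));   /* 2: exactly at a TB start */
        return 0;
    }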
+ */ +void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) +{ +#if defined(TARGET_MIPS) || defined(TARGET_SH4) + CPUArchState *env = cpu->env_ptr; +#endif + TranslationBlock *tb; + uint32_t n, cflags; + target_ulong pc, cs_base; + uint32_t flags; + + tb_lock(); + tb = tb_find_pc(retaddr); + if (!tb) { + cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", + (void *)retaddr); + } + n = cpu->icount_decr.u16.low + tb->icount; + cpu_restore_state_from_tb(cpu, tb, retaddr); + /* Calculate how many instructions had been executed before the fault + occurred. */ + n = n - cpu->icount_decr.u16.low; + /* Generate a new TB ending on the I/O insn. */ + n++; + /* On MIPS and SH, delay slot instructions can only be restarted if + they were already the first instruction in the TB. If this is not + the first instruction in a TB then re-execute the preceding + branch. */ +#if defined(TARGET_MIPS) + if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { + env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); + cpu->icount_decr.u16.low++; + env->hflags &= ~MIPS_HFLAG_BMASK; + } +#elif defined(TARGET_SH4) + if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 + && n > 1) { + env->pc -= 2; + cpu->icount_decr.u16.low++; + env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); + } +#endif + /* This should never happen. */ + if (n > CF_COUNT_MASK) { + cpu_abort(cpu, "TB too big during recompile"); + } + + cflags = n | CF_LAST_IO; + pc = tb->pc; + cs_base = tb->cs_base; + flags = tb->flags; + tb_phys_invalidate(tb, -1); + if (tb->cflags & CF_NOCACHE) { + if (tb->orig_tb) { + /* Invalidate original TB if this TB was generated in + * cpu_exec_nocache() */ + tb_phys_invalidate(tb->orig_tb, -1); + } + tb_free(tb); + } + /* FIXME: In theory this could raise an exception. In practice + we have already translated the block once so it's probably ok. */ + tb_gen_code(cpu, pc, cs_base, flags, cflags); + + /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not + * the first in the TB) then we end up generating a whole new TB and + * repeating the fault, which is horribly inefficient. + * Better would be to execute just this insn uncached, or generate a + * second new TB. + * + * cpu_loop_exit_noexc will longjmp back to cpu_exec where the + * tb_lock gets reset. + */ + cpu_loop_exit_noexc(cpu); +} + +void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) +{ + unsigned int i; + + /* Discard jump cache entries for any tb which might potentially + overlap the flushed page. */ + i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); + memset(&cpu->tb_jmp_cache[i], 0, + TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); + + i = tb_jmp_cache_hash_page(addr); + memset(&cpu->tb_jmp_cache[i], 0, + TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); +} + +static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf, + struct qht_stats hst) +{ + uint32_t hgram_opts; + size_t hgram_bins; + char *hgram; + + if (!hst.head_buckets) { + return; + } + cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n", + hst.used_head_buckets, hst.head_buckets, + (double)hst.used_head_buckets / hst.head_buckets * 100); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; + if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { + hgram_opts |= QDIST_PR_NODECIMAL; + } + hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); + cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. 
Histogram: %s\n", + qdist_avg(&hst.occupancy) * 100, hgram); + g_free(hgram); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); + if (hgram_bins > 10) { + hgram_bins = 10; + } else { + hgram_bins = 0; + hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; + } + hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); + cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n", + qdist_avg(&hst.chain), hgram); + g_free(hgram); +} + +void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) +{ + int i, target_code_size, max_target_code_size; + int direct_jmp_count, direct_jmp2_count, cross_page; + TranslationBlock *tb; + struct qht_stats hst; + + tb_lock(); + + target_code_size = 0; + max_target_code_size = 0; + cross_page = 0; + direct_jmp_count = 0; + direct_jmp2_count = 0; + for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) { + tb = tcg_ctx.tb_ctx.tbs[i]; + target_code_size += tb->size; + if (tb->size > max_target_code_size) { + max_target_code_size = tb->size; + } + if (tb->page_addr[1] != -1) { + cross_page++; + } + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + direct_jmp_count++; + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + direct_jmp2_count++; + } + } + } + /* XXX: avoid using doubles ? */ + cpu_fprintf(f, "Translation buffer state:\n"); + cpu_fprintf(f, "gen code size %td/%zd\n", + tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer, + tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer); + cpu_fprintf(f, "TB count %d\n", tcg_ctx.tb_ctx.nb_tbs); + cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", + tcg_ctx.tb_ctx.nb_tbs ? target_code_size / + tcg_ctx.tb_ctx.nb_tbs : 0, + max_target_code_size); + cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", + tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr - + tcg_ctx.code_gen_buffer) / + tcg_ctx.tb_ctx.nb_tbs : 0, + target_code_size ? (double) (tcg_ctx.code_gen_ptr - + tcg_ctx.code_gen_buffer) / + target_code_size : 0); + cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page, + tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / + tcg_ctx.tb_ctx.nb_tbs : 0); + cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", + direct_jmp_count, + tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) / + tcg_ctx.tb_ctx.nb_tbs : 0, + direct_jmp2_count, + tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / + tcg_ctx.tb_ctx.nb_tbs : 0); + + qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst); + print_qht_statistics(f, cpu_fprintf, hst); + qht_statistics_destroy(&hst); + + cpu_fprintf(f, "\nStatistics:\n"); + cpu_fprintf(f, "TB flush count %u\n", + atomic_read(&tcg_ctx.tb_ctx.tb_flush_count)); + cpu_fprintf(f, "TB invalidate count %d\n", + tcg_ctx.tb_ctx.tb_phys_invalidate_count); + cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); + tcg_dump_info(f, cpu_fprintf); + + tb_unlock(); +} + +void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf) +{ + tcg_dump_op_count(f, cpu_fprintf); +} + +#else /* CONFIG_USER_ONLY */ + +void cpu_interrupt(CPUState *cpu, int mask) +{ + g_assert(qemu_mutex_iothread_locked()); + cpu->interrupt_request |= mask; + cpu->icount_decr.u16.high = -1; +} + +/* + * Walks guest process memory "regions" one by one + * and calls callback function 'fn' for each region. 
+ */ +struct walk_memory_regions_data { + walk_memory_regions_fn fn; + void *priv; + target_ulong start; + int prot; +}; + +static int walk_memory_regions_end(struct walk_memory_regions_data *data, + target_ulong end, int new_prot) +{ + if (data->start != -1u) { + int rc = data->fn(data->priv, data->start, end, data->prot); + if (rc != 0) { + return rc; + } + } + + data->start = (new_prot ? end : -1u); + data->prot = new_prot; + + return 0; +} + +static int walk_memory_regions_1(struct walk_memory_regions_data *data, + target_ulong base, int level, void **lp) +{ + target_ulong pa; + int i, rc; + + if (*lp == NULL) { + return walk_memory_regions_end(data, base, 0); + } + + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + int prot = pd[i].flags; + + pa = base | (i << TARGET_PAGE_BITS); + if (prot != data->prot) { + rc = walk_memory_regions_end(data, pa, prot); + if (rc != 0) { + return rc; + } + } + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + pa = base | ((target_ulong)i << + (TARGET_PAGE_BITS + V_L2_BITS * level)); + rc = walk_memory_regions_1(data, pa, level - 1, pp + i); + if (rc != 0) { + return rc; + } + } + } + + return 0; +} + +int walk_memory_regions(void *priv, walk_memory_regions_fn fn) +{ + struct walk_memory_regions_data data; + uintptr_t i, l1_sz = v_l1_size; + + data.fn = fn; + data.priv = priv; + data.start = -1u; + data.prot = 0; + + for (i = 0; i < l1_sz; i++) { + target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS); + int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i); + if (rc != 0) { + return rc; + } + } + + return walk_memory_regions_end(&data, 0, 0); +} + +static int dump_region(void *priv, target_ulong start, + target_ulong end, unsigned long prot) +{ + FILE *f = (FILE *)priv; + + (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx + " "TARGET_FMT_lx" %c%c%c\n", + start, end, end - start, + ((prot & PAGE_READ) ? 'r' : '-'), + ((prot & PAGE_WRITE) ? 'w' : '-'), + ((prot & PAGE_EXEC) ? 'x' : '-')); + + return 0; +} + +/* dump memory mappings */ +void page_dump(FILE *f) +{ + const int length = sizeof(target_ulong) * 2; + (void) fprintf(f, "%-*s %-*s %-*s %s\n", + length, "start", length, "end", length, "size", "prot"); + walk_memory_regions(f, dump_region); +} + +int page_get_flags(target_ulong address) +{ + PageDesc *p; + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + return 0; + } + return p->flags; +} + +/* Modify the flags of a page and invalidate the code if necessary. + The flag PAGE_WRITE_ORG is positioned automatically depending + on PAGE_WRITE. The mmap_lock should already be held. */ +void page_set_flags(target_ulong start, target_ulong end, int flags) +{ + target_ulong addr, len; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ +#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS + assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); +#endif + assert(start < end); + assert_memory_lock(); + + start = start & TARGET_PAGE_MASK; + end = TARGET_PAGE_ALIGN(end); + + if (flags & PAGE_WRITE) { + flags |= PAGE_WRITE_ORG; + } + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); + + /* If the write protection bit is set, then we invalidate + the code inside. 
*/ + if (!(p->flags & PAGE_WRITE) && + (flags & PAGE_WRITE) && + p->first_tb) { + tb_invalidate_phys_page(addr, 0); + } + p->flags = flags; + } +} + +int page_check_range(target_ulong start, target_ulong len, int flags) +{ + PageDesc *p; + target_ulong end; + target_ulong addr; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ +#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS + assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); +#endif + + if (len == 0) { + return 0; + } + if (start + len - 1 < start) { + /* We've wrapped around. */ + return -1; + } + + /* must do before we loose bits in the next step */ + end = TARGET_PAGE_ALIGN(start + len); + start = start & TARGET_PAGE_MASK; + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return -1; + } + if (!(p->flags & PAGE_VALID)) { + return -1; + } + + if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { + return -1; + } + if (flags & PAGE_WRITE) { + if (!(p->flags & PAGE_WRITE_ORG)) { + return -1; + } + /* unprotect the page if it was put read-only because it + contains translated code */ + if (!(p->flags & PAGE_WRITE)) { + if (!page_unprotect(addr, 0)) { + return -1; + } + } + } + } + return 0; +} + +/* called from signal handler: invalidate the code and unprotect the + * page. Return 0 if the fault was not handled, 1 if it was handled, + * and 2 if it was handled but the caller must cause the TB to be + * immediately exited. (We can only return 2 if the 'pc' argument is + * non-zero.) + */ +int page_unprotect(target_ulong address, uintptr_t pc) +{ + unsigned int prot; + bool current_tb_invalidated; + PageDesc *p; + target_ulong host_start, host_end, addr; + + /* Technically this isn't safe inside a signal handler. However we + know this only ever happens in a synchronous SEGV handler, so in + practice it seems to be ok. */ + mmap_lock(); + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + mmap_unlock(); + return 0; + } + + /* if the page was really writable, then we change its + protection back to writable */ + if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { + host_start = address & qemu_host_page_mask; + host_end = host_start + qemu_host_page_size; + + prot = 0; + current_tb_invalidated = false; + for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { + p = page_find(addr >> TARGET_PAGE_BITS); + p->flags |= PAGE_WRITE; + prot |= p->flags; + + /* and since the content will be modified, we must invalidate + the corresponding translated code. */ + current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); +#ifdef DEBUG_TB_CHECK + tb_invalidate_check(addr); +#endif + } + mprotect((void *)g2h(host_start), qemu_host_page_size, + prot & PAGE_BITS); + + mmap_unlock(); + /* If current TB was invalidated return to main loop */ + return current_tb_invalidated ? 
2 : 1; + } + mmap_unlock(); + return 0; +} +#endif /* CONFIG_USER_ONLY */ diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h new file mode 100644 index 0000000000..ba8e4d63c4 --- /dev/null +++ b/accel/tcg/translate-all.h @@ -0,0 +1,36 @@ +/* + * Translated block handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef TRANSLATE_ALL_H +#define TRANSLATE_ALL_H + +#include "exec/exec-all.h" + + +/* translate-all.c */ +void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len); +void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access); +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end); +void tb_check_watchpoint(CPUState *cpu); + +#ifdef CONFIG_USER_ONLY +int page_unprotect(target_ulong address, uintptr_t pc); +#endif + +#endif /* TRANSLATE_ALL_H */ diff --git a/accel/tcg/translate-common.c b/accel/tcg/translate-common.c new file mode 100644 index 0000000000..40fe5a19bb --- /dev/null +++ b/accel/tcg/translate-common.c @@ -0,0 +1,56 @@ +/* + * Host code generation common components + * + * Copyright (c) 2015 Peter Crosthwaite <crosthwaite.peter@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qom/cpu.h" +#include "sysemu/cpus.h" +#include "qemu/main-loop.h" + +uintptr_t qemu_real_host_page_size; +intptr_t qemu_real_host_page_mask; + +#ifndef CONFIG_USER_ONLY +/* mask must never be zero, except for A20 change call */ +static void tcg_handle_interrupt(CPUState *cpu, int mask) +{ + int old_mask; + g_assert(qemu_mutex_iothread_locked()); + + old_mask = cpu->interrupt_request; + cpu->interrupt_request |= mask; + + /* + * If called from iothread context, wake the target cpu in + * case its halted. + */ + if (!qemu_cpu_is_self(cpu)) { + qemu_cpu_kick(cpu); + } else { + cpu->icount_decr.u16.high = -1; + if (use_icount && + !cpu->can_do_io + && (mask & ~old_mask) != 0) { + cpu_abort(cpu, "Raised interrupt while not in I/O function"); + } + } +} + +CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; +#endif |