179 files changed, 9735 insertions(+), 1599 deletions(-)
diff --git a/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml b/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml index 9f1fe9e7dc..03e74c97db 100644 --- a/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml +++ b/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml @@ -8,8 +8,6 @@ ubuntu-20.04-s390x-all-linux-static: tags: - ubuntu_20.04 - s390x - variables: - DFLTCC: 0 rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - if: "$S390X_RUNNER_AVAILABLE" @@ -29,8 +27,6 @@ ubuntu-20.04-s390x-all: tags: - ubuntu_20.04 - s390x - variables: - DFLTCC: 0 timeout: 75m rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' @@ -48,8 +44,6 @@ ubuntu-20.04-s390x-alldbg: tags: - ubuntu_20.04 - s390x - variables: - DFLTCC: 0 rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' when: manual @@ -71,8 +65,6 @@ ubuntu-20.04-s390x-clang: tags: - ubuntu_20.04 - s390x - variables: - DFLTCC: 0 rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' when: manual @@ -93,8 +85,6 @@ ubuntu-20.04-s390x-tci: tags: - ubuntu_20.04 - s390x - variables: - DFLTCC: 0 rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' when: manual @@ -114,8 +104,6 @@ ubuntu-20.04-s390x-notcg: tags: - ubuntu_20.04 - s390x - variables: - DFLTCC: 0 rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' when: manual diff --git a/.travis.yml b/.travis.yml index 4fdc9a6785..fb3baabca9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -218,7 +218,6 @@ jobs: - TEST_CMD="make check check-tcg V=1" - CONFIG="--disable-containers --target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user" - UNRELIABLE=true - - DFLTCC=0 script: - BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$? 
- | @@ -258,7 +257,7 @@ jobs: env: - CONFIG="--disable-containers --audio-drv-list=sdl --disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}" - - DFLTCC=0 + - name: "[s390x] GCC (user)" arch: s390x dist: focal @@ -270,7 +269,7 @@ jobs: - ninja-build env: - CONFIG="--disable-containers --disable-system" - - DFLTCC=0 + - name: "[s390x] Clang (disable-tcg)" arch: s390x dist: focal @@ -304,4 +303,3 @@ jobs: - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools --host-cc=clang --cxx=clang++" - UNRELIABLE=true - - DFLTCC=0 diff --git a/MAINTAINERS b/MAINTAINERS index ead2bed652..6af9cd985c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2881,7 +2881,7 @@ T: git https://repo.or.cz/qemu/armbru.git qapi-next QEMU Guest Agent M: Michael Roth <michael.roth@amd.com> -R: Konstantin Kostiuk <kkostiuk@redhat.com> +M: Konstantin Kostiuk <kkostiuk@redhat.com> S: Maintained F: qga/ F: docs/interop/qemu-ga.rst diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 3a2677d065..99aede73b7 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -45,6 +45,7 @@ #include "qemu/guest-random.h" #include "sysemu/hw_accel.h" #include "kvm-cpus.h" +#include "sysemu/dirtylimit.h" #include "hw/boards.h" #include "monitor/stats.h" @@ -477,6 +478,7 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) cpu->kvm_state = s; cpu->vcpu_dirty = true; cpu->dirty_pages = 0; + cpu->throttle_us_per_full = 0; mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { @@ -757,17 +759,20 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) } /* Must be with slots_lock held */ -static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) +static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu) { int ret; - CPUState *cpu; uint64_t total = 0; int64_t stamp; stamp = get_clock(); - CPU_FOREACH(cpu) { - total += kvm_dirty_ring_reap_one(s, cpu); + if (cpu) { + total = kvm_dirty_ring_reap_one(s, cpu); + } else { + CPU_FOREACH(cpu) { + total += kvm_dirty_ring_reap_one(s, cpu); + } } if (total) { @@ -788,7 +793,7 @@ static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) * Currently for simplicity, we must hold BQL before calling this. We can * consider to drop the BQL if we're clear with all the race conditions. */ -static uint64_t kvm_dirty_ring_reap(KVMState *s) +static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu) { uint64_t total; @@ -808,7 +813,7 @@ static uint64_t kvm_dirty_ring_reap(KVMState *s) * reset below. */ kvm_slots_lock(); - total = kvm_dirty_ring_reap_locked(s); + total = kvm_dirty_ring_reap_locked(s, cpu); kvm_slots_unlock(); return total; @@ -855,7 +860,7 @@ static void kvm_dirty_ring_flush(void) * vcpus out in a synchronous way. */ kvm_cpu_synchronize_kick_all(); - kvm_dirty_ring_reap(kvm_state); + kvm_dirty_ring_reap(kvm_state, NULL); trace_kvm_dirty_ring_flush(1); } @@ -1399,7 +1404,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, * Not easy. Let's cross the fingers until it's fixed. 
*/ if (kvm_state->kvm_dirty_ring_size) { - kvm_dirty_ring_reap_locked(kvm_state); + kvm_dirty_ring_reap_locked(kvm_state, NULL); } else { kvm_slot_get_dirty_log(kvm_state, mem); } @@ -1467,11 +1472,16 @@ static void *kvm_dirty_ring_reaper_thread(void *data) */ sleep(1); + /* keep sleeping so that dirtylimit not be interfered by reaper */ + if (dirtylimit_in_service()) { + continue; + } + trace_kvm_dirty_ring_reaper("wakeup"); r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; qemu_mutex_lock_iothread(); - kvm_dirty_ring_reap(s); + kvm_dirty_ring_reap(s, NULL); qemu_mutex_unlock_iothread(); r->reaper_iteration++; @@ -2315,6 +2325,11 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target, strList *names, strList *targets, Error **errp); static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp); +uint32_t kvm_dirty_ring_size(void) +{ + return kvm_state->kvm_dirty_ring_size; +} + static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -2967,8 +2982,19 @@ int kvm_cpu_exec(CPUState *cpu) */ trace_kvm_dirty_ring_full(cpu->cpu_index); qemu_mutex_lock_iothread(); - kvm_dirty_ring_reap(kvm_state); + /* + * We throttle vCPU by making it sleep once it exit from kernel + * due to dirty ring full. In the dirtylimit scenario, reaping + * all vCPUs after a single vCPU dirty ring get full result in + * the miss of sleep, so just reap the ring-fulled vCPU. + */ + if (dirtylimit_in_service()) { + kvm_dirty_ring_reap(kvm_state, cpu); + } else { + kvm_dirty_ring_reap(kvm_state, NULL); + } qemu_mutex_unlock_iothread(); + dirtylimit_vcpu_execute(cpu); ret = 0; break; case KVM_EXIT_SYSTEM_EVENT: diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 3345882d85..2ac5f9c036 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -148,3 +148,8 @@ bool kvm_dirty_ring_enabled(void) { return false; } + +uint32_t kvm_dirty_ring_size(void) +{ + return 0; +} diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 8fd23a9d05..ef62a199c7 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -2256,6 +2256,15 @@ int page_get_flags(target_ulong address) return p->flags; } +/* + * Allow the target to decide if PAGE_TARGET_[12] may be reset. + * By default, they are not kept. + */ +#ifndef PAGE_TARGET_STICKY +#define PAGE_TARGET_STICKY 0 +#endif +#define PAGE_STICKY (PAGE_ANON | PAGE_TARGET_STICKY) + /* Modify the flags of a page and invalidate the code if necessary. The flag PAGE_WRITE_ORG is positioned automatically depending on PAGE_WRITE. The mmap_lock should already be held. */ @@ -2299,8 +2308,8 @@ void page_set_flags(target_ulong start, target_ulong end, int flags) p->target_data = NULL; p->flags = flags; } else { - /* Using mprotect on a page does not change MAP_ANON. */ - p->flags = (p->flags & PAGE_ANON) | flags; + /* Using mprotect on a page does not change sticky bits. 
*/ + p->flags = (p->flags & PAGE_STICKY) | flags; } } } diff --git a/configs/targets/loongarch64-softmmu.mak b/configs/targets/loongarch64-softmmu.mak index 7bc06c850c..483474ba93 100644 --- a/configs/targets/loongarch64-softmmu.mak +++ b/configs/targets/loongarch64-softmmu.mak @@ -2,3 +2,4 @@ TARGET_ARCH=loongarch64 TARGET_BASE_ARCH=loongarch TARGET_SUPPORTS_MTTCG=y TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu64.xml +TARGET_NEED_FDT=y @@ -1933,6 +1933,7 @@ probe_target_compiler() { hexagon) container_hosts=x86_64 ;; hppa) container_hosts=x86_64 ;; i386) container_hosts=x86_64 ;; + loongarch64) container_hosts=x86_64 ;; m68k) container_hosts=x86_64 ;; microblaze) container_hosts=x86_64 ;; mips64el) container_hosts=x86_64 ;; @@ -1987,6 +1988,10 @@ probe_target_compiler() { container_image=fedora-i386-cross container_cross_prefix= ;; + loongarch64) + container_image=debian-loongarch-cross + container_cross_prefix=loongarch64-unknown-linux-gnu- + ;; m68k) container_image=debian-m68k-cross container_cross_prefix=m68k-linux-gnu- diff --git a/cpus-common.c b/cpus-common.c index db459b41ce..793364dc0e 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -73,6 +73,12 @@ static int cpu_get_free_index(void) } CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); +static unsigned int cpu_list_generation_id; + +unsigned int cpu_list_generation_id_get(void) +{ + return cpu_list_generation_id; +} void cpu_list_add(CPUState *cpu) { @@ -84,6 +90,7 @@ void cpu_list_add(CPUState *cpu) assert(!cpu_index_auto_assigned); } QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node); + cpu_list_generation_id++; } void cpu_list_remove(CPUState *cpu) @@ -96,6 +103,7 @@ void cpu_list_remove(CPUState *cpu) QTAILQ_REMOVE_RCU(&cpus, cpu, node); cpu->cpu_index = UNASSIGNED_CPU_INDEX; + cpu_list_generation_id++; } CPUState *qemu_get_cpu(int index) diff --git a/docs/devel/submitting-a-patch.rst b/docs/devel/submitting-a-patch.rst index 09a8d12c2c..fec33ce148 100644 --- a/docs/devel/submitting-a-patch.rst +++ b/docs/devel/submitting-a-patch.rst @@ -39,7 +39,7 @@ ideas from other posts. If you do subscribe, be prepared for a high volume of email, often over one thousand messages in a week. The list is moderated; first-time posts from an email address (whether or not you subscribed) may be subject to some delay while waiting for a moderator -to whitelist your address. +to allow your address. The larger your contribution is, or if you plan on becoming a long-term contributor, then the more important the rest of this page becomes. diff --git a/docs/system/devices/usb.rst b/docs/system/devices/usb.rst index 872d916758..f39a88f080 100644 --- a/docs/system/devices/usb.rst +++ b/docs/system/devices/usb.rst @@ -353,3 +353,44 @@ and also assign it to the correct USB bus in QEMU like this: -device usb-ehci,id=ehci \\ -device usb-host,bus=usb-bus.0,hostbus=3,hostport=1 \\ -device usb-host,bus=ehci.0,hostbus=1,hostport=1 + +``usb-host`` properties for reset behavior +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``guest-reset`` and ``guest-reset-all`` properties control +whenever the guest is allowed to reset the physical usb device on the +host. There are three cases: + +``guest-reset=false`` + The guest is not allowed to reset the (physical) usb device. + +``guest-reset=true,guest-resets-all=false`` + The guest is allowed to reset the device when it is not yet + initialized (aka no usb bus address assigned). Usually this results + in one guest reset being allowed. This is the default behavior. 
+ +``guest-reset=true,guest-resets-all=true`` + The guest is allowed to reset the device as it pleases. + +The reason for this existing are broken usb devices. In theory one +should be able to reset (and re-initialize) usb devices at any time. +In practice that may result in shitty usb device firmware crashing and +the device not responding any more until you power-cycle (aka un-plug +and re-plug) it. + +What works best pretty much depends on the behavior of the specific +usb device at hand, so it's a trial-and-error game. If the default +doesn't work, try another option and see whenever the situation +improves. + +record usb transfers +^^^^^^^^^^^^^^^^^^^^ + +All usb devices have support for recording the usb traffic. This can +be enabled using the ``pcap=<file>`` property, for example: + +``-device usb-mouse,pcap=mouse.pcap`` + +The pcap files are compatible with the linux kernels usbmon. Many +tools, including ``wireshark``, can decode and inspect these trace +files. diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst index 8e08a29e89..faf6349ea5 100644 --- a/docs/tools/qemu-nbd.rst +++ b/docs/tools/qemu-nbd.rst @@ -225,7 +225,7 @@ disconnects: qemu-nbd -f qcow2 file.qcow2 Start a long-running server listening with encryption on port 10810, -and whitelist clients with a specific X.509 certificate to connect to +and allow clients with a specific X.509 certificate to connect to a 1 megabyte subset of a raw file, using the export name 'subset': :: diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc index 943e3301d2..1610472cfc 100644 --- a/fpu/softfloat-specialize.c.inc +++ b/fpu/softfloat-specialize.c.inc @@ -390,7 +390,8 @@ bool float32_is_signaling_nan(float32 a_, float_status *status) static int pickNaN(FloatClass a_cls, FloatClass b_cls, bool aIsLargerSignificand, float_status *status) { -#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) +#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) || \ + defined(TARGET_LOONGARCH64) || defined(TARGET_S390X) /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take * the first of: * 1. A if it is signaling @@ -574,6 +575,29 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, return 1; } } +#elif defined(TARGET_LOONGARCH64) + /* + * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan) + * case sets InvalidOp and returns the input value 'c' + */ + if (infzero) { + float_raise(float_flag_invalid | float_flag_invalid_imz, status); + return 2; + } + /* Prefer sNaN over qNaN, in the c, a, b order. */ + if (is_snan(c_cls)) { + return 2; + } else if (is_snan(a_cls)) { + return 0; + } else if (is_snan(b_cls)) { + return 1; + } else if (is_qnan(c_cls)) { + return 2; + } else if (is_qnan(a_cls)) { + return 0; + } else { + return 1; + } #elif defined(TARGET_PPC) /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer * to return an input NaN if we have one (ie c) rather than generating diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 3ffa24bd67..188d9ece3b 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -865,6 +865,19 @@ SRST Display the vcpu dirty rate information. ERST + { + .name = "vcpu_dirty_limit", + .args_type = "", + .params = "", + .help = "show dirty page limit information of all vCPU", + .cmd = hmp_info_vcpu_dirty_limit, + }, + +SRST + ``info vcpu_dirty_limit`` + Display the vcpu dirty page limit information. 
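As an illustrative sketch (not part of the patch itself): the monitor commands added by the hmp-commands hunks in this commit — ``set_vcpu_dirty_limit``, ``cancel_vcpu_dirty_limit`` and ``info vcpu_dirty_limit`` — could be exercised from an HMP session roughly as follows. The rate value and the cpu_index are arbitrary example inputs, and the unit of ``dirty_rate`` is not stated in these hunks.

::

    (qemu) set_vcpu_dirty_limit 200 0
    (qemu) info vcpu_dirty_limit
    (qemu) cancel_vcpu_dirty_limit 0

Omitting ``cpu_index`` applies the set/cancel operation to all virtual CPUs, per the ``dirty_rate [cpu_index]`` parameter lists declared in the hunks below.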
+ERST + #if defined(TARGET_I386) { .name = "sgx", diff --git a/hmp-commands.hx b/hmp-commands.hx index c9d465735a..182e639d14 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -1768,3 +1768,35 @@ ERST "\n\t\t\t -b to specify dirty bitmap as method of calculation)", .cmd = hmp_calc_dirty_rate, }, + +SRST +``set_vcpu_dirty_limit`` + Set dirty page rate limit on virtual CPU, the information about all the + virtual CPU dirty limit status can be observed with ``info vcpu_dirty_limit`` + command. +ERST + + { + .name = "set_vcpu_dirty_limit", + .args_type = "dirty_rate:l,cpu_index:l?", + .params = "dirty_rate [cpu_index]", + .help = "set dirty page rate limit, use cpu_index to set limit" + "\n\t\t\t\t\t on a specified virtual cpu", + .cmd = hmp_set_vcpu_dirty_limit, + }, + +SRST +``cancel_vcpu_dirty_limit`` + Cancel dirty page rate limit on virtual CPU, the information about all the + virtual CPU dirty limit status can be observed with ``info vcpu_dirty_limit`` + command. +ERST + + { + .name = "cancel_vcpu_dirty_limit", + .args_type = "cpu_index:l?", + .params = "[cpu_index]", + .help = "cancel dirty page rate limit, use cpu_index to cancel" + "\n\t\t\t\t\t limit on a specified virtual cpu", + .cmd = hmp_cancel_vcpu_dirty_limit, + }, diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c index 0f0a9f63e2..bc6f3f55e6 100644 --- a/hw/adc/npcm7xx_adc.c +++ b/hw/adc/npcm7xx_adc.c @@ -36,7 +36,7 @@ REG32(NPCM7XX_ADC_DATA, 0x4) #define NPCM7XX_ADC_CON_INT BIT(18) #define NPCM7XX_ADC_CON_EN BIT(17) #define NPCM7XX_ADC_CON_RST BIT(16) -#define NPCM7XX_ADC_CON_CONV BIT(14) +#define NPCM7XX_ADC_CON_CONV BIT(13) #define NPCM7XX_ADC_CON_DIV(rv) extract32(rv, 1, 8) #define NPCM7XX_ADC_MAX_RESULT 1023 @@ -242,7 +242,7 @@ static void npcm7xx_adc_init(Object *obj) for (i = 0; i < NPCM7XX_ADC_NUM_INPUTS; ++i) { object_property_add_uint32_ptr(obj, "adci[*]", - &s->adci[i], OBJ_PROP_FLAG_WRITE); + &s->adci[i], OBJ_PROP_FLAG_READWRITE); } object_property_add_uint32_ptr(obj, "vref", &s->vref, OBJ_PROP_FLAG_WRITE); diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index 48538c9360..3c2a4160cd 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -23,6 +23,13 @@ /* Capabilities for SD controller: no DMA, high-speed, default clocks etc. 
*/ #define BCM2835_SDHC_CAPAREG 0x52134b4 +/* + * According to Linux driver & DTS, dma channels 0--10 have separate IRQ, + * while channels 11--14 share one IRQ: + */ +#define SEPARATE_DMA_IRQ_MAX 10 +#define ORGATED_DMA_IRQ_COUNT 4 + static void create_unimp(BCM2835PeripheralState *ps, UnimplementedDeviceState *uds, const char *name, hwaddr ofs, hwaddr size) @@ -101,6 +108,11 @@ static void bcm2835_peripherals_init(Object *obj) /* DMA Channels */ object_initialize_child(obj, "dma", &s->dma, TYPE_BCM2835_DMA); + object_initialize_child(obj, "orgated-dma-irq", + &s->orgated_dma_irq, TYPE_OR_IRQ); + object_property_set_int(OBJECT(&s->orgated_dma_irq), "num-lines", + ORGATED_DMA_IRQ_COUNT, &error_abort); + object_property_add_const_link(OBJECT(&s->dma), "dma-mr", OBJECT(&s->gpu_bus_mr)); @@ -322,12 +334,24 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1)); - for (n = 0; n <= 12; n++) { + for (n = 0; n <= SEPARATE_DMA_IRQ_MAX; n++) { sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n, qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, INTERRUPT_DMA0 + n)); } + if (!qdev_realize(DEVICE(&s->orgated_dma_irq), NULL, errp)) { + return; + } + for (n = 0; n < ORGATED_DMA_IRQ_COUNT; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), + SEPARATE_DMA_IRQ_MAX + 1 + n, + qdev_get_gpio_in(DEVICE(&s->orgated_dma_irq), n)); + } + qdev_connect_gpio_out(DEVICE(&s->orgated_dma_irq), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_DMA0 + SEPARATE_DMA_IRQ_MAX + 1)); /* THERMAL */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) { diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 44ecd446c3..e53d5f0fa7 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -280,9 +280,10 @@ static void machine_hppa_init(MachineState *machine) } /* PS/2 Keyboard/Mouse */ - dev = DEVICE(lasips2_initfn(LASI_PS2KBD_HPA, - qdev_get_gpio_in(lasi_dev, - LASI_IRQ_PS2KBD_HPA))); + dev = qdev_new(TYPE_LASIPS2); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(lasi_dev, LASI_IRQ_PS2KBD_HPA)); memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA, sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 754f1d0593..dc929727dc 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -631,6 +631,14 @@ static void microvm_machine_initfn(Object *obj) qemu_register_powerdown_notifier(&mms->powerdown_req); } +GlobalProperty microvm_properties[] = { + /* + * pcie host bridge (gpex) on microvm has no io address window, + * so reserving io space is not going to work. Turn it off. 
+ */ + { "pcie-root-port", "io-reserve", "0" }, +}; + static void microvm_class_init(ObjectClass *oc, void *data) { X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); @@ -707,6 +715,9 @@ static void microvm_class_init(ObjectClass *oc, void *data) "Set off to disable adding virtio-mmio devices to the kernel cmdline"); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); + + compat_props_add(mc->compat_props, microvm_properties, + G_N_ELEMENTS(microvm_properties)); } static const TypeInfo microvm_machine_info = { diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c index 9223cb0af4..ea7c07a2ba 100644 --- a/hw/input/lasips2.c +++ b/hw/input/lasips2.c @@ -35,17 +35,28 @@ #include "qapi/error.h" +static const VMStateDescription vmstate_lasips2_port = { + .name = "lasips2-port", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(control, LASIPS2Port), + VMSTATE_UINT8(buf, LASIPS2Port), + VMSTATE_BOOL(loopback_rbne, LASIPS2Port), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_lasips2 = { .name = "lasips2", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_UINT8(kbd.control, LASIPS2State), - VMSTATE_UINT8(kbd.id, LASIPS2State), - VMSTATE_BOOL(kbd.irq, LASIPS2State), - VMSTATE_UINT8(mouse.control, LASIPS2State), - VMSTATE_UINT8(mouse.id, LASIPS2State), - VMSTATE_BOOL(mouse.irq, LASIPS2State), + VMSTATE_UINT8(int_status, LASIPS2State), + VMSTATE_STRUCT(kbd_port.parent_obj, LASIPS2State, 1, + vmstate_lasips2_port, LASIPS2Port), + VMSTATE_STRUCT(mouse_port.parent_obj, LASIPS2State, 1, + vmstate_lasips2_port, LASIPS2Port), VMSTATE_END_OF_LIST() } }; @@ -119,36 +130,50 @@ static const char *lasips2_write_reg_name(uint64_t addr) static void lasips2_update_irq(LASIPS2State *s) { - trace_lasips2_intr(s->kbd.irq | s->mouse.irq); - qemu_set_irq(s->irq, s->kbd.irq | s->mouse.irq); + int level = s->int_status ? 
1 : 0; + + trace_lasips2_intr(level); + qemu_set_irq(s->irq, level); +} + +static void lasips2_set_irq(void *opaque, int n, int level) +{ + LASIPS2State *s = LASIPS2(opaque); + + if (level) { + s->int_status |= BIT(n); + } else { + s->int_status &= ~BIT(n); + } + + lasips2_update_irq(s); } static void lasips2_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - LASIPS2Port *port = opaque; + LASIPS2Port *lp = LASIPS2_PORT(opaque); - trace_lasips2_reg_write(size, port->id, addr, + trace_lasips2_reg_write(size, lp->id, addr, lasips2_write_reg_name(addr), val); switch (addr & 0xc) { case REG_PS2_CONTROL: - port->control = val; + lp->control = val; break; case REG_PS2_XMTDATA: - if (port->control & LASIPS2_CONTROL_LOOPBACK) { - port->buf = val; - port->irq = true; - port->loopback_rbne = true; - lasips2_update_irq(port->parent); + if (lp->control & LASIPS2_CONTROL_LOOPBACK) { + lp->buf = val; + lp->loopback_rbne = true; + qemu_set_irq(lp->irq, 1); break; } - if (port->id) { - ps2_write_mouse(port->dev, val); + if (lp->id) { + ps2_write_mouse(PS2_MOUSE_DEVICE(lp->ps2dev), val); } else { - ps2_write_keyboard(port->dev, val); + ps2_write_keyboard(PS2_KBD_DEVICE(lp->ps2dev), val); } break; @@ -164,54 +189,53 @@ static void lasips2_reg_write(void *opaque, hwaddr addr, uint64_t val, static uint64_t lasips2_reg_read(void *opaque, hwaddr addr, unsigned size) { - LASIPS2Port *port = opaque; + LASIPS2Port *lp = LASIPS2_PORT(opaque); uint64_t ret = 0; switch (addr & 0xc) { case REG_PS2_ID: - ret = port->id; + ret = lp->id; break; case REG_PS2_RCVDATA: - if (port->control & LASIPS2_CONTROL_LOOPBACK) { - port->irq = false; - port->loopback_rbne = false; - lasips2_update_irq(port->parent); - ret = port->buf; + if (lp->control & LASIPS2_CONTROL_LOOPBACK) { + lp->loopback_rbne = false; + qemu_set_irq(lp->irq, 0); + ret = lp->buf; break; } - ret = ps2_read_data(port->dev); + ret = ps2_read_data(lp->ps2dev); break; case REG_PS2_CONTROL: - ret = port->control; + ret = lp->control; break; case REG_PS2_STATUS: ret = LASIPS2_STATUS_DATSHD | LASIPS2_STATUS_CLKSHD; - if (port->control & LASIPS2_CONTROL_DIAG) { - if (!(port->control & LASIPS2_CONTROL_DATDIR)) { + if (lp->control & LASIPS2_CONTROL_DIAG) { + if (!(lp->control & LASIPS2_CONTROL_DATDIR)) { ret &= ~LASIPS2_STATUS_DATSHD; } - if (!(port->control & LASIPS2_CONTROL_CLKDIR)) { + if (!(lp->control & LASIPS2_CONTROL_CLKDIR)) { ret &= ~LASIPS2_STATUS_CLKSHD; } } - if (port->control & LASIPS2_CONTROL_LOOPBACK) { - if (port->loopback_rbne) { + if (lp->control & LASIPS2_CONTROL_LOOPBACK) { + if (lp->loopback_rbne) { ret |= LASIPS2_STATUS_RBNE; } } else { - if (!ps2_queue_empty(port->dev)) { + if (!ps2_queue_empty(lp->ps2dev)) { ret |= LASIPS2_STATUS_RBNE; } } - if (port->parent->kbd.irq || port->parent->mouse.irq) { + if (lp->lasips2->int_status) { ret |= LASIPS2_STATUS_CMPINTR; } break; @@ -222,7 +246,7 @@ static uint64_t lasips2_reg_read(void *opaque, hwaddr addr, unsigned size) break; } - trace_lasips2_reg_read(size, port->id, addr, + trace_lasips2_reg_read(size, lp->id, addr, lasips2_read_reg_name(addr), ret); return ret; } @@ -234,106 +258,208 @@ static const MemoryRegionOps lasips2_reg_ops = { .min_access_size = 1, .max_access_size = 4, }, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; -static void lasips2_set_kbd_irq(void *opaque, int n, int level) +static void lasips2_realize(DeviceState *dev, Error **errp) { - LASIPS2State *s = LASIPS2(opaque); - LASIPS2Port *port = &s->kbd; + LASIPS2State *s = LASIPS2(dev); + 
LASIPS2Port *lp; - port->irq = level; - lasips2_update_irq(port->parent); + lp = LASIPS2_PORT(&s->kbd_port); + if (!(qdev_realize(DEVICE(lp), NULL, errp))) { + return; + } + + qdev_connect_gpio_out(DEVICE(lp), 0, + qdev_get_gpio_in_named(dev, "lasips2-port-input-irq", + lp->id)); + + lp = LASIPS2_PORT(&s->mouse_port); + if (!(qdev_realize(DEVICE(lp), NULL, errp))) { + return; + } + + qdev_connect_gpio_out(DEVICE(lp), 0, + qdev_get_gpio_in_named(dev, "lasips2-port-input-irq", + lp->id)); } -static void lasips2_set_mouse_irq(void *opaque, int n, int level) +static void lasips2_init(Object *obj) { - LASIPS2State *s = LASIPS2(opaque); - LASIPS2Port *port = &s->mouse; + LASIPS2State *s = LASIPS2(obj); + LASIPS2Port *lp; + + object_initialize_child(obj, "lasips2-kbd-port", &s->kbd_port, + TYPE_LASIPS2_KBD_PORT); + object_initialize_child(obj, "lasips2-mouse-port", &s->mouse_port, + TYPE_LASIPS2_MOUSE_PORT); + + lp = LASIPS2_PORT(&s->kbd_port); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &lp->reg); + lp = LASIPS2_PORT(&s->mouse_port); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &lp->reg); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); - port->irq = level; - lasips2_update_irq(port->parent); + qdev_init_gpio_in_named(DEVICE(obj), lasips2_set_irq, + "lasips2-port-input-irq", 2); } -LASIPS2State *lasips2_initfn(hwaddr base, qemu_irq irq) +static void lasips2_class_init(ObjectClass *klass, void *data) { - DeviceState *dev; + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = lasips2_realize; + dc->vmsd = &vmstate_lasips2; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} - dev = qdev_new(TYPE_LASIPS2); - qdev_prop_set_uint64(dev, "base", base); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); +static const TypeInfo lasips2_info = { + .name = TYPE_LASIPS2, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = lasips2_init, + .instance_size = sizeof(LASIPS2State), + .class_init = lasips2_class_init, +}; - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq); +static void lasips2_port_set_irq(void *opaque, int n, int level) +{ + LASIPS2Port *s = LASIPS2_PORT(opaque); - return LASIPS2(dev); + qemu_set_irq(s->irq, level); } -static void lasips2_realize(DeviceState *dev, Error **errp) +static void lasips2_port_realize(DeviceState *dev, Error **errp) { - LASIPS2State *s = LASIPS2(dev); + LASIPS2Port *s = LASIPS2_PORT(dev); - vmstate_register(NULL, s->base, &vmstate_lasips2, s); + qdev_connect_gpio_out(DEVICE(s->ps2dev), PS2_DEVICE_IRQ, + qdev_get_gpio_in_named(dev, "ps2-input-irq", 0)); +} + +static void lasips2_port_init(Object *obj) +{ + LASIPS2Port *s = LASIPS2_PORT(obj); - s->kbd.dev = ps2_kbd_init(); - qdev_connect_gpio_out(DEVICE(s->kbd.dev), PS2_DEVICE_IRQ, - qdev_get_gpio_in_named(dev, "ps2-kbd-input-irq", - 0)); - s->mouse.dev = ps2_mouse_init(); - qdev_connect_gpio_out(DEVICE(s->mouse.dev), PS2_DEVICE_IRQ, - qdev_get_gpio_in_named(dev, "ps2-mouse-input-irq", - 0)); + qdev_init_gpio_out(DEVICE(obj), &s->irq, 1); + qdev_init_gpio_in_named(DEVICE(obj), lasips2_port_set_irq, + "ps2-input-irq", 1); } -static void lasips2_init(Object *obj) +static void lasips2_port_class_init(ObjectClass *klass, void *data) { - LASIPS2State *s = LASIPS2(obj); + DeviceClass *dc = DEVICE_CLASS(klass); - s->kbd.id = 0; - s->mouse.id = 1; - s->kbd.parent = s; - s->mouse.parent = s; + dc->realize = lasips2_port_realize; +} - memory_region_init_io(&s->kbd.reg, obj, &lasips2_reg_ops, &s->kbd, - "lasips2-kbd", 0x100); - memory_region_init_io(&s->mouse.reg, obj, &lasips2_reg_ops, &s->mouse, - "lasips2-mouse", 0x100); 
+static const TypeInfo lasips2_port_info = { + .name = TYPE_LASIPS2_PORT, + .parent = TYPE_DEVICE, + .instance_init = lasips2_port_init, + .instance_size = sizeof(LASIPS2Port), + .class_init = lasips2_port_class_init, + .class_size = sizeof(LASIPS2PortDeviceClass), + .abstract = true, +}; - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->kbd.reg); - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mouse.reg); +static void lasips2_kbd_port_realize(DeviceState *dev, Error **errp) +{ + LASIPS2KbdPort *s = LASIPS2_KBD_PORT(dev); + LASIPS2Port *lp = LASIPS2_PORT(dev); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_GET_CLASS(lp); - sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->kbd), errp)) { + return; + } + + lp->ps2dev = PS2_DEVICE(&s->kbd); + lpdc->parent_realize(dev, errp); +} + +static void lasips2_kbd_port_init(Object *obj) +{ + LASIPS2KbdPort *s = LASIPS2_KBD_PORT(obj); + LASIPS2Port *lp = LASIPS2_PORT(obj); + + memory_region_init_io(&lp->reg, obj, &lasips2_reg_ops, lp, "lasips2-kbd", + 0x100); + + object_initialize_child(obj, "kbd", &s->kbd, TYPE_PS2_KBD_DEVICE); + + lp->id = 0; + lp->lasips2 = container_of(s, LASIPS2State, kbd_port); +} + +static void lasips2_kbd_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass); - qdev_init_gpio_in_named(DEVICE(obj), lasips2_set_kbd_irq, - "ps2-kbd-input-irq", 1); - qdev_init_gpio_in_named(DEVICE(obj), lasips2_set_mouse_irq, - "ps2-mouse-input-irq", 1); + device_class_set_parent_realize(dc, lasips2_kbd_port_realize, + &lpdc->parent_realize); } -static Property lasips2_properties[] = { - DEFINE_PROP_UINT64("base", LASIPS2State, base, UINT64_MAX), - DEFINE_PROP_END_OF_LIST(), +static const TypeInfo lasips2_kbd_port_info = { + .name = TYPE_LASIPS2_KBD_PORT, + .parent = TYPE_LASIPS2_PORT, + .instance_size = sizeof(LASIPS2KbdPort), + .instance_init = lasips2_kbd_port_init, + .class_init = lasips2_kbd_port_class_init, }; -static void lasips2_class_init(ObjectClass *klass, void *data) +static void lasips2_mouse_port_realize(DeviceState *dev, Error **errp) +{ + LASIPS2MousePort *s = LASIPS2_MOUSE_PORT(dev); + LASIPS2Port *lp = LASIPS2_PORT(dev); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_GET_CLASS(lp); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mouse), errp)) { + return; + } + + lp->ps2dev = PS2_DEVICE(&s->mouse); + lpdc->parent_realize(dev, errp); +} + +static void lasips2_mouse_port_init(Object *obj) +{ + LASIPS2MousePort *s = LASIPS2_MOUSE_PORT(obj); + LASIPS2Port *lp = LASIPS2_PORT(obj); + + memory_region_init_io(&lp->reg, obj, &lasips2_reg_ops, lp, "lasips2-mouse", + 0x100); + + object_initialize_child(obj, "mouse", &s->mouse, TYPE_PS2_MOUSE_DEVICE); + + lp->id = 1; + lp->lasips2 = container_of(s, LASIPS2State, mouse_port); +} + +static void lasips2_mouse_port_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass); - dc->realize = lasips2_realize; - device_class_set_props(dc, lasips2_properties); - set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_parent_realize(dc, lasips2_mouse_port_realize, + &lpdc->parent_realize); } -static const TypeInfo lasips2_info = { - .name = TYPE_LASIPS2, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_init = lasips2_init, - .instance_size = sizeof(LASIPS2State), - .class_init = lasips2_class_init, +static const TypeInfo lasips2_mouse_port_info = { + .name = TYPE_LASIPS2_MOUSE_PORT, + .parent = 
TYPE_LASIPS2_PORT, + .instance_size = sizeof(LASIPS2MousePort), + .instance_init = lasips2_mouse_port_init, + .class_init = lasips2_mouse_port_class_init, }; static void lasips2_register_types(void) { type_register_static(&lasips2_info); + type_register_static(&lasips2_port_info); + type_register_static(&lasips2_kbd_port_info); + type_register_static(&lasips2_mouse_port_info); } type_init(lasips2_register_types) diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c index 9184411c3e..b92b63bedc 100644 --- a/hw/input/pckbd.c +++ b/hw/input/pckbd.c @@ -286,7 +286,7 @@ static void kbd_queue(KBDState *s, int b, int aux) s->pending |= aux ? KBD_PENDING_CTRL_AUX : KBD_PENDING_CTRL_KBD; kbd_safe_update_irq(s); } else { - ps2_queue(aux ? s->mouse : s->kbd, b); + ps2_queue(aux ? PS2_DEVICE(&s->ps2mouse) : PS2_DEVICE(&s->ps2kbd), b); } } @@ -408,9 +408,9 @@ static uint64_t kbd_read_data(void *opaque, hwaddr addr, timer_mod(s->throttle_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 1000); } - s->obdata = ps2_read_data(s->kbd); + s->obdata = ps2_read_data(PS2_DEVICE(&s->ps2kbd)); } else if (s->obsrc & KBD_OBSRC_MOUSE) { - s->obdata = ps2_read_data(s->mouse); + s->obdata = ps2_read_data(PS2_DEVICE(&s->ps2mouse)); } else if (s->obsrc & KBD_OBSRC_CTRL) { s->obdata = kbd_dequeue(s); } @@ -429,14 +429,15 @@ static void kbd_write_data(void *opaque, hwaddr addr, switch (s->write_cmd) { case 0: - ps2_write_keyboard(s->kbd, val); + ps2_write_keyboard(&s->ps2kbd, val); /* sending data to the keyboard reenables PS/2 communication */ s->mode &= ~KBD_MODE_DISABLE_KBD; kbd_safe_update_irq(s); break; case KBD_CCMD_WRITE_MODE: s->mode = val; - ps2_keyboard_set_translation(s->kbd, (s->mode & KBD_MODE_KCC) != 0); + ps2_keyboard_set_translation(&s->ps2kbd, + (s->mode & KBD_MODE_KCC) != 0); /* * a write to the mode byte interrupt enable flags directly updates * the irq lines @@ -458,7 +459,7 @@ static void kbd_write_data(void *opaque, hwaddr addr, outport_write(s, val); break; case KBD_CCMD_WRITE_MOUSE: - ps2_write_mouse(s->mouse, val); + ps2_write_mouse(&s->ps2mouse, val); /* sending data to the mouse reenables PS/2 communication */ s->mode &= ~KBD_MODE_DISABLE_MOUSE; kbd_safe_update_irq(s); @@ -699,15 +700,19 @@ static void i8042_mmio_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->region); - /* Note we can't use dc->vmsd without breaking migration compatibility */ - vmstate_register(NULL, 0, &vmstate_kbd, ks); + if (!sysbus_realize(SYS_BUS_DEVICE(&ks->ps2kbd), errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&ks->ps2mouse), errp)) { + return; + } - ks->kbd = ps2_kbd_init(); - qdev_connect_gpio_out(DEVICE(ks->kbd), PS2_DEVICE_IRQ, + qdev_connect_gpio_out(DEVICE(&ks->ps2kbd), PS2_DEVICE_IRQ, qdev_get_gpio_in_named(dev, "ps2-kbd-input-irq", 0)); - ks->mouse = ps2_mouse_init(); - qdev_connect_gpio_out(DEVICE(ks->mouse), PS2_DEVICE_IRQ, + + qdev_connect_gpio_out(DEVICE(&ks->ps2mouse), PS2_DEVICE_IRQ, qdev_get_gpio_in_named(dev, "ps2-mouse-input-irq", 0)); } @@ -719,6 +724,10 @@ static void i8042_mmio_init(Object *obj) ks->extended_state = true; + object_initialize_child(obj, "ps2kbd", &ks->ps2kbd, TYPE_PS2_KBD_DEVICE); + object_initialize_child(obj, "ps2mouse", &ks->ps2mouse, + TYPE_PS2_MOUSE_DEVICE); + qdev_init_gpio_out(DEVICE(obj), ks->irqs, 2); qdev_init_gpio_in_named(DEVICE(obj), i8042_mmio_set_kbd_irq, "ps2-kbd-input-irq", 1); @@ -732,32 +741,27 @@ static Property i8042_mmio_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static const VMStateDescription vmstate_kbd_mmio = { + 
.name = "pckbd-mmio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(kbd, MMIOKBDState, 0, vmstate_kbd, KBDState), + VMSTATE_END_OF_LIST() + } +}; + static void i8042_mmio_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = i8042_mmio_realize; dc->reset = i8042_mmio_reset; + dc->vmsd = &vmstate_kbd_mmio; device_class_set_props(dc, i8042_mmio_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } -MMIOKBDState *i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq, - ram_addr_t size, hwaddr mask) -{ - DeviceState *dev; - - dev = qdev_new(TYPE_I8042_MMIO); - qdev_prop_set_uint64(dev, "mask", mask); - qdev_prop_set_uint32(dev, "size", size); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - qdev_connect_gpio_out(dev, I8042_KBD_IRQ, kbd_irq); - qdev_connect_gpio_out(dev, I8042_MOUSE_IRQ, mouse_irq); - - return I8042_MMIO(dev); -} - static const TypeInfo i8042_mmio_info = { .name = TYPE_I8042_MMIO, .parent = TYPE_SYS_BUS_DEVICE, @@ -770,7 +774,7 @@ void i8042_isa_mouse_fake_event(ISAKBDState *isa) { KBDState *s = &isa->kbd; - ps2_mouse_fake_event(s->mouse); + ps2_mouse_fake_event(&s->ps2mouse); } void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out) @@ -843,6 +847,10 @@ static void i8042_initfn(Object *obj) memory_region_init_io(isa_s->io + 1, obj, &i8042_cmd_ops, s, "i8042-cmd", 1); + object_initialize_child(obj, "ps2kbd", &s->ps2kbd, TYPE_PS2_KBD_DEVICE); + object_initialize_child(obj, "ps2mouse", &s->ps2mouse, + TYPE_PS2_MOUSE_DEVICE); + qdev_init_gpio_out_named(DEVICE(obj), &s->a20_out, I8042_A20_LINE, 1); qdev_init_gpio_out(DEVICE(obj), s->irqs, 2); @@ -876,14 +884,22 @@ static void i8042_realizefn(DeviceState *dev, Error **errp) isa_register_ioport(isadev, isa_s->io + 0, 0x60); isa_register_ioport(isadev, isa_s->io + 1, 0x64); - s->kbd = ps2_kbd_init(); - qdev_connect_gpio_out(DEVICE(s->kbd), PS2_DEVICE_IRQ, + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ps2kbd), errp)) { + return; + } + + qdev_connect_gpio_out(DEVICE(&s->ps2kbd), PS2_DEVICE_IRQ, qdev_get_gpio_in_named(dev, "ps2-kbd-input-irq", 0)); - s->mouse = ps2_mouse_init(); - qdev_connect_gpio_out(DEVICE(s->mouse), PS2_DEVICE_IRQ, + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ps2mouse), errp)) { + return; + } + + qdev_connect_gpio_out(DEVICE(&s->ps2mouse), PS2_DEVICE_IRQ, qdev_get_gpio_in_named(dev, "ps2-mouse-input-irq", 0)); + if (isa_s->kbd_throttle && !isa_s->kbd.extended_state) { warn_report(TYPE_I8042 ": can't enable kbd-throttle without" " extended-state, disabling kbd-throttle"); diff --git a/hw/input/pl050.c b/hw/input/pl050.c index 209cc001cf..ec5e19285e 100644 --- a/hw/input/pl050.c +++ b/hw/input/pl050.c @@ -19,26 +19,12 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "hw/input/ps2.h" +#include "hw/input/pl050.h" #include "hw/irq.h" #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" -#define TYPE_PL050 "pl050" -OBJECT_DECLARE_SIMPLE_TYPE(PL050State, PL050) - -struct PL050State { - SysBusDevice parent_obj; - - MemoryRegion iomem; - void *dev; - uint32_t cr; - uint32_t clk; - uint32_t last; - int pending; - qemu_irq irq; - bool is_mouse; -}; static const VMStateDescription vmstate_pl050 = { .name = "pl050", @@ -115,7 +101,7 @@ static uint64_t pl050_read(void *opaque, hwaddr offset, } case 2: /* KMIDATA */ if (s->pending) { - s->last = ps2_read_data(s->dev); + s->last = ps2_read_data(s->ps2dev); } return s->last; case 3: /* KMICLKDIV */ @@ -144,9 +130,9 @@ static void 
pl050_write(void *opaque, hwaddr offset, /* ??? This should toggle the TX interrupt line. */ /* ??? This means kbd/mouse can block each other. */ if (s->is_mouse) { - ps2_write_mouse(s->dev, value); + ps2_write_mouse(PS2_MOUSE_DEVICE(s->ps2dev), value); } else { - ps2_write_keyboard(s->dev, value); + ps2_write_keyboard(PS2_KBD_DEVICE(s->ps2dev), value); } break; case 3: /* KMICLKDIV */ @@ -166,48 +152,100 @@ static const MemoryRegionOps pl050_ops = { static void pl050_realize(DeviceState *dev, Error **errp) { PL050State *s = PL050(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - memory_region_init_io(&s->iomem, OBJECT(s), &pl050_ops, s, "pl050", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq); - if (s->is_mouse) { - s->dev = ps2_mouse_init(); - } else { - s->dev = ps2_kbd_init(); - } - qdev_connect_gpio_out(DEVICE(s->dev), PS2_DEVICE_IRQ, + qdev_connect_gpio_out(DEVICE(s->ps2dev), PS2_DEVICE_IRQ, qdev_get_gpio_in_named(dev, "ps2-input-irq", 0)); } -static void pl050_keyboard_init(Object *obj) +static void pl050_kbd_realize(DeviceState *dev, Error **errp) { - PL050State *s = PL050(obj); + PL050DeviceClass *pdc = PL050_GET_CLASS(dev); + PL050KbdState *s = PL050_KBD_DEVICE(dev); + PL050State *ps = PL050(dev); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->kbd), errp)) { + return; + } + + ps->ps2dev = PS2_DEVICE(&s->kbd); + pdc->parent_realize(dev, errp); +} + +static void pl050_kbd_init(Object *obj) +{ + PL050KbdState *s = PL050_KBD_DEVICE(obj); + PL050State *ps = PL050(obj); + + ps->is_mouse = false; + object_initialize_child(obj, "kbd", &s->kbd, TYPE_PS2_KBD_DEVICE); +} + +static void pl050_mouse_realize(DeviceState *dev, Error **errp) +{ + PL050DeviceClass *pdc = PL050_GET_CLASS(dev); + PL050MouseState *s = PL050_MOUSE_DEVICE(dev); + PL050State *ps = PL050(dev); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mouse), errp)) { + return; + } - s->is_mouse = false; + ps->ps2dev = PS2_DEVICE(&s->mouse); + pdc->parent_realize(dev, errp); } static void pl050_mouse_init(Object *obj) { - PL050State *s = PL050(obj); + PL050MouseState *s = PL050_MOUSE_DEVICE(obj); + PL050State *ps = PL050(obj); + + ps->is_mouse = true; + object_initialize_child(obj, "mouse", &s->mouse, TYPE_PS2_MOUSE_DEVICE); +} - s->is_mouse = true; +static void pl050_kbd_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PL050DeviceClass *pdc = PL050_CLASS(oc); + + device_class_set_parent_realize(dc, pl050_kbd_realize, + &pdc->parent_realize); } static const TypeInfo pl050_kbd_info = { - .name = "pl050_keyboard", + .name = TYPE_PL050_KBD_DEVICE, .parent = TYPE_PL050, - .instance_init = pl050_keyboard_init, + .instance_init = pl050_kbd_init, + .instance_size = sizeof(PL050KbdState), + .class_init = pl050_kbd_class_init, }; +static void pl050_mouse_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PL050DeviceClass *pdc = PL050_CLASS(oc); + + device_class_set_parent_realize(dc, pl050_mouse_realize, + &pdc->parent_realize); +} + static const TypeInfo pl050_mouse_info = { - .name = "pl050_mouse", + .name = TYPE_PL050_MOUSE_DEVICE, .parent = TYPE_PL050, .instance_init = pl050_mouse_init, + .instance_size = sizeof(PL050MouseState), + .class_init = pl050_mouse_class_init, }; static void pl050_init(Object *obj) { + PL050State *s = PL050(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &pl050_ops, s, "pl050", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + 
qdev_init_gpio_in_named(DEVICE(obj), pl050_set_irq, "ps2-input-irq", 1); } @@ -224,6 +262,8 @@ static const TypeInfo pl050_type_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_init = pl050_init, .instance_size = sizeof(PL050State), + .class_init = pl050_class_init, + .class_size = sizeof(PL050DeviceClass), .abstract = true, .class_init = pl050_class_init, }; diff --git a/hw/input/ps2.c b/hw/input/ps2.c index 59bac28ac8..05cf7111e3 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -1224,19 +1224,6 @@ static void ps2_kbd_realize(DeviceState *dev, Error **errp) qemu_input_handler_register(dev, &ps2_keyboard_handler); } -void *ps2_kbd_init(void) -{ - DeviceState *dev; - PS2KbdState *s; - - dev = qdev_new(TYPE_PS2_KBD_DEVICE); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - s = PS2_KBD_DEVICE(dev); - - trace_ps2_kbd_init(s); - return s; -} - static QemuInputHandler ps2_mouse_handler = { .name = "QEMU PS/2 Mouse", .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, @@ -1249,19 +1236,6 @@ static void ps2_mouse_realize(DeviceState *dev, Error **errp) qemu_input_handler_register(dev, &ps2_mouse_handler); } -void *ps2_mouse_init(void) -{ - DeviceState *dev; - PS2MouseState *s; - - dev = qdev_new(TYPE_PS2_MOUSE_DEVICE); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - s = PS2_MOUSE_DEVICE(dev); - - trace_ps2_mouse_init(s); - return s; -} - static void ps2_kbd_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/input/trace-events b/hw/input/trace-events index e0bfe7f3ee..29001a827d 100644 --- a/hw/input/trace-events +++ b/hw/input/trace-events @@ -41,8 +41,6 @@ ps2_mouse_fake_event(void *opaque) "%p" ps2_write_mouse(void *opaque, int val) "%p val %d" ps2_kbd_reset(void *opaque) "%p" ps2_mouse_reset(void *opaque) "%p" -ps2_kbd_init(void *s) "%p" -ps2_mouse_init(void *s) "%p" # hid.c hid_kbd_queue_full(void) "queue full" diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 13df002ce4..1f7763964c 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -2389,8 +2389,15 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + /* + * Note that if the input line is still held high and the interrupt + * is not active then rule R_CVJS requires that the Pending state + * remains set; in that case we mustn't let it be cleared. 
+ */ if (value & (1 << i) && - (attrs.secure || s->itns[startvec + i])) { + (attrs.secure || s->itns[startvec + i]) && + !(setval == 0 && s->vectors[startvec + i].level && + !s->vectors[startvec + i].active)) { s->vectors[startvec + i].pending = setval; } } diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c index 3c9814a3b4..3380b09807 100644 --- a/hw/intc/loongarch_pch_pic.c +++ b/hw/intc/loongarch_pch_pic.c @@ -15,21 +15,21 @@ static void pch_pic_update_irq(LoongArchPCHPIC *s, uint64_t mask, int level) { - unsigned long val; + uint64_t val; int irq; if (level) { val = mask & s->intirr & ~s->int_mask; if (val) { - irq = find_first_bit(&val, 64); - s->intisr |= 0x1ULL << irq; + irq = ctz64(val); + s->intisr |= MAKE_64BIT_MASK(irq, 1); qemu_set_irq(s->parent_irq[s->htmsi_vector[irq]], 1); } } else { val = mask & s->intisr; if (val) { - irq = find_first_bit(&val, 64); - s->intisr &= ~(0x1ULL << irq); + irq = ctz64(val); + s->intisr &= ~MAKE_64BIT_MASK(irq, 1); qemu_set_irq(s->parent_irq[s->htmsi_vector[irq]], 0); } } diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 24e67020db..5b0b4d9624 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -301,23 +301,25 @@ void icp_reset(ICPState *icp) static void icp_realize(DeviceState *dev, Error **errp) { ICPState *icp = ICP(dev); + PowerPCCPU *cpu; CPUPPCState *env; Error *err = NULL; assert(icp->xics); assert(icp->cs); - env = &POWERPC_CPU(icp->cs)->env; + cpu = POWERPC_CPU(icp->cs); + env = &cpu->env; switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_POWER7: - icp->output = env->irq_inputs[POWER7_INPUT_INT]; + icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT); break; case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */ - icp->output = env->irq_inputs[POWER9_INPUT_INT]; + icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); break; case PPC_FLAGS_INPUT_970: - icp->output = env->irq_inputs[PPC970_INPUT_INT]; + icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT); break; default: diff --git a/hw/intc/xive.c b/hw/intc/xive.c index ae221fed73..a986b96843 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -695,8 +695,8 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp) env = &cpu->env; switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_POWER9: - tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT]; - tctx->os_output = env->irq_inputs[POWER9_INPUT_INT]; + tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT); + tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); break; default: diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig index 35b6680772..a99aa387c3 100644 --- a/hw/loongarch/Kconfig +++ b/hw/loongarch/Kconfig @@ -14,3 +14,6 @@ config LOONGARCH_VIRT select LOONGARCH_PCH_MSI select LOONGARCH_EXTIOI select LS7A_RTC + select SMBIOS + select ACPI_PCI + select ACPI_HW_REDUCED diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c new file mode 100644 index 0000000000..b95b83b079 --- /dev/null +++ b/hw/loongarch/acpi-build.c @@ -0,0 +1,609 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/bitmap.h" +#include "hw/pci/pci.h" +#include "hw/core/cpu.h" +#include "target/loongarch/cpu.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/acpi/bios-linker-loader.h" +#include 
"migration/vmstate.h" +#include "hw/mem/memory-device.h" +#include "sysemu/reset.h" + +/* Supported chipsets: */ +#include "hw/pci-host/ls7a.h" +#include "hw/loongarch/virt.h" +#include "hw/acpi/aml-build.h" + +#include "hw/acpi/utils.h" +#include "hw/acpi/pci.h" + +#include "qom/qom-qobject.h" + +#include "hw/acpi/generic_event_device.h" + +#define ACPI_BUILD_ALIGN_SIZE 0x1000 +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +#ifdef DEBUG_ACPI_BUILD +#define ACPI_BUILD_DPRINTF(fmt, ...) \ + do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0) +#else +#define ACPI_BUILD_DPRINTF(fmt, ...) +#endif + +/* build FADT */ +static void init_common_fadt_data(AcpiFadtData *data) +{ + AcpiFadtData fadt = { + /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */ + .rev = 5, + .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) | + (1 << ACPI_FADT_F_RESET_REG_SUP)), + + /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */ + .sleep_ctl = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_CTL, + }, + .sleep_sts = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_STS, + }, + + /* ACPI 5.0: 4.8.3.6 Reset Register */ + .reset_reg = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_RESET, + }, + .reset_val = ACPI_GED_RESET_VALUE, + }; + *data = fadt; +} + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). + */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +/* build FACS */ +static void +build_facs(GArray *table_data) +{ + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ +} + +/* build MADT */ +static void +build_madt(GArray *table_data, BIOSLinker *linker, LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + int i; + AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lams->oem_id, + .oem_table_id = lams->oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Local APIC Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ + + for (i = 0; i < ms->smp.cpus; i++) { + /* Processor Core Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 17, 1); /* Type */ + build_append_int_noprefix(table_data, 15, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, i + 1, 4); /* ACPI Processor ID */ + build_append_int_noprefix(table_data, i, 4); /* Core ID */ + build_append_int_noprefix(table_data, 1, 4); /* Flags */ + } + + /* Extend I/O Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 20, 1); /* Type */ + build_append_int_noprefix(table_data, 13, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, 3, 1); /* Cascade */ + 
build_append_int_noprefix(table_data, 0, 1); /* Node */ + build_append_int_noprefix(table_data, 0xffff, 8); /* Node map */ + + /* MSI Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 21, 1); /* Type */ + build_append_int_noprefix(table_data, 19, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, LS7A_PCH_MSI_ADDR_LOW, 8);/* Address */ + build_append_int_noprefix(table_data, 0x40, 4); /* Start */ + build_append_int_noprefix(table_data, 0xc0, 4); /* Count */ + + /* Bridge I/O Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 22, 1); /* Type */ + build_append_int_noprefix(table_data, 17, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, LS7A_PCH_REG_BASE, 8);/* Address */ + build_append_int_noprefix(table_data, 0x1000, 2); /* Size */ + build_append_int_noprefix(table_data, 0, 2); /* Id */ + build_append_int_noprefix(table_data, 0x40, 2); /* Base */ + + acpi_table_end(linker, &table); +} + +/* build SRAT */ +static void +build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + uint64_t i; + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + MachineState *ms = MACHINE(lams); + AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lams->oem_id, + .oem_table_id = lams->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < ms->smp.cpus; ++i) { + /* Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, 0, 1); + build_append_int_noprefix(table_data, i, 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } + + build_srat_memory(table_data, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, + 0, MEM_AFFINITY_ENABLED); + + build_srat_memory(table_data, VIRT_HIGHMEM_BASE, machine->ram_size - VIRT_LOWMEM_SIZE, + 0, MEM_AFFINITY_ENABLED); + + acpi_table_end(linker, &table); +} + +typedef +struct AcpiBuildState { + /* Copy of table in RAM (for patching). */ + MemoryRegion *table_mr; + /* Is table patched? 
*/ + uint8_t patched; + void *rsdp; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; +} AcpiBuildState; + +static void build_gpex_pci0_int(Aml *table) +{ + Aml *sb_scope = aml_scope("_SB"); + Aml *pci0_scope = aml_scope("PCI0"); + Aml *prt_pkg = aml_varpackage(128); + int slot, pin; + + for (slot = 0; slot < PCI_SLOT_MAX; slot++) { + for (pin = 0; pin < PCI_NUM_PINS; pin++) { + Aml *pkg = aml_package(4); + aml_append(pkg, aml_int((slot << 16) | 0xFFFF)); + aml_append(pkg, aml_int(pin)); + aml_append(pkg, aml_int(0)); + aml_append(pkg, aml_int(80 + (slot + pin) % 4)); + aml_append(prt_pkg, pkg); + } + } + aml_append(pci0_scope, aml_name_decl("_PRT", prt_pkg)); + aml_append(sb_scope, pci0_scope); + aml_append(table, sb_scope); +} + +static void build_dbg_aml(Aml *table) +{ + Aml *field; + Aml *method; + Aml *while_ctx; + Aml *scope = aml_scope("\\"); + Aml *buf = aml_local(0); + Aml *len = aml_local(1); + Aml *idx = aml_local(2); + + aml_append(scope, + aml_operation_region("DBG", AML_SYSTEM_IO, aml_int(0x0402), 0x01)); + field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("DBGB", 8)); + aml_append(scope, field); + + method = aml_method("DBUG", 1, AML_NOTSERIALIZED); + + aml_append(method, aml_to_hexstring(aml_arg(0), buf)); + aml_append(method, aml_to_buffer(buf, buf)); + aml_append(method, aml_subtract(aml_sizeof(buf), aml_int(1), len)); + aml_append(method, aml_store(aml_int(0), idx)); + + while_ctx = aml_while(aml_lless(idx, len)); + aml_append(while_ctx, + aml_store(aml_derefof(aml_index(buf, idx)), aml_name("DBGB"))); + aml_append(while_ctx, aml_increment(idx)); + aml_append(method, while_ctx); + aml_append(method, aml_store(aml_int(0x0A), aml_name("DBGB"))); + aml_append(scope, method); + aml_append(table, scope); +} + +static Aml *build_osc_method(void) +{ + Aml *if_ctx; + Aml *if_ctx2; + Aml *else_ctx; + Aml *method; + Aml *a_cwd1 = aml_name("CDW1"); + Aml *a_ctrl = aml_local(0); + + method = aml_method("_OSC", 4, AML_NOTSERIALIZED); + aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); + + if_ctx = aml_if(aml_equal( + aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"))); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); + aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl)); + + /* + * Always allow native PME, AER (no dependencies) + * Allow SHPC (PCI bridges can have SHPC controller) + */ + aml_append(if_ctx, aml_and(a_ctrl, aml_int(0x1F), a_ctrl)); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1)))); + /* Unknown revision */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl))); + /* Capabilities bits were masked */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + /* Update DWORD3 in the buffer */ + aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3"))); + aml_append(method, if_ctx); + + else_ctx = aml_else(); + /* Unrecognized UUID */ + aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1)); + aml_append(method, else_ctx); + + aml_append(method, aml_return(aml_arg(3))); + return method; +} + +static void build_uart_device_aml(Aml *table) +{ + Aml *dev; + Aml *crs; + Aml *pkg0, *pkg1, *pkg2; + uint32_t uart_irq = LS7A_UART_IRQ; + + Aml *scope = aml_scope("_SB"); + dev = aml_device("COMA"); + aml_append(dev, 
aml_name_decl("_HID", aml_string("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, + 0, 0x1FE001E0, 0x1FE001E7, 0, 0x8)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_SHARED, &uart_irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + pkg0 = aml_package(0x2); + aml_append(pkg0, aml_int(0x05F5E100)); + aml_append(pkg0, aml_string("clock-frenquency")); + pkg1 = aml_package(0x1); + aml_append(pkg1, pkg0); + pkg2 = aml_package(0x2); + aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301")); + aml_append(pkg2, pkg1); + aml_append(dev, aml_name_decl("_DSD", pkg2)); + aml_append(scope, dev); + aml_append(table, scope); +} + +/* build DSDT */ +static void +build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + Aml *dsdt, *sb_scope, *scope, *dev, *crs, *pkg; + int root_bus_limit = 0x7F; + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lams->oem_id, + .oem_table_id = lams->oem_table_id }; + + acpi_table_begin(&table, table_data); + + dsdt = init_aml_allocator(); + + build_dbg_aml(dsdt); + + sb_scope = aml_scope("_SB"); + dev = aml_device("PCI0"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); + aml_append(dev, aml_name_decl("_ADR", aml_int(0))); + aml_append(dev, aml_name_decl("_BBN", aml_int(0))); + aml_append(dev, aml_name_decl("_UID", aml_int(1))); + aml_append(dev, build_osc_method()); + aml_append(sb_scope, dev); + aml_append(dsdt, sb_scope); + + build_gpex_pci0_int(dsdt); + build_uart_device_aml(dsdt); + if (lams->acpi_ged) { + build_ged_aml(dsdt, "\\_SB."GED_DEVICE, + HOTPLUG_HANDLER(lams->acpi_ged), + LS7A_SCI_IRQ - PCH_PIC_IRQ_OFFSET, AML_SYSTEM_MEMORY, + VIRT_GED_EVT_ADDR); + } + + scope = aml_scope("\\_SB.PCI0"); + /* Build PCI0._CRS */ + crs = aml_resource_template(); + aml_append(crs, + aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, + 0x0000, 0x0, root_bus_limit, + 0x0000, root_bus_limit + 1)); + aml_append(crs, + aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, + AML_POS_DECODE, AML_ENTIRE_RANGE, + 0x0000, 0x0000, 0xFFFF, 0x18000000, 0x10000)); + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, + 0, LS7A_PCI_MEM_BASE, + LS7A_PCI_MEM_BASE + LS7A_PCI_MEM_SIZE - 1, + 0, LS7A_PCI_MEM_BASE)); + aml_append(scope, aml_name_decl("_CRS", crs)); + aml_append(dsdt, scope); + + /* System State Package */ + scope = aml_scope("\\"); + pkg = aml_package(4); + aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5)); + aml_append(pkg, aml_int(0)); /* ignored */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S5", pkg)); + aml_append(dsdt, scope); + /* Copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +static void acpi_build(AcpiBuildTables *tables, MachineState *machine) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); + GArray *table_offsets; + AcpiFadtData fadt_data; + unsigned facs, rsdt, fadt, dsdt; + uint8_t *u; + size_t aml_len = 0; + GArray 
*tables_blob = tables->table_data; + + init_common_fadt_data(&fadt_data); + + table_offsets = g_array_new(false, true, sizeof(uint32_t)); + ACPI_BUILD_DPRINTF("init ACPI tables\n"); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64, false); + + /* + * FACS is pointed to by FADT. + * We place it first since it's the only table that has alignment + * requirements. + */ + facs = tables_blob->len; + build_facs(tables_blob); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, machine); + + /* + * Count the size of the DSDT, we will need it for + * legacy sizing of ACPI tables. + */ + aml_len += tables_blob->len - dsdt; + + /* ACPI tables pointed to by RSDT */ + fadt = tables_blob->len; + acpi_add_table(table_offsets, tables_blob); + fadt_data.facs_tbl_offset = &facs; + fadt_data.dsdt_tbl_offset = &dsdt; + fadt_data.xdsdt_tbl_offset = &dsdt; + build_fadt(tables_blob, tables->linker, &fadt_data, + lams->oem_id, lams->oem_table_id); + aml_len += tables_blob->len - fadt; + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, lams); + + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, machine); + + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = cpu_to_le64(LS_PCIECFG_BASE), + .size = cpu_to_le64(LS_PCIECFG_SIZE), + }; + build_mcfg(tables_blob, tables->linker, &mcfg, lams->oem_id, + lams->oem_table_id); + } + + /* Add tables supplied by user (if any) */ + for (u = acpi_table_first(); u; u = acpi_table_next(u)) { + unsigned len = acpi_table_len(u); + + acpi_add_table(table_offsets, tables_blob); + g_array_append_vals(tables_blob, u, len); + } + + /* RSDT is pointed to by RSDP */ + rsdt = tables_blob->len; + build_rsdt(tables_blob, tables->linker, table_offsets, + lams->oem_id, lams->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 0, + .oem_id = lams->oem_id, + .xsdt_tbl_offset = NULL, + .rsdt_tbl_offset = &rsdt, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* + * The align size is 128, warn if 64k is not enough therefore + * the align size could be resized. + */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing CPUs, NUMA nodes, memory slots" + " or PCI bridges."); + } + + acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); + + /* Cleanup memory that's no longer used. */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* + * Make sure RAM size is correct - in case it got changed + * e.g. by migration + */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. 
*/ + if (!build_state || build_state->patched) { + return; + } + build_state->patched = 1; + + acpi_build_tables_init(&tables); + + acpi_build(&tables, MACHINE(qdev_get_machine())); + + acpi_ram_update(build_state->table_mr, tables.table_data); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + + acpi_build_tables_cleanup(&tables, true); +} + +static void acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = 0; +} + +static const VMStateDescription vmstate_acpi_build = { + .name = "acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +void loongarch_acpi_setup(LoongArchMachineState *lams) +{ + AcpiBuildTables tables; + AcpiBuildState *build_state; + + if (!lams->fw_cfg) { + ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n"); + return; + } + + if (!loongarch_is_acpi_enabled(lams)) { + ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n"); + return; + } + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + acpi_build(&tables, MACHINE(lams)); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = + acpi_add_rom_blob(acpi_build_update, build_state, + tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE); + + build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + qemu_register_reset(acpi_build_reset, build_state); + acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); + + /* + * Cleanup tables but don't free the memory: we track it + * in build_state. 
+ */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/loongarch/fw_cfg.c b/hw/loongarch/fw_cfg.c new file mode 100644 index 0000000000..f6503d5607 --- /dev/null +++ b/hw/loongarch/fw_cfg.c @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU fw_cfg helpers (LoongArch specific) + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/loongarch/fw_cfg.h" +#include "hw/loongarch/virt.h" +#include "hw/nvram/fw_cfg.h" +#include "sysemu/sysemu.h" + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +FWCfgState *loongarch_fw_cfg_init(ram_addr_t ram_size, MachineState *ms) +{ + FWCfgState *fw_cfg; + int max_cpus = ms->smp.max_cpus; + int smp_cpus = ms->smp.cpus; + + fw_cfg = fw_cfg_init_mem_wide(VIRT_FWCFG_BASE + 8, VIRT_FWCFG_BASE, 8, 0, NULL); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); + return fw_cfg; +} diff --git a/hw/loongarch/fw_cfg.h b/hw/loongarch/fw_cfg.h new file mode 100644 index 0000000000..7c0de4db4a --- /dev/null +++ b/hw/loongarch/fw_cfg.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU fw_cfg helpers (LoongArch specific) + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGARCH_FW_CFG_H +#define HW_LOONGARCH_FW_CFG_H + +#include "hw/boards.h" +#include "hw/nvram/fw_cfg.h" + +FWCfgState *loongarch_fw_cfg_init(ram_addr_t ram_size, MachineState *ms); +#endif diff --git a/hw/loongarch/loongson3.c b/hw/loongarch/loongson3.c index 15fddfc4f5..a08dc9d299 100644 --- a/hw/loongarch/loongson3.c +++ b/hw/loongarch/loongson3.c @@ -28,13 +28,201 @@ #include "hw/pci-host/ls7a.h" #include "hw/pci-host/gpex.h" #include "hw/misc/unimp.h" - +#include "hw/loongarch/fw_cfg.h" #include "target/loongarch/cpu.h" +#include "hw/firmware/smbios.h" +#include "hw/acpi/aml-build.h" +#include "qapi/qapi-visit-common.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/mem/nvdimm.h" +#include "sysemu/device_tree.h" +#include <libfdt.h> + +static void create_fdt(LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + + ms->fdt = create_device_tree(&lams->fdt_size); + if (!ms->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + /* Header */ + qemu_fdt_setprop_string(ms->fdt, "/", "compatible", + "linux,dummy-loongson3"); + qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2); +} + +static void fdt_add_cpu_nodes(const LoongArchMachineState *lams) +{ + int num; + const MachineState *ms = MACHINE(lams); + int smp_cpus = ms->smp.cpus; + + qemu_fdt_add_subnode(ms->fdt, "/cpus"); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); + + /* cpu nodes */ + for (num = smp_cpus - 1; num >= 0; num--) { + char *nodename = g_strdup_printf("/cpus/cpu@%d", num); + LoongArchCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num)); + + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + cpu->dtb_compatible); + qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num); + 
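create_fdt() and fdt_add_cpu_nodes() above go through QEMU's qemu_fdt_* wrappers around libfdt. A rough standalone equivalent, building only the root properties plus a single cpu@0 node against raw libfdt, is sketched below; it assumes the libfdt headers are installed (build with -lfdt) and omits most error checking.

```c
/* Minimal sketch of the tree create_fdt()/fdt_add_cpu_nodes() produce. */
#include <stdio.h>
#include <stdlib.h>
#include <libfdt.h>

#define FDT_BUF_SIZE (1024 * 1024)

int main(void)
{
    void *fdt = malloc(FDT_BUF_SIZE);
    int err, cpus, cpu0;

    err = fdt_create_empty_tree(fdt, FDT_BUF_SIZE);
    if (err) {
        fprintf(stderr, "fdt_create_empty_tree: %s\n", fdt_strerror(err));
        return 1;
    }

    /* Root properties, as in create_fdt() */
    fdt_setprop_string(fdt, 0, "compatible", "linux,dummy-loongson3");
    fdt_setprop_cell(fdt, 0, "#address-cells", 2);
    fdt_setprop_cell(fdt, 0, "#size-cells", 2);

    /* /cpus container plus one cpu node, as in fdt_add_cpu_nodes() */
    cpus = fdt_add_subnode(fdt, 0, "cpus");
    fdt_setprop_cell(fdt, cpus, "#address-cells", 1);
    fdt_setprop_cell(fdt, cpus, "#size-cells", 0);

    cpu0 = fdt_add_subnode(fdt, cpus, "cpu@0");
    fdt_setprop_string(fdt, cpu0, "device_type", "cpu");
    fdt_setprop_cell(fdt, cpu0, "reg", 0);

    printf("dtb built, totalsize %u bytes\n", (unsigned)fdt_totalsize(fdt));
    free(fdt);
    return 0;
}
```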
qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(ms->fdt)); + g_free(nodename); + } + + /*cpu map */ + qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); + + for (num = smp_cpus - 1; num >= 0; num--) { + char *cpu_path = g_strdup_printf("/cpus/cpu@%d", num); + char *map_path; + + if (ms->smp.threads > 1) { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/core%d/thread%d", + num / (ms->smp.cores * ms->smp.threads), + (num / ms->smp.threads) % ms->smp.cores, + num % ms->smp.threads); + } else { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/core%d", + num / ms->smp.cores, + num % ms->smp.cores); + } + qemu_fdt_add_path(ms->fdt, map_path); + qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); + + g_free(map_path); + g_free(cpu_path); + } +} + +static void fdt_add_fw_cfg_node(const LoongArchMachineState *lams) +{ + char *nodename; + hwaddr base = VIRT_FWCFG_BASE; + const MachineState *ms = MACHINE(lams); + + nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, 0x8); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); +} + +static void fdt_add_pcie_node(const LoongArchMachineState *lams) +{ + char *nodename; + hwaddr base_mmio = LS7A_PCI_MEM_BASE; + hwaddr size_mmio = LS7A_PCI_MEM_SIZE; + hwaddr base_pio = LS7A_PCI_IO_BASE; + hwaddr size_pio = LS7A_PCI_IO_SIZE; + hwaddr base_pcie = LS_PCIECFG_BASE; + hwaddr size_pcie = LS_PCIECFG_SIZE; + hwaddr base = base_pcie; + + const MachineState *ms = MACHINE(lams); + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0, + PCIE_MMCFG_BUS(LS_PCIECFG_SIZE - 1)); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base_pcie, 2, size_pcie); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", + 1, FDT_PCI_RANGE_IOPORT, 2, LS7A_PCI_IO_OFFSET, + 2, base_pio, 2, size_pio, + 1, FDT_PCI_RANGE_MMIO, 2, base_mmio, + 2, base_mmio, 2, size_mmio); + g_free(nodename); + qemu_fdt_dumpdtb(ms->fdt, lams->fdt_size); +} + #define PM_BASE 0x10080000 #define PM_SIZE 0x100 #define PM_CTRL 0x10 +static void virt_build_smbios(LoongArchMachineState *lams) +{ + MachineState *ms = MACHINE(lams); + MachineClass *mc = MACHINE_GET_CLASS(lams); + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + const char *product = "QEMU Virtual Machine"; + + if (!lams->fw_cfg) { + return; + } + + smbios_set_defaults("QEMU", product, mc->name, false, + true, SMBIOS_ENTRY_POINT_TYPE_64); + + smbios_get_tables(ms, NULL, 0, &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, &error_fatal); + + if (smbios_anchor) { + fw_cfg_add_file(lams->fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(lams->fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +} + +static void 
virt_machine_done(Notifier *notifier, void *data) +{ + LoongArchMachineState *lams = container_of(notifier, + LoongArchMachineState, machine_done); + virt_build_smbios(lams); + loongarch_acpi_setup(lams); +} + +struct memmap_entry { + uint64_t address; + uint64_t length; + uint32_t type; + uint32_t reserved; +}; + +static struct memmap_entry *memmap_table; +static unsigned memmap_entries; + +static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type) +{ + /* Ensure there are no duplicate entries. */ + for (unsigned i = 0; i < memmap_entries; i++) { + assert(memmap_table[i].address != address); + } + + memmap_table = g_renew(struct memmap_entry, memmap_table, + memmap_entries + 1); + memmap_table[memmap_entries].address = cpu_to_le64(address); + memmap_table[memmap_entries].length = cpu_to_le64(length); + memmap_table[memmap_entries].type = cpu_to_le32(type); + memmap_table[memmap_entries].reserved = 0; + memmap_entries++; +} + /* * This is a placeholder for missing ACPI, * and will eventually be replaced. @@ -76,6 +264,8 @@ static const MemoryRegionOps loongarch_virt_pm_ops = { static struct _loaderparams { uint64_t ram_size; const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; } loaderparams; static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr) @@ -103,7 +293,32 @@ static int64_t load_kernel_info(void) return kernel_entry; } -static void loongarch_devices_init(DeviceState *pch_pic) +static DeviceState *create_acpi_ged(DeviceState *pch_pic, LoongArchMachineState *lams) +{ + DeviceState *dev; + MachineState *ms = MACHINE(lams); + uint32_t event = ACPI_GED_PWR_DOWN_EVT; + + if (ms->ram_slots) { + event |= ACPI_GED_MEM_HOTPLUG_EVT; + } + dev = qdev_new(TYPE_ACPI_GED); + qdev_prop_set_uint32(dev, "ged-event", event); + + /* ged event */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, VIRT_GED_EVT_ADDR); + /* memory hotplug */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, VIRT_GED_MEM_ADDR); + /* ged regs used for reset and power down */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(pch_pic, LS7A_SCI_IRQ - PCH_PIC_IRQ_OFFSET)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + return dev; +} + +static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState *lams) { DeviceState *gpex_dev; SysBusDevice *d; @@ -179,6 +394,8 @@ static void loongarch_devices_init(DeviceState *pch_pic) memory_region_init_io(pm_mem, NULL, &loongarch_virt_pm_ops, NULL, "loongarch_virt_pm", PM_SIZE); memory_region_add_subregion(get_system_memory(), PM_BASE, pm_mem); + /* acpi ged */ + lams->acpi_ged = create_acpi_ged(pch_pic, lams); } static void loongarch_irq_init(LoongArchMachineState *lams) @@ -280,7 +497,38 @@ static void loongarch_irq_init(LoongArchMachineState *lams) qdev_get_gpio_in(extioi, i + PCH_MSI_IRQ_START)); } - loongarch_devices_init(pch_pic); + loongarch_devices_init(pch_pic, lams); +} + +static void loongarch_firmware_init(LoongArchMachineState *lams) +{ + char *filename = MACHINE(lams)->firmware; + char *bios_name = NULL; + int bios_size; + + lams->bios_loaded = false; + if (filename) { + bios_name = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename); + if (!bios_name) { + error_report("Could not find ROM image '%s'", filename); + exit(1); + } + + bios_size = load_image_targphys(bios_name, VIRT_BIOS_BASE, VIRT_BIOS_SIZE); + if (bios_size < 0) { + error_report("Could not load ROM image '%s'", bios_name); + exit(1); + } + + 
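memmap_add_entry() above collects the guest RAM ranges that loongarch_init() later hands to the firmware as the fw_cfg file "etc/memmap". Below is a libc-only sketch of the same table: realloc() stands in for g_renew(), and the cpu_to_le64()/cpu_to_le32() conversions are omitted, so the byte layout is only right on a little-endian host.

```c
/* Standalone sketch of the "etc/memmap" table; not the QEMU code. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MiB (1024ull * 1024)

struct memmap_entry {
    uint64_t address;
    uint64_t length;
    uint32_t type;
    uint32_t reserved;
};

static struct memmap_entry *memmap_table;
static unsigned memmap_entries;

static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type)
{
    /* The real code asserts that no two entries share a base address. */
    for (unsigned i = 0; i < memmap_entries; i++) {
        assert(memmap_table[i].address != address);
    }
    memmap_table = realloc(memmap_table,
                           (memmap_entries + 1) * sizeof(*memmap_table));
    memmap_table[memmap_entries].address = address;
    memmap_table[memmap_entries].length = length;
    memmap_table[memmap_entries].type = type;
    memmap_table[memmap_entries].reserved = 0;
    memmap_entries++;
}

int main(void)
{
    uint64_t ram_size = 4096 * MiB;

    /* Same two system-RAM entries the machine init code registers. */
    memmap_add_entry(0, 256 * MiB, 1);
    memmap_add_entry(0x90000000, ram_size - 256 * MiB, 1);

    for (unsigned i = 0; i < memmap_entries; i++) {
        printf("entry %u: base 0x%" PRIx64 " size 0x%" PRIx64 " type %u\n",
               i, memmap_table[i].address, memmap_table[i].length,
               memmap_table[i].type);
    }
    free(memmap_table);
    return 0;
}
```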
g_free(bios_name); + + memory_region_init_ram(&lams->bios, NULL, "loongarch.bios", + VIRT_BIOS_SIZE, &error_fatal); + memory_region_set_readonly(&lams->bios, true); + memory_region_add_subregion(get_system_memory(), VIRT_BIOS_BASE, &lams->bios); + lams->bios_loaded = true; + } + } static void reset_load_elf(void *opaque) @@ -294,18 +542,97 @@ static void reset_load_elf(void *opaque) } } +/* Load an image file into an fw_cfg entry identified by key. */ +static void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key, + uint16_t data_key, const char *image_name, + bool try_decompress) +{ + size_t size = -1; + uint8_t *data; + + if (image_name == NULL) { + return; + } + + if (try_decompress) { + size = load_image_gzipped_buffer(image_name, + LOAD_IMAGE_MAX_GUNZIP_BYTES, &data); + } + + if (size == (size_t)-1) { + gchar *contents; + gsize length; + + if (!g_file_get_contents(image_name, &contents, &length, NULL)) { + error_report("failed to load \"%s\"", image_name); + exit(1); + } + size = length; + data = (uint8_t *)contents; + } + + fw_cfg_add_i32(fw_cfg, size_key, size); + fw_cfg_add_bytes(fw_cfg, data_key, data, size); +} + +static void fw_cfg_add_kernel_info(FWCfgState *fw_cfg) +{ + /* + * Expose the kernel, the command line, and the initrd in fw_cfg. + * We don't process them here at all, it's all left to the + * firmware. + */ + load_image_to_fw_cfg(fw_cfg, + FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA, + loaderparams.kernel_filename, + false); + + if (loaderparams.initrd_filename) { + load_image_to_fw_cfg(fw_cfg, + FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA, + loaderparams.initrd_filename, false); + } + + if (loaderparams.kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(loaderparams.kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, + loaderparams.kernel_cmdline); + } +} + +static void loongarch_firmware_boot(LoongArchMachineState *lams) +{ + fw_cfg_add_kernel_info(lams->fw_cfg); +} + +static void loongarch_direct_kernel_boot(LoongArchMachineState *lams) +{ + MachineState *machine = MACHINE(lams); + int64_t kernel_addr = 0; + LoongArchCPU *lacpu; + int i; + + kernel_addr = load_kernel_info(); + if (!machine->firmware) { + for (i = 0; i < machine->smp.cpus; i++) { + lacpu = LOONGARCH_CPU(qemu_get_cpu(i)); + lacpu->env.load_elf = true; + lacpu->env.elf_address = kernel_addr; + } + } +} + static void loongarch_init(MachineState *machine) { + LoongArchCPU *lacpu; const char *cpu_model = machine->cpu_type; - const char *kernel_filename = machine->kernel_filename; ram_addr_t offset = 0; ram_addr_t ram_size = machine->ram_size; uint64_t highram_size = 0; MemoryRegion *address_space_mem = get_system_memory(); LoongArchMachineState *lams = LOONGARCH_MACHINE(machine); - LoongArchCPU *lacpu; int i; - int64_t kernel_addr = 0; if (!cpu_model) { cpu_model = LOONGARCH_CPU_TYPE_NAME("la464"); @@ -320,41 +647,102 @@ static void loongarch_init(MachineState *machine) error_report("ram_size must be greater than 1G."); exit(1); } - + create_fdt(lams); /* Init CPUs */ for (i = 0; i < machine->smp.cpus; i++) { cpu_create(machine->cpu_type); } - + fdt_add_cpu_nodes(lams); /* Add memory region */ memory_region_init_alias(&lams->lowmem, NULL, "loongarch.lowram", machine->ram, 0, 256 * MiB); memory_region_add_subregion(address_space_mem, offset, &lams->lowmem); offset += 256 * MiB; + memmap_add_entry(0, 256 * MiB, 1); highram_size = ram_size - 256 * MiB; memory_region_init_alias(&lams->highmem, NULL, "loongarch.highmem", machine->ram, offset, highram_size); 
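The machine init code above (continued in the next hunk) splits a single RAM backend into two guest-visible aliases: 256 MiB at guest physical address 0 and the remainder at 0x90000000, matching the two memmap entries it registers. A quick sketch of that layout calculation, using the constants from the patch:

```c
/* Sketch of the guest-physical RAM layout set up by loongarch_init(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MiB          (1024ull * 1024)
#define GiB          (1024ull * MiB)
#define HIGHMEM_BASE 0x90000000ull

static void print_ram_layout(uint64_t ram_size)
{
    assert(ram_size > 1 * GiB);            /* same check as the machine init */

    uint64_t low_size  = 256 * MiB;        /* alias at RAM offset 0 */
    uint64_t high_size = ram_size - low_size; /* alias at RAM offset 256 MiB */

    printf("lowmem : GPA [0x0, 0x%llx)\n", (unsigned long long)low_size);
    printf("highmem: GPA [0x%llx, 0x%llx)\n",
           (unsigned long long)HIGHMEM_BASE,
           (unsigned long long)(HIGHMEM_BASE + high_size));
}

int main(void)
{
    print_ram_layout(4 * GiB);
    return 0;
}
```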
memory_region_add_subregion(address_space_mem, 0x90000000, &lams->highmem); + memmap_add_entry(0x90000000, highram_size, 1); /* Add isa io region */ memory_region_init_alias(&lams->isa_io, NULL, "isa-io", get_system_io(), 0, LOONGARCH_ISA_IO_SIZE); memory_region_add_subregion(address_space_mem, LOONGARCH_ISA_IO_BASE, &lams->isa_io); - if (kernel_filename) { - loaderparams.ram_size = ram_size; - loaderparams.kernel_filename = kernel_filename; - kernel_addr = load_kernel_info(); - if (!machine->firmware) { - for (i = 0; i < machine->smp.cpus; i++) { - lacpu = LOONGARCH_CPU(qemu_get_cpu(i)); - lacpu->env.load_elf = true; - lacpu->env.elf_address = kernel_addr; - qemu_register_reset(reset_load_elf, lacpu); - } + /* load the BIOS image. */ + loongarch_firmware_init(lams); + + /* fw_cfg init */ + lams->fw_cfg = loongarch_fw_cfg_init(ram_size, machine); + rom_set_fw(lams->fw_cfg); + if (lams->fw_cfg != NULL) { + fw_cfg_add_file(lams->fw_cfg, "etc/memmap", + memmap_table, + sizeof(struct memmap_entry) * (memmap_entries)); + } + fdt_add_fw_cfg_node(lams); + loaderparams.ram_size = ram_size; + loaderparams.kernel_filename = machine->kernel_filename; + loaderparams.kernel_cmdline = machine->kernel_cmdline; + loaderparams.initrd_filename = machine->initrd_filename; + /* load the kernel. */ + if (loaderparams.kernel_filename) { + if (lams->bios_loaded) { + loongarch_firmware_boot(lams); + } else { + loongarch_direct_kernel_boot(lams); } } + /* register reset function */ + for (i = 0; i < machine->smp.cpus; i++) { + lacpu = LOONGARCH_CPU(qemu_get_cpu(i)); + qemu_register_reset(reset_load_elf, lacpu); + } /* Initialize the IO interrupt subsystem */ loongarch_irq_init(lams); + lams->machine_done.notify = virt_machine_done; + qemu_add_machine_init_done_notifier(&lams->machine_done); + fdt_add_pcie_node(lams); + + /* load fdt */ + MemoryRegion *fdt_rom = g_new(MemoryRegion, 1); + memory_region_init_rom(fdt_rom, NULL, "fdt", LA_FDT_SIZE, &error_fatal); + memory_region_add_subregion(get_system_memory(), LA_FDT_BASE, fdt_rom); + rom_add_blob_fixed("fdt", machine->fdt, lams->fdt_size, LA_FDT_BASE); +} + +bool loongarch_is_acpi_enabled(LoongArchMachineState *lams) +{ + if (lams->acpi == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + +static void loongarch_get_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(obj); + OnOffAuto acpi = lams->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + +static void loongarch_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &lams->acpi, errp); +} + +static void loongarch_machine_initfn(Object *obj) +{ + LoongArchMachineState *lams = LOONGARCH_MACHINE(obj); + + lams->acpi = ON_OFF_AUTO_AUTO; + lams->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + lams->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); } static void loongarch_class_init(ObjectClass *oc, void *data) @@ -372,6 +760,12 @@ static void loongarch_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_VIRTIO; mc->default_boot_order = "c"; mc->no_cdrom = 1; + + object_class_property_add(oc, "acpi", "OnOffAuto", + loongarch_get_acpi, loongarch_set_acpi, + NULL, NULL); + object_class_property_set_description(oc, "acpi", + "Enable ACPI"); } static const TypeInfo loongarch_machine_types[] = { @@ -380,6 +774,7 @@ static const TypeInfo loongarch_machine_types[] = { .parent = TYPE_MACHINE, 
.instance_size = sizeof(LoongArchMachineState), .class_init = loongarch_class_init, + .instance_init = loongarch_machine_initfn, } }; diff --git a/hw/loongarch/meson.build b/hw/loongarch/meson.build index cecb1a5d65..6a2a1b18e5 100644 --- a/hw/loongarch/meson.build +++ b/hw/loongarch/meson.build @@ -1,4 +1,8 @@ loongarch_ss = ss.source_set() -loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files('loongson3.c')) +loongarch_ss.add(files( + 'fw_cfg.c', +)) +loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: [files('loongson3.c'), fdt]) +loongarch_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-build.c')) hw_arch += {'loongarch': loongarch_ss} diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c index 1eb8bd5018..6aefe9a61b 100644 --- a/hw/mips/jazz.c +++ b/hw/mips/jazz.c @@ -361,9 +361,16 @@ static void mips_jazz_init(MachineState *machine, memory_region_add_subregion(address_space, 0x80004000, rtc); /* Keyboard (i8042) */ - i8042 = i8042_mm_init(qdev_get_gpio_in(rc4030, 6), - qdev_get_gpio_in(rc4030, 7), - 0x1000, 0x1); + i8042 = I8042_MMIO(qdev_new(TYPE_I8042_MMIO)); + qdev_prop_set_uint64(DEVICE(i8042), "mask", 1); + qdev_prop_set_uint32(DEVICE(i8042), "size", 0x1000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(i8042), &error_fatal); + + qdev_connect_gpio_out(DEVICE(i8042), I8042_KBD_IRQ, + qdev_get_gpio_in(rc4030, 6)); + qdev_connect_gpio_out(DEVICE(i8042), I8042_MOUSE_IRQ, + qdev_get_gpio_in(rc4030, 7)); + memory_region_add_subregion(address_space, 0x80005000, sysbus_mmio_get_region(SYS_BUS_DEVICE(i8042), 0)); diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7ad948ee7c..dd0d056fde 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -49,7 +49,6 @@ #define VIRTIO_NET_VM_VERSION 11 -#define MAC_TABLE_ENTRIES 64 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ /* previously fixed value */ @@ -1434,57 +1433,71 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, return VIRTIO_NET_OK; } -static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num) { VirtIONet *n = VIRTIO_NET(vdev); struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = VIRTIO_NET_ERR; - VirtQueueElement *elem; size_t s; struct iovec *iov, *iov2; - unsigned int iov_cnt; + + if (iov_size(in_sg, in_num) < sizeof(status) || + iov_size(out_sg, out_num) < sizeof(ctrl)) { + virtio_error(vdev, "virtio-net ctrl missing headers"); + return 0; + } + + iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num); + s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl)); + iov_discard_front(&iov, &out_num, sizeof(ctrl)); + if (s != sizeof(ctrl)) { + status = VIRTIO_NET_ERR; + } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { + status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { + status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { + status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { + status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { + status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { + status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num); + } + + s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status)); + 
assert(s == sizeof(status)); + + g_free(iov2); + return sizeof(status); +} + +static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtQueueElement *elem; for (;;) { + size_t written; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { break; } - if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || - iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { - virtio_error(vdev, "virtio-net ctrl missing headers"); + + written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num, + elem->out_sg, elem->out_num); + if (written > 0) { + virtqueue_push(vq, elem, written); + virtio_notify(vdev, vq); + g_free(elem); + } else { virtqueue_detach_element(vq, elem, 0); g_free(elem); break; } - - iov_cnt = elem->out_num; - iov2 = iov = g_memdup2(elem->out_sg, - sizeof(struct iovec) * elem->out_num); - s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); - iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); - if (s != sizeof(ctrl)) { - status = VIRTIO_NET_ERR; - } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { - status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { - status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { - status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { - status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { - status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { - status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); - } - - s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); - assert(s == sizeof(status)); - - virtqueue_push(vq, elem, sizeof(status)); - virtio_notify(vdev, vq); - g_free(iov2); - g_free(elem); } } diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 7f7f5b3452..32495d0123 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -17,6 +17,7 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" #include "qemu/units.h" +#include "qemu/guest-random.h" #include "qapi/error.h" #include "e500.h" #include "e500-ccsr.h" @@ -346,6 +347,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, }; const char *dtb_file = machine->dtb; const char *toplevel_compat = machine->dt_compatible; + uint8_t rng_seed[32]; if (dtb_file) { char *filename; @@ -403,6 +405,9 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, if (ret < 0) fprintf(stderr, "couldn't set /chosen/bootargs\n"); + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); + if (kvm_enabled()) { /* Read out host's frequencies */ clock_freq = kvmppc_get_clockfreq(); @@ -861,7 +866,6 @@ void ppce500_init(MachineState *machine) for (i = 0; i < smp_cpus; i++) { PowerPCCPU *cpu; CPUState *cs; - qemu_irq *input; cpu = POWERPC_CPU(object_new(machine->cpu_type)); env = &cpu->env; @@ -885,9 +889,10 @@ void ppce500_init(MachineState *machine) firstenv = env; } - input = (qemu_irq *)env->irq_inputs; - irqs[i].irq[OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; - irqs[i].irq[OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; + irqs[i].irq[OPENPIC_OUTPUT_INT] = + qdev_get_gpio_in(DEVICE(cpu), PPCE500_INPUT_INT); + irqs[i].irq[OPENPIC_OUTPUT_CINT] = + qdev_get_gpio_in(DEVICE(cpu), PPCE500_INPUT_CINT); env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i; 
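The virtio-net change above splits the control-virtqueue logic into virtio_net_handle_ctrl_iov(), which operates on bare iovec arrays so callers other than the virtqueue handler (the vhost-vdpa shadow-virtqueue code later in this series) can reuse it. It leans on QEMU's iov_to_buf()/iov_discard_front(); the sketch below uses simplified local stand-ins for those helpers so it is self-contained.

```c
/* Sketch of the iovec parsing pattern in virtio_net_handle_ctrl_iov(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct virtio_net_ctrl_hdr {
    uint8_t class;
    uint8_t cmd;
};

/* Copy up to 'len' bytes from the start of an iovec into 'buf'. */
static size_t iov_to_buf_simple(const struct iovec *iov, unsigned num,
                                void *buf, size_t len)
{
    size_t done = 0;
    for (unsigned i = 0; i < num && done < len; i++) {
        size_t n = iov[i].iov_len < len - done ? iov[i].iov_len : len - done;
        memcpy((char *)buf + done, iov[i].iov_base, n);
        done += n;
    }
    return done;
}

/* Advance the iovec past 'len' bytes, like iov_discard_front(). */
static void iov_discard_front_simple(struct iovec **iov, unsigned *num,
                                     size_t len)
{
    while (*num && len >= (*iov)->iov_len) {
        len -= (*iov)->iov_len;
        (*iov)++;
        (*num)--;
    }
    if (*num) {
        (*iov)->iov_base = (char *)(*iov)->iov_base + len;
        (*iov)->iov_len -= len;
    }
}

int main(void)
{
    /* A control command whose header is split across two segments. */
    uint8_t seg0[1] = { 4 };                 /* class: VIRTIO_NET_CTRL_MQ */
    uint8_t seg1[3] = { 0, 0x02, 0x00 };     /* cmd plus a 16-bit payload */
    struct iovec sg[2] = {
        { .iov_base = seg0, .iov_len = sizeof(seg0) },
        { .iov_base = seg1, .iov_len = sizeof(seg1) },
    };
    struct iovec *iov = sg;
    unsigned num = 2;

    struct virtio_net_ctrl_hdr ctrl;
    size_t s = iov_to_buf_simple(iov, num, &ctrl, sizeof(ctrl));
    iov_discard_front_simple(&iov, &num, sizeof(ctrl));

    printf("read %zu hdr bytes: class %u cmd %u, %zu payload bytes left\n",
           s, ctrl.class, ctrl.cmd, iov[0].iov_len);
    return 0;
}
```

The real handler then writes the one-byte status back through the in_sg iovec, which is exactly the iov_from_buf() call visible in the hunk above.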
env->mpic_iack = pmc->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0; diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index c865921bdc..cf7eb72391 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -262,30 +262,30 @@ static void ppc_core99_init(MachineState *machine) switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_MCP); /* Not connected ? */ openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_HRESET); break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; + qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; + qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT); openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; + qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_MCP); /* Not connected ? */ openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = - ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; + qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_HRESET); break; #endif /* defined(TARGET_PPC64) */ default: diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index d62fdf0db3..03732ca7ed 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -271,7 +271,7 @@ static void ppc_heathrow_init(MachineState *machine) case PPC_FLAGS_INPUT_6xx: /* XXX: we register only 1 output pin for heathrow PIC */ qdev_connect_gpio_out(pic_dev, 0, - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); break; default: error_report("Bus model not supported on OldWorld Mac machine"); diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 9411ca6b16..61f4263953 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -155,7 +155,7 @@ static void pegasos2_init(MachineState *machine) /* Marvell Discovery II system controller */ pm->mv = DEVICE(sysbus_create_simple(TYPE_MV64361, -1, - ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT])); + qdev_get_gpio_in(DEVICE(pm->cpu), PPC6xx_INPUT_INT))); pci_bus = mv64361_get_pci_bus(pm->mv, 1); /* VIA VT8231 South Bridge (multifunction PCI device) */ diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index fea70df45e..690f448cb9 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -154,10 +154,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) void ppc6xx_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu, - PPC6xx_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB); } #if defined(TARGET_PPC64) @@ -234,10 +231,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level) void ppc970_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu, - PPC970_INPUT_NB); + 
qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB); } /* POWER7 internal IRQ controller */ @@ -260,10 +254,7 @@ static void power7_set_irq(void *opaque, int pin, int level) void ppcPOWER7_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu, - POWER7_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB); } /* POWER9 internal IRQ controller */ @@ -292,10 +283,7 @@ static void power9_set_irq(void *opaque, int pin, int level) void ppcPOWER9_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu, - POWER9_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB); } #endif /* defined(TARGET_PPC64) */ @@ -431,10 +419,7 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) void ppc40x_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq, - cpu, PPC40x_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB); } /* PowerPC E500 internal IRQ controller */ @@ -489,10 +474,7 @@ static void ppce500_set_irq(void *opaque, int pin, int level) void ppce500_irq_init(PowerPCCPU *cpu) { - CPUPPCState *env = &cpu->env; - - env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq, - cpu, PPCE500_INPUT_NB); + qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB); } /* Enable or Disable the E500 EPR capability */ diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c index 36c8ba6f3c..d6420c88d3 100644 --- a/hw/ppc/ppc405_uc.c +++ b/hw/ppc/ppc405_uc.c @@ -1470,9 +1470,9 @@ PowerPCCPU *ppc405ep_init(MemoryRegion *address_space_mem, sysbus_realize_and_unref(uicsbd, &error_fatal); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); *uicdevp = uicdev; diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index d5973f2484..873f930c77 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -200,9 +200,9 @@ static void bamboo_init(MachineState *machine) sysbus_realize_and_unref(uicsbd, &error_fatal); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); /* SDRAM controller */ memset(ram_bases, 0, sizeof(ram_bases)); diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index a1cd4505cc..f08714f2ec 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -275,7 +275,7 @@ static void ibm_40p_init(MachineState *machine) /* PCI -> ISA bridge */ i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378")); qdev_connect_gpio_out(i82378_dev, 0, - cpu->env.irq_inputs[PPC6xx_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index 8c9b8dd67b..5a56f155f5 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -262,7 +262,7 @@ static void prep_systemio_realize(DeviceState *dev, Error **errp) 
qemu_set_irq(s->non_contiguous_io_map_irq, s->iomap_type & PORT0850_IOMAP_NONCONTIGUOUS); cpu = POWERPC_CPU(first_cpu); - s->softreset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET]; + s->softreset_irq = qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_HRESET); isa_register_portio_list(isa, &s->portio, 0x0, ppc_io800_port_list, s, "systemio800"); diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 2f24598f55..7e8da657c2 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -334,9 +334,9 @@ static void sam460ex_init(MachineState *machine) if (i == 0) { sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); } else { sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, qdev_get_gpio_in(uic[0], input_ints[i])); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 9a5382d527..bc9ba6e6dc 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -27,6 +27,7 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" #include "qemu/memalign.h" +#include "qemu/guest-random.h" #include "qapi/error.h" #include "qapi/qapi-events-machine.h" #include "qapi/qapi-events-qdev.h" @@ -1014,6 +1015,7 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) { MachineState *machine = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + uint8_t rng_seed[32]; int chosen; _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); @@ -1091,6 +1093,9 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) spapr_dt_ov5_platform_support(spapr, fdt, chosen); } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + _FDT(fdt_setprop(fdt, chosen, "rng-seed", rng_seed, sizeof(rng_seed))); + _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); } @@ -1331,6 +1336,11 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, patb = spapr->nested_ptcr & PTCR_PATB; pats = spapr->nested_ptcr & PTCR_PATS; + /* Check if partition table is properly aligned */ + if (patb & MAKE_64BIT_MASK(0, pats + 12)) { + return false; + } + /* Calculate number of entries */ pats = 1ull << (pats + 12 - 4); if (pats <= lpid) { diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index d761a7d0c3..a8d4a6bcf0 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -920,6 +920,7 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu, target_ulong page_size = args[2]; target_ulong table_size = args[3]; target_ulong update_lpcr = 0; + target_ulong table_byte_size; uint64_t cproc; if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */ @@ -927,6 +928,14 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu, } if (flags & FLAG_MODIFY) { if (flags & FLAG_REGISTER) { + /* Check process table alignment */ + table_byte_size = 1ULL << (table_size + 12); + if (proc_tbl & (table_byte_size - 1)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: process table not properly aligned: proc_tbl 0x" + TARGET_FMT_lx" proc_tbl_size 0x"TARGET_FMT_lx"\n", + __func__, proc_tbl, table_byte_size); + } if (flags & FLAG_RADIX) { /* Register new RADIX process table */ if (proc_tbl & 0xfff || proc_tbl >> 60) { return H_P2; diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c index b67a709ddc..53b126ff48 100644 --- a/hw/ppc/virtex_ml507.c +++ b/hw/ppc/virtex_ml507.c @@ -111,9 +111,9 @@ static PowerPCCPU 
*ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk) sysbus_realize_and_unref(uicsbd, &error_fatal); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT)); sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, - ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_CINT)); /* This board doesn't wire anything up to the inputs of the UIC. */ return cpu; @@ -213,7 +213,7 @@ static void virtex_init(MachineState *machine) CPUPPCState *env; hwaddr ram_base = 0; DriveInfo *dinfo; - qemu_irq irq[32], *cpu_irq; + qemu_irq irq[32], cpu_irq; int kernel_size; int i; @@ -236,12 +236,12 @@ static void virtex_init(MachineState *machine) dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, 64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1); - cpu_irq = (qemu_irq *) &env->irq_inputs[PPC40x_INPUT_INT]; + cpu_irq = qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT); dev = qdev_new("xlnx.xps-intc"); qdev_prop_set_uint32(dev, "kind-of-intr", 0); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(dev, i); } diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 0cd0a5e540..296cc6c8e6 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -3269,7 +3269,8 @@ static void xhci_wakeup_endpoint(USBBus *bus, USBEndpoint *ep, DPRINTF("%s\n", __func__); slotid = ep->dev->addr; - if (slotid == 0 || !xhci->slots[slotid-1].enabled) { + if (slotid == 0 || slotid > xhci->numslots || + !xhci->slots[slotid - 1].enabled) { DPRINTF("%s: oops, no slot for dev %d\n", __func__, ep->dev->addr); return; } diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 56c96ebd13..e4956728dd 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -122,17 +122,35 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, return true; } -static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, - const struct iovec *iovec, size_t num, - bool more_descs, bool write) +/** + * Write descriptors to SVQ vring + * + * @svq: The shadow virtqueue + * @sg: Cache for hwaddr + * @iovec: The iovec from the guest + * @num: iovec length + * @more_descs: True if more descriptors come in the chain + * @write: True if they are writeable descriptors + * + * Return true if success, false otherwise and print error. + */ +static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + const struct iovec *iovec, size_t num, + bool more_descs, bool write) { uint16_t i = svq->free_head, last = svq->free_head; unsigned n; uint16_t flags = write ? 
cpu_to_le16(VRING_DESC_F_WRITE) : 0; vring_desc_t *descs = svq->vring.desc; + bool ok; if (num == 0) { - return; + return true; + } + + ok = vhost_svq_translate_addr(svq, sg, iovec, num); + if (unlikely(!ok)) { + return false; } for (n = 0; n < num; n++) { @@ -150,40 +168,39 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, } svq->free_head = le16_to_cpu(svq->desc_next[last]); + return true; } static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, - VirtQueueElement *elem, unsigned *head) + const struct iovec *out_sg, size_t out_num, + const struct iovec *in_sg, size_t in_num, + unsigned *head) { unsigned avail_idx; vring_avail_t *avail = svq->vring.avail; bool ok; - g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num)); + g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num)); *head = svq->free_head; /* We need some descriptors here */ - if (unlikely(!elem->out_num && !elem->in_num)) { + if (unlikely(!out_num && !in_num)) { qemu_log_mask(LOG_GUEST_ERROR, "Guest provided element with no descriptors"); return false; } - ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num); + ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0, + false); if (unlikely(!ok)) { return false; } - vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, - elem->in_num > 0, false); - - ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num); + ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true); if (unlikely(!ok)) { return false; } - vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true); - /* * Put the entry in the available array (but don't update avail->idx until * they do sync). @@ -199,38 +216,58 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, return true; } +static void vhost_svq_kick(VhostShadowVirtqueue *svq) +{ + /* + * We need to expose the available array entries before checking the used + * flags + */ + smp_mb(); + if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { + return; + } + + event_notifier_set(&svq->hdev_kick); +} + /** * Add an element to a SVQ. * * The caller must check that there is enough slots for the new element. It - * takes ownership of the element: In case of failure, it is free and the SVQ - * is considered broken. + * takes ownership of the element: In case of failure not ENOSPC, it is free. 
+ * + * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full */ -static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem) { unsigned qemu_head; - bool ok = vhost_svq_add_split(svq, elem, &qemu_head); + unsigned ndescs = in_num + out_num; + bool ok; + + if (unlikely(ndescs > vhost_svq_available_slots(svq))) { + return -ENOSPC; + } + + ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head); if (unlikely(!ok)) { g_free(elem); - return false; + return -EINVAL; } - svq->ring_id_maps[qemu_head] = elem; - return true; + svq->desc_state[qemu_head].elem = elem; + svq->desc_state[qemu_head].ndescs = ndescs; + vhost_svq_kick(svq); + return 0; } -static void vhost_svq_kick(VhostShadowVirtqueue *svq) +/* Convenience wrapper to add a guest's element to SVQ */ +static int vhost_svq_add_element(VhostShadowVirtqueue *svq, + VirtQueueElement *elem) { - /* - * We need to expose the available array entries before checking the used - * flags - */ - smp_mb(); - if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { - return; - } - - event_notifier_set(&svq->hdev_kick); + return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg, + elem->in_num, elem); } /** @@ -257,7 +294,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) while (true) { VirtQueueElement *elem; - bool ok; + int r; if (svq->next_guest_avail_elem) { elem = g_steal_pointer(&svq->next_guest_avail_elem); @@ -269,28 +306,30 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) break; } - if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) { - /* - * This condition is possible since a contiguous buffer in GPA - * does not imply a contiguous buffer in qemu's VA - * scatter-gather segments. If that happens, the buffer exposed - * to the device needs to be a chain of descriptors at this - * moment. - * - * SVQ cannot hold more available buffers if we are here: - * queue the current guest descriptor and ignore further kicks - * until some elements are used. - */ - svq->next_guest_avail_elem = elem; - return; + if (svq->ops) { + r = svq->ops->avail_handler(svq, elem, svq->ops_opaque); + } else { + r = vhost_svq_add_element(svq, elem); } - - ok = vhost_svq_add(svq, elem); - if (unlikely(!ok)) { - /* VQ is broken, just return and ignore any other kicks */ + if (unlikely(r != 0)) { + if (r == -ENOSPC) { + /* + * This condition is possible since a contiguous buffer in + * GPA does not imply a contiguous buffer in qemu's VA + * scatter-gather segments. If that happens, the buffer + * exposed to the device needs to be a chain of descriptors + * at this moment. + * + * SVQ cannot hold more available buffers if we are here: + * queue the current guest descriptor and ignore kicks + * until some elements are used. 
+ */ + svq->next_guest_avail_elem = elem; + } + + /* VQ is full or broken, just return and ignore kicks */ return; } - vhost_svq_kick(svq); } virtio_queue_set_notification(svq->vq, true); @@ -311,11 +350,12 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n) static bool vhost_svq_more_used(VhostShadowVirtqueue *svq) { + uint16_t *used_idx = &svq->vring.used->idx; if (svq->last_used_idx != svq->shadow_used_idx) { return true; } - svq->shadow_used_idx = cpu_to_le16(svq->vring.used->idx); + svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx); return svq->last_used_idx != svq->shadow_used_idx; } @@ -376,21 +416,36 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, return NULL; } - if (unlikely(!svq->ring_id_maps[used_elem.id])) { + if (unlikely(!svq->desc_state[used_elem.id].elem)) { qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used, but it was not available", svq->vdev->name, used_elem.id); return NULL; } - num = svq->ring_id_maps[used_elem.id]->in_num + - svq->ring_id_maps[used_elem.id]->out_num; + num = svq->desc_state[used_elem.id].ndescs; last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); svq->desc_next[last_used_chain] = svq->free_head; svq->free_head = used_elem.id; *len = used_elem.len; - return g_steal_pointer(&svq->ring_id_maps[used_elem.id]); + return g_steal_pointer(&svq->desc_state[used_elem.id].elem); +} + +/** + * Push an element to SVQ, returning it to the guest. + */ +void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len) +{ + virtqueue_push(svq->vq, elem, len); + if (svq->next_guest_avail_elem) { + /* + * Avail ring was full when vhost_svq_flush was called, so it's a + * good moment to make more descriptors available if possible. + */ + vhost_handle_guest_kick(svq); + } } static void vhost_svq_flush(VhostShadowVirtqueue *svq, @@ -435,6 +490,33 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, } /** + * Poll the SVQ for one device used buffer. + * + * This function race with main event loop SVQ polling, so extra + * synchronization is needed. + * + * Return the length written by the device. + */ +size_t vhost_svq_poll(VhostShadowVirtqueue *svq) +{ + int64_t start_us = g_get_monotonic_time(); + do { + uint32_t len; + VirtQueueElement *elem = vhost_svq_get_buf(svq, &len); + if (elem) { + return len; + } + + if (unlikely(g_get_monotonic_time() - start_us > 10e6)) { + return 0; + } + + /* Make sure we read new used_idx */ + smp_rmb(); + } while (true); +} + +/** * Forward used buffers. * * @n: hdev call event notifier, the one that device set to notify svq. 
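vhost_svq_poll() above busy-waits for one used descriptor, giving up after roughly ten seconds (the 10e6-microsecond check) and issuing smp_rmb() before re-reading the used index. Below is a standalone sketch of that bounded-polling shape, with clock_gettime() and a compiler barrier standing in for QEMU's g_get_monotonic_time() and smp_rmb(), and a fake_get_buf() stub in place of vhost_svq_get_buf().

```c
/* Sketch of the bounded busy-poll used by vhost_svq_poll(); not QEMU code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define POLL_BUDGET_US (10 * 1000 * 1000)

static int64_t monotonic_us(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Stand-in for vhost_svq_get_buf(): pretends the device marks a buffer
 * as used after a few polls. */
static bool fake_get_buf(uint32_t *len)
{
    static int calls;
    if (++calls < 1000) {
        return false;
    }
    *len = 64;
    return true;
}

static size_t poll_used_buf(void)
{
    int64_t start_us = monotonic_us();

    do {
        uint32_t len;
        if (fake_get_buf(&len)) {
            return len;
        }
        if (monotonic_us() - start_us > POLL_BUDGET_US) {
            return 0;            /* give up, as vhost_svq_poll() does */
        }
        /* The real code issues smp_rmb() before re-reading used->idx. */
        __asm__ __volatile__("" ::: "memory");
    } while (true);
}

int main(void)
{
    printf("polled length: %zu\n", poll_used_buf());
    return 0;
}
```

Returning 0 on timeout mirrors the patch: the caller is left to decide whether the device is simply slow or unresponsive.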
@@ -560,7 +642,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, memset(svq->vring.desc, 0, driver_size); svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size); memset(svq->vring.used, 0, device_size); - svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num); + svq->desc_state = g_new0(SVQDescState, svq->vring.num); svq->desc_next = g_new0(uint16_t, svq->vring.num); for (unsigned i = 0; i < svq->vring.num - 1; i++) { svq->desc_next[i] = cpu_to_le16(i + 1); @@ -585,7 +667,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) for (unsigned i = 0; i < svq->vring.num; ++i) { g_autofree VirtQueueElement *elem = NULL; - elem = g_steal_pointer(&svq->ring_id_maps[i]); + elem = g_steal_pointer(&svq->desc_state[i].elem); if (elem) { virtqueue_detach_element(svq->vq, elem, 0); } @@ -597,7 +679,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) } svq->vq = NULL; g_free(svq->desc_next); - g_free(svq->ring_id_maps); + g_free(svq->desc_state); qemu_vfree(svq->vring.desc); qemu_vfree(svq->vring.used); } @@ -607,12 +689,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) * shadow methods and file descriptors. * * @iova_tree: Tree to perform descriptors translations + * @ops: SVQ owner callbacks + * @ops_opaque: ops opaque pointer * * Returns the new virtqueue or NULL. * * In case of error, reason is reported through error_report. */ -VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree) +VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, + const VhostShadowVirtqueueOps *ops, + void *ops_opaque) { g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1); int r; @@ -634,6 +720,8 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree) event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND); event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call); svq->iova_tree = iova_tree; + svq->ops = ops; + svq->ops_opaque = ops_opaque; return g_steal_pointer(&svq); err_init_hdev_call: diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index c132c994e9..d04c34a589 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -15,6 +15,37 @@ #include "standard-headers/linux/vhost_types.h" #include "hw/virtio/vhost-iova-tree.h" +typedef struct SVQDescState { + VirtQueueElement *elem; + + /* + * Number of descriptors exposed to the device. May or may not match + * guest's + */ + unsigned int ndescs; +} SVQDescState; + +typedef struct VhostShadowVirtqueue VhostShadowVirtqueue; + +/** + * Callback to handle an avail buffer. + * + * @svq: Shadow virtqueue + * @elem: Element placed in the queue by the guest + * @vq_callback_opaque: Opaque + * + * Returns 0 if the vq is running as expected. + * + * Note that ownership of elem is transferred to the callback. 
+ */ +typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *vq_callback_opaque); + +typedef struct VhostShadowVirtqueueOps { + VirtQueueAvailCallback avail_handler; +} VhostShadowVirtqueueOps; + /* Shadow virtqueue to relay notifications */ typedef struct VhostShadowVirtqueue { /* Shadow vring */ @@ -47,8 +78,8 @@ typedef struct VhostShadowVirtqueue { /* IOVA mapping */ VhostIOVATree *iova_tree; - /* Map for use the guest's descriptors */ - VirtQueueElement **ring_id_maps; + /* SVQ vring descriptors state */ + SVQDescState *desc_state; /* Next VirtQueue element that guest made available */ VirtQueueElement *next_guest_avail_elem; @@ -59,6 +90,12 @@ typedef struct VhostShadowVirtqueue { */ uint16_t *desc_next; + /* Caller callbacks */ + const VhostShadowVirtqueueOps *ops; + + /* Caller callbacks opaque */ + void *ops_opaque; + /* Next head to expose to the device */ uint16_t shadow_avail_idx; @@ -74,6 +111,13 @@ typedef struct VhostShadowVirtqueue { bool vhost_svq_valid_features(uint64_t features, Error **errp); +void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len); +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem); +size_t vhost_svq_poll(VhostShadowVirtqueue *svq); + void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq, @@ -85,7 +129,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, VirtQueue *vq); void vhost_svq_stop(VhostShadowVirtqueue *svq); -VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree); +VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, + const VhostShadowVirtqueueOps *ops, + void *ops_opaque); void vhost_svq_free(gpointer vq); G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free); diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 66f054a12c..291cd19054 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -20,6 +20,7 @@ #include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/vhost-vdpa.h" #include "exec/address-spaces.h" +#include "migration/blocker.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" #include "cpu.h" @@ -71,8 +72,8 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, return false; } -static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, - void *vaddr, bool readonly) +int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, + void *vaddr, bool readonly) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; @@ -97,8 +98,7 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, return ret; } -static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, - hwaddr size) +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; @@ -418,8 +418,10 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v, shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); for (unsigned n = 0; n < hdev->nvqs; ++n) { - g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree); + g_autoptr(VhostShadowVirtqueue) svq; + svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops, + v->shadow_vq_ops_opaque); if (unlikely(!svq)) { 
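A minimal sketch of how a caller could wire the new ops hooks declared above, assuming the QEMU build context; my_avail_handler, my_svq_ops and the fallback handling are illustrative assumptions, not the behaviour of any in-tree user:

/* Illustrative only: forward the guest element through the SVQ unchanged. */
static int my_avail_handler(VhostShadowVirtqueue *svq,
                            VirtQueueElement *elem, void *opaque)
{
    /* Ownership of elem is transferred to the callback (see header above). */
    int r = vhost_svq_add(svq, elem->out_sg, elem->out_num,
                          elem->in_sg, elem->in_num, elem);
    if (unlikely(r != 0)) {
        /* Could not forward: return it to the guest with no data written. */
        vhost_svq_push_elem(svq, elem, 0);
        g_free(elem);
    }
    return r;
}

static const VhostShadowVirtqueueOps my_svq_ops = {
    .avail_handler = my_avail_handler,
};

/* Later, e.g. from device setup code:
 *     vhost_svq_new(iova_tree, &my_svq_ops, my_opaque);
 */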
error_setg(errp, "Cannot create svq %u", n); return -1; @@ -1021,6 +1023,13 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) return true; } + if (v->migration_blocker) { + int r = migrate_add_blocker(v->migration_blocker, &err); + if (unlikely(r < 0)) { + return false; + } + } + for (i = 0; i < v->shadow_vqs->len; ++i) { VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); @@ -1063,6 +1072,10 @@ err: vhost_svq_stop(svq); } + if (v->migration_blocker) { + migrate_del_blocker(v->migration_blocker); + } + return false; } @@ -1082,6 +1095,9 @@ static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev) } } + if (v->migration_blocker) { + migrate_del_blocker(v->migration_blocker); + } return true; } diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 5968551a05..2281be4e10 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -35,6 +35,7 @@ extern intptr_t qemu_host_page_mask; void qemu_init_cpu_list(void); void cpu_list_lock(void); void cpu_list_unlock(void); +unsigned int cpu_list_generation_id_get(void); void tcg_flush_softmmu_tlb(CPUState *cs); diff --git a/include/exec/memory.h b/include/exec/memory.h index a6a0f4d8ad..bfb1de8eea 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -69,7 +69,10 @@ static inline void fuzz_dma_read_cb(size_t addr, /* Dirty tracking enabled because measuring dirty rate */ #define GLOBAL_DIRTY_DIRTY_RATE (1U << 1) -#define GLOBAL_DIRTY_MASK (0x3) +/* Dirty tracking enabled because dirty limit */ +#define GLOBAL_DIRTY_LIMIT (1U << 2) + +#define GLOBAL_DIRTY_MASK (0x7) extern unsigned int global_dirty_tracking; diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h index d864879421..c9d25d493e 100644 --- a/include/hw/arm/bcm2835_peripherals.h +++ b/include/hw/arm/bcm2835_peripherals.h @@ -17,6 +17,7 @@ #include "hw/char/bcm2835_aux.h" #include "hw/display/bcm2835_fb.h" #include "hw/dma/bcm2835_dma.h" +#include "hw/or-irq.h" #include "hw/intc/bcm2835_ic.h" #include "hw/misc/bcm2835_property.h" #include "hw/misc/bcm2835_rng.h" @@ -55,6 +56,7 @@ struct BCM2835PeripheralState { BCM2835AuxState aux; BCM2835FBState fb; BCM2835DMAState dma; + qemu_or_irq orgated_dma_irq; BCM2835ICState ic; BCM2835PropertyState property; BCM2835RngState rng; diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 996f94059f..500503da13 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -418,6 +418,12 @@ struct CPUState { */ bool throttle_thread_scheduled; + /* + * Sleep throttle_us_per_full microseconds once dirty ring is full + * if dirty page rate limit is enabled. + */ + int64_t throttle_us_per_full; + bool ignore_memory_transaction_failures; /* Used for user-only emulation of prctl(PR_SET_UNALIGN). 
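The widened GLOBAL_DIRTY_MASK above just has to cover the new GLOBAL_DIRTY_LIMIT reason alongside the existing ones. A trivial standalone check; GLOBAL_DIRTY_MIGRATION being bit 0 is an assumption here, since that define is outside the hunk:

#include <assert.h>

#define GLOBAL_DIRTY_MIGRATION  (1U << 0)   /* assumed value from memory.h */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
#define GLOBAL_DIRTY_LIMIT      (1U << 2)
#define GLOBAL_DIRTY_MASK       (0x7)

int main(void)
{
    /* 0x1 | 0x2 | 0x4 == 0x7, so the mask covers all three tracking reasons. */
    assert((GLOBAL_DIRTY_MIGRATION | GLOBAL_DIRTY_DIRTY_RATE |
            GLOBAL_DIRTY_LIMIT) == GLOBAL_DIRTY_MASK);
    return 0;
}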
*/ diff --git a/include/hw/input/i8042.h b/include/hw/input/i8042.h index ca933d8e1b..9fb3f8d787 100644 --- a/include/hw/input/i8042.h +++ b/include/hw/input/i8042.h @@ -10,6 +10,7 @@ #include "hw/isa/isa.h" #include "hw/sysbus.h" +#include "hw/input/ps2.h" #include "qom/object.h" #define I8042_KBD_IRQ 0 @@ -30,8 +31,8 @@ typedef struct KBDState { uint8_t obdata; uint8_t cbdata; uint8_t pending_tmp; - void *kbd; - void *mouse; + PS2KbdState ps2kbd; + PS2MouseState ps2mouse; QEMUTimer *throttle_timer; qemu_irq irqs[2]; @@ -87,8 +88,6 @@ struct MMIOKBDState { #define I8042_A20_LINE "a20" -MMIOKBDState *i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq, - ram_addr_t size, hwaddr mask); void i8042_isa_mouse_fake_event(ISAKBDState *isa); void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out); diff --git a/include/hw/input/lasips2.h b/include/hw/input/lasips2.h index 03f0c9e9f9..01911c50f9 100644 --- a/include/hw/input/lasips2.h +++ b/include/hw/input/lasips2.h @@ -12,10 +12,8 @@ * + sysbus MMIO region 1: MemoryRegion defining the LASI PS2 mouse * registers * + sysbus IRQ 0: LASI PS2 output irq - * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2 - * keyboard device has asserted its irq - * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2 - * mouse device has asserted its irq + * + Named GPIO input "lasips2-port-input-irq[0..1]": set to 1 if the downstream + * LASIPS2Port has asserted its irq */ #ifndef HW_INPUT_LASIPS2_H @@ -23,31 +21,60 @@ #include "exec/hwaddr.h" #include "hw/sysbus.h" +#include "hw/input/ps2.h" -struct LASIPS2State; -typedef struct LASIPS2Port { - struct LASIPS2State *parent; +#define TYPE_LASIPS2_PORT "lasips2-port" +OBJECT_DECLARE_TYPE(LASIPS2Port, LASIPS2PortDeviceClass, LASIPS2_PORT) + +struct LASIPS2PortDeviceClass { + DeviceClass parent; + + DeviceRealize parent_realize; +}; + +typedef struct LASIPS2State LASIPS2State; + +struct LASIPS2Port { + DeviceState parent_obj; + + LASIPS2State *lasips2; MemoryRegion reg; - void *dev; + PS2State *ps2dev; uint8_t id; uint8_t control; uint8_t buf; bool loopback_rbne; - bool irq; -} LASIPS2Port; + qemu_irq irq; +}; + +#define TYPE_LASIPS2_KBD_PORT "lasips2-kbd-port" +OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2KbdPort, LASIPS2_KBD_PORT) + +struct LASIPS2KbdPort { + LASIPS2Port parent_obj; + + PS2KbdState kbd; +}; + +#define TYPE_LASIPS2_MOUSE_PORT "lasips2-mouse-port" +OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2MousePort, LASIPS2_MOUSE_PORT) + +struct LASIPS2MousePort { + LASIPS2Port parent_obj; + + PS2MouseState mouse; +}; struct LASIPS2State { SysBusDevice parent_obj; - hwaddr base; - LASIPS2Port kbd; - LASIPS2Port mouse; + LASIPS2KbdPort kbd_port; + LASIPS2MousePort mouse_port; + uint8_t int_status; qemu_irq irq; }; #define TYPE_LASIPS2 "lasips2" OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2State, LASIPS2) -LASIPS2State *lasips2_initfn(hwaddr base, qemu_irq irq); - #endif /* HW_INPUT_LASIPS2_H */ diff --git a/include/hw/input/pl050.h b/include/hw/input/pl050.h new file mode 100644 index 0000000000..89ec4fafc9 --- /dev/null +++ b/include/hw/input/pl050.h @@ -0,0 +1,59 @@ +/* + * Arm PrimeCell PL050 Keyboard / Mouse Interface + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +#ifndef HW_PL050_H +#define HW_PL050_H + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/input/ps2.h" +#include "hw/irq.h" + +struct PL050DeviceClass { + SysBusDeviceClass parent_class; + + DeviceRealize parent_realize; +}; + +#define TYPE_PL050 "pl050" +OBJECT_DECLARE_TYPE(PL050State, PL050DeviceClass, PL050) + +struct PL050State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + PS2State *ps2dev; + uint32_t cr; + uint32_t clk; + uint32_t last; + int pending; + qemu_irq irq; + bool is_mouse; +}; + +#define TYPE_PL050_KBD_DEVICE "pl050_keyboard" +OBJECT_DECLARE_SIMPLE_TYPE(PL050KbdState, PL050_KBD_DEVICE) + +struct PL050KbdState { + PL050State parent_obj; + + PS2KbdState kbd; +}; + +#define TYPE_PL050_MOUSE_DEVICE "pl050_mouse" +OBJECT_DECLARE_SIMPLE_TYPE(PL050MouseState, PL050_MOUSE_DEVICE) + +struct PL050MouseState { + PL050State parent_obj; + + PS2MouseState mouse; +}; + +#endif diff --git a/include/hw/input/ps2.h b/include/hw/input/ps2.h index a78619d8cb..ff777582cd 100644 --- a/include/hw/input/ps2.h +++ b/include/hw/input/ps2.h @@ -98,8 +98,6 @@ struct PS2MouseState { OBJECT_DECLARE_SIMPLE_TYPE(PS2MouseState, PS2_MOUSE_DEVICE) /* ps2.c */ -void *ps2_kbd_init(void); -void *ps2_mouse_init(void); void ps2_write_mouse(PS2MouseState *s, int val); void ps2_write_keyboard(PS2KbdState *s, int val); uint32_t ps2_read_data(PS2State *s); diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index 09a816191c..f4f24df428 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -17,6 +17,19 @@ #define LOONGARCH_ISA_IO_BASE 0x18000000UL #define LOONGARCH_ISA_IO_SIZE 0x0004000 +#define VIRT_FWCFG_BASE 0x1e020000UL +#define VIRT_BIOS_BASE 0x1c000000UL +#define VIRT_BIOS_SIZE (4 * MiB) + +#define VIRT_LOWMEM_BASE 0 +#define VIRT_LOWMEM_SIZE 0x10000000 +#define VIRT_HIGHMEM_BASE 0x90000000 +#define VIRT_GED_EVT_ADDR 0x100e0000 +#define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) +#define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) + +#define LA_FDT_BASE 0x1c400000 +#define LA_FDT_SIZE 0x100000 struct LoongArchMachineState { /*< private >*/ @@ -26,8 +39,20 @@ struct LoongArchMachineState { MemoryRegion lowmem; MemoryRegion highmem; MemoryRegion isa_io; + MemoryRegion bios; + bool bios_loaded; + /* State for other subsystems/APIs: */ + FWCfgState *fw_cfg; + Notifier machine_done; + OnOffAuto acpi; + char *oem_id; + char *oem_table_id; + DeviceState *acpi_ged; + int fdt_size; }; #define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt") OBJECT_DECLARE_SIMPLE_TYPE(LoongArchMachineState, LOONGARCH_MACHINE) +bool loongarch_is_acpi_enabled(LoongArchMachineState *lams); +void loongarch_acpi_setup(LoongArchMachineState *lams); #endif diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h index 08c5f78be2..0fdc86b973 100644 --- a/include/hw/pci-host/ls7a.h +++ b/include/hw/pci-host/ls7a.h @@ -23,6 +23,9 @@ #define LS7A_PCI_IO_BASE 0x18004000UL #define LS7A_PCI_IO_SIZE 0xC000 +#define LS7A_PCI_MEM_BASE 0x40000000UL +#define LS7A_PCI_MEM_SIZE 0x40000000UL + #define LS7A_PCH_REG_BASE 0x10000000UL #define LS7A_IOAPIC_REG_BASE (LS7A_PCH_REG_BASE) #define LS7A_PCH_MSI_ADDR_LOW 0x2FF00000UL @@ -41,4 +44,5 @@ #define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000) #define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100) #define LS7A_RTC_LEN 0x100 +#define LS7A_SCI_IRQ (PCH_PIC_IRQ_OFFSET + 4) #endif diff --git a/include/hw/virtio/vhost-vdpa.h 
b/include/hw/virtio/vhost-vdpa.h index a29dbb3f53..d10a89303e 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -15,6 +15,7 @@ #include <gmodule.h> #include "hw/virtio/vhost-iova-tree.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/virtio.h" #include "standard-headers/linux/vhost_types.h" @@ -34,9 +35,16 @@ typedef struct vhost_vdpa { bool shadow_vqs_enabled; /* IOVA mapping used by the Shadow Virtqueue */ VhostIOVATree *iova_tree; + Error *migration_blocker; GPtrArray *shadow_vqs; + const VhostShadowVirtqueueOps *shadow_vq_ops; + void *shadow_vq_ops_opaque; struct vhost_dev *dev; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; } VhostVDPA; +int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, + void *vaddr, bool readonly); +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size); + #endif diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index eb87032627..ef234ffe7e 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -35,6 +35,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(VirtIONet, VIRTIO_NET) * and latency. */ #define TX_BURST 256 +/* Maximum VIRTIO_NET_CTRL_MAC_TABLE_SET unicast + multicast entries. */ +#define MAC_TABLE_ENTRIES 64 + typedef struct virtio_net_conf { uint32_t txtimer; @@ -218,6 +221,10 @@ struct VirtIONet { struct EBPFRSSContext ebpf_rss; }; +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num); void virtio_net_set_netclient_name(VirtIONet *n, const char *name, const char *type); diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index 2e89a97bd6..a618eb1e4e 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ -131,6 +131,9 @@ void hmp_replay_delete_break(Monitor *mon, const QDict *qdict); void hmp_replay_seek(Monitor *mon, const QDict *qdict); void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict); void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict); +void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); void hmp_human_readable_text_helper(Monitor *mon, HumanReadableText *(*qmp_handler)(Error **)); void hmp_info_stats(Monitor *mon, const QDict *qdict); diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h new file mode 100644 index 0000000000..8d2c1f3a6b --- /dev/null +++ b/include/sysemu/dirtylimit.h @@ -0,0 +1,37 @@ +/* + * Dirty page rate limit common functions + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) <huangy81@chinatelecom.cn> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
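The DMA helpers newly exported from vhost-vdpa.h above are used in map/unmap pairs over the same IOVA range. A hedged sketch, assuming the QEMU build context and a populated struct vhost_vdpa; map_then_unmap is an illustrative name only:

/* Illustrative only: pair a map with an unmap of the same IOVA range. */
static int map_then_unmap(struct vhost_vdpa *v, hwaddr iova,
                          void *buf, hwaddr size, bool device_writes)
{
    int r = vhost_vdpa_dma_map(v, iova, size, buf, !device_writes);
    if (r != 0) {
        return r;                    /* mapping failed, nothing to undo */
    }

    /* ... let the device access buf through iova here ... */

    return vhost_vdpa_dma_unmap(v, iova, size);
}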
+ */ +#ifndef QEMU_DIRTYRLIMIT_H +#define QEMU_DIRTYRLIMIT_H + +#define DIRTYLIMIT_CALC_TIME_MS 1000 /* 1000ms */ + +int64_t vcpu_dirty_rate_get(int cpu_index); +void vcpu_dirty_rate_stat_start(void); +void vcpu_dirty_rate_stat_stop(void); +void vcpu_dirty_rate_stat_initialize(void); +void vcpu_dirty_rate_stat_finalize(void); + +void dirtylimit_state_lock(void); +void dirtylimit_state_unlock(void); +void dirtylimit_state_initialize(void); +void dirtylimit_state_finalize(void); +bool dirtylimit_in_service(void); +bool dirtylimit_vcpu_index_valid(int cpu_index); +void dirtylimit_process(void); +void dirtylimit_change(bool start); +void dirtylimit_set_vcpu(int cpu_index, + uint64_t quota, + bool enable); +void dirtylimit_set_all(uint64_t quota, + bool enable); +void dirtylimit_vcpu_execute(CPUState *cpu); +#endif diff --git a/include/sysemu/dirtyrate.h b/include/sysemu/dirtyrate.h new file mode 100644 index 0000000000..4d3b9a4902 --- /dev/null +++ b/include/sysemu/dirtyrate.h @@ -0,0 +1,28 @@ +/* + * dirty page rate helper functions + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) <huangy81@chinatelecom.cn> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_DIRTYRATE_H +#define QEMU_DIRTYRATE_H + +typedef struct VcpuStat { + int nvcpu; /* number of vcpu */ + DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */ +} VcpuStat; + +int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, + VcpuStat *stat, + unsigned int flag, + bool one_shot); + +void global_dirty_log_change(unsigned int flag, + bool start); +#endif diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index a783c78868..efd6dee818 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -582,4 +582,6 @@ bool kvm_cpu_check_are_resettable(void); bool kvm_arch_cpu_check_are_resettable(void); bool kvm_dirty_ring_enabled(void); + +uint32_t kvm_dirty_ring_size(void); #endif diff --git a/io/channel-socket.c b/io/channel-socket.c index 4466bb1cd4..74a936cc1f 100644 --- a/io/channel-socket.c +++ b/io/channel-socket.c @@ -716,12 +716,18 @@ static int qio_channel_socket_flush(QIOChannel *ioc, struct cmsghdr *cm; char control[CMSG_SPACE(sizeof(*serr))]; int received; - int ret = 1; + int ret; + + if (sioc->zero_copy_queued == sioc->zero_copy_sent) { + return 0; + } msg.msg_control = control; msg.msg_controllen = sizeof(control); memset(control, 0, sizeof(control)); + ret = 1; + while (sioc->zero_copy_sent < sioc->zero_copy_queued) { received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE); if (received < 0) { diff --git a/migration/channel.c b/migration/channel.c index 90087d8986..1b0815039f 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -38,9 +38,7 @@ void migration_channel_process_incoming(QIOChannel *ioc) trace_migration_set_incoming_channel( ioc, object_get_typename(OBJECT(ioc))); - if (migrate_use_tls() && - !object_dynamic_cast(OBJECT(ioc), - TYPE_QIO_CHANNEL_TLS)) { + if (migrate_channel_requires_tls_upgrade(ioc)) { migration_tls_channel_process_incoming(s, ioc, &local_err); } else { migration_ioc_register_yank(ioc); @@ -70,10 +68,7 @@ void migration_channel_connect(MigrationState *s, ioc, object_get_typename(OBJECT(ioc)), hostname, error); if (!error) { - if (s->parameters.tls_creds && - *s->parameters.tls_creds && - !object_dynamic_cast(OBJECT(ioc), - TYPE_QIO_CHANNEL_TLS)) { + if (migrate_channel_requires_tls_upgrade(ioc)) { migration_tls_channel_connect(s, ioc, 
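A call-order sketch for the new sysemu/dirtylimit.h API above. The ordering is an assumption inferred from the declarations alone, not code from the patch, and it presumes the QEMU context with any locking/BQL requirements met by the caller:

#include "sysemu/dirtylimit.h"

/* Illustrative: enable a dirty page rate quota (MB/s) for one vCPU. */
static void example_enable_vcpu_dirty_limit(int cpu_index, uint64_t quota)
{
    if (!dirtylimit_in_service()) {
        /* Bring up per-vCPU dirty rate sampling and the limiter state. */
        vcpu_dirty_rate_stat_initialize();
        vcpu_dirty_rate_stat_start();
        dirtylimit_state_initialize();
    }

    if (dirtylimit_vcpu_index_valid(cpu_index)) {
        dirtylimit_state_lock();
        dirtylimit_set_vcpu(cpu_index, quota, true /* enable */);
        dirtylimit_state_unlock();
    }
}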
hostname, &error); if (!error) { diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index aace12a787..795fab5c37 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -46,7 +46,7 @@ static struct DirtyRateStat DirtyStat; static DirtyRateMeasureMode dirtyrate_mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; -static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) +static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time) { int64_t current_time; @@ -60,6 +60,132 @@ static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) return msec; } +static inline void record_dirtypages(DirtyPageRecord *dirty_pages, + CPUState *cpu, bool start) +{ + if (start) { + dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages; + } else { + dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages; + } +} + +static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages, + int64_t calc_time_ms) +{ + uint64_t memory_size_MB; + uint64_t increased_dirty_pages = + dirty_pages.end_pages - dirty_pages.start_pages; + + memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; + + return memory_size_MB * 1000 / calc_time_ms; +} + +void global_dirty_log_change(unsigned int flag, bool start) +{ + qemu_mutex_lock_iothread(); + if (start) { + memory_global_dirty_log_start(flag); + } else { + memory_global_dirty_log_stop(flag); + } + qemu_mutex_unlock_iothread(); +} + +/* + * global_dirty_log_sync + * 1. sync dirty log from kvm + * 2. stop dirty tracking if needed. + */ +static void global_dirty_log_sync(unsigned int flag, bool one_shot) +{ + qemu_mutex_lock_iothread(); + memory_global_dirty_log_sync(); + if (one_shot) { + memory_global_dirty_log_stop(flag); + } + qemu_mutex_unlock_iothread(); +} + +static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) +{ + CPUState *cpu; + DirtyPageRecord *records; + int nvcpu = 0; + + CPU_FOREACH(cpu) { + nvcpu++; + } + + stat->nvcpu = nvcpu; + stat->rates = g_malloc0(sizeof(DirtyRateVcpu) * nvcpu); + + records = g_malloc0(sizeof(DirtyPageRecord) * nvcpu); + + return records; +} + +static void vcpu_dirty_stat_collect(VcpuStat *stat, + DirtyPageRecord *records, + bool start) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + record_dirtypages(records, cpu, start); + } +} + +int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, + VcpuStat *stat, + unsigned int flag, + bool one_shot) +{ + DirtyPageRecord *records; + int64_t init_time_ms; + int64_t duration; + int64_t dirtyrate; + int i = 0; + unsigned int gen_id; + +retry: + init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + + cpu_list_lock(); + gen_id = cpu_list_generation_id_get(); + records = vcpu_dirty_stat_alloc(stat); + vcpu_dirty_stat_collect(stat, records, true); + cpu_list_unlock(); + + duration = dirty_stat_wait(calc_time_ms, init_time_ms); + + global_dirty_log_sync(flag, one_shot); + + cpu_list_lock(); + if (gen_id != cpu_list_generation_id_get()) { + g_free(records); + g_free(stat->rates); + cpu_list_unlock(); + goto retry; + } + vcpu_dirty_stat_collect(stat, records, false); + cpu_list_unlock(); + + for (i = 0; i < stat->nvcpu; i++) { + dirtyrate = do_calculate_dirtyrate(records[i], duration); + + stat->rates[i].id = i; + stat->rates[i].dirty_rate = dirtyrate; + + trace_dirtyrate_do_calculate_vcpu(i, dirtyrate); + } + + g_free(records); + + return duration; +} + static bool is_sample_period_valid(int64_t sec) { if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC || @@ -396,44 +522,6 @@ static bool compare_page_hash_info(struct RamblockDirtyInfo *info, return 
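The arithmetic in do_calculate_dirtyrate() above reduces to the dirtied pages converted to MB, scaled to a one second window. A standalone worked example, assuming 4 KiB target pages and made-up numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096   /* assumption for the example */

static int64_t dirty_rate_mb_s(uint64_t start_pages, uint64_t end_pages,
                               int64_t calc_time_ms)
{
    uint64_t increased = end_pages - start_pages;
    uint64_t memory_size_mb = (increased * TARGET_PAGE_SIZE) >> 20;

    return memory_size_mb * 1000 / calc_time_ms;   /* MB per second */
}

int main(void)
{
    /* 262144 pages * 4 KiB = 1024 MB dirtied over 1000 ms -> 1024 MB/s. */
    printf("%" PRId64 " MB/s\n", dirty_rate_mb_s(0, 262144, 1000));
    return 0;
}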
true; } -static inline void record_dirtypages(DirtyPageRecord *dirty_pages, - CPUState *cpu, bool start) -{ - if (start) { - dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages; - } else { - dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages; - } -} - -static void dirtyrate_global_dirty_log_start(void) -{ - qemu_mutex_lock_iothread(); - memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); - qemu_mutex_unlock_iothread(); -} - -static void dirtyrate_global_dirty_log_stop(void) -{ - qemu_mutex_lock_iothread(); - memory_global_dirty_log_sync(); - memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE); - qemu_mutex_unlock_iothread(); -} - -static int64_t do_calculate_dirtyrate_vcpu(DirtyPageRecord dirty_pages) -{ - uint64_t memory_size_MB; - int64_t time_s; - uint64_t increased_dirty_pages = - dirty_pages.end_pages - dirty_pages.start_pages; - - memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; - time_s = DirtyStat.calc_time; - - return memory_size_MB / time_s; -} - static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages, bool start) { @@ -444,11 +532,6 @@ static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages, } } -static void do_calculate_dirtyrate_bitmap(DirtyPageRecord dirty_pages) -{ - DirtyStat.dirty_rate = do_calculate_dirtyrate_vcpu(dirty_pages); -} - static inline void dirtyrate_manual_reset_protect(void) { RAMBlock *block = NULL; @@ -492,71 +575,49 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) DirtyStat.start_time = start_time / 1000; msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, start_time); + msec = dirty_stat_wait(msec, start_time); DirtyStat.calc_time = msec / 1000; /* - * dirtyrate_global_dirty_log_stop do two things. + * do two things. * 1. fetch dirty bitmap from kvm * 2. 
stop dirty tracking */ - dirtyrate_global_dirty_log_stop(); + global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true); record_dirtypages_bitmap(&dirty_pages, false); - do_calculate_dirtyrate_bitmap(dirty_pages); + DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec); } static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config) { - CPUState *cpu; - int64_t msec = 0; - int64_t start_time; + int64_t duration; uint64_t dirtyrate = 0; uint64_t dirtyrate_sum = 0; - DirtyPageRecord *dirty_pages; - int nvcpu = 0; int i = 0; - CPU_FOREACH(cpu) { - nvcpu++; - } - - dirty_pages = malloc(sizeof(*dirty_pages) * nvcpu); - - DirtyStat.dirty_ring.nvcpu = nvcpu; - DirtyStat.dirty_ring.rates = malloc(sizeof(DirtyRateVcpu) * nvcpu); - - dirtyrate_global_dirty_log_start(); - - CPU_FOREACH(cpu) { - record_dirtypages(dirty_pages, cpu, true); - } - - start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); - DirtyStat.start_time = start_time / 1000; + /* start log sync */ + global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true); - msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, start_time); - DirtyStat.calc_time = msec / 1000; + DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; - dirtyrate_global_dirty_log_stop(); + /* calculate vcpu dirtyrate */ + duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000, + &DirtyStat.dirty_ring, + GLOBAL_DIRTY_DIRTY_RATE, + true); - CPU_FOREACH(cpu) { - record_dirtypages(dirty_pages, cpu, false); - } + DirtyStat.calc_time = duration / 1000; + /* calculate vm dirtyrate */ for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) { - dirtyrate = do_calculate_dirtyrate_vcpu(dirty_pages[i]); - trace_dirtyrate_do_calculate_vcpu(i, dirtyrate); - - DirtyStat.dirty_ring.rates[i].id = i; + dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate; DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate; dirtyrate_sum += dirtyrate; } DirtyStat.dirty_rate = dirtyrate_sum; - free(dirty_pages); } static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config) @@ -574,7 +635,7 @@ static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config) rcu_read_unlock(); msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, initial_time); + msec = dirty_stat_wait(msec, initial_time); DirtyStat.start_time = initial_time / 1000; DirtyStat.calc_time = msec / 1000; diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h index 69d4c5b865..594a5c0bb6 100644 --- a/migration/dirtyrate.h +++ b/migration/dirtyrate.h @@ -13,6 +13,8 @@ #ifndef QEMU_MIGRATION_DIRTYRATE_H #define QEMU_MIGRATION_DIRTYRATE_H +#include "sysemu/dirtyrate.h" + /* * Sample 512 pages per GB as default. */ @@ -65,11 +67,6 @@ typedef struct SampleVMStat { uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ } SampleVMStat; -typedef struct VcpuStat { - int nvcpu; /* number of vcpu */ - DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */ -} VcpuStat; - /* * Store calculation statistics for each measure. 
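As the loop in calculate_dirtyrate_dirty_ring() above shows, the whole-VM rate in dirty-ring mode is simply the sum of the per-vCPU rates filled in by vcpu_calculate_dirtyrate(). A reduced standalone sketch with the QEMU types flattened to a plain struct:

#include <stdint.h>

typedef struct {
    int id;
    int64_t dirty_rate;          /* MB/s measured for one vCPU */
} VcpuRateSample;

static int64_t vm_dirty_rate(const VcpuRateSample *rates, int nvcpu)
{
    int64_t sum = 0;

    for (int i = 0; i < nvcpu; i++) {
        sum += rates[i].dirty_rate;   /* VM rate = sum over all vCPUs */
    }
    return sum;
}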
*/ diff --git a/migration/migration.c b/migration/migration.c index 78f5057373..e03f698a3c 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -48,6 +48,7 @@ #include "trace.h" #include "exec/target_page.h" #include "io/channel-buffer.h" +#include "io/channel-tls.h" #include "migration/colo.h" #include "hw/boards.h" #include "hw/qdev-properties.h" @@ -215,9 +216,11 @@ void migration_object_init(void) current_incoming->postcopy_remote_fds = g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD)); qemu_mutex_init(¤t_incoming->rp_mutex); + qemu_mutex_init(¤t_incoming->postcopy_prio_thread_mutex); qemu_event_init(¤t_incoming->main_thread_load_event, false); qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); + qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); qemu_mutex_init(¤t_incoming->page_request_mutex); current_incoming->page_requested = g_tree_new(page_request_addr_cmp); @@ -321,6 +324,12 @@ void migration_incoming_state_destroy(void) mis->page_requested = NULL; } + if (mis->postcopy_qemufile_dst) { + migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); + qemu_fclose(mis->postcopy_qemufile_dst); + mis->postcopy_qemufile_dst = NULL; + } + yank_unregister_instance(MIGRATION_YANK_INSTANCE); } @@ -691,9 +700,9 @@ static bool postcopy_try_recover(void) /* * Here, we only wake up the main loading thread (while the - * fault thread will still be waiting), so that we can receive + * rest threads will still be waiting), so that we can receive * commands from source now, and answer it if needed. The - * fault thread will be woken up afterwards until we are sure + * rest threads will be woken up afterwards until we are sure * that source is ready to reply to page requests. */ qemu_sem_post(&mis->postcopy_pause_sem_dst); @@ -714,15 +723,21 @@ void migration_fd_process_incoming(QEMUFile *f, Error **errp) migration_incoming_process(); } +static bool migration_needs_multiple_sockets(void) +{ + return migrate_use_multifd() || migrate_postcopy_preempt(); +} + void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; bool start_migration; + QEMUFile *f; if (!mis->from_src_file) { /* The first connection (multifd may have multiple) */ - QEMUFile *f = qemu_file_new_input(ioc); + f = qemu_file_new_input(ioc); if (!migration_incoming_setup(f, errp)) { return; @@ -730,13 +745,19 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) /* * Common migration only needs one channel, so we can start - * right now. Multifd needs more than one channel, we wait. + * right now. Some features need more than one channel, we wait. 
*/ - start_migration = !migrate_use_multifd(); + start_migration = !migration_needs_multiple_sockets(); } else { /* Multiple connections */ - assert(migrate_use_multifd()); - start_migration = multifd_recv_new_channel(ioc, &local_err); + assert(migration_needs_multiple_sockets()); + if (migrate_use_multifd()) { + start_migration = multifd_recv_new_channel(ioc, &local_err); + } else { + assert(migrate_postcopy_preempt()); + f = qemu_file_new_input(ioc); + start_migration = postcopy_preempt_new_channel(mis, f); + } if (local_err) { error_propagate(errp, local_err); return; @@ -761,11 +782,20 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) bool migration_has_all_channels(void) { MigrationIncomingState *mis = migration_incoming_get_current(); - bool all_channels; - all_channels = multifd_recv_all_channels_created(); + if (!mis->from_src_file) { + return false; + } + + if (migrate_use_multifd()) { + return multifd_recv_all_channels_created(); + } + + if (migrate_postcopy_preempt()) { + return mis->postcopy_qemufile_dst != NULL; + } - return all_channels && mis->from_src_file != NULL; + return true; } /* @@ -1027,6 +1057,8 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->ram->normal_bytes = ram_counters.normal * page_size; info->ram->mbps = s->mbps; info->ram->dirty_sync_count = ram_counters.dirty_sync_count; + info->ram->dirty_sync_missed_zero_copy = + ram_counters.dirty_sync_missed_zero_copy; info->ram->postcopy_requests = ram_counters.postcopy_requests; info->ram->page_size = page_size; info->ram->multifd_bytes = ram_counters.multifd_bytes; @@ -1274,7 +1306,9 @@ static bool migrate_caps_check(bool *cap_list, #ifdef CONFIG_LINUX if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND] && (!cap_list[MIGRATION_CAPABILITY_MULTIFD] || - migrate_use_compression() || + cap_list[MIGRATION_CAPABILITY_COMPRESS] || + cap_list[MIGRATION_CAPABILITY_XBZRLE] || + migrate_multifd_compression() || migrate_use_tls())) { error_setg(errp, "Zero copy only available for non-compressed non-TLS multifd migration"); @@ -1297,6 +1331,13 @@ static bool migrate_caps_check(bool *cap_list, return false; } + if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) { + if (!cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + error_setg(errp, "Postcopy preempt requires postcopy-ram"); + return false; + } + } + return true; } @@ -1511,6 +1552,17 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: "); return false; } + +#ifdef CONFIG_LINUX + if (migrate_use_zero_copy_send() && + ((params->has_multifd_compression && params->multifd_compression) || + (params->has_tls_creds && params->tls_creds && *params->tls_creds))) { + error_setg(errp, + "Zero copy only available for non-compressed non-TLS multifd migration"); + return false; + } +#endif + return true; } @@ -1867,6 +1919,12 @@ static void migrate_fd_cleanup(MigrationState *s) qemu_fclose(tmp); } + if (s->postcopy_qemufile_src) { + migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src); + qemu_fclose(s->postcopy_qemufile_src); + s->postcopy_qemufile_src = NULL; + } + assert(!migration_is_active(s)); if (s->state == MIGRATION_STATUS_CANCELLING) { @@ -2663,6 +2721,15 @@ bool migrate_background_snapshot(void) return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; } +bool migrate_postcopy_preempt(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return 
s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]; +} + /* migration thread support */ /* * Something bad happened to the RP stream, mark an error @@ -3002,6 +3069,12 @@ static int postcopy_start(MigrationState *ms) int64_t bandwidth = migrate_max_postcopy_bandwidth(); bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; + + if (postcopy_preempt_wait_channel(ms)) { + migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + return -1; + } + if (!migrate_pause_before_switchover()) { migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_POSTCOPY_ACTIVE); @@ -3141,6 +3214,8 @@ static int postcopy_start(MigrationState *ms) MIGRATION_STATUS_FAILED); } + trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); + return ret; fail_closefb: @@ -3253,6 +3328,11 @@ static void migration_completion(MigrationState *s) qemu_savevm_state_complete_postcopy(s->to_dst_file); qemu_mutex_unlock_iothread(); + /* Shutdown the postcopy fast path thread */ + if (migrate_postcopy_preempt()) { + postcopy_preempt_shutdown_file(s); + } + trace_migration_completion_postcopy_end_after_complete(); } else { goto fail; @@ -3447,6 +3527,18 @@ static MigThrError postcopy_pause(MigrationState *s) qemu_file_shutdown(file); qemu_fclose(file); + /* + * Do the same to postcopy fast path socket too if there is. No + * locking needed because no racer as long as we do this before setting + * status to paused. + */ + if (s->postcopy_qemufile_src) { + migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src); + qemu_file_shutdown(s->postcopy_qemufile_src); + qemu_fclose(s->postcopy_qemufile_src); + s->postcopy_qemufile_src = NULL; + } + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_POSTCOPY_PAUSED); @@ -3464,6 +3556,14 @@ static MigThrError postcopy_pause(MigrationState *s) if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { /* Woken up by a recover procedure. Give it a shot */ + if (postcopy_preempt_wait_channel(s)) { + /* + * Preempt enabled, and new channel create failed; loop + * back to wait for another recovery. + */ + continue; + } + /* * Firstly, let's wake up the return path now, with a new * return path channel. @@ -3502,8 +3602,13 @@ static MigThrError migration_detect_error(MigrationState *s) return MIG_THR_ERR_FATAL; } - /* Try to detect any file errors */ - ret = qemu_file_get_error_obj(s->to_dst_file, &local_error); + /* + * Try to detect any file errors. Note that postcopy_qemufile_src will + * be NULL when postcopy preempt is not enabled. 
+ */ + ret = qemu_file_get_error_obj_any(s->to_dst_file, + s->postcopy_qemufile_src, + &local_error); if (!ret) { /* Everything is fine */ assert(!local_error); @@ -4141,6 +4246,15 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } + /* This needs to be done before resuming a postcopy */ + if (postcopy_preempt_setup(s, &local_err)) { + error_report_err(local_err); + migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_FAILED); + migrate_fd_cleanup(s); + return; + } + if (resume) { /* Wakeup the main migration thread to do the recovery */ migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, @@ -4265,6 +4379,11 @@ static Property migration_properties[] = { DEFINE_PROP_SIZE("announce-step", MigrationState, parameters.announce_step, DEFAULT_MIGRATE_ANNOUNCE_STEP), + DEFINE_PROP_BOOL("x-postcopy-preempt-break-huge", MigrationState, + postcopy_preempt_break_huge, true), + DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds), + DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname), + DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), @@ -4274,6 +4393,8 @@ static Property migration_properties[] = { DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS), DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS), DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), + DEFINE_PROP_MIG_CAP("x-postcopy-preempt", + MIGRATION_CAPABILITY_POSTCOPY_PREEMPT), DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK), @@ -4300,18 +4421,16 @@ static void migration_class_init(ObjectClass *klass, void *data) static void migration_instance_finalize(Object *obj) { MigrationState *ms = MIGRATION_OBJ(obj); - MigrationParameters *params = &ms->parameters; qemu_mutex_destroy(&ms->error_mutex); qemu_mutex_destroy(&ms->qemu_file_lock); - g_free(params->tls_hostname); - g_free(params->tls_creds); qemu_sem_destroy(&ms->wait_unplug_sem); qemu_sem_destroy(&ms->rate_limit_sem); qemu_sem_destroy(&ms->pause_sem); qemu_sem_destroy(&ms->postcopy_pause_sem); qemu_sem_destroy(&ms->postcopy_pause_rp_sem); qemu_sem_destroy(&ms->rp_state.rp_sem); + qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); error_free(ms->error); } @@ -4358,6 +4477,7 @@ static void migration_instance_init(Object *obj) qemu_sem_init(&ms->rp_state.rp_sem, 0); qemu_sem_init(&ms->rate_limit_sem, 0); qemu_sem_init(&ms->wait_unplug_sem, 0); + qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); qemu_mutex_init(&ms->qemu_file_lock); } diff --git a/migration/migration.h b/migration/migration.h index 485d58b95f..cdad8aceaa 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -23,6 +23,7 @@ #include "io/channel-buffer.h" #include "net/announce.h" #include "qom/object.h" +#include "postcopy-ram.h" struct PostcopyBlocktimeContext; @@ -67,7 +68,7 @@ typedef struct { struct MigrationIncomingState { QEMUFile *from_src_file; /* Previously received RAM's RAMBlock pointer */ - RAMBlock *last_recv_block; + RAMBlock *last_recv_block[RAM_CHANNEL_MAX]; /* A hook to allow cleanup at the end of incoming migration */ void *transport_data; void (*transport_cleanup)(void *data); @@ -112,6 +113,23 @@ struct MigrationIncomingState { * enabled. 
*/ unsigned int postcopy_channels; + /* QEMUFile for postcopy only; it'll be handled by a separate thread */ + QEMUFile *postcopy_qemufile_dst; + /* Postcopy priority thread is used to receive postcopy requested pages */ + QemuThread postcopy_prio_thread; + bool postcopy_prio_thread_created; + /* + * Used to sync between the ram load main thread and the fast ram load + * thread. It protects postcopy_qemufile_dst, which is the postcopy + * fast channel. + * + * The ram fast load thread will take it mostly for the whole lifecycle + * because it needs to continuously read data from the channel, and + * it'll only release this mutex if postcopy is interrupted, so that + * the ram load main thread will take this mutex over and properly + * release the broken channel. + */ + QemuMutex postcopy_prio_thread_mutex; /* * An array of temp host huge pages to be used, one for each postcopy * channel. @@ -141,6 +159,13 @@ struct MigrationIncomingState { /* notify PAUSED postcopy incoming migrations to try to continue */ QemuSemaphore postcopy_pause_sem_dst; QemuSemaphore postcopy_pause_sem_fault; + /* + * This semaphore is used to allow the ram fast load thread (only when + * postcopy preempt is enabled) fall into sleep when there's network + * interruption detected. When the recovery is done, the main load + * thread will kick the fast ram load thread using this semaphore. + */ + QemuSemaphore postcopy_pause_sem_fast_load; /* List of listening socket addresses */ SocketAddressList *socket_address_list; @@ -192,6 +217,15 @@ struct MigrationState { QEMUBH *cleanup_bh; /* Protected by qemu_file_lock */ QEMUFile *to_dst_file; + /* Postcopy specific transfer channel */ + QEMUFile *postcopy_qemufile_src; + /* + * It is posted when the preempt channel is established. Note: this is + * used for both the start or recover of a postcopy migration. We'll + * post to this sem every time a new preempt channel is created in the + * main thread, and we keep post() and wait() in pair. + */ + QemuSemaphore postcopy_qemufile_src_sem; QIOChannelBuffer *bioc; /* * Protects to_dst_file/from_dst_file pointers. We need to make sure we @@ -306,6 +340,13 @@ struct MigrationState { bool send_configuration; /* Whether we send section footer during migration */ bool send_section_footer; + /* + * Whether we allow break sending huge pages when postcopy preempt is + * enabled. When disabled, we won't interrupt precopy within sending a + * host huge page, which is the old behavior of vanilla postcopy. + * NOTE: this parameter is ignored if postcopy preempt is not enabled. 
+ */ + bool postcopy_preempt_break_huge; /* Needed by postcopy-pause state */ QemuSemaphore postcopy_pause_sem; @@ -400,6 +441,7 @@ int migrate_decompress_threads(void); bool migrate_use_events(void); bool migrate_postcopy_blocktime(void); bool migrate_background_snapshot(void); +bool migrate_postcopy_preempt(void); /* Sending on the return path - generic and then for each message type */ void migrate_send_rp_shut(MigrationIncomingState *mis, diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c index 3a7ae44485..18213a9513 100644 --- a/migration/multifd-zlib.c +++ b/migration/multifd-zlib.c @@ -27,6 +27,8 @@ struct zlib_data { uint8_t *zbuff; /* size of compressed buffer */ uint32_t zbuff_len; + /* uncompressed buffer of size qemu_target_page_size() */ + uint8_t *buf; }; /* Multifd zlib compression */ @@ -45,26 +47,38 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp) { struct zlib_data *z = g_new0(struct zlib_data, 1); z_stream *zs = &z->zs; + const char *err_msg; zs->zalloc = Z_NULL; zs->zfree = Z_NULL; zs->opaque = Z_NULL; if (deflateInit(zs, migrate_multifd_zlib_level()) != Z_OK) { - g_free(z); - error_setg(errp, "multifd %u: deflate init failed", p->id); - return -1; + err_msg = "deflate init failed"; + goto err_free_z; } /* This is the maxium size of the compressed buffer */ z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE); z->zbuff = g_try_malloc(z->zbuff_len); if (!z->zbuff) { - deflateEnd(&z->zs); - g_free(z); - error_setg(errp, "multifd %u: out of memory for zbuff", p->id); - return -1; + err_msg = "out of memory for zbuff"; + goto err_deflate_end; + } + z->buf = g_try_malloc(qemu_target_page_size()); + if (!z->buf) { + err_msg = "out of memory for buf"; + goto err_free_zbuff; } p->data = z; return 0; + +err_free_zbuff: + g_free(z->zbuff); +err_deflate_end: + deflateEnd(&z->zs); +err_free_z: + g_free(z); + error_setg(errp, "multifd %u: %s", p->id, err_msg); + return -1; } /** @@ -82,6 +96,8 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) deflateEnd(&z->zs); g_free(z->zbuff); z->zbuff = NULL; + g_free(z->buf); + z->buf = NULL; g_free(p->data); p->data = NULL; } @@ -114,8 +130,14 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) flush = Z_SYNC_FLUSH; } + /* + * Since the VM might be running, the page may be changing concurrently + * with compression. zlib does not guarantee that this is safe, + * therefore copy the page before calling deflate(). 
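The comment above explains why multifd-zlib now snapshots each page before compressing: the guest may still be dirtying the live page, and deflate() does not tolerate its input changing underneath it. A standalone sketch of the same copy-before-deflate pattern, assuming only zlib; the buffer names and sizes are illustrative:

#include <stdint.h>
#include <string.h>
#include <zlib.h>

/* Compress one page from a snapshot, never from the live (changing) copy. */
static int compress_page_snapshot(z_stream *zs, const uint8_t *live_page,
                                  uint8_t *scratch, size_t page_size,
                                  uint8_t *out, size_t out_len)
{
    /* Take a stable copy first; the guest may still write live_page. */
    memcpy(scratch, live_page, page_size);

    zs->next_in = scratch;
    zs->avail_in = page_size;
    zs->next_out = out;
    zs->avail_out = out_len;

    return deflate(zs, Z_SYNC_FLUSH);
}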
+ */ + memcpy(z->buf, p->pages->block->host + p->normal[i], page_size); zs->avail_in = page_size; - zs->next_in = p->pages->block->host + p->normal[i]; + zs->next_in = z->buf; zs->avail_out = available; zs->next_out = z->zbuff + out_size; diff --git a/migration/multifd.c b/migration/multifd.c index 684c014c86..586ddc9d65 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -624,6 +624,8 @@ int multifd_send_sync_main(QEMUFile *f) if (ret < 0) { error_report_err(err); return -1; + } else if (ret == 1) { + dirty_sync_missed_zero_copy(); } } } @@ -831,9 +833,7 @@ static bool multifd_channel_connect(MultiFDSendParams *p, migrate_get_current()->hostname, error); if (!error) { - if (migrate_use_tls() && - !object_dynamic_cast(OBJECT(ioc), - TYPE_QIO_CHANNEL_TLS)) { + if (migrate_channel_requires_tls_upgrade(ioc)) { multifd_tls_channel_connect(p, ioc, &error); if (!error) { /* diff --git a/migration/multifd.h b/migration/multifd.h index 4d8d89e5e5..519f498643 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -65,7 +65,9 @@ typedef struct { } MultiFDPages_t; typedef struct { - /* this fields are not changed once the thread is created */ + /* Fields are only written at creating/deletion time */ + /* No lock required for them, they are read only */ + /* channel number */ uint8_t id; /* channel thread name */ @@ -74,39 +76,47 @@ typedef struct { QemuThread thread; /* communication channel */ QIOChannel *c; + /* is the yank function registered */ + bool registered_yank; + /* packet allocated len */ + uint32_t packet_len; + /* multifd flags for sending ram */ + int write_flags; + /* sem where to wait for more work */ QemuSemaphore sem; + /* syncs main thread and channels */ + QemuSemaphore sem_sync; + /* this mutex protects the following parameters */ QemuMutex mutex; /* is this channel thread running */ bool running; /* should this thread finish */ bool quit; - /* is the yank function registered */ - bool registered_yank; + /* multifd flags for each packet */ + uint32_t flags; + /* global number of generated multifd packets */ + uint64_t packet_num; /* thread has work to do */ int pending_job; - /* array of pages to sent */ + /* array of pages to sent. + * The owner of 'pages' depends of 'pending_job' value: + * pending_job == 0 -> migration_thread can use it. + * pending_job != 0 -> multifd_channel can use it. + */ MultiFDPages_t *pages; - /* packet allocated len */ - uint32_t packet_len; + + /* thread local variables. 
No locking required */ + /* pointer to the packet */ MultiFDPacket_t *packet; - /* multifd flags for sending ram */ - int write_flags; - /* multifd flags for each packet */ - uint32_t flags; /* size of the next packet that contains pages */ uint32_t next_packet_size; - /* global number of generated multifd packets */ - uint64_t packet_num; - /* thread local variables */ /* packets sent through this channel */ uint64_t num_packets; /* non zero pages sent through this channel */ uint64_t total_normal_pages; - /* syncs main thread and channels */ - QemuSemaphore sem_sync; /* buffers to send */ struct iovec *iov; /* number of iovs used */ @@ -120,7 +130,9 @@ typedef struct { } MultiFDSendParams; typedef struct { - /* this fields are not changed once the thread is created */ + /* Fields are only written at creating/deletion time */ + /* No lock required for them, they are read only */ + /* channel number */ uint8_t id; /* channel thread name */ @@ -129,31 +141,35 @@ typedef struct { QemuThread thread; /* communication channel */ QIOChannel *c; + /* packet allocated len */ + uint32_t packet_len; + + /* syncs main thread and channels */ + QemuSemaphore sem_sync; + /* this mutex protects the following parameters */ QemuMutex mutex; /* is this channel thread running */ bool running; /* should this thread finish */ bool quit; - /* ramblock host address */ - uint8_t *host; - /* packet allocated len */ - uint32_t packet_len; - /* pointer to the packet */ - MultiFDPacket_t *packet; /* multifd flags for each packet */ uint32_t flags; /* global number of generated multifd packets */ uint64_t packet_num; - /* thread local variables */ + + /* thread local variables. No locking required */ + + /* pointer to the packet */ + MultiFDPacket_t *packet; /* size of the next packet that contains pages */ uint32_t next_packet_size; /* packets sent through this channel */ uint64_t num_packets; + /* ramblock host address */ + uint8_t *host; /* non zero pages recv through this channel */ uint64_t total_normal_pages; - /* syncs main thread and channels */ - QemuSemaphore sem_sync; /* buffers to recv */ struct iovec *iov; /* Pages that are not zero */ diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index a66dd536d9..b9a37ef255 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -33,6 +33,10 @@ #include "trace.h" #include "hw/boards.h" #include "exec/ramblock.h" +#include "socket.h" +#include "qemu-file.h" +#include "yank_functions.h" +#include "tls.h" /* Arbitrary limit on size of each discard command, * keeps them around ~200 bytes @@ -567,6 +571,11 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) { trace_postcopy_ram_incoming_cleanup_entry(); + if (mis->postcopy_prio_thread_created) { + qemu_thread_join(&mis->postcopy_prio_thread); + mis->postcopy_prio_thread_created = false; + } + if (mis->have_fault_thread) { Error *local_err = NULL; @@ -1102,8 +1111,13 @@ static int postcopy_temp_pages_setup(MigrationIncomingState *mis) int err, i, channels; void *temp_page; - /* TODO: will be boosted when enable postcopy preemption */ - mis->postcopy_channels = 1; + if (migrate_postcopy_preempt()) { + /* If preemption enabled, need extra channel for urgent requests */ + mis->postcopy_channels = RAM_CHANNEL_MAX; + } else { + /* Both precopy/postcopy on the same channel */ + mis->postcopy_channels = 1; + } channels = mis->postcopy_channels; mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels); @@ -1170,7 +1184,7 @@ int 
postcopy_ram_incoming_setup(MigrationIncomingState *mis) return -1; } - postcopy_thread_create(mis, &mis->fault_thread, "postcopy/fault", + postcopy_thread_create(mis, &mis->fault_thread, "fault-default", postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE); mis->have_fault_thread = true; @@ -1185,6 +1199,16 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) return -1; } + if (migrate_postcopy_preempt()) { + /* + * This thread needs to be created after the temp pages because + * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately. + */ + postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast", + postcopy_preempt_thread, QEMU_THREAD_JOINABLE); + mis->postcopy_prio_thread_created = true; + } + trace_postcopy_ram_enable_notify(); return 0; @@ -1514,3 +1538,159 @@ void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) } } } + +bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) +{ + /* + * The new loading channel has its own threads, so it needs to be + * blocked too. It's by default true, just be explicit. + */ + qemu_file_set_blocking(file, true); + mis->postcopy_qemufile_dst = file; + trace_postcopy_preempt_new_channel(); + + /* Start the migration immediately */ + return true; +} + +/* + * Setup the postcopy preempt channel with the IOC. If ERROR is specified, + * setup the error instead. This helper will free the ERROR if specified. + */ +static void +postcopy_preempt_send_channel_done(MigrationState *s, + QIOChannel *ioc, Error *local_err) +{ + if (local_err) { + migrate_set_error(s, local_err); + error_free(local_err); + } else { + migration_ioc_register_yank(ioc); + s->postcopy_qemufile_src = qemu_file_new_output(ioc); + trace_postcopy_preempt_new_channel(); + } + + /* + * Kick the waiter in all cases. The waiter should check upon + * postcopy_qemufile_src to know whether it failed or not. + */ + qemu_sem_post(&s->postcopy_qemufile_src_sem); +} + +static void +postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque) +{ + g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task)); + MigrationState *s = opaque; + Error *local_err = NULL; + + qio_task_propagate_error(task, &local_err); + postcopy_preempt_send_channel_done(s, ioc, local_err); +} + +static void +postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque) +{ + g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task)); + MigrationState *s = opaque; + QIOChannelTLS *tioc; + Error *local_err = NULL; + + if (qio_task_propagate_error(task, &local_err)) { + goto out; + } + + if (migrate_channel_requires_tls_upgrade(ioc)) { + tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err); + if (!tioc) { + goto out; + } + trace_postcopy_preempt_tls_handshake(); + qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt"); + qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake, + s, NULL, NULL); + /* Setup the channel until TLS handshake finished */ + return; + } + +out: + /* This handles both good and error cases */ + postcopy_preempt_send_channel_done(s, ioc, local_err); +} + +/* Returns 0 if channel established, -1 for error. */ +int postcopy_preempt_wait_channel(MigrationState *s) +{ + /* If preempt not enabled, no need to wait */ + if (!migrate_postcopy_preempt()) { + return 0; + } + + /* + * We need the postcopy preempt channel to be established before + * starting doing anything. + */ + qemu_sem_wait(&s->postcopy_qemufile_src_sem); + + return s->postcopy_qemufile_src ? 
0 : -1; +} + +int postcopy_preempt_setup(MigrationState *s, Error **errp) +{ + if (!migrate_postcopy_preempt()) { + return 0; + } + + if (!migrate_multi_channels_is_allowed()) { + error_setg(errp, "Postcopy preempt is not supported as current " + "migration stream does not support multi-channels."); + return -1; + } + + /* Kick an async task to connect */ + socket_send_channel_create(postcopy_preempt_send_channel_new, s); + + return 0; +} + +static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis) +{ + trace_postcopy_pause_fast_load(); + qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); + qemu_sem_wait(&mis->postcopy_pause_sem_fast_load); + qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); + trace_postcopy_pause_fast_load_continued(); +} + +void *postcopy_preempt_thread(void *opaque) +{ + MigrationIncomingState *mis = opaque; + int ret; + + trace_postcopy_preempt_thread_entry(); + + rcu_register_thread(); + + qemu_sem_post(&mis->thread_sync_sem); + + /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */ + qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); + while (1) { + ret = ram_load_postcopy(mis->postcopy_qemufile_dst, + RAM_CHANNEL_POSTCOPY); + /* If error happened, go into recovery routine */ + if (ret) { + postcopy_pause_ram_fast_load(mis); + } else { + /* We're done */ + break; + } + } + qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); + + rcu_unregister_thread(); + + trace_postcopy_preempt_thread_exit(); + + return NULL; +} diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index 07684c0e1d..6147bf7d1d 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -183,4 +183,15 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, uint64_t client_addr, int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, uint64_t client_addr, uint64_t offset); +/* Hard-code channels for now for postcopy preemption */ +enum PostcopyChannels { + RAM_CHANNEL_PRECOPY = 0, + RAM_CHANNEL_POSTCOPY = 1, + RAM_CHANNEL_MAX, +}; + +bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); +int postcopy_preempt_setup(MigrationState *s, Error **errp); +int postcopy_preempt_wait_channel(MigrationState *s); + #endif diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 1e80d496b7..4f400c2e52 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -161,6 +161,33 @@ int qemu_file_get_error_obj(QEMUFile *f, Error **errp) } /* + * Get last error for either stream f1 or f2 with optional Error*. + * The error returned (non-zero) can be either from f1 or f2. + * + * If any of the qemufile* is NULL, then skip the check on that file. + * + * When there is no error on both qemufile, zero is returned. 
+ */ +int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp) +{ + int ret = 0; + + if (f1) { + ret = qemu_file_get_error_obj(f1, errp); + /* If there's already error detected, return */ + if (ret) { + return ret; + } + } + + if (f2) { + ret = qemu_file_get_error_obj(f2, errp); + } + + return ret; +} + +/* * Set the last error for stream f with optional Error* */ void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err) @@ -384,10 +411,8 @@ static ssize_t qemu_fill_buffer(QEMUFile *f) f->total_transferred += len; } else if (len == 0) { qemu_file_set_error_obj(f, -EIO, local_error); - } else if (len != -EAGAIN) { - qemu_file_set_error_obj(f, len, local_error); } else { - error_free(local_error); + qemu_file_set_error_obj(f, len, local_error); } return len; diff --git a/migration/qemu-file.h b/migration/qemu-file.h index 96e72d8bd8..fa13d04d78 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -141,6 +141,7 @@ void qemu_file_acct_rate_limit(QEMUFile *f, int64_t len); void qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate); int64_t qemu_file_get_rate_limit(QEMUFile *f); int qemu_file_get_error_obj(QEMUFile *f, Error **errp); +int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp); void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err); void qemu_file_set_error(QEMUFile *f, int ret); int qemu_file_shutdown(QEMUFile *f); diff --git a/migration/ram.c b/migration/ram.c index 01f9cc1d72..b94669ba5d 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -296,6 +296,20 @@ struct RAMSrcPageRequest { QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; }; +typedef struct { + /* + * Cached ramblock/offset values if preempted. They're only meaningful if + * preempted==true below. + */ + RAMBlock *ram_block; + unsigned long ram_page; + /* + * Whether a postcopy preemption just happened. Will be reset after + * precopy recovered to background migration. + */ + bool preempted; +} PostcopyPreemptState; + /* State of RAM for migration */ struct RAMState { /* QEMUFile used for this migration */ @@ -350,6 +364,14 @@ struct RAMState { /* Queue of outstanding page requests from the destination */ QemuMutex src_page_req_mutex; QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests; + + /* Postcopy preemption informations */ + PostcopyPreemptState postcopy_preempt_state; + /* + * Current channel we're using on src VM. Only valid if postcopy-preempt + * is enabled. + */ + unsigned int postcopy_channel; }; typedef struct RAMState RAMState; @@ -357,6 +379,11 @@ static RAMState *ram_state; static NotifierWithReturnList precopy_notifier_list; +static void postcopy_preempt_reset(RAMState *rs) +{ + memset(&rs->postcopy_preempt_state, 0, sizeof(PostcopyPreemptState)); +} + /* Whether postcopy has queued requests? */ static bool postcopy_has_request(RAMState *rs) { @@ -407,6 +434,11 @@ static void ram_transferred_add(uint64_t bytes) ram_counters.transferred += bytes; } +void dirty_sync_missed_zero_copy(void) +{ + ram_counters.dirty_sync_missed_zero_copy++; +} + /* used by the search for pages to send */ struct PageSearchStatus { /* Current block being searched */ @@ -415,8 +447,28 @@ struct PageSearchStatus { unsigned long page; /* Set once we wrap around */ bool complete_round; - /* Whether current page is explicitly requested by postcopy */ + /* + * [POSTCOPY-ONLY] Whether current page is explicitly requested by + * postcopy. When set, the request is "urgent" because the dest QEMU + * threads are waiting for us. 
+ */ bool postcopy_requested; + /* + * [POSTCOPY-ONLY] The target channel to use to send current page. + * + * Note: This may _not_ match with the value in postcopy_requested + * above. Let's imagine the case where the postcopy request is exactly + * the page that we're sending in progress during precopy. In this case + * we'll have postcopy_requested set to true but the target channel + * will be the precopy channel (so that we don't split brain on that + * specific page since the precopy channel already contains partial of + * that page data). + * + * Besides that specific use case, postcopy_target_channel should + * always be equal to postcopy_requested, because by default we send + * postcopy pages via postcopy preempt channel. + */ + bool postcopy_target_channel; }; typedef struct PageSearchStatus PageSearchStatus; @@ -468,6 +520,9 @@ static QemuCond decomp_done_cond; static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, ram_addr_t offset, uint8_t *source_buf); +static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, + bool postcopy_requested); + static void *do_data_compress(void *opaque) { CompressParam *param = opaque; @@ -1489,8 +1544,12 @@ retry: */ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) { - /* This is not a postcopy requested page */ + /* + * This is not a postcopy requested page, mark it "not urgent", and use + * precopy channel to send it. + */ pss->postcopy_requested = false; + pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY; pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page); if (pss->complete_round && pss->block == rs->last_seen_block && @@ -1947,6 +2006,55 @@ void ram_write_tracking_stop(void) } #endif /* defined(__linux__) */ +/* + * Check whether two addr/offset of the ramblock falls onto the same host huge + * page. Returns true if so, false otherwise. + */ +static bool offset_on_same_huge_page(RAMBlock *rb, uint64_t addr1, + uint64_t addr2) +{ + size_t page_size = qemu_ram_pagesize(rb); + + addr1 = ROUND_DOWN(addr1, page_size); + addr2 = ROUND_DOWN(addr2, page_size); + + return addr1 == addr2; +} + +/* + * Whether a previous preempted precopy huge page contains current requested + * page? Returns true if so, false otherwise. + * + * This should really happen very rarely, because it means when we were sending + * during background migration for postcopy we're sending exactly the page that + * some vcpu got faulted on on dest node. When it happens, we probably don't + * need to do much but drop the request, because we know right after we restore + * the precopy stream it'll be serviced. It'll slightly affect the order of + * postcopy requests to be serviced (e.g. it'll be the same as we move current + * request to the end of the queue) but it shouldn't be a big deal. The most + * imporant thing is we can _never_ try to send a partial-sent huge page on the + * POSTCOPY channel again, otherwise that huge page will got "split brain" on + * two channels (PRECOPY, POSTCOPY). + */ +static bool postcopy_preempted_contains(RAMState *rs, RAMBlock *block, + ram_addr_t offset) +{ + PostcopyPreemptState *state = &rs->postcopy_preempt_state; + + /* No preemption at all? */ + if (!state->preempted) { + return false; + } + + /* Not even the same ramblock? 
*/ + if (state->ram_block != block) { + return false; + } + + return offset_on_same_huge_page(block, offset, + state->ram_page << TARGET_PAGE_BITS); +} + /** * get_queued_page: unqueue a page from the postcopy requests * @@ -1964,7 +2072,20 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) block = unqueue_page(rs, &offset); - if (!block) { + if (block) { + /* See comment above postcopy_preempted_contains() */ + if (postcopy_preempted_contains(rs, block, offset)) { + trace_postcopy_preempt_hit(block->idstr, offset); + /* + * If what we preempted previously was exactly what we're + * requesting right now, restore the preempted precopy + * immediately, boosting its priority as it's requested by + * postcopy. + */ + postcopy_preempt_restore(rs, pss, true); + return true; + } + } else { /* * Poll write faults too if background snapshot is enabled; that's * when we have vcpus got blocked by the write protected pages. @@ -1986,7 +2107,9 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) * really rare. */ pss->complete_round = false; + /* Mark it an urgent request, meanwhile using POSTCOPY channel */ pss->postcopy_requested = true; + pss->postcopy_target_channel = RAM_CHANNEL_POSTCOPY; } return !!block; @@ -2180,6 +2303,129 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) return ram_save_page(rs, pss); } +static bool postcopy_needs_preempt(RAMState *rs, PageSearchStatus *pss) +{ + MigrationState *ms = migrate_get_current(); + + /* Not enabled eager preempt? Then never do that. */ + if (!migrate_postcopy_preempt()) { + return false; + } + + /* If the user explicitly disabled breaking of huge page, skip */ + if (!ms->postcopy_preempt_break_huge) { + return false; + } + + /* If the ramblock we're sending is a small page? Never bother. */ + if (qemu_ram_pagesize(pss->block) == TARGET_PAGE_SIZE) { + return false; + } + + /* Not in postcopy at all? */ + if (!migration_in_postcopy()) { + return false; + } + + /* + * If we're already handling a postcopy request, don't preempt as this page + * has got the same high priority. + */ + if (pss->postcopy_requested) { + return false; + } + + /* If there's postcopy requests, then check it up! */ + return postcopy_has_request(rs); +} + +/* Returns true if we preempted precopy, false otherwise */ +static void postcopy_do_preempt(RAMState *rs, PageSearchStatus *pss) +{ + PostcopyPreemptState *p_state = &rs->postcopy_preempt_state; + + trace_postcopy_preempt_triggered(pss->block->idstr, pss->page); + + /* + * Time to preempt precopy. Cache current PSS into preempt state, so that + * after handling the postcopy pages we can recover to it. We need to do + * so because the dest VM will have partial of the precopy huge page kept + * over in its tmp huge page caches; better move on with it when we can. + */ + p_state->ram_block = pss->block; + p_state->ram_page = pss->page; + p_state->preempted = true; +} + +/* Whether we're preempted by a postcopy request during sending a huge page */ +static bool postcopy_preempt_triggered(RAMState *rs) +{ + return rs->postcopy_preempt_state.preempted; +} + +static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, + bool postcopy_requested) +{ + PostcopyPreemptState *state = &rs->postcopy_preempt_state; + + assert(state->preempted); + + pss->block = state->ram_block; + pss->page = state->ram_page; + + /* Whether this is a postcopy request? 
*/ + pss->postcopy_requested = postcopy_requested; + /* + * When restoring a preempted page, the old data resides in PRECOPY + * slow channel, even if postcopy_requested is set. So always use + * PRECOPY channel here. + */ + pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY; + + trace_postcopy_preempt_restored(pss->block->idstr, pss->page); + + /* Reset preempt state, most importantly, set preempted==false */ + postcopy_preempt_reset(rs); +} + +static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss) +{ + MigrationState *s = migrate_get_current(); + unsigned int channel = pss->postcopy_target_channel; + QEMUFile *next; + + if (channel != rs->postcopy_channel) { + if (channel == RAM_CHANNEL_PRECOPY) { + next = s->to_dst_file; + } else { + next = s->postcopy_qemufile_src; + } + /* Update and cache the current channel */ + rs->f = next; + rs->postcopy_channel = channel; + + /* + * If channel switched, reset last_sent_block since the old sent block + * may not be on the same channel. + */ + rs->last_sent_block = NULL; + + trace_postcopy_preempt_switch_channel(channel); + } + + trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); +} + +/* We need to make sure rs->f always points to the default channel elsewhere */ +static void postcopy_preempt_reset_channel(RAMState *rs) +{ + if (migrate_postcopy_preempt() && migration_in_postcopy()) { + rs->postcopy_channel = RAM_CHANNEL_PRECOPY; + rs->f = migrate_get_current()->to_dst_file; + trace_postcopy_preempt_reset_channel(); + } +} + /** * ram_save_host_page: save a whole host page * @@ -2211,7 +2457,16 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) return 0; } + if (migrate_postcopy_preempt() && migration_in_postcopy()) { + postcopy_preempt_choose_channel(rs, pss); + } + do { + if (postcopy_needs_preempt(rs, pss)) { + postcopy_do_preempt(rs, pss); + break; + } + /* Check the pages is dirty and if it is send it */ if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) { tmppages = ram_save_target_page(rs, pss); @@ -2235,6 +2490,19 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) /* The offset we leave with is the min boundary of host page and block */ pss->page = MIN(pss->page, hostpage_boundary); + /* + * When with postcopy preempt mode, flush the data as soon as possible for + * postcopy requests, because we've already sent a whole huge page, so the + * dst node should already have enough resource to atomically filling in + * the current missing page. + * + * More importantly, when using separate postcopy channel, we must do + * explicit flush or it won't flush until the buffer is full. + */ + if (migrate_postcopy_preempt() && pss->postcopy_requested) { + qemu_fflush(rs->f); + } + res = ram_save_release_protection(rs, pss, start_page); return (res < 0 ? res : pages); } @@ -2276,8 +2544,17 @@ static int ram_find_and_save_block(RAMState *rs) found = get_queued_page(rs, &pss); if (!found) { - /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(rs, &pss, &again); + /* + * Recover previous precopy ramblock/offset if postcopy has + * preempted precopy. Otherwise find the next dirty bit. 
+ */ + if (postcopy_preempt_triggered(rs)) { + postcopy_preempt_restore(rs, &pss, false); + found = true; + } else { + /* priority queue empty, so just search for something dirty */ + found = find_dirty_block(rs, &pss, &again); + } } if (found) { @@ -2405,6 +2682,8 @@ static void ram_state_reset(RAMState *rs) rs->last_page = 0; rs->last_version = ram_list.version; rs->xbzrle_enabled = false; + postcopy_preempt_reset(rs); + rs->postcopy_channel = RAM_CHANNEL_PRECOPY; } #define MAX_WAIT 50 /* ms, half buffered_file limit */ @@ -3048,6 +3327,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } qemu_mutex_unlock(&rs->bitmap_mutex); + postcopy_preempt_reset_channel(rs); + /* * Must occur before EOS (or any QEMUFile operation) * because of RDMA protocol. @@ -3125,6 +3406,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque) return ret; } + postcopy_preempt_reset_channel(rs); + ret = multifd_send_sync_main(rs->f); if (ret < 0) { return ret; @@ -3209,11 +3492,13 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) * @mis: the migration incoming state pointer * @f: QEMUFile where to read the data from * @flags: Page flags (mostly to see if it's a continuation of previous block) + * @channel: the channel we're using */ static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, - QEMUFile *f, int flags) + QEMUFile *f, int flags, + int channel) { - RAMBlock *block = mis->last_recv_block; + RAMBlock *block = mis->last_recv_block[channel]; char id[256]; uint8_t len; @@ -3240,7 +3525,7 @@ static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis, return NULL; } - mis->last_recv_block = block; + mis->last_recv_block[channel] = block; return block; } @@ -3659,15 +3944,15 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis) * rcu_read_lock is taken prior to this being called. * * @f: QEMUFile where to send the data + * @channel: the channel to use for loading */ -int ram_load_postcopy(QEMUFile *f) +int ram_load_postcopy(QEMUFile *f, int channel) { int flags = 0, ret = 0; bool place_needed = false; bool matches_target_page_size = false; MigrationIncomingState *mis = migration_incoming_get_current(); - /* Currently we only use channel 0. TODO: use all the channels */ - PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[0]; + PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { ram_addr_t addr; @@ -3691,10 +3976,10 @@ int ram_load_postcopy(QEMUFile *f) flags = addr & ~TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK; - trace_ram_load_postcopy_loop((uint64_t)addr, flags); + trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags); if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_COMPRESS_PAGE)) { - block = ram_block_from_stream(mis, f, flags); + block = ram_block_from_stream(mis, f, flags, channel); if (!block) { ret = -EINVAL; break; @@ -3732,10 +4017,10 @@ int ram_load_postcopy(QEMUFile *f) } else if (tmp_page->host_addr != host_page_from_ram_block_offset(block, addr)) { /* not the 1st TP within the HP */ - error_report("Non-same host page detected. 
" + error_report("Non-same host page detected on channel %d: " "Target host page %p, received host page %p " "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)", - tmp_page->host_addr, + channel, tmp_page->host_addr, host_page_from_ram_block_offset(block, addr), block->idstr, addr, tmp_page->target_pages); ret = -EINVAL; @@ -3945,7 +4230,8 @@ static int ram_load_precopy(QEMUFile *f) if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { - RAMBlock *block = ram_block_from_stream(mis, f, flags); + RAMBlock *block = ram_block_from_stream(mis, f, flags, + RAM_CHANNEL_PRECOPY); host = host_from_ram_block_offset(block, addr); /* @@ -4122,7 +4408,12 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) */ WITH_RCU_READ_LOCK_GUARD() { if (postcopy_running) { - ret = ram_load_postcopy(f); + /* + * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of + * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to + * service fast page faults. + */ + ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY); } else { ret = ram_load_precopy(f); } @@ -4284,6 +4575,12 @@ static int ram_resume_prepare(MigrationState *s, void *opaque) return 0; } +void postcopy_preempt_shutdown_file(MigrationState *s) +{ + qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS); + qemu_fflush(s->postcopy_qemufile_src); +} + static SaveVMHandlers savevm_ram_handlers = { .save_setup = ram_save_setup, .save_live_iterate = ram_save_iterate, diff --git a/migration/ram.h b/migration/ram.h index ded0a3a086..c7af65ac74 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -61,7 +61,7 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms); /* For incoming postcopy discard */ int ram_discard_range(const char *block_name, uint64_t start, size_t length); int ram_postcopy_incoming_init(MigrationIncomingState *mis); -int ram_load_postcopy(QEMUFile *f); +int ram_load_postcopy(QEMUFile *f, int channel); void ram_handle_compressed(void *host, uint8_t ch, uint64_t size); @@ -73,6 +73,8 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file, const char *block_name); int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start); +void postcopy_preempt_shutdown_file(MigrationState *s); +void *postcopy_preempt_thread(void *opaque); /* ram cache */ int colo_init_ram_cache(void); @@ -87,4 +89,6 @@ void ram_write_tracking_prepare(void); int ram_write_tracking_start(void); void ram_write_tracking_stop(void); +void dirty_sync_missed_zero_copy(void); + #endif diff --git a/migration/savevm.c b/migration/savevm.c index e8a1b96fcd..48e85c052c 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -2117,6 +2117,13 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) */ qemu_sem_post(&mis->postcopy_pause_sem_fault); + if (migrate_postcopy_preempt()) { + /* The channel should already be setup again; make sure of it */ + assert(mis->postcopy_qemufile_dst); + /* Kick the fast ram load thread too */ + qemu_sem_post(&mis->postcopy_pause_sem_fast_load); + } + return 0; } @@ -2540,16 +2547,6 @@ static bool postcopy_pause_incoming(MigrationIncomingState *mis) { int i; - /* - * If network is interrupted, any temp page we received will be useless - * because we didn't mark them as "received" in receivedmap. After a - * proper recovery later (which will sync src dirty bitmap with receivedmap - * on dest) these cached small pages will be resent again. 
- */ - for (i = 0; i < mis->postcopy_channels; i++) { - postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]); - } - trace_postcopy_pause_incoming(); assert(migrate_postcopy_ram()); @@ -2572,12 +2569,37 @@ static bool postcopy_pause_incoming(MigrationIncomingState *mis) mis->to_src_file = NULL; qemu_mutex_unlock(&mis->rp_mutex); + /* + * NOTE: this must happen before reset the PostcopyTmpPages below, + * otherwise it's racy to reset those fields when the fast load thread + * can be accessing it in parallel. + */ + if (mis->postcopy_qemufile_dst) { + qemu_file_shutdown(mis->postcopy_qemufile_dst); + /* Take the mutex to make sure the fast ram load thread halted */ + qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); + migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); + qemu_fclose(mis->postcopy_qemufile_dst); + mis->postcopy_qemufile_dst = NULL; + qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); + } + migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, MIGRATION_STATUS_POSTCOPY_PAUSED); /* Notify the fault thread for the invalidated file handle */ postcopy_fault_thread_notify(mis); + /* + * If network is interrupted, any temp page we received will be useless + * because we didn't mark them as "received" in receivedmap. After a + * proper recovery later (which will sync src dirty bitmap with receivedmap + * on dest) these cached small pages will be resent again. + */ + for (i = 0; i < mis->postcopy_channels; i++) { + postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]); + } + error_report("Detected IO failure for postcopy. " "Migration paused."); @@ -2599,8 +2621,8 @@ retry: while (true) { section_type = qemu_get_byte(f); - if (qemu_file_get_error(f)) { - ret = qemu_file_get_error(f); + ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL); + if (ret) { break; } diff --git a/migration/socket.c b/migration/socket.c index 4fd5e85f50..e6fdf3c5e1 100644 --- a/migration/socket.c +++ b/migration/socket.c @@ -26,7 +26,7 @@ #include "io/channel-socket.h" #include "io/net-listener.h" #include "trace.h" - +#include "postcopy-ram.h" struct SocketOutgoingArgs { SocketAddress *saddr; @@ -39,6 +39,24 @@ void socket_send_channel_create(QIOTaskFunc f, void *data) f, data, NULL, NULL); } +QIOChannel *socket_send_channel_create_sync(Error **errp) +{ + QIOChannelSocket *sioc = qio_channel_socket_new(); + + if (!outgoing_args.saddr) { + object_unref(OBJECT(sioc)); + error_setg(errp, "Initial sock address not set!"); + return NULL; + } + + if (qio_channel_socket_connect_sync(sioc, outgoing_args.saddr, errp) < 0) { + object_unref(OBJECT(sioc)); + return NULL; + } + + return QIO_CHANNEL(sioc); +} + int socket_send_channel_destroy(QIOChannel *send) { /* Remove channel */ @@ -166,6 +184,8 @@ socket_start_incoming_migration_internal(SocketAddress *saddr, if (migrate_use_multifd()) { num = migrate_multifd_channels(); + } else if (migrate_postcopy_preempt()) { + num = RAM_CHANNEL_MAX; } if (qio_net_listener_open_sync(listener, saddr, num, errp) < 0) { diff --git a/migration/socket.h b/migration/socket.h index 891dbccceb..dc54df4e6c 100644 --- a/migration/socket.h +++ b/migration/socket.h @@ -21,6 +21,7 @@ #include "io/task.h" void socket_send_channel_create(QIOTaskFunc f, void *data); +QIOChannel *socket_send_channel_create_sync(Error **errp); int socket_send_channel_destroy(QIOChannel *send); void socket_start_incoming_migration(const char *str, Error **errp); diff --git a/migration/tls.c b/migration/tls.c index 32c384a8b6..73e8c9d3c2 100644 --- a/migration/tls.c 
+++ b/migration/tls.c @@ -166,3 +166,12 @@ void migration_tls_channel_connect(MigrationState *s, NULL, NULL); } + +bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc) +{ + if (!migrate_use_tls()) { + return false; + } + + return !object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS); +} diff --git a/migration/tls.h b/migration/tls.h index de4fe2cafd..98e23c9b0e 100644 --- a/migration/tls.h +++ b/migration/tls.h @@ -37,4 +37,8 @@ void migration_tls_channel_connect(MigrationState *s, QIOChannel *ioc, const char *hostname, Error **errp); + +/* Whether the QIO channel requires further TLS handshake? */ +bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc); + #endif diff --git a/migration/trace-events b/migration/trace-events index 1aec580e92..a34afe7b85 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -91,7 +91,7 @@ migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned migration_throttle(void) "" ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start: %" PRIx64 " %zx" ram_load_loop(const char *rbname, uint64_t addr, int flags, void *host) "%s: addr: 0x%" PRIx64 " flags: 0x%x host: %p" -ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x" +ram_load_postcopy_loop(int channel, uint64_t addr, int flags) "chan=%d addr=0x%" PRIx64 " flags=0x%x" ram_postcopy_send_discard_bitmap(void) "" ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: 0x%" PRIx64 " host: %p" ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: 0x%zx len: 0x%zx" @@ -111,6 +111,12 @@ ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRI ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%"PRIx64" dirty %d" +postcopy_preempt_triggered(char *str, unsigned long page) "during sending ramblock %s offset 0x%lx" +postcopy_preempt_restored(char *str, unsigned long page) "ramblock %s offset 0x%lx" +postcopy_preempt_hit(char *str, uint64_t offset) "ramblock %s offset 0x%"PRIx64 +postcopy_preempt_send_host_page(char *str, uint64_t offset) "ramblock %s offset 0x%"PRIx64 +postcopy_preempt_switch_channel(int channel) "%d" +postcopy_preempt_reset_channel(void) "" # multifd.c multifd_new_send_channel_async(uint8_t id) "channel %u" @@ -176,6 +182,7 @@ migration_thread_low_pending(uint64_t pending) "%" PRIu64 migrate_transferred(uint64_t tranferred, uint64_t time_spent, uint64_t bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %" PRIu64 " max_size %" PRId64 process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d" process_incoming_migration_co_postcopy_end_main(void) "" +postcopy_preempt_enabled(bool value) "%d" # channel.c migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s" @@ -263,6 +270,8 @@ mark_postcopy_blocktime_begin(uint64_t addr, void *dd, uint32_t time, int cpu, i mark_postcopy_blocktime_end(uint64_t addr, void *dd, uint32_t time, int affected_cpu) "addr: 0x%" PRIx64 ", dd: %p, time: %u, affected_cpu: %d" postcopy_pause_fault_thread(void) "" postcopy_pause_fault_thread_continued(void) "" +postcopy_pause_fast_load(void) "" +postcopy_pause_fast_load_continued(void) "" 
postcopy_ram_fault_thread_entry(void) "" postcopy_ram_fault_thread_exit(void) "" postcopy_ram_fault_thread_fds_core(int baseufd, int quitfd) "ufd: %d quitfd: %d" @@ -278,6 +287,10 @@ postcopy_request_shared_page(const char *sharer, const char *rb, uint64_t rb_off postcopy_request_shared_page_present(const char *sharer, const char *rb, uint64_t rb_offset) "%s already %s offset 0x%"PRIx64 postcopy_wake_shared(uint64_t client_addr, const char *rb) "at 0x%"PRIx64" in %s" postcopy_page_req_del(void *addr, int count) "resolved page req %p total %d" +postcopy_preempt_tls_handshake(void) "" +postcopy_preempt_new_channel(void) "" +postcopy_preempt_thread_entry(void) "" +postcopy_preempt_thread_exit(void) "" get_mem_fault_cpu_index(int cpu, uint32_t pid) "cpu: %d, pid: %u" diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index e8d6963722..c6cd6f91dd 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -307,6 +307,11 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n", info->ram->postcopy_bytes >> 10); } + if (info->ram->dirty_sync_missed_zero_copy) { + monitor_printf(mon, + "Zero-copy-send fallbacks happened: %" PRIu64 " times\n", + info->ram->dirty_sync_missed_zero_copy); + } } if (info->has_disk) { diff --git a/net/colo-compare.c b/net/colo-compare.c index d5d0965805..787c740f14 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -1323,7 +1323,7 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp) s->connection_track_table = g_hash_table_new_full(connection_key_hash, connection_key_equal, g_free, - connection_destroy); + NULL); colo_compare_iothread(s); diff --git a/net/colo.c b/net/colo.c index 1f8162f59f..6b0ff562ad 100644 --- a/net/colo.c +++ b/net/colo.c @@ -46,7 +46,14 @@ int parse_packet_early(Packet *pkt) static const uint8_t vlan[] = {0x81, 0x00}; uint8_t *data = pkt->data + pkt->vnet_hdr_len; uint16_t l3_proto; - ssize_t l2hdr_len = eth_get_l2_hdr_length(data); + ssize_t l2hdr_len; + + if (data == NULL) { + trace_colo_proxy_main_vnet_info("This packet is not parsed correctly, " + "pkt->vnet_hdr_len", pkt->vnet_hdr_len); + return 1; + } + l2hdr_len = eth_get_l2_hdr_length(data); if (pkt->size < ETH_HLEN + pkt->vnet_hdr_len) { trace_colo_proxy_main("pkt->size < ETH_HLEN"); @@ -218,7 +225,7 @@ Connection *connection_get(GHashTable *connection_track_table, /* * clear the conn_list */ - while (!g_queue_is_empty(conn_list)) { + while (conn_list && !g_queue_is_empty(conn_list)) { connection_destroy(g_queue_pop_head(conn_list)); } } diff --git a/net/filter-rewriter.c b/net/filter-rewriter.c index bf05023dc3..c18c4c2019 100644 --- a/net/filter-rewriter.c +++ b/net/filter-rewriter.c @@ -383,7 +383,7 @@ static void colo_rewriter_setup(NetFilterState *nf, Error **errp) s->connection_track_table = g_hash_table_new_full(connection_key_hash, connection_key_equal, g_free, - connection_destroy); + NULL); s->incoming_queue = qemu_new_net_queue(qemu_netfilter_pass_to_next, nf); } diff --git a/net/meson.build b/net/meson.build index 754e2d1d40..d1be76daf3 100644 --- a/net/meson.build +++ b/net/meson.build @@ -41,7 +41,8 @@ endif softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files(tap_posix)) softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('tap-win32.c')) if have_vhost_net_vdpa - softmmu_ss.add(files('vhost-vdpa.c')) + softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-vdpa.c'), if_false: files('vhost-vdpa-stub.c')) + softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-vdpa-stub.c')) endif 
vmnet_files = files( diff --git a/net/trace-events b/net/trace-events index d7a17256cc..6af927b4b9 100644 --- a/net/trace-events +++ b/net/trace-events @@ -9,6 +9,7 @@ vhost_user_event(const char *chr, int event) "chr: %s got event: %d" # colo.c colo_proxy_main(const char *chr) ": %s" +colo_proxy_main_vnet_info(const char *sta, int size) ": %s = %d" # colo-compare.c colo_compare_main(const char *chr) ": %s" diff --git a/net/vhost-vdpa-stub.c b/net/vhost-vdpa-stub.c new file mode 100644 index 0000000000..1732ed2443 --- /dev/null +++ b/net/vhost-vdpa-stub.c @@ -0,0 +1,21 @@ +/* + * vhost-vdpa-stub.c + * + * Copyright (c) 2022 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "clients.h" +#include "net/vhost-vdpa.h" +#include "qapi/error.h" + +int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); + return -1; +} diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index df1e69ee72..6abad276a6 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -11,11 +11,14 @@ #include "qemu/osdep.h" #include "clients.h" +#include "hw/virtio/virtio-net.h" #include "net/vhost_net.h" #include "net/vhost-vdpa.h" #include "hw/virtio/vhost-vdpa.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/memalign.h" #include "qemu/option.h" #include "qapi/error.h" #include <linux/vhost.h> @@ -30,6 +33,9 @@ typedef struct VhostVDPAState { NetClientState nc; struct vhost_vdpa vhost_vdpa; VHostNetState *vhost_net; + + /* Control commands shadow buffers */ + void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer; bool started; } VhostVDPAState; @@ -69,6 +75,28 @@ const int vdpa_feature_bits[] = { VHOST_INVALID_FEATURE_BIT }; +/** Supported device specific feature bits with SVQ */ +static const uint64_t vdpa_svq_device_features = + BIT_ULL(VIRTIO_NET_F_CSUM) | + BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | + BIT_ULL(VIRTIO_NET_F_MTU) | + BIT_ULL(VIRTIO_NET_F_MAC) | + BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | + BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | + BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | + BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | + BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | + BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | + BIT_ULL(VIRTIO_NET_F_HOST_ECN) | + BIT_ULL(VIRTIO_NET_F_HOST_UFO) | + BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | + BIT_ULL(VIRTIO_NET_F_STATUS) | + BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | + BIT_ULL(VIRTIO_F_ANY_LAYOUT) | + BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | + BIT_ULL(VIRTIO_NET_F_RSC_EXT) | + BIT_ULL(VIRTIO_NET_F_STANDBY); + VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); @@ -127,7 +155,13 @@ err_init: static void vhost_vdpa_cleanup(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_dev *dev = &s->vhost_net->dev; + qemu_vfree(s->cvq_cmd_out_buffer); + qemu_vfree(s->cvq_cmd_in_buffer); + if (dev->vq_index + dev->nvqs == dev->vq_index_end) { + g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); + } if (s->vhost_net) { vhost_net_cleanup(s->vhost_net); g_free(s->vhost_net); @@ -187,13 +221,251 @@ static NetClientInfo net_vhost_vdpa_info = { .check_peer_type = vhost_vdpa_check_peer_type, }; +static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) +{ + VhostIOVATree *tree = v->iova_tree; + DMAMap needle = { + /* + * No need to specify size 
or to look for more translations since + * this contiguous chunk was allocated by us. + */ + .translated_addr = (hwaddr)(uintptr_t)addr, + }; + const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle); + int r; + + if (unlikely(!map)) { + error_report("Cannot locate expected map"); + return; + } + + r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1); + if (unlikely(r != 0)) { + error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); + } + + vhost_iova_tree_remove(tree, map); +} + +static size_t vhost_vdpa_net_cvq_cmd_len(void) +{ + /* + * MAC_TABLE_SET is the ctrl command that produces the longer out buffer. + * In buffer is always 1 byte, so it should fit here + */ + return sizeof(struct virtio_net_ctrl_hdr) + + 2 * sizeof(struct virtio_net_ctrl_mac) + + MAC_TABLE_ENTRIES * ETH_ALEN; +} + +static size_t vhost_vdpa_net_cvq_cmd_page_len(void) +{ + return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size()); +} + +/** Copy and map a guest buffer. */ +static bool vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, + const struct iovec *out_data, + size_t out_num, size_t data_len, void *buf, + size_t *written, bool write) +{ + DMAMap map = {}; + int r; + + if (unlikely(!data_len)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid legnth of %s buffer\n", + __func__, write ? "in" : "out"); + return false; + } + + *written = iov_to_buf(out_data, out_num, 0, buf, data_len); + map.translated_addr = (hwaddr)(uintptr_t)buf; + map.size = vhost_vdpa_net_cvq_cmd_page_len() - 1; + map.perm = write ? IOMMU_RW : IOMMU_RO, + r = vhost_iova_tree_map_alloc(v->iova_tree, &map); + if (unlikely(r != IOVA_OK)) { + error_report("Cannot map injected element"); + return false; + } + + r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf, + !write); + if (unlikely(r < 0)) { + goto dma_map_err; + } + + return true; + +dma_map_err: + vhost_iova_tree_remove(v->iova_tree, &map); + return false; +} + +/** + * Copy the guest element into a dedicated buffer suitable to be sent to NIC + * + * @iov: [0] is the out buffer, [1] is the in one + */ +static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s, + VirtQueueElement *elem, + struct iovec *iov) +{ + size_t in_copied; + bool ok; + + iov[0].iov_base = s->cvq_cmd_out_buffer; + ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, elem->out_sg, elem->out_num, + vhost_vdpa_net_cvq_cmd_len(), iov[0].iov_base, + &iov[0].iov_len, false); + if (unlikely(!ok)) { + return false; + } + + iov[1].iov_base = s->cvq_cmd_in_buffer; + ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, NULL, 0, + sizeof(virtio_net_ctrl_ack), iov[1].iov_base, + &in_copied, true); + if (unlikely(!ok)) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); + return false; + } + + iov[1].iov_len = sizeof(virtio_net_ctrl_ack); + return true; +} + +/** + * Do not forward commands not supported by SVQ. Otherwise, the device could + * accept it and qemu would not know how to update the device model. 
+ */ +static bool vhost_vdpa_net_cvq_validate_cmd(const struct iovec *out, + size_t out_num) +{ + struct virtio_net_ctrl_hdr ctrl; + size_t n; + + n = iov_to_buf(out, out_num, 0, &ctrl, sizeof(ctrl)); + if (unlikely(n < sizeof(ctrl))) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid legnth of out buffer %zu\n", __func__, n); + return false; + } + + switch (ctrl.class) { + case VIRTIO_NET_CTRL_MAC: + switch (ctrl.cmd) { + case VIRTIO_NET_CTRL_MAC_ADDR_SET: + return true; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid mac cmd %u\n", + __func__, ctrl.cmd); + }; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n", + __func__, ctrl.class); + }; + + return false; +} + +/** + * Validate and copy control virtqueue commands. + * + * Following QEMU guidelines, we offer a copy of the buffers to the device to + * prevent TOCTOU bugs. + */ +static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *opaque) +{ + VhostVDPAState *s = opaque; + size_t in_len, dev_written; + virtio_net_ctrl_ack status = VIRTIO_NET_ERR; + /* out and in buffers sent to the device */ + struct iovec dev_buffers[2] = { + { .iov_base = s->cvq_cmd_out_buffer }, + { .iov_base = s->cvq_cmd_in_buffer }, + }; + /* in buffer used for device model */ + const struct iovec in = { + .iov_base = &status, + .iov_len = sizeof(status), + }; + int r = -EINVAL; + bool ok; + + ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers); + if (unlikely(!ok)) { + goto out; + } + + ok = vhost_vdpa_net_cvq_validate_cmd(&dev_buffers[0], 1); + if (unlikely(!ok)) { + goto out; + } + + r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem); + if (unlikely(r != 0)) { + if (unlikely(r == -ENOSPC)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", + __func__); + } + goto out; + } + + /* + * We can poll here since we've had BQL from the time we sent the + * descriptor. 
Also, we need to take the answer before SVQ pulls by itself, + * when BQL is released + */ + dev_written = vhost_svq_poll(svq); + if (unlikely(dev_written < sizeof(status))) { + error_report("Insufficient written data (%zu)", dev_written); + goto out; + } + + memcpy(&status, dev_buffers[1].iov_base, sizeof(status)); + if (status != VIRTIO_NET_OK) { + goto out; + } + + status = VIRTIO_NET_ERR; + virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1); + if (status != VIRTIO_NET_OK) { + error_report("Bad CVQ processing in model"); + } + +out: + in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, + sizeof(status)); + if (unlikely(in_len < sizeof(status))) { + error_report("Bad device CVQ written length"); + } + vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); + g_free(elem); + if (dev_buffers[0].iov_base) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[0].iov_base); + } + if (dev_buffers[1].iov_base) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base); + } + return r; +} + +static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { + .avail_handler = vhost_vdpa_net_handle_ctrl_avail, +}; + static NetClientState *net_vhost_vdpa_init(NetClientState *peer, const char *device, const char *name, int vdpa_device_fd, int queue_pair_index, int nvqs, - bool is_datapath) + bool is_datapath, + bool svq, + VhostIOVATree *iova_tree) { NetClientState *nc = NULL; VhostVDPAState *s; @@ -211,6 +483,21 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.device_fd = vdpa_device_fd; s->vhost_vdpa.index = queue_pair_index; + s->vhost_vdpa.shadow_vqs_enabled = svq; + s->vhost_vdpa.iova_tree = iova_tree; + if (!is_datapath) { + s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(), + vhost_vdpa_net_cvq_cmd_page_len()); + memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); + s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size(), + vhost_vdpa_net_cvq_cmd_page_len()); + memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); + + s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; + s->vhost_vdpa.shadow_vq_ops_opaque = s; + error_setg(&s->vhost_vdpa.migration_blocker, + "Migration disabled: vhost-vdpa uses CVQ."); + } ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); if (ret) { qemu_del_net_client(nc); @@ -219,20 +506,32 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, return nc; } -static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp) +static int vhost_vdpa_get_iova_range(int fd, + struct vhost_vdpa_iova_range *iova_range) +{ + int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range); + + return ret < 0 ? 
-errno : 0; +} + +static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) +{ + int ret = ioctl(fd, VHOST_GET_FEATURES, features); + if (unlikely(ret < 0)) { + error_setg_errno(errp, errno, + "Fail to query features from vhost-vDPA device"); + } + return ret; +} + +static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, + int *has_cvq, Error **errp) { unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); g_autofree struct vhost_vdpa_config *config = NULL; __virtio16 *max_queue_pairs; - uint64_t features; int ret; - ret = ioctl(fd, VHOST_GET_FEATURES, &features); - if (ret) { - error_setg(errp, "Fail to query features from vhost-vDPA device"); - return ret; - } - if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { *has_cvq = 1; } else { @@ -262,10 +561,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { const NetdevVhostVDPAOptions *opts; + uint64_t features; int vdpa_device_fd; g_autofree NetClientState **ncs = NULL; + g_autoptr(VhostIOVATree) iova_tree = NULL; NetClientState *nc; - int queue_pairs, i, has_cvq = 0; + int queue_pairs, r, i, has_cvq = 0; assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); opts = &netdev->u.vhost_vdpa; @@ -279,29 +580,57 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, return -errno; } - queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, + r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp); + if (unlikely(r < 0)) { + return r; + } + + queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, &has_cvq, errp); if (queue_pairs < 0) { qemu_close(vdpa_device_fd); return queue_pairs; } + if (opts->x_svq) { + struct vhost_vdpa_iova_range iova_range; + + uint64_t invalid_dev_features = + features & ~vdpa_svq_device_features & + /* Transport are all accepted at this point */ + ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START, + VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); + + if (invalid_dev_features) { + error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64, + invalid_dev_features); + goto err_svq; + } + + vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); + iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last); + } + ncs = g_malloc0(sizeof(*ncs) * queue_pairs); for (i = 0; i < queue_pairs; i++) { ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, - vdpa_device_fd, i, 2, true); + vdpa_device_fd, i, 2, true, opts->x_svq, + iova_tree); if (!ncs[i]) goto err; } if (has_cvq) { nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, - vdpa_device_fd, i, 1, false); + vdpa_device_fd, i, 1, false, + opts->x_svq, iova_tree); if (!nc) goto err; } + /* iova_tree ownership belongs to last NetClientState */ + g_steal_pointer(&iova_tree); return 0; err: @@ -310,6 +639,8 @@ err: qemu_del_net_client(ncs[i]); } } + +err_svq: qemu_close(vdpa_device_fd); return -1; diff --git a/python/qemu/qmp/legacy.py b/python/qemu/qmp/legacy.py index 03b5574618..1951754455 100644 --- a/python/qemu/qmp/legacy.py +++ b/python/qemu/qmp/legacy.py @@ -50,7 +50,7 @@ QMPObject = Dict[str, object] # QMPMessage can be outgoing commands or incoming events/returns. # QMPReturnValue is usually a dict/json object, but due to QAPI's -# 'returns-whitelist', it can actually be anything. +# 'command-returns-exceptions', it can actually be anything. # # {'return': {}} is a QMPMessage, # {} is the QMPReturnValue. 
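For readers following the x-svq feature gating in net_init_vhost_vdpa above, here is a minimal standalone sketch of the same mask arithmetic. It is not QEMU code: the bit positions, the accepted-feature set and the BIT_ULL/MAKE_64BIT_MASK helpers below are re-declared locally as illustrative assumptions, only to show how device-specific bits outside the supported set are isolated while transport feature bits are ignored.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr)                    (1ULL << (nr))
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/* Illustrative stand-ins for the virtio transport feature bit range. */
#define TRANSPORT_F_START 28
#define TRANSPORT_F_END   39   /* one past the last transport bit */

/* Assumed subset of device features a shadow virtqueue could handle. */
static const uint64_t svq_supported = BIT_ULL(5) | BIT_ULL(17);

int main(void)
{
    /* Device offers the two supported device bits plus one transport bit. */
    uint64_t dev_features = BIT_ULL(5) | BIT_ULL(17) | BIT_ULL(30);

    /* Device bits not handled by SVQ, with transport bits masked out. */
    uint64_t invalid = dev_features & ~svq_supported &
                       ~MAKE_64BIT_MASK(TRANSPORT_F_START,
                                        TRANSPORT_F_END - TRANSPORT_F_START);

    /* Bit 30 falls in the transport range, so nothing invalid remains. */
    printf("invalid feature bits: 0x%" PRIx64 "\n", invalid);
    return invalid ? 1 : 0;
}

In the patch itself the same expression feeds error_setg() and aborts netdev setup when any unsupported device-specific bit survives the masking.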
diff --git a/qapi/migration.json b/qapi/migration.json index 7102e474a6..81185d4311 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -55,6 +55,10 @@ # @postcopy-bytes: The number of bytes sent during the post-copy phase # (since 7.0). # +# @dirty-sync-missed-zero-copy: Number of times dirty RAM synchronization could +# not avoid copying dirty pages. This is between +# 0 and @dirty-sync-count * @multifd-channels. +# (since 7.1) # Since: 0.14 ## { 'struct': 'MigrationStats', @@ -65,7 +69,8 @@ 'postcopy-requests' : 'int', 'page-size' : 'int', 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64', 'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64', - 'postcopy-bytes' : 'uint64' } } + 'postcopy-bytes' : 'uint64', + 'dirty-sync-missed-zero-copy' : 'uint64' } } ## # @XBZRLECacheStats: @@ -467,6 +472,11 @@ # Requires that QEMU be permitted to use locked memory # for guest RAM pages. # (since 7.1) +# @postcopy-preempt: If enabled, the migration process will allow postcopy +# requests to preempt precopy stream, so postcopy requests +# will be handled faster. This is a performance feature and +# should not affect the correctness of postcopy migration. +# (since 7.1) # # Features: # @unstable: Members @x-colo and @x-ignore-shared are experimental. @@ -482,7 +492,7 @@ 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, 'validate-uuid', 'background-snapshot', - 'zero-copy-send'] } + 'zero-copy-send', 'postcopy-preempt'] } ## # @MigrationCapabilityStatus: @@ -1869,6 +1879,86 @@ { 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } ## +# @DirtyLimitInfo: +# +# Dirty page rate limit information of a virtual CPU. +# +# @cpu-index: index of a virtual CPU. +# +# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual +# CPU, 0 means unlimited. +# +# @current-rate: current dirty page rate (MB/s) for a virtual CPU. +# +# Since: 7.1 +# +## +{ 'struct': 'DirtyLimitInfo', + 'data': { 'cpu-index': 'int', + 'limit-rate': 'uint64', + 'current-rate': 'uint64' } } + +## +# @set-vcpu-dirty-limit: +# +# Set the upper limit of dirty page rate for virtual CPUs. +# +# Requires KVM with accelerator property "dirty-ring-size" set. +# A virtual CPU's dirty page rate is a measure of its memory load. +# To observe dirty page rates, use @calc-dirty-rate. +# +# @cpu-index: index of a virtual CPU, default is all. +# +# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. +# +# Since: 7.1 +# +# Example: +# {"execute": "set-vcpu-dirty-limit"} +# "arguments": { "dirty-rate": 200, +# "cpu-index": 1 } } +# +## +{ 'command': 'set-vcpu-dirty-limit', + 'data': { '*cpu-index': 'int', + 'dirty-rate': 'uint64' } } + +## +# @cancel-vcpu-dirty-limit: +# +# Cancel the upper limit of dirty page rate for virtual CPUs. +# +# Cancel the dirty page limit for the vCPU which has been set with +# set-vcpu-dirty-limit command. Note that this command requires +# support from dirty ring, same as the "set-vcpu-dirty-limit". +# +# @cpu-index: index of a virtual CPU, default is all. +# +# Since: 7.1 +# +# Example: +# {"execute": "cancel-vcpu-dirty-limit"} +# "arguments": { "cpu-index": 1 } } +# +## +{ 'command': 'cancel-vcpu-dirty-limit', + 'data': { '*cpu-index': 'int'} } + +## +# @query-vcpu-dirty-limit: +# +# Returns information about virtual CPU dirty page rate limits, if any. 
+# +# Since: 7.1 +# +# Example: +# {"execute": "query-vcpu-dirty-limit"} +# +## +{ 'command': 'query-vcpu-dirty-limit', + 'returns': [ 'DirtyLimitInfo' ] } + +## # @snapshot-save: # # Save a VM snapshot diff --git a/qapi/net.json b/qapi/net.json index 9af11e9a3b..75ba2cb989 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -445,12 +445,19 @@ # @queues: number of queues to be created for multiqueue vhost-vdpa # (default: 1) # +# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1) +# (default: false) +# +# Features: +# @unstable: Member @x-svq is experimental. +# # Since: 5.1 ## { 'struct': 'NetdevVhostVDPAOptions', 'data': { '*vhostdev': 'str', - '*queues': 'int' } } + '*queues': 'int', + '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] } } } ## # @NetdevVmnetHostOptions: diff --git a/qapi/ui.json b/qapi/ui.json index 413371d5e8..cf58ab4283 100644 --- a/qapi/ui.json +++ b/qapi/ui.json @@ -1195,12 +1195,17 @@ # assuming the guest will resize the display to match # the window size then. Otherwise it defaults to "off". # Since 3.1 +# @show-tabs: Display the tab bar for switching between the various graphical +# interfaces (e.g. VGA and virtual console character devices) +# by default. +# Since 7.1 # # Since: 2.12 ## { 'struct' : 'DisplayGTK', 'data' : { '*grab-on-hover' : 'bool', - '*zoom-to-fit' : 'bool' } } + '*zoom-to-fit' : 'bool', + '*show-tabs' : 'bool' } } ## # @DisplayEGLHeadless: diff --git a/qemu-options.hx b/qemu-options.hx index 377d22fbd8..79e00916a1 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -1938,7 +1938,7 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #endif #if defined(CONFIG_GTK) "-display gtk[,full-screen=on|off][,gl=on|off][,grab-on-hover=on|off]\n" - " [,show-cursor=on|off][,window-close=on|off]\n" + " [,show-tabs=on|off][,show-cursor=on|off][,window-close=on|off]\n" #endif #if defined(CONFIG_VNC) "-display vnc=<display>[,<optargs>]\n" @@ -2023,6 +2023,10 @@ SRST ``grab-on-hover=on|off`` : Grab keyboard input on mouse hover + ``show-tabs=on|off`` : Display the tab bar for switching between the + various graphical interfaces (e.g. VGA and + virtual console character devices) by default. + ``show-cursor=on|off`` : Force showing the mouse cursor ``window-close=on|off`` : Allow to quit qemu with window close button diff --git a/qga/commands-posix.c b/qga/commands-posix.c index f18530d85f..954efed01b 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -1207,7 +1207,15 @@ static void build_guest_fsinfo_for_device(char const *devpath, syspath = realpath(devpath, NULL); if (!syspath) { - error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); + if (errno != ENOENT) { + error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); + return; + } + + /* ENOENT: This devpath may not exist because of container config */ + if (!fs->name) { + fs->name = g_path_get_basename(devpath); + } return; } diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py index 539ead62b4..b369388360 100755 --- a/scripts/vmstate-static-checker.py +++ b/scripts/vmstate-static-checker.py @@ -40,7 +40,7 @@ def check_fields_match(name, s_field, d_field): return True # Some fields changed names between qemu versions. This list - # is used to whitelist such changes in each section / description. + # is used to allow such changes in each section / description. 
changed_names = { 'apic': ['timer', 'timer_expiry'], 'e1000': ['dev', 'parent_obj'], diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c new file mode 100644 index 0000000000..8d98cb7f2c --- /dev/null +++ b/softmmu/dirtylimit.c @@ -0,0 +1,601 @@ +/* + * Dirty page rate limit implementation code + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) <huangy81@chinatelecom.cn> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qapi/qapi-commands-migration.h" +#include "qapi/qmp/qdict.h" +#include "qapi/error.h" +#include "sysemu/dirtyrate.h" +#include "sysemu/dirtylimit.h" +#include "monitor/hmp.h" +#include "monitor/monitor.h" +#include "exec/memory.h" +#include "hw/boards.h" +#include "sysemu/kvm.h" +#include "trace.h" + +/* + * Dirtylimit stop working if dirty page rate error + * value less than DIRTYLIMIT_TOLERANCE_RANGE + */ +#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ +/* + * Plus or minus vcpu sleep time linearly if dirty + * page rate error value percentage over + * DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT. + * Otherwise, plus or minus a fixed vcpu sleep time. + */ +#define DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT 50 +/* + * Max vcpu sleep time percentage during a cycle + * composed of dirty ring full and sleep time. + */ +#define DIRTYLIMIT_THROTTLE_PCT_MAX 99 + +struct { + VcpuStat stat; + bool running; + QemuThread thread; +} *vcpu_dirty_rate_stat; + +typedef struct VcpuDirtyLimitState { + int cpu_index; + bool enabled; + /* + * Quota dirty page rate, unit is MB/s + * zero if not enabled. + */ + uint64_t quota; +} VcpuDirtyLimitState; + +struct { + VcpuDirtyLimitState *states; + /* Max cpus number configured by user */ + int max_cpus; + /* Number of vcpu under dirtylimit */ + int limited_nvcpu; +} *dirtylimit_state; + +/* protect dirtylimit_state */ +static QemuMutex dirtylimit_mutex; + +/* dirtylimit thread quit if dirtylimit_quit is true */ +static bool dirtylimit_quit; + +static void vcpu_dirty_rate_stat_collect(void) +{ + VcpuStat stat; + int i = 0; + + /* calculate vcpu dirtyrate */ + vcpu_calculate_dirtyrate(DIRTYLIMIT_CALC_TIME_MS, + &stat, + GLOBAL_DIRTY_LIMIT, + false); + + for (i = 0; i < stat.nvcpu; i++) { + vcpu_dirty_rate_stat->stat.rates[i].id = i; + vcpu_dirty_rate_stat->stat.rates[i].dirty_rate = + stat.rates[i].dirty_rate; + } + + free(stat.rates); +} + +static void *vcpu_dirty_rate_stat_thread(void *opaque) +{ + rcu_register_thread(); + + /* start log sync */ + global_dirty_log_change(GLOBAL_DIRTY_LIMIT, true); + + while (qatomic_read(&vcpu_dirty_rate_stat->running)) { + vcpu_dirty_rate_stat_collect(); + if (dirtylimit_in_service()) { + dirtylimit_process(); + } + } + + /* stop log sync */ + global_dirty_log_change(GLOBAL_DIRTY_LIMIT, false); + + rcu_unregister_thread(); + return NULL; +} + +int64_t vcpu_dirty_rate_get(int cpu_index) +{ + DirtyRateVcpu *rates = vcpu_dirty_rate_stat->stat.rates; + return qatomic_read_i64(&rates[cpu_index].dirty_rate); +} + +void vcpu_dirty_rate_stat_start(void) +{ + if (qatomic_read(&vcpu_dirty_rate_stat->running)) { + return; + } + + qatomic_set(&vcpu_dirty_rate_stat->running, 1); + qemu_thread_create(&vcpu_dirty_rate_stat->thread, + "dirtyrate-stat", + vcpu_dirty_rate_stat_thread, + NULL, + QEMU_THREAD_JOINABLE); +} + +void vcpu_dirty_rate_stat_stop(void) +{ + qatomic_set(&vcpu_dirty_rate_stat->running, 0); + 
dirtylimit_state_unlock(); + qemu_mutex_unlock_iothread(); + qemu_thread_join(&vcpu_dirty_rate_stat->thread); + qemu_mutex_lock_iothread(); + dirtylimit_state_lock(); +} + +void vcpu_dirty_rate_stat_initialize(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + + vcpu_dirty_rate_stat = + g_malloc0(sizeof(*vcpu_dirty_rate_stat)); + + vcpu_dirty_rate_stat->stat.nvcpu = max_cpus; + vcpu_dirty_rate_stat->stat.rates = + g_malloc0(sizeof(DirtyRateVcpu) * max_cpus); + + vcpu_dirty_rate_stat->running = false; +} + +void vcpu_dirty_rate_stat_finalize(void) +{ + free(vcpu_dirty_rate_stat->stat.rates); + vcpu_dirty_rate_stat->stat.rates = NULL; + + free(vcpu_dirty_rate_stat); + vcpu_dirty_rate_stat = NULL; +} + +void dirtylimit_state_lock(void) +{ + qemu_mutex_lock(&dirtylimit_mutex); +} + +void dirtylimit_state_unlock(void) +{ + qemu_mutex_unlock(&dirtylimit_mutex); +} + +static void +__attribute__((__constructor__)) dirtylimit_mutex_init(void) +{ + qemu_mutex_init(&dirtylimit_mutex); +} + +static inline VcpuDirtyLimitState *dirtylimit_vcpu_get_state(int cpu_index) +{ + return &dirtylimit_state->states[cpu_index]; +} + +void dirtylimit_state_initialize(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + int i; + + dirtylimit_state = g_malloc0(sizeof(*dirtylimit_state)); + + dirtylimit_state->states = + g_malloc0(sizeof(VcpuDirtyLimitState) * max_cpus); + + for (i = 0; i < max_cpus; i++) { + dirtylimit_state->states[i].cpu_index = i; + } + + dirtylimit_state->max_cpus = max_cpus; + trace_dirtylimit_state_initialize(max_cpus); +} + +void dirtylimit_state_finalize(void) +{ + free(dirtylimit_state->states); + dirtylimit_state->states = NULL; + + free(dirtylimit_state); + dirtylimit_state = NULL; + + trace_dirtylimit_state_finalize(); +} + +bool dirtylimit_in_service(void) +{ + return !!dirtylimit_state; +} + +bool dirtylimit_vcpu_index_valid(int cpu_index) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + + return !(cpu_index < 0 || + cpu_index >= ms->smp.max_cpus); +} + +static inline int64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate) +{ + static uint64_t max_dirtyrate; + uint32_t dirty_ring_size = kvm_dirty_ring_size(); + uint64_t dirty_ring_size_meory_MB = + dirty_ring_size * TARGET_PAGE_SIZE >> 20; + + if (max_dirtyrate < dirtyrate) { + max_dirtyrate = dirtyrate; + } + + return dirty_ring_size_meory_MB * 1000000 / max_dirtyrate; +} + +static inline bool dirtylimit_done(uint64_t quota, + uint64_t current) +{ + uint64_t min, max; + + min = MIN(quota, current); + max = MAX(quota, current); + + return ((max - min) <= DIRTYLIMIT_TOLERANCE_RANGE) ? 
true : false; +} + +static inline bool +dirtylimit_need_linear_adjustment(uint64_t quota, + uint64_t current) +{ + uint64_t min, max; + + min = MIN(quota, current); + max = MAX(quota, current); + + return ((max - min) * 100 / max) > DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT; +} + +static void dirtylimit_set_throttle(CPUState *cpu, + uint64_t quota, + uint64_t current) +{ + int64_t ring_full_time_us = 0; + uint64_t sleep_pct = 0; + uint64_t throttle_us = 0; + + if (current == 0) { + cpu->throttle_us_per_full = 0; + return; + } + + ring_full_time_us = dirtylimit_dirty_ring_full_time(current); + + if (dirtylimit_need_linear_adjustment(quota, current)) { + if (quota < current) { + sleep_pct = (current - quota) * 100 / current; + throttle_us = + ring_full_time_us * sleep_pct / (double)(100 - sleep_pct); + cpu->throttle_us_per_full += throttle_us; + } else { + sleep_pct = (quota - current) * 100 / quota; + throttle_us = + ring_full_time_us * sleep_pct / (double)(100 - sleep_pct); + cpu->throttle_us_per_full -= throttle_us; + } + + trace_dirtylimit_throttle_pct(cpu->cpu_index, + sleep_pct, + throttle_us); + } else { + if (quota < current) { + cpu->throttle_us_per_full += ring_full_time_us / 10; + } else { + cpu->throttle_us_per_full -= ring_full_time_us / 10; + } + } + + /* + * TODO: in the big kvm_dirty_ring_size case (eg: 65536, or other scenario), + * current dirty page rate may never reach the quota, we should stop + * increasing sleep time? + */ + cpu->throttle_us_per_full = MIN(cpu->throttle_us_per_full, + ring_full_time_us * DIRTYLIMIT_THROTTLE_PCT_MAX); + + cpu->throttle_us_per_full = MAX(cpu->throttle_us_per_full, 0); +} + +static void dirtylimit_adjust_throttle(CPUState *cpu) +{ + uint64_t quota = 0; + uint64_t current = 0; + int cpu_index = cpu->cpu_index; + + quota = dirtylimit_vcpu_get_state(cpu_index)->quota; + current = vcpu_dirty_rate_get(cpu_index); + + if (!dirtylimit_done(quota, current)) { + dirtylimit_set_throttle(cpu, quota, current); + } + + return; +} + +void dirtylimit_process(void) +{ + CPUState *cpu; + + if (!qatomic_read(&dirtylimit_quit)) { + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_state_unlock(); + return; + } + + CPU_FOREACH(cpu) { + if (!dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled) { + continue; + } + dirtylimit_adjust_throttle(cpu); + } + dirtylimit_state_unlock(); + } +} + +void dirtylimit_change(bool start) +{ + if (start) { + qatomic_set(&dirtylimit_quit, 0); + } else { + qatomic_set(&dirtylimit_quit, 1); + } +} + +void dirtylimit_set_vcpu(int cpu_index, + uint64_t quota, + bool enable) +{ + trace_dirtylimit_set_vcpu(cpu_index, quota); + + if (enable) { + dirtylimit_state->states[cpu_index].quota = quota; + if (!dirtylimit_vcpu_get_state(cpu_index)->enabled) { + dirtylimit_state->limited_nvcpu++; + } + } else { + dirtylimit_state->states[cpu_index].quota = 0; + if (dirtylimit_state->states[cpu_index].enabled) { + dirtylimit_state->limited_nvcpu--; + } + } + + dirtylimit_state->states[cpu_index].enabled = enable; +} + +void dirtylimit_set_all(uint64_t quota, + bool enable) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + int i; + + for (i = 0; i < max_cpus; i++) { + dirtylimit_set_vcpu(i, quota, enable); + } +} + +void dirtylimit_vcpu_execute(CPUState *cpu) +{ + if (dirtylimit_in_service() && + dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled && + cpu->throttle_us_per_full) { + trace_dirtylimit_vcpu_execute(cpu->cpu_index, + cpu->throttle_us_per_full); + usleep(cpu->throttle_us_per_full); 
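+        /*
+         * The sleep is charged once per dirty-ring-full event, so the
+         * effective rate is one ring's worth of memory per (ring full
+         * time + throttle_us_per_full) of vCPU time.  For example,
+         * assuming a 4096-entry ring and 4 KiB pages (16 MB per ring),
+         * a vCPU dirtying at 200 MB/s fills the ring every ~80 ms; to
+         * meet a 100 MB/s quota the controller converges on roughly
+         * 80 ms of extra sleep per ring full.
+         */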
+ } +} + +static void dirtylimit_init(void) +{ + dirtylimit_state_initialize(); + dirtylimit_change(true); + vcpu_dirty_rate_stat_initialize(); + vcpu_dirty_rate_stat_start(); +} + +static void dirtylimit_cleanup(void) +{ + vcpu_dirty_rate_stat_stop(); + vcpu_dirty_rate_stat_finalize(); + dirtylimit_change(false); + dirtylimit_state_finalize(); +} + +void qmp_cancel_vcpu_dirty_limit(bool has_cpu_index, + int64_t cpu_index, + Error **errp) +{ + if (!kvm_enabled() || !kvm_dirty_ring_enabled()) { + return; + } + + if (has_cpu_index && !dirtylimit_vcpu_index_valid(cpu_index)) { + error_setg(errp, "incorrect cpu index specified"); + return; + } + + if (!dirtylimit_in_service()) { + return; + } + + dirtylimit_state_lock(); + + if (has_cpu_index) { + dirtylimit_set_vcpu(cpu_index, 0, false); + } else { + dirtylimit_set_all(0, false); + } + + if (!dirtylimit_state->limited_nvcpu) { + dirtylimit_cleanup(); + } + + dirtylimit_state_unlock(); +} + +void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + int64_t cpu_index = qdict_get_try_int(qdict, "cpu_index", -1); + Error *err = NULL; + + qmp_cancel_vcpu_dirty_limit(!!(cpu_index != -1), cpu_index, &err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "[Please use 'info vcpu_dirty_limit' to query " + "dirty limit for virtual CPU]\n"); +} + +void qmp_set_vcpu_dirty_limit(bool has_cpu_index, + int64_t cpu_index, + uint64_t dirty_rate, + Error **errp) +{ + if (!kvm_enabled() || !kvm_dirty_ring_enabled()) { + error_setg(errp, "dirty page limit feature requires KVM with" + " accelerator property 'dirty-ring-size' set'"); + return; + } + + if (has_cpu_index && !dirtylimit_vcpu_index_valid(cpu_index)) { + error_setg(errp, "incorrect cpu index specified"); + return; + } + + if (!dirty_rate) { + qmp_cancel_vcpu_dirty_limit(has_cpu_index, cpu_index, errp); + return; + } + + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_init(); + } + + if (has_cpu_index) { + dirtylimit_set_vcpu(cpu_index, dirty_rate, true); + } else { + dirtylimit_set_all(dirty_rate, true); + } + + dirtylimit_state_unlock(); +} + +void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + int64_t dirty_rate = qdict_get_int(qdict, "dirty_rate"); + int64_t cpu_index = qdict_get_try_int(qdict, "cpu_index", -1); + Error *err = NULL; + + qmp_set_vcpu_dirty_limit(!!(cpu_index != -1), cpu_index, dirty_rate, &err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "[Please use 'info vcpu_dirty_limit' to query " + "dirty limit for virtual CPU]\n"); +} + +static struct DirtyLimitInfo *dirtylimit_query_vcpu(int cpu_index) +{ + DirtyLimitInfo *info = NULL; + + info = g_malloc0(sizeof(*info)); + info->cpu_index = cpu_index; + info->limit_rate = dirtylimit_vcpu_get_state(cpu_index)->quota; + info->current_rate = vcpu_dirty_rate_get(cpu_index); + + return info; +} + +static struct DirtyLimitInfoList *dirtylimit_query_all(void) +{ + int i, index; + DirtyLimitInfo *info = NULL; + DirtyLimitInfoList *head = NULL, **tail = &head; + + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_state_unlock(); + return NULL; + } + + for (i = 0; i < dirtylimit_state->max_cpus; i++) { + index = dirtylimit_state->states[i].cpu_index; + if (dirtylimit_vcpu_get_state(index)->enabled) { + info = dirtylimit_query_vcpu(index); + QAPI_LIST_APPEND(tail, info); + } + } + + dirtylimit_state_unlock(); + + return head; +} + +struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error **errp) +{ + 
if (!dirtylimit_in_service()) { + return NULL; + } + + return dirtylimit_query_all(); +} + +void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + DirtyLimitInfoList *limit, *head, *info = NULL; + Error *err = NULL; + + if (!dirtylimit_in_service()) { + monitor_printf(mon, "Dirty page limit not enabled!\n"); + return; + } + + info = qmp_query_vcpu_dirty_limit(&err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + head = info; + for (limit = head; limit != NULL; limit = limit->next) { + monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s)," + " current rate %"PRIi64 " (MB/s)\n", + limit->value->cpu_index, + limit->value->limit_rate, + limit->value->current_rate); + } + + g_free(info); +} diff --git a/softmmu/meson.build b/softmmu/meson.build index 8138248661..3272af1f31 100644 --- a/softmmu/meson.build +++ b/softmmu/meson.build @@ -4,6 +4,7 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files( 'memory.c', 'physmem.c', 'qtest.c', + 'dirtylimit.c', )]) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files( diff --git a/softmmu/runstate.c b/softmmu/runstate.c index fac7b63259..168e1b78a0 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -126,6 +126,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_RESTORE_VM, RUN_STATE_PRELAUNCH }, { RUN_STATE_COLO, RUN_STATE_RUNNING }, + { RUN_STATE_COLO, RUN_STATE_PRELAUNCH }, { RUN_STATE_COLO, RUN_STATE_SHUTDOWN}, { RUN_STATE_RUNNING, RUN_STATE_DEBUG }, diff --git a/softmmu/trace-events b/softmmu/trace-events index 9c88887b3c..22606dc27b 100644 --- a/softmmu/trace-events +++ b/softmmu/trace-events @@ -31,3 +31,10 @@ runstate_set(int current_state, const char *current_state_str, int new_state, co system_wakeup_request(int reason) "reason=%d" qemu_system_shutdown_request(int reason) "reason=%d" qemu_system_powerdown_request(void) "" + +#dirtylimit.c +dirtylimit_state_initialize(int max_cpus) "dirtylimit state initialize: max cpus %d" +dirtylimit_state_finalize(void) +dirtylimit_throttle_pct(int cpu_index, uint64_t pct, int64_t time_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %"PRIi64 " us" +dirtylimit_set_vcpu(int cpu_index, uint64_t quota) "CPU[%d] set dirty page rate limit %"PRIu64 +dirtylimit_vcpu_execute(int cpu_index, int64_t sleep_time_us) "CPU[%d] sleep %"PRIi64 " us" diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 5de7e097e9..1b7b3d76bb 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -226,7 +226,7 @@ static void arm_cpu_reset(DeviceState *dev) * Enable TBI0 but not TBI1. * Note that this must match useronly_clean_ptr. */ - env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37); + env->cp15.tcr_el[1] = 5 | (1ULL << 37); /* Enable MTE */ if (cpu_isar_feature(aa64_mte, cpu)) { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 1e36a839ee..e890ee074d 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -166,12 +166,6 @@ typedef struct ARMGenericTimer { #define GTIMER_HYPVIRT 4 #define NUM_GTIMERS 5 -typedef struct { - uint64_t raw_tcr; - uint32_t mask; - uint32_t base_mask; -} TCR; - #define VTCR_NSW (1u << 29) #define VTCR_NSA (1u << 30) #define VSTCR_SW VTCR_NSW @@ -339,9 +333,9 @@ typedef struct CPUArchState { uint64_t vttbr_el2; /* Virtualization Translation Table Base. */ uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */ /* MMU translation table base control. */ - TCR tcr_el[4]; - TCR vtcr_el2; /* Virtualization Translation Control. 
*/ - TCR vstcr_el2; /* Secure Virtualization Translation Control. */ + uint64_t tcr_el[4]; + uint64_t vtcr_el2; /* Virtualization Translation Control. */ + uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */ uint32_t c2_data; /* MPU data cacheable bits. */ uint32_t c2_insn; /* MPU instruction cacheable bits. */ union { /* MMU domain access control register @@ -1418,6 +1412,25 @@ FIELD(CPTR_EL3, TCPAC, 31, 1) #define TTBCR_SH1 (1U << 28) #define TTBCR_EAE (1U << 31) +FIELD(VTCR, T0SZ, 0, 6) +FIELD(VTCR, SL0, 6, 2) +FIELD(VTCR, IRGN0, 8, 2) +FIELD(VTCR, ORGN0, 10, 2) +FIELD(VTCR, SH0, 12, 2) +FIELD(VTCR, TG0, 14, 2) +FIELD(VTCR, PS, 16, 3) +FIELD(VTCR, VS, 19, 1) +FIELD(VTCR, HA, 21, 1) +FIELD(VTCR, HD, 22, 1) +FIELD(VTCR, HWU59, 25, 1) +FIELD(VTCR, HWU60, 26, 1) +FIELD(VTCR, HWU61, 27, 1) +FIELD(VTCR, HWU62, 28, 1) +FIELD(VTCR, NSW, 29, 1) +FIELD(VTCR, NSA, 30, 1) +FIELD(VTCR, DS, 32, 1) +FIELD(VTCR, SL2, 33, 1) + /* Bit definitions for ARMv8 SPSR (PSTATE) format. * Only these are valid when in AArch64 mode; in * AArch32 mode SPSRs are basically CPSR-format. @@ -3392,9 +3405,12 @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x) /* * AArch64 usage of the PAGE_TARGET_* bits for linux-user. + * Note that with the Linux kernel, PROT_MTE may not be cleared by mprotect + * mprotect but PROT_BTI may be cleared. C.f. the kernel's VM_ARCH_CLEAR. */ -#define PAGE_BTI PAGE_TARGET_1 -#define PAGE_MTE PAGE_TARGET_2 +#define PAGE_BTI PAGE_TARGET_1 +#define PAGE_MTE PAGE_TARGET_2 +#define PAGE_TARGET_STICKY PAGE_MTE #ifdef TARGET_TAGGED_ADDRESSES /** diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index d09fccb0a4..c21739242c 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -439,7 +439,7 @@ static uint32_t arm_debug_exception_fsr(CPUARMState *env) using_lpae = true; } else { if (arm_feature(env, ARM_FEATURE_LPAE) && - (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) { + (env->cp15.tcr_el[target_el] & TTBCR_EAE)) { using_lpae = true; } } diff --git a/target/arm/helper.c b/target/arm/helper.c index cfcad97ce0..1a8b06410e 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -3606,19 +3606,21 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, }; -static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - TCR *tcr = raw_ptr(env, ri); - int maskshift = extract32(value, 0, 3); + ARMCPU *cpu = env_archcpu(env); if (!arm_feature(env, ARM_FEATURE_V8)) { if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { - /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when - * using Long-desciptor translation table format */ + /* + * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when + * using Long-descriptor translation table format + */ value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); } else if (arm_feature(env, ARM_FEATURE_EL3)) { - /* In an implementation that includes the Security Extensions + /* + * In an implementation that includes the Security Extensions * TTBCR has additional fields PD0 [4] and PD1 [5] for * Short-descriptor translation table format. 
*/ @@ -3628,55 +3630,23 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, } } - /* Update the masks corresponding to the TCR bank being written - * Note that we always calculate mask and base_mask, but - * they are only used for short-descriptor tables (ie if EAE is 0); - * for long-descriptor tables the TCR fields are used differently - * and the mask and base_mask values are meaningless. - */ - tcr->raw_tcr = value; - tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); - tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); -} - -static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - TCR *tcr = raw_ptr(env, ri); - if (arm_feature(env, ARM_FEATURE_LPAE)) { /* With LPAE the TTBCR could result in a change of ASID * via the TTBCR.A1 bit, so do a TLB flush. */ tlb_flush(CPU(cpu)); } - /* Preserve the high half of TCR_EL1, set via TTBCR2. */ - value = deposit64(tcr->raw_tcr, 0, 32, value); - vmsa_ttbcr_raw_write(env, ri, value); -} - -static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - TCR *tcr = raw_ptr(env, ri); - - /* Reset both the TCR as well as the masks corresponding to the bank of - * the TCR being reset. - */ - tcr->raw_tcr = 0; - tcr->mask = 0; - tcr->base_mask = 0xffffc000u; + raw_write(env, ri, value); } static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); - TCR *tcr = raw_ptr(env, ri); /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ tlb_flush(CPU(cpu)); - tcr->raw_tcr = value; + raw_write(env, ri, value); } static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3780,15 +3750,15 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .writefn = vmsa_tcr_el12_write, - .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, + .raw_writefn = raw_write, + .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, - .raw_writefn = vmsa_ttbcr_raw_write, - /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. 
*/ - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]), - offsetof(CPUARMState, cp15.tcr_el[1])} }, + .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), + offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, }; /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing @@ -3799,8 +3769,8 @@ static const ARMCPRegInfo ttbcr2_reginfo = { .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, .bank_fieldoffsets = { - offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr), - offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr), + offsetofhigh32(CPUARMState, cp15.tcr_el[3]), + offsetofhigh32(CPUARMState, cp15.tcr_el[1]), }, }; @@ -4216,7 +4186,7 @@ static int vae1_tlbmask(CPUARMState *env) static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, uint64_t addr) { - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t tcr = regime_tcr(env, mmu_idx); int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); int select = extract64(addr, 55, 1); @@ -5403,19 +5373,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, .access = PL2_RW, .writefn = vmsa_tcr_el12_write, - /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, { .name = "VTCR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .type = ARM_CP_ALIAS, .access = PL2_RW, .accessfn = access_el3_aa32ns, - .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, + .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) }, { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .access = PL2_RW, - /* no .writefn needed as this can't cause an ASID change; - * no .raw_writefn or .resetfn needed as we never use mask/base_mask - */ + /* no .writefn needed as this can't cause an ASID change */ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, { .name = "VTTBR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 6, .crm = 2, @@ -5645,12 +5612,8 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, .access = PL3_RW, - /* no .writefn needed as this can't cause an ASID change; - * we must provide a .raw_writefn and .resetfn because we handle - * reset and migration for the AArch32 TTBCR(S), which might be - * using mask and base_mask. 
- */ - .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, + /* no .writefn needed as this can't cause an ASID change */ + .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, @@ -10158,7 +10121,7 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ARMMMUIdx mmu_idx, bool data) { - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t tcr = regime_tcr(env, mmu_idx); bool epd, hpd, using16k, using64k, tsz_oob, ds; int select, tsz, tbi, max_tsz, min_tsz, ps, sh; ARMCPU *cpu = env_archcpu(env); @@ -10849,7 +10812,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, { CPUARMTBFlags flags = {}; ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t tcr = regime_tcr(env, mmu_idx); uint64_t sctlr; int tbii, tbid; @@ -10882,13 +10845,19 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, } if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { int sme_el = sme_exception_el(env, el); + bool sm = FIELD_EX64(env->svcr, SVCR, SM); DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el); if (sme_el == 0) { /* Similarly, do not compute SVL if SME is disabled. */ - DP_TBFLAG_A64(flags, SVL, sve_vqm1_for_el_sm(env, el, true)); + int svl = sve_vqm1_for_el_sm(env, el, true); + DP_TBFLAG_A64(flags, SVL, svl); + if (sm) { + /* If SVE is disabled, we will not have set VL above. */ + DP_TBFLAG_A64(flags, VL, svl); + } } - if (FIELD_EX64(env->svcr, SVCR, SM)) { + if (sm) { DP_TBFLAG_A64(flags, PSTATE_SM, 1); DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el)); } @@ -11222,6 +11191,21 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) } } +static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm) +{ + int exc_el; + + if (sm) { + exc_el = sme_exception_el(env, el); + } else { + exc_el = sve_exception_el(env, el); + } + if (exc_el) { + return 0; /* disabled */ + } + return sve_vqm1_for_el_sm(env, el, sm); +} + /* * Notice a change in SVE vector size when changing EL. */ @@ -11230,7 +11214,7 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, { ARMCPU *cpu = env_archcpu(env); int old_len, new_len; - bool old_a64, new_a64; + bool old_a64, new_a64, sm; /* Nothing to do if no SVE. */ if (!cpu_isar_feature(aa64_sve, cpu)) { @@ -11250,7 +11234,8 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, * invoke ResetSVEState when taking an exception from, or * returning to, AArch32 state when PSTATE.SM is enabled. */ - if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) { + sm = FIELD_EX64(env->svcr, SVCR, SM); + if (old_a64 != new_a64 && sm) { arm_reset_sve_state(env); return; } @@ -11267,10 +11252,13 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, * we already have the correct register contents when encountering the * vq0->vq0 transition between EL0->EL1. */ - old_len = (old_a64 && !sve_exception_el(env, old_el) - ? sve_vqm1_for_el(env, old_el) : 0); - new_len = (new_a64 && !sve_exception_el(env, new_el) - ? sve_vqm1_for_el(env, new_el) : 0); + old_len = new_len = 0; + if (old_a64) { + old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm); + } + if (new_a64) { + new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm); + } /* When changing vector length, clear inaccessible state. 
*/ if (new_len < old_len) { diff --git a/target/arm/internals.h b/target/arm/internals.h index 00e2e710f6..b8fefdff67 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -252,9 +252,9 @@ unsigned int arm_pamax(ARMCPU *cpu); */ static inline bool extended_addresses_enabled(CPUARMState *env) { - TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; + uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; return arm_el_is_aa64(env, 1) || - (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE)); + (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE)); } /* Update a QEMU watchpoint based on the information the guest has set in the @@ -777,20 +777,36 @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; } -/* Return the TCR controlling this translation regime */ -static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) +/* + * These are the fields in VTCR_EL2 which affect both the Secure stage 2 + * and the Non-Secure stage 2 translation regimes (and hence which are + * not present in VSTCR_EL2). + */ +#define VTCR_SHARED_FIELD_MASK \ + (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \ + R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \ + R_VTCR_DS_MASK) + +/* Return the value of the TCR controlling this translation regime */ +static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) { if (mmu_idx == ARMMMUIdx_Stage2) { - return &env->cp15.vtcr_el2; + return env->cp15.vtcr_el2; } if (mmu_idx == ARMMMUIdx_Stage2_S) { /* - * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but - * those are not currently used by QEMU, so just return VSTCR_EL2. + * Secure stage 2 shares fields from VTCR_EL2. We merge those + * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format + * value so the callers don't need to special case this. + * + * If a future architecture change defines bits in VSTCR_EL2 that + * overlap with these VTCR_EL2 fields we may need to revisit this. */ - return &env->cp15.vstcr_el2; + uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK; + v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK; + return v; } - return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; + return env->cp15.tcr_el[regime_el(env, mmu_idx)]; } /** diff --git a/target/arm/ptw.c b/target/arm/ptw.c index e71fc1f429..3261039d93 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -241,9 +241,9 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, if (arm_is_secure_below_el3(env)) { /* Check if page table walk is to secure or non-secure PA space. 
*/ if (*is_secure) { - *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW); + *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); } else { - *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW); + *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); } } else { assert(!*is_secure); @@ -315,20 +315,24 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, uint32_t *table, uint32_t address) { /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ - TCR *tcr = regime_tcr(env, mmu_idx); + uint64_t tcr = regime_tcr(env, mmu_idx); + int maskshift = extract32(tcr, 0, 3); + uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift); + uint32_t base_mask; - if (address & tcr->mask) { - if (tcr->raw_tcr & TTBCR_PD1) { + if (address & mask) { + if (tcr & TTBCR_PD1) { /* Translation table walk disabled for TTBR1 */ return false; } *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; } else { - if (tcr->raw_tcr & TTBCR_PD0) { + if (tcr & TTBCR_PD0) { /* Translation table walk disabled for TTBR0 */ return false; } - *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; + base_mask = ~((uint32_t)0x3fffu >> maskshift); + *table = regime_ttbr(env, mmu_idx, 0) & base_mask; } *table |= (address >> 18) & 0x3ffc; return true; @@ -820,7 +824,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, ARMMMUIdx mmu_idx) { - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t tcr = regime_tcr(env, mmu_idx); uint32_t el = regime_el(env, mmu_idx); int select, tsz; bool epd, hpd; @@ -994,7 +998,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, uint32_t attrs; int32_t stride; int addrsize, inputsize, outputsize; - TCR *tcr = regime_tcr(env, mmu_idx); + uint64_t tcr = regime_tcr(env, mmu_idx); int ap, ns, xn, pxn; uint32_t el = regime_el(env, mmu_idx); uint64_t descaddrmask; @@ -1112,8 +1116,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, * For stage 2 translations the starting level is specified by the * VTCR_EL2.SL0 field (whose interpretation depends on the page size) */ - uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); - uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1); + uint32_t sl0 = extract32(tcr, 6, 2); + uint32_t sl2 = extract64(tcr, 33, 1); uint32_t startlevel; bool ok; @@ -2337,9 +2341,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, ipa_secure = attrs->secure; if (arm_is_secure_below_el3(env)) { if (ipa_secure) { - attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW); + attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW); } else { - attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW); + attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW); } } else { assert(!ipa_secure); @@ -2381,11 +2385,11 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, if (arm_is_secure_below_el3(env)) { if (ipa_secure) { attrs->secure = - !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)); + !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)); } else { attrs->secure = - !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW)) - || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW))); + !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)) + || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))); } } return 0; @@ -2462,7 +2466,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, int r_el = regime_el(env, mmu_idx); if (arm_el_is_aa64(env, r_el)) { int pamax = arm_pamax(env_archcpu(env)); - uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; + 
uint64_t tcr = env->cp15.tcr_el[r_el]; int addrtop, tbi; tbi = aa64_va_parameter_tbi(tcr, mmu_idx); diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c index 7d8a86b3c4..5a709eab56 100644 --- a/target/arm/tlb_helper.c +++ b/target/arm/tlb_helper.c @@ -20,7 +20,7 @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) return true; } if (arm_feature(env, ARM_FEATURE_LPAE) - && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { + && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) { return true; } return false; diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index b7b64f7358..163df8c615 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -3138,7 +3138,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, bool is_store = false; bool is_extended = false; bool is_unpriv = (idx == 2); - bool iss_valid = !is_vector; + bool iss_valid; bool post_index; bool writeback; int memidx; @@ -3191,6 +3191,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, g_assert_not_reached(); } + iss_valid = !is_vector && !writeback; + if (rn == 31) { gen_check_sp_alignment(s); } diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index c6f0879b6e..50634ac459 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -339,12 +339,13 @@ do { \ TCGv LSB = tcg_temp_local_new(); \ TCGLabel *label = gen_new_label(); \ - GET_EA; \ + tcg_gen_movi_tl(EA, 0); \ PRED; \ + CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \ PRED_LOAD_CANCEL(LSB, EA); \ tcg_gen_movi_tl(RdV, 0); \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ - fLOAD(1, SIZE, SIGN, EA, RdV); \ + fLOAD(1, SIZE, SIGN, EA, RdV); \ gen_set_label(label); \ tcg_temp_free(LSB); \ } while (0) @@ -398,12 +399,13 @@ do { \ TCGv LSB = tcg_temp_local_new(); \ TCGLabel *label = gen_new_label(); \ - GET_EA; \ + tcg_gen_movi_tl(EA, 0); \ PRED; \ + CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \ PRED_LOAD_CANCEL(LSB, EA); \ tcg_gen_movi_i64(RddV, 0); \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ - fLOAD(1, 8, u, EA, RddV); \ + fLOAD(1, 8, u, EA, RddV); \ gen_set_label(label); \ tcg_temp_free(LSB); \ } while (0) diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index cd6af4bceb..8a334ba07b 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -638,5 +638,12 @@ static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff) tcg_temp_free_i64(mask); } +static void probe_noshuf_load(TCGv va, int s, int mi) +{ + TCGv size = tcg_constant_tl(s); + TCGv mem_idx = tcg_constant_tl(mi); + gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx); +} + #include "tcg_funcs_generated.c.inc" #include "tcg_func_table_generated.c.inc" diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h index c89aa4ed4d..368f0b5708 100644 --- a/target/hexagon/helper.h +++ b/target/hexagon/helper.h @@ -104,6 +104,7 @@ DEF_HELPER_1(vwhist128q, void, env) DEF_HELPER_2(vwhist128m, void, env, s32) DEF_HELPER_2(vwhist128qm, void, env, s32) +DEF_HELPER_4(probe_noshuf_load, void, env, i32, int, int) DEF_HELPER_2(probe_pkt_scalar_store_s0, void, env, int) DEF_HELPER_2(probe_hvx_stores, void, env, int) DEF_HELPER_3(probe_pkt_scalar_hvx_stores, void, env, int, int) diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index a78e84faa4..92eb8bbf05 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -87,49 +87,66 @@ * * * For qemu, we look for a load in slot 0 when there is a store in slot 1 - * in the same packet. 
When we see this, we call a helper that merges the - * bytes from the store buffer with the value loaded from memory. + * in the same packet. When we see this, we call a helper that probes the + * load to make sure it doesn't fault. Then, we process the store ahead of + * the actual load. + */ -#define CHECK_NOSHUF \ +#define CHECK_NOSHUF(VA, SIZE) \ do { \ if (insn->slot == 0 && pkt->pkt_has_store_s1) { \ + probe_noshuf_load(VA, SIZE, ctx->mem_idx); \ + process_store(ctx, pkt, 1); \ + } \ + } while (0) + +#define CHECK_NOSHUF_PRED(GET_EA, SIZE, PRED) \ + do { \ + TCGLabel *label = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, label); \ + GET_EA; \ + if (insn->slot == 0 && pkt->pkt_has_store_s1) { \ + probe_noshuf_load(EA, SIZE, ctx->mem_idx); \ + } \ + gen_set_label(label); \ + if (insn->slot == 0 && pkt->pkt_has_store_s1) { \ process_store(ctx, pkt, 1); \ } \ } while (0) #define MEM_LOAD1s(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 1); \ tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD1u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 1); \ tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD2s(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 2); \ tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD2u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 2); \ tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD4s(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 4); \ tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD4u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 4); \ tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ } while (0) #define MEM_LOAD8u(DST, VA) \ do { \ - CHECK_NOSHUF; \ + CHECK_NOSHUF(VA, 8); \ tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \ } while (0) diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index a5ed819c04..085afc3274 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -442,6 +442,17 @@ static void probe_store(CPUHexagonState *env, int slot, int mmu_idx) } } +/* + * Called from a mem_noshuf packet to make sure the load doesn't + * raise an exception + */ +void HELPER(probe_noshuf_load)(CPUHexagonState *env, target_ulong va, + int size, int mmu_idx) +{ + uintptr_t retaddr = GETPC(); + probe_read(env, va, size, mmu_idx, retaddr); +} + /* Called during packet commit when there are two scalar stores */ void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int mmu_idx) { @@ -514,10 +525,12 @@ void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask, * If the load is in slot 0 and there is a store in slot1 (that * wasn't cancelled), we have to do the store first. 
*/ -static void check_noshuf(CPUHexagonState *env, uint32_t slot) +static void check_noshuf(CPUHexagonState *env, uint32_t slot, + target_ulong vaddr, int size) { if (slot == 0 && env->pkt_has_store_s1 && ((env->slot_cancelled & (1 << 1)) == 0)) { + HELPER(probe_noshuf_load)(env, vaddr, size, MMU_USER_IDX); HELPER(commit_store)(env, 1); } } @@ -526,7 +539,7 @@ static uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 1); return cpu_ldub_data_ra(env, vaddr, ra); } @@ -534,7 +547,7 @@ static uint16_t mem_load2(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 2); return cpu_lduw_data_ra(env, vaddr, ra); } @@ -542,7 +555,7 @@ static uint32_t mem_load4(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 4); return cpu_ldl_data_ra(env, vaddr, ra); } @@ -550,7 +563,7 @@ static uint64_t mem_load8(CPUHexagonState *env, uint32_t slot, target_ulong vaddr) { uintptr_t ra = GETPC(); - check_noshuf(env, slot); + check_noshuf(env, slot, vaddr, 8); return cpu_ldq_data_ra(env, vaddr, ra); } diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index e21715592a..1c69a76f2b 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -140,7 +140,7 @@ static void loongarch_cpu_do_interrupt(CPUState *cs) if (cs->exception_index != EXCCODE_INT) { if (cs->exception_index < 0 || - cs->exception_index > ARRAY_SIZE(excp_names)) { + cs->exception_index >= ARRAY_SIZE(excp_names)) { name = "unknown"; } else { name = excp_names[cs->exception_index]; @@ -190,8 +190,8 @@ static void loongarch_cpu_do_interrupt(CPUState *cs) cause = cs->exception_index; break; default: - qemu_log("Error: exception(%d) '%s' has not been supported\n", - cs->exception_index, excp_names[cs->exception_index]); + qemu_log("Error: exception(%d) has not been supported\n", + cs->exception_index); abort(); } @@ -341,6 +341,7 @@ static void loongarch_la464_initfn(Object *obj) env->cpucfg[i] = 0x0; } + cpu->dtb_compatible = "loongarch,Loongson-3A5000"; env->cpucfg[0] = 0x14c010; /* PRID */ uint32_t data = 0; @@ -406,7 +407,7 @@ static void loongarch_la464_initfn(Object *obj) data = 0; data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15); data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14); - data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 6); + data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6); env->cpucfg[20] = data; env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa); @@ -571,12 +572,22 @@ static void loongarch_cpu_init(Object *obj) static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; - char *typename; - typename = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); - oc = object_class_by_name(typename); - g_free(typename); - return oc; + oc = object_class_by_name(cpu_model); + if (!oc) { + g_autofree char *typename + = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); + oc = object_class_by_name(typename); + if (!oc) { + return NULL; + } + } + + if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU) + && !object_class_is_abstract(oc)) { + return oc; + } + return NULL; } void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index d141ec9b5d..a36349df83 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -326,6 +326,9 
@@ struct ArchCPU { CPUNegativeOffsetState neg; CPULoongArchState env; QEMUTimer timer; + + /* 'compatible' string for this CPU for Linux device trees */ + const char *dtb_compatible; }; #define TYPE_LOONGARCH_CPU "loongarch-cpu" diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/fpu_helper.c index 3d0cb8dd0d..bd76529219 100644 --- a/target/loongarch/fpu_helper.c +++ b/target/loongarch/fpu_helper.c @@ -13,9 +13,6 @@ #include "fpu/softfloat.h" #include "internals.h" -#define FLOAT_TO_INT32_OVERFLOW 0x7fffffff -#define FLOAT_TO_INT64_OVERFLOW 0x7fffffffffffffffULL - static inline uint64_t nanbox_s(float32 fp) { return fp | MAKE_64BIT_MASK(32, 32); @@ -544,9 +541,10 @@ uint64_t helper_ftintrm_l_d(CPULoongArchState *env, uint64_t fj) fd = float64_to_int64(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -561,9 +559,10 @@ uint64_t helper_ftintrm_l_s(CPULoongArchState *env, uint64_t fj) fd = float32_to_int64((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -578,9 +577,10 @@ uint64_t helper_ftintrm_w_d(CPULoongArchState *env, uint64_t fj) fd = (uint64_t)float64_to_int32(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -595,9 +595,10 @@ uint64_t helper_ftintrm_w_s(CPULoongArchState *env, uint64_t fj) fd = (uint64_t)float32_to_int32((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -612,9 +613,10 @@ uint64_t helper_ftintrp_l_d(CPULoongArchState *env, uint64_t fj) fd = float64_to_int64(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -629,9 +631,10 @@ uint64_t helper_ftintrp_l_s(CPULoongArchState *env, uint64_t fj) fd = float32_to_int64((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } 
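+        /*
+         * Only a NaN input is forced to zero; on overflow the saturated
+         * result already produced by the softfloat conversion is kept.
+         */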
} update_fcsr0(env, GETPC()); return fd; @@ -646,9 +649,10 @@ uint64_t helper_ftintrp_w_d(CPULoongArchState *env, uint64_t fj) fd = (uint64_t)float64_to_int32(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -663,9 +667,10 @@ uint64_t helper_ftintrp_w_s(CPULoongArchState *env, uint64_t fj) fd = (uint64_t)float32_to_int32((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -679,9 +684,10 @@ uint64_t helper_ftintrz_l_d(CPULoongArchState *env, uint64_t fj) fd = float64_to_int64_round_to_zero(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -695,9 +701,10 @@ uint64_t helper_ftintrz_l_s(CPULoongArchState *env, uint64_t fj) fd = float32_to_int64_round_to_zero((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -711,9 +718,10 @@ uint64_t helper_ftintrz_w_d(CPULoongArchState *env, uint64_t fj) fd = (uint64_t)float64_to_int32_round_to_zero(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -727,9 +735,10 @@ uint64_t helper_ftintrz_w_s(CPULoongArchState *env, uint64_t fj) fd = float32_to_int32_round_to_zero((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return (uint64_t)fd; @@ -744,9 +753,10 @@ uint64_t helper_ftintrne_l_d(CPULoongArchState *env, uint64_t fj) fd = float64_to_int64(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -761,9 +771,10 @@ uint64_t 
helper_ftintrne_l_s(CPULoongArchState *env, uint64_t fj) fd = float32_to_int64((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -778,9 +789,10 @@ uint64_t helper_ftintrne_w_d(CPULoongArchState *env, uint64_t fj) fd = (uint64_t)float64_to_int32(fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -795,9 +807,10 @@ uint64_t helper_ftintrne_w_s(CPULoongArchState *env, uint64_t fj) fd = float32_to_int32((uint32_t)fj, &env->fp_status); set_float_rounding_mode(old_mode, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return (uint64_t)fd; @@ -808,9 +821,10 @@ uint64_t helper_ftint_l_d(CPULoongArchState *env, uint64_t fj) uint64_t fd; fd = float64_to_int64(fj, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -821,9 +835,10 @@ uint64_t helper_ftint_l_s(CPULoongArchState *env, uint64_t fj) uint64_t fd; fd = float32_to_int64((uint32_t)fj, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) & - (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT64_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -834,9 +849,10 @@ uint64_t helper_ftint_w_s(CPULoongArchState *env, uint64_t fj) uint64_t fd; fd = (uint64_t)float32_to_int32((uint32_t)fj, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) - & (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float32_is_any_nan((uint32_t)fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; @@ -847,9 +863,10 @@ uint64_t helper_ftint_w_d(CPULoongArchState *env, uint64_t fj) uint64_t fd; fd = (uint64_t)float64_to_int32(fj, &env->fp_status); - if (get_float_exception_flags(&env->fp_status) - & (float_flag_invalid | float_flag_overflow)) { - fd = FLOAT_TO_INT32_OVERFLOW; + if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { + if (float64_is_any_nan(fj)) { + fd = 0; + } } update_fcsr0(env, GETPC()); return fd; diff --git a/target/loongarch/op_helper.c b/target/loongarch/op_helper.c index 4b429b6699..568c071601 100644 --- a/target/loongarch/op_helper.c +++ b/target/loongarch/op_helper.c @@ -81,7 +81,7 @@ target_ulong helper_crc32c(target_ulong val, target_ulong m, uint64_t sz) target_ulong 
helper_cpucfg(CPULoongArchState *env, target_ulong rj) { - return rj > 21 ? 0 : env->cpucfg[rj]; + return rj >= ARRAY_SIZE(env->cpucfg) ? 0 : env->cpucfg[rj]; } uint64_t helper_rdtime_d(CPULoongArchState *env) diff --git a/target/loongarch/tlb_helper.c b/target/loongarch/tlb_helper.c index bab19c7e05..610b6d123c 100644 --- a/target/loongarch/tlb_helper.c +++ b/target/loongarch/tlb_helper.c @@ -298,7 +298,7 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index) } else { tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); } - pagesize = 1 << tlb_ps; + pagesize = MAKE_64BIT_MASK(tlb_ps, 1); mask = MAKE_64BIT_MASK(0, tlb_ps + 1); if (tlb_v0) { @@ -736,7 +736,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, (tmp0 & (~(1 << R_TLBENTRY_G_SHIFT))); ps = ptbase + ptwidth - 1; if (odd) { - tmp0 += (1 << ps); + tmp0 += MAKE_64BIT_MASK(ps, 1); } } else { /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 7aaff9dcc5..a4c893cfad 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1184,7 +1184,6 @@ struct CPUArchState { * by recent Book3s compatible CPUs (POWER7 and newer). */ uint32_t irq_input_state; - void **irq_inputs; target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */ target_ulong excp_prefix; @@ -2215,8 +2214,6 @@ enum { PPC_DCR = 0x1000000000000000ULL, /* DCR extended accesse */ PPC_DCRX = 0x2000000000000000ULL, - /* user-mode DCR access, implemented in PowerPC 460 */ - PPC_DCRUX = 0x4000000000000000ULL, /* popcntw and popcntd instructions */ PPC_POPCNTWD = 0x8000000000000000ULL, @@ -2240,8 +2237,8 @@ enum { | PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \ | PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \ | PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \ - | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \ - | PPC_POPCNTWD | PPC_CILDST) + | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_POPCNTWD \ + | PPC_CILDST) /* extended type values */ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 86ad28466a..d1493a660c 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -6373,7 +6373,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBSYNC | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | @@ -6591,7 +6591,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBSYNC | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | @@ -6678,7 +6678,6 @@ static void init_ppc_proc(PowerPCCPU *cpu) #if !defined(CONFIG_USER_ONLY) int i; - env->irq_inputs = NULL; /* Set all exception vectors to an invalid address */ for (i = 0; i < POWERPC_EXCP_NB; i++) { env->excp_vectors[i] = (target_ulong)(-1ULL); @@ -6808,10 +6807,6 @@ static void init_ppc_proc(PowerPCCPU *cpu) /* Pre-compute some useful values */ env->tlb_per_way = env->nb_tlb / env->nb_ways; } - if (env->irq_inputs == NULL) { - warn_report("no internal IRQ controller registered." - " Attempt QEMU to crash very soon !"); - } #endif if (env->check_pow == NULL) { warn_report("no power management check handler registered." 
@@ -7476,17 +7471,15 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) "%08x iidx %d didx %d\n", env->msr, env->spr[SPR_HID0], env->hflags, cpu_mmu_index(env, true), cpu_mmu_index(env, false)); -#if !defined(NO_TIMER_DUMP) - qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 #if !defined(CONFIG_USER_ONLY) - " DECR " TARGET_FMT_lu -#endif - "\n", - cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env) -#if !defined(CONFIG_USER_ONLY) - , cpu_ppc_load_decr(env) -#endif - ); + if (env->tb_env) { + qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 + " DECR " TARGET_FMT_lu "\n", cpu_ppc_load_tbu(env), + cpu_ppc_load_tbl(env), cpu_ppc_load_decr(env)); + } +#else + qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 "\n", cpu_ppc_load_tbu(env), + cpu_ppc_load_tbl(env)); #endif for (i = 0; i < 32; i++) { if ((i & (RGPL - 1)) == 0) { diff --git a/target/ppc/helper.h b/target/ppc/helper.h index ed0641a234..159b352f6e 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -674,13 +674,16 @@ DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl) #if defined(TARGET_PPC64) -DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl) -DEF_HELPER_2(load_slb_esid, tl, env, tl) -DEF_HELPER_2(load_slb_vsid, tl, env, tl) -DEF_HELPER_2(find_slb_vsid, tl, env, tl) -DEF_HELPER_FLAGS_2(slbia, TCG_CALL_NO_RWG, void, env, i32) -DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl) -DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_4(tlbie_isa300, TCG_CALL_NO_WG, void, \ + env, tl, tl, i32) +DEF_HELPER_FLAGS_3(SLBMTE, TCG_CALL_NO_RWG, void, env, tl, tl) +DEF_HELPER_2(SLBMFEE, tl, env, tl) +DEF_HELPER_2(SLBMFEV, tl, env, tl) +DEF_HELPER_2(SLBFEE, tl, env, tl) +DEF_HELPER_FLAGS_2(SLBIA, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_3(SLBIAG, TCG_CALL_NO_RWG, void, env, tl, i32) +DEF_HELPER_FLAGS_2(SLBIE, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(SLBIEG, TCG_CALL_NO_RWG, void, env, tl) #endif DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl) DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl) @@ -694,10 +697,10 @@ DEF_HELPER_2(book3s_msgclr, void, env, tl) DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) #if !defined(CONFIG_USER_ONLY) DEF_HELPER_2(rac, tl, env, tl) -#endif DEF_HELPER_2(load_dcr, tl, env, tl) DEF_HELPER_3(store_dcr, void, env, tl, tl) +#endif DEF_HELPER_2(load_dump_spr, void, env, i32) DEF_HELPER_2(store_dump_spr, void, env, i32) diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index f7653ef9d5..eb41efc100 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -146,6 +146,15 @@ &X_imm8 xt imm:uint8_t @X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt +&X_ih ih:uint8_t +@X_ih ...... .. ih:3 ..... ..... .......... . &X_ih + +&X_rb rb +@X_rb ...... ..... ..... rb:5 .......... . &X_rb + +&X_rs_l rs l:bool +@X_rs_l ...... rs:5 .... l:1 ..... .......... . &X_rs_l + &X_uim5 xt uim:uint8_t @X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt @@ -856,3 +865,28 @@ VMODSD 000100 ..... ..... ..... 11111001011 @VX VMODUD 000100 ..... ..... ..... 11011001011 @VX VMODSQ 000100 ..... ..... ..... 11100001011 @VX VMODUQ 000100 ..... ..... ..... 11000001011 @VX + +## SLB Management Instructions + +SLBIE 011111 ----- ----- ..... 0110110010 - @X_rb +SLBIEG 011111 ..... ----- ..... 0111010010 - @X_tb + +SLBIA 011111 --... ----- ----- 0111110010 - @X_ih +SLBIAG 011111 ..... ----. 
----- 1101010010 - @X_rs_l + +SLBMTE 011111 ..... ----- ..... 0110010010 - @X_tb + +SLBMFEV 011111 ..... ----- ..... 1101010011 - @X_tb +SLBMFEE 011111 ..... ----- ..... 1110010011 - @X_tb + +SLBFEE 011111 ..... ----- ..... 1111010011 1 @X_tb + +SLBSYNC 011111 ----- ----- ----- 0101010010 - + +## TLB Management Instructions + +&X_tlbie rb rs ric prs:bool r:bool +@X_tlbie ...... rs:5 - ric:2 prs:1 r:1 rb:5 .......... - &X_tlbie + +TLBIE 011111 ..... - .. . . ..... 0100110010 - @X_tlbie +TLBIEL 011111 ..... - .. . . ..... 0100010010 - @X_tlbie diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 6eed466f80..466d0d2f4c 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -1877,6 +1877,12 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len) buf[0] = '\0'; while ((dirp = readdir(dp)) != NULL) { FILE *f; + + /* Don't accidentally read from the current and parent directories */ + if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) { + continue; + } + snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU, dirp->d_name); f = fopen(buf, "r"); diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c index f4985bae78..c8f69b3df9 100644 --- a/target/ppc/mmu-book3s-v3.c +++ b/target/ppc/mmu-book3s-v3.c @@ -28,6 +28,11 @@ bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry) uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB; uint64_t pats = cpu->env.spr[SPR_PTCR] & PTCR_PATS; + /* Check if partition table is properly aligned */ + if (patb & MAKE_64BIT_MASK(0, pats + 12)) { + return false; + } + /* Calculate number of entries */ pats = 1ull << (pats + 12 - 4); if (pats <= lpid) { diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h index d6d5ed8f8e..674377a19e 100644 --- a/target/ppc/mmu-book3s-v3.h +++ b/target/ppc/mmu-book3s-v3.h @@ -50,6 +50,21 @@ struct prtb_entry { #ifdef TARGET_PPC64 +/* + * tlbie[l] helper flags + * + * RIC, PRS, R and local are passed as flags in the last argument. + */ +#define TLBIE_F_RIC_SHIFT 0 +#define TLBIE_F_PRS_SHIFT 2 +#define TLBIE_F_R_SHIFT 3 +#define TLBIE_F_LOCAL_SHIFT 4 + +#define TLBIE_F_RIC_MASK (3 << TLBIE_F_RIC_SHIFT) +#define TLBIE_F_PRS (1 << TLBIE_F_PRS_SHIFT) +#define TLBIE_F_R (1 << TLBIE_F_R_SHIFT) +#define TLBIE_F_LOCAL (1 << TLBIE_F_LOCAL_SHIFT) + static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu) { return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT); diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index da9fe99ff8..b9b31fd276 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -101,7 +101,7 @@ void dump_slb(PowerPCCPU *cpu) } #ifdef CONFIG_TCG -void helper_slbia(CPUPPCState *env, uint32_t ih) +void helper_SLBIA(CPUPPCState *env, uint32_t ih) { PowerPCCPU *cpu = env_archcpu(env); int starting_entry; @@ -173,6 +173,33 @@ void helper_slbia(CPUPPCState *env, uint32_t ih) } } +#if defined(TARGET_PPC64) +void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l) +{ + PowerPCCPU *cpu = env_archcpu(env); + int n; + + /* + * slbiag must always flush all TLB (which is equivalent to ERAT in ppc + * architecture). Matching on SLB_ESID_V is not good enough, because slbmte + * can overwrite a valid SLB without flushing its lookaside information. + * + * It would be possible to keep the TLB in synch with the SLB by flushing + * when a valid entry is overwritten by slbmte, and therefore slbiag would + * not have to flush unless it evicts a valid SLB entry. 
However it is + * expected that slbmte is more common than slbiag, and slbiag is usually + * going to evict valid SLB entries, so that tradeoff is unlikely to be a + * good one. + */ + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + + for (n = 0; n < cpu->hash64_opts->slb_size; n++) { + ppc_slb_t *slb = &env->slb[n]; + slb->esid &= ~SLB_ESID_V; + } +} +#endif + static void __helper_slbie(CPUPPCState *env, target_ulong addr, target_ulong global) { @@ -197,12 +224,12 @@ static void __helper_slbie(CPUPPCState *env, target_ulong addr, } } -void helper_slbie(CPUPPCState *env, target_ulong addr) +void helper_SLBIE(CPUPPCState *env, target_ulong addr) { __helper_slbie(env, addr, false); } -void helper_slbieg(CPUPPCState *env, target_ulong addr) +void helper_SLBIEG(CPUPPCState *env, target_ulong addr) { __helper_slbie(env, addr, true); } @@ -309,7 +336,7 @@ static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, return 0; } -void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) +void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs) { PowerPCCPU *cpu = env_archcpu(env); @@ -319,7 +346,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) } } -target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) +target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; @@ -331,7 +358,7 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) return rt; } -target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) +target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; @@ -343,7 +370,7 @@ target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) return rt; } -target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) +target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb) { PowerPCCPU *cpu = env_archcpu(env); target_ulong rt = 0; diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index 21ac958e48..00f2e9fa2e 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -236,16 +236,36 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type, } } +static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls) +{ + /* + * Check if this is a valid level, according to POWER9 and POWER10 + * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively: + * Supported Radix Tree Configurations and Resulting Page Sizes. + * + * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future + * CPUs that supports a different Radix MMU configuration will need their + * own implementation. 
+ */ + switch (level) { + case 0: /* Root Page Dir */ + return psize == 52 && nls == 13; + case 1: + case 2: + return nls == 9; + case 3: + return nls == 9 || nls == 5; + default: + qemu_log_mask(LOG_GUEST_ERROR, "invalid radix level: %d\n", level); + return false; + } +} + static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr, uint64_t *pte_addr, uint64_t *nls, int *psize, uint64_t *pte, int *fault_cause) { - uint64_t index, pde; - - if (*nls < 5) { /* Directory maps less than 2**5 entries */ - *fault_cause |= DSISR_R_BADCONFIG; - return 1; - } + uint64_t index, mask, nlb, pde; /* Read page <directory/table> entry from guest address space */ pde = ldq_phys(as, *pte_addr); @@ -260,7 +280,17 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr, *nls = pde & R_PDE_NLS; index = eaddr >> (*psize - *nls); /* Shift */ index &= ((1UL << *nls) - 1); /* Mask */ - *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde)); + nlb = pde & R_PDE_NLB; + mask = MAKE_64BIT_MASK(0, *nls + 3); + + if (nlb & mask) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx + " page dir size: 0x"TARGET_FMT_lx"\n", + __func__, nlb, mask + 1); + nlb &= ~mask; + } + *pte_addr = nlb + index * sizeof(pde); } return 0; } @@ -270,19 +300,30 @@ static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr, hwaddr *raddr, int *psize, uint64_t *pte, int *fault_cause, hwaddr *pte_addr) { - uint64_t index, pde, rpn , mask; + uint64_t index, pde, rpn, mask; + int level = 0; - if (nls < 5) { /* Directory maps less than 2**5 entries */ - *fault_cause |= DSISR_R_BADCONFIG; - return 1; + index = eaddr >> (*psize - nls); /* Shift */ + index &= ((1UL << nls) - 1); /* Mask */ + mask = MAKE_64BIT_MASK(0, nls + 3); + + if (base_addr & mask) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: misaligned page dir base: 0x"TARGET_FMT_lx + " page dir size: 0x"TARGET_FMT_lx"\n", + __func__, base_addr, mask + 1); + base_addr &= ~mask; } + *pte_addr = base_addr + index * sizeof(pde); - index = eaddr >> (*psize - nls); /* Shift */ - index &= ((1UL << nls) - 1); /* Mask */ - *pte_addr = base_addr + (index * sizeof(pde)); do { int ret; + if (!ppc_radix64_is_valid_level(level++, *psize, nls)) { + *fault_cause |= DSISR_R_BADCONFIG; + return 1; + } + ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde, fault_cause); if (ret) { @@ -383,7 +424,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte; + uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte; int fault_cause = 0, h_page_size, h_prot; hwaddr h_raddr, pte_addr; int ret; @@ -393,9 +434,18 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, __func__, access_str(access_type), eaddr, mmu_idx, pid); + prtb = (pate.dw1 & PATE1_R_PRTB); + size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); + if (prtb & (size - 1)) { + /* Process Table not properly aligned */ + if (guest_visible) { + ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG); + } + return 1; + } + /* Index Process Table by PID to Find Corresponding Process Table Entry */ offset = pid * sizeof(struct prtb_entry); - size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); if (offset >= size) { /* offset exceeds size of the process table */ if (guest_visible) { @@ -403,7 +453,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, } return 1; } - prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset; + 
prtbe_addr = prtb + offset; if (vhyp_flat_addressing(cpu)) { prtbe0 = ldq_phys(cs->as, prtbe_addr); @@ -447,6 +497,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, } } else { uint64_t rpn, mask; + int level = 0; index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */ index &= ((1UL << nls) - 1); /* Mask */ @@ -466,6 +517,11 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, return ret; } + if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) { + fault_cause |= DSISR_R_BADCONFIG; + return 1; + } + ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr, &nls, g_page_size, &pte, &fault_cause); if (ret) { @@ -568,7 +624,7 @@ static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr, return false; } - /* Get Process Table */ + /* Get Partition Table */ if (cpu->vhyp) { PPCVirtualHypervisorClass *vhc; vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index 15239dc95b..2a91f3f46a 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -429,6 +429,160 @@ void helper_tlbie(CPUPPCState *env, target_ulong addr) ppc_tlb_invalidate_one(env, addr); } +#if defined(TARGET_PPC64) + +/* Invalidation Selector */ +#define TLBIE_IS_VA 0 +#define TLBIE_IS_PID 1 +#define TLBIE_IS_LPID 2 +#define TLBIE_IS_ALL 3 + +/* Radix Invalidation Control */ +#define TLBIE_RIC_TLB 0 +#define TLBIE_RIC_PWC 1 +#define TLBIE_RIC_ALL 2 +#define TLBIE_RIC_GRP 3 + +/* Radix Actual Page sizes */ +#define TLBIE_R_AP_4K 0 +#define TLBIE_R_AP_64K 5 +#define TLBIE_R_AP_2M 1 +#define TLBIE_R_AP_1G 2 + +/* RB field masks */ +#define TLBIE_RB_EPN_MASK PPC_BITMASK(0, 51) +#define TLBIE_RB_IS_MASK PPC_BITMASK(52, 53) +#define TLBIE_RB_AP_MASK PPC_BITMASK(56, 58) + +void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs, + uint32_t flags) +{ + unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT; + /* + * With the exception of the checks for invalid instruction forms, + * PRS is currently ignored, because we don't know if a given TLB entry + * is process or partition scoped. + */ + bool prs = flags & TLBIE_F_PRS; + bool r = flags & TLBIE_F_R; + bool local = flags & TLBIE_F_LOCAL; + bool effR; + unsigned is = extract64(rb, PPC_BIT_NR(53), 2); + unsigned ap; /* actual page size */ + target_ulong addr, pgoffs_mask; + + qemu_log_mask(CPU_LOG_MMU, + "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n", + __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is); + + effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR; + + /* Partial TLB invalidation is supported for Radix only for now. */ + if (!effR) { + goto inval_all; + } + + /* Check for invalid instruction forms (effR=1). */ + if (unlikely(ric == TLBIE_RIC_GRP || + ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) && + is == TLBIE_IS_VA) || + (!prs && is == TLBIE_IS_PID))) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n", + __func__, ric, prs, r, is); + goto invalid; + } + + /* We don't cache Page Walks. */ + if (ric == TLBIE_RIC_PWC) { + if (local) { + unsigned set = extract64(rb, PPC_BIT_NR(51), 12); + if (set != 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n", + __func__, set); + goto invalid; + } + } + return; + } + + /* + * Invalidation by LPID or PID is not supported, so fallback + * to full TLB flush in these cases. 
+ */ + if (is != TLBIE_IS_VA) { + goto inval_all; + } + + /* + * The results of an attempt to invalidate a translation outside of + * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0, + * and EA 0:1 != 0b00) are boundedly undefined. + */ + if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA && + (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: attempt to invalidate a translation outside of quadrant 0\n", + __func__); + goto inval_all; + } + + assert(is == TLBIE_IS_VA); + assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL); + + ap = extract64(rb, PPC_BIT_NR(58), 3); + switch (ap) { + case TLBIE_R_AP_4K: + pgoffs_mask = 0xfffull; + break; + + case TLBIE_R_AP_64K: + pgoffs_mask = 0xffffull; + break; + + case TLBIE_R_AP_2M: + pgoffs_mask = 0x1fffffull; + break; + + case TLBIE_R_AP_1G: + pgoffs_mask = 0x3fffffffull; + break; + + default: + /* + * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58, + * RB 44:51, or RB 56:63, when it is needed to perform the specified + * operation, is not supported by the implementation, the instruction + * is treated as if the instruction form were invalid. + */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap); + goto invalid; + } + + addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask; + + if (local) { + tlb_flush_page(env_cpu(env), addr); + } else { + tlb_flush_page_all_cpus(env_cpu(env), addr); + } + return; + +inval_all: + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + if (!local) { + env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH; + } + return; + +invalid: + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL, GETPC()); +} + +#endif + void helper_tlbiva(CPUPPCState *env, target_ulong addr) { /* tlbiva instruction only exists on BookE */ diff --git a/target/ppc/monitor.c b/target/ppc/monitor.c index 0b805ef6e9..8250b1304e 100644 --- a/target/ppc/monitor.c +++ b/target/ppc/monitor.c @@ -55,6 +55,9 @@ static target_long monitor_get_decr(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); + if (!env->tb_env) { + return 0; + } return cpu_ppc_load_decr(env); } @@ -62,6 +65,9 @@ static target_long monitor_get_tbu(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); + if (!env->tb_env) { + return 0; + } return cpu_ppc_load_tbu(env); } @@ -69,6 +75,9 @@ static target_long monitor_get_tbl(Monitor *mon, const struct MonitorDef *md, int val) { CPUArchState *env = mon_get_cpu_env(mon); + if (!env->tb_env) { + return 0; + } return cpu_ppc_load_tbl(env); } diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc index 2bab6cece7..c3cc919ee4 100644 --- a/target/ppc/power8-pmu-regs.c.inc +++ b/target/ppc/power8-pmu-regs.c.inc @@ -22,7 +22,7 @@ static bool spr_groupA_read_allowed(DisasContext *ctx) { if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); return false; } @@ -46,10 +46,10 @@ static bool spr_groupA_write_allowed(DisasContext *ctx) if (ctx->mmcr0_pmcc1) { /* PMCC = 0b01 */ - gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); } else { /* PMCC = 0b00 */ - gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG); } return false; @@ -214,7 +214,7 @@ void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) * Interrupt. 
*/ if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); return; } @@ -249,7 +249,7 @@ void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn) * Interrupt. */ if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU); return; } diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c index 86d01d6e4e..b80f56af7e 100644 --- a/target/ppc/timebase_helper.c +++ b/target/ppc/timebase_helper.c @@ -143,7 +143,6 @@ void helper_store_booke_tsr(CPUPPCState *env, target_ulong val) { store_booke_tsr(env, val); } -#endif /*****************************************************************************/ /* Embedded PowerPC specific helpers */ @@ -169,7 +168,7 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) (uint32_t)dcrn, (uint32_t)dcrn); raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | - POWERPC_EXCP_PRIV_REG, GETPC()); + POWERPC_EXCP_INVAL_INVAL, GETPC()); } } return val; @@ -192,7 +191,8 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) (uint32_t)dcrn, (uint32_t)dcrn); raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | - POWERPC_EXCP_PRIV_REG, GETPC()); + POWERPC_EXCP_INVAL_INVAL, GETPC()); } } } +#endif diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 1d6daa4608..5a18ee577f 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -907,9 +907,9 @@ void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; } else { - printf("Trying to write an unknown exception vector %d %03x\n", - sprn, sprn); - gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception" + " vector 0x%03x\n", sprn); + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); return; } @@ -1267,38 +1267,43 @@ typedef struct opcode_t { const char *oname; } opcode_t; +static void gen_priv_opc(DisasContext *ctx) +{ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); +} + /* Helpers for priv. 
check */ -#define GEN_PRIV \ - do { \ - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \ +#define GEN_PRIV(CTX) \ + do { \ + gen_priv_opc(CTX); return; \ } while (0) #if defined(CONFIG_USER_ONLY) -#define CHK_HV GEN_PRIV -#define CHK_SV GEN_PRIV -#define CHK_HVRM GEN_PRIV +#define CHK_HV(CTX) GEN_PRIV(CTX) +#define CHK_SV(CTX) GEN_PRIV(CTX) +#define CHK_HVRM(CTX) GEN_PRIV(CTX) #else -#define CHK_HV \ - do { \ - if (unlikely(ctx->pr || !ctx->hv)) { \ - GEN_PRIV; \ - } \ +#define CHK_HV(CTX) \ + do { \ + if (unlikely(ctx->pr || !ctx->hv)) {\ + GEN_PRIV(CTX); \ + } \ } while (0) -#define CHK_SV \ +#define CHK_SV(CTX) \ do { \ if (unlikely(ctx->pr)) { \ - GEN_PRIV; \ + GEN_PRIV(CTX); \ } \ } while (0) -#define CHK_HVRM \ - do { \ - if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ - GEN_PRIV; \ - } \ +#define CHK_HVRM(CTX) \ + do { \ + if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ + GEN_PRIV(CTX); \ + } \ } while (0) #endif -#define CHK_NONE +#define CHK_NONE(CTX) /*****************************************************************************/ /* PowerPC instructions table */ @@ -3252,7 +3257,7 @@ GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ)) static void glue(gen_, name##x)(DisasContext *ctx) \ { \ TCGv EA; \ - chk; \ + chk(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3270,7 +3275,7 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ static void glue(gen_, name##epx)(DisasContext *ctx) \ { \ TCGv EA; \ - CHK_SV; \ + CHK_SV(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3298,7 +3303,7 @@ GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) static void glue(gen_, name##x)(DisasContext *ctx) \ { \ TCGv EA; \ - chk; \ + chk(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -3315,7 +3320,7 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ static void glue(gen_, name##epx)(DisasContext *ctx) \ { \ TCGv EA; \ - CHK_SV; \ + CHK_SV(ctx); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ @@ -4078,11 +4083,11 @@ static void gen_wait(DisasContext *ctx) static void gen_doze(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_DOZE); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4094,11 +4099,11 @@ static void gen_doze(DisasContext *ctx) static void gen_nap(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_NAP); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4110,11 +4115,11 @@ static void gen_nap(DisasContext *ctx) static void gen_stop(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_STOP); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4126,11 +4131,11 @@ static void gen_stop(DisasContext *ctx) static void gen_sleep(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = tcg_const_i32(PPC_PM_SLEEP); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4142,11 +4147,11 @@ static void gen_sleep(DisasContext *ctx) static void gen_rvwinkle(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv_i32 t; - CHK_HV; + CHK_HV(ctx); t = 
tcg_const_i32(PPC_PM_RVWINKLE); gen_helper_pminsn(cpu_env, t); tcg_temp_free_i32(t); @@ -4476,7 +4481,7 @@ static void gen_mcrf(DisasContext *ctx) static void gen_rfi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* * This instruction doesn't exist anymore on 64-bit server @@ -4487,7 +4492,7 @@ static void gen_rfi(DisasContext *ctx) return; } /* Restore CPU state */ - CHK_SV; + CHK_SV(ctx); gen_icount_io_start(ctx); gen_update_cfar(ctx, ctx->cia); gen_helper_rfi(cpu_env); @@ -4499,10 +4504,10 @@ static void gen_rfi(DisasContext *ctx) static void gen_rfid(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* Restore CPU state */ - CHK_SV; + CHK_SV(ctx); gen_icount_io_start(ctx); gen_update_cfar(ctx, ctx->cia); gen_helper_rfid(cpu_env); @@ -4514,10 +4519,10 @@ static void gen_rfid(DisasContext *ctx) static void gen_rfscv(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* Restore CPU state */ - CHK_SV; + CHK_SV(ctx); gen_icount_io_start(ctx); gen_update_cfar(ctx, ctx->cia); gen_helper_rfscv(cpu_env); @@ -4529,10 +4534,10 @@ static void gen_rfscv(DisasContext *ctx) static void gen_hrfid(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else /* Restore CPU state */ - CHK_HV; + CHK_HV(ctx); gen_helper_hrfid(cpu_env); ctx->base.is_jmp = DISAS_EXIT; #endif @@ -4733,7 +4738,7 @@ static void gen_mfcr(DisasContext *ctx) /* mfmsr */ static void gen_mfmsr(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr); } @@ -4789,11 +4794,11 @@ static inline void gen_op_mfspr(DisasContext *ctx) */ if (sprn & 0x10) { if (ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } } @@ -4847,7 +4852,7 @@ static void gen_mtmsrd(DisasContext *ctx) return; } - CHK_SV; + CHK_SV(ctx); #if !defined(CONFIG_USER_ONLY) TCGv t0, t1; @@ -4890,7 +4895,7 @@ static void gen_mtmsrd(DisasContext *ctx) static void gen_mtmsr(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); #if !defined(CONFIG_USER_ONLY) TCGv t0, t1; @@ -4976,11 +4981,11 @@ static void gen_mtspr(DisasContext *ctx) */ if (sprn & 0x10) { if (ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } else { if (ctx->pr || sprn == 0) { - gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG); } } } @@ -5022,7 +5027,7 @@ static void gen_dcbfep(DisasContext *ctx) { /* XXX: specification says this is treated as a load by the MMU */ TCGv t0; - CHK_SV; + CHK_SV(ctx); gen_set_access_type(ctx, ACCESS_CACHE); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); @@ -5034,11 +5039,11 @@ static void gen_dcbfep(DisasContext *ctx) static void gen_dcbi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv EA, val; - CHK_SV; + CHK_SV(ctx); EA = tcg_temp_new(); gen_set_access_type(ctx, ACCESS_CACHE); gen_addr_reg_index(ctx, EA); @@ -5223,11 +5228,11 @@ static void gen_dcba(DisasContext *ctx) static void gen_mfsr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); 
tcg_temp_free(t0); @@ -5238,11 +5243,11 @@ static void gen_mfsr(DisasContext *ctx) static void gen_mfsrin(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -5254,11 +5259,11 @@ static void gen_mfsrin(DisasContext *ctx) static void gen_mtsr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); @@ -5269,10 +5274,10 @@ static void gen_mtsr(DisasContext *ctx) static void gen_mtsrin(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); @@ -5288,11 +5293,11 @@ static void gen_mtsrin(DisasContext *ctx) static void gen_mfsr_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); @@ -5303,11 +5308,11 @@ static void gen_mfsr_64b(DisasContext *ctx) static void gen_mfsrin_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -5319,11 +5324,11 @@ static void gen_mfsrin_64b(DisasContext *ctx) static void gen_mtsr_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); @@ -5334,11 +5339,11 @@ static void gen_mtsr_64b(DisasContext *ctx) static void gen_mtsrin_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); @@ -5346,67 +5351,6 @@ static void gen_mtsrin_64b(DisasContext *ctx) #endif /* defined(CONFIG_USER_ONLY) */ } -/* slbmte */ -static void gen_slbmte(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)], - cpu_gpr[rS(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_slbmfee(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env, - cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_slbmfev(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, - cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -static void gen_slbfee_(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); -#else - TCGLabel *l1, *l2; - - if (unlikely(ctx->pr)) { - gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); - return; - } - gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, - cpu_gpr[rB(ctx->opcode)]); - l1 = gen_new_label(); - l2 
= gen_new_label(); - tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1); - tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0); - gen_set_label(l2); -#endif -} #endif /* defined(TARGET_PPC64) */ /*** Lookaside buffer management ***/ @@ -5416,83 +5360,25 @@ static void gen_slbfee_(DisasContext *ctx) static void gen_tlbia(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_HV; + CHK_HV(ctx); gen_helper_tlbia(cpu_env); #endif /* defined(CONFIG_USER_ONLY) */ } -/* tlbiel */ -static void gen_tlbiel(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - bool psr = (ctx->opcode >> 17) & 0x1; - - if (ctx->pr || (!ctx->hv && !psr && ctx->hr)) { - /* - * tlbiel is privileged except when PSR=0 and HR=1, making it - * hypervisor privileged. - */ - GEN_PRIV; - } - - gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* tlbie */ -static void gen_tlbie(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - bool psr = (ctx->opcode >> 17) & 0x1; - TCGv_i32 t1; - - if (ctx->pr) { - /* tlbie is privileged... */ - GEN_PRIV; - } else if (!ctx->hv) { - if (!ctx->gtse || (!psr && ctx->hr)) { - /* - * ... except when GTSE=0 or when PSR=0 and HR=1, making it - * hypervisor privileged. - */ - GEN_PRIV; - } - } - - if (NARROW_MODE(ctx)) { - TCGv t0 = tcg_temp_new(); - tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]); - gen_helper_tlbie(cpu_env, t0); - tcg_temp_free(t0); - } else { - gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); - } - t1 = tcg_temp_new_i32(); - tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); - tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH); - tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); - tcg_temp_free_i32(t1); -#endif /* defined(CONFIG_USER_ONLY) */ -} - /* tlbsync */ static void gen_tlbsync(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else if (ctx->gtse) { - CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */ + CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */ } else { - CHK_HV; /* Else hypervisor privileged */ + CHK_HV(ctx); /* Else hypervisor privileged */ } /* BookS does both ptesync and tlbsync make tlbsync a nop for server */ @@ -5502,60 +5388,6 @@ static void gen_tlbsync(DisasContext *ctx) #endif /* defined(CONFIG_USER_ONLY) */ } -#if defined(TARGET_PPC64) -/* slbia */ -static void gen_slbia(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - uint32_t ih = (ctx->opcode >> 21) & 0x7; - TCGv_i32 t0 = tcg_const_i32(ih); - - CHK_SV; - - gen_helper_slbia(cpu_env, t0); - tcg_temp_free_i32(t0); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* slbie */ -static void gen_slbie(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* slbieg */ -static void gen_slbieg(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - - gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -/* slbsync */ -static void gen_slbsync(DisasContext *ctx) -{ -#if defined(CONFIG_USER_ONLY) - GEN_PRIV; -#else - CHK_SV; - gen_check_tlb_flush(ctx, true); -#endif /* defined(CONFIG_USER_ONLY) */ -} - -#endif /* 
defined(TARGET_PPC64) */ - /*** External control ***/ /* Optional: */ @@ -5591,9 +5423,9 @@ static void gen_ecowx(DisasContext *ctx) static void gen_tlbld_6xx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5602,9 +5434,9 @@ static void gen_tlbld_6xx(DisasContext *ctx) static void gen_tlbli_6xx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5622,11 +5454,11 @@ static void gen_mfapidi(DisasContext *ctx) static void gen_tlbiva(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]); @@ -5853,11 +5685,11 @@ GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C); static void gen_mfdcr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv dcrn; - CHK_SV; + CHK_SV(ctx); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); tcg_temp_free(dcrn); @@ -5868,11 +5700,11 @@ static void gen_mfdcr(DisasContext *ctx) static void gen_mtdcr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv dcrn; - CHK_SV; + CHK_SV(ctx); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(dcrn); @@ -5884,9 +5716,9 @@ static void gen_mtdcr(DisasContext *ctx) static void gen_mfdcrx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ @@ -5898,35 +5730,19 @@ static void gen_mfdcrx(DisasContext *ctx) static void gen_mtdcrx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ #endif /* defined(CONFIG_USER_ONLY) */ } -/* mfdcrux (PPC 460) : user-mode access to DCR */ -static void gen_mfdcrux(DisasContext *ctx) -{ - gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, - cpu_gpr[rA(ctx->opcode)]); - /* Note: Rc update flag set leads to undefined state of Rc0 */ -} - -/* mtdcrux (PPC 460) : user-mode access to DCR */ -static void gen_mtdcrux(DisasContext *ctx) -{ - gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rS(ctx->opcode)]); - /* Note: Rc update flag set leads to undefined state of Rc0 */ -} - /* dccci */ static void gen_dccci(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); /* interpreted as no-op */ } @@ -5934,11 +5750,11 @@ static void gen_dccci(DisasContext *ctx) static void gen_dcread(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv EA, val; - CHK_SV; + CHK_SV(ctx); gen_set_access_type(ctx, ACCESS_CACHE); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -5963,14 +5779,14 @@ static void gen_icbt_40x(DisasContext *ctx) /* iccci */ static void gen_iccci(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); /* interpreted as no-op */ } /* icread */ static void gen_icread(DisasContext *ctx) { - CHK_SV; + CHK_SV(ctx); /* 
interpreted as no-op */ } @@ -5978,9 +5794,9 @@ static void gen_icread(DisasContext *ctx) static void gen_rfci_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_40x_rfci(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -5990,9 +5806,9 @@ static void gen_rfci_40x(DisasContext *ctx) static void gen_rfci(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_rfci(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6005,9 +5821,9 @@ static void gen_rfci(DisasContext *ctx) static void gen_rfdi(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_rfdi(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6018,9 +5834,9 @@ static void gen_rfdi(DisasContext *ctx) static void gen_rfmci(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); /* Restore CPU state */ gen_helper_rfmci(cpu_env); ctx->base.is_jmp = DISAS_EXIT; @@ -6033,9 +5849,9 @@ static void gen_rfmci(DisasContext *ctx) static void gen_tlbre_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env, @@ -6056,11 +5872,11 @@ static void gen_tlbre_40x(DisasContext *ctx) static void gen_tlbsx_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -6079,9 +5895,9 @@ static void gen_tlbsx_40x(DisasContext *ctx) static void gen_tlbwe_40x(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: @@ -6105,9 +5921,9 @@ static void gen_tlbwe_40x(DisasContext *ctx) static void gen_tlbre_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: @@ -6131,11 +5947,11 @@ static void gen_tlbre_440(DisasContext *ctx) static void gen_tlbsx_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); @@ -6154,9 +5970,9 @@ static void gen_tlbsx_440(DisasContext *ctx) static void gen_tlbwe_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); switch (rB(ctx->opcode)) { case 0: case 1: @@ -6181,9 +5997,9 @@ static void gen_tlbwe_440(DisasContext *ctx) static void gen_tlbre_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_booke206_tlbre(cpu_env); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6192,11 +6008,11 @@ static void gen_tlbre_booke206(DisasContext *ctx) static void gen_tlbsx_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); if (rA(ctx->opcode)) { t0 = tcg_temp_new(); tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]); @@ -6214,9 +6030,9 @@ static void gen_tlbsx_booke206(DisasContext *ctx) static void gen_tlbwe_booke206(DisasContext *ctx) { #if 
defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_booke206_tlbwe(cpu_env); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6224,11 +6040,11 @@ static void gen_tlbwe_booke206(DisasContext *ctx) static void gen_tlbivax_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_booke206_tlbivax(cpu_env, t0); @@ -6239,11 +6055,11 @@ static void gen_tlbivax_booke206(DisasContext *ctx) static void gen_tlbilx_booke206(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); @@ -6271,11 +6087,11 @@ static void gen_tlbilx_booke206(DisasContext *ctx) static void gen_wrtee(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else TCGv t0; - CHK_SV; + CHK_SV(ctx); t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE)); tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); @@ -6293,9 +6109,9 @@ static void gen_wrtee(DisasContext *ctx) static void gen_wrteei(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); if (ctx->opcode & 0x00008000) { tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE)); /* Stop translation to have a chance to raise an exception */ @@ -6349,9 +6165,9 @@ static void gen_icbt_440(DisasContext *ctx) static void gen_msgclr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_HV; + CHK_HV(ctx); if (is_book3s_arch2x(ctx)) { gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]); } else { @@ -6363,9 +6179,9 @@ static void gen_msgclr(DisasContext *ctx) static void gen_msgsnd(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_HV; + CHK_HV(ctx); if (is_book3s_arch2x(ctx)) { gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]); } else { @@ -6378,9 +6194,9 @@ static void gen_msgsnd(DisasContext *ctx) static void gen_msgclrp(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6388,9 +6204,9 @@ static void gen_msgclrp(DisasContext *ctx) static void gen_msgsndp(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_SV; + CHK_SV(ctx); gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6399,9 +6215,9 @@ static void gen_msgsndp(DisasContext *ctx) static void gen_msgsync(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) - GEN_PRIV; + GEN_PRIV(ctx); #else - CHK_HV; + CHK_HV(ctx); #endif /* defined(CONFIG_USER_ONLY) */ /* interpreted as no-op */ } @@ -6512,7 +6328,7 @@ static void gen_tcheck(DisasContext *ctx) #define GEN_TM_PRIV_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \ + gen_priv_opc(ctx); \ } #else @@ -6520,7 +6336,7 @@ static inline void gen_##name(DisasContext *ctx) \ #define GEN_TM_PRIV_NOOP(name) \ static inline void gen_##name(DisasContext *ctx) \ { \ - CHK_SV; \ + CHK_SV(ctx); \ if (unlikely(!ctx->tm_enabled)) { \ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ return; \ @@ -6628,6 +6444,27 @@ static int times_16(DisasContext *ctx, int x) } \ } while (0) +#if !defined(CONFIG_USER_ONLY) 
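+/*
+ * REQUIRE_SV/REQUIRE_HV are the decodetree counterparts of CHK_SV/CHK_HV:
+ * when the privilege check fails (problem state for REQUIRE_SV, problem
+ * state or non-hypervisor state for REQUIRE_HV) they generate a privileged
+ * opcode exception and make the trans_* function return true, i.e. the
+ * instruction is treated as handled and only the exception is raised.
+ * The user-only variants below always raise the exception.
+ */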
+#define REQUIRE_SV(CTX) \ + do { \ + if (unlikely((CTX)->pr)) { \ + gen_priv_opc(CTX); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_HV(CTX) \ + do { \ + if (unlikely((CTX)->pr || !(CTX)->hv)) { \ + gen_priv_opc(CTX); \ + return true; \ + } \ + } while (0) +#else +#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0) +#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0) +#endif + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. @@ -6699,6 +6536,8 @@ static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) #include "translate/branch-impl.c.inc" +#include "translate/storage-ctrl-impl.c.inc" + /* Handles lfdp */ static void gen_dform39(DisasContext *ctx) { @@ -6927,27 +6766,13 @@ GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001, GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B), GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B), -GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B), #endif GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), /* * XXX Those instructions will need to be handled differently for * different ISA versions */ -GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), -GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), -GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300), GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), -#if defined(TARGET_PPC64) -GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI), -GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI), -GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300), -#endif GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN), GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB), @@ -6958,8 +6783,6 @@ GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR), GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR), GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX), GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX), -GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX), -GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX), GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON), GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON), GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index cb0097bedb..db14d3bebc 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -79,11 +79,8 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) REQUIRE_INSNS_FLAGS(ctx, 64BX); if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) { - if (ctx->pr) { - /* lq and stq were privileged prior to V. 
2.07 */ - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return true; - } + /* lq and stq were privileged prior to V. 2.07 */ + REQUIRE_SV(ctx); if (ctx->le_mode) { gen_align_no_le(ctx); diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 319513d001..0e893eafa7 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -901,7 +901,7 @@ static void gen_lfdepx(DisasContext *ctx) { TCGv EA; TCGv_i64 t0; - CHK_SV; + CHK_SV(ctx); if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; @@ -1058,7 +1058,7 @@ static void gen_stfdepx(DisasContext *ctx) { TCGv EA; TCGv_i64 t0; - CHK_SV; + CHK_SV(ctx); if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc new file mode 100644 index 0000000000..6ea1d22ef9 --- /dev/null +++ b/target/ppc/translate/storage-ctrl-impl.c.inc @@ -0,0 +1,250 @@ +/* + * Power ISA decode for Storage Control instructions + * + * Copyright (c) 2022 Instituto de Pesquisas Eldorado (eldorado.org.br) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +/* + * Store Control Instructions + */ + +#include "mmu-book3s-v3.h" + +static bool trans_SLBIE(DisasContext *ctx, arg_SLBIE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SLBI); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIE(cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBIEG(DisasContext *ctx, arg_SLBIEG *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIEG(cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBIA(DisasContext *ctx, arg_SLBIA *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SLBI); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIA(cpu_env, tcg_constant_i32(a->ih)); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBIAG(DisasContext *ctx, arg_SLBIAG *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBIAG(cpu_env, cpu_gpr[a->rs], tcg_constant_i32(a->l)); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBMTE(DisasContext *ctx, arg_SLBMTE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBMTE(cpu_env, cpu_gpr[a->rb], cpu_gpr[a->rt]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBMFEV(DisasContext *ctx, arg_SLBMFEV *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBMFEV(cpu_gpr[a->rt], cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBMFEE(DisasContext *ctx, arg_SLBMFEE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_helper_SLBMFEE(cpu_gpr[a->rt], cpu_env, cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_SLBFEE(DisasContext *ctx, arg_SLBFEE *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B); + +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); +#else + +#if defined(TARGET_PPC64) + TCGLabel *l1, *l2; + + if (unlikely(ctx->pr)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + return true; + } + gen_helper_SLBFEE(cpu_gpr[a->rt], cpu_env, + cpu_gpr[a->rb]); + l1 = gen_new_label(); + l2 = gen_new_label(); + tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], -1, l1); + tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_movi_tl(cpu_gpr[a->rt], 0); + gen_set_label(l2); +#else + qemu_build_not_reached(); +#endif +#endif + return true; +} + +static bool trans_SLBSYNC(DisasContext *ctx, arg_SLBSYNC *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_SV(ctx); + +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) + gen_check_tlb_flush(ctx, true); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) +{ +#if defined(CONFIG_USER_ONLY) + 
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; +#else + TCGv_i32 t1; + int rb; + + rb = a->rb; + + if ((ctx->insns_flags2 & PPC2_ISA300) == 0) { + /* + * Before Power ISA 3.0, the corresponding bits of RIC, PRS, and R + * (and RS for tlbiel) were reserved fields and should be ignored. + */ + a->ric = 0; + a->prs = false; + a->r = false; + if (local) { + a->rs = 0; + } + } + + if (ctx->pr) { + /* tlbie[l] is privileged... */ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; + } else if (!ctx->hv) { + if ((!a->prs && ctx->hr) || (!local && !ctx->gtse)) { + /* + * ... except when PRS=0 and HR=1, or when GTSE=0 for tlbie, + * making it hypervisor privileged. + */ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; + } + } + + if (!local && NARROW_MODE(ctx)) { + TCGv t0 = tcg_temp_new(); + tcg_gen_ext32u_tl(t0, cpu_gpr[rb]); + gen_helper_tlbie(cpu_env, t0); + tcg_temp_free(t0); + +#if defined(TARGET_PPC64) + /* + * ISA 3.1B says that MSR SF must be 1 when this instruction is executed; + * otherwise the results are undefined. + */ + } else if (a->r) { + gen_helper_tlbie_isa300(cpu_env, cpu_gpr[rb], cpu_gpr[a->rs], + tcg_constant_i32(a->ric << TLBIE_F_RIC_SHIFT | + a->prs << TLBIE_F_PRS_SHIFT | + a->r << TLBIE_F_R_SHIFT | + local << TLBIE_F_LOCAL_SHIFT)); + return true; +#endif + + } else { + gen_helper_tlbie(cpu_env, cpu_gpr[rb]); + } + + if (local) { + return true; + } + + t1 = tcg_temp_new_i32(); + tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); + tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH); + tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); + tcg_temp_free_i32(t1); + + return true; +#endif +} + +TRANS_FLAGS(MEM_TLBIE, TLBIE, do_tlbie, false) +TRANS_FLAGS(MEM_TLBIE, TLBIEL, do_tlbie, true) diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c index 2a618a1093..75cf605b9f 100644 --- a/target/s390x/tcg/vec_fpu_helper.c +++ b/target/s390x/tcg/vec_fpu_helper.c @@ -824,7 +824,7 @@ static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b, default: g_assert_not_reached(); } - } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + } else if (unlikely((dcmask_a & DCMASK_ZERO) && (dcmask_b & DCMASK_ZERO))) { switch (type) { case S390_MINMAX_TYPE_JAVA: return neg_a ? 
S390_MINMAX_RES_A : S390_MINMAX_RES_B; @@ -874,7 +874,7 @@ static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b, default: g_assert_not_reached(); } - } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + } else if (unlikely((dcmask_a & DCMASK_ZERO) && (dcmask_b & DCMASK_ZERO))) { const bool neg_a = dcmask_a & DCMASK_NEGATIVE; switch (type) { diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include index ef4518d9eb..9a45e8890b 100644 --- a/tests/docker/Makefile.include +++ b/tests/docker/Makefile.include @@ -140,6 +140,7 @@ docker-image-debian-nios2-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docker \ # Specialist build images, sometimes very limited tools docker-image-debian-tricore-cross: docker-image-debian10 docker-image-debian-all-test-cross: docker-image-debian10 +docker-image-debian-loongarch-cross: docker-image-debian11 docker-image-debian-microblaze-cross: docker-image-debian10 docker-image-debian-nios2-cross: docker-image-debian10 docker-image-debian-powerpc-test-cross: docker-image-debian11 @@ -149,6 +150,7 @@ docker-image-debian-riscv64-test-cross: docker-image-debian11 DOCKER_PARTIAL_IMAGES += debian-alpha-cross DOCKER_PARTIAL_IMAGES += debian-powerpc-test-cross DOCKER_PARTIAL_IMAGES += debian-hppa-cross +DOCKER_PARTIAL_IMAGES += debian-loongarch-cross DOCKER_PARTIAL_IMAGES += debian-m68k-cross debian-mips64-cross DOCKER_PARTIAL_IMAGES += debian-microblaze-cross DOCKER_PARTIAL_IMAGES += debian-nios2-cross diff --git a/tests/docker/dockerfiles/debian-loongarch-cross.docker b/tests/docker/dockerfiles/debian-loongarch-cross.docker new file mode 100644 index 0000000000..ca2469d2a8 --- /dev/null +++ b/tests/docker/dockerfiles/debian-loongarch-cross.docker @@ -0,0 +1,25 @@ +# +# Docker cross-compiler target +# +# This docker target builds on the debian11 base image, +# using a prebuilt toolchains for LoongArch64 from: +# https://github.com/loongson/build-tools/releases +# +FROM qemu/debian11 + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + curl \ + gettext \ + git \ + python3-minimal + +RUN curl -#SL https://github.com/loongson/build-tools/releases/download/2022.05.29/loongarch64-clfs-5.0-cross-tools-gcc-glibc.tar.xz \ + | tar -xJC /opt + +ENV PATH $PATH:/opt/cross-tools/bin +ENV LD_LIBRARY_PATH /opt/cross-tools/lib:/opt/cross-tools/loongarch64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH diff --git a/tests/qtest/bcm2835-dma-test.c b/tests/qtest/bcm2835-dma-test.c new file mode 100644 index 0000000000..8293d822b9 --- /dev/null +++ b/tests/qtest/bcm2835-dma-test.c @@ -0,0 +1,118 @@ +/* + * QTest testcase for BCM283x DMA engine (on Raspberry Pi 3) + * and its interrupts coming to Interrupt Controller. 
+ * + * Copyright (c) 2022 Auriga LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +/* Offsets in raspi3b platform: */ +#define RASPI3_DMA_BASE 0x3f007000 +#define RASPI3_IC_BASE 0x3f00b200 + +/* Used register/fields definitions */ + +/* DMA engine registers: */ +#define BCM2708_DMA_CS 0 +#define BCM2708_DMA_ACTIVE (1 << 0) +#define BCM2708_DMA_INT (1 << 2) + +#define BCM2708_DMA_ADDR 0x04 + +#define BCM2708_DMA_INT_STATUS 0xfe0 + +/* DMA Trasfer Info fields: */ +#define BCM2708_DMA_INT_EN (1 << 0) +#define BCM2708_DMA_D_INC (1 << 4) +#define BCM2708_DMA_S_INC (1 << 8) + +/* Interrupt controller registers: */ +#define IRQ_PENDING_BASIC 0x00 +#define IRQ_GPU_PENDING1_AGGR (1 << 8) +#define IRQ_PENDING_1 0x04 +#define IRQ_ENABLE_1 0x10 + +/* Data for the test: */ +#define SCB_ADDR 256 +#define S_ADDR 32 +#define D_ADDR 64 +#define TXFR_LEN 32 +const uint32_t check_data = 0x12345678; + +static void bcm2835_dma_test_interrupt(int dma_c, int irq_line) +{ + uint64_t dma_base = RASPI3_DMA_BASE + dma_c * 0x100; + int gpu_irq_line = 16 + irq_line; + + /* Check that interrupts are silent by default: */ + writel(RASPI3_IC_BASE + IRQ_ENABLE_1, 1 << gpu_irq_line); + int isr = readl(dma_base + BCM2708_DMA_INT_STATUS); + g_assert_cmpint(isr, ==, 0); + uint32_t reg0 = readl(dma_base + BCM2708_DMA_CS); + g_assert_cmpint(reg0, ==, 0); + uint32_t ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC); + g_assert_cmpint(ic_pending, ==, 0); + uint32_t gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1); + g_assert_cmpint(gpu_pending1, ==, 0); + + /* Prepare Control Block: */ + writel(SCB_ADDR + 0, BCM2708_DMA_S_INC | BCM2708_DMA_D_INC | + BCM2708_DMA_INT_EN); /* transfer info */ + writel(SCB_ADDR + 4, S_ADDR); /* source address */ + writel(SCB_ADDR + 8, D_ADDR); /* destination address */ + writel(SCB_ADDR + 12, TXFR_LEN); /* transfer length */ + writel(dma_base + BCM2708_DMA_ADDR, SCB_ADDR); + + writel(S_ADDR, check_data); + for (int word = S_ADDR + 4; word < S_ADDR + TXFR_LEN; word += 4) { + writel(word, ~check_data); + } + /* Perform the transfer: */ + writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_ACTIVE); + + /* Check that destination == source: */ + uint32_t data = readl(D_ADDR); + g_assert_cmpint(data, ==, check_data); + for (int word = D_ADDR + 4; word < D_ADDR + TXFR_LEN; word += 4) { + data = readl(word); + g_assert_cmpint(data, ==, ~check_data); + } + + /* Check that interrupt status is set both in DMA and IC controllers: */ + isr = readl(RASPI3_DMA_BASE + BCM2708_DMA_INT_STATUS); + g_assert_cmpint(isr, ==, 1 << dma_c); + + ic_pending = readl(RASPI3_IC_BASE + IRQ_PENDING_BASIC); + g_assert_cmpint(ic_pending, ==, IRQ_GPU_PENDING1_AGGR); + + gpu_pending1 = readl(RASPI3_IC_BASE + IRQ_PENDING_1); + g_assert_cmpint(gpu_pending1, ==, 1 << gpu_irq_line); + + /* Clean up, clear interrupt: */ + writel(dma_base + BCM2708_DMA_CS, BCM2708_DMA_INT); +} + +static void bcm2835_dma_test_interrupts(void) +{ + /* DMA engines 0--10 have separate IRQ lines, 11--14 - only one: */ + bcm2835_dma_test_interrupt(0, 0); + bcm2835_dma_test_interrupt(10, 10); + bcm2835_dma_test_interrupt(11, 11); + bcm2835_dma_test_interrupt(14, 11); +} + +int main(int argc, char **argv) +{ + int ret; + g_test_init(&argc, &argv, NULL); + qtest_add_func("/bcm2835/dma/test_interrupts", + bcm2835_dma_test_interrupts); + qtest_start("-machine raspi3b"); + ret = g_test_run(); + qtest_end(); + return ret; +} diff --git a/tests/qtest/machine-none-test.c 
b/tests/qtest/machine-none-test.c index d0f8cd9902..f92fab479f 100644 --- a/tests/qtest/machine-none-test.c +++ b/tests/qtest/machine-none-test.c @@ -54,6 +54,7 @@ static struct arch2cpu cpus_map[] = { { "riscv64", "rv64" }, { "riscv32", "rv32" }, { "rx", "rx62n" }, + { "loongarch64", "la464"}, }; static const char *get_cpu_model_by_arch(const char *arch) diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 31287a9173..3a474010e4 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -218,7 +218,8 @@ qtests_aarch64 = \ ['arm-cpu-features', 'numa-test', 'boot-serial-test', - 'migration-test'] + 'migration-test', + 'bcm2835-dma-test'] qtests_s390x = \ (slirp.found() ? ['pxe-test', 'test-netfilter'] : []) + \ diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c index e81e831c85..c6fbeb3974 100644 --- a/tests/qtest/migration-helpers.c +++ b/tests/qtest/migration-helpers.c @@ -84,6 +84,28 @@ QDict *wait_command(QTestState *who, const char *command, ...) } /* + * Execute the qmp command only + */ +QDict *qmp_command(QTestState *who, const char *command, ...) +{ + va_list ap; + QDict *resp, *ret; + + va_start(ap, command); + resp = qtest_vqmp(who, command, ap); + va_end(ap); + + g_assert(!qdict_haskey(resp, "error")); + g_assert(qdict_haskey(resp, "return")); + + ret = qdict_get_qdict(resp, "return"); + qobject_ref(ret); + qobject_unref(resp); + + return ret; +} + +/* * Send QMP command "migrate". * Arguments are built from @fmt... (formatted like * qobject_from_jsonf_nofail()) with "uri": @uri spliced in. diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h index 78587c2b82..59561898d0 100644 --- a/tests/qtest/migration-helpers.h +++ b/tests/qtest/migration-helpers.h @@ -23,6 +23,8 @@ QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...); G_GNUC_PRINTF(2, 3) QDict *wait_command(QTestState *who, const char *command, ...); +QDict *qmp_command(QTestState *who, const char *command, ...); + G_GNUC_PRINTF(3, 4) void migrate_qmp(QTestState *who, const char *uri, const char *fmt, ...); diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index 9e64125f02..71595a74fd 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -24,6 +24,7 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "crypto/tlscredspsk.h" +#include "qapi/qmp/qlist.h" #include "migration-helpers.h" #include "tests/migration/migration-test.h" @@ -46,6 +47,12 @@ unsigned start_address; unsigned end_address; static bool uffd_feature_thread_id; +/* + * Dirtylimit stop working if dirty page rate error + * value less than DIRTYLIMIT_TOLERANCE_RANGE + */ +#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ + #if defined(__linux__) #include <sys/syscall.h> #include <sys/vfs.h> @@ -496,6 +503,82 @@ typedef struct { const char *opts_target; } MigrateStart; +/* + * A hook that runs after the src and dst QEMUs have been + * created, but before the migration is started. This can + * be used to set migration parameters and capabilities. 
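As an aside on the qmp_command() helper added above: it asserts that the reply carries a 'return' and no 'error', takes a reference on the 'return' dictionary and hands it back unwrapped, so the caller owns that reference and must unref it. A minimal caller sketch (the query-status command and the expected "running" value are illustrative only, not something this patch itself sends):

    /* Illustrative only: inspect the runstate of a guest that is running */
    QDict *info = qmp_command(who, "{ 'execute': 'query-status' }");
    g_assert_cmpstr(qdict_get_str(info, "status"), ==, "running");
    qobject_unref(info);   /* qmp_command() gave us the only reference */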
+ * + * Returns: NULL, or a pointer to opaque state to be + * later passed to the TestMigrateFinishHook + */ +typedef void * (*TestMigrateStartHook)(QTestState *from, + QTestState *to); + +/* + * A hook that runs after the migration has finished, + * regardless of whether it succeeded or failed, but + * before QEMU has terminated (unless it self-terminated + * due to migration error) + * + * @opaque is a pointer to state previously returned + * by the TestMigrateStartHook if any, or NULL. + */ +typedef void (*TestMigrateFinishHook)(QTestState *from, + QTestState *to, + void *opaque); + +typedef struct { + /* Optional: fine tune start parameters */ + MigrateStart start; + + /* Required: the URI for the dst QEMU to listen on */ + const char *listen_uri; + + /* + * Optional: the URI for the src QEMU to connect to + * If NULL, then it will query the dst QEMU for its actual + * listening address and use that as the connect address. + * This allows for dynamically picking a free TCP port. + */ + const char *connect_uri; + + /* Optional: callback to run at start to set migration parameters */ + TestMigrateStartHook start_hook; + /* Optional: callback to run at finish to cleanup */ + TestMigrateFinishHook finish_hook; + + /* + * Optional: normally we expect the migration process to complete. + * + * There can be a variety of reasons and stages in which failure + * can happen during tests. + * + * If a failure is expected to happen at time of establishing + * the connection, then MIG_TEST_FAIL will indicate that the dst + * QEMU is expected to stay running and accept future migration + * connections. + * + * If a failure is expected to happen while processing the + * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate + * that the dst QEMU is expected to quit with non-zero exit status + */ + enum { + /* This test should succeed, the default */ + MIG_TEST_SUCCEED = 0, + /* This test should fail, dest qemu should keep alive */ + MIG_TEST_FAIL, + /* This test should fail, dest qemu should fail with abnormal status */ + MIG_TEST_FAIL_DEST_QUIT_ERR, + } result; + + /* Optional: set number of migration passes to wait for */ + unsigned int iterations; + + /* Postcopy specific fields */ + void *postcopy_data; + bool postcopy_preempt; +} MigrateCommon; + static int test_migrate_start(QTestState **from, QTestState **to, const char *uri, MigrateStart *args) { @@ -982,19 +1065,28 @@ test_migrate_tls_x509_finish(QTestState *from, static int migrate_postcopy_prepare(QTestState **from_ptr, QTestState **to_ptr, - MigrateStart *args) + MigrateCommon *args) { g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); QTestState *from, *to; - if (test_migrate_start(&from, &to, uri, args)) { + if (test_migrate_start(&from, &to, uri, &args->start)) { return -1; } + if (args->start_hook) { + args->postcopy_data = args->start_hook(from, to); + } + migrate_set_capability(from, "postcopy-ram", true); migrate_set_capability(to, "postcopy-ram", true); migrate_set_capability(to, "postcopy-blocktime", true); + if (args->postcopy_preempt) { + migrate_set_capability(from, "postcopy-preempt", true); + migrate_set_capability(to, "postcopy-preempt", true); + } + migrate_ensure_non_converge(from); /* Wait for the first serial output from the source */ @@ -1010,7 +1102,8 @@ static int migrate_postcopy_prepare(QTestState **from_ptr, return 0; } -static void migrate_postcopy_complete(QTestState *from, QTestState *to) +static void migrate_postcopy_complete(QTestState *from, QTestState *to, + MigrateCommon *args) { 
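To make the hook plumbing concrete, here is a minimal sketch of how a test built on MigrateCommon could wire a start hook into test_precopy_common(); the test name, the hook body and the bandwidth value are illustrative only and not part of the patch:

    static void *example_bwcap_start(QTestState *from, QTestState *to)
    {
        /* same helper the existing tests use to tune migration parameters */
        migrate_set_parameter_int(from, "max-bandwidth", 30000000);
        return NULL;                        /* nothing to hand to a finish hook */
    }

    static void test_precopy_unix_bwcap(void)    /* hypothetical test */
    {
        g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
        MigrateCommon args = {
            .listen_uri = uri,                   /* required */
            .start_hook = example_bwcap_start,   /* optional */
            /* .finish_hook left NULL: nothing to clean up */
        };

        test_precopy_common(&args);
    }

Registering such a test would then be a single qtest_add_func() call in main(), exactly like the registrations added further down.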
wait_for_migration_complete(from); @@ -1021,30 +1114,73 @@ static void migrate_postcopy_complete(QTestState *from, QTestState *to) read_blocktime(to); } + if (args->finish_hook) { + args->finish_hook(from, to, args->postcopy_data); + args->postcopy_data = NULL; + } + test_migrate_end(from, to, true); } -static void test_postcopy(void) +static void test_postcopy_common(MigrateCommon *args) { - MigrateStart args = {}; QTestState *from, *to; - if (migrate_postcopy_prepare(&from, &to, &args)) { + if (migrate_postcopy_prepare(&from, &to, args)) { return; } migrate_postcopy_start(from, to); - migrate_postcopy_complete(from, to); + migrate_postcopy_complete(from, to, args); } -static void test_postcopy_recovery(void) +static void test_postcopy(void) { - MigrateStart args = { - .hide_stderr = true, + MigrateCommon args = { }; + + test_postcopy_common(&args); +} + +static void test_postcopy_preempt(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, }; + + test_postcopy_common(&args); +} + +#ifdef CONFIG_GNUTLS +static void test_postcopy_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_common(&args); +} + +static void test_postcopy_preempt_tls_psk(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_common(&args); +} +#endif + +static void test_postcopy_recovery_common(MigrateCommon *args) +{ QTestState *from, *to; g_autofree char *uri = NULL; - if (migrate_postcopy_prepare(&from, &to, &args)) { + /* Always hide errors for postcopy recover tests since they're expected */ + args->start.hide_stderr = true; + + if (migrate_postcopy_prepare(&from, &to, args)) { return; } @@ -1095,9 +1231,51 @@ static void test_postcopy_recovery(void) /* Restore the postcopy bandwidth to unlimited */ migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0); - migrate_postcopy_complete(from, to); + migrate_postcopy_complete(from, to, args); } +static void test_postcopy_recovery(void) +{ + MigrateCommon args = { }; + + test_postcopy_recovery_common(&args); +} + +#ifdef CONFIG_GNUTLS +static void test_postcopy_recovery_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_recovery_common(&args); +} +#endif + +static void test_postcopy_preempt_recovery(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + }; + + test_postcopy_recovery_common(&args); +} + +#ifdef CONFIG_GNUTLS +/* This contains preempt+recovery+tls test altogether */ +static void test_postcopy_preempt_all(void) +{ + MigrateCommon args = { + .postcopy_preempt = true, + .start_hook = test_migrate_tls_psk_start_match, + .finish_hook = test_migrate_tls_psk_finish, + }; + + test_postcopy_recovery_common(&args); +} +#endif + static void test_baddest(void) { MigrateStart args = { @@ -1113,78 +1291,6 @@ static void test_baddest(void) test_migrate_end(from, to, false); } -/* - * A hook that runs after the src and dst QEMUs have been - * created, but before the migration is started. This can - * be used to set migration parameters and capabilities. 
- * - * Returns: NULL, or a pointer to opaque state to be - * later passed to the TestMigrateFinishHook - */ -typedef void * (*TestMigrateStartHook)(QTestState *from, - QTestState *to); - -/* - * A hook that runs after the migration has finished, - * regardless of whether it succeeded or failed, but - * before QEMU has terminated (unless it self-terminated - * due to migration error) - * - * @opaque is a pointer to state previously returned - * by the TestMigrateStartHook if any, or NULL. - */ -typedef void (*TestMigrateFinishHook)(QTestState *from, - QTestState *to, - void *opaque); - -typedef struct { - /* Optional: fine tune start parameters */ - MigrateStart start; - - /* Required: the URI for the dst QEMU to listen on */ - const char *listen_uri; - - /* - * Optional: the URI for the src QEMU to connect to - * If NULL, then it will query the dst QEMU for its actual - * listening address and use that as the connect address. - * This allows for dynamically picking a free TCP port. - */ - const char *connect_uri; - - /* Optional: callback to run at start to set migration parameters */ - TestMigrateStartHook start_hook; - /* Optional: callback to run at finish to cleanup */ - TestMigrateFinishHook finish_hook; - - /* - * Optional: normally we expect the migration process to complete. - * - * There can be a variety of reasons and stages in which failure - * can happen during tests. - * - * If a failure is expected to happen at time of establishing - * the connection, then MIG_TEST_FAIL will indicate that the dst - * QEMU is expected to stay running and accept future migration - * connections. - * - * If a failure is expected to happen while processing the - * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate - * that the dst QEMU is expected to quit with non-zero exit status - */ - enum { - /* This test should succeed, the default */ - MIG_TEST_SUCCEED = 0, - /* This test should fail, dest qemu should keep alive */ - MIG_TEST_FAIL, - /* This test should fail, dest qemu should fail with abnormal status */ - MIG_TEST_FAIL_DEST_QUIT_ERR, - } result; - - /* Optional: set number of migration passes to wait for */ - unsigned int iterations; -} MigrateCommon; - static void test_precopy_common(MigrateCommon *args) { QTestState *from, *to; @@ -2059,6 +2165,253 @@ static void test_multifd_tcp_cancel(void) test_migrate_end(from, to2, true); } +static void calc_dirty_rate(QTestState *who, uint64_t calc_time) +{ + qobject_unref(qmp_command(who, + "{ 'execute': 'calc-dirty-rate'," + "'arguments': { " + "'calc-time': %ld," + "'mode': 'dirty-ring' }}", + calc_time)); +} + +static QDict *query_dirty_rate(QTestState *who) +{ + return qmp_command(who, "{ 'execute': 'query-dirty-rate' }"); +} + +static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate) +{ + qobject_unref(qmp_command(who, + "{ 'execute': 'set-vcpu-dirty-limit'," + "'arguments': { " + "'dirty-rate': %ld } }", + dirtyrate)); +} + +static void cancel_vcpu_dirty_limit(QTestState *who) +{ + qobject_unref(qmp_command(who, + "{ 'execute': 'cancel-vcpu-dirty-limit' }")); +} + +static QDict *query_vcpu_dirty_limit(QTestState *who) +{ + QDict *rsp; + + rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }"); + g_assert(!qdict_haskey(rsp, "error")); + g_assert(qdict_haskey(rsp, "return")); + + return rsp; +} + +static bool calc_dirtyrate_ready(QTestState *who) +{ + QDict *rsp_return; + gchar *status; + + rsp_return = query_dirty_rate(who); + g_assert(rsp_return); + + status = g_strdup(qdict_get_str(rsp_return, "status")); 
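For orientation, the reply returned by query_vcpu_dirty_limit() still contains the top-level 'return' key, which holds a list of per-vCPU dictionaries. The loop below is an illustrative sketch, not part of the test, that walks every entry instead of only the first one; "limit-rate" is the field the test itself reads, while the "cpu-index" field name is an assumption:

    /* Illustrative only: log the configured limit for every vCPU. */
    static void dump_vcpu_dirty_limits(QTestState *who)
    {
        QDict *rsp = query_vcpu_dirty_limit(who);
        QList *entries = qdict_get_qlist(rsp, "return");
        const QListEntry *e;

        QLIST_FOREACH_ENTRY(entries, e) {
            QDict *info = qobject_to(QDict, qlist_entry_obj(e));
            g_test_message("vcpu %" PRId64 ": limit %" PRId64 " MB/s",
                           qdict_get_try_int(info, "cpu-index", -1),
                           qdict_get_try_int(info, "limit-rate", -1));
        }
        qobject_unref(rsp);
    }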
+ g_assert(status); + + return g_strcmp0(status, "measuring"); +} + +static void wait_for_calc_dirtyrate_complete(QTestState *who, + int64_t time_s) +{ + int max_try_count = 10000; + usleep(time_s * 1000000); + + while (!calc_dirtyrate_ready(who) && max_try_count--) { + usleep(1000); + } + + /* + * Time out after roughly 10 s (max_try_count * 1000 us); if the + * dirty rate measurement still has not completed by then, fail the test. + */ + g_assert_cmpint(max_try_count, !=, 0); +} + +static int64_t get_dirty_rate(QTestState *who) +{ + QDict *rsp_return; + gchar *status; + QList *rates; + const QListEntry *entry; + QDict *rate; + int64_t dirtyrate; + + rsp_return = query_dirty_rate(who); + g_assert(rsp_return); + + status = g_strdup(qdict_get_str(rsp_return, "status")); + g_assert(status); + g_assert_cmpstr(status, ==, "measured"); + + rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate"); + g_assert(rates && !qlist_empty(rates)); + + entry = qlist_first(rates); + g_assert(entry); + + rate = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(rate); + + dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1); + + qobject_unref(rsp_return); + return dirtyrate; +} + +static int64_t get_limit_rate(QTestState *who) +{ + QDict *rsp_return; + QList *rates; + const QListEntry *entry; + QDict *rate; + int64_t dirtyrate; + + rsp_return = query_vcpu_dirty_limit(who); + g_assert(rsp_return); + + rates = qdict_get_qlist(rsp_return, "return"); + g_assert(rates && !qlist_empty(rates)); + + entry = qlist_first(rates); + g_assert(entry); + + rate = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(rate); + + dirtyrate = qdict_get_try_int(rate, "limit-rate", -1); + + qobject_unref(rsp_return); + return dirtyrate; +} + +static QTestState *dirtylimit_start_vm(void) +{ + QTestState *vm = NULL; + g_autofree gchar *cmd = NULL; + const char *arch = qtest_get_arch(); + g_autofree char *bootpath = NULL; + + assert((strcmp(arch, "x86_64") == 0)); + bootpath = g_strdup_printf("%s/bootsect", tmpfs); + assert(sizeof(x86_bootsect) == 512); + init_bootfile(bootpath, x86_bootsect, sizeof(x86_bootsect)); + + cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 " + "-name dirtylimit-test,debug-threads=on " + "-m 150M -smp 1 " + "-serial file:%s/vm_serial " + "-drive file=%s,format=raw ", + tmpfs, bootpath); + + vm = qtest_init(cmd); + return vm; +} + +static void dirtylimit_stop_vm(QTestState *vm) +{ + qtest_quit(vm); + cleanup("bootsect"); + cleanup("vm_serial"); +} + +static void test_vcpu_dirty_limit(void) +{ + QTestState *vm; + int64_t origin_rate; + int64_t quota_rate; + int64_t rate; + int max_try_count = 20; + int hit = 0; + + /* Start a VM for the vCPU dirty limit test */ + vm = dirtylimit_start_vm(); + + /* Wait for the first serial output from the VM */ + wait_for_serial("vm_serial"); + + /* Measure the dirty page rate with a calc time of 1 s */ + calc_dirty_rate(vm, 1); + + /* Sleep for the calc time and wait for the measurement to complete */ + wait_for_calc_dirtyrate_complete(vm, 1); + + /* Query the original dirty page rate */ + origin_rate = get_dirty_rate(vm); + + /* A VM booted from bootsect should dirty memory steadily */ + assert(origin_rate != 0); + + /* Set the quota dirty page rate to half of the original rate */ + quota_rate = origin_rate / 2; + + /* Apply the dirty limit */ + dirtylimit_set_all(vm, quota_rate); + + /* + * Check that set-vcpu-dirty-limit and query-vcpu-dirty-limit + * agree on the configured value + */ + g_assert_cmpint(quota_rate, ==, get_limit_rate(vm)); + + /* Sleep a bit to give the limit time to take effect */ + usleep(2000000); + + /* + * Check whether the dirty limit actually takes effect: retry for + * up to 20 s (max_try_count * 1 s) and fail the test if the + * measured rate never drops under the quota. + */ + while (--max_try_count) { + calc_dirty_rate(vm, 1); + wait_for_calc_dirtyrate_complete(vm, 1); + rate = get_dirty_rate(vm); + + /* + * Consider the limit hit once the current rate is below the + * quota rate (within the accepted error range) + */ + if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) { + hit = 1; + break; + } + } + + g_assert_cmpint(hit, ==, 1); + + hit = 0; + max_try_count = 20; + + /* Check that cancelling the dirty limit takes effect */ + cancel_vcpu_dirty_limit(vm); + while (--max_try_count) { + calc_dirty_rate(vm, 1); + wait_for_calc_dirtyrate_complete(vm, 1); + rate = get_dirty_rate(vm); + + /* + * Consider the limit cancelled once the current rate climbs back + * above the quota rate (within the accepted error range) + */ + if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) { + hit = 1; + break; + } + } + + g_assert_cmpint(hit, ==, 1); + dirtylimit_stop_vm(vm); +} + static bool kvm_dirty_ring_supported(void) { #if defined(__linux__) && defined(HOST_X86_64) @@ -2123,13 +2476,31 @@ int main(int argc, char **argv) module_call_init(MODULE_INIT_QOM); qtest_add_func("/migration/postcopy/unix", test_postcopy); - qtest_add_func("/migration/postcopy/recovery", test_postcopy_recovery); + qtest_add_func("/migration/postcopy/plain", test_postcopy); + qtest_add_func("/migration/postcopy/recovery/plain", + test_postcopy_recovery); + qtest_add_func("/migration/postcopy/preempt/plain", test_postcopy_preempt); + qtest_add_func("/migration/postcopy/preempt/recovery/plain", + test_postcopy_preempt_recovery); + qtest_add_func("/migration/bad_dest", test_baddest); qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain); qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle); #ifdef CONFIG_GNUTLS qtest_add_func("/migration/precopy/unix/tls/psk", test_precopy_unix_tls_psk); + /* + * NOTE: the PSK test is enough for postcopy, since the other TLS + * channel types are already covered by the precopy tests. What we + * want to exercise here is the general postcopy path with a TLS + * channel enabled.
+ */ + qtest_add_func("/migration/postcopy/tls/psk", test_postcopy_tls_psk); + qtest_add_func("/migration/postcopy/recovery/tls/psk", + test_postcopy_recovery_tls_psk); + qtest_add_func("/migration/postcopy/preempt/tls/psk", + test_postcopy_preempt_tls_psk); + qtest_add_func("/migration/postcopy/preempt/recovery/tls/psk", + test_postcopy_preempt_all); #ifdef CONFIG_TASN1 qtest_add_func("/migration/precopy/unix/tls/x509/default-host", test_precopy_unix_tls_x509_default_host); @@ -2204,6 +2575,8 @@ int main(int argc, char **argv) if (kvm_dirty_ring_supported()) { qtest_add_func("/migration/dirty_ring", test_precopy_unix_dirty_ring); + qtest_add_func("/migration/vcpu_dirty_limit", + test_vcpu_dirty_limit); } ret = g_test_run(); diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c index 3fa6d9ece0..8048044d28 100644 --- a/tests/qtest/npcm7xx_adc-test.c +++ b/tests/qtest/npcm7xx_adc-test.c @@ -50,7 +50,7 @@ #define CON_INT BIT(18) #define CON_EN BIT(17) #define CON_RST BIT(16) -#define CON_CONV BIT(14) +#define CON_CONV BIT(13) #define CON_DIV(rv) extract32(rv, 1, 8) #define FST_RDST BIT(1) diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index 056b40e67f..af00712458 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -110,6 +110,8 @@ static bool query_is_ignored(const char *cmd) "query-sev-capabilities", "query-sgx", "query-sgx-capabilities", + /* Success depends on enabling dirty page rate limit */ + "query-vcpu-dirty-limit", NULL }; int i; diff --git a/tests/tcg/hexagon/Makefile.target b/tests/tcg/hexagon/Makefile.target index 23b9870534..96a4d7a614 100644 --- a/tests/tcg/hexagon/Makefile.target +++ b/tests/tcg/hexagon/Makefile.target @@ -35,6 +35,7 @@ HEX_TESTS += preg_alias HEX_TESTS += dual_stores HEX_TESTS += multi_result HEX_TESTS += mem_noshuf +HEX_TESTS += mem_noshuf_exception HEX_TESTS += circ HEX_TESTS += brev HEX_TESTS += load_unpack diff --git a/tests/tcg/hexagon/mem_noshuf.c b/tests/tcg/hexagon/mem_noshuf.c index dd714d5e98..0f4064e700 100644 --- a/tests/tcg/hexagon/mem_noshuf.c +++ b/tests/tcg/hexagon/mem_noshuf.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -84,6 +84,70 @@ MEM_NOSHUF32(mem_noshuf_sd_luh, long long, unsigned short, memd, memuh) MEM_NOSHUF32(mem_noshuf_sd_lw, long long, signed int, memd, memw) MEM_NOSHUF64(mem_noshuf_sd_ld, long long, signed long long, memd, memd) +static inline int pred_lw_sw(int pred, int *p, int *q, int x, int y) +{ + int ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "{\n\t" + " memw(%1) = %4\n\t" + " if (!p0) %0 = memw(%2)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "p0", "memory"); + return ret; +} + +static inline int pred_lw_sw_pi(int pred, int *p, int *q, int x, int y) +{ + int ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "r7 = %2\n\t" + "{\n\t" + " memw(%1) = %4\n\t" + " if (!p0) %0 = memw(r7++#4)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "r7", "p0", "memory"); + return ret; +} + +static inline long long pred_ld_sd(int pred, long long *p, long long *q, + long long x, long long y) +{ + unsigned long long ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "{\n\t" + " memd(%1) = %4\n\t" + " if (!p0) %0 = memd(%2)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "p0", "memory"); + return ret; +} + +static inline long long pred_ld_sd_pi(int pred, long long *p, long long *q, + long long x, long long y) +{ + long long ret; + asm volatile("p0 = cmp.eq(%5, #0)\n\t" + "%0 = %3\n\t" + "r7 = %2\n\t" + "{\n\t" + " memd(%1) = %4\n\t" + " if (!p0) %0 = memd(r7++#8)\n\t" + "}:mem_noshuf\n" + : "=&r"(ret) + : "r"(p), "r"(q), "r"(x), "r"(y), "r"(pred) + : "p0", "memory"); + return ret; +} + static inline unsigned int cancel_sw_lb(int pred, int *p, signed char *q, int x) { unsigned int ret; @@ -126,18 +190,22 @@ typedef union { int err; -static void check32(int n, int expect) +#define check32(n, expect) check32_(n, expect, __LINE__) + +static void check32_(int n, int expect, int line) { if (n != expect) { - printf("ERROR: 0x%08x != 0x%08x\n", n, expect); + printf("ERROR: 0x%08x != 0x%08x, line %d\n", n, expect, line); err++; } } -static void check64(long long n, long long expect) +#define check64(n, expect) check64_(n, expect, __LINE__) + +static void check64_(long long n, long long expect, int line) { if (n != expect) { - printf("ERROR: 0x%08llx != 0x%08llx\n", n, expect); + printf("ERROR: 0x%08llx != 0x%08llx, line %d\n", n, expect, line); err++; } } @@ -323,6 +391,50 @@ int main() res64 = mem_noshuf_sd_ld(&n.d[0], &n.d[1], 0x123456789abcdef0LL); check64(res64, 0xffffffffffffffffLL); + n.w[0] = ~0; + res32 = pred_lw_sw(0, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0x12345678); + check32(n.w[0], 0xc0ffeeda); + + n.w[0] = ~0; + res32 = pred_lw_sw(1, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0xc0ffeeda); + check32(n.w[0], 0xc0ffeeda); + + n.w[0] = ~0; + res32 = pred_lw_sw_pi(0, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0x12345678); + check32(n.w[0], 0xc0ffeeda); + + n.w[0] = ~0; + res32 = pred_lw_sw_pi(1, &n.w[0], &n.w[0], 0x12345678, 0xc0ffeeda); + check32(res32, 0xc0ffeeda); + check32(n.w[0], 0xc0ffeeda); + + n.d[0] = ~0LL; + res64 = pred_ld_sd(0, &n.d[0], &n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0x1234567812345678LL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + + n.d[0] = ~0LL; + res64 = pred_ld_sd(1, &n.d[0], 
&n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0xc0ffeedac0ffeedaLL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + + n.d[0] = ~0LL; + res64 = pred_ld_sd_pi(0, &n.d[0], &n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0x1234567812345678LL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + + n.d[0] = ~0LL; + res64 = pred_ld_sd_pi(1, &n.d[0], &n.d[0], + 0x1234567812345678LL, 0xc0ffeedac0ffeedaLL); + check64(res64, 0xc0ffeedac0ffeedaLL); + check64(n.d[0], 0xc0ffeedac0ffeedaLL); + puts(err ? "FAIL" : "PASS"); return err; } diff --git a/tests/tcg/hexagon/mem_noshuf_exception.c b/tests/tcg/hexagon/mem_noshuf_exception.c new file mode 100644 index 0000000000..08660ea3e1 --- /dev/null +++ b/tests/tcg/hexagon/mem_noshuf_exception.c @@ -0,0 +1,146 @@ +/* + * Copyright(c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Test the VLIW semantics of exceptions with mem_noshuf + * + * When a packet has the :mem_noshuf attribute, the semantics dictate + * That the load will get the data from the store if the addresses overlap. + * To accomplish this, we perform the store first. However, we have to + * handle the case where the store raises an exception. In that case, the + * store should not alter the machine state. + * + * We test this with a mem_noshuf packet with a store to a global variable, + * "should_not_change" and a load from NULL. After the SIGSEGV is caught, + * we check * that the "should_not_change" value is the same. + * + * We also check that a predicated load where the predicate is false doesn't + * raise an exception and allows the store to happen. 
+ */ + +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <sys/types.h> +#include <fcntl.h> +#include <setjmp.h> +#include <signal.h> + +int err; +int segv_caught; + +#define SHOULD_NOT_CHANGE_VAL 5 +int should_not_change = SHOULD_NOT_CHANGE_VAL; + +#define OK_TO_CHANGE_VAL 13 +int ok_to_change = OK_TO_CHANGE_VAL; + +static void __check(const char *filename, int line, int x, int expect) +{ + if (x != expect) { + printf("ERROR %s:%d - %d != %d\n", + filename, line, x, expect); + err++; + } +} + +#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) + +static void __chk_error(const char *filename, int line, int ret) +{ + if (ret < 0) { + printf("ERROR %s:%d - %d\n", filename, line, ret); + err++; + } +} + +#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) + +jmp_buf jmp_env; + +static void sig_segv(int sig, siginfo_t *info, void *puc) +{ + check(sig, SIGSEGV); + segv_caught = 1; + longjmp(jmp_env, 1); +} + +int main() +{ + struct sigaction act; + int dummy32; + long long dummy64; + void *p; + + /* SIGSEGV test */ + act.sa_sigaction = sig_segv; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO; + chk_error(sigaction(SIGSEGV, &act, NULL)); + if (setjmp(jmp_env) == 0) { + asm volatile("r18 = ##should_not_change\n\t" + "r19 = #0\n\t" + "{\n\t" + " memw(r18) = #7\n\t" + " %0 = memw(r19)\n\t" + "}:mem_noshuf\n\t" + : "=r"(dummy32) : : "r18", "r19", "memory"); + } + + act.sa_handler = SIG_DFL; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + chk_error(sigaction(SIGSEGV, &act, NULL)); + + check(segv_caught, 1); + check(should_not_change, SHOULD_NOT_CHANGE_VAL); + + /* + * Check that a predicated load where the predicate is false doesn't + * raise an exception and allows the store to happen. + */ + asm volatile("r18 = ##ok_to_change\n\t" + "r19 = #0\n\t" + "p0 = cmp.gt(r0, r0)\n\t" + "{\n\t" + " memw(r18) = #7\n\t" + " if (p0) %0 = memw(r19)\n\t" + "}:mem_noshuf\n\t" + : "=r"(dummy32) : : "r18", "r19", "p0", "memory"); + + check(ok_to_change, 7); + + /* + * Also check that the post-increment doesn't happen when the + * predicate is false. + */ + ok_to_change = OK_TO_CHANGE_VAL; + p = NULL; + asm volatile("r18 = ##ok_to_change\n\t" + "p0 = cmp.gt(r0, r0)\n\t" + "{\n\t" + " memw(r18) = #9\n\t" + " if (p0) %1 = memd(%0 ++ #8)\n\t" + "}:mem_noshuf\n\t" + : "+r"(p), "=r"(dummy64) : : "r18", "p0", "memory"); + + check(ok_to_change, 9); + check((int)p, (int)NULL); + + puts(err ? "FAIL" : "PASS"); + return err ? 
EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/loongarch64/Makefile.target b/tests/tcg/loongarch64/Makefile.target new file mode 100644 index 0000000000..0115de78ef --- /dev/null +++ b/tests/tcg/loongarch64/Makefile.target @@ -0,0 +1,19 @@ +# -*- Mode: makefile -*- +# +# LoongArch64 specific tweaks + +# Loongarch64 doesn't support gdb, so skip the EXTRA_RUNS +EXTRA_RUNS = + +LOONGARCH64_SRC=$(SRC_PATH)/tests/tcg/loongarch64 +VPATH += $(LOONGARCH64_SRC) + +LDFLAGS+=-lm + +LOONGARCH64_TESTS = test_bit +LOONGARCH64_TESTS += test_div +LOONGARCH64_TESTS += test_fclass +LOONGARCH64_TESTS += test_fpcom +LOONGARCH64_TESTS += test_pcadd + +TESTS += $(LOONGARCH64_TESTS) diff --git a/tests/tcg/loongarch64/float_convd.ref b/tests/tcg/loongarch64/float_convd.ref new file mode 100644 index 0000000000..08d3dfa2fe --- /dev/null +++ b/tests/tcg/loongarch64/float_convd.ref @@ -0,0 +1,988 @@ +### Rounding to nearest +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW 
INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to 
uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from double: f64(nan:0x007ff4000000000000) + 
to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000200000000000000p-25:0x33000001) (INEXACT ) + to 
int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe800000000000000p-25:0x337ffff4) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801c00000000000000p-15:0x387fc00e) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000e00000000000000p-14:0x38800007) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x1.00000000000000000000p-149:0x00000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0aa00000000000000p+1:0x402df855) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb600000000000000p+1:0x40490fdb) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 
65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.00000000000000000000p+31:0x4f000000) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(inf:0x7f800000) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-inf:0xff800000) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: 
f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x1.00000000000000000000p-149:0x80000001) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to 
int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 
2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-nan:0x00fff8000000000000) + to single: f32(-nan:0xffc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(-inf:0x00fff0000000000000) + to single: f32(-inf:0xff800000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffffffffff0000000p+1023:0x00ffefffffffffffff) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OVERFLOW INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) + to single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.1874b135ff6540000000p+103:0x00c661874b135ff654) + to single: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.c0bab523323b90000000p+99:0x00c62c0bab523323b9) + to single: 
f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) (INEXACT ) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from double: f64(-0x1.00000000000000000000p+1:0x00c000000000000000) + to single: f32(-0x1.00000000000000000000p+1:0xc0000000) (OK) + to int32: -2 (OK) + to int64: -2 (OK) + to uint32: -2 (OK) + to uint64: -2 (OK) +from double: f64(-0x1.00000000000000000000p+0:0x00bff0000000000000) + to single: f32(-0x1.00000000000000000000p+0:0xbf800000) (OK) + to int32: -1 (OK) + to int64: -1 (OK) + to uint32: -1 (OK) + to uint64: -1 (OK) +from double: f64(-0x1.00000000000000000000p-1022:0x008010000000000000) + to single: f32(-0x0.00000000000000000000p+0:0x80000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) + to single: f32(-0x1.00000000000000000000p-126:0x80800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.00000000000000000000p+0:00000000000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from double: f64(0x1.00000000000000000000p-126:0x003810000000000000) + to single: f32(0x1.00000000000000000000p-126:0x00800000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000001c5f680000000p-25:0x003e600000001c5f68) + to single: f32(0x1.00000000000000000000p-25:0x33000000) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ffffe6cb2fa820000000p-25:0x003e6ffffe6cb2fa82) + to single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.ff801a9af58a10000000p-15:0x003f0ff801a9af58a1) + to single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000c06a1ef50000000p-14:0x003f100000c06a1ef5) + to single: f32(0x1.00000c00000000000000p-14:0x38800006) (INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) + to single: f32(0x1.00400000000000000000p+0:0x3f802000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from double: f64(0x1.00000000000000000000p-1022:0x000010000000000000) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.9ea82a22876800000000p-1022:0x000009ea82a2287680) + to single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x0.ab98fba8432100000000p-1022:0x00000ab98fba843210) + to 
single: f32(0x0.00000000000000000000p+0:0000000000) (UNDERFLOW INEXACT ) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) + to single: f32(0x1.00000000000000000000p+0:0x3f800000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from double: f64(0x1.00000000000000000000p+1:0x004000000000000000) + to single: f32(0x1.00000000000000000000p+1:0x40000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from double: f64(0x1.5bf0a8b1457690000000p+1:0x004005bf0a8b145769) + to single: f32(0x1.5bf0a800000000000000p+1:0x402df854) (INEXACT ) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from double: f64(0x1.921fb54442d180000000p+1:0x00400921fb54442d18) + to single: f32(0x1.921fb400000000000000p+1:0x40490fda) (INEXACT ) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) + to single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) + to single: f32(0x1.ffc00000000000000000p+15:0x477fe000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) + to single: f32(0x1.ffc20000000000000000p+15:0x477fe100) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) + to single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) + to single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) + to single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from double: f64(0x1.fffffffc000000000000p+30:0x0041dfffffffc00000) + to single: f32(0x1.fffffe00000000000000p+30:0x4effffff) (INEXACT ) + to int32: 2147483647 (OK) + to int64: 2147483647 (OK) + to uint32: 2147483647 (OK) + to uint64: 2147483647 (OK) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: f64(0x1.fffffffffffff0000000p+1023:0x007fefffffffffffff) + to single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) (OVERFLOW INEXACT ) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from double: 
f64(inf:0x007ff0000000000000) + to single: f32(inf:0x7f800000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from double: f64(nan:0x007ff8000000000000) + to single: f32(nan:0x7fc00000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff0000000000001) + to single: f32(nan:0x7fc00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from double: f64(nan:0x007ff4000000000000) + to single: f32(nan:0x7fe00000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/loongarch64/float_convs.ref b/tests/tcg/loongarch64/float_convs.ref new file mode 100644 index 0000000000..66c7679dec --- /dev/null +++ b/tests/tcg/loongarch64/float_convs.ref @@ -0,0 +1,748 @@ +### Rounding to nearest +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to 
int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to 
double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding upwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to 
double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 
131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding downwards +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to 
uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: 
f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +### Rounding to zero +from single: f32(-nan:0xffa00000) + to double: f64(-nan:0x00fffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-nan:0xffc00000) + to double: f64(-nan:0x00fff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(-inf:0xff800000) + to double: f64(-inf:0x00fff0000000000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + to double: f64(-0x1.fffffe00000000000000p+127:0x00c7efffffe0000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + to double: f64(-0x1.1874b200000000000000p+103:0x00c661874b20000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + to double: f64(-0x1.c0bab600000000000000p+99:0x00c62c0bab60000000) (OK) + to int32: -2147483648 (INVALID) + to int64: -9223372036854775808 (INVALID) + to uint32: -2147483648 (INVALID) + to uint64: -9223372036854775808 (INVALID) +from single: f32(-0x1.31f75000000000000000p-40:0xab98fba8) + to double: f64(-0x1.31f75000000000000000p-40:0x00bd731f7500000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.50544400000000000000p-66:0x9ea82a22) + to double: f64(-0x1.50544400000000000000p-66:0x00bbd5054440000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(-0x1.00000000000000000000p-126:0x80800000) + to double: f64(-0x1.00000000000000000000p-126:0x00b810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: 
f32(0x0.00000000000000000000p+0:0000000000) + to double: f64(0x0.00000000000000000000p+0:00000000000000000000) (OK) + to int32: 0 (OK) + to int64: 0 (OK) + to uint32: 0 (OK) + to uint64: 0 (OK) +from single: f32(0x1.00000000000000000000p-126:0x00800000) + to double: f64(0x1.00000000000000000000p-126:0x003810000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p-25:0x33000000) + to double: f64(0x1.00000000000000000000p-25:0x003e60000000000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ffffe600000000000000p-25:0x337ffff3) + to double: f64(0x1.ffffe600000000000000p-25:0x003e6ffffe60000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.ff801a00000000000000p-15:0x387fc00d) + to double: f64(0x1.ff801a00000000000000p-15:0x003f0ff801a0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000c00000000000000p-14:0x38800006) + to double: f64(0x1.00000c00000000000000p-14:0x003f100000c0000000) (OK) + to int32: 0 (INEXACT ) + to int64: 0 (INEXACT ) + to uint32: 0 (INEXACT ) + to uint64: 0 (INEXACT ) +from single: f32(0x1.00000000000000000000p+0:0x3f800000) + to double: f64(0x1.00000000000000000000p+0:0x003ff0000000000000) (OK) + to int32: 1 (OK) + to int64: 1 (OK) + to uint32: 1 (OK) + to uint64: 1 (OK) +from single: f32(0x1.00400000000000000000p+0:0x3f802000) + to double: f64(0x1.00400000000000000000p+0:0x003ff0040000000000) (OK) + to int32: 1 (INEXACT ) + to int64: 1 (INEXACT ) + to uint32: 1 (INEXACT ) + to uint64: 1 (INEXACT ) +from single: f32(0x1.00000000000000000000p+1:0x40000000) + to double: f64(0x1.00000000000000000000p+1:0x004000000000000000) (OK) + to int32: 2 (OK) + to int64: 2 (OK) + to uint32: 2 (OK) + to uint64: 2 (OK) +from single: f32(0x1.5bf0a800000000000000p+1:0x402df854) + to double: f64(0x1.5bf0a800000000000000p+1:0x004005bf0a80000000) (OK) + to int32: 2 (INEXACT ) + to int64: 2 (INEXACT ) + to uint32: 2 (INEXACT ) + to uint64: 2 (INEXACT ) +from single: f32(0x1.921fb600000000000000p+1:0x40490fdb) + to double: f64(0x1.921fb600000000000000p+1:0x00400921fb60000000) (OK) + to int32: 3 (INEXACT ) + to int64: 3 (INEXACT ) + to uint32: 3 (INEXACT ) + to uint64: 3 (INEXACT ) +from single: f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + to double: f64(0x1.ffbe0000000000000000p+15:0x0040effbe000000000) (OK) + to int32: 65503 (OK) + to int64: 65503 (OK) + to uint32: 65503 (OK) + to uint64: 65503 (OK) +from single: f32(0x1.ffc00000000000000000p+15:0x477fe000) + to double: f64(0x1.ffc00000000000000000p+15:0x0040effc0000000000) (OK) + to int32: 65504 (OK) + to int64: 65504 (OK) + to uint32: 65504 (OK) + to uint64: 65504 (OK) +from single: f32(0x1.ffc20000000000000000p+15:0x477fe100) + to double: f64(0x1.ffc20000000000000000p+15:0x0040effc2000000000) (OK) + to int32: 65505 (OK) + to int64: 65505 (OK) + to uint32: 65505 (OK) + to uint64: 65505 (OK) +from single: f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + to double: f64(0x1.ffbf0000000000000000p+16:0x0040fffbf000000000) (OK) + to int32: 131007 (OK) + to int64: 131007 (OK) + to uint32: 131007 (OK) + to uint64: 131007 (OK) +from single: f32(0x1.ffc00000000000000000p+16:0x47ffe000) + to double: f64(0x1.ffc00000000000000000p+16:0x0040fffc0000000000) (OK) + to int32: 131008 (OK) + to int64: 
131008 (OK) + to uint32: 131008 (OK) + to uint64: 131008 (OK) +from single: f32(0x1.ffc10000000000000000p+16:0x47ffe080) + to double: f64(0x1.ffc10000000000000000p+16:0x0040fffc1000000000) (OK) + to int32: 131009 (OK) + to int64: 131009 (OK) + to uint32: 131009 (OK) + to uint64: 131009 (OK) +from single: f32(0x1.c0bab600000000000000p+99:0x71605d5b) + to double: f64(0x1.c0bab600000000000000p+99:0x00462c0bab60000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + to double: f64(0x1.fffffe00000000000000p+127:0x0047efffffe0000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INEXACT INVALID) + to uint64: -1 (INEXACT INVALID) +from single: f32(inf:0x7f800000) + to double: f64(inf:0x007ff0000000000000) (OK) + to int32: 2147483647 (INVALID) + to int64: 9223372036854775807 (INVALID) + to uint32: -1 (INVALID) + to uint64: -1 (INVALID) +from single: f32(nan:0x7fc00000) + to double: f64(nan:0x007ff8000000000000) (OK) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) +from single: f32(nan:0x7fa00000) + to double: f64(nan:0x007ffc000000000000) (INVALID) + to int32: 0 (INVALID) + to int64: 0 (INVALID) + to uint32: 0 (INVALID) + to uint64: 0 (INVALID) diff --git a/tests/tcg/loongarch64/float_madds.ref b/tests/tcg/loongarch64/float_madds.ref new file mode 100644 index 0000000000..21c0539887 --- /dev/null +++ b/tests/tcg/loongarch64/float_madds.ref @@ -0,0 +1,768 @@ +### Rounding to nearest +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT 
(3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27fa00000000000000p+60:0x5d8613fd) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46200000000000000p+34:0x50936231) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f94000000000000000p-106:0x0ac8fca0) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f75000000000000000p-40:0xab98fba8) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * 
f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + 
f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804200000000000000p+3:0x41094021) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3c00000000000000p+17:0x4848f69e) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edf000000000000000p+18:0x488476f8) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7a00000000000000p+18:0x4884773d) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : 
f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840800000000000000p+31:0x4f7fc204) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+31:0x4f7fc104) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860800000000000000p+31:0x4f7fc304) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+32:0x4fffc104) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830800000000000000p+32:0x4fffc184) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840800000000000000p+32:0x4fffc204) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820800000000000000p+33:0x507fc104) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810800000000000000p+33:0x507fc084) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0838000000000000000p+116:0x79e041c0) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + 
f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) +### Rounding upwards +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * 
f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27fa00000000000000p+60:0x5d8613fd) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46200000000000000p+34:0x50936231) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f94000000000000000p-106:0x0ac8fca0) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f74e00000000000000p-40:0xab98fba7) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : 
f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe800000000000000p-25:0x337ffff4) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe800000000000000p-50:0x26fffff4) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000200000000000000p-25:0x33000001) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00080000000000000000p-25:0x33000400) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f400000000000000p-24:0x338000fa) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000e00000000000000p-14:0x38800007) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf600000000000000p-24:0x3387fdfb) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801c00000000000000p-15:0x387fc00e) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000200000000000000p+0:0x3f800001) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * 
f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01a00000000000000p-14:0x38ffe00d) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01a00000000000000p-14:0x38ffe00d) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440200000000000000p+0:0x3f802201) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040200000000000000p+0:0x3f800201) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d400000000000000p+2:0x409711ea) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804200000000000000p+3:0x41094021) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458200000000000000p+3:0x4128a2c1) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0600000000000000p+3:0x41100603) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1600000000000000p+15:0x477fe78b) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3c00000000000000p+17:0x4848f69e) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56200000000000000p+17:0x482de2b1) flags=INEXACT 
(18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edf000000000000000p+18:0x488476f8) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0a00000000000000p+31:0x4f7fbf05) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7a00000000000000p+18:0x4884773d) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800a00000000000000p+31:0x4f7fc005) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840800000000000000p+31:0x4f7fc204) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+31:0x4f7fc104) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860800000000000000p+31:0x4f7fc304) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820800000000000000p+32:0x4fffc104) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800a00000000000000p+32:0x4fffc005) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830800000000000000p+32:0x4fffc184) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8a00000000000000p+33:0x507fbfc5) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840800000000000000p+32:0x4fffc204) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800a00000000000000p+33:0x507fc005) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820800000000000000p+33:0x507fc104) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810800000000000000p+33:0x507fc084) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab800000000000000p+99:0x71605d5c) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * 
f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0838000000000000000p+116:0x79e041c0) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c082a000000000000000p+116:0x79e04150) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-148:0x00000002) flags=UNDERFLOW INEXACT (32/0) +### Rounding downwards +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op 
: f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b200000000000000p+103:0xf30c3a59) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (4/2) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27f800000000000000p+60:0x5d8613fc) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46000000000000000p+34:0x50936230) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f93e00000000000000p-106:0x0ac8fc9f) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) 
+res: f32(-0x1.31f75000000000000000p-40:0xab98fba8) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x1.00000000000000000000p-149:0x80000001) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT 
(11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804000000000000000p+3:0x41094020) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) 
+res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3a00000000000000p+17:0x4848f69d) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edee00000000000000p+18:0x488476f7) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7800000000000000p+18:0x4884773c) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840600000000000000p+31:0x4f7fc203) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+31:0x4f7fc103) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860600000000000000p+31:0x4f7fc303) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+32:0x4fffc103) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830600000000000000p+32:0x4fffc183) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840600000000000000p+32:0x4fffc203) flags=INEXACT (22/2) +op : 
f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820600000000000000p+33:0x507fc103) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810600000000000000p+33:0x507fc083) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0837e00000000000000p+116:0x79e041bf) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: 
f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) +### Rounding to zero +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffe00000) flags=INVALID (0/0) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/1) +op : f32(-inf:0xff800000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(-nan:0xffe00000) flags=INVALID (0/2) +op : f32(-nan:0xffc00000) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(-nan:0xffc00000) flags=OK (1/0) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-nan:0xffc00000) +res: f32(-nan:0xffc00000) flags=OK (1/1) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-nan:0xffc00000) + f32(-inf:0xff800000) +res: f32(-nan:0xffc00000) flags=OK (1/2) +op : f32(-inf:0xff800000) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(inf:0x7f800000) flags=OK (2/0) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-inf:0xff800000) +res: f32(-inf:0xff800000) flags=OK (2/1) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-inf:0xff800000) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(inf:0x7f800000) flags=OK (2/2) +op : f32(-0x1.fffffe00000000000000p+127:0xff7fffff) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/0) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.fffffe00000000000000p+127:0xff7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/1) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.fffffe00000000000000p+127:0xff7fffff) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (3/2) +op : f32(-0x1.1874b200000000000000p+103:0xf30c3a59) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (4/0) +op : f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.1874b200000000000000p+103:0xf30c3a59) +res: f32(-0x1.1874b000000000000000p+103:0xf30c3a58) flags=INEXACT (4/1) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.1874b200000000000000p+103:0xf30c3a59) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (4/2) +op : 
f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(0x1.0c27f800000000000000p+60:0x5d8613fc) flags=INEXACT (5/0) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) +res: f32(-0x1.c0bab400000000000000p+99:0xf1605d5a) flags=INEXACT (5/1) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.c0bab600000000000000p+99:0xf1605d5b) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(0x1.26c46000000000000000p+34:0x50936230) flags=INEXACT (5/2) +op : f32(-0x1.31f75000000000000000p-40:0xab98fba8) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(0x1.91f93e00000000000000p-106:0x0ac8fc9f) flags=INEXACT (6/0) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(-0x1.31f75000000000000000p-40:0xab98fba8) +res: f32(-0x1.31f74e00000000000000p-40:0xab98fba7) flags=INEXACT (6/1) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(-0x1.31f75000000000000000p-40:0xab98fba8) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544200000000000000p-66:0x9ea82a21) flags=INEXACT (6/2) +op : f32(-0x1.50544400000000000000p-66:0x9ea82a22) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (7/0) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(-0x1.50544400000000000000p-66:0x9ea82a22) +res: f32(-0x1.50544400000000000000p-66:0x9ea82a22) flags=OK (7/1) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(-0x1.50544400000000000000p-66:0x9ea82a22) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (7/2) +op : f32(-0x1.00000000000000000000p-126:0x80800000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (8/0) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(-0x1.00000000000000000000p-126:0x80800000) +res: f32(-0x1.00000000000000000000p-126:0x80800000) flags=OK (8/1) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(-0x1.00000000000000000000p-126:0x80800000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(-0x0.00000000000000000000p+0:0x80000000) flags=UNDERFLOW INEXACT (8/2) +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=OK (9/0) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=UNDERFLOW INEXACT (9/1) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x0.00000000000000000000p+0:0000000000) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.00000000000000000000p-126:0x00800000) flags=OK (9/2) +op : f32(0x1.00000000000000000000p-126:0x00800000) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.ffffe600000000000000p-25:0x337ffff3) flags=INEXACT (10/0) +op : f32(0x1.00000000000000000000p-25:0x33000000) * 
f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.00000000000000000000p-126:0x00800000) +res: f32(0x1.ffffe600000000000000p-50:0x26fffff3) flags=INEXACT (10/1) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.00000000000000000000p-126:0x00800000) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.00000000000000000000p-25:0x33000000) flags=INEXACT (10/2) +op : f32(0x1.00000000000000000000p-25:0x33000000) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (11/0) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000000000000000000p-25:0x33000000) +res: f32(0x1.0007fe00000000000000p-25:0x330003ff) flags=INEXACT (11/1) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000000000000000000p-25:0x33000000) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0001f200000000000000p-24:0x338000f9) flags=INEXACT (11/2) +op : f32(0x1.ffffe600000000000000p-25:0x337ffff3) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00000c00000000000000p-14:0x38800006) flags=INEXACT (12/0) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.ffffe600000000000000p-25:0x337ffff3) +res: f32(0x1.0ffbf400000000000000p-24:0x3387fdfa) flags=INEXACT (12/1) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.ffffe600000000000000p-25:0x337ffff3) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ff801a00000000000000p-15:0x387fc00d) flags=INEXACT (12/2) +op : f32(0x1.ff801a00000000000000p-15:0x387fc00d) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00000000000000000000p+0:0x3f800000) flags=INEXACT (13/0) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.ff801a00000000000000p-15:0x387fc00d) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/1) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.ff801a00000000000000p-15:0x387fc00d) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.ffc01800000000000000p-14:0x38ffe00c) flags=INEXACT (13/2) +op : f32(0x1.00000c00000000000000p-14:0x38800006) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/0) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000c00000000000000p-14:0x38800006) +res: f32(0x1.00440000000000000000p+0:0x3f802200) flags=INEXACT (14/1) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000c00000000000000p-14:0x38800006) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.00040000000000000000p+0:0x3f800200) flags=INEXACT (14/2) +op : f32(0x1.00000000000000000000p+0:0x3f800000) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/0) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.00000000000000000000p+0:0x3f800000) +res: f32(0x1.80400000000000000000p+1:0x40402000) flags=OK (15/1) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.00000000000000000000p+0:0x3f800000) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: 
f32(0x1.80200000000000000000p+1:0x40401000) flags=OK (15/2) +op : f32(0x1.00400000000000000000p+0:0x3f802000) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.2e185400000000000000p+2:0x40970c2a) flags=OK (16/0) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.00400000000000000000p+0:0x3f802000) +res: f32(0x1.9c00a800000000000000p+2:0x40ce0054) flags=OK (16/1) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.00400000000000000000p+0:0x3f802000) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.2e23d200000000000000p+2:0x409711e9) flags=INEXACT (16/2) +op : f32(0x1.00000000000000000000p+1:0x40000000) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.12804000000000000000p+3:0x41094020) flags=INEXACT (17/0) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.00000000000000000000p+1:0x40000000) +res: f32(0x1.51458000000000000000p+3:0x4128a2c0) flags=INEXACT (17/1) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.00000000000000000000p+1:0x40000000) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.200c0400000000000000p+3:0x41100602) flags=INEXACT (17/2) +op : f32(0x1.5bf0a800000000000000p+1:0x402df854) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ffcf1400000000000000p+15:0x477fe78a) flags=INEXACT (18/0) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.5bf0a800000000000000p+1:0x402df854) +res: f32(0x1.91ed3a00000000000000p+17:0x4848f69d) flags=INEXACT (18/1) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.5bf0a800000000000000p+1:0x402df854) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.5bc56000000000000000p+17:0x482de2b0) flags=INEXACT (18/2) +op : f32(0x1.921fb600000000000000p+1:0x40490fdb) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.08edee00000000000000p+18:0x488476f7) flags=INEXACT (19/0) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.921fb600000000000000p+1:0x40490fdb) +res: f32(0x1.ff7e0800000000000000p+31:0x4f7fbf04) flags=INEXACT (19/1) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.921fb600000000000000p+1:0x40490fdb) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.08ee7800000000000000p+18:0x4884773c) flags=INEXACT (19/2) +op : f32(0x1.ffbe0000000000000000p+15:0x477fdf00) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+31:0x4f7fc004) flags=INEXACT (20/0) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbe0000000000000000p+15:0x477fdf00) +res: f32(0x1.ff840600000000000000p+31:0x4f7fc203) flags=INEXACT (20/1) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbe0000000000000000p+15:0x477fdf00) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+31:0x4f7fc103) flags=INEXACT (20/2) +op : f32(0x1.ffc00000000000000000p+15:0x477fe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff860600000000000000p+31:0x4f7fc303) flags=INEXACT (21/0) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * 
f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+15:0x477fe000) +res: f32(0x1.ff820600000000000000p+32:0x4fffc103) flags=INEXACT (21/1) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+15:0x477fe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff800800000000000000p+32:0x4fffc004) flags=INEXACT (21/2) +op : f32(0x1.ffc20000000000000000p+15:0x477fe100) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff830600000000000000p+32:0x4fffc183) flags=INEXACT (22/0) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc20000000000000000p+15:0x477fe100) +res: f32(0x1.ff7f8800000000000000p+33:0x507fbfc4) flags=INEXACT (22/1) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc20000000000000000p+15:0x477fe100) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff840600000000000000p+32:0x4fffc203) flags=INEXACT (22/2) +op : f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.ff800800000000000000p+33:0x507fc004) flags=INEXACT (23/0) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) +res: f32(0x1.ff820600000000000000p+33:0x507fc103) flags=INEXACT (23/1) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.ffbf0000000000000000p+16:0x47ffdf80) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.ff810600000000000000p+33:0x507fc083) flags=INEXACT (23/2) +op : f32(0x1.ffc00000000000000000p+16:0x47ffe000) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.c0bab600000000000000p+99:0x71605d5b) flags=INEXACT (24/0) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.ffc00000000000000000p+16:0x47ffe000) +res: f32(0x1.c0837e00000000000000p+116:0x79e041bf) flags=INEXACT (24/1) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.ffc00000000000000000p+16:0x47ffe000) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.c0829e00000000000000p+116:0x79e0414f) flags=INEXACT (24/2) +op : f32(0x1.ffc10000000000000000p+16:0x47ffe080) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/0) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(0x1.ffc10000000000000000p+16:0x47ffe080) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/1) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(0x1.ffc10000000000000000p+16:0x47ffe080) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(0x1.fffffe00000000000000p+127:0x7f7fffff) flags=OVERFLOW INEXACT (25/2) +op : f32(0x1.c0bab600000000000000p+99:0x71605d5b) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(inf:0x7f800000) flags=OK (26/0) +op : f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(0x1.c0bab600000000000000p+99:0x71605d5b) +res: f32(inf:0x7f800000) flags=OK (26/1) +op : f32(inf:0x7f800000) * f32(0x1.c0bab600000000000000p+99:0x71605d5b) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(inf:0x7f800000) flags=OK (26/2) +op : 
f32(0x1.fffffe00000000000000p+127:0x7f7fffff) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fc00000) flags=OK (27/0) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(0x1.fffffe00000000000000p+127:0x7f7fffff) +res: f32(nan:0x7fc00000) flags=OK (27/1) +op : f32(nan:0x7fc00000) * f32(0x1.fffffe00000000000000p+127:0x7f7fffff) + f32(inf:0x7f800000) +res: f32(nan:0x7fc00000) flags=OK (27/2) +op : f32(inf:0x7f800000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/0) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(inf:0x7f800000) +res: f32(nan:0x7fe00000) flags=INVALID (28/1) +op : f32(nan:0x7fa00000) * f32(inf:0x7f800000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (28/2) +op : f32(nan:0x7fc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (29/0) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(nan:0x7fc00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/1) +op : f32(-nan:0xffa00000) * f32(nan:0x7fc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (29/2) +op : f32(nan:0x7fa00000) * f32(-nan:0xffa00000) + f32(-nan:0xffc00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/0) +op : f32(-nan:0xffa00000) * f32(-nan:0xffc00000) + f32(nan:0x7fa00000) +res: f32(nan:0x7fe00000) flags=INVALID (30/1) +op : f32(-nan:0xffc00000) * f32(nan:0x7fa00000) + f32(-nan:0xffa00000) +res: f32(-nan:0xffe00000) flags=INVALID (30/2) +# LP184149 +op : f32(0x0.00000000000000000000p+0:0000000000) * f32(0x1.00000000000000000000p-1:0x3f000000) + f32(0x0.00000000000000000000p+0:0000000000) +res: f32(0x0.00000000000000000000p+0:0000000000) flags=OK (31/0) +op : f32(0x1.00000000000000000000p-149:0x00000001) * f32(0x1.00000000000000000000p-149:0x00000001) + f32(0x1.00000000000000000000p-149:0x00000001) +res: f32(0x1.00000000000000000000p-149:0x00000001) flags=UNDERFLOW INEXACT (32/0) diff --git a/tests/tcg/loongarch64/test_bit.c b/tests/tcg/loongarch64/test_bit.c new file mode 100644 index 0000000000..a6d9904909 --- /dev/null +++ b/tests/tcg/loongarch64/test_bit.c @@ -0,0 +1,88 @@ +#include <assert.h> +#include <inttypes.h> + +#define ARRAY_SIZE(X) (sizeof(X) / sizeof(*(X))) +#define TEST_CLO(N) \ +static uint64_t test_clo_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("clo."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +#define TEST_CLZ(N) \ +static uint64_t test_clz_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("clz."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +#define TEST_CTO(N) \ +static uint64_t test_cto_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("cto."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +#define TEST_CTZ(N) \ +static uint64_t test_ctz_##N(uint64_t rj) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("ctz."#N" %0, %1\n\t" \ + : "=r"(rd) \ + : "r"(rj) \ + : ); \ + return rd; \ +} + +TEST_CLO(w) +TEST_CLO(d) +TEST_CLZ(w) +TEST_CLZ(d) +TEST_CTO(w) +TEST_CTO(d) +TEST_CTZ(w) +TEST_CTZ(d) + +struct vector { + uint64_t (*func)(uint64_t); + uint64_t u; + uint64_t r; +}; + +static struct vector vectors[] = { + {test_clo_w, 0xfff11fff392476ab, 0}, + {test_clo_d, 0xabd28a64000000, 0}, + {test_clz_w, 0xfaffff42392476ab, 2}, + {test_clz_d, 0xabd28a64000000, 8}, + {test_cto_w, 0xfff11fff392476ab, 2}, + {test_cto_d, 0xabd28a64000000, 0}, + {test_ctz_w, 0xfaffff42392476ab, 0}, + {test_ctz_d, 0xabd28a64000000, 26}, +}; + +int main() +{ + int 
i; + + for (i = 0; i < ARRAY_SIZE(vectors); i++) { + assert((*vectors[i].func)(vectors[i].u) == vectors[i].r); + } + + return 0; +} diff --git a/tests/tcg/loongarch64/test_div.c b/tests/tcg/loongarch64/test_div.c new file mode 100644 index 0000000000..6c31fe97ae --- /dev/null +++ b/tests/tcg/loongarch64/test_div.c @@ -0,0 +1,54 @@ +#include <assert.h> +#include <inttypes.h> +#include <stdio.h> + +#define TEST_DIV(N, M) \ +static void test_div_ ##N(uint ## M ## _t rj, \ + uint ## M ## _t rk, \ + uint64_t rm) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("div."#N" %0,%1,%2\n\t" \ + : "=r"(rd) \ + : "r"(rj), "r"(rk) \ + : ); \ + assert(rd == rm); \ +} + +#define TEST_MOD(N, M) \ +static void test_mod_ ##N(uint ## M ## _t rj, \ + uint ## M ## _t rk, \ + uint64_t rm) \ +{ \ + uint64_t rd = 0; \ + \ + asm volatile("mod."#N" %0,%1,%2\n\t" \ + : "=r"(rd) \ + : "r"(rj), "r"(rk) \ + : ); \ + assert(rd == rm); \ +} + +TEST_DIV(w, 32) +TEST_DIV(wu, 32) +TEST_DIV(d, 64) +TEST_DIV(du, 64) +TEST_MOD(w, 32) +TEST_MOD(wu, 32) +TEST_MOD(d, 64) +TEST_MOD(du, 64) + +int main(void) +{ + test_div_w(0xffaced97, 0xc36abcde, 0x0); + test_div_wu(0xffaced97, 0xc36abcde, 0x1); + test_div_d(0xffaced973582005f, 0xef56832a358b, 0xffffffffffffffa8); + test_div_du(0xffaced973582005f, 0xef56832a358b, 0x11179); + test_mod_w(0x7cf18c32, 0xa04da650, 0x1d3f3282); + test_mod_wu(0x7cf18c32, 0xc04da650, 0x7cf18c32); + test_mod_d(0x7cf18c3200000000, 0xa04da65000000000, 0x1d3f328200000000); + test_mod_du(0x7cf18c3200000000, 0xc04da65000000000, 0x7cf18c3200000000); + + return 0; +} diff --git a/tests/tcg/loongarch64/test_fclass.c b/tests/tcg/loongarch64/test_fclass.c new file mode 100644 index 0000000000..7ba1d2c151 --- /dev/null +++ b/tests/tcg/loongarch64/test_fclass.c @@ -0,0 +1,130 @@ +#include <stdio.h> + +/* float class */ +#define FLOAT_CLASS_SIGNALING_NAN 0x001 +#define FLOAT_CLASS_QUIET_NAN 0x002 +#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 +#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 +#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 +#define FLOAT_CLASS_NEGATIVE_ZERO 0x020 +#define FLOAT_CLASS_POSITIVE_INFINITY 0x040 +#define FLOAT_CLASS_POSITIVE_NORMAL 0x080 +#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 +#define FLOAT_CLASS_POSITIVE_ZERO 0x200 + +#define TEST_FCLASS(N) \ +void test_fclass_##N(long s) \ +{ \ + double fd; \ + long rd; \ + \ + asm volatile("fclass."#N" %0, %2\n\t" \ + "movfr2gr."#N" %1, %2\n\t" \ + : "=f"(fd), "=r"(rd) \ + : "f"(s) \ + : ); \ + switch (rd) { \ + case FLOAT_CLASS_SIGNALING_NAN: \ + case FLOAT_CLASS_QUIET_NAN: \ + case FLOAT_CLASS_NEGATIVE_INFINITY: \ + case FLOAT_CLASS_NEGATIVE_NORMAL: \ + case FLOAT_CLASS_NEGATIVE_SUBNORMAL: \ + case FLOAT_CLASS_NEGATIVE_ZERO: \ + case FLOAT_CLASS_POSITIVE_INFINITY: \ + case FLOAT_CLASS_POSITIVE_NORMAL: \ + case FLOAT_CLASS_POSITIVE_SUBNORMAL: \ + case FLOAT_CLASS_POSITIVE_ZERO: \ + break; \ + default: \ + printf("fclass."#N" test failed.\n"); \ + break; \ + } \ +} + +/* + * float format + * type | S | Exponent | Fraction | example value + * 31 | 30 --23 | 22 | 21 --0 | + * | bit | + * SNAN 0/1 | 0xFF | 0 | !=0 | 0x7FBFFFFF + * QNAN 0/1 | 0xFF | 1 | | 0x7FCFFFFF + * -infinity 1 | 0xFF | 0 | 0xFF800000 + * -normal 1 | [1, 0xFE] | [0, 0x7FFFFF]| 0xFF7FFFFF + * -subnormal 1 | 0 | !=0 | 0x807FFFFF + * -0 1 | 0 | 0 | 0x80000000 + * +infinity 0 | 0xFF | 0 | 0x7F800000 + * +normal 0 | [1, 0xFE] | [0, 0x7FFFFF]| 0x7F7FFFFF + * +subnormal 0 | 0 | !=0 | 0x007FFFFF + * +0 0 | 0 | 0 | 0x00000000 + */ + +long float_snan = 0x7FBFFFFF; +long float_qnan = 0x7FCFFFFF; +long 
float_neg_infinity = 0xFF800000; +long float_neg_normal = 0xFF7FFFFF; +long float_neg_subnormal = 0x807FFFFF; +long float_neg_zero = 0x80000000; +long float_post_infinity = 0x7F800000; +long float_post_normal = 0x7F7FFFFF; +long float_post_subnormal = 0x007FFFFF; +long float_post_zero = 0x00000000; + +/* + * double format + * type | S | Exponent | Fraction | example value + * 63 | 62 -- 52 | 51 | 50 -- 0 | + * | bit | + * SNAN 0/1 | 0x7FF | 0 | !=0 | 0x7FF7FFFFFFFFFFFF + * QNAN 0/1 | 0x7FF | 1 | | 0x7FFFFFFFFFFFFFFF + * -infinity 1 | 0x7FF | 0 | 0xFFF0000000000000 + * -normal 1 |[1, 0x7FE] | | 0xFFEFFFFFFFFFFFFF + * -subnormal 1 | 0 | !=0 | 0x8007FFFFFFFFFFFF + * -0 1 | 0 | 0 | 0x8000000000000000 + * +infinity 0 | 0x7FF | 0 | 0x7FF0000000000000 + * +normal 0 |[1, 0x7FE] | | 0x7FEFFFFFFFFFFFFF + * +subnormal 0 | 0 | !=0 | 0x000FFFFFFFFFFFFF + * +0 0 | 0 | 0 | 0x0000000000000000 + */ + +long double_snan = 0x7FF7FFFFFFFFFFFF; +long double_qnan = 0x7FFFFFFFFFFFFFFF; +long double_neg_infinity = 0xFFF0000000000000; +long double_neg_normal = 0xFFEFFFFFFFFFFFFF; +long double_neg_subnormal = 0x8007FFFFFFFFFFFF; +long double_neg_zero = 0x8000000000000000; +long double_post_infinity = 0x7FF0000000000000; +long double_post_normal = 0x7FEFFFFFFFFFFFFF; +long double_post_subnormal = 0x000FFFFFFFFFFFFF; +long double_post_zero = 0x0000000000000000; + +TEST_FCLASS(s) +TEST_FCLASS(d) + +int main() +{ + /* fclass.s */ + test_fclass_s(float_snan); + test_fclass_s(float_qnan); + test_fclass_s(float_neg_infinity); + test_fclass_s(float_neg_normal); + test_fclass_s(float_neg_subnormal); + test_fclass_s(float_neg_zero); + test_fclass_s(float_post_infinity); + test_fclass_s(float_post_normal); + test_fclass_s(float_post_subnormal); + test_fclass_s(float_post_zero); + + /* fclass.d */ + test_fclass_d(double_snan); + test_fclass_d(double_qnan); + test_fclass_d(double_neg_infinity); + test_fclass_d(double_neg_normal); + test_fclass_d(double_neg_subnormal); + test_fclass_d(double_neg_zero); + test_fclass_d(double_post_infinity); + test_fclass_d(double_post_normal); + test_fclass_d(double_post_subnormal); + test_fclass_d(double_post_zero); + + return 0; +} diff --git a/tests/tcg/loongarch64/test_fpcom.c b/tests/tcg/loongarch64/test_fpcom.c new file mode 100644 index 0000000000..9e81f767f9 --- /dev/null +++ b/tests/tcg/loongarch64/test_fpcom.c @@ -0,0 +1,37 @@ +#include <assert.h> + +#define TEST_COMP(N) \ +void test_##N(float fj, float fk) \ +{ \ + int rd = 0; \ + \ + asm volatile("fcmp."#N".s $fcc6,%1,%2\n" \ + "movcf2gr %0, $fcc6\n" \ + : "=r"(rd) \ + : "f"(fj), "f"(fk) \ + : ); \ + assert(rd == 1); \ +} + +TEST_COMP(ceq) +TEST_COMP(clt) +TEST_COMP(cle) +TEST_COMP(cne) +TEST_COMP(seq) +TEST_COMP(slt) +TEST_COMP(sle) +TEST_COMP(sne) + +int main() +{ + test_ceq(0xff700102, 0xff700102); + test_clt(0x00730007, 0xff730007); + test_cle(0xff70130a, 0xff70130b); + test_cne(0x1238acde, 0xff71111f); + test_seq(0xff766618, 0xff766619); + test_slt(0xff78881c, 0xff78901d); + test_sle(0xff780b22, 0xff790b22); + test_sne(0xff7bcd25, 0xff7a26cf); + + return 0; +} diff --git a/tests/tcg/loongarch64/test_pcadd.c b/tests/tcg/loongarch64/test_pcadd.c new file mode 100644 index 0000000000..da2a64db82 --- /dev/null +++ b/tests/tcg/loongarch64/test_pcadd.c @@ -0,0 +1,38 @@ +#include <assert.h> +#include <inttypes.h> +#include <string.h> + +#define TEST_PCADDU(N) \ +void test_##N(int a) \ +{ \ + uint64_t rd1 = 0; \ + uint64_t rd2 = 0; \ + uint64_t rm, rn; \ + \ + asm volatile(""#N" %0, 0x104\n\t" \ + ""#N" %1, 0x12345\n\t" \ + : "=r"(rd1), 
"=r"(rd2) \ + : ); \ + rm = rd2 - rd1; \ + if (!strcmp(#N, "pcalau12i")) { \ + rn = ((0x12345UL - 0x104) << a) & ~0xfff; \ + } else { \ + rn = ((0x12345UL - 0x104) << a) + 4; \ + } \ + assert(rm == rn); \ +} + +TEST_PCADDU(pcaddi) +TEST_PCADDU(pcaddu12i) +TEST_PCADDU(pcaddu18i) +TEST_PCADDU(pcalau12i) + +int main() +{ + test_pcaddi(2); + test_pcaddu12i(12); + test_pcaddu18i(18); + test_pcalau12i(12); + + return 0; +} diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target index 3124172736..1a7a4a2f59 100644 --- a/tests/tcg/s390x/Makefile.target +++ b/tests/tcg/s390x/Makefile.target @@ -17,6 +17,13 @@ TESTS+=trap TESTS+=signals-s390x TESTS+=branch-relative-long +Z14_TESTS=vfminmax +vfminmax: LDFLAGS+=-lm +$(Z14_TESTS): CFLAGS+=-march=z14 -O2 + +TESTS+=$(if $(shell $(CC) -march=z14 -S -o /dev/null -xc /dev/null \ + >/dev/null 2>&1 && echo OK),$(Z14_TESTS)) + VECTOR_TESTS=vxeh2_vs VECTOR_TESTS+=vxeh2_vcvt VECTOR_TESTS+=vxeh2_vlstr diff --git a/tests/tcg/s390x/vfminmax.c b/tests/tcg/s390x/vfminmax.c new file mode 100644 index 0000000000..22629df160 --- /dev/null +++ b/tests/tcg/s390x/vfminmax.c @@ -0,0 +1,411 @@ +#define _GNU_SOURCE +#include <fenv.h> +#include <stdbool.h> +#include <stdio.h> +#include <string.h> + +/* + * vfmin/vfmax instruction execution. + */ +#define VFMIN 0xEE +#define VFMAX 0xEF + +extern char insn[6]; +asm(".pushsection .rwx,\"awx\",@progbits\n" + ".globl insn\n" + /* e7 89 a0 00 2e ef */ + "insn: vfmaxsb %v24,%v25,%v26,0\n" + ".popsection\n"); + +static void vfminmax(unsigned int op, + unsigned int m4, unsigned int m5, unsigned int m6, + void *v1, const void *v2, const void *v3) +{ + insn[3] = (m6 << 4) | m5; + insn[4] = (m4 << 4) | 0x0e; + insn[5] = op; + + asm("vl %%v25,%[v2]\n" + "vl %%v26,%[v3]\n" + "ex 0,%[insn]\n" + "vst %%v24,%[v1]\n" + : [v1] "=m" (*(char (*)[16])v1) + : [v2] "m" (*(char (*)[16])v2) + , [v3] "m" (*(char (*)[16])v3) + , [insn] "m"(insn) + : "v24", "v25", "v26"); +} + +/* + * Floating-point value classes. 
+ */ +#define N_FORMATS 3 +#define N_SIGNED_CLASSES 8 +static const size_t float_sizes[N_FORMATS] = { + /* M4 == 2: short */ 4, + /* M4 == 3: long */ 8, + /* M4 == 4: extended */ 16, +}; +static const size_t e_bits[N_FORMATS] = { + /* M4 == 2: short */ 8, + /* M4 == 3: long */ 11, + /* M4 == 4: extended */ 15, +}; +static const unsigned char signed_floats[N_FORMATS][N_SIGNED_CLASSES][2][16] = { + /* M4 == 2: short */ + { + /* -inf */ {{0xff, 0x80, 0x00, 0x00}, + {0xff, 0x80, 0x00, 0x00}}, + /* -Fn */ {{0xc2, 0x28, 0x00, 0x00}, + {0xc2, 0x29, 0x00, 0x00}}, + /* -0 */ {{0x80, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00}}, + /* +0 */ {{0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00}}, + /* +Fn */ {{0x42, 0x28, 0x00, 0x00}, + {0x42, 0x2a, 0x00, 0x00}}, + /* +inf */ {{0x7f, 0x80, 0x00, 0x00}, + {0x7f, 0x80, 0x00, 0x00}}, + /* QNaN */ {{0x7f, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xfe}}, + /* SNaN */ {{0x7f, 0xbf, 0xff, 0xff}, + {0x7f, 0xbf, 0xff, 0xfd}}, + }, + + /* M4 == 3: long */ + { + /* -inf */ {{0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -Fn */ {{0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xc0, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +Fn */ {{0x40, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x40, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +inf */ {{0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, + /* SNaN */ {{0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}, + }, + + /* M4 == 4: extended */ + { + /* -inf */ {{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -Fn */ {{0xc0, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xc0, 0x04, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +Fn */ {{0x40, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x40, 0x04, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* +inf */ {{0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, + /* SNaN */ {{0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff}, + {0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}, + }, +}; + +/* + * PoP tables as close to the original as possible. + */ +struct signed_test { + int op; + int m6; + const char *m6_desc; + const char *table[N_SIGNED_CLASSES][N_SIGNED_CLASSES]; +} signed_tests[] = { + { + .op = VFMIN, + .m6 = 0, + .m6_desc = "IEEE MinNum", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* -0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMIN, + .m6 = 1, + .m6_desc = "JAVA Math.Min()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* -0 */ "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* QNaN */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMIN, + .m6 = 2, + .m6_desc = "C-style Min Macro", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* -0 */ "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b)", "Xi: T(b)"}, + {/* QNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)"}, + }, + }, + { + .op = VFMIN, + .m6 = 3, + .m6_desc = "C++ algorithm.min()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* -0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* +0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* QNaN */ "Xi: T(a)", 
"Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, + { + .op = VFMIN, + .m6 = 4, + .m6_desc = "fmin()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(b)", "T(M(a,b))", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* -0 */ "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* +0 */ "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(b)", "T(b)", "T(b)", "T(b)", "T(M(a,b))", "T(a)", "T(a)", "Xi: T(a)"}, + {/* +inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, + + { + .op = VFMAX, + .m6 = 0, + .m6_desc = "IEEE MaxNum", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMAX, + .m6 = 1, + .m6_desc = "JAVA Math.Max()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "T(b)", "Xi: T(b*)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b*)"}, + {/* QNaN */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, + }, + }, + { + .op = VFMAX, + .m6 = 2, + .m6_desc = "C-style Max Macro", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* -0 */ "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* +0 */ "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* QNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: 
T(b)"}, + }, + }, + { + .op = VFMAX, + .m6 = 3, + .m6_desc = "C++ algorithm.max()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "Xi: T(a)", "Xi: T(a)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* QNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, + { + .op = VFMAX, + .m6 = 4, + .m6_desc = "fmax()", + .table = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(a)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* -Fn */ "T(a)", "T(M(a,b))", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* -0 */ "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* +0 */ "T(a)", "T(a)", "T(a)", "T(a)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* +Fn */ "T(a)", "T(a)", "T(a)", "T(a)", "T(M(a,b))", "T(b)", "T(a)", "Xi: T(a)"}, + {/* +inf */ "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "T(a)", "Xi: T(a)"}, + {/* QNaN */ "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(b)", "T(a)", "Xi: T(a)"}, + {/* SNaN */ "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(b)", "Xi: T(a)", "Xi: T(a)"}, + }, + }, +}; + +static void dump_v(FILE *f, const void *v, size_t n) +{ + for (int i = 0; i < n; i++) { + fprintf(f, "%02x", ((const unsigned char *)v)[i]); + } +} + +static int signed_test(struct signed_test *test, int m4, int m5, + const void *v1_exp, bool xi_exp, + const void *v2, const void *v3) +{ + size_t n = (m5 & 8) ? 
float_sizes[m4 - 2] : 16; + char v1[16]; + bool xi; + + feclearexcept(FE_ALL_EXCEPT); + vfminmax(test->op, m4, m5, test->m6, v1, v2, v3); + xi = fetestexcept(FE_ALL_EXCEPT) == FE_INVALID; + + if (memcmp(v1, v1_exp, n) != 0 || xi != xi_exp) { + fprintf(stderr, "[ FAILED ] %s ", test->m6_desc); + dump_v(stderr, v2, n); + fprintf(stderr, ", "); + dump_v(stderr, v3, n); + fprintf(stderr, ", %d, %d, %d: actual=", m4, m5, test->m6); + dump_v(stderr, v1, n); + fprintf(stderr, "/%d, expected=", (int)xi); + dump_v(stderr, v1_exp, n); + fprintf(stderr, "/%d\n", (int)xi_exp); + return 1; + } + + return 0; +} + +static void snan_to_qnan(char *v, int m4) +{ + size_t bit = 1 + e_bits[m4 - 2]; + v[bit / 8] |= 1 << (7 - (bit % 8)); +} + +int main(void) +{ + int ret = 0; + size_t i; + + for (i = 0; i < sizeof(signed_tests) / sizeof(signed_tests[0]); i++) { + struct signed_test *test = &signed_tests[i]; + int m4; + + for (m4 = 2; m4 <= 4; m4++) { + const unsigned char (*floats)[2][16] = signed_floats[m4 - 2]; + size_t float_size = float_sizes[m4 - 2]; + int m5; + + for (m5 = 0; m5 <= 8; m5 += 8) { + char v1_exp[16], v2[16], v3[16]; + bool xi_exp = false; + int pos = 0; + int i2; + + for (i2 = 0; i2 < N_SIGNED_CLASSES * 2; i2++) { + int i3; + + for (i3 = 0; i3 < N_SIGNED_CLASSES * 2; i3++) { + const char *spec = test->table[i2 / 2][i3 / 2]; + + memcpy(&v2[pos], floats[i2 / 2][i2 % 2], float_size); + memcpy(&v3[pos], floats[i3 / 2][i3 % 2], float_size); + if (strcmp(spec, "T(a)") == 0 || + strcmp(spec, "Xi: T(a)") == 0) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + } else if (strcmp(spec, "T(b)") == 0 || + strcmp(spec, "Xi: T(b)") == 0) { + memcpy(&v1_exp[pos], &v3[pos], float_size); + } else if (strcmp(spec, "Xi: T(a*)") == 0) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + snan_to_qnan(&v1_exp[pos], m4); + } else if (strcmp(spec, "Xi: T(b*)") == 0) { + memcpy(&v1_exp[pos], &v3[pos], float_size); + snan_to_qnan(&v1_exp[pos], m4); + } else if (strcmp(spec, "T(M(a,b))") == 0) { + /* + * Comparing floats is risky, since the compiler + * might generate the same instruction that we are + * testing. Compare ints instead. This works, + * because we get here only for +-Fn, and the + * corresponding test values have identical + * exponents. 
+ */ + int v2_int = *(int *)&v2[pos]; + int v3_int = *(int *)&v3[pos]; + + if ((v2_int < v3_int) == + ((test->op == VFMIN) != (v2_int < 0))) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + } else { + memcpy(&v1_exp[pos], &v3[pos], float_size); + } + } else { + fprintf(stderr, "Unexpected spec: %s\n", spec); + return 1; + } + xi_exp |= spec[0] == 'X'; + pos += float_size; + + if ((m5 & 8) || pos == 16) { + ret |= signed_test(test, m4, m5, + v1_exp, xi_exp, v2, v3); + pos = 0; + xi_exp = false; + } + } + } + + if (pos != 0) { + ret |= signed_test(test, m4, m5, v1_exp, xi_exp, v2, v3); + } + } + } + } + + return ret; +} diff --git a/tests/unit/ptimer-test.c b/tests/unit/ptimer-test.c index a80ef5aff4..04b5f4e3d0 100644 --- a/tests/unit/ptimer-test.c +++ b/tests/unit/ptimer-test.c @@ -798,64 +798,64 @@ static void add_ptimer_tests(uint8_t policy) g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/set_count policy=%s", policy_name), - g_memdup(&policy, 1), check_set_count, g_free); + g_memdup2(&policy, 1), check_set_count, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/set_limit policy=%s", policy_name), - g_memdup(&policy, 1), check_set_limit, g_free); + g_memdup2(&policy, 1), check_set_limit, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/oneshot policy=%s", policy_name), - g_memdup(&policy, 1), check_oneshot, g_free); + g_memdup2(&policy, 1), check_oneshot, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/periodic policy=%s", policy_name), - g_memdup(&policy, 1), check_periodic, g_free); + g_memdup2(&policy, 1), check_periodic, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/on_the_fly_mode_change policy=%s", policy_name), - g_memdup(&policy, 1), check_on_the_fly_mode_change, g_free); + g_memdup2(&policy, 1), check_on_the_fly_mode_change, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/on_the_fly_period_change policy=%s", policy_name), - g_memdup(&policy, 1), check_on_the_fly_period_change, g_free); + g_memdup2(&policy, 1), check_on_the_fly_period_change, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/on_the_fly_freq_change policy=%s", policy_name), - g_memdup(&policy, 1), check_on_the_fly_freq_change, g_free); + g_memdup2(&policy, 1), check_on_the_fly_freq_change, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/run_with_period_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_run_with_period_0, g_free); + g_memdup2(&policy, 1), check_run_with_period_0, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/run_with_delta_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_run_with_delta_0, g_free); + g_memdup2(&policy, 1), check_run_with_delta_0, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/periodic_with_load_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_periodic_with_load_0, g_free); + g_memdup2(&policy, 1), check_periodic_with_load_0, g_free); g_free(tmp); g_test_add_data_func_full( tmp = g_strdup_printf("/ptimer/oneshot_with_load_0 policy=%s", policy_name), - g_memdup(&policy, 1), check_oneshot_with_load_0, g_free); + g_memdup2(&policy, 1), check_oneshot_with_load_0, g_free); g_free(tmp); } diff --git a/tests/unit/test-iov.c b/tests/unit/test-iov.c index 93bda00f0e..6f7623d310 100644 --- a/tests/unit/test-iov.c +++ b/tests/unit/test-iov.c @@ -172,7 +172,7 @@ static void 
test_io(void) } iov_from_buf(iov, niov, 0, buf, sz); - siov = g_memdup(iov, sizeof(*iov) * niov); + siov = g_memdup2(iov, sizeof(*iov) * niov); if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) { perror("socketpair"); @@ -349,7 +349,7 @@ static void test_discard_front_undo(void) /* Discard zero bytes */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, 0, &undo); @@ -360,7 +360,7 @@ static void test_discard_front_undo(void) /* Discard more bytes than vector size */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); @@ -372,7 +372,7 @@ static void test_discard_front_undo(void) /* Discard entire vector */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); @@ -384,7 +384,7 @@ static void test_discard_front_undo(void) /* Discard within first element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = g_test_rand_int_range(1, iov->iov_len); @@ -396,7 +396,7 @@ static void test_discard_front_undo(void) /* Discard entire first element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, iov->iov_len, &undo); @@ -407,7 +407,7 @@ static void test_discard_front_undo(void) /* Discard within second element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_tmp = iov; iov_cnt_tmp = iov_cnt; size = iov->iov_len + g_test_rand_int_range(1, iov[1].iov_len); @@ -498,7 +498,7 @@ static void test_discard_back_undo(void) /* Discard zero bytes */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; iov_discard_back_undoable(iov, &iov_cnt_tmp, 0, &undo); iov_discard_undo(&undo); @@ -508,7 +508,7 @@ static void test_discard_back_undo(void) /* Discard more bytes than vector size */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); iov_discard_back_undoable(iov, &iov_cnt_tmp, size + 1, &undo); @@ -519,7 +519,7 @@ static void test_discard_back_undo(void) /* Discard entire vector */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov_size(iov, iov_cnt); iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); @@ -530,7 +530,7 @@ static void test_discard_back_undo(void) /* Discard within last element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = g_test_rand_int_range(1, iov[iov_cnt - 1].iov_len); iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); @@ -541,7 
+541,7 @@ static void test_discard_back_undo(void) /* Discard entire last element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov[iov_cnt - 1].iov_len; iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); @@ -552,7 +552,7 @@ static void test_discard_back_undo(void) /* Discard within second-to-last element */ iov_random(&iov, &iov_cnt); - iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); + iov_orig = g_memdup2(iov, sizeof(iov[0]) * iov_cnt); iov_cnt_tmp = iov_cnt; size = iov[iov_cnt - 1].iov_len + g_test_rand_int_range(1, iov[iov_cnt - 2].iov_len); diff --git a/tests/vm/Makefile.include b/tests/vm/Makefile.include index 5f5b1fbfe6..8d2a164552 100644 --- a/tests/vm/Makefile.include +++ b/tests/vm/Makefile.include @@ -15,9 +15,9 @@ endif EFI_AARCH64 = $(wildcard $(BUILD_DIR)/pc-bios/edk2-aarch64-code.fd) -X86_IMAGES := freebsd netbsd openbsd centos fedora haiku.x86_64 +X86_IMAGES := freebsd netbsd openbsd fedora haiku.x86_64 ifneq ($(GENISOIMAGE),) -X86_IMAGES += ubuntu.i386 centos +X86_IMAGES += centos ifneq ($(EFI_AARCH64),) ARM64_IMAGES += ubuntu.aarch64 centos.aarch64 endif @@ -48,7 +48,6 @@ vm-help vm-test: @echo " vm-build-fedora - Build QEMU in Fedora VM" ifneq ($(GENISOIMAGE),) @echo " vm-build-centos - Build QEMU in CentOS VM, with Docker" - @echo " vm-build-ubuntu.i386 - Build QEMU in ubuntu i386 VM" ifneq ($(EFI_AARCH64),) @echo " vm-build-ubuntu.aarch64 - Build QEMU in ubuntu aarch64 VM" @echo " vm-build-centos.aarch64 - Build QEMU in CentOS aarch64 VM" diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py index d7d0413df3..4fd9af10b7 100644 --- a/tests/vm/basevm.py +++ b/tests/vm/basevm.py @@ -99,6 +99,11 @@ class BaseVM(object): self._source_path = args.source_path # Allow input config to override defaults. self._config = DEFAULT_CONFIG.copy() + + # 1GB per core, minimum of 4. This is only a default. + mem = max(4, args.jobs) + self._config['memory'] = f"{mem}G" + if config != None: self._config.update(config) self.validate_ssh_keys() diff --git a/tests/vm/centos b/tests/vm/centos index 5c7bc1c1a9..097a9ca14d 100755 --- a/tests/vm/centos +++ b/tests/vm/centos @@ -1,8 +1,8 @@ #!/usr/bin/env python3 # -# CentOS image +# CentOS 8 Stream image # -# Copyright 2018 Red Hat Inc. +# Copyright 2018, 2022 Red Hat Inc. 
# # Authors: # Fam Zheng <famz@redhat.com> @@ -28,13 +28,12 @@ class CentosVM(basevm.BaseVM): tar -xf $SRC_ARCHIVE; make docker-test-block@centos8 {verbose} J={jobs} NETWORK=1; make docker-test-quick@centos8 {verbose} J={jobs} NETWORK=1; - make docker-test-mingw@fedora {verbose} J={jobs} NETWORK=1; """ def build_image(self, img): - cimg = self._download_with_cache("https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.3.2011-20201204.2.x86_64.qcow2") + cimg = self._download_with_cache("https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20220125.1.x86_64.qcow2") img_tmp = img + ".tmp" - subprocess.check_call(["ln", "-f", cimg, img_tmp]) + subprocess.check_call(['cp', '-f', cimg, img_tmp]) self.exec_qemu_img("resize", img_tmp, "50G") self.boot(img_tmp, extra_args = ["-cdrom", self.gen_cloud_init_iso()]) self.wait_ssh() diff --git a/tests/vm/centos.aarch64 b/tests/vm/centos.aarch64 index 96c450f8be..2de7ef6992 100755 --- a/tests/vm/centos.aarch64 +++ b/tests/vm/centos.aarch64 @@ -20,150 +20,38 @@ import time import traceback import aarch64vm + DEFAULT_CONFIG = { 'cpu' : "max", 'machine' : "virt,gic-version=max", - 'install_cmds' : "yum install -y make ninja-build git python3 gcc gcc-c++ flex bison, "\ - "yum install -y glib2-devel perl pixman-devel zlib-devel, "\ - "alternatives --set python /usr/bin/python3, "\ - "sudo dnf config-manager "\ - "--add-repo=https://download.docker.com/linux/centos/docker-ce.repo,"\ - "sudo dnf install --nobest -y docker-ce.aarch64,"\ - "systemctl enable docker", + 'install_cmds' : ( + "dnf config-manager --set-enabled powertools, " + "dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo, " + "dnf install -y make ninja-build git python38 gcc gcc-c++ flex bison "\ + "glib2-devel perl pixman-devel zlib-devel docker-ce.aarch64, " + "systemctl enable docker, " + ), # We increase beyond the default time since during boot # it can take some time (many seconds) to log into the VM. 'ssh_timeout' : 60, } + class CentosAarch64VM(basevm.BaseVM): - name = "centos.aarch64" + name = "centos8.aarch64" arch = "aarch64" - login_prompt = "localhost login:" - prompt = '[root@localhost ~]#' - image_name = "CentOS-8-aarch64-1905-dvd1.iso" - image_link = "http://mirrors.usc.edu/pub/linux/distributions/centos/8.0.1905/isos/aarch64/" + image_name = "CentOS-Stream-GenericCloud-8-20220125.1.aarch64.qcow2" + image_link = "https://cloud.centos.org/centos/8-stream/aarch64/images/" image_link += image_name BUILD_SCRIPT = """ set -e; cd $(mktemp -d); - sudo chmod a+r /dev/vdb; - tar --checkpoint=.10 -xf /dev/vdb; + export SRC_ARCHIVE=/dev/vdb; + sudo chmod a+r $SRC_ARCHIVE; + tar -xf $SRC_ARCHIVE; ./configure {configure_opts}; make --output-sync {target} -j{jobs} {verbose}; """ - def set_key_perm(self): - """Set permissions properly on certain files to allow - ssh access.""" - self.console_wait_send(self.prompt, - "/usr/sbin/restorecon -R -v /root/.ssh\n") - self.console_wait_send(self.prompt, - "/usr/sbin/restorecon -R -v "\ - "/home/{}/.ssh\n".format(self._config["guest_user"])) - - def create_kickstart(self): - """Generate the kickstart file used to generate the centos image.""" - # Start with the template for the kickstart. - ks_file = self._source_path + "/tests/vm/centos-8-aarch64.ks" - subprocess.check_call("cp {} ./ks.cfg".format(ks_file), shell=True) - # Append the ssh keys to the kickstart file - # as the post processing phase of installation. 
- with open("ks.cfg", "a") as f: - # Add in the root pw and guest user. - rootpw = "rootpw --plaintext {}\n" - f.write(rootpw.format(self._config["root_pass"])) - add_user = "user --groups=wheel --name={} "\ - "--password={} --plaintext\n" - f.write(add_user.format(self._config["guest_user"], - self._config["guest_pass"])) - # Add the ssh keys. - f.write("%post --log=/root/ks-post.log\n") - f.write("mkdir -p /root/.ssh\n") - addkey = 'echo "{}" >> /root/.ssh/authorized_keys\n' - addkey_cmd = addkey.format(self._config["ssh_pub_key"]) - f.write(addkey_cmd) - f.write('mkdir -p /home/{}/.ssh\n'.format(self._config["guest_user"])) - addkey = 'echo "{}" >> /home/{}/.ssh/authorized_keys\n' - addkey_cmd = addkey.format(self._config["ssh_pub_key"], - self._config["guest_user"]) - f.write(addkey_cmd) - f.write("%end\n") - # Take our kickstart file and create an .iso from it. - # The .iso will be provided to qemu as we boot - # from the install dvd. - # Anaconda will recognize the label "OEMDRV" and will - # start the automated installation. - gen_iso_img = 'genisoimage -output ks.iso -volid "OEMDRV" ks.cfg' - subprocess.check_call(gen_iso_img, shell=True) - - def wait_for_shutdown(self): - """We wait for qemu to shutdown the VM and exit. - While this happens we display the console view - for easier debugging.""" - # The image creation is essentially done, - # so whether or not the wait is successful we want to - # wait for qemu to exit (the self.wait()) before we return. - try: - self.console_wait("reboot: Power down") - except Exception as e: - sys.stderr.write("Exception hit\n") - if isinstance(e, SystemExit) and e.code == 0: - return 0 - traceback.print_exc() - finally: - self.wait() - - def build_base_image(self, dest_img): - """Run through the centos installer to create - a base image with name dest_img.""" - # We create the temp image, and only rename - # to destination when we are done. - img = dest_img + ".tmp" - # Create an empty image. - # We will provide this as the install destination. - qemu_img_create = "qemu-img create {} 50G".format(img) - subprocess.check_call(qemu_img_create, shell=True) - - # Create our kickstart file to be fed to the installer. - self.create_kickstart() - # Boot the install dvd with the params as our ks.iso - os_img = self._download_with_cache(self.image_link) - dvd_iso = "centos-8-dvd.iso" - subprocess.check_call(["cp", "-f", os_img, dvd_iso]) - extra_args = "-cdrom ks.iso" - extra_args += " -drive file={},if=none,id=drive1,cache=writeback" - extra_args += " -device virtio-blk,drive=drive1,bootindex=1" - extra_args = extra_args.format(dvd_iso).split(" ") - self.boot(img, extra_args=extra_args) - self.console_wait_send("change the selection", "\n") - # We seem to need to hit esc (chr(27)) twice to abort the - # media check, which takes a long time. - # Waiting a bit seems to be more reliable before hitting esc. - self.console_wait("Checking") - time.sleep(5) - self.console_wait_send("Checking", chr(27)) - time.sleep(5) - self.console_wait_send("Checking", chr(27)) - print("Found Checking") - # Give sufficient time for the installer to create the image. - self.console_init(timeout=7200) - self.wait_for_shutdown() - os.rename(img, dest_img) - print("Done with base image build: {}".format(dest_img)) - - def check_create_base_img(self, img_base, img_dest): - """Create a base image using the installer. - We will use the base image if it exists. 
- This helps cut down on install time in case we - need to restart image creation, - since the base image creation can take a long time.""" - if not os.path.exists(img_base): - print("Generate new base image: {}".format(img_base)) - self.build_base_image(img_base); - else: - print("Use existing base image: {}".format(img_base)) - # Save a copy of the base image and copy it to dest. - # which we will use going forward. - subprocess.check_call(["cp", img_base, img_dest]) def boot(self, img, extra_args=None): aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64) @@ -185,42 +73,28 @@ class CentosAarch64VM(basevm.BaseVM): super(CentosAarch64VM, self).boot(img, extra_args=extra_args) def build_image(self, img): + cimg = self._download_with_cache(self.image_link) img_tmp = img + ".tmp" - self.check_create_base_img(img + ".base", img_tmp) - - # Boot the new image for the first time to finish installation. - self.boot(img_tmp) - self.console_init() - self.console_wait_send(self.login_prompt, "root\n") - self.console_wait_send("Password:", - "{}\n".format(self._config["root_pass"])) - - self.set_key_perm() - self.console_wait_send(self.prompt, "rpm -q centos-release\n") - enable_adapter = "sed -i 's/ONBOOT=no/ONBOOT=yes/g'" \ - " /etc/sysconfig/network-scripts/ifcfg-enp0s1\n" - self.console_wait_send(self.prompt, enable_adapter) - self.console_wait_send(self.prompt, "ifup enp0s1\n") - self.console_wait_send(self.prompt, - 'echo "qemu ALL=(ALL) NOPASSWD:ALL" | '\ - 'sudo tee /etc/sudoers.d/qemu\n') - self.console_wait(self.prompt) - - # Rest of the commands we issue through ssh. + subprocess.run(['cp', '-f', cimg, img_tmp]) + self.exec_qemu_img("resize", img_tmp, "50G") + self.boot(img_tmp, extra_args = ["-cdrom", self.gen_cloud_init_iso()]) self.wait_ssh(wait_root=True) + self.ssh_root_check("touch /etc/cloud/cloud-init.disabled") # If the user chooses *not* to do the second phase, # then we will jump right to the graceful shutdown if self._config['install_cmds'] != "": install_cmds = self._config['install_cmds'].split(',') for cmd in install_cmds: - self.ssh_root(cmd) + self.ssh_root_check(cmd) + self.ssh_root("poweroff") - self.wait_for_shutdown() + self.wait() os.rename(img_tmp, img) print("image creation complete: {}".format(img)) return 0 + if __name__ == "__main__": defaults = aarch64vm.get_config_defaults(CentosAarch64VM, DEFAULT_CONFIG) sys.exit(basevm.main(CentosAarch64VM, defaults)) diff --git a/tests/vm/ubuntu.aarch64 b/tests/vm/ubuntu.aarch64 index b291945a7e..666947393b 100755 --- a/tests/vm/ubuntu.aarch64 +++ b/tests/vm/ubuntu.aarch64 @@ -32,9 +32,13 @@ DEFAULT_CONFIG = { class UbuntuAarch64VM(ubuntuvm.UbuntuVM): name = "ubuntu.aarch64" arch = "aarch64" - image_name = "ubuntu-18.04-server-cloudimg-arm64.img" - image_link = "https://cloud-images.ubuntu.com/releases/18.04/release/" + image_name - image_sha256="0fdcba761965735a8a903d8b88df8e47f156f48715c00508e4315c506d7d3cb1" + # NOTE: The Ubuntu 20.04 cloud images are periodically updated. The + # fixed image chosen below is the latest release at time of + # writing. Using a rolling latest instead would mean that the SHA + # would be incorrect at an indeterminate point in the future. 
+ image_name = "focal-server-cloudimg-arm64.img" + image_link = "https://cloud-images.ubuntu.com/focal/20220615/" + image_name + image_sha256="95a027336e197debe88c92ff2e554598e23c409139e1e750b71b3b820b514832" BUILD_SCRIPT = """ set -e; cd $(mktemp -d); diff --git a/tests/vm/ubuntu.i386 b/tests/vm/ubuntu.i386 deleted file mode 100755 index 47681b6f87..0000000000 --- a/tests/vm/ubuntu.i386 +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -# -# Ubuntu i386 image -# -# Copyright 2017 Red Hat Inc. -# -# Authors: -# Fam Zheng <famz@redhat.com> -# -# This code is licensed under the GPL version 2 or later. See -# the COPYING file in the top-level directory. -# - -import sys -import basevm -import ubuntuvm - -DEFAULT_CONFIG = { - 'install_cmds' : "apt-get update,"\ - "apt-get build-dep -y qemu,"\ - "apt-get install -y libfdt-dev language-pack-en ninja-build", -} - -class UbuntuX86VM(ubuntuvm.UbuntuVM): - name = "ubuntu.i386" - arch = "i386" - image_link="https://cloud-images.ubuntu.com/releases/bionic/"\ - "release-20191114/ubuntu-18.04-server-cloudimg-i386.img" - image_sha256="28969840626d1ea80bb249c08eef1a4533e8904aa51a327b40f37ac4b4ff04ef" - BUILD_SCRIPT = """ - set -e; - cd $(mktemp -d); - sudo chmod a+r /dev/vdb; - tar -xf /dev/vdb; - ./configure {configure_opts}; - make --output-sync {target} -j{jobs} {verbose}; - """ - -if __name__ == "__main__": - sys.exit(basevm.main(UbuntuX86VM, DEFAULT_CONFIG)) @@ -268,6 +268,7 @@ dbus_display_add_client_ready(GObject *source_object, } g_dbus_object_manager_server_set_connection(dbus_display->server, conn); + g_dbus_connection_start_message_processing(conn); } @@ -300,7 +301,8 @@ dbus_display_add_client(int csock, Error **errp) g_dbus_connection_new(G_IO_STREAM(conn), guid, - G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER, + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER | + G_DBUS_CONNECTION_FLAGS_DELAY_MESSAGE_PROCESSING, NULL, dbus_display->add_client_cancellable, dbus_display_add_client_ready, @@ -2390,6 +2390,10 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) opts->u.gtk.grab_on_hover) { gtk_menu_item_activate(GTK_MENU_ITEM(s->grab_on_hover_item)); } + if (opts->u.gtk.has_show_tabs && + opts->u.gtk.show_tabs) { + gtk_menu_item_activate(GTK_MENU_ITEM(s->show_tabs_item)); + } gd_clipboard_init(s); } |
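
Aside on the T(M(a,b)) cases in tests/tcg/s390x/vfminmax.c above: the test deliberately compares the raw integer encodings of the two operands rather than the float values, so the compiler cannot itself emit the vfmin/vfmax-style instruction under test. A minimal sketch of why that ordering trick is valid for finite same-sign binary32 operands (helper name f32_bits and the sample values are illustrative only, not part of the commit):

/* Sketch only: for two finite IEEE-754 binary32 values of the same sign,
 * the raw bit patterns order the same way as the magnitudes, so the
 * min/max can be picked with an integer compare. For negative values the
 * ordering of the encodings is reversed relative to the values, which is
 * what the (v2_int < 0) term in the test accounts for. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t f32_bits(float f)
{
    uint32_t u;

    memcpy(&u, &f, sizeof(u));  /* type-pun without aliasing issues */
    return u;
}

int main(void)
{
    float a = 42.0f, b = 42.5f;  /* same sign, same exponent, as in the test vectors */

    /* positive operands: smaller encoding <=> smaller value */
    assert((f32_bits(a) < f32_bits(b)) == (a < b));
    /* negative operands: the encoding order is reversed */
    assert((f32_bits(-a) < f32_bits(-b)) == (-a > -b));
    return 0;
}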
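
Aside on the bit-layout tables in tests/tcg/loongarch64/test_fclass.c above: fclass.s/fclass.d return exactly one of the ten one-hot class bits defined at the top of that file. A software decode of the binary32 case, mirroring the single-precision table (function name classify_float32 is illustrative only, not part of the commit):

/* Sketch only: classify a raw binary32 encoding into the same one-hot
 * class bits that the test_fclass.c table documents. */
#include <stdint.h>

#define FLOAT_CLASS_SIGNALING_NAN      0x001
#define FLOAT_CLASS_QUIET_NAN          0x002
#define FLOAT_CLASS_NEGATIVE_INFINITY  0x004
#define FLOAT_CLASS_NEGATIVE_NORMAL    0x008
#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
#define FLOAT_CLASS_NEGATIVE_ZERO      0x020
#define FLOAT_CLASS_POSITIVE_INFINITY  0x040
#define FLOAT_CLASS_POSITIVE_NORMAL    0x080
#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
#define FLOAT_CLASS_POSITIVE_ZERO      0x200

static int classify_float32(uint32_t bits)
{
    int neg = bits >> 31;                /* sign, bit 31 */
    uint32_t exp = (bits >> 23) & 0xff;  /* biased exponent, bits 30..23 */
    uint32_t frac = bits & 0x7fffff;     /* fraction, bits 22..0 */

    if (exp == 0xff) {
        if (frac == 0) {
            return neg ? FLOAT_CLASS_NEGATIVE_INFINITY
                       : FLOAT_CLASS_POSITIVE_INFINITY;
        }
        /* fraction MSB (bit 22) set means quiet NaN, clear means signaling */
        return (frac & 0x400000) ? FLOAT_CLASS_QUIET_NAN
                                 : FLOAT_CLASS_SIGNALING_NAN;
    }
    if (exp == 0) {
        if (frac == 0) {
            return neg ? FLOAT_CLASS_NEGATIVE_ZERO : FLOAT_CLASS_POSITIVE_ZERO;
        }
        return neg ? FLOAT_CLASS_NEGATIVE_SUBNORMAL
                   : FLOAT_CLASS_POSITIVE_SUBNORMAL;
    }
    return neg ? FLOAT_CLASS_NEGATIVE_NORMAL : FLOAT_CLASS_POSITIVE_NORMAL;
}

int main(void)
{
    /* 0xFF800000 is the -infinity example value from the table */
    return classify_float32(0xFF800000) == FLOAT_CLASS_NEGATIVE_INFINITY ? 0 : 1;
}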